Code Example #1
    def __init__(
        self,
        in_features: int,
        out_features: int,
        widths: List[int] = [32, 32],
        activation: nn.Module = nn.ReLU,
    ):

        super().__init__(
            ConvBnAct(
                in_features,
                widths[0],
                activation=activation,
                kernel_size=3,
                stride=2,
            ),
            *[
                ConvBnAct(
                    in_features,
                    out_features,
                    activation=activation,
                    kernel_size=3,
                ) for in_features, out_features in zip(widths, widths[1:])
            ],
            ConvBnAct(widths[-1],
                      out_features,
                      activation=activation,
                      kernel_size=3),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
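
Rough spatial bookkeeping for the stem above, as a small sketch with an illustrative input size (not part of the original): the first ConvBnAct uses stride=2 and the final MaxPool2d uses stride=2, so the stem reduces the input resolution by a factor of 4.

h = 224                        # illustrative input height/width
after_conv = h // 2            # 112 after the first stride-2 ConvBnAct
after_pool = after_conv // 2   # 56 after the stride-2 MaxPool2d
print(after_pool)              # 56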
Code Example #2
def test_ConvBnAct():
    conv = ConvBnAct(32, 64, kernel_size=3)
    assert conv.conv is not None
    assert conv.bn is not None
    assert conv.act is not None

    assert type(conv.conv) is Conv2dPad
    assert type(conv.bn) is nn.BatchNorm2d
    assert type(conv.act) is nn.ReLU

    conv = ConvBnAct(32, 64, kernel_size=3, activation=None)
    assert type(conv.conv) is Conv2dPad
    assert type(conv.bn) is nn.BatchNorm2d
    with pytest.raises(AttributeError):
        conv.act

    conv = ConvBnAct(32, 64, kernel_size=3, normalization=None)
    assert type(conv.conv) is Conv2dPad
    assert type(conv.act) is nn.ReLU
    with pytest.raises(AttributeError):
        conv.bn

    conv = ConvBnAct(32, 64, kernel_size=3, conv=nn.Conv2d,
                     activation=nn.SELU, normalization=nn.Identity)
    assert type(conv.conv) is nn.Conv2d
    assert type(conv.act) is nn.SELU
    assert type(conv.bn) is nn.Identity
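
A minimal usage sketch built from the constructor calls exercised in the test above. It assumes Conv2dPad applies "same"-style padding so the spatial size is preserved; that behaviour is an assumption, not something shown in the snippet.

import torch

layer = ConvBnAct(32, 64, kernel_size=3)   # Conv2dPad -> BatchNorm2d -> ReLU
x = torch.randn(2, 32, 56, 56)
out = layer(x)
# expected torch.Size([2, 64, 56, 56]) under the "same"-padding assumption
print(out.shape)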
Code Example #3
 def __init__(
     self,
     in_features: int,
     out_features: int,
     features: int = None,
     activation: nn.Module = ReLUInPlace,
     reduction: int = 4,
     stride=1,
     shortcut=ResNetShorcut,
     **kwargs,
 ):
     super().__init__(in_features,
                      out_features,
                      activation,
                      stride,
                      shortcut=shortcut)
     self.features = out_features // reduction if features is None else features
     self.block = nn.Sequential(
         ConvBnAct(in_features,
                   self.features,
                   activation=activation,
                   kernel_size=1),
         ConvBnAct(
             self.features,
             self.features,
             activation=activation,
             kernel_size=3,
             stride=stride,
             **kwargs,
         ),
         ConvBnAct(self.features,
                   out_features,
                   activation=None,
                   kernel_size=1),
     )
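
Channel-width walk-through for the bottleneck above, with illustrative numbers that are not taken from the original:

in_features, out_features, reduction = 64, 256, 4
features = out_features // reduction   # 64: width of the 1x1 -> 3x3 squeeze
# block: 1x1 (64 -> 64), 3x3 (64 -> 64, optional stride), 1x1 (64 -> 256, no activation)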
Code Example #4
 def __init__(self, in_features: int, out_features: int,  activation: nn.Module = ReLUInPlace):
     super().__init__(
         ConvBnAct(in_features, out_features // 2,
                   activation=activation, kernel_size=3, stride=2),
         ConvBnAct(out_features // 2, out_features // 2,
                   activation=activation, kernel_size=3),
         ConvBnAct(out_features // 2,
                   out_features, activation=activation, kernel_size=3),
         nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
     )
Code Example #5
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 stride: int = 1,
                 expansion: int = 6,
                 activation: nn.Module = nn.SiLU,
                 drop_rate: float = 0.2,
                 se: bool = True,
                 kernel_size: int = 3,
                 **kwargs):
        super().__init__()

        expanded_features = in_features * expansion
        # do not apply the residual when downsampling or when the channel counts differ
        # in MobileNet we do not use a shortcut
        self.should_apply_residual = stride == 1 and in_features == out_features
        self.block = nn.Sequential(
            OrderedDict({
                "exp":
                ConvBnAct(
                    in_features,
                    expanded_features,
                    activation=activation,
                    kernel_size=1,
                ) if expansion > 1 else nn.Identity(),
                "depth":
                ConvBnAct(
                    expanded_features,
                    expanded_features,
                    activation=activation,
                    kernel_size=kernel_size,
                    stride=stride,
                    groups=expanded_features,
                    **kwargs,
                ),
                # apply se after depth-wise
                "att":
                SpatialSE(
                    expanded_features,
                    reduced_features=in_features // 4,
                    activation=activation,
                ) if se else nn.Identity(),
                "point":
                nn.Sequential(
                    ConvBnAct(
                        expanded_features,
                        out_features,
                        kernel_size=1,
                        activation=None,
                    )),
                "drop":
                StochasticDepth(drop_rate, mode="batch")
                if self.should_apply_residual and drop_rate > 0 else
                nn.Identity(),
            }))
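
The excerpt only shows __init__; a plausible forward for this kind of inverted-residual block would look like the sketch below. This is an assumption based on should_apply_residual and the usual MobileNetV2/EfficientNet pattern, not the project's actual code.

    def forward(self, x):
        # sketch only: the library's real forward is not shown in this excerpt
        res = x
        x = self.block(x)
        if self.should_apply_residual:
            # identity shortcut only when stride == 1 and in/out channels match
            x = x + res
        return x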
Code Example #6
    def __init__(self, in_features: int, features: int, radix: int, groups: int):
        """Implementation of Split Attention proposed in `"ResNeSt: Split-Attention Networks" <https://arxiv.org/abs/2004.08955>`_

        Grouped convolutions have been shown to be empirically better (ResNeXt). The main idea is to apply attention group-wise.

        `Einops <https://github.com/arogozhnikov/einops>`_ is used to improve the readability of this module

        Args:
            in_features (int): number of input features
            features (int): attention's features
            radix (int): number of subgroups (`radix`) in the groups
            groups (int): number of groups, each group contains `radix` subgroups
        """
        super().__init__()
        self.radix, self.groups = radix, groups
        self.att = nn.Sequential(
            # this produces \hat{U} (the sum over the radix splits)
            Reduce("b r (k c) h w -> b (k c) h w", reduction="sum", r=radix, k=groups),
            # eq 1
            nn.AdaptiveAvgPool2d(1),
            # the two following conv layers are G in the paper
            ConvBnAct(
                in_features,
                features,
                kernel_size=1,
                groups=groups,
                activation=ReLUInPlace,
                bias=True,
            ),
            nn.Conv2d(features, in_features * radix, kernel_size=1, groups=groups),
            Rearrange("b (r k c) h w -> b r k c h w", r=radix, k=groups),
            nn.Softmax(dim=1) if radix > 1 else nn.Sigmoid(),
            Rearrange("b r k c h w -> b r (k c) h w", r=radix, k=groups),
        )
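
A shape walk-through of the attention branch above, assuming the module receives the radix splits stacked on a separate dimension, i.e. (batch, radix, groups * c, h, w); all sizes below are illustrative and the random tensor stands in for the grouped 1x1 convolutions:

import torch
from einops.layers.torch import Reduce, Rearrange

b, radix, groups, c = 2, 2, 4, 8
x = torch.randn(b, radix, groups * c, 7, 7)

# eq. 1: sum over the radix splits, then global average pooling
u = Reduce("b r (k c) h w -> b (k c) h w", reduction="sum", r=radix, k=groups)(x)
s = torch.nn.AdaptiveAvgPool2d(1)(u)              # (2, 32, 1, 1)

# the two grouped 1x1 convs map groups*c -> groups*c*radix; random values are a
# stand-in for their output, reshaped so softmax runs across the radix splits
a = Rearrange("b (r k c) h w -> b r k c h w", r=radix, k=groups)(
    torch.randn(b, groups * c * radix, 1, 1))
a = torch.softmax(a, dim=1)
a = Rearrange("b r k c h w -> b r (k c) h w", r=radix, k=groups)(a)
print(a.shape)  # torch.Size([2, 2, 32, 1, 1]): one weight per split and channel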
Code Example #7
File: __init__.py  Project: abhishekvermasg/glasses
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 stride: int = 1,
                 expansion: int = 6,
                 activation: nn.Module = nn.SiLU,
                 drop_rate: float = 0.2,
                 se: bool = True,
                 kernel_size: int = 3,
                 **kwargs):
        super().__init__()

        reduced_features = in_features // 4
        expanded_features = in_features * expansion
        # do not apply the residual when downsampling or when the channel counts differ
        # in MobileNet we do not use a shortcut
        self.should_apply_residual = stride == 1 and in_features == out_features
        self.block = nn.Sequential(
            OrderedDict({
                'exp':
                ConvBnAct(in_features,
                          expanded_features,
                          activation=activation,
                          kernel_size=1) if expansion > 1 else nn.Identity(),
                'depth':
                ConvBnAct(expanded_features,
                          expanded_features,
                          activation=activation,
                          kernel_size=kernel_size,
                          stride=stride,
                          groups=expanded_features,
                          **kwargs),
                # apply se after depth-wise
                'att':
                ChannelSE(expanded_features,
                          reduced_features=reduced_features,
                          activation=activation) if se else nn.Identity(),
                'point':
                nn.Sequential(
                    ConvBnAct(expanded_features,
                              out_features,
                              kernel_size=1,
                              activation=None)),
                'drop':
                nn.Dropout2d(drop_rate) if self.should_apply_residual
                and drop_rate > 0 else nn.Identity()
            }))
Code Example #8
    def __init__(self,
                 in_channels: int = 3,
                 widths: List[int] = [32, 16, 24, 40, 80, 112, 192, 320, 1280],
                 depths: List[int] = [1, 2, 2, 3, 3, 4, 1],
                 strides: List[int] = [2, 1, 2, 2, 2, 1, 2, 1],
                 expansions: List[int] = [1, 6, 6, 6, 6, 6, 6],
                 kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
                 se: List[bool] = [True, True, True, True, True, True, True],
                 drop_rate: float = 0.2,
                 stem: nn.Module = EfficientNetStem,
                 activation: nn.Module = partial(nn.SiLU, inplace=True),
                 **kwargs):
        super().__init__()

        self.widths, self.depths = widths, depths
        self.strides, self.expansions, self.kernel_sizes = (
            strides,
            expansions,
            kernel_sizes,
        )
        self.stem = stem(
            in_channels,
            widths[0],
            activation=activation,
            kernel_size=3,
            stride=strides[0],
        )
        strides = strides[1:]
        self.in_out_widths = list(zip(widths, widths[1:-1]))

        self.layers = nn.ModuleList([
            *[
                EfficientNetLayer(
                    in_features,
                    out_features,
                    depth=n,
                    stride=s,
                    expansion=t,
                    kernel_size=k,
                    se=se,
                    drop_rate=drop_rate,
                    activation=activation,
                    **kwargs,
                ) for (in_features, out_features), n, s, t, k, se in zip(
                    self.in_out_widths,
                    depths,
                    strides,
                    expansions,
                    kernel_sizes,
                    se,
                )
            ]
        ])

        self.layers.append(
            ConvBnAct(self.widths[-2],
                      self.widths[-1],
                      activation=activation,
                      kernel_size=1))
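
How the per-stage hyperparameters above line up, mirroring the zip in the constructor; the lists are copied from the default arguments and the printout is purely illustrative:

widths = [32, 16, 24, 40, 80, 112, 192, 320, 1280]
depths = [1, 2, 2, 3, 3, 4, 1]
strides = [2, 1, 2, 2, 2, 1, 2, 1]

in_out_widths = list(zip(widths, widths[1:-1]))   # stem uses widths[0], head uses widths[-1]
for (cin, cout), n, s in zip(in_out_widths, depths, strides[1:]):
    print(f"{cin:>4} -> {cout:<4} depth={n} stride={s}")
# prints the 7 stages, e.g. "  32 -> 16   depth=1 stride=1"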
Code Example #9
File: __init__.py  Project: abhishekvermasg/glasses
 def __init__(self,
              in_features: int,
              out_features: int,
              activation: nn.Module = partial(nn.ReLU, inplace=True),
              *args,
              **kwargs):
     super().__init__(
         ConvBnAct(in_features,
                   out_features,
                   kernel_size=3,
                   activation=activation,
                   *args,
                   **kwargs),
         ConvBnAct(out_features,
                   out_features,
                   kernel_size=3,
                   activation=activation,
                   *args,
                   **kwargs))