Example #1
 def __init__(self, num_classes=10):
     super(AlexNet, self).__init__()
     self.features = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2),
         nn.Conv2d(64, 192, kernel_size=3, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2),
         nn.Conv2d(192, 384, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(384, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(256, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
     )
     self.fc_layers = nn.Sequential(
         nn.Dropout(0.6),
         nn.Linear(4096, 2048),
         nn.ReLU(inplace=True),
         nn.Dropout(0.6),
         nn.Linear(2048, 2048),
         nn.ReLU(inplace=True),
         nn.Linear(2048, num_classes),
     )
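
The forward pass is not part of this snippet; a minimal sketch that matches the layers above (assuming 3x32x32 inputs such as CIFAR-10, which makes the flattened feature size 256*4*4 = 4096) could look like this:

 def forward(self, x):
     # Convolutional feature extractor
     x = self.features(x)
     # Flatten to (batch, 4096) and run the fully connected classifier
     x = x.view(x.size(0), -1)
     return self.fc_layers(x)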
Example #2
    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)

        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False))

        # SE layers
        self.fc1 = nn.Conv2d(planes, planes // 16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes // 16, planes, kernel_size=1)
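
Only the constructor is shown; a hedged sketch of how a pre-activation SE block typically wires these layers together (assuming torch and torch.nn.functional as F are imported) is:

    def forward(self, x):
        out = F.relu(self.bn1(x))
        # The 1x1 projection shortcut only exists when the shape changes; otherwise use the identity
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        # Squeeze: global average pooling; excitation: fc1 -> ReLU -> fc2 -> sigmoid
        w = F.adaptive_avg_pool2d(out, 1)
        w = torch.sigmoid(self.fc2(F.relu(self.fc1(w))))
        # Rescale channels and add the shortcut
        return out * w + shortcut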
Example #3
 def __init__(self, channels):
     super(ResidualBlock, self).__init__()
     self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
     self.bn1 = nn.BatchNorm2d(channels)
     self.prelu = nn.PReLU()
     self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
     self.bn2 = nn.BatchNorm2d(channels)
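
The forward method is not shown; a minimal sketch consistent with these layers (an SRGAN-style residual block that adds the input back without a final activation) is:

 def forward(self, x):
     residual = self.prelu(self.bn1(self.conv1(x)))
     residual = self.bn2(self.conv2(residual))
     # Identity skip connection
     return x + residual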
Example #4
 def __init__(self, in_planes, out_planes, stride=1):
     super(CellB, self).__init__()
     self.stride = stride
     # Left branch
     self.sep_conv1 = SepConv(in_planes,
                              out_planes,
                              kernel_size=7,
                              stride=stride)
     self.sep_conv2 = SepConv(in_planes,
                              out_planes,
                              kernel_size=3,
                              stride=stride)
     # Right branch
     self.sep_conv3 = SepConv(in_planes,
                              out_planes,
                              kernel_size=5,
                              stride=stride)
     if stride == 2:
         self.conv1 = nn.Conv2d(in_planes,
                                out_planes,
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias=False)
         self.bn1 = nn.BatchNorm2d(out_planes)
     # Reduce channels
     self.conv2 = nn.Conv2d(2 * out_planes,
                            out_planes,
                            kernel_size=1,
                            stride=1,
                            padding=0,
                            bias=False)
     self.bn2 = nn.BatchNorm2d(out_planes)
Example #5
 def __init__(self):
     super(LeNet, self).__init__()
     self.conv1 = nn.Conv2d(3, 6, 5)
     self.conv2 = nn.Conv2d(6, 16, 5)
     self.fc1   = nn.Linear(16*5*5, 120)
     self.fc2   = nn.Linear(120, 84)
     self.fc3   = nn.Linear(84, 10)
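
No pooling layer is registered in this constructor, so the forward pass presumably pools functionally; a sketch assuming torch.nn.functional is imported as F:

 def forward(self, x):
     # Two conv + ReLU + 2x2 max-pool stages (32x32 input -> 16x5x5 feature map)
     out = F.max_pool2d(F.relu(self.conv1(x)), 2)
     out = F.max_pool2d(F.relu(self.conv2(out)), 2)
     out = out.view(out.size(0), -1)
     out = F.relu(self.fc1(out))
     out = F.relu(self.fc2(out))
     return self.fc3(out)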
Example #6
 def __init__(self, inplanes, planes, stride=1, dilation=1):
     super(BottleneckX, self).__init__()
     cardinality = BottleneckX.cardinality
     bottle_planes = planes * cardinality // 32
     self.conv1 = nn.Conv2d(inplanes,
                            bottle_planes,
                            kernel_size=1,
                            bias=False)
     self.bn1 = BatchNorm(bottle_planes)
     self.conv2 = nn.Conv2d(
         bottle_planes,
         bottle_planes,
         kernel_size=3,
         stride=stride,
         padding=dilation,
         bias=False,
         dilation=dilation,
         groups=cardinality,
     )
     self.bn2 = BatchNorm(bottle_planes)
     self.conv3 = nn.Conv2d(bottle_planes,
                            planes,
                            kernel_size=1,
                            bias=False)
     self.bn3 = BatchNorm(planes)
     self.relu = nn.ReLU(inplace=True)
     self.stride = stride
Example #7
    def __init__(self, inp: int, oup: int, stride: int) -> None:
        super().__init__()

        if not (1 <= stride <= 3):
            raise ValueError("illegal stride value")
        self.stride = stride

        branch_features = oup // 2
        assert (self.stride != 1) or (inp == branch_features << 1)

        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp,
                                    inp,
                                    kernel_size=3,
                                    stride=self.stride,
                                    padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp,
                          branch_features,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            self.branch1 = nn.Sequential()

        self.branch2 = nn.Sequential(
            nn.Conv2d(
                inp if (self.stride > 1) else branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(
                branch_features,
                branch_features,
                kernel_size=3,
                stride=self.stride,
                padding=1,
            ),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(
                branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )
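
The forward pass is not included; a sketch of the usual ShuffleNetV2 unit logic, assuming torch is imported and a channel_shuffle(x, groups) helper is defined elsewhere in the same file:

    def forward(self, x):
        if self.stride == 1:
            # Split channels in half: one half passes through, the other goes through branch2
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            # Strided unit: both branches downsample, doubling the channel count
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
        # Shuffle so information mixes across the two halves in the next unit
        return channel_shuffle(out, 2)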
Example #8
    def __init__(self,
                 in_planes,
                 cardinality=32,
                 bottleneck_width=4,
                 stride=1):
        super(Block, self).__init__()
        group_width = cardinality * bottleneck_width
        self.conv1 = nn.Conv2d(in_planes,
                               group_width,
                               kernel_size=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(group_width)
        self.conv2 = nn.Conv2d(group_width,
                               group_width,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               groups=cardinality,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(group_width)
        self.conv3 = nn.Conv2d(group_width,
                               self.expansion * group_width,
                               kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * group_width)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * group_width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * group_width,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(self.expansion * group_width))
Example #9
    def __init__(self, w_in, w_out, stride, group_width, bottleneck_ratio,
                 se_ratio):
        super(Block, self).__init__()
        # 1x1
        w_b = int(round(w_out * bottleneck_ratio))
        self.conv1 = nn.Conv2d(w_in, w_b, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(w_b)
        # 3x3
        num_groups = w_b // group_width
        self.conv2 = nn.Conv2d(w_b,
                               w_b,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               groups=num_groups,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(w_b)
        # se
        self.with_se = se_ratio > 0
        if self.with_se:
            w_se = int(round(w_in * se_ratio))
            self.se = SE(w_b, w_se)
        # 1x1
        self.conv3 = nn.Conv2d(w_b, w_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(w_out)

        self.shortcut = nn.Sequential()
        if stride != 1 or w_in != w_out:
            self.shortcut = nn.Sequential(
                nn.Conv2d(w_in,
                          w_out,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(w_out))
Example #10
    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride,
                 first_layer):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth

        self.conv1 = nn.Conv2d(last_planes,
                               in_planes,
                               kernel_size=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv2 = nn.Conv2d(in_planes,
                               in_planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               groups=32,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        self.conv3 = nn.Conv2d(in_planes,
                               out_planes + dense_depth,
                               kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes + dense_depth)

        self.shortcut = nn.Sequential()
        if first_layer:
            self.shortcut = nn.Sequential(
                nn.Conv2d(last_planes,
                          out_planes + dense_depth,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(out_planes + dense_depth))
Example #11
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(self.expansion * planes))
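
A minimal forward sketch consistent with this basic residual block (assuming torch.nn.functional is imported as F):

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Projection (or identity) shortcut added before the final activation
        out += self.shortcut(x)
        return F.relu(out)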
Example #12
 def __init__(self,
              in_ch,
              out_ch,
              kernel_size,
              stride,
              expansion_factor,
              bn_momentum=0.1):
     super(_InvertedResidual, self).__init__()
     assert stride in [1, 2]
     assert kernel_size in [3, 5]
     mid_ch = in_ch * expansion_factor
     self.apply_residual = in_ch == out_ch and stride == 1
     self.layers = nn.Sequential(
         # Pointwise
         nn.Conv2d(in_ch, mid_ch, 1, bias=False),
         nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
         nn.ReLU(inplace=True),
         # Depthwise
         nn.Conv2d(
             mid_ch,
             mid_ch,
             kernel_size,
             padding=kernel_size // 2,
             stride=stride,
             groups=mid_ch,
             bias=False,
         ),
         nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
         nn.ReLU(inplace=True),
         # Linear pointwise; note that there is no activation here
         nn.Conv2d(mid_ch, out_ch, 1, bias=False),
         nn.BatchNorm2d(out_ch, momentum=bn_momentum),
     )
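
The forward pass is not shown; given the apply_residual flag set above, it would plausibly be:

 def forward(self, x):
     # Add the input back only when shapes match (stride 1 and in_ch == out_ch)
     if self.apply_residual:
         return self.layers(x) + x
     return self.layers(x)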
Example #13
    def __init__(self, in_planes, out_planes, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride

        mid_planes = out_planes // 4
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes,
                               mid_planes,
                               kernel_size=1,
                               groups=g,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        self.conv2 = nn.Conv2d(mid_planes,
                               mid_planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               groups=mid_planes,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes,
                               out_planes,
                               kernel_size=1,
                               groups=groups,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
Example #14
 def __init__(self):
     super(Discriminator, self).__init__()
     self.net = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=3, padding=1),
         nn.LeakyReLU(0.2),
         nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
         nn.BatchNorm2d(64),
         nn.LeakyReLU(0.2),
         nn.Conv2d(64, 128, kernel_size=3, padding=1),
         nn.BatchNorm2d(128),
         nn.LeakyReLU(0.2),
         nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1),
         nn.BatchNorm2d(128),
         nn.LeakyReLU(0.2),
         nn.Conv2d(128, 256, kernel_size=3, padding=1),
         nn.BatchNorm2d(256),
         nn.LeakyReLU(0.2),
         nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1),
         nn.BatchNorm2d(256),
         nn.LeakyReLU(0.2),
         nn.Conv2d(256, 512, kernel_size=3, padding=1),
         nn.BatchNorm2d(512),
         nn.LeakyReLU(0.2),
         nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
         nn.BatchNorm2d(512),
         nn.LeakyReLU(0.2),
         nn.AdaptiveAvgPool2d(1),
         nn.Conv2d(512, 1024, kernel_size=1),
         nn.LeakyReLU(0.2),
         nn.Conv2d(1024, 1, kernel_size=1),
     )
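
The forward pass is not included here; a sketch that matches this fully convolutional discriminator (assuming torch is imported) is:

 def forward(self, x):
     batch_size = x.size(0)
     # The stack ends in a 1x1 score map; squash it to one probability per image
     return torch.sigmoid(self.net(x).view(batch_size))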
Example #15
 def __init__(self, inplanes, planes, stride=1, dilation=1):
     super(Bottleneck, self).__init__()
     expansion = Bottleneck.expansion
     bottle_planes = planes // expansion
     self.conv1 = nn.Conv2d(inplanes,
                            bottle_planes,
                            kernel_size=1,
                            bias=False)
     self.bn1 = BatchNorm(bottle_planes)
     self.conv2 = nn.Conv2d(
         bottle_planes,
         bottle_planes,
         kernel_size=3,
         stride=stride,
         padding=dilation,
         bias=False,
         dilation=dilation,
     )
     self.bn2 = BatchNorm(bottle_planes)
     self.conv3 = nn.Conv2d(bottle_planes,
                            planes,
                            kernel_size=1,
                            bias=False)
     self.bn3 = BatchNorm(planes)
     self.relu = nn.ReLU(inplace=True)
     self.stride = stride
Example #16
 def __init__(self, num_classes: int = 1000) -> None:
     super(QuantizationAlexNet, self).__init__()
     self.features = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         nn.Conv2d(64, 192, kernel_size=5, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         nn.Conv2d(192, 384, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(384, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(256, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
     )
     self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
     self.classifier = nn.Sequential(
         nn.Dropout(),
         nn.Linear(256 * 6 * 6, 4096),
         nn.ReLU(inplace=True),
         nn.Dropout(),
         nn.Linear(4096, 4096),
         nn.ReLU(inplace=True),
         nn.Linear(4096, num_classes),
     )
Example #17
    def __init__(self, block=BasicBlock, num_classes=10):
        super(DLA, self).__init__()
        self.base = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(True)
        )

        self.layer1 = nn.Sequential(
            nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(True)
        )

        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(True)
        )

        self.layer3 = Tree(block,  32,  64, level=1, stride=1)
        self.layer4 = Tree(block,  64, 128, level=2, stride=2)
        self.layer5 = Tree(block, 128, 256, level=2, stride=2)
        self.layer6 = Tree(block, 256, 512, level=1, stride=2)
        self.linear = nn.Linear(512, num_classes)
Example #18
 def __init__(self):
     super().__init__()
     self.conv1 = nn.Conv2d(3, 6, 5)
     self.pool = nn.MaxPool2d(2, 2)
     self.conv2 = nn.Conv2d(6, 16, 5)
     self.fc1 = nn.Linear(16 * 5 * 5, 120)
     self.fc2 = nn.Linear(120, 84)
     self.fc3 = nn.Linear(84, 10)
Example #19
 def __init__(self, input_channels: int, squeeze_factor: int = 4):
     super().__init__()
     squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8)
     self.fc1 = nn.Conv2d(input_channels, squeeze_channels, 1)
     self.relu = nn.ReLU(inplace=True)
     self.fc2 = nn.Conv2d(squeeze_channels, input_channels, 1)
     self.adaptive_avg_pool2d = nn.AdaptiveAvgPool2d(1)
     self.hardsigmoid = nn.Hardsigmoid(inplace=True)
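
Only the layer definitions are shown; a hedged sketch of the usual squeeze-and-excitation forward pass built from these modules:

 def forward(self, x):
     # Squeeze to 1x1, excite through fc1 -> ReLU -> fc2 -> hard sigmoid, then rescale channels
     scale = self.adaptive_avg_pool2d(x)
     scale = self.fc2(self.relu(self.fc1(scale)))
     return self.hardsigmoid(scale) * x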
Example #20
    def __init__(
        self,
        in_chs,
        mid_chs,
        out_chs,
        dw_kernel_size=3,
        stride=1,
        act_layer=nn.ReLU,
        se_ratio=0.0,
    ):
        super(GhostBottleneck, self).__init__()
        has_se = se_ratio is not None and se_ratio > 0.0
        self.stride = stride

        # Point-wise expansion
        self.ghost1 = GhostModule(in_chs, mid_chs, relu=True)

        # Depth-wise convolution
        if self.stride > 1:
            self.conv_dw = nn.Conv2d(
                mid_chs,
                mid_chs,
                dw_kernel_size,
                stride=stride,
                padding=(dw_kernel_size - 1) // 2,
                groups=mid_chs,
                bias=False,
            )
            self.bn_dw = nn.BatchNorm2d(mid_chs)

        # Squeeze-and-excitation
        if has_se:
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio)
        else:
            self.se = None

        # Point-wise linear projection
        self.ghost2 = GhostModule(mid_chs, out_chs, relu=False)

        # shortcut
        if in_chs == out_chs and self.stride == 1:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_chs,
                    in_chs,
                    dw_kernel_size,
                    stride=stride,
                    padding=(dw_kernel_size - 1) // 2,
                    groups=in_chs,
                    bias=False,
                ),
                nn.BatchNorm2d(in_chs),
                nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_chs),
            )
Example #21
    def __init__(self, input_shape=(80, 64), residual_in_channels=256):
        super(Discriminator, self).__init__()

        self.convLayer1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=residual_in_channels // 2,
                kernel_size=(3, 3),
                stride=(1, 1),
                padding=(1, 1),
            ),
            GLU(),
        )

        # Downsampling Layers
        self.downSample1 = self.downsample(
            in_channels=residual_in_channels // 2,
            out_channels=residual_in_channels,
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=1,
        )

        self.downSample2 = self.downsample(
            in_channels=residual_in_channels,
            out_channels=residual_in_channels * 2,
            kernel_size=(3, 3),
            stride=[2, 2],
            padding=1,
        )

        self.downSample3 = self.downsample(
            in_channels=residual_in_channels * 2,
            out_channels=residual_in_channels * 4,
            kernel_size=[3, 3],
            stride=[2, 2],
            padding=1,
        )

        self.downSample4 = self.downsample(
            in_channels=residual_in_channels * 4,
            out_channels=residual_in_channels * 4,
            kernel_size=[1, 10],
            stride=(1, 1),
            padding=(0, 2),
        )

        # Conv Layer
        self.outputConvLayer = nn.Sequential(
            nn.Conv2d(
                in_channels=residual_in_channels * 4,
                out_channels=1,
                kernel_size=(1, 3),
                stride=[1, 1],
                padding=[0, 1],
            )
        )
Example #22
    def __init__(self):
        super(Discriminator, self).__init__()

        self.convLayer1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=128,
                kernel_size=(3, 3),
                stride=(1, 1),
                padding=(1, 1),
            ),
            GLU(),
        )

        # DownSample Layer
        self.downSample1 = self.downSample(
            in_channels=128,
            out_channels=256,
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=1,
        )

        self.downSample2 = self.downSample(
            in_channels=256,
            out_channels=512,
            kernel_size=(3, 3),
            stride=[2, 2],
            padding=1,
        )

        self.downSample3 = self.downSample(
            in_channels=512,
            out_channels=1024,
            kernel_size=[3, 3],
            stride=[2, 2],
            padding=1,
        )

        self.downSample4 = self.downSample(
            in_channels=1024,
            out_channels=1024,
            kernel_size=[1, 5],
            stride=(1, 1),
            padding=(0, 2),
        )

        # Conv Layer
        self.outputConvLayer = nn.Sequential(
            nn.Conv2d(
                in_channels=1024,
                out_channels=1,
                kernel_size=(1, 3),
                stride=[1, 1],
                padding=[0, 1],
            ))
Example #23
 def __init__(self, in_channels, out_channels):
     super().__init__()
     self.double_conv = nn.Sequential(
         nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
         nn.BatchNorm2d(out_channels),
         nn.ReLU(inplace=True),
         nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
         nn.BatchNorm2d(out_channels),
         nn.ReLU(inplace=True),
     )
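
The forward pass for this block is a single call to the sequential stack; a sketch, with a hypothetical usage example in the comment:

 def forward(self, x):
     # e.g. DoubleConv(64, 128) maps (N, 64, H, W) -> (N, 128, H, W)
     return self.double_conv(x)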
Example #24
 def __init__(self, in_channels, se_channels):
     super(SE, self).__init__()
     self.se1 = nn.Conv2d(in_channels,
                          se_channels,
                          kernel_size=1,
                          bias=True)
     self.se2 = nn.Conv2d(se_channels,
                          in_channels,
                          kernel_size=1,
                          bias=True)
Example #25
    def __init__(self, cfgs, num_classes=1000, width=1.0, dropout=0.2):
        super(GhostNet, self).__init__()
        # setting of inverted residual blocks
        self.cfgs = cfgs
        self.dropout = dropout

        # building first layer
        output_channel = _make_divisible(16 * width, 4)
        self.conv_stem = nn.Conv2d(3, output_channel, 3, 2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(output_channel)
        self.act1 = nn.ReLU(inplace=True)
        input_channel = output_channel

        # building inverted residual blocks
        stages = []
        block = GhostBottleneck
        for cfg in self.cfgs:
            layers = []
            for k, exp_size, c, se_ratio, s in cfg:
                output_channel = _make_divisible(c * width, 4)
                hidden_channel = _make_divisible(exp_size * width, 4)
                layers.append(
                    block(
                        input_channel,
                        hidden_channel,
                        output_channel,
                        k,
                        s,
                        se_ratio=se_ratio,
                    ))
                input_channel = output_channel
            stages.append(nn.Sequential(*layers))

        output_channel = _make_divisible(exp_size * width, 4)
        stages.append(
            nn.Sequential(ConvBnAct(input_channel, output_channel, 1)))
        input_channel = output_channel

        self.blocks = nn.Sequential(*stages)

        # building last several layers
        output_channel = 1280
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.conv_head = nn.Conv2d(input_channel,
                                   output_channel,
                                   1,
                                   1,
                                   0,
                                   bias=True)
        self.act2 = nn.ReLU(inplace=True)
        self.classifier = nn.Linear(output_channel, num_classes)
        self.dropout = nn.Dropout(p=self.dropout)
Example #26
 def __init__(self, in_planes, growth_rate):
     super(Bottleneck, self).__init__()
     self.bn1 = nn.BatchNorm2d(in_planes)
     self.conv1 = nn.Conv2d(in_planes,
                            4 * growth_rate,
                            kernel_size=1,
                            bias=False)
     self.bn2 = nn.BatchNorm2d(4 * growth_rate)
     self.conv2 = nn.Conv2d(4 * growth_rate,
                            growth_rate,
                            kernel_size=3,
                            padding=1,
                            bias=False)
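
A forward sketch consistent with this DenseNet-style bottleneck (assuming torch and torch.nn.functional as F are imported); the concatenation is what implements the dense connectivity:

 def forward(self, x):
     out = self.conv1(F.relu(self.bn1(x)))
     out = self.conv2(F.relu(self.bn2(out)))
     # Append the growth_rate new feature maps to the incoming ones
     return torch.cat([out, x], 1)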
Example #27
    def __init__(
        self,
        stages_repeats: List[int],
        stages_out_channels: List[int],
        num_classes: int = 1000,
        inverted_residual: Callable[..., nn.Module] = InvertedResidual,
    ) -> None:
        super().__init__()

        if len(stages_repeats) != 3:
            raise ValueError(
                "expected stages_repeats as list of 3 positive ints")
        if len(stages_out_channels) != 5:
            raise ValueError(
                "expected stages_out_channels as list of 5 positive ints")
        self._stage_out_channels = stages_out_channels

        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Static annotations for mypy
        self.stage2: nn.Sequential
        self.stage3: nn.Sequential
        self.stage4: nn.Sequential
        stage_names = ["stage{}".format(i) for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(
                stage_names, stages_repeats, self._stage_out_channels[1:]):
            seq = [inverted_residual(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(
                    inverted_residual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )

        self.fc = nn.Linear(output_channels, num_classes)
Example #28
    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout,
                         use_bias):
        """Construct a convolutional block.

        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_layer          -- normalization layer
            use_dropout (bool)  -- whether to use dropout layers.
            use_bias (bool)     -- whether the conv layer uses a bias term

        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == "reflect":
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == "replicate":
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == "zero":
            p = 1
        else:
            raise NotImplementedError("padding [%s] is not implemented" %
                                      padding_type)

        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            norm_layer(dim),
            nn.ReLU(True),
        ]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == "reflect":
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == "replicate":
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == "zero":
            p = 1
        else:
            raise NotImplementedError("padding [%s] is not implemented" %
                                      padding_type)
        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            norm_layer(dim),
        ]

        return nn.Sequential(*conv_block)
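
In the surrounding residual block this helper is typically called from __init__ and wrapped with a skip connection in forward; a hedged sketch, assuming the result is stored as self.conv_block:

    def forward(self, x):
        # Skip connection around the padded conv-norm(-dropout)-conv stack built above
        return x + self.conv_block(x)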
Example #29
    def __init__(self, in_channel, out_channel, kernel, stride, padding):
        super(Down2d, self).__init__()

        self.c1 = nn.Conv2d(in_channel,
                            out_channel,
                            kernel_size=kernel,
                            stride=stride,
                            padding=padding)
        self.n1 = nn.InstanceNorm2d(out_channel)
        self.c2 = nn.Conv2d(in_channel,
                            out_channel,
                            kernel_size=kernel,
                            stride=stride,
                            padding=padding)
        self.n2 = nn.InstanceNorm2d(out_channel)
Example #30
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio=1,
                 se_ratio=0.,
                 drop_rate=0.):
        super(Block, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio

        # Expansion
        channels = expand_ratio * in_channels
        self.conv1 = nn.Conv2d(in_channels,
                               channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(channels)

        # Depthwise conv
        self.conv2 = nn.Conv2d(channels,
                               channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(1 if kernel_size == 3 else 2),
                               groups=channels,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

        # SE layers
        se_channels = int(in_channels * se_ratio)
        self.se = SE(channels, se_channels)

        # Output
        self.conv3 = nn.Conv2d(channels,
                               out_channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)

        # Skip connection if in and out shapes are the same (MV-V2 style)
        self.has_skip = (stride == 1) and (in_channels == out_channels)
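
The forward pass is not shown; a sketch of the usual MBConv ordering, using F.silu as a stand-in for the swish activation and only noting where drop_rate would apply (both are assumptions here):

    def forward(self, x):
        # Expansion (identity when expand_ratio == 1), depthwise conv, SE, linear projection
        out = x if self.expand_ratio == 1 else F.silu(self.bn1(self.conv1(x)))
        out = F.silu(self.bn2(self.conv2(out)))
        out = self.se(out)
        out = self.bn3(self.conv3(out))
        if self.has_skip:
            # self.drop_rate would normally gate a drop-connect / stochastic-depth step here
            out = out + x
        return out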