Example #1
    def __init__(self, inp: int, oup: int, stride: int) -> None:
        super().__init__()

        if not (1 <= stride <= 3):
            raise ValueError("illegal stride value")
        self.stride = stride

        branch_features = oup // 2
        assert (self.stride != 1) or (inp == branch_features << 1)

        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp,
                                    inp,
                                    kernel_size=3,
                                    stride=self.stride,
                                    padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp,
                          branch_features,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            self.branch1 = nn.Sequential()

        self.branch2 = nn.Sequential(
            nn.Conv2d(
                inp if (self.stride > 1) else branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(
                branch_features,
                branch_features,
                kernel_size=3,
                stride=self.stride,
                padding=1,
            ),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(
                branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )
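The block above relies on a `depthwise_conv` helper that the snippet does not show. A minimal sketch, assuming the torchvision ShuffleNetV2 convention of a grouped convolution with `groups` equal to the input channel count:

    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        # groups=i gives each input channel its own filter (depthwise)
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)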
Example #2
 def __init__(
     self,
     in_channels=1,
     out_channels=32,
     input_dim=312,
     hidden_dim=32,
     output_dim=10,
 ):
     super(cnn1d_ser, self).__init__()
     self.classifier = nn.Sequential(
         nn.Conv1d(in_channels, out_channels, 5, stride=1, padding=2),
         nn.BatchNorm1d(out_channels),
         nn.ReLU(),
         nn.Dropout(0.5),
         nn.Conv1d(out_channels, out_channels, 5, stride=1, padding=2),
         nn.BatchNorm1d(out_channels),
         nn.ReLU(),
         nn.Dropout(0.5),
         nn.Flatten(),
         nn.Linear(input_dim * out_channels, hidden_dim),
         nn.BatchNorm1d(hidden_dim),
         nn.ReLU(),
         nn.Dropout(0.5),
         nn.Linear(hidden_dim, output_dim),
     )
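A quick shape check for this classifier, assuming the defaults above; the input length must equal `input_dim`, because the flattened activations feed a fixed-size Linear layer. Only `__init__` is shown, so the Sequential is invoked directly:

import torch

model = cnn1d_ser()
model.eval()  # BatchNorm1d requires batch > 1 in training mode
x = torch.randn(1, 1, 312)  # (batch, in_channels, input_dim)
print(model.classifier(x).shape)  # torch.Size([1, 10])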
Example #3
    def __init__(self, num_features, num_classes):
        super(Wav2Letter, self).__init__()

        self.layers = nn.Sequential(
            nn.Conv1d(num_features, 250, 48, 2),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 2000, 32),
            nn.ReLU(),
            nn.Conv1d(2000, 2000, 1),
            nn.ReLU(),
            nn.Conv1d(2000, num_classes, 1),
        )
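A shape check under illustrative settings (13 input features, 29 output tokens): the stride-2 first convolution yields floor((T - 48) / 2) + 1 time steps, the seven kernel-7 valid convolutions each remove 6 more, and the kernel-32 layer removes 31, so T = 400 gives 177, then 135, then 104. Only `__init__` is shown, so the Sequential is called directly:

import torch

model = Wav2Letter(num_features=13, num_classes=29)  # illustrative sizes
x = torch.randn(1, 13, 400)  # (batch, features, time)
print(model.layers(x).shape)  # torch.Size([1, 29, 104])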
Example #4
def get_act(act):
    if act == "relu":
        return nn.ReLU()
    elif act == "lrelu":
        return nn.LeakyReLU()
    else:
        # fall back to ReLU for unrecognized activation names
        return nn.ReLU()
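Usage is a plain string lookup; any unrecognized name falls back to ReLU:

act = get_act("lrelu")  # nn.LeakyReLU()
act = get_act("other")  # unknown names also yield nn.ReLU()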
Example #5
 def __init__(self,
              in_ch,
              out_ch,
              kernel_size,
              stride,
              expansion_factor,
              bn_momentum=0.1):
     super(_InvertedResidual, self).__init__()
     assert stride in [1, 2]
     assert kernel_size in [3, 5]
     mid_ch = in_ch * expansion_factor
      self.apply_residual = in_ch == out_ch and stride == 1
     self.layers = nn.Sequential(
         # Pointwise
         nn.Conv2d(in_ch, mid_ch, 1, bias=False),
         nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
         nn.ReLU(inplace=True),
         # Depthwise
         nn.Conv2d(
             mid_ch,
             mid_ch,
             kernel_size,
             padding=kernel_size // 2,
             stride=stride,
             groups=mid_ch,
             bias=False,
         ),
         nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
         nn.ReLU(inplace=True),
         # Linear pointwise, Note that there's no activation
         nn.Conv2d(mid_ch, out_ch, 1, bias=False),
         nn.BatchNorm2d(out_ch, momentum=bn_momentum),
     )
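The `apply_residual` flag is consumed in `forward`; a sketch matching the torchvision MNASNet block this code appears to follow:

 def forward(self, input):
     # skip connection only when stride == 1 and channels match
     if self.apply_residual:
         return self.layers(input) + input
     return self.layers(input)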
Example #6
    def __init__(self, block=BasicBlock, num_classes=10):
        super(DLA, self).__init__()
        self.base = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(True)
        )

        self.layer1 = nn.Sequential(
            nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(True)
        )

        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(True)
        )

        self.layer3 = Tree(block,  32,  64, level=1, stride=1)
        self.layer4 = Tree(block,  64, 128, level=2, stride=2)
        self.layer5 = Tree(block, 128, 256, level=2, stride=2)
        self.layer6 = Tree(block, 256, 512, level=1, stride=2)
        self.linear = nn.Linear(512, num_classes)
Example #7
 def __init__(
     self, input_size=784, hidden_size1=128, hidden_size2=64, num_classes=10
 ):
     super(Net, self).__init__()
     self.l1 = nn.Linear(input_size, hidden_size1)
     self.relu1 = nn.ReLU()
     self.l2 = nn.Linear(hidden_size1, hidden_size2)
     self.relu2 = nn.ReLU()
     self.l3 = nn.Linear(hidden_size2, num_classes)
Example #8
 def __init__(self, in_channels, out_channels):
     super().__init__()
     self.double_conv = nn.Sequential(
         nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
         nn.BatchNorm2d(out_channels),
         nn.ReLU(inplace=True),
         nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
         nn.BatchNorm2d(out_channels),
         nn.ReLU(inplace=True),
     )
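A shape check for this U-Net-style double convolution; the class name below is hypothetical, since only `__init__` appears:

import torch

block = DoubleConv(3, 64)  # hypothetical name for the class above
x = torch.randn(2, 3, 64, 64)
print(block.double_conv(x).shape)  # torch.Size([2, 64, 64, 64])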
Example #9
    def __init__(self, cfgs, num_classes=1000, width=1.0, dropout=0.2):
        super(GhostNet, self).__init__()
        # setting of inverted residual blocks
        self.cfgs = cfgs
        self.dropout = dropout

        # building first layer
        output_channel = _make_divisible(16 * width, 4)
        self.conv_stem = nn.Conv2d(3, output_channel, 3, 2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(output_channel)
        self.act1 = nn.ReLU(inplace=True)
        input_channel = output_channel

        # building inverted residual blocks
        stages = []
        block = GhostBottleneck
        for cfg in self.cfgs:
            layers = []
            for k, exp_size, c, se_ratio, s in cfg:
                output_channel = _make_divisible(c * width, 4)
                hidden_channel = _make_divisible(exp_size * width, 4)
                layers.append(
                    block(
                        input_channel,
                        hidden_channel,
                        output_channel,
                        k,
                        s,
                        se_ratio=se_ratio,
                    ))
                input_channel = output_channel
            stages.append(nn.Sequential(*layers))

        output_channel = _make_divisible(exp_size * width, 4)  # exp_size: last expansion size from the loop above
        stages.append(
            nn.Sequential(ConvBnAct(input_channel, output_channel, 1)))
        input_channel = output_channel

        self.blocks = nn.Sequential(*stages)

        # building last several layers
        output_channel = 1280
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.conv_head = nn.Conv2d(input_channel,
                                   output_channel,
                                   1,
                                   1,
                                   0,
                                   bias=True)
        self.act2 = nn.ReLU(inplace=True)
        self.classifier = nn.Linear(output_channel, num_classes)
        self.dropout = nn.Dropout(p=self.dropout)  # note: rebinds the float stored above to a Dropout module
Example #10
    def __init__(
        self,
        stages_repeats: List[int],
        stages_out_channels: List[int],
        num_classes: int = 1000,
        inverted_residual: Callable[..., nn.Module] = InvertedResidual,
    ) -> None:
        super().__init__()

        if len(stages_repeats) != 3:
            raise ValueError(
                "expected stages_repeats as list of 3 positive ints")
        if len(stages_out_channels) != 5:
            raise ValueError(
                "expected stages_out_channels as list of 5 positive ints")
        self._stage_out_channels = stages_out_channels

        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Static annotations for mypy
        self.stage2: nn.Sequential
        self.stage3: nn.Sequential
        self.stage4: nn.Sequential
        stage_names = ["stage{}".format(i) for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(
                stage_names, stages_repeats, self._stage_out_channels[1:]):
            seq = [inverted_residual(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(
                    inverted_residual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )

        self.fc = nn.Linear(output_channels, num_classes)
Example #11
 def __init__(self, num_speakers=2) -> None:
     super(simple_CNN, self).__init__()
     self.convs = nn.Sequential(
         nn.Conv1d(1, 16, 100, stride=10),
         nn.BatchNorm1d(16),
         nn.ReLU(),
         nn.Conv1d(16, 64, 21, stride=10),
         nn.BatchNorm1d(64),
         nn.ReLU(),
         nn.Conv1d(64, 64, 5, stride=5),
         nn.BatchNorm1d(64),
         nn.ReLU(),
     )
     self.linears = nn.Sequential(nn.Linear(1 * 6 * 64, 128),
                                  nn.Linear(128, num_speakers))
Example #12
 def __init__(self, inplanes, planes, stride=1, dilation=1):
     super(BottleneckX, self).__init__()
     cardinality = BottleneckX.cardinality
     bottle_planes = planes * cardinality // 32
     self.conv1 = nn.Conv2d(inplanes,
                            bottle_planes,
                            kernel_size=1,
                            bias=False)
     self.bn1 = BatchNorm(bottle_planes)
     self.conv2 = nn.Conv2d(
         bottle_planes,
         bottle_planes,
         kernel_size=3,
         stride=stride,
         padding=dilation,
         bias=False,
         dilation=dilation,
         groups=cardinality,
     )
     self.bn2 = BatchNorm(bottle_planes)
     self.conv3 = nn.Conv2d(bottle_planes,
                            planes,
                            kernel_size=1,
                            bias=False)
     self.bn3 = BatchNorm(planes)
     self.relu = nn.ReLU(inplace=True)
     self.stride = stride
Example #13
 def __init__(
     self,
     inplanes: int,
     planes: int,
     stride: int = 1,
     downsample: Optional[nn.Module] = None,
     groups: int = 1,
     base_width: int = 64,
     dilation: int = 1,
     norm_layer: Optional[Callable[..., nn.Module]] = None,
 ) -> None:
     super(Bottleneck, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     width = int(planes * (base_width / 64.0)) * groups
     # Both self.conv2 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv1x1(inplanes, width)
     self.bn1 = norm_layer(width)
     self.conv2 = conv3x3(width, width, stride, groups, dilation)
     self.bn2 = norm_layer(width)
     self.conv3 = conv1x1(width, planes * self.expansion)
     self.bn3 = norm_layer(planes * self.expansion)
     self.relu = nn.ReLU()
     self.downsample = downsample
     self.stride = stride
Example #14
 def __init__(
     self,
     inplanes,
     planes,
     stride=1,
     downsample=None,
     groups=1,
     base_width=64,
     dilation=1,
     norm_layer=None,
 ):
     super(Bottleneck, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     width = int(planes * (base_width / 64.0)) * groups
     # Both self.conv2 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv1x1(inplanes, width)
     self.bn1 = norm_layer(width)
     self.conv2 = conv3x3(width, width, stride, groups, dilation)
     self.bn2 = norm_layer(width)
     self.conv3 = conv1x1(width, planes * self.expansion)
     self.bn3 = norm_layer(planes * self.expansion)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #15
 def __init__(
     self,
     inplanes: int,
     planes: int,
     stride: int = 1,
     downsample: Optional[nn.Module] = None,
     groups: int = 1,
     base_width: int = 64,
     dilation: int = 1,
     norm_layer: Optional[Callable[..., nn.Module]] = None,
 ) -> None:
     super(BasicBlock, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     if groups != 1 or base_width != 64:
         raise ValueError("BasicBlock only supports groups=1 and base_width=64")
     if dilation > 1:
         raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
     # Both self.conv1 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = norm_layer(planes)
     self.relu = nn.ReLU()
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = norm_layer(planes)
     self.downsample = downsample
     self.stride = stride
Example #16
 def __init__(
     self,
     inplanes,
     planes,
     stride=1,
     downsample=None,
     groups=1,
     base_width=64,
     dilation=1,
 ):
     super(IBasicBlock, self).__init__()
     if groups != 1 or base_width != 64:
         raise ValueError(
             "BasicBlock only supports groups=1 and base_width=64")
     if dilation > 1:
         raise NotImplementedError(
             "Dilation > 1 not supported in BasicBlock")
     self.bn1 = nn.BatchNorm2d(
         inplanes,
         eps=1e-05,
     )
     self.conv1 = conv3x3(inplanes, planes)
     self.bn2 = nn.BatchNorm2d(
         planes,
         eps=1e-05,
     )
      self.prelu = nn.PReLU(planes)  # nn.ReLU(planes) would wrongly pass planes as the inplace flag
     self.conv2 = conv3x3(planes, planes, stride)
     self.bn3 = nn.BatchNorm2d(
         planes,
         eps=1e-05,
     )
     self.downsample = downsample
     self.stride = stride
Example #17
    def __init__(self, num_classes: int = 5) -> None:
        super(PoseNet, self).__init__()

        self.conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.MaxPool_3a_3x3 = nn.MaxPool2d(3, stride=2)
        self.conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.MaxPool_5a_3x3 = nn.MaxPool2d(kernel_size=3, stride=2)  # stem

        self.Mixed_5b = self._generate_inception_module(192, 320, 1, Mixed_5b)
        self.block35 = self._generate_inception_module(320, 320, 1, block35)

        self.conv_ls1 = BasicConv2d(320, 320, kernel_size=3, stride=2, padding=1)
        self.MaxPool_3x3_ls1 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.Mixed_6a = self._generate_inception_module(320, 1088, 1, Mixed_6a)
        self.block17 = self._generate_inception_module(1088, 1088, 1, block17)

        self.conv_ls2 = BasicConv2d(1088, 1088, kernel_size=3, stride=2)

        self.Mixed_7a = self._generate_inception_module(1088, 2080, 1, Mixed_7a)
        self.block8 = self._generate_inception_module(2080, 2080, 1, block8)

        self.conv_ls3 = BasicConv2d(3488, 2080, kernel_size=1)
        self.Conv2d_7b_1x1 = BasicConv2d(2080, 1536, kernel_size=1)
        self.AvgPool_1a_8x8 = nn.AvgPool2d(kernel_size=[8, 8])

        self.dense = nn.Linear(1536, num_classes)
        self.relu = nn.ReLU(inplace=True)
Example #18
def make_layers(cfg: List[Union[str, int]],
                batch_norm: bool = False) -> nn.Sequential:
    layers: List[nn.Module] = []
    in_channels = 3
    for v in cfg:
        if v == "M":
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            v = cast(int, v)
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
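For example, the torchvision VGG-11 configuration, where integers are 3x3 convolution widths and "M" marks a 2x2 max-pool:

cfg: List[Union[str, int]] = [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"]
features = make_layers(cfg, batch_norm=True)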
Example #19
 def __init__(self, inplanes, planes, stride=1, dilation=1):
     super(Bottleneck, self).__init__()
     expansion = Bottleneck.expansion
     bottle_planes = planes // expansion
     self.conv1 = nn.Conv2d(inplanes,
                            bottle_planes,
                            kernel_size=1,
                            bias=False)
     self.bn1 = BatchNorm(bottle_planes)
     self.conv2 = nn.Conv2d(
         bottle_planes,
         bottle_planes,
         kernel_size=3,
         stride=stride,
         padding=dilation,
         bias=False,
         dilation=dilation,
     )
     self.bn2 = BatchNorm(bottle_planes)
     self.conv3 = nn.Conv2d(bottle_planes,
                            planes,
                            kernel_size=1,
                            bias=False)
     self.bn3 = BatchNorm(planes)
     self.relu = nn.ReLU(inplace=True)
     self.stride = stride
Example #20
 def __init__(self):
     super(ModuleDict, self).__init__()
     self.choices = nn.ModuleDict(
         {"conv": nn.Conv2d(10, 10, 3), "pool": nn.MaxPool2d(3)}
     )
     self.activations = nn.ModuleDict(
         {"relu": nn.ReLU(), "prelu": nn.PReLU()}
     )
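The matching `forward` indexes both dicts by key, as in the PyTorch `nn.ModuleDict` documentation this snippet mirrors:

 def forward(self, x, choice, act):
     x = self.choices[choice](x)
     x = self.activations[act](x)
     return x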
Example #21
 def __init__(self, input_channels: int, squeeze_factor: int = 4):
     super().__init__()
     squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8)
     self.fc1 = nn.Conv2d(input_channels, squeeze_channels, 1)
     self.relu = nn.ReLU(inplace=True)
     self.fc2 = nn.Conv2d(squeeze_channels, input_channels, 1)
     self.adaptive_avg_pool2d = nn.AdaptiveAvgPool2d(1)
     self.hardsigmoid = nn.Hardsigmoid(inplace=True)
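These layers imply the usual squeeze-and-excitation forward pass (global pool, bottleneck, channel-wise rescale); a sketch consistent with torchvision's MobileNetV3 version:

 def forward(self, input):
     scale = self.adaptive_avg_pool2d(input)    # (N, C, 1, 1)
     scale = self.relu(self.fc1(scale))         # squeeze
     scale = self.hardsigmoid(self.fc2(scale))  # excite, gated to (0, 1)
     return scale * input                       # reweight channels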
Example #22
 def __init__(self, num_classes: int = 1000) -> None:
     super(QuantizationAlexNet, self).__init__()
     self.features = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         nn.Conv2d(64, 192, kernel_size=5, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         nn.Conv2d(192, 384, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(384, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(256, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
     )
     self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
     self.classifier = nn.Sequential(
         nn.Dropout(),
         nn.Linear(256 * 6 * 6, 4096),
         nn.ReLU(inplace=True),
         nn.Dropout(),
         nn.Linear(4096, 4096),
         nn.ReLU(inplace=True),
         nn.Linear(4096, num_classes),
     )
Example #23
 def __init__(self, num_classes=10):
     super(AlexNet, self).__init__()
     self.features = nn.Sequential(
          nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=2),
          nn.ReLU(inplace=True),
          nn.MaxPool2d(kernel_size=2),
          nn.Conv2d(64, 192, kernel_size=3, padding=2),
          nn.ReLU(inplace=True),
          nn.MaxPool2d(kernel_size=2),
          nn.Conv2d(192, 384, kernel_size=3, padding=1),
          nn.ReLU(inplace=True),
          nn.Conv2d(384, 256, kernel_size=3, padding=1),
          nn.ReLU(inplace=True),
          nn.Conv2d(256, 256, kernel_size=3, padding=1),
          nn.ReLU(inplace=True),
          nn.MaxPool2d(kernel_size=3, stride=2),
      )
     self.fc_layers = nn.Sequential(
         nn.Dropout(0.6),
         nn.Linear(4096, 2048),
         nn.ReLU(inplace=True),
         nn.Dropout(0.6),
         nn.Linear(2048, 2048),
         nn.ReLU(inplace=True),
         nn.Linear(2048, num_classes),
     )
Example #24
 def __init__(self, input_dim, hidden_dim, output_dim, batch_size):
     super(lstm_ser, self).__init__()
     self.classifier = nn.Sequential(
         LSTM(input_dim, hidden_dim, batch_size),
         nn.Dropout(0.5),
         nn.Linear(hidden_dim, 32),
         nn.ReLU(),
         nn.Linear(32, output_dim),
     )
Example #25
 def __init__(self):
     super(LeNet, self).__init__()
     self.conv = nn.Sequential(
          nn.Conv2d(1, 6, kernel_size=5),  # in_channels, out_channels, kernel_size
         nn.ReLU(),
         nn.MaxPool2d(kernel_size=2, stride=2),  # kernel_size, stride
         nn.Conv2d(6, 16, 5),
         nn.ReLU(),
         nn.MaxPool2d(kernel_size=2, stride=2),
     )
     self.fc = nn.Sequential(
         nn.Linear(16 * 4 * 4, 120),
         nn.ReLU(),
         nn.Linear(120, 84),
         nn.ReLU(),
         nn.Linear(84, 10),
     )
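The `16 * 4 * 4` input to the first Linear pins the expected image size to 1x28x28 (MNIST): 28 -> 24 after the first 5x5 conv, 12 after pooling, 8 after the second conv, 4 after pooling. A smoke test under that assumption (forward is not shown, so the two stages are called explicitly):

import torch

net = LeNet()
x = torch.randn(1, 1, 28, 28)
feat = net.conv(x)             # (1, 16, 4, 4)
out = net.fc(feat.flatten(1))  # flatten all but the batch dim
print(out.shape)               # torch.Size([1, 10])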
Example #26
 def __init__(self,
              features: nn.Module,
              num_classes: int = 1000,
              init_weights: bool = True) -> None:
     super(VGG, self).__init__()
     self.features = features
     self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
     self.classifier = nn.Sequential(
         nn.Linear(512 * 7 * 7, 4096),
         nn.ReLU(True),
         nn.Dropout(),
         nn.Linear(4096, 4096),
         nn.ReLU(True),
         nn.Dropout(),
         nn.Linear(4096, num_classes),
     )
     if init_weights:
         self._initialize_weights()
Example #27
 def __init__(self, alpha, num_classes=1000, dropout=0.2):
     super(MNASNet, self).__init__()
     assert alpha > 0.0
     self.alpha = alpha
     self.num_classes = num_classes
     depths = _get_depths(alpha)
     layers = [
         # First layer: regular conv.
         nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False),
         nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
         nn.ReLU(inplace=True),
         # Depthwise separable, no skip.
         nn.Conv2d(
             depths[0],
             depths[0],
             3,
             padding=1,
             stride=1,
             groups=depths[0],
             bias=False,
         ),
         nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
         nn.ReLU(inplace=True),
         nn.Conv2d(depths[0], depths[1], 1, padding=0, stride=1,
                   bias=False),
         nn.BatchNorm2d(depths[1], momentum=_BN_MOMENTUM),
         # MNASNet blocks: stacks of inverted residuals.
         _stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
         _stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM),
         _stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM),
         _stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM),
         _stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM),
         _stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM),
         # Final mapping to classifier input.
         nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False),
         nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM),
         nn.ReLU(inplace=True),
     ]
     self.layers = nn.Sequential(*layers)
     self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True),
                                     nn.Linear(1280, num_classes))
     self._initialize_weights()
Example #28
def make_layers(cfg, in_channels=3, batch_norm=False, dilation=False):
    d_rate = 2 if dilation else 1
    layers = []
    for v in cfg:
        if v == "M":
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels,
                               v,
                               kernel_size=3,
                               padding=d_rate,
                               dilation=d_rate)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
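The dilated branch keeps feature-map resolution because the padding tracks the dilation rate; CSRNet-style crowd counters build their back end this way, e.g.:

backend_cfg = [512, 512, 512, 256, 128, 64]
backend = make_layers(backend_cfg, in_channels=512, dilation=True)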
Example #29
 def __init__(
     self,
     num_input_features: int,
     growth_rate: int,
     bn_size: int,
     drop_rate: float,
 ) -> None:
     super(_DenseLayer, self).__init__()
     self.norm1: nn.BatchNorm2d
     self.add_module("norm1", nn.BatchNorm2d(num_input_features))
     self.relu1: nn.ReLU
     self.add_module("relu1", nn.ReLU(inplace=True))
     self.conv1: nn.Conv2d
     self.add_module(
         "conv1",
         nn.Conv2d(
             num_input_features,
             bn_size * growth_rate,
             kernel_size=1,
             stride=1,
             bias=False,
         ),
     )
     self.norm2: nn.BatchNorm2d
     self.add_module("norm2", nn.BatchNorm2d(bn_size * growth_rate))
     self.relu2: nn.ReLU
     self.add_module("relu2", nn.ReLU(inplace=True))
     self.conv2: nn.Conv2d
     self.add_module(
         "conv2",
         nn.Conv2d(
             bn_size * growth_rate,
             growth_rate,
             kernel_size=3,
             stride=1,
             padding=1,
             bias=False,
         ),
     )
     self.drop_rate = float(drop_rate)
Example #30
    def __init__(self, input_channels):
        super().__init__()
        self.Branch_1 = nn.Sequential(
            BasicConv2d(input_channels, 192, kernel_size=1),
            BasicConv2d(192, 224, kernel_size=[1, 3], padding=[0, 1]),
            BasicConv2d(224, 256, kernel_size=[3, 1], padding=[1, 0]),
        )

        self.Branch_0 = BasicConv2d(input_channels, 192, kernel_size=1)
        self.Conv2d_1x1 = BasicConv2d(448, 2080, kernel_size=1)

        self.relu = nn.ReLU(inplace=True)