Example #1
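    # Stem and stages of a DLA ("deep layer aggregation") CIFAR classifier;
    # the Tree and BasicBlock modules are assumed to be defined elsewhere in the same file.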
    def __init__(self, block=BasicBlock, num_classes=10):
        super(DLA, self).__init__()
        self.base = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True)
        )

        self.layer1 = nn.Sequential(
            nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True)
        )

        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )

        self.layer3 = Tree(block,  32,  64, level=1, stride=1)
        self.layer4 = Tree(block,  64, 128, level=2, stride=2)
        self.layer5 = Tree(block, 128, 256, level=2, stride=2)
        self.layer6 = Tree(block, 256, 512, level=1, stride=2)
        self.linear = nn.Linear(512, num_classes)
Example #2
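    # ResNet basic block with squeeze-and-excitation: fc1/fc2 form the SE bottleneck;
    # forward (not shown) presumably rescales channels by sigmoid(fc2(relu(fc1(pooled)))).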
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(planes))

        # SE layers
        self.fc1 = nn.Conv2d(
            planes, planes // 16,
            kernel_size=1)  # Use nn.Conv2d instead of nn.Linear
        self.fc2 = nn.Conv2d(planes // 16, planes, kernel_size=1)
Example #3
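    # Gated 1-D residual layer (CycleGAN-VC style): forward (not shown) presumably
    # computes conv1d_layer(x) * sigmoid(conv_layer_gates(x)), projects back to
    # in_channels with conv1d_out_layer, and adds the input.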
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(ResidualLayer, self).__init__()

        self.conv1d_layer = nn.Sequential(
            nn.Conv1d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=1,
                padding=padding,
            ),
            nn.InstanceNorm1d(num_features=out_channels, affine=True),
        )

        self.conv_layer_gates = nn.Sequential(
            nn.Conv1d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=1,
                padding=padding,
            ),
            nn.InstanceNorm1d(num_features=out_channels, affine=True),
        )

        self.conv1d_out_layer = nn.Sequential(
            nn.Conv1d(
                in_channels=out_channels,
                out_channels=in_channels,
                kernel_size=kernel_size,
                stride=1,
                padding=padding,
            ),
            nn.InstanceNorm1d(num_features=in_channels, affine=True),
        )
Example #4
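    # ResNeXt-style grouped bottleneck. NOTE: self.expansion is assumed to be a
    # class-level attribute (typically 2) defined outside this excerpt.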
    def __init__(self,
                 in_planes,
                 cardinality=32,
                 bottleneck_width=4,
                 stride=1):
        super(Block, self).__init__()
        group_width = cardinality * bottleneck_width
        self.conv1 = nn.Conv2d(in_planes,
                               group_width,
                               kernel_size=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(group_width)
        self.conv2 = nn.Conv2d(group_width,
                               group_width,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               groups=cardinality,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(group_width)
        self.conv3 = nn.Conv2d(group_width,
                               self.expansion * group_width,
                               kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * group_width)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * group_width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * group_width,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(self.expansion * group_width))
Example #5
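    # Plain ResNet basic block; self.expansion (typically 1) is assumed to be a
    # class-level attribute not shown in this excerpt.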
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          self.expansion * planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(self.expansion * planes))
Example #6
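    # Inception-v4-style stem: forward (not shown) presumably concatenates each
    # branch pair, growing channels 64 -> 160 (96+64) -> 192 (96+96) -> 384 (192+192).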
    def __init__(self, input_channels):
        super().__init__()
        self.conv1 = nn.Sequential(
            BasicConv2d(input_channels, 32, kernel_size=3),
            BasicConv2d(32, 32, kernel_size=3, padding=1),
            BasicConv2d(32, 64, kernel_size=3, padding=1),
        )

        self.branch3x3_conv = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3_pool = nn.MaxPool2d(3, stride=1, padding=1)

        self.branch7x7a = nn.Sequential(
            BasicConv2d(160, 64, kernel_size=1),
            BasicConv2d(64, 64, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(64, 64, kernel_size=(1, 7), padding=(0, 3)),
            BasicConv2d(64, 96, kernel_size=3, padding=1),
        )

        self.branch7x7b = nn.Sequential(
            BasicConv2d(160, 64, kernel_size=1),
            BasicConv2d(64, 96, kernel_size=3, padding=1),
        )

        self.branchpoola = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.branchpoolb = BasicConv2d(192, 192, kernel_size=3, stride=1, padding=1)
Example #7
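    # RegNet-style bottleneck: 1x1 reduce, 3x3 grouped conv, optional
    # squeeze-and-excitation, 1x1 expand; note the SE width w_se is derived from
    # the block input w_in rather than the bottleneck width w_b.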
    def __init__(self, w_in, w_out, stride, group_width, bottleneck_ratio,
                 se_ratio):
        super(Block, self).__init__()
        # 1x1
        w_b = int(round(w_out * bottleneck_ratio))
        self.conv1 = nn.Conv2d(w_in, w_b, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(w_b)
        # 3x3
        num_groups = w_b // group_width
        self.conv2 = nn.Conv2d(w_b,
                               w_b,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               groups=num_groups,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(w_b)
        # se
        self.with_se = se_ratio > 0
        if self.with_se:
            w_se = int(round(w_in * se_ratio))
            self.se = SE(w_b, w_se)
        # 1x1
        self.conv3 = nn.Conv2d(w_b, w_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(w_out)

        self.shortcut = nn.Sequential()
        if stride != 1 or w_in != w_out:
            self.shortcut = nn.Sequential(
                nn.Conv2d(w_in,
                          w_out,
                          kernel_size=1,
                          stride=stride,
                          bias=False), nn.BatchNorm2d(w_out))
Example #8
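 # CIFAR-sized AlexNet variant: with 32x32 inputs the conv stack ends at
 # 256 x 4 x 4 = 4096 features, matching the first Linear layer below.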
 def __init__(self, num_classes=10):
     super(AlexNet, self).__init__()
     self.features = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2),
         nn.Conv2d(64, 192, kernel_size=3, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2),
         nn.Conv2d(192, 384, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(384, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(256, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2))
     self.fc_layers = nn.Sequential(
         nn.Dropout(0.6),
         nn.Linear(4096, 2048),
         nn.ReLU(inplace=True),
         nn.Dropout(0.6),
         nn.Linear(2048, 2048),
         nn.ReLU(inplace=True),
         nn.Linear(2048, num_classes),
     )
Example #9
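    # ShuffleNetV2 unit: at stride 1 only half of the (pre-split) channels pass
    # through branch2; at stride > 1 both branches downsample. forward (not shown)
    # concatenates the branches and channel-shuffles.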
    def __init__(self, inp: int, oup: int, stride: int) -> None:
        super().__init__()

        if not (1 <= stride <= 3):
            raise ValueError("illegal stride value")
        self.stride = stride

        branch_features = oup // 2
        assert (self.stride != 1) or (inp == branch_features << 1)

        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp,
                                    inp,
                                    kernel_size=3,
                                    stride=self.stride,
                                    padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp,
                          branch_features,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            self.branch1 = nn.Sequential()

        self.branch2 = nn.Sequential(
            nn.Conv2d(
                inp if (self.stride > 1) else branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(
                branch_features,
                branch_features,
                kernel_size=3,
                stride=self.stride,
                padding=1,
            ),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(
                branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )
Example #10
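    # ShuffleNetV1 bottleneck: grouped 1x1 convs around a depthwise 3x3, with a
    # channel shuffle after conv1; the 24-channel stem stage skips grouping, and
    # the stride-2 shortcut is average-pooled and concatenated in forward (not shown).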
    def __init__(self, in_planes, out_planes, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride

        mid_planes = out_planes // 4
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes,
                               mid_planes,
                               kernel_size=1,
                               groups=g,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        self.conv2 = nn.Conv2d(mid_planes,
                               mid_planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               groups=mid_planes,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes,
                               out_planes,
                               kernel_size=1,
                               groups=groups,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
Example #11
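    # Dual Path Network bottleneck: conv3 emits out_planes residual channels plus
    # dense_depth new channels; forward (not shown) presumably adds the first part
    # to the shortcut and concatenates the rest, DPN-style.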
    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride,
                 first_layer):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth

        self.conv1 = nn.Conv2d(last_planes,
                               in_planes,
                               kernel_size=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv2 = nn.Conv2d(in_planes,
                               in_planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               groups=32,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        self.conv3 = nn.Conv2d(in_planes,
                               out_planes + dense_depth,
                               kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes + dense_depth)

        self.shortcut = nn.Sequential()
        if first_layer:
            self.shortcut = nn.Sequential(
                nn.Conv2d(last_planes,
                          out_planes + dense_depth,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(out_planes + dense_depth))
Example #12
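 # Torchvision-style AlexNet prepared for quantization experiments;
 # AdaptiveAvgPool2d((6, 6)) fixes the classifier input at 256 x 6 x 6 for any input size.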
 def __init__(self, num_classes: int = 1000) -> None:
     super(QuantizationAlexNet, self).__init__()
     self.features = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         nn.Conv2d(64, 192, kernel_size=5, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         nn.Conv2d(192, 384, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(384, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(256, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
     )
     self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
     self.classifier = nn.Sequential(
         nn.Dropout(),
         nn.Linear(256 * 6 * 6, 4096),
         nn.ReLU(inplace=True),
         nn.Dropout(),
         nn.Linear(4096, 4096),
         nn.ReLU(inplace=True),
         nn.Linear(4096, num_classes),
     )
Example #13
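    # Spectrogram discriminator (CycleGAN-VC style): GLU-gated stem, four
    # downsampling blocks, then a single-channel conv head; self.downsample is
    # assumed to be a helper method defined on the class.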
    def __init__(self, input_shape=(80, 64), residual_in_channels=256):
        super(Discriminator, self).__init__()

        self.convLayer1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=residual_in_channels // 2,
                kernel_size=(3, 3),
                stride=(1, 1),
                padding=(1, 1),
            ),
            GLU(),
        )

        # Downsampling Layers
        self.downSample1 = self.downsample(
            in_channels=residual_in_channels // 2,
            out_channels=residual_in_channels,
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=1,
        )

        self.downSample2 = self.downsample(
            in_channels=residual_in_channels,
            out_channels=residual_in_channels * 2,
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=1,
        )

        self.downSample3 = self.downsample(
            in_channels=residual_in_channels * 2,
            out_channels=residual_in_channels * 4,
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=1,
        )

        self.downSample4 = self.downsample(
            in_channels=residual_in_channels * 4,
            out_channels=residual_in_channels * 4,
            kernel_size=(1, 10),
            stride=(1, 1),
            padding=(0, 2),
        )

        # Conv Layer
        self.outputConvLayer = nn.Sequential(
            nn.Conv2d(
                in_channels=residual_in_channels * 4,
                out_channels=1,
                kernel_size=(1, 3),
                stride=(1, 1),
                padding=(0, 1),
            )
        )
Example #14
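    # GhostNet bottleneck: ghost-module expansion, a depthwise conv only when
    # stride > 1, optional squeeze-and-excitation, then a linear ghost-module
    # projection; the non-identity shortcut is a depthwise conv plus a pointwise conv.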
    def __init__(
        self,
        in_chs,
        mid_chs,
        out_chs,
        dw_kernel_size=3,
        stride=1,
        act_layer=nn.ReLU,
        se_ratio=0.0,
    ):
        super(GhostBottleneck, self).__init__()
        has_se = se_ratio is not None and se_ratio > 0.0
        self.stride = stride

        # Point-wise expansion
        self.ghost1 = GhostModule(in_chs, mid_chs, relu=True)

        # Depth-wise convolution
        if self.stride > 1:
            self.conv_dw = nn.Conv2d(
                mid_chs,
                mid_chs,
                dw_kernel_size,
                stride=stride,
                padding=(dw_kernel_size - 1) // 2,
                groups=mid_chs,
                bias=False,
            )
            self.bn_dw = nn.BatchNorm2d(mid_chs)

        # Squeeze-and-excitation
        if has_se:
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio)
        else:
            self.se = None

        # Point-wise linear projection
        self.ghost2 = GhostModule(mid_chs, out_chs, relu=False)

        # shortcut
        if in_chs == out_chs and self.stride == 1:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_chs,
                    in_chs,
                    dw_kernel_size,
                    stride=stride,
                    padding=(dw_kernel_size - 1) // 2,
                    groups=in_chs,
                    bias=False,
                ),
                nn.BatchNorm2d(in_chs),
                nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_chs),
            )
Example #15
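    # Same discriminator pattern as Example #13 with the widths hard-coded
    # (residual_in_channels = 256) and a (1, 5) kernel in the final downsample.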
    def __init__(self):
        super(Discriminator, self).__init__()

        self.convLayer1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=128,
                kernel_size=(3, 3),
                stride=(1, 1),
                padding=(1, 1),
            ),
            GLU(),
        )

        # DownSample Layer
        self.downSample1 = self.downSample(
            in_channels=128,
            out_channels=256,
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=1,
        )

        self.downSample2 = self.downSample(
            in_channels=256,
            out_channels=512,
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=1,
        )

        self.downSample3 = self.downSample(
            in_channels=512,
            out_channels=1024,
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=1,
        )

        self.downSample4 = self.downSample(
            in_channels=1024,
            out_channels=1024,
            kernel_size=(1, 5),
            stride=(1, 1),
            padding=(0, 2),
        )

        # Conv Layer
        self.outputConvLayer = nn.Sequential(
            nn.Conv2d(
                in_channels=1024,
                out_channels=1,
                kernel_size=(1, 3),
                stride=(1, 1),
                padding=(0, 1),
            ))
Example #16
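    # GhostNet backbone builder: each cfgs row is (kernel, expansion size,
    # out channels, se_ratio, stride), and width scales every channel count
    # through _make_divisible.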
    def __init__(self, cfgs, num_classes=1000, width=1.0, dropout=0.2):
        super(GhostNet, self).__init__()
        # setting of inverted residual blocks
        self.cfgs = cfgs
        self.dropout = dropout

        # building first layer
        output_channel = _make_divisible(16 * width, 4)
        self.conv_stem = nn.Conv2d(3, output_channel, 3, 2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(output_channel)
        self.act1 = nn.ReLU(inplace=True)
        input_channel = output_channel

        # building inverted residual blocks
        stages = []
        block = GhostBottleneck
        for cfg in self.cfgs:
            layers = []
            for k, exp_size, c, se_ratio, s in cfg:
                output_channel = _make_divisible(c * width, 4)
                hidden_channel = _make_divisible(exp_size * width, 4)
                layers.append(
                    block(
                        input_channel,
                        hidden_channel,
                        output_channel,
                        k,
                        s,
                        se_ratio=se_ratio,
                    ))
                input_channel = output_channel
            stages.append(nn.Sequential(*layers))

        output_channel = _make_divisible(exp_size * width, 4)  # exp_size carries over from the last cfg entry
        stages.append(
            nn.Sequential(ConvBnAct(input_channel, output_channel, 1)))
        input_channel = output_channel

        self.blocks = nn.Sequential(*stages)

        # building last several layers
        output_channel = 1280
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.conv_head = nn.Conv2d(input_channel,
                                   output_channel,
                                   1,
                                   1,
                                   0,
                                   bias=True)
        self.act2 = nn.ReLU(inplace=True)
        self.classifier = nn.Linear(output_channel, num_classes)
        self.dropout = nn.Dropout(p=self.dropout)  # replaces the float rate stored above with a module
Example #17
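    # Torchvision-style ResNet _make_layer: the first block takes the stride and
    # an optional 1x1 downsample; the remaining blocks run at stride 1 with the
    # updated self.inplanes.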
    def _make_layer(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        planes: int,
        blocks: int,
        stride: int = 1,
        dilate: bool = False,
    ) -> nn.Sequential:
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(
                self.inplanes,
                planes,
                stride,
                downsample,
                self.groups,
                self.base_width,
                previous_dilation,
                norm_layer,
                fuse_bn_relu=self.fuse_bn_relu,
                fuse_bn_add_relu=self.fuse_bn_add_relu,
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                    fuse_bn_relu=self.fuse_bn_relu,
                    fuse_bn_add_relu=self.fuse_bn_add_relu,
                )
            )

        return nn.Sequential(*layers)
Example #18
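    # Reduction-style Inception module (matches Inception-ResNet-v2 Reduction-A):
    # a strided 3x3 conv, a 1x1 -> 3x3 -> strided 3x3 chain, and a strided
    # max-pool, concatenated in forward (not shown).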
    def __init__(self, input_channels):

        super().__init__()
        self.Branch_2 = nn.MaxPool2d(3, stride=2)

        self.Branch_0 = nn.Sequential(
            BasicConv2d(input_channels, 384, kernel_size=3, stride=2)
        )

        self.Branch_1 = nn.Sequential(
            BasicConv2d(input_channels, 256, kernel_size=1),
            BasicConv2d(256, 256, kernel_size=3, padding=1),
            BasicConv2d(256, 384, kernel_size=3, stride=2),
        )
Example #19
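    # Torchvision-style ShuffleNetV2 backbone: each stage is one stride-2 unit
    # followed by repeats - 1 stride-1 units.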
    def __init__(
        self,
        stages_repeats: List[int],
        stages_out_channels: List[int],
        num_classes: int = 1000,
        inverted_residual: Callable[..., nn.Module] = InvertedResidual,
    ) -> None:
        super().__init__()

        if len(stages_repeats) != 3:
            raise ValueError(
                "expected stages_repeats as list of 3 positive ints")
        if len(stages_out_channels) != 5:
            raise ValueError(
                "expected stages_out_channels as list of 5 positive ints")
        self._stage_out_channels = stages_out_channels

        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Static annotations for mypy
        self.stage2: nn.Sequential
        self.stage3: nn.Sequential
        self.stage4: nn.Sequential
        stage_names = ["stage{}".format(i) for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(
                stage_names, stages_repeats, self._stage_out_channels[1:]):
            seq = [inverted_residual(input_channels, output_channels, 2)]
            for _ in range(repeats - 1):
                seq.append(
                    inverted_residual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )

        self.fc = nn.Linear(output_channels, num_classes)
Example #20
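 # Small 1-D CNN speaker classifier; the 1 * 6 * 64 flatten size assumes a
 # fixed-length waveform that the strided convs reduce to 6 frames.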
 def __init__(self, num_speakers=2) -> None:
     super(simple_CNN, self).__init__()
     self.convs = nn.Sequential(
         nn.Conv1d(1, 16, 100, stride=10),
         nn.BatchNorm1d(16),
         nn.ReLU(),
         nn.Conv1d(16, 64, 21, stride=10),
         nn.BatchNorm1d(64),
         nn.ReLU(),
         nn.Conv1d(64, 64, 5, stride=5),
         nn.BatchNorm1d(64),
         nn.ReLU(),
     )
     self.linears = nn.Sequential(nn.Linear(1 * 6 * 64, 128),
                                  nn.Linear(128, num_speakers))
Example #21
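 # Standard CIFAR-ResNet helper: only the first block of the group uses the
 # given stride; self.in_planes tracks the running channel count.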
 def _make_layer(self, block, planes, num_blocks, stride):
     strides = [stride] + [1] * (num_blocks - 1)
     layers = []
     for stride in strides:
         layers.append(block(self.in_planes, planes, stride))
         self.in_planes = planes * block.expansion
     return nn.Sequential(*layers)
Example #22
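 # 1-D CNN classifier (speech emotion recognition style): kernel 5 with
 # padding 2 at stride 1 preserves the time axis, so Flatten yields
 # input_dim * out_channels features.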
 def __init__(
     self,
     in_channels=1,
     out_channels=32,
     input_dim=312,
     hidden_dim=32,
     output_dim=10,
 ):
     super(cnn1d_ser, self).__init__()
     self.classifier = nn.Sequential(
         nn.Conv1d(in_channels, out_channels, 5, stride=1, padding=2),
         nn.BatchNorm1d(out_channels),
         nn.ReLU(),
         nn.Dropout(0.5),
         nn.Conv1d(out_channels, out_channels, 5, stride=1, padding=2),
         nn.BatchNorm1d(out_channels),
         nn.ReLU(),
         nn.Dropout(0.5),
         nn.Flatten(),
         nn.Linear(input_dim * out_channels, hidden_dim),
         nn.BatchNorm1d(hidden_dim),
         nn.ReLU(),
         nn.Dropout(0.5),
         nn.Linear(hidden_dim, output_dim),
     )
Example #23
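 # MNASNet-style inverted residual: 1x1 expansion, depthwise conv, then a
 # linear 1x1 projection; forward (not shown) adds the input only when
 # apply_residual is true.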
 def __init__(self,
              in_ch,
              out_ch,
              kernel_size,
              stride,
              expansion_factor,
              bn_momentum=0.1):
     super(_InvertedResidual, self).__init__()
     assert stride in [1, 2]
     assert kernel_size in [3, 5]
     mid_ch = in_ch * expansion_factor
      self.apply_residual = in_ch == out_ch and stride == 1
     self.layers = nn.Sequential(
         # Pointwise
         nn.Conv2d(in_ch, mid_ch, 1, bias=False),
         nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
         nn.ReLU(inplace=True),
         # Depthwise
         nn.Conv2d(
             mid_ch,
             mid_ch,
             kernel_size,
             padding=kernel_size // 2,
             stride=stride,
             groups=mid_ch,
             bias=False,
         ),
         nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
         nn.ReLU(inplace=True),
          # Linear pointwise. Note that there's no activation.
         nn.Conv2d(mid_ch, out_ch, 1, bias=False),
         nn.BatchNorm2d(out_ch, momentum=bn_momentum),
     )
Example #24
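    # Chains block_num copies of an Inception block into one nn.Sequential,
    # feeding each block the previous block's output width.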
    def _generate_inception_module(input_channels, output_channels, block_num, block):
        layers = nn.Sequential()
        for i in range(block_num):
            layers.add_module("{}_{}".format(block.__name__, i), block(input_channels))
            input_channels = output_channels

        return layers
Example #25
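 # Wide & Deep recommender: a 1-dim "wide" embedding plus an MLP over the deep
 # sparse embeddings concatenated with dense features; Embedding and Dense
 # appear to be project-local wrappers rather than torch.nn classes.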
 def __init__(
     self,
     wide_vocab_size: int,
     deep_vocab_size: int,
     deep_embedding_vec_size: int = 16,
     num_deep_sparse_fields: int = 26,
     num_dense_fields: int = 13,
     hidden_size: int = 1024,
     hidden_units_num: int = 7,
     deep_dropout_rate: float = 0.5,
 ):
     super(LocalWideAndDeep, self).__init__()
      self.wide_embedding = Embedding(wide_vocab_size, 1)
     self.deep_embedding = Embedding(deep_vocab_size,
                                     deep_embedding_vec_size)
     deep_feature_size = (deep_embedding_vec_size * num_deep_sparse_fields +
                          num_dense_fields)
     self.linear_layers = nn.Sequential(
         OrderedDict([(
             f"fc{i}",
             Dense(
                 deep_feature_size if i == 0 else hidden_size,
                 hidden_size,
                 deep_dropout_rate,
             ),
         ) for i in range(hidden_units_num)]))
     self.deep_scores = nn.Linear(hidden_size, 1)
     self.sigmoid = nn.Sigmoid()
Example #26
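 # SRGAN-style discriminator: a VGG-like stack of strided convs;
 # AdaptiveAvgPool2d(1) plus 1x1 convs stand in for fully connected layers,
 # so any input resolution works.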
 def __init__(self):
     super(Discriminator, self).__init__()
     self.net = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=3, padding=1),
         nn.LeakyReLU(0.2),
         nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
         nn.BatchNorm2d(64),
         nn.LeakyReLU(0.2),
         nn.Conv2d(64, 128, kernel_size=3, padding=1),
         nn.BatchNorm2d(128),
         nn.LeakyReLU(0.2),
         nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1),
         nn.BatchNorm2d(128),
         nn.LeakyReLU(0.2),
         nn.Conv2d(128, 256, kernel_size=3, padding=1),
         nn.BatchNorm2d(256),
         nn.LeakyReLU(0.2),
         nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1),
         nn.BatchNorm2d(256),
         nn.LeakyReLU(0.2),
         nn.Conv2d(256, 512, kernel_size=3, padding=1),
         nn.BatchNorm2d(512),
         nn.LeakyReLU(0.2),
         nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
         nn.BatchNorm2d(512),
         nn.LeakyReLU(0.2),
         nn.AdaptiveAvgPool2d(1),
         nn.Conv2d(512, 1024, kernel_size=1),
         nn.LeakyReLU(0.2),
         nn.Conv2d(1024, 1, kernel_size=1),
     )
Example #27
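    # Wav2Letter-style fully convolutional acoustic model: a strided 48-tap input
    # conv, seven 7-tap convs, then wide and 1x1 convs producing per-frame class scores.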
    def __init__(self, num_features, num_classes):
        super(Wav2Letter, self).__init__()

        self.layers = nn.Sequential(
            nn.Conv1d(num_features, 250, 48, 2),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 250, 7),
            nn.ReLU(),
            nn.Conv1d(250, 2000, 32),
            nn.ReLU(),
            nn.Conv1d(2000, 2000, 1),
            nn.ReLU(),
            nn.Conv1d(2000, num_classes, 1),
        )
Example #28
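 # EfficientNet-style stage builder: drop_rate ramps linearly with the running
 # block index b across all blocks.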
 def _make_layers(self, in_channels):
     layers = []
     cfg = [
         self.cfg[k] for k in [
             'expansion', 'out_channels', 'num_blocks', 'kernel_size',
             'stride'
         ]
     ]
     b = 0
     blocks = sum(self.cfg['num_blocks'])
     for expansion, out_channels, num_blocks, kernel_size, stride in zip(
             *cfg):
         strides = [stride] + [1] * (num_blocks - 1)
         for stride in strides:
             drop_rate = self.cfg['drop_connect_rate'] * b / blocks
             layers.append(
                 Block(in_channels,
                       out_channels,
                       kernel_size,
                       stride,
                       expansion,
                       se_ratio=0.25,
                       drop_rate=drop_rate))
             in_channels = out_channels
             b += 1  # advance the running block index used for drop_rate
     return nn.Sequential(*layers)
Example #29
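    # SRGAN generator: 9x9 stem, five residual blocks, a post-residual conv, then
    # log2(scale_factor) pixel-shuffle upsample blocks; assumes scale_factor is a
    # power of two.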
    def __init__(self, scale_factor):
        upsample_block_num = int(math.log(scale_factor, 2))

        super(Generator, self).__init__()
        self.block1 = nn.Sequential(nn.Conv2d(3, 64, kernel_size=9, padding=4),
                                    nn.PReLU())
        self.block2 = ResidualBlock(64)
        self.block3 = ResidualBlock(64)
        self.block4 = ResidualBlock(64)
        self.block5 = ResidualBlock(64)
        self.block6 = ResidualBlock(64)
        self.block7 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.PReLU())
        block8 = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)]
        block8.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))
        block8.append(nn.Tanh())
        self.block8 = nn.Sequential(*block8)
Example #30
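 # MobileNetV2-style builder: each cfg row is (expansion, out_planes,
 # num_blocks, stride), and only the first block of each group downsamples.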
 def _make_layers(self, in_planes):
     layers = []
     for expansion, out_planes, num_blocks, stride in self.cfg:
         strides = [stride] + [1] * (num_blocks - 1)
         for stride in strides:
             layers.append(Block(in_planes, out_planes, expansion, stride))
             in_planes = out_planes
     return nn.Sequential(*layers)