Example #1
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell1, self).__init__()
        self.conv_prev_1x1 = []
        self.conv_prev_1x1.append(M.ReLU())
        self.conv_prev_1x1.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.conv_prev_1x1.append(M.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
        self.conv_prev_1x1 = M.Sequential(*self.conv_prev_1x1)

        self.conv_1x1 = []
        self.conv_1x1.append(M.ReLU())
        self.conv_1x1.append(M.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.append(M.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = M.Sequential(*self.conv_1x1)

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_1_left = M.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_2_left = M.AvgPool2d(3, stride=2, padding=1)
        self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

        self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = M.MaxPool2d(3, stride=2, padding=1)
Example #2
 def __init__(self, intLevel):
     super(Basic, self).__init__()
     self.netBasic = M.Sequential(
         Conv2d(in_channels=8,
                out_channels=32,
                kernel_size=7,
                stride=1,
                padding=3),  # 8 = 3 (ref frame) + 3 (warped frame) + 2 (flow)
         M.ReLU(),
         Conv2d(in_channels=32,
                out_channels=64,
                kernel_size=7,
                stride=1,
                padding=3),
         M.ReLU(),
         Conv2d(in_channels=64,
                out_channels=32,
                kernel_size=7,
                stride=1,
                padding=3),
         M.ReLU(),
         Conv2d(in_channels=32,
                out_channels=16,
                kernel_size=7,
                stride=1,
                padding=3),
         M.ReLU(),
         Conv2d(in_channels=16,
                out_channels=2,
                kernel_size=7,
                stride=1,
                padding=3))
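
The five stacked 7x7 convolutions are all stride-1 with padding 3, so they preserve spatial size while mapping 8 input channels down to a 2-channel flow field. A minimal shape check, assuming this runs in the module that defines Basic and its Conv2d helper (the input shape is illustrative):

 import numpy as np
 import megengine as mge

 basic = Basic(intLevel=0)  # intLevel is not used by the layers shown above
 x = mge.tensor(np.random.randn(1, 8, 64, 64).astype("float32"))
 flow = basic.netBasic(x)
 print(flow.shape)  # (1, 2, 64, 64): spatial size is preserved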
Example #3
    def __init__(self,
                 in_channels=3,
                 out_channels=3,
                 mid_channels=128,
                 hidden_channels=3 * 4 * 4,
                 blocknums=5,
                 upscale_factor=4,
                 hsa=False,
                 pixel_shuffle=False):
        super(RSDN, self).__init__()
        if hsa:
            self.hsa = HSA(3)
        else:
            self.hsa = Identi()
        self.blocknums = blocknums
        self.hidden_channels = hidden_channels
        SDBlocks = []
        for _ in range(blocknums):
            SDBlocks.append(SDBlock(mid_channels))
        self.SDBlocks = M.Sequential(*SDBlocks)

        self.pre_SD_S = M.Sequential(
            Conv2d(2 * (3 + hidden_channels), mid_channels, 3, 1, 1),
            M.ReLU(),
        )
        self.pre_SD_D = M.Sequential(
            Conv2d(2 * (3 + hidden_channels), mid_channels, 3, 1, 1),
            M.ReLU(),
        )
        self.conv_SD = M.Sequential(
            Conv2d(mid_channels, hidden_channels, 3, 1, 1),
            M.ReLU(),
        )
        self.convS = Conv2d(mid_channels, hidden_channels, 3, 1, 1)
        self.convD = Conv2d(mid_channels, hidden_channels, 3, 1, 1)
        self.convHR = Conv2d(2 * hidden_channels, hidden_channels, 3, 1, 1)

        if pixel_shuffle:
            self.trans_S = PixelShuffle(upscale_factor)
            self.trans_D = PixelShuffle(upscale_factor)
            self.trans_HR = PixelShuffle(upscale_factor)
        else:
            self.trans_S = ConvTranspose2d(hidden_channels,
                                           3,
                                           4,
                                           4,
                                           0,
                                           bias=False)
            self.trans_D = ConvTranspose2d(hidden_channels,
                                           3,
                                           4,
                                           4,
                                           0,
                                           bias=False)
            self.trans_HR = ConvTranspose2d(hidden_channels,
                                            3,
                                            4,
                                            4,
                                            0,
                                            bias=False)
Example #4
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(FirstCell, self).__init__()
        self.conv_1x1 = []
        self.conv_1x1.append(M.ReLU())
        self.conv_1x1.append(M.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.append(M.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = M.Sequential(*self.conv_1x1)

        self.relu = M.ReLU()
        self.path_1 = []
        self.path_1.append(M.AvgPool2d(1, stride=2))
        self.path_1.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.path_1 = M.Sequential(*self.path_1)

        self.path_2 = []
        # self.path_2.append(M.ZeroPad2d((0, 1, 0, 1)))
        self.path_2.append(M.AvgPool2d(1, stride=2))
        self.path_2.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.path_2 = M.Sequential(*self.path_2)

        self.final_path_bn = M.BatchNorm2d(out_channels_left * 2, eps=0.001, momentum=0.1, affine=True)

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)

        self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)

        self.comb_iter_2_left = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_3_left = M.AvgPool2d(3, stride=1, padding=1)
        self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
Example #5
 def __init__(self, channel_num):
     super(CARBBlock, self).__init__()
     self.conv1 = M.Sequential(
         M.Conv2d(channel_num,
                  channel_num,
                  kernel_size=3,
                  padding=1,
                  stride=1),
         M.ReLU(),
         M.Conv2d(channel_num,
                  channel_num,
                  kernel_size=3,
                  padding=1,
                  stride=1),
     )
     # self.global_average_pooling = nn.AdaptiveAvgPool2d((1,1))  # B,C,H,W -> B,C,1,1
     self.linear = M.Sequential(M.Linear(channel_num, channel_num // 2),
                                M.ReLU(),
                                M.Linear(channel_num // 2, channel_num),
                                M.Sigmoid())
     self.conv2 = M.Conv2d(channel_num * 2,
                           channel_num,
                           kernel_size=1,
                           padding=0,
                           stride=1)
     self.lrelu = M.LeakyReLU()
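
The commented-out pooling line and the doubled input width of conv2 point to a squeeze-and-excitation branch whose re-weighted features are concatenated back onto the block input. A speculative forward sketch under that reading (not the original author's code; F.adaptive_avg_pool2d stands in for the commented-out global pooling):

 import megengine.functional as F

 def forward(self, x):
     w = self.conv1(x)                         # (N, C, H, W)
     s = F.adaptive_avg_pool2d(w, (1, 1))      # squeeze to (N, C, 1, 1)
     s = self.linear(F.flatten(s, 1))          # excitation weights, (N, C)
     w = w * s.reshape(s.shape[0], -1, 1, 1)   # channel re-weighting
     return self.lrelu(self.conv2(F.concat([x, w], axis=1)))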
Example #6
    def __init__(self, cfg, input_shape: List[layers.ShapeSpec]):
        super().__init__()

        in_channels = input_shape[0].channels
        num_classes = cfg.num_classes
        num_convs = 4
        prior_prob = cfg.cls_prior_prob
        num_anchors = [
            len(cfg.anchor_scales[i]) * len(cfg.anchor_ratios[i])
            for i in range(len(input_shape))
        ]

        assert (len(set(num_anchors)) == 1
                ), "not support different number of anchors between levels"
        num_anchors = num_anchors[0]

        cls_subnet = []
        bbox_subnet = []
        for _ in range(num_convs):
            cls_subnet.append(
                M.Conv2d(in_channels,
                         in_channels,
                         kernel_size=3,
                         stride=1,
                         padding=1))
            cls_subnet.append(M.ReLU())
            bbox_subnet.append(
                M.Conv2d(in_channels,
                         in_channels,
                         kernel_size=3,
                         stride=1,
                         padding=1))
            bbox_subnet.append(M.ReLU())

        self.cls_subnet = M.Sequential(*cls_subnet)
        self.bbox_subnet = M.Sequential(*bbox_subnet)
        self.cls_score = M.Conv2d(in_channels,
                                  num_anchors * num_classes,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1)
        self.bbox_pred = M.Conv2d(in_channels,
                                  num_anchors * 4,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1)

        # Initialization
        for modules in [
                self.cls_subnet, self.bbox_subnet, self.cls_score,
                self.bbox_pred
        ]:
            for layer in modules.modules():
                if isinstance(layer, M.Conv2d):
                    M.init.normal_(layer.weight, mean=0, std=0.01)
                    M.init.fill_(layer.bias, 0)

        # Use prior in model initialization to improve stability
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        M.init.fill_(self.cls_score.bias, bias_value)
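
The head is shared across all feature levels, which is why the assertion requires the same anchor count per level. A typical forward mirroring the RetinaNet pattern (a sketch; the repo's actual method may differ):

    def forward(self, features):
        logits, bbox_reg = [], []
        for feature in features:
            logits.append(self.cls_score(self.cls_subnet(feature)))
            bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature)))
        return logits, bbox_reg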
Example #7
 def __init__(self, channel_nums):
     super(SDBlock, self).__init__()
     self.netS = M.Sequential(Conv2d(channel_nums, channel_nums, 3, 1, 1),
                              M.ReLU(),
                              Conv2d(channel_nums, channel_nums, 3, 1, 1))
     self.netD = M.Sequential(Conv2d(channel_nums, channel_nums, 3, 1, 1),
                              M.ReLU(),
                              Conv2d(channel_nums, channel_nums, 3, 1, 1))
Example #8
 def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
     super(BranchSeparablesStem, self).__init__()
     self.relu = M.ReLU()
     self.separable_1 = SeparableConv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
     self.bn_sep_1 = M.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
     self.relu1 = M.ReLU()
     self.separable_2 = SeparableConv2d(out_channels, out_channels, kernel_size, 1, padding, bias=bias)
     self.bn_sep_2 = M.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
Example #9
 def __init__(self, in_ch, out_ch):
     super(DoubleConv, self).__init__()
     self.conv = M.Sequential(
         M.Conv2d(in_ch, out_ch, 3, padding=1),
         M.BatchNorm2d(out_ch),
         M.ReLU(),
         M.Conv2d(out_ch, out_ch, 3, padding=1),
         M.BatchNorm2d(out_ch),
         M.ReLU())
Example #10
File: demo.py Project: GG-yuki/bugs
 def conv_dw(inp, oup, stride):
     return M.Sequential(
         M.Conv2d(inp, inp, 3, stride, 1, groups=inp),
         M.BatchNorm2d(inp),
         M.ReLU(),
         M.Conv2d(inp, oup, 1, 1, 0),
         M.BatchNorm2d(oup),
         M.ReLU(),
     )
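
conv_dw is the classic MobileNet depthwise-separable pair: a 3x3 depthwise convolution (groups=inp) followed by a 1x1 pointwise convolution, each with BatchNorm + ReLU. A minimal usage sketch, assuming conv_dw is callable as a free function (shapes are illustrative):

 import numpy as np
 import megengine as mge

 block = conv_dw(inp=32, oup=64, stride=2)
 x = mge.tensor(np.random.randn(1, 32, 56, 56).astype("float32"))
 y = block(x)
 print(y.shape)  # (1, 64, 28, 28): stride 2 halves the spatial size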
Example #11
 def __init__(self):
     super().__init__()
     self.conv0 = M.Conv2d(1, 20, kernel_size=5, bias=False)
     self.bn0 = M.BatchNorm2d(20)
     self.relu0 = M.ReLU()
     self.pool0 = M.MaxPool2d(2)
     self.conv1 = M.Conv2d(20, 20, kernel_size=5, bias=False)
     self.bn1 = M.BatchNorm2d(20)
     self.relu1 = M.ReLU()
     self.pool1 = M.MaxPool2d(2)
     self.fc0 = M.Linear(500, 64, bias=True)
     self.relu2 = M.ReLU()
     self.fc1 = M.Linear(64, 10, bias=True)
Example #12
 def __init__(self,
              in_ch,
              out_ch,
              ksize,
              stride=1,
              expansion=1.0,
              bias=False,
              norm_layer=M.BatchNorm2d,
              activation=M.ReLU()):
     super(XXBlock, self).__init__()
     if norm_layer is None:
         norm_layer = M.BatchNorm2d
     if activation is None:
         activation = M.ReLU()
     expansion_out_ch = round(out_ch * expansion)
     self.conv_block = M.Sequential(
         M.Conv2d(in_ch,
                  expansion_out_ch,
                  ksize,
                  stride=stride,
                  padding=ksize // 2), norm_layer(expansion_out_ch),
         activation,
         M.Conv2d(expansion_out_ch,
                  out_ch,
                  ksize,
                  stride=1,
                  padding=(ksize - 1) // 2,
                  bias=bias), norm_layer(out_ch))
     self.activation = activation
     self.shortcut = M.Sequential()
     if stride > 1 or in_ch != out_ch:
         if stride > 1:
             self.shortcut = M.Sequential(
                 M.AvgPool2d(kernel_size=stride + 1,
                             stride=stride,
                             padding=stride // 2),
                 M.Conv2d(in_ch,
                          out_ch,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=bias), norm_layer(out_ch))
         else:
             self.shortcut = M.Sequential(
                 M.Conv2d(in_ch,
                          out_ch,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=bias), norm_layer(out_ch))
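
Since conv_block ends in a norm layer and shortcut is built to match its output shape and stride, the natural closing step is a post-activation residual sum. A sketch under that assumption:

 def forward(self, x):
     return self.activation(self.conv_block(x) + self.shortcut(x))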
Example #13
    def __init__(self, inp, oup, mid_channels, *, ksize, stride):
        super().__init__()
        self.stride = stride
        assert stride in [1, 2]

        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = ksize // 2
        self.pad = pad
        self.inp = inp

        outputs = oup - inp

        branch_main = [
            # pw
            M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(mid_channels),
            M.ReLU(),
            # dw
            M.Conv2d(
                mid_channels,
                mid_channels,
                ksize,
                stride,
                pad,
                groups=mid_channels,
                bias=False,
            ),
            M.BatchNorm2d(mid_channels),
            # pw-linear
            M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
            M.BatchNorm2d(outputs),
            M.ReLU(),
        ]
        self.branch_main = M.Sequential(*branch_main)

        if stride == 2:
            branch_proj = [
                # dw
                M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
                M.BatchNorm2d(inp),
                # pw-linear
                M.Conv2d(inp, inp, 1, 1, 0, bias=False),
                M.BatchNorm2d(inp),
                M.ReLU(),
            ]
            self.branch_proj = M.Sequential(*branch_proj)
        else:
            self.branch_proj = None
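
This is the ShuffleNetV2 unit: branch_main emits oup - inp channels so concatenation restores oup channels. A sketch of the usual forward, assuming stride-1 blocks are constructed with inp equal to half the incoming channel count (as in the reference ShuffleNetV2 code); the channel shuffle that normally follows the concat is omitted:

    import megengine.functional as F

    def forward(self, x):
        if self.stride == 1:
            # half the channels bypass the block, half go through branch_main
            x_proj, x_main = F.split(x, 2, axis=1)
            return F.concat([x_proj, self.branch_main(x_main)], axis=1)
        # stride 2: both branches downsample and their outputs are concatenated
        return F.concat([self.branch_proj(x), self.branch_main(x)], axis=1)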
Example #14
File: layers.py Project: Qsingle/Megvision
    def __init__(self,
                 in_ch,
                 out_ch,
                 ksize=1,
                 stride=1,
                 padding=0,
                 bias=False,
                 dilation=1,
                 groups=1,
                 norm_layer=M.BatchNorm2d,
                 activation=M.ReLU(),
                 gn_groups=32,
                 **kwargs):
        super(Conv2d, self).__init__()
        ksize = _pair(ksize)
        stride = _pair(stride)
        padding = _pair(padding)
        self.conv = M.Conv2d(in_ch,
                             out_ch,
                             ksize,
                             stride=stride,
                             padding=padding,
                             dilation=dilation,
                             groups=groups,
                             bias=bias,
                             **kwargs)
        self.norm_layer = None
        if norm_layer is not None:
            if isinstance(norm_layer, M.GroupNorm):
                self.norm_layer = norm_layer(gn_groups, out_ch)
            else:
                self.norm_layer = norm_layer(out_ch)

        self.activation = activation
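
The wrapper then applies the convolution followed by the optional norm and activation. A plausible forward (a sketch; the actual file may differ):

    def forward(self, x):
        x = self.conv(x)
        if self.norm_layer is not None:
            x = self.norm_layer(x)
        if self.activation is not None:
            x = self.activation(x)
        return x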
Example #15
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 activation='prelu'):
        super(ResBlock, self).__init__()

        if activation == 'relu':
            self.act = M.ReLU()
        elif activation == 'prelu':
            self.act = M.PReLU(num_parameters=1, init=0.25)
        else:
            raise NotImplementedError("not implemented activation")

        m = []
        m.append(
            M.Conv2d(in_channels,
                     out_channels,
                     kernel_size=kernel_size,
                     stride=1,
                     padding=(kernel_size // 2)))
        m.append(self.act)
        m.append(
            M.Conv2d(out_channels,
                     out_channels,
                     kernel_size=kernel_size,
                     stride=1,
                     padding=(kernel_size // 2)))
        self.body = M.Sequential(*m)
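
body preserves spatial shape (stride 1, kernel_size // 2 padding), so the block can close with an identity skip. The likely forward, as a sketch:

    def forward(self, x):
        return x + self.body(x)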
Example #16
 def __init__(self, in_channels, out_channels, kernel_size):
     super(SqueezeInitBlock, self).__init__()
     self.conv = M.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=2)
     self.activ = M.ReLU()
Example #17
 def __init__(self, in_channels, out_channels, kernel_size, padding):
     super(FireConv, self).__init__()
     self.conv = M.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          padding=padding)
     self.activation = M.ReLU()
Example #18
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=M.ReLU()):
        super(ConvBlock, self).__init__()
        self.activation = activation
        self.use_bn = use_bn

        self.conv = M.Conv2d(in_channels=in_channels,
                             out_channels=out_channels,
                             kernel_size=kernel_size,
                             stride=stride,
                             padding=padding,
                             dilation=dilation,
                             groups=groups,
                             bias=bias)
        if self.use_bn:
            self.bn = M.BatchNorm2d(num_features=out_channels, eps=bn_eps)
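
A forward consistent with the use_bn and activation switches (a sketch, assuming activation=None disables the nonlinearity):

    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activation is not None:
            x = self.activation(x)
        return x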
Example #19
File: blocks.py Project: zzh7982/Models
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels=None,
                 downsample=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels if hidden_channels is not None else in_channels
        self.downsample = downsample
        self.learnable_sc = (in_channels != out_channels) or downsample

        # Build the layers
        self.c1 = M.Conv2d(self.in_channels, self.hidden_channels, 3, 1,
                           1)
        self.c2 = M.Conv2d(self.hidden_channels, self.out_channels, 3, 1,
                           1)

        self.activation = M.ReLU()

        M.init.xavier_uniform_(self.c1.weight, math.sqrt(2.0))
        M.init.xavier_uniform_(self.c2.weight, math.sqrt(2.0))

        # Shortcut layer
        if self.learnable_sc:
            self.c_sc = M.Conv2d(in_channels, out_channels, 1, 1, 0)
            M.init.xavier_uniform_(self.c_sc.weight, 1.0)
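
This matches the residual block of SNGAN-style discriminators. A sketch of the usual forward, with 2x average pooling standing in for the downsample (an assumption, since the snippet does not show it):

    import megengine.functional as F

    def _shortcut(self, x):
        if self.learnable_sc:
            x = self.c_sc(x)
        if self.downsample:
            x = F.avg_pool2d(x, 2)
        return x

    def forward(self, x):
        h = self.c2(self.activation(self.c1(self.activation(x))))
        if self.downsample:
            h = F.avg_pool2d(h, 2)
        return h + self._shortcut(x)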
Example #20
File: net_mge.py Project: zongjg/PMRID
    def __init__(self,
                 in_channels: int,
                 mid_channels: int,
                 out_channels: int,
                 stride: int = 1):
        super().__init__()

        self.conv1 = Conv2D(in_channels,
                            mid_channels,
                            kernel_size=5,
                            stride=stride,
                            padding=2,
                            is_seperable=True,
                            has_relu=True)
        self.conv2 = Conv2D(mid_channels,
                            out_channels,
                            kernel_size=5,
                            stride=1,
                            padding=2,
                            is_seperable=True,
                            has_relu=False)

        self.proj = (M.Identity()
                     if stride == 1 and in_channels == out_channels else
                     Conv2D(in_channels,
                            out_channels,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            is_seperable=True,
                            has_relu=False))
        self.relu = M.ReLU()
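
With proj collapsing to identity when the shape is preserved, the block is a plain residual unit, so the forward is presumably:

    def forward(self, x):
        return self.relu(self.conv2(self.conv1(x)) + self.proj(x))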
Example #21
File: layers.py Project: Qsingle/Megvision
    def __init__(self,
                 channels,
                 reduction=16,
                 norm_layer=M.BatchNorm2d,
                 activation=M.ReLU(),
                 attention_act=M.Sigmoid()):
        """

        Args:
            channels (int):
            reduction (int):
            norm_layer (M.Module):
            activation (M.Module):
            attention_act (M.Module):
        """
        super(SEModule, self).__init__()
        inter_ch = int(channels // reduction)
        self.fc = M.Sequential(
            M.AdaptiveAvgPool2d(1),
            Conv2d(channels,
                   inter_ch,
                   norm_layer=norm_layer,
                   activation=activation),
            Conv2d(inter_ch,
                   channels,
                   norm_layer=norm_layer,
                   activation=attention_act))
Example #22
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: int,
                 max_pool=True,
                 max_pool_factor=1.0):
        super(ConvBlock, self).__init__()
        stride = (int(2 * max_pool_factor), int(2 * max_pool_factor))
        if max_pool:
            self.max_pool = M.MaxPool2d(kernel_size=stride, stride=stride)
            stride = (1, 1)
        else:
            self.max_pool = lambda x: x
        self.normalize = M.BatchNorm2d(out_channels, affine=True)
        minit.uniform_(self.normalize.weight)
        self.relu = M.ReLU()

        self.conv = M.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=1,
            bias=True,
        )
        maml_init_(self.conv)
Example #23
 def __init__(self):
     super().__init__()
     # single-channel input: two stages of 5x5 conv + ReLU + pooling
     self.conv1 = M.Conv2d(1, 6, 5)
     self.relu1 = M.ReLU()
     self.pool1 = M.MaxPool2d(2, 2)
     self.conv2 = M.Conv2d(6, 16, 5)
     self.relu2 = M.ReLU()
     self.pool2 = M.MaxPool2d(2, 2)
     # two fully-connected layers + ReLU
     self.fc1 = M.Linear(16 * 5 * 5, 120)
     self.relu3 = M.ReLU()
     self.fc2 = M.Linear(120, 84)
     self.relu4 = M.ReLU()
     # classifier
     self.classifier = M.Linear(84, 10)
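
Only the layers are defined above; fc1's input width of 16 * 5 * 5 implies 32x32 inputs (the LeNet layout). A hypothetical forward under that assumption:

 import megengine.functional as F

 def forward(self, x):                          # x: (N, 1, 32, 32)
     x = self.pool1(self.relu1(self.conv1(x)))  # -> (N, 6, 14, 14)
     x = self.pool2(self.relu2(self.conv2(x)))  # -> (N, 16, 5, 5)
     x = F.flatten(x, 1)                        # -> (N, 400)
     x = self.relu3(self.fc1(x))
     x = self.relu4(self.fc2(x))
     return self.classifier(x)                  # -> (N, 10)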
Example #24
    def __init__(self):
        super().__init__()
        num_convs = 4
        in_channels = 256
        cls_subnet, bbox_subnet = [], []
        for _ in range(num_convs):
            cls_subnet.append(
                M.Conv2d(in_channels,
                         in_channels,
                         kernel_size=3,
                         stride=1,
                         padding=1))
            cls_subnet.append(M.ReLU())
            bbox_subnet.append(
                M.Conv2d(in_channels,
                         in_channels,
                         kernel_size=3,
                         stride=1,
                         padding=1))
            bbox_subnet.append(M.ReLU())
        self.cls_subnet = M.Sequential(*cls_subnet)
        self.bbox_subnet = M.Sequential(*bbox_subnet)
        # predictor
        self.cls_score = M.Conv2d(in_channels,
                                  config.num_cell_anchors *
                                  (config.num_classes - 1) * 1,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1)
        self.bbox_pred = M.Conv2d(in_channels,
                                  config.num_cell_anchors * 4 * 1,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1)

        self.iou_pred = M.Conv2d(in_channels,
                                 config.num_cell_anchors * 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)

        self.num_pred = M.Conv2d(in_channels,
                                 config.num_cell_anchors * 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self._init_weights()
Example #25
    def __init__(self,
                 channels,
                 exp_channels,
                 init_block_channels,
                 final_block_channels,
                 classifier_mid_channels,
                 kernels3,
                 use_relu,
                 use_se,
                 first_stride,
                 final_use_se,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(MobileNetV3, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = []
        init_block = conv3x3_block(in_channels=in_channels,
                                   out_channels=init_block_channels,
                                   stride=2,
                                   activation=HSwish())
        self.features.append(init_block)

        in_channels = init_block_channels

        for i, channels_per_stage in enumerate(channels):
            stage = []
            for j, out_channels in enumerate(channels_per_stage):
                exp_channels_ij = exp_channels[i][j]
                stride = 2 if (j == 0) and ((i != 0) or first_stride) else 1
                use_kernel3 = kernels3[i][j] == 1
                activation = M.ReLU() if use_relu[i][j] == 1 else HSwish()
                use_se_flag = use_se[i][j] == 1
                unit = MobileNetV3Unit(in_channels=in_channels,
                                       out_channels=out_channels,
                                       exp_channels=exp_channels_ij,
                                       use_kernel3=use_kernel3,
                                       stride=stride,
                                       activation=activation,
                                       use_se=use_se_flag)
                stage.append(unit)
                in_channels = out_channels
            self.features += stage
        final_block = MobileNetV3FinalBlock(in_channels=in_channels,
                                            out_channels=final_block_channels,
                                            use_se=final_use_se)
        self.features.append(final_block)
        in_channels = final_block_channels
        final_pool = M.AvgPool2d(kernel_size=7, stride=1)
        self.features.append(final_pool)
        self.features = M.Sequential(*self.features)

        self.output = MobileNetV3Classifier(
            in_channels=in_channels,
            out_channels=num_classes,
            mid_channels=classifier_mid_channels,
            dropout_rate=0.2)
Example #26
    def __init__(self, class_num=21, pretrained=None):
        super().__init__()

        self.output_stride = 16
        self.sub_output_stride = self.output_stride // 4
        self.class_num = class_num

        self.aspp = ASPP(in_channels=2048,
                         out_channels=256,
                         dr=16 // self.output_stride)
        self.dropout = M.Dropout(0.5)

        self.upstage1 = M.Sequential(
            M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=True),
            M.BatchNorm2d(48),
            M.ReLU(),
        )

        self.upstage2 = M.Sequential(
            M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=True),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.5),
            M.Conv2d(256, 256, 3, 1, padding=1, bias=True),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.1),
        )
        self.convout = M.Conv2d(256, self.class_num, 1, 1, padding=0)

        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight,
                                    mode="fan_out",
                                    nonlinearity="relu")
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)

        self.backbone = ModifiedResNet(
            Bottleneck, [3, 4, 23, 3],
            replace_stride_with_dilation=[False, False, True])
        if pretrained is not None:
            model_dict = mge.load(pretrained)
            self.backbone.load_state_dict(model_dict)
Example #27
 def __init__(self, in_ch=3, num_classes=1000):
     '''
         AlexNet.
         Args:
             in_ch: int, number of input channels
             num_classes: int, number of classes to predict
         Reference:
             "One weird trick for parallelizing convolutional neural networks" <https://arxiv.org/abs/1404.5997>
     '''
     super(AlexNet, self).__init__()
     # feature extraction part
     self.features = M.Sequential(
         M.Conv2d(in_ch, 64, kernel_size=11, stride=4, padding=11 // 4),
         M.ReLU(),
         M.MaxPool2d(kernel_size=3, stride=2),
         M.Conv2d(64, 192, kernel_size=5, padding=2),
         M.ReLU(),
         M.MaxPool2d(kernel_size=3, stride=2),
         M.Conv2d(192, 384, kernel_size=3, stride=1, padding=1),
         M.ReLU(),
         M.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
         M.ReLU(),
         M.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
         M.ReLU(),
         M.MaxPool2d(kernel_size=3, stride=2),
     )
     # global average pooling
     self.avgpool = M.AdaptiveAvgPool2d((6, 6))
     # classification part
     self.classifier = M.Sequential(M.Dropout(),
                                    M.Linear(256 * 6 * 6, 4096), M.ReLU(),
                                    M.Dropout(), M.Linear(4096, 4096),
                                    M.ReLU(), M.Linear(4096, num_classes))
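
The usual AlexNet forward simply chains the three parts; as a sketch:

 import megengine.functional as F

 def forward(self, x):
     x = self.features(x)
     x = self.avgpool(x)    # -> (N, 256, 6, 6)
     x = F.flatten(x, 1)    # -> (N, 9216)
     return self.classifier(x)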
Example #28
    def __init__(self, cfg):
        super().__init__()

        self.cfg = cfg
        self.output_stride = 16
        self.sub_output_stride = self.output_stride // 4
        self.num_classes = cfg.num_classes

        self.aspp = ASPP(in_channels=2048,
                         out_channels=256,
                         dr=16 // self.output_stride)
        self.dropout = M.Dropout(0.5)

        self.upstage1 = M.Sequential(
            M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
            M.BatchNorm2d(48),
            M.ReLU(),
        )

        self.upstage2 = M.Sequential(
            M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.5),
            M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.1),
        )
        self.conv_out = M.Conv2d(256, self.num_classes, 1, 1, padding=0)

        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight,
                                    mode="fan_out",
                                    nonlinearity="relu")
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)

        self.backbone = getattr(resnet, cfg.backbone)(
            replace_stride_with_dilation=[False, False, True],
            pretrained=cfg.backbone_pretrained,
        )
        del self.backbone.fc
Example #29
File: resnet.py Project: Qsingle/Megvision
    def __init__(self,
                 inplanes,
                 outplanes,
                 stride=1,
                 dilation=1,
                 groups=1,
                 downsample=None,
                 base_width=64,
                 norm_layer=None,
                 se_module=None,
                 radix=2,
                 reduction=4,
                 avd=False,
                 avd_first=False,
                 is_first=False):
        '''
            Implementation of the basic block.
            Args:
                inplanes (int): the number of input channels
                outplanes (int): the number of output channels (the number of kernels of the conv layers)
                stride (int, tuple or list): the stride of the first conv3x3 layer
                dilation (int): the dilation rate of the first conv layer of the block
                groups (int): the number of groups for the first conv3x3 layer
                downsample (megengine.module.Module or None): if not None, downsamples x for the shortcut
                base_width (int): the base width of the layer
                norm_layer (None or megengine.module.Module): the normalization layer of the block, defaults to batch normalization
                se_module (SEModule or None): the SE module from SENet
        '''
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = M.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError(
                "Dilation > 1 not supportes in BasicBlock")
        # when stride != 1, both self.downsample and self.conv1 downsample the input
        # layer1
        self.conv1 = conv3x3(inplanes,
                             outplanes,
                             stride=stride,
                             dilation=dilation,
                             groups=groups)
        self.bn1 = norm_layer(outplanes)
        # activation layer
        self.relu = M.ReLU()
        # layer2
        self.conv2 = conv3x3(outplanes, outplanes)
        self.bn2 = norm_layer(outplanes)
        # downsample layer
        self.downsample = downsample
        # semodule
        self.se = se_module

        self.stride = stride
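
A forward consistent with these layers and the downsampling comment (a sketch of the standard ResNet BasicBlock pattern, applying the optional SE module to the residual branch):

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.se is not None:
            out = self.se(out)
        return self.relu(out + identity)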
Example #30
 def __init__(self,
              gate_channel,
              reduction_ratio=16,
              dilation_conv_num=2,
              dilation_val=4):
     super(SpatialGate, self).__init__()
     self.gate_s = M.Sequential(
         M.Conv2d(gate_channel, gate_channel//reduction_ratio, kernel_size=1),
         M.BatchNorm2d(gate_channel//reduction_ratio),
         M.ReLU(),
         M.Conv2d(gate_channel // reduction_ratio, gate_channel // reduction_ratio, kernel_size=3,
                  padding=dilation_val, dilation=dilation_val),
         M.BatchNorm2d(gate_channel // reduction_ratio),
         M.ReLU(),
         M.Conv2d(gate_channel // reduction_ratio, gate_channel // reduction_ratio, kernel_size=3,
                  padding=dilation_val, dilation=dilation_val),
         M.BatchNorm2d(gate_channel // reduction_ratio),
         M.ReLU(),
         M.Conv2d(gate_channel//reduction_ratio, 1, kernel_size=1))