Example #1
 def __init__(self, in_planes, planes, stride=1):
     super(BasicBlock, self).__init__()
     self.conv1 = M.Conv2d(in_planes,
                           planes,
                           kernel_size=3,
                           stride=stride,
                           padding=1,
                           bias=False)
     self.bn1 = M.BatchNorm2d(planes)
     self.conv2 = M.Conv2d(planes,
                           planes,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           bias=False)
     self.bn2 = M.BatchNorm2d(planes)
     self.shortcut = M.Sequential()
     if stride != 1 or in_planes != planes:
         self.shortcut = M.Sequential(
             M.Conv2d(
                 in_planes,
                 self.expansion * planes,
                 kernel_size=1,
                 stride=stride,
                 bias=False,
             ),
             M.BatchNorm2d(self.expansion * planes),
         )
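The snippet above only defines the layers; a minimal forward sketch consistent with the standard ResNet BasicBlock (assuming `import megengine.functional as F`):
 def forward(self, x):
     out = F.relu(self.bn1(self.conv1(x)))  # conv-bn-relu
     out = self.bn2(self.conv2(out))        # second conv-bn, activation deferred
     out = out + self.shortcut(x)           # identity or 1x1-projection shortcut
     return F.relu(out)                     # activate after the residual add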
Example #2
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell1, self).__init__()
        self.conv_prev_1x1 = []
        self.conv_prev_1x1.append(M.ReLU())
        self.conv_prev_1x1.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.conv_prev_1x1.append(M.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
        self.conv_prev_1x1 = M.Sequential(*self.conv_prev_1x1)

        self.conv_1x1 = []
        self.conv_1x1.append(M.ReLU())
        self.conv_1x1.append(M.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.append(M.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = M.Sequential(*self.conv_1x1)

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_1_left = M.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_2_left = M.AvgPool2d(3, stride=2, padding=1)
        self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

        self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = M.MaxPool2d(3, stride=2, padding=1)
Example #3
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(FirstCell, self).__init__()
        self.conv_1x1 = []
        self.conv_1x1.append(M.ReLU())
        self.conv_1x1.append(M.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.append(M.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = M.Sequential(*self.conv_1x1)

        self.relu = M.ReLU()
        self.path_1 = []
        self.path_1.append(M.AvgPool2d(1, stride=2))
        self.path_1.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.path_1 = M.Sequential(*self.path_1)

        self.path_2 = []
        # self.path_2.append(M.ZeroPad2d((0, 1, 0, 1)))
        self.path_2.append(M.AvgPool2d(1, stride=2))
        self.path_2.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.path_2 = M.Sequential(*self.path_2)

        self.final_path_bn = M.BatchNorm2d(out_channels_left * 2, eps=0.001, momentum=0.1, affine=True)

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)

        self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)

        self.comb_iter_2_left = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_3_left = M.AvgPool2d(3, stride=1, padding=1)
        self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
Example #4
    def __init__(self, inp, oup, mid_channels, *, ksize, stride):
        super(ShuffleV2Block, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = ksize // 2
        self.pad = pad
        self.inp = inp

        outputs = oup - inp

        self.reduce = M.Conv2d(inp, max(16, inp // 16), 1, 1, 0, bias=True)

        self.wnet1 = WeightNet(inp, mid_channels, 1, 1)
        self.bn1 = M.BatchNorm2d(mid_channels)

        self.wnet2 = WeightNet_DW(mid_channels, ksize, stride)
        self.bn2 = M.BatchNorm2d(mid_channels)

        self.wnet3 = WeightNet(mid_channels, outputs, 1, 1)
        self.bn3 = M.BatchNorm2d(outputs)

        if stride == 2:
            self.wnet_proj_1 = WeightNet_DW(inp, ksize, stride)
            self.bn_proj_1 = M.BatchNorm2d(inp)

            self.wnet_proj_2 = WeightNet(inp, inp, 1, 1)
            self.bn_proj_2 = M.BatchNorm2d(inp)
Example #5
 def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
     super(BranchSeparablesStem, self).__init__()
     self.relu = M.ReLU()
     self.separable_1 = SeparableConv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
     self.bn_sep_1 = M.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
     self.relu1 = M.ReLU()
     self.separable_2 = SeparableConv2d(out_channels, out_channels, kernel_size, 1, padding, bias=bias)
     self.bn_sep_2 = M.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
Example #6
 def __init__(self, in_ch, out_ch):
     super(DoubleConv, self).__init__()
     self.conv = M.Sequential(
         M.Conv2d(in_ch, out_ch, 3, padding=1),
         M.BatchNorm2d(out_ch),
         M.ReLU(),
         M.Conv2d(out_ch, out_ch, 3, padding=1),
         M.BatchNorm2d(out_ch),
         M.ReLU())
Example #7
 def conv_dw(inp, oup, stride):
     # depthwise 3x3 convolution followed by a 1x1 pointwise convolution,
     # each with BatchNorm + ReLU (the MobileNet depthwise-separable pattern)
     return M.Sequential(
         M.Conv2d(inp, inp, 3, stride, 1, groups=inp),  # depthwise (groups=inp)
         M.BatchNorm2d(inp),
         M.ReLU(),
         M.Conv2d(inp, oup, 1, 1, 0),                   # pointwise
         M.BatchNorm2d(oup),
         M.ReLU(),
     )
Example #8
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
    # conv, then optional LeakyReLU, then optional IN or BN (activation precedes
    # normalization in this project's ordering); restructured from six copy-pasted
    # branches into one layer list, behavior unchanged
    layers = [
        nn.Conv2d(in_planes,
                  out_planes,
                  kernel_size=kernel_size,
                  stride=stride,
                  dilation=dilation,
                  padding=((kernel_size - 1) * dilation) // 2,
                  bias=True)
    ]
    if isReLU:
        layers.append(nn.LeakyReLU(0.1))
    if if_IN:
        layers.append(nn.InstanceNorm(out_planes, affine=IN_affine))
    elif if_BN:
        layers.append(nn.BatchNorm2d(out_planes, affine=IN_affine))
    return nn.Sequential(*layers)
Example #9
 def __init__(self):
     super().__init__()
     self.conv0 = M.Conv2d(1, 20, kernel_size=5, bias=False)
     self.bn0 = M.BatchNorm2d(20)
     self.relu0 = M.ReLU()
     self.pool0 = M.MaxPool2d(2)
     self.conv1 = M.Conv2d(20, 20, kernel_size=5, bias=False)
     self.bn1 = M.BatchNorm2d(20)
     self.relu1 = M.ReLU()
     self.pool1 = M.MaxPool2d(2)
     self.fc0 = M.Linear(500, 64, bias=True)
     self.relu2 = M.ReLU()
     self.fc1 = M.Linear(64, 10, bias=True)
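For reference, a forward sketch consistent with these layers, assuming a 1x32x32 input so the flattened size is 20*5*5 = 500 (and `import megengine.functional as F`):
 def forward(self, x):
     x = self.pool0(self.relu0(self.bn0(self.conv0(x))))  # 1x32x32 -> 20x14x14
     x = self.pool1(self.relu1(self.bn1(self.conv1(x))))  # -> 20x5x5
     x = F.flatten(x, 1)                                  # -> (N, 500)
     x = self.relu2(self.fc0(x))
     return self.fc1(x)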
Example #10
 def __init__(self, in_channels):
     super().__init__()
     self.conv_frelu1 = M.Conv2d(in_channels,
                                 in_channels, (1, 3),
                                 1, (0, 1),
                                 groups=in_channels,
                                 bias=False)
     self.conv_frelu2 = M.Conv2d(in_channels,
                                 in_channels, (3, 1),
                                 1, (1, 0),
                                 groups=in_channels,
                                 bias=False)
     self.bn1 = M.BatchNorm2d(in_channels)
     self.bn2 = M.BatchNorm2d(in_channels)
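The two depthwise branches suggest a bidirectional FReLU-style activation; one plausible forward, a sketch not taken from the source project (assuming `import megengine.functional as F`):
 def forward(self, x):
     # max over the identity and the two depthwise spatial-context branches
     return F.maximum(x, F.maximum(self.bn1(self.conv_frelu1(x)),
                                   self.bn2(self.conv_frelu2(x))))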
Example #11
    def __init__(self, inp, oup, mid_channels, *, ksize, stride):
        super(ShuffleV2Block, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = ksize // 2
        self.pad = pad
        self.inp = inp

        outputs = oup - inp

        branch_main = [
            # pw
            M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(mid_channels),
            FReLU(mid_channels),
            # dw
            M.Conv2d(
                mid_channels,
                mid_channels,
                ksize,
                stride,
                pad,
                groups=mid_channels,
                bias=False,
            ),
            M.BatchNorm2d(mid_channels),
            # pw-linear
            M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
            M.BatchNorm2d(outputs),
            FReLU(outputs),
        ]
        self.branch_main = M.Sequential(*branch_main)

        if stride == 2:
            branch_proj = [
                # dw
                M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
                M.BatchNorm2d(inp),
                # pw-linear
                M.Conv2d(inp, inp, 1, 1, 0, bias=False),
                M.BatchNorm2d(inp),
                FReLU(inp),
            ]
            self.branch_proj = M.Sequential(*branch_proj)
        else:
            self.branch_proj = None
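In the usual ShuffleNetV2 forward, the stride-1 path splits the channels and the stride-2 path runs both branches on the full input; a sketch under that assumption, where channel_shuffle is a hypothetical split-and-shuffle helper returning two channel halves (assuming `import megengine.functional as F`):
    def forward(self, x):
        if self.stride == 1:
            x_proj, x = self.channel_shuffle(x)  # hypothetical helper: split C in two
            return F.concat((x_proj, self.branch_main(x)), axis=1)
        # stride == 2: both branches consume the full input
        return F.concat((self.branch_proj(x), self.branch_main(x)), axis=1)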
Example #12
 def __init__(self, mode):
     super().__init__()
     self.mode = mode
     self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
     self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
     self.bn1d = M.BatchNorm1d(32)
     self.bn2d = M.BatchNorm2d(3)
Example #13
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=M.ReLU()):
        super(ConvBlock, self).__init__()
        self.activation = activation
        self.use_bn = use_bn

        self.conv = M.Conv2d(in_channels=in_channels,
                             out_channels=out_channels,
                             kernel_size=kernel_size,
                             stride=stride,
                             padding=padding,
                             dilation=dilation,
                             groups=groups,
                             bias=bias)
        if self.use_bn:
            self.bn = M.BatchNorm2d(num_features=out_channels, eps=bn_eps)
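A forward sketch consistent with the flags above:
    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activation is not None:
            x = self.activation(x)
        return x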
Example #14
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: int,
                 max_pool=True,
                 max_pool_factor=1.0):
        super(ConvBlock, self).__init__()
        stride = (int(2 * max_pool_factor), int(2 * max_pool_factor))
        if max_pool:
            self.max_pool = M.MaxPool2d(kernel_size=stride, stride=stride)
            stride = (1, 1)
        else:
            self.max_pool = lambda x: x
        self.normalize = M.BatchNorm2d(out_channels, affine=True)
        minit.uniform_(self.normalize.weight)
        self.relu = M.ReLU()

        self.conv = M.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=1,
            bias=True,
        )
        maml_init_(self.conv)
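A forward sketch consistent with this block: conv, then BN, ReLU, and the optional pooling:
    def forward(self, x):
        x = self.conv(x)
        x = self.normalize(x)
        x = self.relu(x)
        x = self.max_pool(x)  # identity lambda when max_pool=False
        return x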
Example #15
    def __init__(self, class_num=21, pretrained=None):
        super().__init__()

        self.output_stride = 16
        self.sub_output_stride = self.output_stride // 4
        self.class_num = class_num

        self.aspp = ASPP(in_channels=2048,
                         out_channels=256,
                         dr=16 // self.output_stride)
        self.dropout = M.Dropout(0.5)

        self.upstage1 = M.Sequential(
            M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=True),
            M.BatchNorm2d(48),
            M.ReLU(),
        )

        self.upstage2 = M.Sequential(
            M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=True),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.5),
            M.Conv2d(256, 256, 3, 1, padding=1, bias=True),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.1),
        )
        self.convout = M.Conv2d(256, self.class_num, 1, 1, padding=0)

        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight,
                                    mode="fan_out",
                                    nonlinearity="relu")
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)

        self.backbone = ModifiedResNet(
            Bottleneck, [3, 4, 23, 3],
            replace_stride_with_dilation=[False, False, True])
        if pretrained is not None:
            model_dict = mge.load(pretrained)
            self.backbone.load_state_dict(model_dict)
Example #16
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000):

        super(DenseNet, self).__init__()

        features = []
        # First convolution
        features.append(
            M.Conv2d(3,
                     num_init_features,
                     kernel_size=7,
                     stride=2,
                     padding=3,
                     bias=False))
        features.append(M.BatchNorm2d(num_init_features))
        features.append(M.ReLU())
        features.append(M.MaxPool2d(kernel_size=3, stride=2, padding=1))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                num_input_features=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate)
            features.append(block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                features.append(trans)
                num_features = num_features // 2

        # Final batch norm
        features.append(M.BatchNorm2d(num_features))
        self.features = M.Sequential(*features)

        # Linear layer
        self.classifier = M.Linear(num_features, num_classes)
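A forward sketch for this DenseNet, with global average pooling before the classifier (assuming `import megengine.functional as F`):
    def forward(self, x):
        x = self.features(x)
        x = F.relu(x)                    # activation after the final BN
        x = F.adaptive_avg_pool2d(x, 1)  # global average pool -> (N, C, 1, 1)
        x = F.flatten(x, 1)
        return self.classifier(x)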
Example #17
    def __init__(self, cfg):
        super().__init__()

        self.cfg = cfg
        self.output_stride = 16
        self.sub_output_stride = self.output_stride // 4
        self.num_classes = cfg.num_classes

        self.aspp = ASPP(in_channels=2048,
                         out_channels=256,
                         dr=16 // self.output_stride)
        self.dropout = M.Dropout(0.5)

        self.upstage1 = M.Sequential(
            M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
            M.BatchNorm2d(48),
            M.ReLU(),
        )

        self.upstage2 = M.Sequential(
            M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.5),
            M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.1),
        )
        self.conv_out = M.Conv2d(256, self.num_classes, 1, 1, padding=0)

        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight,
                                    mode="fan_out",
                                    nonlinearity="relu")
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)

        self.backbone = getattr(resnet, cfg.backbone)(
            replace_stride_with_dilation=[False, False, True],
            pretrained=cfg.backbone_pretrained,
        )
        del self.backbone.fc
Example #18
 def __init__(self, in_channels):
     super().__init__()
     self.conv_frelu = M.Conv2d(in_channels,
                                in_channels,
                                3,
                                1,
                                1,
                                groups=in_channels)
     self.bn_frelu = M.BatchNorm2d(in_channels)
Example #19
 def __init__(self, in_channels):
     """
     Init method.
     """
     super(FReLU, self).__init__()
     self.conv_frelu = M.Conv2d(
         in_channels, in_channels, 3, 1, 1, groups=in_channels
     )
     self.bn_frelu = M.BatchNorm2d(in_channels)
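FReLU computes max(x, T(x)), where T is the depthwise conv + BN above; a minimal forward sketch (assuming `import megengine.functional as F`):
 def forward(self, x):
     return F.maximum(x, self.bn_frelu(self.conv_frelu(x)))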
Example #20
 def __init__(self, in_channels, channels):
     super(MyBlock, self).__init__()
     self.conv1 = M.Conv2d(in_channels,
                           channels,
                           3,
                           1,
                           padding=1,
                           bias=False)
     self.bn1 = M.BatchNorm2d(channels)
Example #21
 def __init__(self,
              gate_channel,
              reduction_ratio=16,
              dilation_conv_num=2,
              dilation_val=4):
     super(SpatialGate, self).__init__()
     self.gate_s = M.Sequential(
         M.Conv2d(gate_channel, gate_channel // reduction_ratio, kernel_size=1),
         M.BatchNorm2d(gate_channel // reduction_ratio),
         M.ReLU(),
         M.Conv2d(gate_channel // reduction_ratio, gate_channel // reduction_ratio,
                  kernel_size=3, padding=dilation_val, dilation=dilation_val),
         M.BatchNorm2d(gate_channel // reduction_ratio),
         M.ReLU(),
         M.Conv2d(gate_channel // reduction_ratio, gate_channel // reduction_ratio,
                  kernel_size=3, padding=dilation_val, dilation=dilation_val),
         M.BatchNorm2d(gate_channel // reduction_ratio),
         M.ReLU(),
         M.Conv2d(gate_channel // reduction_ratio, 1, kernel_size=1))
Example #22
    def __init__(self, num_input_features, num_output_features):
        super(_Transition, self).__init__()

        self.norm = M.BatchNorm2d(num_input_features)
        self.relu = M.ReLU()
        self.conv = M.Conv2d(num_input_features,
                             num_output_features,
                             kernel_size=1,
                             stride=1,
                             bias=False)
        self.pool = M.AvgPool2d(kernel_size=2, stride=2)
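The transition applies its layers in sequence, halving both channel count and spatial size; a forward sketch:
    def forward(self, x):
        return self.pool(self.conv(self.relu(self.norm(x))))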
Example #23
 def __init__(self):
     super().__init__()
     self.conv1 = M.Conv2d(3,
                           64,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)
     self.bn1 = M.BatchNorm2d(64)
     self.avgpool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
     self.fc = M.Linear(64, 10)
Example #24
import numpy as np
import megengine.module as M
from megengine import Tensor
from megengine.jit import trace

def test_batchnorm():
    bn = M.BatchNorm2d(32)
    bn.eval()

    @trace(symbolic=True, capture_as_const=True)
    def fwd(data):
        return bn(data)

    data = Tensor(np.random.random((1, 32, 32, 32)))
    result = fwd(data)
    # check_pygraph_dump is a project-local test helper, not part of the public API
    check_pygraph_dump(fwd, [data], [result])
Example #25
# Sketch of the required imports; F.grad(..., use_virtual_grad=...) is the
# legacy MegEngine 0.x autodiff API.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from megengine import tensor

def test_mge_81():
    np.random.seed(0)
    N, D = 3, 4
    x = mge.Parameter(value=np.random.normal(size=(N, D)).astype(np.float32))
    y = mge.Parameter(value=np.random.normal(size=(N, D)).astype(np.float32))
    z = mge.Parameter(value=np.random.normal(size=(N, D)).astype(np.float32))
    a = x * y
    b = a + z
    c = F.sum(b)
    grad_x = F.grad(c, x, use_virtual_grad=False)
    grad_y = F.grad(c, y, use_virtual_grad=False)
    grad_z = F.grad(c, z, use_virtual_grad=False)
    print(grad_x.numpy())
    print(grad_y.numpy())
    print(grad_z.numpy())
    m = M.BatchNorm2d(4)
    inp = tensor(np.zeros((64, 4, 32, 32), dtype=np.float32))
    _ = m(inp)
    m = M.BatchNorm2d(4, affine=False)
    _ = m(inp)
Example #26
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16

        self.conv1 = M.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = M.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = M.Linear(64, num_classes)

        self.apply(_weights_init)
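A forward sketch for this CIFAR-style ResNet, with global pooling before the linear head (assuming `import megengine.functional as F`):
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        out = F.adaptive_avg_pool2d(out, 1)  # global average pool
        out = F.flatten(out, 1)              # -> (N, 64)
        return self.linear(out)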
Example #27
 def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
     super(_DenseLayer, self).__init__()
     self.feature = []
     self.feature.append(M.BatchNorm2d(num_input_features))
     self.feature.append(M.ReLU())
     self.feature.append(
         M.Conv2d(num_input_features,
                  bn_size * growth_rate,
                  kernel_size=1,
                  stride=1,
                  bias=False))
     self.feature.append(M.BatchNorm2d(bn_size * growth_rate))
     self.feature.append(M.ReLU())
     self.feature.append(
         M.Conv2d(bn_size * growth_rate,
                  growth_rate,
                  kernel_size=3,
                  stride=1,
                  padding=1,
                  bias=False))
     self.feature = M.Sequential(*self.feature)
     self.drop_rate = drop_rate
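A dense layer concatenates its input with the newly computed features; a forward sketch (assuming `import megengine.functional as F`):
 def forward(self, x):
     new_features = self.feature(x)
     if self.drop_rate > 0:
         new_features = F.dropout(new_features, self.drop_rate)
     return F.concat([x, new_features], axis=1)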
Example #28
    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        self.conv1 = M.Sequential(
            M.Conv2d(
                in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        self.conv2 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=6 * dr,
                dilation=6 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        self.conv3 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=12 * dr,
                dilation=12 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        self.conv4 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=18 * dr,
                dilation=18 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
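A forward sketch: the four dilated branches run on the input, the global-pooling branch is expanded back to the feature-map size (broadcast is one common choice; the original may instead interpolate), and everything is concatenated before conv_out (assuming `import megengine.functional as F`):
    def forward(self, x):
        n, c, h, w = x.shape
        gp = F.adaptive_avg_pool2d(x, 1)                 # global context, (N, C, 1, 1)
        gp = self.conv_gp(gp)
        gp = F.broadcast_to(gp, (n, gp.shape[1], h, w))  # tile back to (N, C', H, W)
        out = F.concat(
            [self.conv1(x), self.conv2(x), self.conv3(x), self.conv4(x), gp], axis=1
        )
        return self.conv_out(out)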
Example #29
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels=None,
                 upsample=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels if hidden_channels is not None else out_channels
        self.learnable_sc = in_channels != out_channels or upsample
        self.upsample = upsample

        self.c1 = M.Conv2d(self.in_channels,
                           self.hidden_channels,
                           3,
                           1,
                           padding=1)
        self.c2 = M.Conv2d(self.hidden_channels,
                           self.out_channels,
                           3,
                           1,
                           padding=1)

        self.b1 = M.BatchNorm2d(self.in_channels)
        self.b2 = M.BatchNorm2d(self.hidden_channels)

        self.activation = M.ReLU()

        M.init.xavier_uniform_(self.c1.weight, math.sqrt(2.0))
        M.init.xavier_uniform_(self.c2.weight, math.sqrt(2.0))

        # Shortcut layer
        if self.learnable_sc:
            self.c_sc = M.Conv2d(in_channels,
                                 out_channels,
                                 1,
                                 1,
                                 padding=0)
            M.init.xavier_uniform_(self.c_sc.weight, 1.0)
Example #30
    def __init__(self, feature_dim, channel, size=7):
        """initialzation

        Args:
            feature_dim (int): dimension number of output embedding
            channel (int): channel number of input feature map
            size (int, optional): size of input feature map. defaults to 7
        """
        super().__init__()
        self.size = size
        self.bn1 = M.BatchNorm2d(channel)
        self.dropout = M.Dropout(drop_prob=0.1)
        self.fc = M.Linear(channel, feature_dim)
        self.bn2 = M.BatchNorm1d(feature_dim, affine=False)
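A forward sketch for this embedding head: global average pooling over the size x size map, then dropout, a linear projection, and feature BN (assuming `import megengine.functional as F`; the dropout/pool ordering is an assumption):
    def forward(self, x):
        x = self.bn1(x)
        x = F.avg_pool2d(x, self.size)  # (N, C, size, size) -> (N, C, 1, 1)
        x = F.flatten(x, 1)
        x = self.dropout(x)
        x = self.fc(x)
        return self.bn2(x)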