    def __init__(self):
        super(Net, self).__init__()

        self.cnn_layers = Sequential()
        input_channels = configuration.input_channels
        width = configuration.image_width
        height = configuration.image_height

        i = 1
        for k, f, a, s, p, mf, ms, mp in zip(
                configuration.no_conv_filters, configuration.conv_filters, configuration.activation_list,
                configuration.conv_strides, configuration.conv_padding, configuration.maxpool_filters,
                configuration.maxpool_strides, configuration.maxpool_padding):

            self.cnn_layers.add_module("conv_2d_{}".format(i),
                                       Conv2d(input_channels, k, kernel_size=f, stride=s, padding=p))
            self.cnn_layers.add_module("dropout_{}".format(i), Dropout(configuration.cnn_dropout))
            # Conv2d output size (dilation=1): floor((W - F + 2P) / S) + 1
            width = (width - f + 2 * p) // s + 1
            height = (height - f + 2 * p) // s + 1
            input_channels = k

            self.cnn_layers.add_module("activation_{}".format(i), get_activation_from_name(a))

            if configuration.batch_norm:
                self.cnn_layers.add_module("batch_norm_{}".format(i), BatchNorm2d(input_channels))

            self.cnn_layers.add_module("max_pool_{}".format(i), MaxPool2d(kernel_size=mf, stride=ms, padding=mp))

            # MaxPool2d output size follows the same formula
            width = (width - mf + 2 * mp) // ms + 1
            height = (height - mf + 2 * mp) // ms + 1

            i += 1

        self.linear_layers = Sequential()
        self.linear_layers.add_module("flatten", Flatten())
        self.linear_layers.add_module("linear_{}".format(i), Linear(input_channels * width * height, configuration.linear))
        self.linear_layers.add_module("dropout_{}".format(i), Dropout(configuration.fcn_dropout))
        self.linear_layers.add_module("activation_{}".format(i), get_activation_from_name(configuration.relu))

        if configuration.batch_norm:
            self.linear_layers.add_module("batch_norm_{}".format(i), BatchNorm1d(configuration.linear))

        self.linear_layers.add_module("output_{}".format(i), Linear(configuration.linear, configuration.output_classes))
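
A minimal sketch of the size arithmetic the loop above relies on (my own helper, assuming square kernels and dilation=1; conv and max-pool share the same formula):

def output_size(size, kernel, stride, padding):
    # floor((size - kernel + 2 * padding) / stride) + 1
    return (size - kernel + 2 * padding) // stride + 1

w = output_size(32, 3, 1, 1)  # Conv2d(kernel_size=3, stride=1, padding=1) -> 32
w = output_size(w, 2, 2, 0)   # MaxPool2d(kernel_size=2, stride=2) -> 16
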
    def __init__(self,
                 depth,
                 input_size,
                 output_size,
                 dropout=0.0,
                 batch_norm=True):
        super().__init__()

        non_lin = ReLU()

        assert dropout == 0, "Dropout not allowed for ResNet"
        assert batch_norm

        # Configure ResNet
        # number of layers per block
        if (depth - 2) % 6 != 0:
            raise ValueError(
                "Only ResNet-X with X=6N+2 allowed where N natural number.")

        layers = (depth - 2) // 6
        # number of filters for each block
        filters = [16, 32, 64]
        # Subsampling at first layer in each block
        strides = [1, 2, 2]

        # Build ResNet
        self.conv1 = conv3x3(int(input_size[0]), filters[0])
        self.inplanes = filters[0]
        self.bn1 = BatchNorm2d(filters[0])
        self.non_lin1 = non_lin

        # Each stage downsamples with its first stride, then stacks `layers` residual units of two conv layers plus a skip connection
        self.layer1 = self._make_layer(filters[0], layers, strides[0], non_lin)
        self.layer2 = self._make_layer(filters[1], layers, strides[1], non_lin)
        self.layer3 = self._make_layer(filters[2], layers, strides[2], non_lin)

        # Average each filter
        self.avgpool = AvgPool2d(
            int(input_size[1]) // reduce(operator.mul, strides, 1))
        self.linear_out = Linear(filters[-1], output_size)

        self.kind = depth
        self.non_linearity = non_lin.__class__
        self.batch_norm = batch_norm
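
As a quick check (my own, not from the source), the depths this constraint admits are exactly 6N+2:

valid_depths = [d for d in range(8, 116) if (d - 2) % 6 == 0]
print(valid_depths)  # [8, 14, 20, 26, ...] -- includes the classic 20/32/44/56/110
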
Example #3
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        self.stride = stride
        if stride == 2:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth, eps=2e-5, momentum=0.9))
            self.res_layer = Sequential(
                BatchNorm2d(in_channel, eps=2e-5, momentum=0.9),
                Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
                BatchNorm2d(depth, eps=2e-5, momentum=0.9),
                PReLU(depth),
                Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
                BatchNorm2d(depth, eps=2e-5, momentum=0.9))
        else:
            self.res_layer = Sequential(
                BatchNorm2d(in_channel, eps=2e-5, momentum=0.9),
                Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
                BatchNorm2d(depth, eps=2e-5, momentum=0.9),
                PReLU(depth),
                Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
                BatchNorm2d(depth, eps=2e-5, momentum=0.9))
    def __init__(self):
        super(Net1, self).__init__()
        self.features = nn.Sequential(
            DSC(5, 64, 3, 2, 1), BatchNorm2d(64), ReLU(inplace=True),
            DSC(64, 192, 3, 2, 1), BatchNorm2d(192), ReLU(inplace=True),
            DSC(192, 384, 3, 1, 1), BatchNorm2d(384), ReLU(inplace=True),
            MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            DSC(384, 512, 3, 1, 1), BatchNorm2d(512), ReLU(inplace=True),
            MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            DSC(512, 512, 3, 1, 1), BatchNorm2d(512), ReLU(inplace=True),
            DSC(512, 1024, 3, 1, 1), BatchNorm2d(1024), ReLU(inplace=True),
            MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            DSC(1024, 1024, 3, 1, 1), BatchNorm2d(1024), ReLU(inplace=True),
            DSC(1024, 512, 3, 1, 1), BatchNorm2d(512), ReLU(inplace=True),
            DSC(512, 256, 3, 1, 1), BatchNorm2d(256), ReLU(inplace=True),
            CBAM(256))
        self.avgpool = AdaptiveAvgPool2d(output_size=(6, 6))
        self.classifier = nn.Sequential(
            Dropout(p=0.5),
            Linear(in_features=9216, out_features=2048, bias=True),
            ReLU(inplace=True), Dropout(p=0.5),
            Linear(in_features=2048, out_features=1024, bias=True),
            ReLU(inplace=True), Dropout(p=0.5),
            Linear(in_features=1024, out_features=3, bias=True))
    def __init__(self, depth, widen_factor, dropout_rate, num_classes, beta=1.0, base=16, name=None):
        super(Wide_ResNet, self).__init__()
        self.in_planes, self.num_classes = base, num_classes
        self.beta = beta

        assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) // 6
        k = widen_factor

        print('| Wide-Resnet %dx%d' % (depth, k))
        nStages = [base, base * k, base * k * 2, base * k * 4]

        self.conv1 = conv3x3(3, nStages[0])
        self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
        self.bn1 = BatchNorm2d(nStages[3])
        self.linear = nn.Linear(nStages[3], num_classes, bias=True)
        self.name = name
Example #6
    def __init__(self, scale_factor):
        upsample_block_num = int(math.log(scale_factor, 2))

        super(Generator, self).__init__()
        self.block1 = Sequential(
            Conv2d(3, 64, kernel_size=(9, 9), padding=(4, 4)), PReLU())
        self.block2 = ResidualBlock(64)
        self.block3 = ResidualBlock(64)
        self.block4 = ResidualBlock(64)
        self.block5 = ResidualBlock(64)
        self.block6 = ResidualBlock(64)
        self.block7 = Sequential(
            Conv2d(64, 64, kernel_size=(3, 3), padding=(1, 1)),
            BatchNorm2d(64))

        # up sampling
        block8 = [UpSampleBlock(64, 2) for _ in range(upsample_block_num)]
        block8.append(Conv2d(64, 3, kernel_size=(9, 9), padding=(4, 4)))
        self.block8 = Sequential(*block8)
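
scale_factor is implicitly assumed to be a power of two here; each UpSampleBlock doubles the resolution, so for example:

import math
print(int(math.log(4, 2)))  # scale_factor=4 -> 2 upsample blocks
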
Example #7
    def __init__(self,
                 K_size=2,
                 stride_size=1,
                 padding_size=0,
                 heights=5,
                 widths=5,
                 outs=5):
        super(Net, self).__init__()

        self.K_h = K_size[0] if isinstance(K_size, tuple) else K_size
        self.K_w = K_size[1] if isinstance(K_size, tuple) else K_size
        self.Stride_h = stride_size[0] if isinstance(stride_size,
                                                     tuple) else stride_size
        self.Stride_w = stride_size[1] if isinstance(stride_size,
                                                     tuple) else stride_size
        self.Padding_h = padding_size[0] if isinstance(padding_size,
                                                       tuple) else padding_size
        self.Padding_w = padding_size[1] if isinstance(padding_size,
                                                       tuple) else padding_size

        self.H = heights
        self.W = widths
        self.out_size = outs

        self.cnn_output_h = int((self.H + 2 * self.Padding_h -
                                 (self.K_h - 1) - 1) / self.Stride_h + 1)
        self.cnn_output_w = int((self.W + 2 * self.Padding_w -
                                 (self.K_w - 1) - 1) / self.Stride_w + 1)

        self.cnn_layers = Sequential(
            # Defining a 2D convolution layer
            Conv2d(1,
                   1,
                   kernel_size=(self.K_h, self.K_w),
                   stride=(self.Stride_h, self.Stride_w),
                   padding=(self.Padding_h, self.Padding_w)),
            BatchNorm2d(1),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=(1, self.cnn_output_w),
                      stride=(1, self.cnn_output_w)))

        self.linear_layers = Sequential(
            Linear(self.cnn_output_h, int(self.out_size)))
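
A quick sanity check of the cnn_output_h/w formula above (my own, assuming dilation=1; requires torch):

import torch
from torch.nn import Conv2d

H, K, S, P = 5, 2, 1, 0
expected = (H + 2 * P - (K - 1) - 1) // S + 1  # 4
out = Conv2d(1, 1, kernel_size=K, stride=S, padding=P)(torch.randn(1, 1, H, H))
assert out.shape[-1] == expected
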
Example #8
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int,
        padding: int,
    ):
        super().__init__()
        self.conv = Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            bias=False,
        )
        self.bn = BatchNorm2d(out_channels)
        # nn.ReLU takes no channel argument; only `inplace` is accepted
        self.act = ReLU(inplace=True)
Example #9
def get_resnet(encoder_name, pretrained=True):
    """
    Returns a resnet18 or resnet50 backbone with the final avgpool and fc
    stripped, prefixed with a BatchNorm2d over the 3 input channels
    :param encoder_name: "resnet18" or "resnet50"
    :param pretrained: if the model should be ImageNet pretrained
    :return: model, cnn_features_dim
    """

    if encoder_name == "resnet18":
        model = models.resnet18(pretrained=pretrained)
        latent_dim = 512
    else:
        model = models.resnet50(pretrained=pretrained)
        latent_dim = 2048

    children = ([BatchNorm2d(3)] + list(model.children())[:-2])
    model = torch.nn.Sequential(*children)
    return model, latent_dim
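
A hedged usage sketch (assumes torchvision is available; pretrained=False skips the weight download):

import torch

backbone, latent_dim = get_resnet("resnet18", pretrained=False)
feats = backbone(torch.randn(2, 3, 224, 224))
print(feats.shape, latent_dim)  # torch.Size([2, 512, 7, 7]) 512
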
    def __init__(self, inplanes, planes, dilation):
        super(ASPP_module, self).__init__()
        if dilation == 1:
            kernel_size = 1
            padding = 0
        else:
            kernel_size = 3
            padding = dilation
        self.atrous_convolution = nn.Conv2d(inplanes,
                                            planes,
                                            kernel_size=kernel_size,
                                            stride=1,
                                            padding=padding,
                                            dilation=dilation,
                                            bias=False)
        self.bn = BatchNorm2d(planes)
        self.relu = nn.ReLU()

        self._init_weight()
    def __init__(self):
        super(Discriminator, self).__init__()
        self.conv = Sequential(
            Conv2d(in_channels=CONFIG["IMAGE_CHANNEL"], out_channels=64, kernel_size=4, stride=2, padding=1),
            LeakyReLU(negative_slope=0.2),
            Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1),
            BatchNorm2d(num_features=128),
            LeakyReLU(negative_slope=0.2)
        )

        self.fc = Sequential(
            Linear(128 * 7 * 7, 1024),
            BatchNorm1d(1024),
            LeakyReLU(negative_slope=0.2),
            Linear(1024, 1),
            Sigmoid()
        )

        initialize_weights(self)
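
The Linear(128 * 7 * 7, 1024) input size implies 28x28 images (e.g. MNIST): each Conv2d(kernel_size=4, stride=2, padding=1) halves the spatial size.

size = 28
for _ in range(2):
    size = (size - 4 + 2 * 1) // 2 + 1
print(size)  # 7
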
    def __init__(self, num_layers, mode='ir'):
        super(Backbone, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64), PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)
    def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)):
        super(PSPModule, self).__init__()

        self.stages = nn.ModuleList(
            [self._make_stage(features, out_features, size) for size in sizes])
        self.bottleneck = nn.Sequential(
            nn.Conv2d(
                features + len(sizes) * out_features,
                out_features,
                kernel_size=3,
                padding=1,
                dilation=1,
                bias=False,
            ),
            BatchNorm2d(out_features),
            nn.ReLU(),
            nn.Dropout2d(0.1),
        )
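
PSPModule calls a _make_stage helper that is not shown; a plausible sketch (an assumption, following the common PSPNet pattern of pooling to `size` and projecting to out_features channels, which matches the bottleneck's concatenated input width):

from torch import nn

def _make_stage(self, features, out_features, size):
    return nn.Sequential(
        nn.AdaptiveAvgPool2d(output_size=(size, size)),
        nn.Conv2d(features, out_features, kernel_size=1, bias=False),
    )

The forward pass would then upsample each stage output back to the input resolution before concatenating.
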
Example #14
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
    def __init__(self, num_layers, mode='ir', opts=None):
        super(GradualStyleEncoder, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50, 100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(
            Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False),
            BatchNorm2d(64), PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel, bottleneck.depth,
                                bottleneck.stride))
        self.body = Sequential(*modules)

        self.styles = nn.ModuleList()
        self.style_count = opts.n_styles
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        self.latlayer1 = nn.Conv2d(256,
                                   512,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
        self.latlayer2 = nn.Conv2d(128,
                                   512,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    stride=1,
                    dilation=1,
                    multi_grid=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                BatchNorm2d(planes * block.expansion, affine=True),
            )

        layers = []
        generate_multi_grid = (lambda index, grids: grids[index % len(grids)]
                               if isinstance(grids, tuple) else 1)
        layers.append(
            block(
                self.inplanes,
                planes,
                stride,
                dilation=dilation,
                downsample=downsample,
                multi_grid=generate_multi_grid(0, multi_grid),
            ))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    dilation=dilation,
                    multi_grid=generate_multi_grid(i, multi_grid),
                ))

        return nn.Sequential(*layers)
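
The generate_multi_grid lambda simply cycles through a tuple of dilation multipliers (falling back to 1 for non-tuple input), e.g.:

grids = (1, 2, 4)
print([grids[i % len(grids)] for i in range(5)])  # [1, 2, 4, 1, 2]
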
Example #17
    def __init__(self,
                 in_c,
                 out_c,
                 kernel=(1, 1),
                 stride=(1, 1),
                 padding=(0, 0),
                 groups=1):
        super(Conv_block, self).__init__()
        self.conv = Conv2d(
            in_c,
            out_channels=out_c,
            kernel_size=kernel,
            groups=groups,
            stride=stride,
            padding=padding,
            bias=False,
        )
        self.bn = BatchNorm2d(out_c)
        self.prelu = PReLU(out_c)
def conv(in_channels, out_channels, kernel_size=3, padding=1, batch_norm=True):
    """
    A convolution block with a conv layer and batch norm
    :param in_channels: number of input channels
    :param out_channels: number of output channels
    :param kernel_size: size of the kernel
    :param padding: number of pixels to pad on all sides
    :param batch_norm: to use batch norm or not
    :return: Sequential(conv, batch norm) if batch_norm, else the bare conv layer
    """
    c = Conv2d(in_channels,
               out_channels,
               kernel_size=kernel_size,
               stride=1,
               padding=padding)
    if batch_norm:
        bn = BatchNorm2d(out_channels)
        return Sequential(c, bn)
    return c
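
A quick usage sketch of conv() (assumes the torch.nn names used above are imported):

import torch

block = conv(3, 16)  # Sequential(Conv2d, BatchNorm2d); padding=1 keeps 32x32
print(block(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 16, 32, 32])
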
    def __init__(self):
        super(Generator, self).__init__()
        self.fc = Sequential(
            Linear(CONFIG["NOISE_DIM"] + 10, 1024),
            BatchNorm1d(1024),
            ReLU(),
            Linear(1024, 128 * 7 * 7),
            BatchNorm1d(128 * 7 * 7),
            ReLU(),
        )
        self.deconv = Sequential(
            ConvTranspose2d(128, 64, 4, 2, 1),
            BatchNorm2d(64),
            ReLU(),
            ConvTranspose2d(64, CONFIG["IMAGE_CHANNEL"], 4, 2, 1),
            Tanh(),
        )

        normal_init(self)
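
Mirroring the discriminator arithmetic earlier, each ConvTranspose2d(kernel_size=4, stride=2, padding=1) doubles the spatial size, taking the reshaped 7x7 maps back to 28x28 via out = (in - 1) * stride - 2 * padding + kernel:

size = 7
for _ in range(2):
    size = (size - 1) * 2 - 2 * 1 + 4
print(size)  # 28
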
Example #20
    def __init__(self):
        super(GCPACC2Net, self).__init__()

        self.hardnet = hardnet(arch=68)

        inplanes = 1024
        interplanes = 256

        self.fam45 = FAM(640, interplanes, interplanes, interplanes)
        self.fam34 = FAM(320, interplanes, interplanes, interplanes)
        self.fam23 = FAM(128, interplanes, interplanes, interplanes)

        self.linear5 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.linear4 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.linear3 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.linear2 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)

        self.conva = nn.Sequential(
            nn.Conv2d(inplanes, interplanes, 3, padding=1, bias=False),
            BatchNorm2d(interplanes),
            # nn.ReLU's only parameter is `inplace`; a channel count passed
            # here would merely be truthy-interpreted as inplace
            nn.ReLU(inplace=True),
        )

        self.long_relation = CrissCrossAttention(interplanes)
        self.local_attention_4 = LocalAttenModule(interplanes)
        self.local_attention_3 = LocalAttenModule(interplanes)
        self.local_attention_2 = LocalAttenModule(interplanes)
    def __init__(self, in_channels, out_channels):
        super(ASPP, self).__init__()

        dilations = [1, 6, 12, 18]
        self.aspp0 = _ASPPModule(in_channels=in_channels, out_channels=out_channels, kernel_size=1,
                                 padding=0, dilation=dilations[0])
        self.aspp1 = _ASPPModule(in_channels=in_channels, out_channels=out_channels, kernel_size=3,
                                 padding=dilations[1], dilation=dilations[1])
        self.aspp2 = _ASPPModule(in_channels=in_channels, out_channels=out_channels, kernel_size=3,
                                 padding=dilations[2], dilation=dilations[2])
        self.aspp3 = _ASPPModule(in_channels=in_channels, out_channels=out_channels, kernel_size=3,
                                 padding=dilations[3], dilation=dilations[3])

        self.conv = Conv2d(out_channels * 4, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, groups=1,
                           bias=True)
        self.relu = ReLU()
        self.batchnorm = BatchNorm2d(out_channels)

        self._init_weight()
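
Only __init__ is shown; a plausible forward for such an ASPP head (an assumption: the four dilated branches run in parallel and are concatenated channel-wise, matching the out_channels * 4 input of self.conv):

import torch

def forward(self, x):
    x = torch.cat(
        [self.aspp0(x), self.aspp1(x), self.aspp2(x), self.aspp3(x)], dim=1)
    return self.relu(self.batchnorm(self.conv(x)))
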
Example #22
    def generatePathBNs(self, srcLayer):
        if self != srcLayer:
            # get current BN
            currBN = self.orgBNs()[self.currWidthIdx()]
            # get current BN num_features
            bnFeatures = currBN.num_features
            # generate new BNs ModuleList
            newBNs = ModuleList([
                BatchNorm2d(bnFeatures) for _ in range(self.nWidths())
            ]).cuda()
            # copy weights to new BNs
            for bn in newBNs:
                bn.load_state_dict(currBN.state_dict())
            # set layer BNs
            self.bn = newBNs
            # update width List
            self._widthList = [self.currWidth()] * self.nWidths()
            # update width ratio list
            self._widthRatioList = [self.currWidthRatio()] * self.nWidths()
Example #23
    def _create_backbone(self):
        backbone = Sequential()

        section1 = Sequential()
        section1.add_module('Conv1', Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1))
        section1.add_module('BatchNorm1', BatchNorm2d(num_features=32))
        section1.add_module('Activation1', LeakyReLU(negative_slope=.1, inplace=True))

        backbone.add_module('Section1', section1)
        backbone.add_module('Section2', YOLOv3Block(index=2, in_channels=32, out_channels=64, convs_residual_count=1))
        backbone.add_module('Section3', YOLOv3Block(index=5, in_channels=64, out_channels=128, convs_residual_count=2))
        backbone.add_module('Section4',
                            YOLOv3Block(index=10, in_channels=128, out_channels=256, convs_residual_count=8))
        backbone.add_module('Section5',
                            YOLOv3Block(index=27, in_channels=256, out_channels=512, convs_residual_count=8))
        backbone.add_module('Section6',
                            YOLOv3Block(index=44, in_channels=512, out_channels=1024, convs_residual_count=4))

        return backbone
Example #24
    def __init__(self, number_of_classes, anchors_dims):
        super(Tail, self).__init__()
        self.num_of_yolo_layers = 3
        route_streams = [0, 2**3, 2**2]
        self.tails = ModuleList([
            ModuleList([
                Sequential(
                    Conv2d((2**(5 - i) + route_streams[i]) * filters_multiplier,
                           2**(4 - i) * filters_multiplier, **bottleneck),
                    BatchNorm2d(2**(4 - i) * filters_multiplier),
                    LeakyReLU(negative_slope),
                    Conv2d(2**(4 - i) * filters_multiplier,
                           2**(5 - i) * filters_multiplier, **casual),
                    BatchNorm2d(2**(5 - i) * filters_multiplier),
                    LeakyReLU(negative_slope),
                    Conv2d(2**(5 - i) * filters_multiplier,
                           2**(4 - i) * filters_multiplier, **bottleneck),
                    BatchNorm2d(2**(4 - i) * filters_multiplier),
                    LeakyReLU(negative_slope),
                    Conv2d(2**(4 - i) * filters_multiplier,
                           2**(5 - i) * filters_multiplier, **casual),
                    BatchNorm2d(2**(5 - i) * filters_multiplier),
                    LeakyReLU(negative_slope)),
                Sequential(
                    Conv2d(2**(5 - i) * filters_multiplier,
                           2**(4 - i) * filters_multiplier, **bottleneck),
                    BatchNorm2d(2**(4 - i) * filters_multiplier),
                    LeakyReLU(negative_slope)),
                Sequential(
                    Conv2d(2**(4 - i) * filters_multiplier,
                           2**(5 - i) * filters_multiplier, **casual),
                    BatchNorm2d(2**(5 - i) * filters_multiplier),
                    LeakyReLU(negative_slope)),
                Sequential(
                    Conv2d(2**(5 - i) * filters_multiplier,
                           anchors_dims[i] * (number_of_classes + 5), **prelude))
            ] + [
                Sequential(
                    Conv2d(2**(4 - i) * filters_multiplier,
                           2**(3 - i) * filters_multiplier, **bottleneck),
                    BatchNorm2d(2**(3 - i) * filters_multiplier),
                    LeakyReLU(negative_slope))
            ] * (i < 2)) for i in range(self.num_of_yolo_layers)
        ])
    def __init__(self, cfg):
        super(ConvResStem, self).__init__()

        C1 = cfg.MODEL.RESNETS.CONVRESSTEM.CONV_CHANNEL

        C2 = cfg.MODEL.RESNETS.CONVRESSTEM.RES_CHANNEL1
        n2 = cfg.MODEL.RESNETS.CONVRESSTEM.NUM_BLOCK_RES1
        C3 = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
        n3 = cfg.MODEL.RESNETS.CONVRESSTEM.NUM_BLOCK_RES2

        res_module = cfg.MODEL.RESNETS.TRANS_FUNC
        block_module = _TRANSFORMATION_MODULES[res_module]
        convs = []
        convs.append(conv3x3(3, C1))
        # use torch.nn's BatchNorm2d explicitly (not any project-level BN variant)
        from torch.nn import BatchNorm2d
        convs.append(BatchNorm2d(C1))
        convs.append(nn.ReLU(inplace=True))
        self.stem_conv = nn.Sequential(*convs)
        self.stem_res2 = _make_stage(
                block_module,
                in_channels=C1,
                bottleneck_channels=C2//4,
                out_channels=C2,
                block_count=n2,
                num_groups=1,
                stride_in_1x1=cfg.MODEL.RESNETS.STRIDE_IN_1X1,
                first_stride=1,
                dilation=1,
                dcn_config={},
            )
        self.stem_res3 = _make_stage(
                block_module,
                in_channels=C2,
                bottleneck_channels=C3//4,
                out_channels=C3,
                block_count=n3,
                num_groups=1,
                stride_in_1x1=cfg.MODEL.RESNETS.STRIDE_IN_1X1,
                first_stride=2,
                dilation=1,
                dcn_config={},
            )
Example #26
    def __init__(self):
        super(GCPAPSPSmallNet, self).__init__()

        self.hardnet = hardnet(arch=68)
        inplanes = 1024
        interplanes = 128

        self.fam45 = FAM(640, interplanes, interplanes, interplanes)
        self.fam34 = FAM(320, interplanes, interplanes, interplanes)
        self.fam23 = FAM(128, interplanes, interplanes, interplanes)

        self.linear5 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.linear4 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.linear3 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.linear2 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)

        self.conva = nn.Sequential(
            nn.Conv2d(inplanes, interplanes, 3, padding=1, bias=False),
            BatchNorm2d(interplanes),
            nn.ReLU(inplace=True),
        )
        # self.long_relation = SpatialCGNL(interplanes, interplanes // 2)
        self.long_relation = PSPModule(inplanes, interplanes)
        self.local_attention_4 = SmallLocalAttenModule(interplanes)
        self.local_attention_3 = SmallLocalAttenModule(interplanes)
        self.local_attention_2 = SmallLocalAttenModule(interplanes)
Example #27
    def __init__(self, k=1, n=28, drop_rate=0):
        super(WideResnetBackbone, self).__init__()
        self.k, self.n = k, n
        assert (self.n - 4) % 6 == 0
        n_blocks = (self.n - 4) // 6
        n_layers = [16] + [self.k * 16 * (2**i) for i in range(3)]

        self.conv1 = nn.Conv2d(3,
                               n_layers[0],
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.layer1 = self.create_layer(
            n_layers[0],
            n_layers[1],
            bnum=n_blocks,
            stride=1,
            drop_rate=drop_rate,
            pre_res_act=True,
        )
        self.layer2 = self.create_layer(
            n_layers[1],
            n_layers[2],
            bnum=n_blocks,
            stride=2,
            drop_rate=drop_rate,
            pre_res_act=False,
        )
        self.layer3 = self.create_layer(
            n_layers[2],
            n_layers[3],
            bnum=n_blocks,
            stride=2,
            drop_rate=drop_rate,
            pre_res_act=False,
        )
        self.bn_last = BatchNorm2d(n_layers[3], momentum=0.001)
        self.relu_last = nn.LeakyReLU(inplace=True, negative_slope=0.1)
        self.init_weight()
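
For the default n=28 this gives (28 - 4) // 6 = 4 residual blocks per stage, with stage widths [16, 16k, 32k, 64k]; e.g. a WRN-28-2 (k=2):

k = 2
print([16] + [k * 16 * (2 ** i) for i in range(3)])  # [16, 32, 64, 128]
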
Example #28
    def __init__(self):
        super(GCPACGNLResNet, self).__init__()
        self.bkbone = ResNet()
        inplanes = 2048
        interplanes = 256

        self.fam45 = FAM(1024, interplanes, interplanes, interplanes)
        self.fam34 = FAM(512, interplanes, interplanes, interplanes)
        self.fam23 = FAM(256, interplanes, interplanes, interplanes)

        self.linear5 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.linear4 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.linear3 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.linear2 = nn.Conv2d(interplanes,
                                 1,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)

        self.conva = nn.Sequential(
            nn.Conv2d(inplanes, interplanes, 3, padding=1, bias=False),
            BatchNorm2d(interplanes),
            nn.ReLU(inplace=True),
        )

        self.long_relation = SpatialCGNL(interplanes, interplanes // 2)
        self.local_attention_4 = LocalAttenModule(interplanes)
        self.local_attention_3 = LocalAttenModule(interplanes)
        self.local_attention_2 = LocalAttenModule(interplanes)
        self.local_attention = LocalAttenModule(interplanes)
Example #29
    def __init__(self, num_classes=10):
        super().__init__()
        self.in_channels = 64

        # First - Conv2D 7x7 with 2 stride -> BatchNorm2D -> ReLU -> MaxPool2D
        self.conv1 = Conv2d(1,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
        self.bn1 = BatchNorm2d(64)
        self.relu = ReLU(inplace=True)
        self.sigmoid = Sigmoid()
        self.max_pool = MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Setup residual parts block groups
        self.layer64 = self._make_residual_layer(out_channels=64, num_blocks=3)
        self.layer128 = self._make_residual_layer(out_channels=128,
                                                  num_blocks=4,
                                                  stride=2)
        self.layer256 = self._make_residual_layer(out_channels=256,
                                                  num_blocks=6,
                                                  stride=2)
        self.layer512 = self._make_residual_layer(out_channels=512,
                                                  num_blocks=3,
                                                  stride=2)

        self.avg_pool = AvgPool2d(kernel_size=(1, 1))
        self.fc_downscale = Linear(32768, 512)
        self.fc_downscale_2 = Linear(512, 128)
        self.fc = Linear(128, num_classes)

        # Initialize weights
        for m in self.modules():
            if isinstance(m, Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode="fan_out",
                                        nonlinearity="relu")
            elif isinstance(m, BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
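
The Linear(32768, 512) implies 8x8 feature maps at 512 channels (512 * 8 * 8 = 32768), which with the strides above corresponds to 256x256 single-channel inputs (an inference, not stated in the source):

c, h = 512, 256
h = h // 2 // 2        # stem conv (stride 2) + max pool (stride 2) -> 64
h = h // 2 // 2 // 2   # layer128/layer256/layer512, each stride 2 -> 8
print(c * h * h)       # 32768
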
Example #30
    def __init__(self,
                 in_chan,
                 out_chan,
                 ks=3,
                 stride=1,
                 padding=1,
                 *args,
                 **kwargs):
        super(ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(
            in_chan,
            out_chan,
            kernel_size=ks,
            stride=stride,
            padding=padding,
            bias=False,
        )
        self.bn = BatchNorm2d(out_chan)
        self.relu = nn.ReLU6(inplace=True)
        self.init_weight()