Example #1
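    # Res2Net backbone constructor: a 7x7 stem conv with BatchNorm and ReLU,
    # 3x3 max pooling, four residual stages built by _make_layer, then global
    # average pooling and a fully connected classification head.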
    def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000):
        self.inplanes = 64
        super(Res2Net, self).__init__()
        self.baseWidth = baseWidth
        self.scale = scale
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Example #2
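 # Spatial pyramid pooling (SPP) block: a 1x1 reducing conv, parallel max-pool
 # branches with kernel sizes k, and a 1x1 conv sized for the concatenation of
 # the input with all pooled branches (c_ * (len(k) + 1) channels).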
 def __init__(self, c1, c2, k=(5, 9, 13)):
     super(SPP, self).__init__()
     c_ = c1 // 2  # hidden channels
     self.cv1 = Conv(c1, c_, 1, 1)
     self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
     self.m = nn.ModuleList(
         [nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
Example #3
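 # Pools a 5x5 input with MaxPool2d(return_indices=True), then restores the
 # original spatial size with MaxUnpool2d: only the per-window maxima
 # (6, 8, 14, 16) reappear at their original positions; everything else is zero.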
 def test_unpool(self):
     from jittor import nn
     pool = nn.MaxPool2d(2, stride=2, return_indices=True)
     unpool = nn.MaxUnpool2d(2, stride=2)
     input = jt.array([[[[1., 2, 3, 4, 0],
                         [5, 6, 7, 8, 0],
                         [9, 10, 11, 12, 0],
                         [13, 14, 15, 16, 0],
                         [0, 0, 0, 0, 0]]]])
     output, indices = pool(input)
     out = unpool(output, indices, output_size=input.shape)
     assert (out == jt.array([[[[0., 0., 0., 0., 0.],
                                [0., 6., 0., 8., 0.],
                                [0., 0., 0., 0., 0.],
                                [0., 14., 0., 16., 0.],
                                [0., 0., 0., 0., 0.]]]])).all()
Example #4
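    # ResNet/ResNeXt backbone with a configurable output stride: per-stage
    # strides and dilations are chosen for output_stride 16 or 8, layer4 uses
    # multi-grid dilations for the deeper variants, and 1x1 lateral/top convs
    # plus 3x3 smoothing convs provide an FPN-style decoder. Pretrained
    # weights are loaded when requested.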
    def __init__(self,
                 arch,
                 block,
                 layers,
                 output_stride,
                 BatchNorm,
                 pretrained=True):
        self.inplanes = 64
        self.layers = layers
        self.arch = arch
        super(ResNet, self).__init__()
        blocks = [1, 2, 4]
        if output_stride == 16:
            strides = [1, 2, 2, 1]
            dilations = [1, 1, 1, 2]
        elif output_stride == 8:
            strides = [1, 2, 1, 1]
            dilations = [1, 1, 2, 4]
        else:
            strides = [1, 2, 2, 2]
            dilations = [1, 1, 1, 1]

        if arch == 'resnext50':
            self.base_width = 4
            self.groups = 32
        else:
            self.base_width = 64
            self.groups = 1

        # Modules
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = BatchNorm(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block,
                                       64,
                                       layers[0],
                                       stride=strides[0],
                                       dilation=dilations[0],
                                       BatchNorm=BatchNorm)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=strides[1],
                                       dilation=dilations[1],
                                       BatchNorm=BatchNorm)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=strides[2],
                                       dilation=dilations[2],
                                       BatchNorm=BatchNorm)
        if self.arch == 'resnet18':
            self.layer4 = self._make_layer(block,
                                           512,
                                           layers[3],
                                           stride=strides[3],
                                           dilation=dilations[3],
                                           BatchNorm=BatchNorm)
        else:
            self.layer4 = self._make_MG_unit(block,
                                             512,
                                             blocks=blocks,
                                             stride=strides[3],
                                             dilation=dilations[3],
                                             BatchNorm=BatchNorm)
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)

        if self.arch == 'resnet18':
            self.toplayer = nn.Conv2d(512,
                                      256,
                                      kernel_size=1,
                                      stride=1,
                                      padding=0)
            self.latlayer1 = nn.Conv2d(256,
                                       256,
                                       kernel_size=1,
                                       stride=1,
                                       padding=0)
            self.latlayer2 = nn.Conv2d(128,
                                       256,
                                       kernel_size=1,
                                       stride=1,
                                       padding=0)
            self.latlayer3 = nn.Conv2d(64,
                                       256,
                                       kernel_size=1,
                                       stride=1,
                                       padding=0)
        else:
            self.toplayer = nn.Conv2d(2048,
                                      256,
                                      kernel_size=1,
                                      stride=1,
                                      padding=0)
            self.latlayer1 = nn.Conv2d(1024,
                                       256,
                                       kernel_size=1,
                                       stride=1,
                                       padding=0)
            self.latlayer2 = nn.Conv2d(512,
                                       256,
                                       kernel_size=1,
                                       stride=1,
                                       padding=0)
            self.latlayer3 = nn.Conv2d(256,
                                       256,
                                       kernel_size=1,
                                       stride=1,
                                       padding=0)

        self.smooth1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)

        if pretrained:
            self._load_pretrained_model()
Example #5
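    # Two-stream FuseNet-style encoder-decoder: an RGB encoder whose pooling
    # layers keep indices (jt.nn.Pool(..., return_indices=True)), a parallel
    # depth/DHAC encoder with plain max pooling, and an RGB decoder that
    # upsamples with MaxUnpool2d using the stored indices.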
    def __init__(self, num_labels, depth_nc):
        super(LinkNet_BackBone_FuseNet, self).__init__()

        ##### RGB ENCODER ####
        self.CBR1_RGB_ENC = make_layers_from_size([[3, 64], [64, 64]])
        self.pool1 = jt.nn.Pool(2, return_indices=True)

        self.CBR2_RGB_ENC = make_layers_from_size([[64, 128], [128, 128]])
        self.pool2 = jt.nn.Pool(2, return_indices=True)

        self.CBR3_RGB_ENC = make_layers_from_size([[128, 256], [256, 256],
                                                   [256, 256]])
        self.pool3 = jt.nn.Pool(2, return_indices=True)
        self.dropout3 = nn.Dropout(0.4)

        self.CBR4_RGB_ENC = make_layers_from_size([[256, 512], [512, 512],
                                                   [512, 512]])
        self.pool4 = jt.nn.Pool(2, return_indices=True)
        self.dropout4 = nn.Dropout(0.4)

        self.CBR5_RGB_ENC = make_layers_from_size([[512, 512], [512, 512],
                                                   [512, 512]])
        self.dropout5 = nn.Dropout(0.4)

        self.pool5 = jt.nn.Pool(2, return_indices=True)

        ##### 3D DEPTH/DHAC ENCODER  ####

        self.CBR1_DEPTH_ENC = make_layers_from_size([[depth_nc, 64], [64, 64]])
        self.pool1_d = nn.MaxPool2d(kernel_size=2, stride=2)

        self.CBR2_DEPTH_ENC = make_layers_from_size([[64, 128], [128, 128]])
        self.pool2_d = nn.MaxPool2d(kernel_size=2, stride=2)

        self.CBR3_DEPTH_ENC = make_layers_from_size([[128, 256], [256, 256],
                                                     [256, 256]])
        self.pool3_d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.dropout3_d = nn.Dropout(0.4)

        self.CBR4_DEPTH_ENC = make_layers_from_size([[256, 512], [512, 512],
                                                     [512, 512]])
        self.pool4_d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.dropout4_d = nn.Dropout(0.4)

        self.CBR5_DEPTH_ENC = make_layers_from_size([[512, 512], [512, 512],
                                                     [512, 512]])

        ####  RGB DECODER  ####
        self.unpool5 = jt.nn.MaxUnpool2d(2)
        self.CBR5_RGB_DEC = make_layers_from_size([[512, 512], [512, 512],
                                                   [512, 512]])
        self.dropout5_dec = nn.Dropout(0.4)

        self.unpool4 = jt.nn.MaxUnpool2d(2)
        self.CBR4_RGB_DEC = make_layers_from_size([[512, 512], [512, 512],
                                                   [512, 256]])
        self.dropout4_dec = nn.Dropout(0.4)

        self.unpool3 = jt.nn.MaxUnpool2d(2)
        self.CBR3_RGB_DEC = make_layers_from_size([[256, 256], [256, 256],
                                                   [256, 128]])
        self.dropout3_dec = nn.Dropout(0.4)

        self.unpool2 = jt.nn.MaxUnpool2d(2)
        self.CBR2_RGB_DEC = make_layers_from_size([[128, 128], [128, 64]])

        self.unpool1 = jt.nn.MaxUnpool2d(2)
        self.CBR1_RGB_DEC = make_layers_from_size([[64, 64], [64, num_labels]],
                                                  isFinal=True)
Example #6
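# Candidate operation set for a DARTS-style architecture search: each key maps
# an op name to a factory taking (channels, stride, affine) and returning the
# module, including 3x3 average/max pooling, separable and dilated
# convolutions, and identity / factorized-reduce skip connections.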
import jittor as jt
from jittor import nn
import numpy as np
from jittor import Module

OPS = {
    'none': lambda C, stride, affine: Zero(stride),
    'avg_pool_3x3': lambda C, stride, affine: nn.AvgPool2d(
        3, stride=stride, padding=1, count_include_pad=False),
    'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(
        3, stride=stride, padding=1),
    'skip_connect': lambda C, stride, affine: (
        Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine)),
    'sep_conv_3x3': lambda C, stride, affine: SepConv(
        C, C, 3, stride, 1, affine=affine),
    'sep_conv_5x5': lambda C, stride, affine: SepConv(
        C, C, 5, stride, 2, affine=affine),
    'dil_conv_3x3': lambda C, stride, affine: DilConv(
        C, C, 3, stride, 2, 2, affine=affine),
    'dil_conv_5x5': lambda C, stride, affine: DilConv(
        C, C, 5, stride, 4, 2, affine=affine),
}


class ReLUConvBN(Module):
    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(ReLUConvBN, self).__init__()
        self.op = nn.Sequential(