Example #1
    def __init__(self, block, layers, num_classes=21):
        """Build a RefineNet-style segmentation network on a ResNet encoder.

        Args:
            block: residual block class used by ``_make_layer`` for the
                encoder stages (the 2048-channel input to the first dimred
                conv suggests a Bottleneck-style block with expansion 4 —
                TODO confirm).
            layers: sequence of four ints — number of residual blocks in
                each of the four encoder stages.
            num_classes: number of output channels of the final classifier
                conv (default 21, i.e. PASCAL VOC).
        """
        # NOTE(review): deliberately set before super().__init__();
        # presumably read/updated by self._make_layer — confirm.
        self.inplanes = 64
        super(RefineNet, self).__init__()
        # Dropout module; presumably applied in forward() — verify there.
        self.do = nn.Dropout(p=0.5)
        # Standard ResNet stem: 7x7 stride-2 conv -> BN -> ReLU -> 3x3 maxpool.
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four encoder stages; stages 2-4 downsample by stride 2.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # --- RefineNet decoder, stage 1 (deepest features, 2048 -> 512 ch) ---
        self.p_ims1d2_outl1_dimred = conv3x3(2048, 512, bias=False)
        self.adapt_stage1_b = self._make_rcu(512, 512, 2, 2)
        self.mflow_conv_g1_pool = self._make_crp(512, 512, 4)
        self.mflow_conv_g1_b = self._make_rcu(512, 512, 3, 2)
        self.mflow_conv_g1_b3_joint_varout_dimred = conv3x3(512,
                                                            256,
                                                            bias=False)
        # --- Decoder stage 2 (fuses with layer3 output, 1024 -> 256 ch) ---
        self.p_ims1d2_outl2_dimred = conv3x3(1024, 256, bias=False)
        self.adapt_stage2_b = self._make_rcu(256, 256, 2, 2)
        self.adapt_stage2_b2_joint_varout_dimred = conv3x3(256,
                                                           256,
                                                           bias=False)
        self.mflow_conv_g2_pool = self._make_crp(256, 256, 4)
        self.mflow_conv_g2_b = self._make_rcu(256, 256, 3, 2)
        self.mflow_conv_g2_b3_joint_varout_dimred = conv3x3(256,
                                                            256,
                                                            bias=False)

        # --- Decoder stage 3 (fuses with layer2 output, 512 -> 256 ch) ---
        self.p_ims1d2_outl3_dimred = conv3x3(512, 256, bias=False)
        self.adapt_stage3_b = self._make_rcu(256, 256, 2, 2)
        self.adapt_stage3_b2_joint_varout_dimred = conv3x3(256,
                                                           256,
                                                           bias=False)
        self.mflow_conv_g3_pool = self._make_crp(256, 256, 4)
        self.mflow_conv_g3_b = self._make_rcu(256, 256, 3, 2)
        self.mflow_conv_g3_b3_joint_varout_dimred = conv3x3(256,
                                                            256,
                                                            bias=False)

        # --- Decoder stage 4 (fuses with layer1 output, 256 -> 256 ch) ---
        self.p_ims1d2_outl4_dimred = conv3x3(256, 256, bias=False)
        self.adapt_stage4_b = self._make_rcu(256, 256, 2, 2)
        self.adapt_stage4_b2_joint_varout_dimred = conv3x3(256,
                                                           256,
                                                           bias=False)
        self.mflow_conv_g4_pool = self._make_crp(256, 256, 4)
        self.mflow_conv_g4_b = self._make_rcu(256, 256, 3, 2)

        # Final per-pixel classifier: 3x3 conv to num_classes channels,
        # stride 1 / padding 1 so spatial size is preserved.
        self.clf_conv = nn.Conv2d(256,
                                  num_classes,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1,
                                  bias=True)
Example #2
    def __init__(self, in_channels):
        """Multi-modal fusion block combining RGB and HHA feature volumes.

        Builds symmetric pre-fusion branches (1x1 conv -> 2 RCUs -> 3x3 conv)
        for each modality, plus a shared post-fusion CRP block. The actual
        fusion (presumably an element-wise sum) happens in forward() — verify.

        Args:
            in_channels: channel count of both input feature volumes; all
                convs here preserve this channel count.
        """
        super(MMFNet, self).__init__()

        # Dropout module; presumably applied in forward() — verify there.
        self.do = nn.Dropout(p=0.5)

        # these next blocks are the same for both RGB and HHA because
        #  the input volumes are exactly the same dimensions (including # channels)
        # conv3 is also used for the convolution before and after fusion

        # pre-fusion RGB blocks
        self.conv1_rgb = conv1x1(in_planes=in_channels, out_planes=in_channels)
        self.RCUs_rgb = nn.Sequential(  # 2 RCU blocks
            BasicBlock(inplanes=in_channels,
                       planes=in_channels),  # expansion=1, no downsampling
            BasicBlock(inplanes=in_channels,
                       planes=in_channels)  # expansion=1, no downsampling
        )
        self.conv3_rgb = conv3x3(in_planes=in_channels, out_planes=in_channels)

        # pre-fusion HHA blocks (mirror of the RGB branch above)
        self.conv1_hha = conv1x1(in_planes=in_channels, out_planes=in_channels)
        self.RCUs_hha = nn.Sequential(  # 2 RCU blocks
            BasicBlock(inplanes=in_channels,
                       planes=in_channels),  # expansion=1, no downsampling
            BasicBlock(inplanes=in_channels,
                       planes=in_channels)  # expansion=1, no downsampling
        )
        self.conv3_hha = conv3x3(in_planes=in_channels, out_planes=in_channels)

        # post-fusion block
        # NOTE(review): earlier maxpool+conv variant kept for reference;
        # the CRP block below replaced it.
        #self.maxpool = nn.MaxPool2d(kernel_size=5, stride=1)
        #self.conv3 = conv3x3(in_planes=in_channels, out_planes=in_channels)
        self.crp = CRPBlock(in_planes=in_channels,
                            out_planes=in_channels,
                            n_stages=1)
Example #3
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     """Standard ResNet BasicBlock: two 3x3 convs, each followed by BN.

     Args:
         inplanes: input channel count of the first conv.
         planes: output channel count of both convs.
         stride: stride of the first conv (default 1, i.e. no downsampling).
         downsample: optional module applied to the identity branch to match
             shapes; presumably used in forward() — verify there.
     """
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = nn.BatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = nn.BatchNorm2d(planes)
     self.downsample = downsample
     self.stride = stride
    def __init__(self, num_classes):
        """MobileNet-v2 encoder with a Light-Weight RefineNet decoder head.

        Args:
            num_classes: number of output channels of the final segmentation
                conv.

        NOTE(review): this method reads ``self.in_planes`` and
        ``self.mobilenet_config`` before assigning them — they must be class
        attributes defined on MBv2 (not visible here); confirm.
        """
        super(MBv2, self).__init__()

        # Stem: 3x3 stride-2 conv-BN-ReLU from RGB to self.in_planes channels.
        self.layer1 = convbnrelu(3, self.in_planes, kernel_size=3, stride=2)
        c_layer = 2
        # Each config entry presumably follows the MobileNet-v2 convention:
        # (expansion factor t, out-channels c, repeats n, stride s) — confirm.
        for t,c,n,s in (self.mobilenet_config):
            layers = []
            for idx in range(n):
                # Only the first block of a group applies the stride.
                layers.append(InvertedResidualBlock(self.in_planes, c, expansion_factor=t, stride=s if idx == 0 else 1))
                self.in_planes = c
            # Registers the group as self.layer2, self.layer3, ...
            setattr(self, 'layer{}'.format(c_layer), nn.Sequential(*layers))
            c_layer += 1

        ## Light-Weight RefineNet ##
        # 1x1 dimensionality-reduction convs mapping each encoder tap to 256 ch.
        self.conv8 = conv1x1(320, 256, bias=False)
        self.conv7 = conv1x1(160, 256, bias=False)
        self.conv6 = conv1x1(96, 256, bias=False)
        self.conv5 = conv1x1(64, 256, bias=False)
        self.conv4 = conv1x1(32, 256, bias=False)
        self.conv3 = conv1x1(24, 256, bias=False)
        # Chained residual pooling blocks, one per decoder level.
        self.crp4 = self._make_crp(256, 256, 4)
        self.crp3 = self._make_crp(256, 256, 4)
        self.crp2 = self._make_crp(256, 256, 4)
        self.crp1 = self._make_crp(256, 256, 4)

        # 1x1 adaptation convs applied before upsample-and-sum fusion.
        self.conv_adapt4 = conv1x1(256, 256, bias=False)
        self.conv_adapt3 = conv1x1(256, 256, bias=False)
        self.conv_adapt2 = conv1x1(256, 256, bias=False)

        # Final 3x3 segmentation conv to num_classes channels.
        self.segm = conv3x3(256, num_classes, bias=True)
        self.relu = nn.ReLU6(inplace=True)

        self._initialize_weights()
Example #5
    def __init__(self, inplanes_lr, inplanes_hr=None, fancy_upsample=False):
        """RefineNet fusion unit over a low-res and an optional high-res path.

        Args:
            inplanes_lr: channel count of the low-resolution input.
            inplanes_hr: channel count of the high-resolution input, or None
                when this unit refines a single (low-res) input and performs
                no fusion.
            fancy_upsample: if True, add a learnable strided transpose conv
                for upsampling; presumably used in forward() — verify there.
        """
        super(RefineNet, self).__init__()

        self.fancy_upsample = fancy_upsample

        # first set of RCU blocks
        # Fix: compare against the None singleton with `is`, not `==`
        # (PEP 8; `==` can be hijacked by a custom __eq__).
        if inplanes_hr is None:
            self.inplanes = inplanes_lr
            self.rcu1_lr = self._make_layer(BasicBlock,
                                            planes=inplanes_lr,
                                            blocks=2)
        else:
            # NOTE(review): only the high-res RCU is built in the two-input
            # case; if the low-res path also needs pre-fusion RCUs, one may
            # be missing here — confirm against forward().
            self.inplanes = inplanes_hr
            self.rcu1_hr = self._make_layer(BasicBlock,
                                            planes=inplanes_hr,
                                            blocks=2)

        # fusion: dimred convs mapping both paths to inplanes_hr channels
        if inplanes_hr is not None:
            self.adapt_stage2_b2_joint_varout_dimred = conv3x3(inplanes_hr,
                                                               inplanes_hr,
                                                               bias=False)
            self.mflow_conv_g1_b3_joint_varout_dimred = conv3x3(inplanes_lr,
                                                                inplanes_hr,
                                                                bias=False)
            if fancy_upsample:
                # learnable upsample with a single (strided) transpose convolution (for now)
                self.upsample = nn.ConvTranspose2d(inplanes_hr,
                                                   inplanes_hr,
                                                   kernel_size=3,
                                                   stride=2,
                                                   padding=1)
                #self.upsample = nn.ConvTranspose2d(inplanes_hr, inplanes_hr, kernel_size=5, stride=2, padding=2)

        # CRP and RCU for fusion result
        outplanes = inplanes_hr if inplanes_hr is not None else inplanes_lr
        self.mflow_conv_g1_pool = self._make_crp(outplanes, outplanes, 4)
        self.inplanes = outplanes
        self.rcu2 = self._make_layer(BasicBlock, planes=outplanes, blocks=1)