Example #1
    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,  # kernel_size
                                  1,  # stride
                                  1,  # padding
                                  bias=False),
                        SyncBatchNorm(
                            num_channels_cur_layer[i], momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            # kernel 3, stride 2, padding 1: halves resolution
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        SyncBatchNorm(outchannels, momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))

        return nn.ModuleList(transition_layers)
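
For context, this is the HRNet-style stage transition. A minimal sketch of how the returned ModuleList is typically consumed at forward time (the names transition and y_list are illustrative, not from this excerpt):

    # y_list holds the previous stage's per-branch feature maps. A None entry
    # means the branch is reused unchanged; entries past the old branch count
    # grow a new, lower-resolution branch from the last existing feature map.
    x_list = []
    for i, trans in enumerate(transition):
        src = y_list[i] if i < len(y_list) else y_list[-1]
        x_list.append(src if trans is None else trans(src))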
Example #2
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 normalize=dict(type='BN'),
                 dcn=None):
        super(BasicBlock, self).__init__()
        assert dcn is None, "Not implemented yet."

        # self.norm1_name, norm1 = build_norm_layer(normalize, planes, postfix=1)
        # self.norm2_name, norm2 = build_norm_layer(normalize, planes, postfix=2)

        self.conv1 = conv3x3(inplanes, planes, stride, dilation)
        self.bn1 = SyncBatchNorm(planes)
        # self.add_module(self.norm1_name, norm1)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = SyncBatchNorm(planes)
        # self.add_module(self.norm2_name, norm2)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp
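
The forward pass is not part of this excerpt; for orientation, here is a sketch of the standard residual forward that this initialization supports (an assumption based on the usual BasicBlock definition):

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        return self.relu(out + identity)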
Example #3
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = SyncBatchNorm(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = SyncBatchNorm(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride
Example #4
    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            nn.Conv2d(num_inchannels[j],
                                      num_inchannels[i],
                                      1,  # kernel_size
                                      1,  # stride
                                      0,  # padding
                                      bias=False),
                            SyncBatchNorm(num_inchannels[i],
                                          momentum=BN_MOMENTUM),
                            nn.Upsample(scale_factor=2**(j - i),
                                        mode='nearest')))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(num_inchannels[j],
                                              num_outchannels_conv3x3,
                                              3,  # kernel_size
                                              2,  # stride
                                              1,  # padding
                                              bias=False),
                                    SyncBatchNorm(num_outchannels_conv3x3,
                                                  momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(num_inchannels[j],
                                              num_outchannels_conv3x3,
                                              3,  # kernel_size
                                              2,  # stride
                                              1,  # padding
                                              bias=False),
                                    SyncBatchNorm(num_outchannels_conv3x3,
                                                  momentum=BN_MOMENTUM),
                                    nn.ReLU(False)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
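
A sketch of how HRNet-style code usually consumes these fuse layers in forward: every input branch j is brought to output branch i's resolution and width (None meaning identity), the results are summed, and a ReLU is applied (x is the list of per-branch feature maps; names are illustrative):

    x_fuse = []
    for i, fuse_layer in enumerate(self.fuse_layers):
        y = x[0] if i == 0 else fuse_layer[0](x[0])
        for j in range(1, self.num_branches):
            y = y + (x[j] if i == j else fuse_layer[j](x[j]))
        x_fuse.append(self.relu(y))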
Example #5
    def __init__(self, channels=32, refine=False):
        super(SGABlock, self).__init__()
        self.refine = refine
        if self.refine:
            self.bn_relu = nn.Sequential(SyncBatchNorm(channels),
                                         nn.ReLU(inplace=True))
            self.conv_refine = BasicConv(channels, channels, is_3d=True,
                                         kernel_size=3, padding=1, relu=False)
        else:
            self.bn = SyncBatchNorm(channels)
        self.SGA = SGA()
        self.relu = nn.ReLU(inplace=True)
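
Here SGA() is presumably GANet's semi-global aggregation module; the refine flag only selects between a plain SyncBatchNorm on the aggregated volume and an extra 3D-conv refinement path followed by BN and ReLU.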
Example #6
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = SyncBatchNorm(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = SyncBatchNorm(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = SyncBatchNorm(planes * self.expansion,
                                 momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
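
Note that expansion is a class attribute of Bottleneck (4 in the standard ResNet definition), referenced above but defined outside this excerpt.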
Example #7
    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1):
        downsample = None
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                SyncBatchNorm(num_channels[branch_index] * block.expansion,
                              momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(
            block(self.num_inchannels[branch_index],
                  num_channels[branch_index], stride, downsample))
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(self.num_inchannels[branch_index],
                      num_channels[branch_index]))

        return nn.Sequential(*layers)
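
In HRNet this helper is normally invoked once per branch when a stage is built; an illustrative sketch of that loop (names are assumptions):

    branches = nn.ModuleList([
        self._make_one_branch(i, block, num_blocks, num_channels)
        for i in range(num_branches)
    ])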
Example #8
    def make_conv(in_channels,
                  out_channels,
                  kernel_size,
                  stride=1,
                  dilation=1):
        conv = Conv2d(in_channels,
                      out_channels,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=dilation * (kernel_size - 1) // 2,
                      dilation=dilation,
                      bias=False if norm_func else True)
        # Caffe2 implementation uses XavierFill, which in fact
        # corresponds to kaiming_uniform_ in PyTorch
        nn.init.kaiming_uniform_(conv.weight, a=1)
        if not norm_func:
            nn.init.constant_(conv.bias, 0)
        module = [
            conv,
        ]

        if norm_func == 'GN':
            module.append(group_norm(out_channels))
        elif norm_func == 'SyncBN':
            module.append(SyncBatchNorm(out_channels))
        elif norm_func:
            raise TypeError("invalid norm_func", norm_func)
        if use_relu:
            module.append(nn.ReLU(inplace=True))
        if len(module) > 1:
            return nn.Sequential(*module)
        return conv
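
Note that norm_func, use_relu, and group_norm are free variables here: in the maskrcnn-benchmark pattern this function is defined inside, and returned from, a factory that binds them. An illustrative call, assuming the factory was asked for SyncBN plus ReLU:

    # Yields Conv2d -> SyncBatchNorm -> ReLU; the padding formula
    # dilation * (kernel_size - 1) // 2 keeps the spatial size unchanged
    # for odd kernels at stride 1.
    block = make_conv(256, 256, kernel_size=3, dilation=2)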
Example #9
    def __init__(self, in_channels, out_channels, deconv=False, is_3d=False,
                 bn=True, relu=True, **kwargs):
        super(BasicConv, self).__init__()
        self.relu = relu
        self.use_bn = bn
        if is_3d:
            if deconv:
                self.conv = nn.ConvTranspose3d(in_channels, out_channels,
                                               bias=False, **kwargs)
            else:
                self.conv = nn.Conv3d(in_channels, out_channels,
                                      bias=False, **kwargs)
        else:
            if deconv:
                self.conv = nn.ConvTranspose2d(in_channels, out_channels,
                                               bias=False, **kwargs)
            else:
                self.conv = nn.Conv2d(in_channels, out_channels,
                                      bias=False, **kwargs)
        # Same norm layer for the 2D and 3D cases, so create it once.
        self.bn = SyncBatchNorm(out_channels)
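
The forward pass is omitted; given the flags stored above (and noting that self.bn is created even when bn=False, it is just not applied), it presumably looks like this, with F standing for torch.nn.functional:

    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.relu:
            x = F.relu(x, inplace=True)
        return x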
Example #10
    def _make_stem_layer(self):
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        # self.norm1_name, norm1 = build_norm_layer(
        #     self.normalize, 64, postfix=1)
        self.bn1 = SyncBatchNorm(64)
        # self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
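
Together, the stride-2 7x7 convolution and the stride-2 max pool reduce spatial resolution by 4x: a 3x224x224 input leaves this stem as a 64x56x56 feature map.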
Example #11
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                SyncBatchNorm(planes * block.expansion, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))

        return nn.Sequential(*layers)
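
A typical call, matching how HRNet builds its first stage (illustrative values):

    self.layer1 = self._make_layer(Bottleneck, 64, 64, 4)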
Example #12
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   normalize=dict(type='BN'),
                   dcn=None):
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(inplanes,
                      planes * block.expansion,
                      kernel_size=1,
                      stride=stride,
                      bias=False),
            SyncBatchNorm(planes * block.expansion)
            # build_norm_layer(normalize, planes * block.expansion)[1],
        )

    layers = []
    layers.append(
        block(inplanes,
              planes,
              stride,
              dilation,
              downsample,
              style=style,
              with_cp=with_cp,
              normalize=normalize,
              dcn=dcn))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(inplanes,
                  planes,
                  1,
                  dilation,
                  style=style,
                  with_cp=with_cp,
                  normalize=normalize,
                  dcn=dcn))

    return nn.Sequential(*layers)
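
An illustrative call in the mmdetection ResNet style (the values are hypothetical):

    res_layer = make_res_layer(Bottleneck, 256, 128, blocks=4, stride=2,
                               normalize=dict(type='BN'))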
Example #13
    def __init__(self, maxdisp=192):
        super(GANet, self).__init__()
        self.maxdisp = maxdisp
        self.conv_start = nn.Sequential(BasicConv(3, 16, kernel_size=3, padding=1),
                                        BasicConv(16, 32, kernel_size=3, padding=1))

        self.conv_x = BasicConv(32, 32, kernel_size=3, padding=1)
        self.conv_y = BasicConv(32, 32, kernel_size=3, padding=1)
        self.conv_refine = nn.Conv2d(32, 32, (3, 3), (1, 1), (1, 1), bias=False)
        self.bn_relu = nn.Sequential(SyncBatchNorm(32), nn.ReLU(inplace=True))
        self.feature = Feature()
        self.guidance = Guidance()
        self.cost_agg = CostAggregation(self.maxdisp)
        self.cv = GetCostVolume(int(self.maxdisp / 3))

        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Conv3d)):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SyncBatchNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
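
One operational caveat: SyncBatchNorm layers generally expect an initialized distributed process group before training starts. A minimal, hypothetical single-node setup (the exact requirement depends on which SyncBatchNorm implementation is in scope, e.g. torch.nn or apex):

    import torch.distributed as dist

    dist.init_process_group(backend='nccl', init_method='env://')
    model = GANet(maxdisp=192).cuda()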
Example #14
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 normalize=dict(type='BN'),
                 dcn=None):
        """Bottleneck block for ResNet.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        self.inplanes = inplanes
        self.planes = planes
        self.normalize = normalize
        self.dcn = dcn
        self.with_dcn = dcn is not None
        if style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        # self.norm1_name, norm1 = build_norm_layer(normalize, planes, postfix=1)
        # self.norm2_name, norm2 = build_norm_layer(normalize, planes, postfix=2)
        # self.norm3_name, norm3 = build_norm_layer(
        #     normalize, planes * self.expansion, postfix=3)

        self.conv1 = nn.Conv2d(inplanes,
                               planes,
                               kernel_size=1,
                               stride=self.conv1_stride,
                               bias=False)
        self.bn1 = SyncBatchNorm(planes)
        # self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = dcn.get('fallback_on_stride', False)
            self.with_modulated_dcn = dcn.get('modulated', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = nn.Conv2d(planes,
                                   planes,
                                   kernel_size=3,
                                   stride=self.conv2_stride,
                                   padding=dilation,
                                   dilation=dilation,
                                   bias=False)
        else:
            deformable_groups = dcn.get('deformable_groups', 1)
            if not self.with_modulated_dcn:
                # DCNv1: 2 offsets (x, y) for each of the 3x3 kernel positions.
                conv_op = DeformConv
                offset_channels = 18
            else:
                # DCNv2: 2 offsets + 1 modulation scalar per kernel position.
                conv_op = ModulatedDeformConv
                offset_channels = 27
            self.conv2_offset = nn.Conv2d(planes,
                                          deformable_groups * offset_channels,
                                          kernel_size=3,
                                          stride=self.conv2_stride,
                                          padding=dilation,
                                          dilation=dilation)
            self.conv2 = conv_op(planes,
                                 planes,
                                 kernel_size=3,
                                 stride=self.conv2_stride,
                                 padding=dilation,
                                 dilation=dilation,
                                 deformable_groups=deformable_groups,
                                 bias=False)
        self.bn2 = SyncBatchNorm(planes)  # add_module(self.norm2_name, norm2)
        self.conv3 = nn.Conv2d(planes,
                               planes * self.expansion,
                               kernel_size=1,
                               bias=False)
        self.bn3 = SyncBatchNorm(
            planes * self.expansion)  # add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp
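
For the deformable branch, an mmdetection-style dcn dict is expected; an illustrative instantiation (hypothetical values):

    dcn = dict(modulated=True, deformable_groups=1, fallback_on_stride=False)
    block = Bottleneck(inplanes=256, planes=64, stride=2, dcn=dcn)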
Example #15
    def __init__(self,
                 extra,
                 norm_eval=True,
                 zero_init_residual=False,
                 frozen_stages=-1):
        super(SyncHighResolutionNet, self).__init__()
        self.norm_eval = norm_eval
        self.frozen_stages = frozen_stages
        self.zero_init_residual = zero_init_residual
        self.extra = extra
        # stem net
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)
        self.bn1 = SyncBatchNorm(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64,
                               64,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)
        self.bn2 = SyncBatchNorm(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)

        # stage 1
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]

        block = blocks_dict[block_type]
        stage1_out_channels = num_channels * block.expansion
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)

        # stage 2
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']

        block = blocks_dict[block_type]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition1 = self._make_transition_layer([stage1_out_channels],
                                                       num_channels)
        # num_modules, num_branches, num_blocks, num_channels, block, fuse_method, num_inchannels
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # stage 3
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']

        block = blocks_dict[block_type]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # stage 4
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']

        block = blocks_dict[block_type]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels)
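
For reference, an extra config in the usual HRNet-W32 layout that this constructor expects (illustrative values):

    extra = dict(
        stage1=dict(num_modules=1, num_branches=1, block='BOTTLENECK',
                    num_blocks=(4,), num_channels=(64,)),
        stage2=dict(num_modules=1, num_branches=2, block='BASIC',
                    num_blocks=(4, 4), num_channels=(32, 64)),
        stage3=dict(num_modules=4, num_branches=3, block='BASIC',
                    num_blocks=(4, 4, 4), num_channels=(32, 64, 128)),
        stage4=dict(num_modules=3, num_branches=4, block='BASIC',
                    num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256)),
    )
    model = SyncHighResolutionNet(extra)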