Example #1
    def __init__(self):
        super().__init__()
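        # Position branch: a shared head feeding separate position classification and regression outputs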
        if cfg.LESION.USE_POSITION:
            #self.Position_Head = position_Xconv1fc_gn_head(2048,1024,3)
            self.Position_Head = position_Xconv1fc_gn_head(256, 1024, 3)
            self.Position_Cls_Outs = position_cls_outputs(
                self.Position_Head.dim_out)
            self.Position_Reg_Outs = position_reg_outputs(
                self.Position_Head.dim_out)
        if cfg.LESION.POS_CONCAT_RCNN:
            self.Box_Outs = fast_rcnn_heads.fast_rcnn_outputs(
                self.Box_Head.dim_out + self.Position_Head.dim_out)

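        # CBAM channel attention over the backbone features of all NUM_IMAGES_3DCE slices,
        # concatenated along the channel dimension (spatial attention disabled)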
        self.cbam = CBAM(self.Conv_Body.dim_out * cfg.LESION.NUM_IMAGES_3DCE,
                         16,
                         no_spatial=True)
Example #2
    def __init__(self, block_counts):
        super().__init__()
        self.block_counts = block_counts
        self.convX = len(block_counts) + 1
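        # Nominal depth: 3 conv layers per bottleneck block plus 2 (stem conv + final fc);
        # the 3 * (convX == 4) term appears to count the res5 blocks omitted from a conv4-only body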
        self.num_layers = (sum(block_counts) + 3 * (self.convX == 4)) * 3 + 2

        self.res1 = globals()[cfg.RESNETS.STEM_FUNC]()
        dim_in = 64
        dim_bottleneck = cfg.RESNETS.NUM_GROUPS * cfg.RESNETS.WIDTH_PER_GROUP
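        # Residual stages; add_stage returns the stage module and its output channel count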
        self.res2, dim_in = add_stage(dim_in,
                                      256,
                                      dim_bottleneck,
                                      block_counts[0],
                                      dilation=1,
                                      stride_init=1)
        self.res3, dim_in = add_stage(dim_in,
                                      512,
                                      dim_bottleneck * 2,
                                      block_counts[1],
                                      dilation=1,
                                      stride_init=2)
        self.res4, dim_in = add_stage(dim_in,
                                      1024,
                                      dim_bottleneck * 4,
                                      block_counts[2],
                                      dilation=1,
                                      stride_init=2)
        if len(block_counts) == 4:
            stride_init = 2 if cfg.RESNETS.RES5_DILATION == 1 else 1
            self.res5, dim_in = add_stage(dim_in, 2048, dim_bottleneck * 8,
                                          block_counts[3],
                                          cfg.RESNETS.RES5_DILATION,
                                          stride_init)
            self.spatial_scale = 1 / 32 * cfg.RESNETS.RES5_DILATION
        else:
            self.spatial_scale = 1 / 16  # final feature scale wrt. original image scale

        self.dim_out = dim_in
        # Position Branch
        if cfg.LESION.SHALLOW_POSITION:
            self.Position_Head = position_Xconv1fc_gn_head(64)
            self.Position_Cls_Outs = position_cls_outputs(
                self.Position_Head.dim_out)
            self.Position_Reg_Outs = position_reg_outputs(
                self.Position_Head.dim_out)

        self._init_modules()
Example #3
    def __init__(self):
        super().__init__()

        # Position Branch
        if cfg.LESION.USE_POSITION:
            self.Position_Head = position_Xconv1fc_gn_head(2048)
            self.Position_Cls_Outs = position_cls_outputs(self.Position_Head.dim_out)
            self.Position_Reg_Outs = position_reg_outputs(self.Position_Head.dim_out)

        # BBOX Branch
        if not cfg.MODEL.RPN_ONLY:
            # Add box head for context roi-pooling
            self.Context_Box_Head = get_func(cfg.FAST_RCNN.ROI_CONTEXT_BOX_HEAD)(
                self.RPN.dim_out, self.context_roi_feature_transform, self.Conv_Body.spatial_scale)
            # For context-enhanced RoI pooling, use fast_rcnn_disentangle_outputs instead.
            self.Box_Outs = fast_rcnn_heads.fast_rcnn_disentangle_outputs(
                self.Box_Head.dim_out)

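        # Placeholder for per-image size/scale info, presumably filled in at forward time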
        self.im_info = None
        self._init_modules()
Example #4
    def __init__(self):
        super().__init__()

        # For cache
        self.mapping_to_detectron = None
        self.orphans_in_detectron = None

        # Backbone for feature extraction
        self.Conv_Body = get_func(cfg.MODEL.CONV_BODY)()

        # Layers for left-right view
        # Added by zhangfandong
        # LR_VIEW, GIF and LRASY_MAHA should not all be enabled at the same time
        assert (not cfg.MODEL.LR_VIEW_ON) or (not cfg.MODEL.GIF_ON) or (
            not cfg.MODEL.LRASY_MAHA_ON)
        if cfg.MODEL.LR_VIEW_ON:
            #assert cfg.FPN.FPN_ON is False
            self.lr_view_net = lrview_net_body.get_lrv_net(
                self.Conv_Body.dim_out * 2, self.Conv_Body.dim_out,
                cfg.LR_VIEW.FUSION_NET)
        elif cfg.MODEL.LRASY_MAHA_ON:
            self.lrasy_maha_net = lrview_net_body.get_lrvasymaha_net(
                self.Conv_Body.dim_out, self.Conv_Body.dim_out,
                cfg.LR_VIEW.FUSION_NET)
        elif cfg.MODEL.GIF_ON:
            self.gif_net = gif_net_body.get_gif_net(self.Conv_Body.dim_out)

        # Region Proposal Network
        if cfg.RPN.RPN_ON:
            self.RPN = rpn_heads.generic_rpn_outputs(
                self.Conv_Body.dim_out, self.Conv_Body.spatial_scale)

        if cfg.FPN.FPN_ON:
            # Only supports case when RPN and ROI min levels are the same
            #assert cfg.FPN.RPN_MIN_LEVEL == cfg.FPN.ROI_MIN_LEVEL
            # RPN max level can be >= to ROI max level
            assert cfg.FPN.RPN_MAX_LEVEL >= cfg.FPN.ROI_MAX_LEVEL
            # FPN RPN max level might be > FPN ROI max level in which case we
            # need to discard some leading conv blobs (blobs are ordered from
            # max/coarsest level to min/finest level)
            self.num_roi_levels = cfg.FPN.ROI_MAX_LEVEL - cfg.FPN.ROI_MIN_LEVEL + 1

            # Retain only the spatial scales that will be used for RoI heads. `Conv_Body.spatial_scale`
            # may include extra scales that are used for RPN proposals, but not for RoI heads.
            self.Conv_Body.spatial_scale = self.Conv_Body.spatial_scale[
                -self.num_roi_levels:]

        # BBOX Branch
        if not cfg.MODEL.RPN_ONLY:
            self.Box_Head = get_func(cfg.FAST_RCNN.ROI_BOX_HEAD)(
                self.RPN.dim_out, self.roi_feature_transform,
                self.Conv_Body.spatial_scale)
            self.Box_Outs = fast_rcnn_heads.fast_rcnn_outputs(
                self.Box_Head.dim_out)
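        # Optional per-RoI position outputs computed from the box-head features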
        if cfg.LESION.POSITION_RCNN:
            self.Position_RCNN = position_outputs(self.Box_Head.dim_out)

        # Position Branch
        if cfg.LESION.USE_POSITION:
            #self.Position_Head = position_Xconv1fc_gn_head(2048,1024,2)
            self.Position_Head = position_Xconv1fc_gn_head(2048, 1024, 3)
            self.Position_Cls_Outs = position_cls_outputs(
                self.Position_Head.dim_out)
            self.Position_Reg_Outs = position_reg_outputs(
                self.Position_Head.dim_out)

        # Mask Branch
        if cfg.MODEL.MASK_ON:
            self.Mask_Head = get_func(cfg.MRCNN.ROI_MASK_HEAD)(
                self.RPN.dim_out, self.roi_feature_transform,
                self.Conv_Body.spatial_scale)
            if getattr(self.Mask_Head, 'SHARE_RES5', False):
                self.Mask_Head.share_res5_module(self.Box_Head.res5)
            self.Mask_Outs = mask_rcnn_heads.mask_rcnn_outputs(
                self.Mask_Head.dim_out)

        # Keypoints Branch
        if cfg.MODEL.KEYPOINTS_ON:
            self.Keypoint_Head = get_func(cfg.KRCNN.ROI_KEYPOINTS_HEAD)(
                self.RPN.dim_out, self.roi_feature_transform,
                self.Conv_Body.spatial_scale)
            if getattr(self.Keypoint_Head, 'SHARE_RES5', False):
                self.Keypoint_Head.share_res5_module(self.Box_Head.res5)
            self.Keypoint_Outs = keypoint_rcnn_heads.keypoint_outputs(
                self.Keypoint_Head.dim_out)

        self._init_modules()
Example #5
File: FPN.py  Project: xixiobba/MVP-Net
    def __init__(self, conv_body_func, fpn_level_info, P2only=False, cascade_bu=False):
        super().__init__()
        self.fpn_level_info = fpn_level_info
        self.P2only = P2only
        self.cascade_bu = cascade_bu

        self.dim_out = fpn_dim = cfg.FPN.DIM
        min_level, max_level = get_min_max_levels()
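        # Number of backbone stages to build FPN levels for, counted from the coarsest stage down to min_level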
        self.num_backbone_stages = len(fpn_level_info.blobs) - (min_level - LOWEST_BACKBONE_LVL)
        fpn_dim_lateral = fpn_level_info.dims
        self.spatial_scale = []  # a list of scales for FPN outputs

        #
        # Step 1: recursively build down starting from the coarsest backbone level
        #
        # For the coarsest backbone level: 1x1 conv only seeds recursion
        if cfg.FPN.USE_GN:
            self.conv_top = nn.Sequential(
                nn.Conv2d(fpn_dim_lateral[0], fpn_dim, 1, 1, 0, bias=False),
                nn.GroupNorm(net_utils.get_group_gn(fpn_dim), fpn_dim,
                             eps=cfg.GROUP_NORM.EPSILON)
            )
        else:
            self.conv_top = nn.Conv2d(fpn_dim_lateral[0], fpn_dim, 1, 1, 0)
        self.topdown_lateral_modules = nn.ModuleList()
        self.posthoc_modules = nn.ModuleList()

        # For other levels add top-down and lateral connections
        for i in range(self.num_backbone_stages - 1):
            self.topdown_lateral_modules.append(
                topdown_lateral_module(fpn_dim, fpn_dim_lateral[i+1])
            )

        # Post-hoc scale-specific 3x3 convs
        for i in range(self.num_backbone_stages):
            if cfg.FPN.USE_GN:
                self.posthoc_modules.append(nn.Sequential(
                    nn.Conv2d(fpn_dim, fpn_dim, 3, 1, 1, bias=False),
                    nn.GroupNorm(net_utils.get_group_gn(fpn_dim), fpn_dim,
                                 eps=cfg.GROUP_NORM.EPSILON)
                ))
            else:
                self.posthoc_modules.append(
                    nn.Conv2d(fpn_dim, fpn_dim, 3, 1, 1)
                )

            self.spatial_scale.append(fpn_level_info.spatial_scales[i])

        # Added for the cascade bottom-up path
        if self.cascade_bu:
            self.cascade_strideconv_modules = nn.ModuleList()
            for i in range(self.num_backbone_stages - 1):
                if cfg.FPN.USE_GN:
                    self.cascade_strideconv_modules.append(nn.Sequential(
                        nn.Conv2d(fpn_dim, fpn_dim, 3, 2, 1, bias=True),
                        nn.GroupNorm(net_utils.get_group_gn(fpn_dim), fpn_dim,
                                    eps=cfg.GROUP_NORM.EPSILON),
                        nn.ReLU(inplace=True)
                    ))
                else:
                    self.cascade_strideconv_modules.append(
                        nn.Conv2d(fpn_dim, fpn_dim, 3, 2, 1)
                    )

        #
        # Step 2: build up starting from the coarsest backbone level
        #
        # Check if we need the P6 feature map
        if not cfg.FPN.EXTRA_CONV_LEVELS and max_level == HIGHEST_BACKBONE_LVL + 1:
            # Original FPN P6 level implementation from our CVPR'17 FPN paper
            # Use max pooling to simulate stride 2 subsampling
            self.maxpool_p6 = nn.MaxPool2d(kernel_size=1, stride=2, padding=0)
            self.spatial_scale.insert(0, self.spatial_scale[0] * 0.5)

        # Coarser FPN levels introduced for RetinaNet
        if cfg.FPN.EXTRA_CONV_LEVELS and max_level > HIGHEST_BACKBONE_LVL:
            self.extra_pyramid_modules = nn.ModuleList()
            dim_in = fpn_level_info.dims[0]
            for i in range(HIGHEST_BACKBONE_LVL + 1, max_level + 1):
                self.extra_pyramid_modules.append(
                    nn.Conv2d(dim_in, fpn_dim, 3, 2, 1)
                )
                dim_in = fpn_dim
                self.spatial_scale.insert(0, self.spatial_scale[0] * 0.5)

        if self.P2only:
            # use only the finest level
            self.spatial_scale = self.spatial_scale[-1]

        self._init_weights()

        # Deliberately add conv_body after _init_weights.
        # conv_body has its own _init_weights function
        self.conv_body = conv_body_func()  # e.g. a ResNet body

        # Position Branch
        if cfg.LESION.SHALLOW_POSITION:
            self.Position_Head = position_Xconv1fc_gn_head(64)
            self.Position_Cls_Outs = position_cls_outputs(self.Position_Head.dim_out)
            self.Position_Reg_Outs = position_reg_outputs(self.Position_Head.dim_out)
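            # Small MLP mapping the pooled position features to a 64-d vector with sigmoid activation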
            self.pos_fc = nn.Sequential(
                nn.Linear(self.Position_Head.dim_out, 128, bias=False),
                nn.ReLU(inplace=True),
                nn.Linear(128, 64, bias=False),
                nn.Sigmoid())