Example #1
	def __init__(self, dim_in, spatial_scales):
		super().__init__()
		self.dim_in = dim_in
		self.spatial_scales = spatial_scales
		self.dim_out = self.dim_in
		num_anchors = len(cfg.FPN.RPN_ASPECT_RATIOS)
		
		# Create conv ops shared by all FPN levels
		self.FPN_RPN_conv = nn.Conv2d(dim_in, self.dim_out, 3, 1, 1)
		dim_score = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \
			else num_anchors
		self.FPN_RPN_cls_score = nn.Conv2d(self.dim_out, dim_score, 1, 1, 0)
		self.FPN_RPN_bbox_pred = nn.Conv2d(self.dim_out, 4 * num_anchors, 1, 1, 0)
		
		self.GenerateProposals_modules = nn.ModuleList()
		k_max = cfg.FPN.RPN_MAX_LEVEL  # coarsest level of pyramid
		k_min = cfg.FPN.RPN_MIN_LEVEL  # finest level of pyramid
		for lvl in range(k_min, k_max + 1):
			sc = self.spatial_scales[k_max - lvl]  # in reversed order
			lvl_anchors = generate_anchors(
				stride = 2. ** lvl,
				sizes = (cfg.FPN.RPN_ANCHOR_START_SIZE * 2. ** (lvl - k_min),),
				aspect_ratios = cfg.FPN.RPN_ASPECT_RATIOS
			)
			self.GenerateProposals_modules.append(GenerateProposalsOp(lvl_anchors, sc))
		
		self.CollectAndDistributeFpnRpnProposals = CollectAndDistributeFpnRpnProposalsOp()
		
		self._init_weights()
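
For orientation: with typical FPN settings (assumed here: RPN_MIN_LEVEL=2, RPN_MAX_LEVEL=6, RPN_ANCHOR_START_SIZE=32 and three aspect ratios), the loop above doubles both the stride and the single anchor size at every level, so level 2 gets 32-pixel anchors at stride 4 and level 6 gets 512-pixel anchors at stride 64. A minimal sketch of that arithmetic, without the repo's generate_anchors:

# Assumed stand-ins for the cfg.FPN.* values; the real ones come from the config.
K_MIN, K_MAX = 2, 6          # RPN_MIN_LEVEL / RPN_MAX_LEVEL
START_SIZE = 32              # RPN_ANCHOR_START_SIZE
ASPECT_RATIOS = (0.5, 1, 2)  # RPN_ASPECT_RATIOS

for lvl in range(K_MIN, K_MAX + 1):
    stride = 2. ** lvl                       # feature stride at this level
    size = START_SIZE * 2. ** (lvl - K_MIN)  # single anchor size per level
    print('lvl %d: stride=%d, size=%d, anchors per cell=%d'
          % (lvl, stride, size, len(ASPECT_RATIOS)))

This prints strides 4, 8, 16, 32, 64 and sizes 32, 64, 128, 256, 512; the anchor count per location is just len(RPN_ASPECT_RATIOS), since each level carries exactly one size.
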
Example #2
    def __init__(self, dim_in, spatial_scales):
        super().__init__()
        self.dim_in = dim_in
        self.spatial_scales = spatial_scales
        self.dim_out = self.dim_in
        num_anchors = len(cfg.FPN.RPN_ASPECT_RATIOS)

        # Create conv ops shared by all FPN levels
        # 12/25: RPN 3D context test (3DCE-style multi-slice input)
        if cfg.LESION.CONCAT_BEFORE_RPN:
            # Features of the NUM_IMAGES_3DCE slices are concatenated along the
            # channel axis; a 1x1 conv reduces them back to dim_in channels.
            #self.cbam = CBAM(self.dim_in*cfg.LESION.NUM_IMAGES_3DCE, 16)
            #self.FPN_RPN_conv_embedding = nn.Conv2d(self.dim_in*cfg.LESION.NUM_IMAGES_3DCE, self.dim_in, 1)
            self.FPN_RPN_conv = nn.Conv2d(self.dim_in*cfg.LESION.NUM_IMAGES_3DCE, self.dim_in, 1)
        else:
            self.FPN_RPN_conv = nn.Conv2d(self.dim_in, self.dim_out, 3, 1, 1)

        #elif cfg.LESION.SUM_BEFORE_RPN:
            #self.FPN_RPN_conv_embedding = nn.Conv2d(self.dim_in, self.dim_in, 1)
        #self.FPN_RPN_conv = nn.Conv2d(self.dim_in*cfg.LESION.NUM_IMAGES_3DCE, self.dim_out, 3, 1, 1)
        dim_score = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \
            else num_anchors
        self.FPN_RPN_cls_score = nn.Conv2d(self.dim_out, dim_score, 1, 1, 0)
        self.FPN_RPN_bbox_pred = nn.Conv2d(self.dim_out, 4 * num_anchors, 1, 1, 0)

        self.GenerateProposals_modules = nn.ModuleList()
        k_max = cfg.FPN.RPN_MAX_LEVEL  # coarsest level of pyramid
        k_min = cfg.FPN.RPN_MIN_LEVEL  # finest level of pyramid
        #anchor_scale = [0,0,2,3,4,6,12]
        #anchor_scale = [0,0,1,2,4,8,16]
        #anchor_scale = [0,0,1,2,3,6,12]
        for lvl in range(k_min, k_max + 1):
            sc = self.spatial_scales[k_max - lvl]  # in reversed order
            lvl_anchors = generate_anchors(
                stride=2.**lvl,
                sizes=(cfg.FPN.RPN_ANCHOR_START_SIZE * 2.**(lvl - k_min), ),
                #sizes=(cfg.FPN.RPN_ANCHOR_START_SIZE * anchor_scale[lvl], ),
                aspect_ratios=cfg.FPN.RPN_ASPECT_RATIOS
            )
            self.GenerateProposals_modules.append(GenerateProposalsOp(lvl_anchors, sc))

        self.CollectAndDistributeFpnRpnProposals = CollectAndDistributeFpnRpnProposalsOp()

        self._init_weights()
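
The only change relative to Example #1 is the cfg.LESION.CONCAT_BEFORE_RPN branch: per-slice feature maps (3DCE-style) are concatenated along the channel axis and a 1x1 conv squeezes them back to dim_in channels before the shared cls/bbox branches. A minimal shape check, with dim_in=256 and NUM_IMAGES_3DCE=3 assumed purely for illustration:

import torch
import torch.nn as nn

dim_in, num_images = 256, 3  # assumed stand-ins for dim_in and cfg.LESION.NUM_IMAGES_3DCE
concat_reduce = nn.Conv2d(dim_in * num_images, dim_in, 1)  # mirrors the 1x1 FPN_RPN_conv above

# One FPN level: the per-slice features are stacked along the channel axis.
slices = [torch.randn(1, dim_in, 50, 50) for _ in range(num_images)]
fused = concat_reduce(torch.cat(slices, dim=1))
print(fused.shape)  # torch.Size([1, 256, 50, 50]) -- back to dim_in for the cls/bbox convs

Because dim_out equals dim_in here, the downstream FPN_RPN_cls_score and FPN_RPN_bbox_pred layers are unchanged by this branch.
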
Example #3
    # Single-level RPN head (non-FPN): one feature map, multiple anchor sizes.
    def __init__(self, dim_in, spatial_scale):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_in if cfg.RPN.OUT_DIM_AS_IN_DIM else cfg.RPN.OUT_DIM
        anchors = generate_anchors(stride=1. / spatial_scale,
                                   sizes=cfg.RPN.SIZES,
                                   aspect_ratios=cfg.RPN.ASPECT_RATIOS)
        num_anchors = anchors.shape[0]

        # RPN hidden representation
        self.RPN_conv = nn.Conv2d(self.dim_in, self.dim_out, 3, 1, 1)
        # Proposal classification scores
        self.n_score_out = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \
            else num_anchors
        self.RPN_cls_score = nn.Conv2d(self.dim_out, self.n_score_out, 1, 1, 0)
        # Proposal bbox regression deltas
        self.RPN_bbox_pred = nn.Conv2d(self.dim_out, num_anchors * 4, 1, 1, 0)

        self.RPN_GenerateProposals = GenerateProposalsOp(
            anchors, spatial_scale)
        self.RPN_GenerateProposalLabels = GenerateProposalLabelsOp()

        self._init_weights()
    # FPN RPN head from the same example: one anchor size per pyramid level.
    def __init__(self, dim_in, spatial_scales):
        super().__init__()
        self.dim_in = dim_in
        self.spatial_scales = spatial_scales
        self.dim_out = self.dim_in
        num_anchors = len(cfg.FPN.RPN_ASPECT_RATIOS)

        # Create conv ops shared by all FPN levels
        # The shared conv keeps the channel count; the score and bbox subnets
        # then branch off separately with 1x1 convs.

        self.FPN_RPN_conv = nn.Conv2d(dim_in, self.dim_out, 3, 1, 1)
        dim_score = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \
            else num_anchors
        self.FPN_RPN_cls_score = nn.Conv2d(self.dim_out, dim_score, 1, 1, 0)
        self.FPN_RPN_bbox_pred = nn.Conv2d(self.dim_out, 4 * num_anchors, 1, 1,
                                           0)

        self.GenerateProposals_modules = nn.ModuleList()
        k_max = cfg.FPN.RPN_MAX_LEVEL  # coarsest level of pyramid
        k_min = cfg.FPN.RPN_MIN_LEVEL  # finest level of pyramid
        for lvl in range(k_min, k_max + 1):
            # spatial_scales is listed from the coarsest (k_max) to the finest
            # (k_min) level, so index it with k_max - lvl.
            sc = self.spatial_scales[k_max - lvl]  # in reversed order
            # FIXME: this is the part to modify (per-level anchor generation).
            lvl_anchors = generate_anchors(
                stride=2.**lvl,
                sizes=(cfg.FPN.RPN_ANCHOR_START_SIZE * 2.**(lvl - k_min), ),
                aspect_ratios=cfg.FPN.RPN_ASPECT_RATIOS)
            self.GenerateProposals_modules.append(
                GenerateProposalsOp(lvl_anchors, sc))

        self.CollectAndDistributeFpnRpnProposals = CollectAndDistributeFpnRpnProposalsOp()

        self._init_weights()
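
Example #3 puts the single-level RPN head (stride = 1 / spatial_scale, several sizes from cfg.RPN.SIZES on one feature map) next to the FPN head already shown above (stride = 2**lvl, one size per level). To make the anchor geometry concrete, here is a simplified stand-in for generate_anchors; it only builds the base windows for one cell, skips the exact rounding of the Detectron implementation, and the spatial_scale, sizes and aspect ratios are assumed for illustration:

import numpy as np

def simple_anchors(stride, sizes, aspect_ratios):
    # Base anchors (x1, y1, x2, y2) centred on the first stride-sized cell.
    # A simplified stand-in for generate_anchors(); no Detectron-style rounding.
    ctr = (stride - 1) / 2.
    anchors = []
    for size in sizes:
        area = float(size) ** 2
        for ar in aspect_ratios:        # ar = height / width, as in Detectron
            w = np.sqrt(area / ar)
            h = w * ar
            anchors.append([ctr - 0.5 * (w - 1), ctr - 0.5 * (h - 1),
                            ctr + 0.5 * (w - 1), ctr + 0.5 * (h - 1)])
    return np.array(anchors)

# Single-level head: stride = 1 / spatial_scale (e.g. 16), all sizes on one map.
print(simple_anchors(16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)).shape)  # (15, 4)
# FPN head: one size per level, e.g. lvl=3 with RPN_ANCHOR_START_SIZE=32, k_min=2.
print(simple_anchors(2 ** 3, sizes=(32 * 2 ** (3 - 2),), aspect_ratios=(0.5, 1, 2)).shape)  # (3, 4)
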