Example 1
    def __init__(self, use_xyz=True, mode='TRAIN'):
        super().__init__()
        self.training_mode = (mode == 'TRAIN')

        MODEL = importlib.import_module(cfg.RPN.BACKBONE)
        self.backbone_net = MODEL.get_model(
            input_channels=int(cfg.RPN.USE_INTENSITY), use_xyz=use_xyz)

        # classification branch
        cls_layers = []
        pre_channel = cfg.RPN.FP_MLPS[0][-1]
        for k in range(len(cfg.RPN.CLS_FC)):
            cls_layers.append(
                pt_utils.Conv1d(pre_channel,
                                cfg.RPN.CLS_FC[k],
                                bn=cfg.RPN.USE_BN))
            pre_channel = cfg.RPN.CLS_FC[k]
        cls_layers.append(pt_utils.Conv1d(pre_channel, 1, activation=None))
        if cfg.RPN.DP_RATIO >= 0:
            cls_layers.insert(1, nn.Dropout(cfg.RPN.DP_RATIO))
        self.rpn_cls_layer = nn.Sequential(*cls_layers)

        # regression branch
        per_loc_bin_num = int(cfg.RPN.LOC_SCOPE / cfg.RPN.LOC_BIN_SIZE) * 2
        if cfg.RPN.LOC_XZ_FINE:
            reg_channel = per_loc_bin_num * 4 + cfg.RPN.NUM_HEAD_BIN * 2 + 3
        else:
            reg_channel = per_loc_bin_num * 2 + cfg.RPN.NUM_HEAD_BIN * 2 + 3
        reg_channel += 1  # reg y
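        # worked example, assuming typical config values (an assumption, not
        # visible in this snippet): LOC_SCOPE=3.0, LOC_BIN_SIZE=0.5,
        # NUM_HEAD_BIN=12, LOC_XZ_FINE=True:
        #   per_loc_bin_num = int(3.0 / 0.5) * 2 = 12
        #   reg_channel = 12 * 4 + 12 * 2 + 3 + 1 = 76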

        reg_layers = []
        pre_channel = cfg.RPN.FP_MLPS[0][-1]
        for k in range(len(cfg.RPN.REG_FC)):
            reg_layers.append(
                pt_utils.Conv1d(pre_channel,
                                cfg.RPN.REG_FC[k],
                                bn=cfg.RPN.USE_BN))
            pre_channel = cfg.RPN.REG_FC[k]

        reg_layers.append(
            pt_utils.Conv1d(pre_channel, reg_channel, activation=None))

        if cfg.RPN.DP_RATIO >= 0:
            reg_layers.insert(1, nn.Dropout(cfg.RPN.DP_RATIO))

        self.rpn_reg_layer = nn.Sequential(*reg_layers)

        if cfg.RPN.LOSS_CLS == 'DiceLoss':
            self.rpn_cls_loss_func = loss_utils.DiceLoss(ignore_target=-1)
        elif cfg.RPN.LOSS_CLS == 'SigmoidFocalLoss':
            self.rpn_cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(
                alpha=cfg.RPN.FOCAL_ALPHA[0], gamma=cfg.RPN.FOCAL_GAMMA)
        elif cfg.RPN.LOSS_CLS == 'BinaryCrossEntropy':
            self.rpn_cls_loss_func = F.binary_cross_entropy
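            # note: F.binary_cross_entropy expects probabilities in [0, 1],
            # so the raw logits from rpn_cls_layer must go through a sigmoid
            # before this loss is called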
        else:
            raise NotImplementedError

        self.proposal_layer = ProposalLayer(mode=mode)
        self.init_weights()
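The same constructor appears again below with line-by-line annotations. To make concrete what the classification branch above builds, here is a minimal, self-contained sketch; it assumes cfg.RPN.FP_MLPS[0][-1] = 128, cfg.RPN.CLS_FC = [128], USE_BN = True and DP_RATIO = 0.5, and uses plain torch layers as a stand-in for pt_utils.Conv1d (which wraps a kernel_size=1 conv plus optional BatchNorm and ReLU):

import torch
import torch.nn as nn

# Stand-in for self.rpn_cls_layer under the assumed config above.
cls_head = nn.Sequential(
    nn.Conv1d(128, 128, kernel_size=1, bias=False),
    nn.BatchNorm1d(128),
    nn.ReLU(),
    nn.Dropout(0.5),                   # the dropout inserted at index 1 above
    nn.Conv1d(128, 1, kernel_size=1),  # no activation: the sigmoid lives in the loss
)

pts_features = torch.randn(4, 128, 16384)  # (B, C, N); the sizes are arbitrary
print(cls_head(pts_features).shape)        # torch.Size([4, 1, 16384]): one logit per point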
Example 2
    def __init__(self, use_xyz=True, mode='TRAIN'):
        super().__init__()
        self.training_mode = (mode == 'TRAIN')

        MODEL = importlib.import_module(cfg.RPN.BACKBONE)
        self.backbone_net = MODEL.get_model(
            input_channels=int(cfg.RPN.USE_INTENSITY), use_xyz=use_xyz)

        # here pt_utils.Conv1d is almost the same as torch's Conv1d
        # (for torch Conv1d see https://pytorch.org/docs/stable/nn.html#conv1d)
        # we use Conv1d so we can batch the calculation at two levels:
        # the first level is the scenes and the second is the points.
        # The input to both heads is a (B, C, N) shaped tensor:
        # C is the number of channels, i.e. the number of features each point
        # has (apparently 128), and N is the number of points in one scene.
        # This way we regress the output values of all the points in a single
        # run of a Conv1d layer. Notice the outputs have the shapes:
        # classification head (B, 1, N), regression head (B, 9, N).
        # Since the kernel_size is 1, each output is a linear combination of
        # the channels plus a bias, just like a simple linear regression;
        # in the regression head, each of the 9 outputs has its own set of
        # weights and biases. Notice the output is the result of the
        # regression/classification for all the points, not just a single one.
        # (a sketch verifying this equivalence follows the end of this example)

        # classification branch
        cls_layers = []
        pre_channel = cfg.RPN.FP_MLPS[0][-1]  # = 128
        for k in range(len(cfg.RPN.CLS_FC)):
            # input is 128 output is also 128
            cls_layers.append(
                pt_utils.Conv1d(
                    pre_channel, cfg.RPN.CLS_FC[k],
                    bn=cfg.RPN.USE_BN))  # bn is batch normalization
            pre_channel = cfg.RPN.CLS_FC[k]
        # the sigmoid is applied in the loss function, not here
        cls_layers.append(pt_utils.Conv1d(pre_channel, 1, activation=None))
        # this ends up being:
        # 1st layer: 128 inputs to 128 outputs
        # 2nd layer: 128 to 1
        if cfg.RPN.DP_RATIO >= 0:
            # adds a dropout layer with ratio 0.5 after the first layer
            cls_layers.insert(1, nn.Dropout(cfg.RPN.DP_RATIO))
        self.rpn_cls_layer = nn.Sequential(*cls_layers)

        # regression branch
        # we do a plain regression for all 9 parameters of our bboxes:
        # (x, y, z, w, h, l, rx, ry, rz)
        reg_channel = 9

        reg_layers = []
        pre_channel = cfg.RPN.FP_MLPS[0][-1]  # = 128
        for k in range(len(cfg.RPN.REG_FC)):  # cfg.RPN.REG_FC = [128]
            reg_layers.append(
                pt_utils.Conv1d(pre_channel,
                                cfg.RPN.REG_FC[k],
                                bn=cfg.RPN.USE_BN))
            pre_channel = cfg.RPN.REG_FC[k]

        reg_layers.append(
            pt_utils.Conv1d(pre_channel, reg_channel, activation=None))

        # if you use binning and classification, the activation of this last
        # layer is applied in the loss instead: see "get_reg_loss" in
        # lib/utils/loss_utils.py, which uses a cross-entropy loss that
        # applies a softmax internally (I need to change this!)

        if cfg.RPN.DP_RATIO >= 0:
            # adds a dropout layer with ratio 0.5 after the first layer
            reg_layers.insert(1, nn.Dropout(cfg.RPN.DP_RATIO))
        self.rpn_reg_layer = nn.Sequential(*reg_layers)
        # this ends up being:
        # 1st layer: 128 inputs to 128 outputs
        # 2nd layer: 128 to 9 outputs

        if cfg.RPN.LOSS_CLS == 'DiceLoss':
            self.rpn_cls_loss_func = loss_utils.DiceLoss(ignore_target=-1)
        elif cfg.RPN.LOSS_CLS == 'SigmoidFocalLoss':
            self.rpn_cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(
                alpha=cfg.RPN.FOCAL_ALPHA[0], gamma=cfg.RPN.FOCAL_GAMMA)
        elif cfg.RPN.LOSS_CLS == 'BinaryCrossEntropy':
            self.rpn_cls_loss_func = F.binary_cross_entropy
        else:
            raise NotImplementedError

        # proposal layer is only used in RCNN and not in RPN
        self.proposal_layer = ProposalLayer(mode=mode)
        self.init_weights()
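The comment block in Example 2 claims that a kernel_size=1 Conv1d computes, at every point, the same linear combination of channels as an ordinary linear regression. A quick self-contained check of that equivalence (the 128 -> 9 sizes mirror the regression head; none of this is part of the original code):

import torch
import torch.nn as nn

B, C_in, C_out, N = 2, 128, 9, 1024

conv = nn.Conv1d(C_in, C_out, kernel_size=1)
linear = nn.Linear(C_in, C_out)

# A k=1 conv kernel has shape (C_out, C_in, 1), i.e. exactly a
# (C_out, C_in) weight matrix; copy it into the linear layer.
with torch.no_grad():
    linear.weight.copy_(conv.weight.squeeze(-1))
    linear.bias.copy_(conv.bias)

x = torch.randn(B, C_in, N)                             # (B, C, N) point features
out_conv = conv(x)                                      # all N points in one shot
out_linear = linear(x.transpose(1, 2)).transpose(1, 2)  # same math, point by point

print(torch.allclose(out_conv, out_linear, atol=1e-6))  # True

This is why both heads can score every point of every scene in a single call, rather than looping over the N points one at a time.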