Example 1
0
    def __init__(self, pred_cls_channel, pred_reg_base_num,
                 pred_reg_channel_num, is_training, pred_attr_velo, bn,
                 pre_channel):
        """Build the classification and box-regression prediction branches.

        Args:
            pred_cls_channel: output channels of the classification branch.
            pred_reg_base_num: number of regression sets (e.g. per anchor).
            pred_reg_channel_num: regression channels per set, excluding
                the angle-bin outputs.
            is_training: training-mode flag kept for use by other methods.
            pred_attr_velo: whether attribute/velocity prediction is enabled
                (stored only; not used in construction here).
            bn: whether to apply batch norm inside the hidden Conv1d layers.
            pre_channel: input feature channel count.
        """
        super().__init__()
        # Keep the configuration on the instance.
        self.pred_cls_channel = pred_cls_channel
        self.pred_reg_base_num = pred_reg_base_num
        self.pred_reg_channel_num = pred_reg_channel_num
        self.is_training = is_training
        self.pred_attr_velo = pred_attr_velo
        self.pre_channel = pre_channel
        self.bn = bn

        # Classification branch: pre_channel -> 128 -> pred_cls_channel,
        # with a raw (no BN, no activation) final layer.
        self.cls_layers = nn.Sequential(
            pt_utils.Conv1d(self.pre_channel, 128, bn=self.bn),
            pt_utils.Conv1d(128, self.pred_cls_channel, bn=False,
                            activation=None),
        )

        # Regression branch: each of the pred_reg_base_num sets predicts
        # pred_reg_channel_num values plus two outputs (bin score +
        # residual) per angle class.
        self.angle_cls_num = cfg.MODEL.ANGLE_CLS_NUM
        reg_out_channel = pred_reg_base_num * (pred_reg_channel_num +
                                               self.angle_cls_num * 2)
        self.reg_layers = nn.Sequential(
            pt_utils.Conv1d(pre_channel, 128, bn=self.bn),
            pt_utils.Conv1d(128, reg_out_channel, bn=False,
                            activation=None),
        )
Example 2
0
    def __init__(self, batch_size, anchor_num, head_idx, head_cfg, is_training):
        """Build one detection-head stage.

        Args:
            batch_size: number of samples per batch.
            anchor_num: number of anchors predicted per point.
            head_idx: stage index (0 = first stage, 1 = second stage).
            head_cfg: 7-element positional layer spec:
                (xyz_index, feature_index, op_type, mlp_list, bn,
                 layer_type, scope).
            is_training: training-mode flag forwarded to sub-modules.

        Raises:
            NotImplementedError: if ``head_idx`` is not 0 or 1, or if the
                configured CLS_ACTIVATION is unsupported.
            KeyError: if the configured regression method is unknown.
        """
        super().__init__()
        self.is_training = is_training
        self.head_idx = head_idx
        self.anchor_num = anchor_num
        self.batch_size = batch_size

        cur_head = head_cfg

        # Unpack the positional layer spec.
        self.xyz_index = cur_head[0]
        self.feature_index = cur_head[1]
        self.op_type = cur_head[2]
        self.mlp_list = cur_head[3]
        self.bn = cur_head[4]
        self.layer_type = cur_head[5]
        self.scope = cur_head[6]

        # Select the per-stage configuration block.
        if head_idx == 0:  # stage 1
            self.head_cfg = cfg.MODEL.FIRST_STAGE
        elif head_idx == 1:  # stage 2
            self.head_cfg = cfg.MODEL.SECOND_STAGE
        else:
            # Fixed: was `raise Exception('Not Implementation Error!!!')`.
            raise NotImplementedError(
                'head_idx %r is not supported (only stages 0 and 1)'
                % (head_idx,))

        # Determine the classification output channel count. IoU layers
        # always use anchor_num outputs (this took precedence over the
        # activation-based choice in the original code too).
        if self.layer_type == 'IoU':
            self.pred_cls_channel = self.anchor_num
        elif self.head_cfg.CLS_ACTIVATION == 'Sigmoid':
            self.pred_cls_channel = self.anchor_num
        elif self.head_cfg.CLS_ACTIVATION == 'Softmax':
            # Softmax needs an extra background class.
            self.pred_cls_channel = self.anchor_num + 1
        else:
            # Fail fast: the original code left pred_cls_channel unset
            # here and crashed later with a confusing AttributeError.
            raise NotImplementedError(
                'Unsupported CLS_ACTIVATION: %r'
                % (self.head_cfg.CLS_ACTIVATION,))

        self.reg_method = self.head_cfg.REGRESSION_METHOD.TYPE
        anchor_type = self.reg_method.split('-')[-1]  # 'Anchor' or 'free'

        # Base number of regression sets: one per anchor for anchor-based
        # methods, a single set for anchor-free.
        pred_reg_base_num = {
            'Anchor': self.anchor_num,
            'free': 1,
        }
        self.pred_reg_base_num = pred_reg_base_num[anchor_type]

        # Regression channels per set.
        pred_reg_channel_num = {
            'Dist-Anchor': 6,
            'Log-Anchor': 6,
            'Dist-Anchor-free': 6,
            # bin_x/res_x/bin_z/res_z/res_y/res_size
            'Bin-Anchor': self.head_cfg.REGRESSION_METHOD.BIN_CLASS_NUM * 4 + 4,
        }
        self.pred_reg_channel_num = pred_reg_channel_num[self.reg_method]

        # Input channel count taken from the previous backbone layer's
        # architecture spec (index 15 — NOTE(review): confirm against the
        # ARCHITECTURE tuple layout).
        self.pre_channel = cfg.MODEL.NETWORK.FIRST_STAGE.ARCHITECTURE[
            self.feature_index[0] - 1][15]

        # Shared Conv1d trunk feeding the predictor.
        layer_modules = []
        pre_channel = self.pre_channel
        for out_channel in self.mlp_list:
            layer_modules.append(
                pt_utils.Conv1d(pre_channel, out_channel, bn=self.bn))
            pre_channel = out_channel
        self.layer_modules = nn.Sequential(*layer_modules)

        self.head_predictor = BoxRegressionHead(
            self.pred_cls_channel, self.pred_reg_base_num,
            self.pred_reg_channel_num, self.is_training,
            self.head_cfg.PREDICT_ATTRIBUTE_AND_VELOCITY, self.bn,
            pre_channel)
Example 3
0
    def __init__(self, mlp_list, bn, is_training, pre_channel):
        """Build a Conv1d MLP stack followed by a 3-channel center regressor.

        Args:
            mlp_list: output channel count of each hidden Conv1d layer.
            bn: whether to apply batch norm inside the hidden layers.
            is_training: training-mode flag kept for use by other methods.
            pre_channel: input feature channel count.
        """
        super().__init__()
        self.mlp_list = mlp_list
        self.bn = bn
        self.is_training = is_training

        # Hidden stack: each layer's output feeds the next layer's input.
        in_channel = pre_channel
        hidden = []
        for out_channel in self.mlp_list:
            hidden.append(pt_utils.Conv1d(in_channel, out_channel, bn=self.bn))
            in_channel = out_channel
        self.mlp_modules = nn.Sequential(*hidden)

        # Final raw projection (no BN, no activation) to a 3-D center offset.
        self.ctr_reg = pt_utils.Conv1d(in_channel, 3, activation=None,
                                       bn=False)

        # Translation lower bound, reshaped to (1, 1, 3) for broadcasting.
        self.min_offset = torch.tensor(
            cfg.MODEL.MAX_TRANSLATE_RANGE).float().view(1, 1, 3)
Example 4
0
    def __init__(self,
                 radius_list,
                 nsample_list,
                 mlp_list,
                 is_training,
                 bn_decay,
                 bn,
                 fps_sample_range_list,
                 fps_method_list,
                 npoint_list,
                 use_attention,
                 scope,
                 dilated_group,
                 aggregation_channel=None,
                 pre_channel=0,
                 debugging=False,
                 epsilon=1e-5):
        """Build a multi-radius grouping layer with per-radius SharedMLPs.

        Args:
            radius_list: grouping radius per scale.
            nsample_list: samples gathered per scale (stored only).
            mlp_list: per-scale MLP channel specs; entry ``i`` pairs with
                ``radius_list[i]``.
            is_training: training-mode flag kept for use by other methods.
            bn_decay: batch-norm decay setting (stored only).
            bn: whether to apply batch norm in the MLPs.
            fps_sample_range_list: FPS sampling ranges (stored only).
            fps_method_list: FPS method per range (stored only).
            npoint_list: points kept per FPS range (stored only).
            use_attention: attention flag (stored only).
            scope: name scope for this layer (stored only).
            dilated_group: dilated-grouping flag (stored only).
            aggregation_channel: output channels of the optional fusion
                Conv1d built when feature aggregation is enabled.
            pre_channel: input feature channel count.
            debugging: unused here; kept for interface compatibility.
            epsilon: unused here; kept for interface compatibility.
        """
        super().__init__()

        # Keep the grouping configuration on the instance.
        self.radius_list = radius_list
        self.nsample_list = nsample_list
        self.mlp_list = mlp_list
        self.is_training = is_training
        self.bn_decay = bn_decay
        self.bn = bn
        self.fps_sample_range_list = fps_sample_range_list
        self.fps_method_list = fps_method_list
        self.npoint_list = npoint_list
        self.use_attention = use_attention
        self.scope = scope
        self.dilated_group = dilated_group
        self.aggregation_channel = aggregation_channel
        self.pre_channel = pre_channel

        # One SharedMLP per radius; +3 input channels for the grouped
        # xyz coordinates prepended to the features.
        self.mlp_modules = nn.Sequential(*[
            pt_utils.SharedMLP([self.pre_channel + 3] + self.mlp_list[i],
                               bn=self.bn)
            for i in range(len(self.radius_list))
        ])

        # Optional fusion layer: concatenated per-radius features are
        # projected down to aggregation_channel.
        if cfg.MODEL.NETWORK.AGGREGATION_SA_FEATURE and self.mlp_list:
            fused_in = sum(spec[-1] for spec in self.mlp_list)
            self.aggregation_layer = pt_utils.Conv1d(fused_in,
                                                     aggregation_channel,
                                                     bn=self.bn)