Example #1
    def get_rcnn_sample_jit(self, index):
        sample_id = int(self.sample_id_list[index])
        rpn_xyz, rpn_features, rpn_intensity, seg_mask = \
            self.get_rpn_features(self.rcnn_training_feature_dir, sample_id)

        # load rois and gt_boxes3d for this sample
        roi_file = os.path.join(self.rcnn_training_roi_dir,
                                '%06d.txt' % sample_id)
        roi_obj_list = kitti_utils.get_objects_from_label(roi_file)
        roi_boxes3d = kitti_utils.objs_to_boxes3d(roi_obj_list)
        # roi_scores = kitti_utils.objs_to_scores(roi_obj_list)

        gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
        gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)

        sample_info = {
            'sample_id': sample_id,
            'rpn_xyz': rpn_xyz,
            'rpn_features': rpn_features,
            'rpn_intensity': rpn_intensity,
            'seg_mask': seg_mask,
            'roi_boxes3d': roi_boxes3d,
            'gt_boxes3d': gt_boxes3d,
            'pts_depth': np.linalg.norm(rpn_xyz, ord=2, axis=1)
        }

        return sample_info

    def get_rpn_sample(self, index):
        sample_id = int(self.sample_id_list[index])
        if sample_id < 10000:
            # ids >= 10000 would leave calib/img/depth undefined below, so
            # plain (non-augmented) sample ids are assumed here
            calib = self.get_calib(sample_id)
            img_left = self.get_image(sample_id % 10000, left_image=True)
            img_right = self.get_image(sample_id % 10000, left_image=False)
            # img_shape = self.get_image_shape(sample_id)
            W, H = img_left.size
            depth = self.get_depth(sample_id)

            # Pad depth to a constant 384 x 1248 shape for batching
            # (KITTI frames are at most 375 x 1242; both targets are multiples of 32)
            top_pad = 384 - H
            right_pad = 1248 - W
            depth = np.pad(depth, ((top_pad, 0), (0, right_pad)), 'constant', constant_values=0)

        sample_info = {'sample_id': sample_id, 'random_select': self.random_select}

        if self.mode == 'TEST':
            sample_info['left_image'] = img_left
            sample_info['right_image'] = img_right
            sample_info['gt_depth'] = depth
            sample_info['calib'] = calib
            return sample_info

        gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
        gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)

        gt_alpha = np.zeros(len(gt_obj_list), dtype=np.float32)
        for k, obj in enumerate(gt_obj_list):
            gt_alpha[k] = obj.alpha

        aug_gt_boxes3d = gt_boxes3d.copy()

        # the cfg.RPN.FIXED and default paths currently return the same fields
        sample_info['left_image'] = img_left
        sample_info['right_image'] = img_right
        sample_info['gt_depth'] = depth
        sample_info['calib'] = calib
        sample_info['gt_boxes3d'] = aug_gt_boxes3d
        return sample_info
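
# Note: kitti_utils.objs_to_boxes3d is used throughout these examples. Judging
# from the manual packing in get_proposal_from_file (Example #3), each label is
# assumed to become one (x, y, z, h, w, l, ry) row in rect camera coordinates.
# A minimal sketch under that assumption, not the library's actual code:
import numpy as np

def objs_to_boxes3d_sketch(obj_list):
    boxes3d = np.zeros((len(obj_list), 7), dtype=np.float32)
    for k, obj in enumerate(obj_list):
        boxes3d[k, 0:3] = obj.pos  # box bottom-center in rect camera coords
        boxes3d[k, 3] = obj.h      # height
        boxes3d[k, 4] = obj.w      # width
        boxes3d[k, 5] = obj.l      # length
        boxes3d[k, 6] = obj.ry     # heading angle around the camera Y axis
    return boxes3d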
Example #3
            continue
        total_objs.append(len(labels))
        for label in labels:
            x_list.append(label.pos[0])
            y_list.append(label.pos[1])
            z_list.append(label.pos[2])
            l_list.append(label.l)
            w_list.append(label.w)
            h_list.append(label.h)
            ry_list.append(label.ry)
        points = align_img_and_pc(img_dir, pc_dir, calib_dir)
        num_pts_per_scene.append(len(points))

        # Get the foreground and background label
        bboxes3d = kitti_utils.objs_to_boxes3d(labels)
        bboxes3d_rotated_corners = kitti_utils.boxes3d_to_corners3d(bboxes3d)
        box3d_roi_inds_overall = None
        sub_box3d_roi_inds_overall = None
        valid_labels = []
        for i, bbox3d_corners in enumerate(bboxes3d_rotated_corners):
            # mask of the points that fall inside this box's convex hull
            box3d_roi_inds = kitti_utils.in_hull(points[:, :3], bbox3d_corners)
    def get_rcnn_training_sample_batch(self, index):
        sample_id = int(self.sample_id_list[index])
        rpn_xyz, rpn_features, rpn_intensity, seg_mask = \
            self.get_rpn_features(self.rcnn_training_feature_dir, sample_id)

        # load rois and gt_boxes3d for this sample
        roi_file = os.path.join(self.rcnn_training_roi_dir, '%06d.txt' % sample_id)
        roi_obj_list = kitti_utils.get_objects_from_label(roi_file)
        roi_boxes3d = kitti_utils.objs_to_boxes3d(roi_obj_list)
        # roi_scores = kitti_utils.objs_to_scores(roi_obj_list)

        gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
        gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)

        # calculate original iou
        iou3d = kitti_utils.get_iou3d(kitti_utils.boxes3d_to_corners3d(roi_boxes3d),
                                      kitti_utils.boxes3d_to_corners3d(gt_boxes3d))
        max_overlaps, gt_assignment = iou3d.max(axis=1), iou3d.argmax(axis=1)
        max_iou_of_gt, roi_assignment = iou3d.max(axis=0), iou3d.argmax(axis=0)
        roi_assignment = roi_assignment[max_iou_of_gt > 0].reshape(-1)

        # sample fg, easy_bg, hard_bg
        fg_rois_per_lidar = int(np.round(cfg.RCNN.FG_RATIO * cfg.RCNN.ROI_PER_lidar))
        fg_thresh = min(cfg.RCNN.REG_FG_THRESH, cfg.RCNN.CLS_FG_THRESH)
        fg_inds = np.nonzero(max_overlaps >= fg_thresh)[0]
        fg_inds = np.concatenate((fg_inds, roi_assignment), axis=0)  # consider the roi which has max_overlaps with gt as fg

        easy_bg_inds = np.nonzero((max_overlaps < cfg.RCNN.CLS_BG_THRESH_LO))[0]
        hard_bg_inds = np.nonzero((max_overlaps < cfg.RCNN.CLS_BG_THRESH) &
                                  (max_overlaps >= cfg.RCNN.CLS_BG_THRESH_LO))[0]

        fg_num_rois = fg_inds.size
        bg_num_rois = hard_bg_inds.size + easy_bg_inds.size

        if fg_num_rois > 0 and bg_num_rois > 0:
            # sampling fg
            fg_rois_per_this_lidar = min(fg_rois_per_lidar, fg_num_rois)
            rand_num = np.random.permutation(fg_num_rois)
            fg_inds = fg_inds[rand_num[:fg_rois_per_this_lidar]]

            # sampling bg
            bg_rois_per_this_lidar = cfg.RCNN.ROI_PER_lidar - fg_rois_per_this_lidar
            bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_lidar)

        elif fg_num_rois > 0 and bg_num_rois == 0:
            # sampling fg only (with replacement, since no bg rois are available);
            # the torch round-trip in the original would fail on numpy boxes
            rand_num = np.floor(np.random.rand(cfg.RCNN.ROI_PER_lidar) * fg_num_rois).astype(np.int64)
            fg_inds = fg_inds[rand_num]
            fg_rois_per_this_lidar = cfg.RCNN.ROI_PER_lidar
            bg_rois_per_this_lidar = 0
        elif bg_num_rois > 0 and fg_num_rois == 0:
            # sampling bg
            bg_rois_per_this_lidar = cfg.RCNN.ROI_PER_lidar
            bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_lidar)
            fg_rois_per_this_lidar = 0
        else:
            raise NotImplementedError('no fg or bg rois to sample for sample_id %d' % sample_id)

        # augment the rois by noise
        roi_list, roi_iou_list, roi_gt_list = [], [], []
        if fg_rois_per_this_lidar > 0:
            fg_rois_src = roi_boxes3d[fg_inds].copy()
            gt_of_fg_rois = gt_boxes3d[gt_assignment[fg_inds]]
            fg_rois, fg_iou3d = self.aug_roi_by_noise_batch(fg_rois_src, gt_of_fg_rois, aug_times=10)
            roi_list.append(fg_rois)
            roi_iou_list.append(fg_iou3d)
            roi_gt_list.append(gt_of_fg_rois)

        if bg_rois_per_this_lidar > 0:
            bg_rois_src = roi_boxes3d[bg_inds].copy()
            gt_of_bg_rois = gt_boxes3d[gt_assignment[bg_inds]]
            bg_rois, bg_iou3d = self.aug_roi_by_noise_batch(bg_rois_src, gt_of_bg_rois, aug_times=1)
            roi_list.append(bg_rois)
            roi_iou_list.append(bg_iou3d)
            roi_gt_list.append(gt_of_bg_rois)

        rois = np.concatenate(roi_list, axis=0)
        iou_of_rois = np.concatenate(roi_iou_list, axis=0)
        gt_of_rois = np.concatenate(roi_gt_list, axis=0)

        # collect extra features for point cloud pooling
        if cfg.RCNN.USE_INTENSITY:
            pts_extra_input_list = [rpn_intensity.reshape(-1, 1), seg_mask.reshape(-1, 1)]
        else:
            pts_extra_input_list = [seg_mask.reshape(-1, 1)]

        if cfg.RCNN.USE_DEPTH:
            pts_depth = (np.linalg.norm(rpn_xyz, ord=2, axis=1) / 70.0) - 0.5
            pts_extra_input_list.append(pts_depth.reshape(-1, 1))
        pts_extra_input = np.concatenate(pts_extra_input_list, axis=1)

        pts_input, pts_features, pts_empty_flag = roipool3d_utils.roipool3d_cpu(rois, rpn_xyz, rpn_features,
                                                                                pts_extra_input,
                                                                                cfg.RCNN.POOL_EXTRA_WIDTH,
                                                                                sampled_pt_num=cfg.RCNN.NUM_POINTS,
                                                                                canonical_transform=False)

        valid_mask = (pts_empty_flag == 0).astype(np.int32)

        # regression valid mask
        reg_valid_mask = (iou_of_rois > cfg.RCNN.REG_FG_THRESH).astype(np.int32) & valid_mask

        # classification label
        cls_label = (iou_of_rois > cfg.RCNN.CLS_FG_THRESH).astype(np.int32)
        invalid_mask = (iou_of_rois > cfg.RCNN.CLS_BG_THRESH) & (iou_of_rois < cfg.RCNN.CLS_FG_THRESH)
        cls_label[invalid_mask] = -1
        cls_label[valid_mask == 0] = -1

        # canonical transform and sampling
        pts_input_ct, gt_boxes3d_ct = self.canonical_transform_batch(pts_input, rois, gt_of_rois)

        sample_info = {'sample_id': sample_id,
                       'pts_input': pts_input_ct,
                       'pts_features': pts_features,
                       'cls_label': cls_label,
                       'reg_valid_mask': reg_valid_mask,
                       'gt_boxes3d_ct': gt_boxes3d_ct,
                       'roi_boxes3d': rois,
                       'roi_size': rois[:, 3:6],
                       'gt_boxes3d': gt_of_rois}

        return sample_info
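
# sample_bg_inds is called above but not shown. In PointRCNN-style samplers it
# splits the background quota between hard and easy negatives; the split ratio
# (hard_bg_ratio here) is an assumed knob, not a confirmed config value:
import numpy as np

def sample_bg_inds_sketch(hard_bg_inds, easy_bg_inds, bg_rois_per_this_lidar, hard_bg_ratio=0.5):
    if hard_bg_inds.size > 0 and easy_bg_inds.size > 0:
        hard_bg_num = int(bg_rois_per_this_lidar * hard_bg_ratio)
        easy_bg_num = bg_rois_per_this_lidar - hard_bg_num
    elif hard_bg_inds.size > 0:
        hard_bg_num, easy_bg_num = bg_rois_per_this_lidar, 0
    else:
        hard_bg_num, easy_bg_num = 0, bg_rois_per_this_lidar
    # sample with replacement so a small pool can still fill its quota
    hard = hard_bg_inds[np.random.randint(0, hard_bg_inds.size, hard_bg_num)] if hard_bg_num > 0 else hard_bg_inds[:0]
    easy = easy_bg_inds[np.random.randint(0, easy_bg_inds.size, easy_bg_num)] if easy_bg_num > 0 else easy_bg_inds[:0]
    return np.concatenate((hard, easy), axis=0)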

    def get_proposal_from_file(self, index):
        sample_id = int(self.lidar_idx_list[index])
        proposal_file = os.path.join(self.rcnn_eval_roi_dir, '%06d.txt' % sample_id)
        roi_obj_list = kitti_utils.get_objects_from_label(proposal_file)

        rpn_xyz, rpn_features, rpn_intensity, seg_mask = self.get_rpn_features(self.rcnn_eval_feature_dir, sample_id)
        pts_rect, pts_rpn_features, pts_intensity = rpn_xyz, rpn_features, rpn_intensity

        roi_box3d_list, roi_scores = [], []
        for obj in roi_obj_list:
            box3d = np.array([obj.pos[0], obj.pos[1], obj.pos[2], obj.h, obj.w, obj.l, obj.ry], dtype=np.float32)
            roi_box3d_list.append(box3d.reshape(1, 7))
            roi_scores.append(obj.score)

        roi_boxes3d = np.concatenate(roi_box3d_list, axis=0)  # (N, 7)
        roi_scores = np.array(roi_scores, dtype=np.float32)  # (N)

        if cfg.RCNN.ROI_SAMPLE_JIT:
            sample_dict = {'sample_id': sample_id,
                           'rpn_xyz': rpn_xyz,
                           'rpn_features': rpn_features,
                           'seg_mask': seg_mask,
                           'roi_boxes3d': roi_boxes3d,
                           'roi_scores': roi_scores,
                           'pts_depth': np.linalg.norm(rpn_xyz, ord=2, axis=1)}

            if self.mode != 'TEST':
                gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
                gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)

                roi_corners = kitti_utils.boxes3d_to_corners3d(roi_boxes3d)
                gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d)
                iou3d = kitti_utils.get_iou3d(roi_corners, gt_corners)
                if gt_boxes3d.shape[0] > 0:
                    gt_iou = iou3d.max(axis=1)
                else:
                    gt_iou = np.zeros(roi_boxes3d.shape[0]).astype(np.float32)

                sample_dict['gt_boxes3d'] = gt_boxes3d
                sample_dict['gt_iou'] = gt_iou
            return sample_dict

        if cfg.RCNN.USE_INTENSITY:
            pts_extra_input_list = [pts_intensity.reshape(-1, 1), seg_mask.reshape(-1, 1)]
        else:
            pts_extra_input_list = [seg_mask.reshape(-1, 1)]

        if cfg.RCNN.USE_DEPTH:
            cur_depth = np.linalg.norm(pts_rect, axis=1, ord=2)
            cur_depth_norm = (cur_depth / 70.0) - 0.5
            pts_extra_input_list.append(cur_depth_norm.reshape(-1, 1))

        pts_extra_input = np.concatenate(pts_extra_input_list, axis=1)
        pts_input, pts_features = roipool3d_utils.roipool3d_cpu(roi_boxes3d, pts_rect, pts_rpn_features,
                                                                pts_extra_input, cfg.RCNN.POOL_EXTRA_WIDTH,
                                                                sampled_pt_num=cfg.RCNN.NUM_POINTS)

        sample_dict = {'sample_id': sample_id,
                       'pts_input': pts_input,
                       'pts_features': pts_features,
                       'roi_boxes3d': roi_boxes3d,
                       'roi_scores': roi_scores,
                       'roi_size': roi_boxes3d[:, 3:6]}

        if self.mode == 'TEST':
            return sample_dict

        gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
        gt_boxes3d = np.zeros((len(gt_obj_list), 7), dtype=np.float32)

        for k, obj in enumerate(gt_obj_list):
            gt_boxes3d[k, 0:3] = obj.pos
            gt_boxes3d[k, 3:7] = [obj.h, obj.w, obj.l, obj.ry]

        if len(gt_boxes3d) == 0:
            gt_iou = np.zeros((roi_boxes3d.shape[0]), dtype=np.float32)
        else:
            roi_corners = kitti_utils.boxes3d_to_corners3d(roi_boxes3d)
            gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d)
            iou3d = kitti_utils.get_iou3d(roi_corners, gt_corners)
            gt_iou = iou3d.max(axis=1)
        sample_dict['gt_boxes3d'] = gt_boxes3d
        sample_dict['gt_iou'] = gt_iou

        return sample_dict
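
# boxes3d_to_corners3d turns (N, 7) [x, y, z, h, w, l, ry] boxes into the eight
# corners consumed by get_iou3d and in_hull. A per-box sketch, assuming the
# KITTI convention that y points down and (x, y, z) is the bottom-face center:
import numpy as np

def boxes3d_to_corners3d_sketch(boxes3d):
    corners_list = []
    for x, y, z, h, w, l, ry in boxes3d:
        # canonical corners: x spans the length, z the width, y runs from 0 to -h
        xs = np.array([l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2])
        ys = np.array([0., 0., 0., 0., -h, -h, -h, -h])
        zs = np.array([w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2])
        # rotate around the camera Y axis by ry, then translate to the box position
        c, s = np.cos(ry), np.sin(ry)
        rot = np.array([[c, 0., s],
                        [0., 1., 0.],
                        [-s, 0., c]])
        corners = rot @ np.stack([xs, ys, zs]) + np.array([[x], [y], [z]])
        corners_list.append(corners.T)  # (8, 3)
    return np.asarray(corners_list, dtype=np.float32)  # (N, 8, 3)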
Example #6
    def get_rpn_sample(self, index):

        sample_id = self.sample_id_list[index]
        pts_lidar = self.get_lidar(sample_id)

        # get valid points (projected points should be inside the image)
        pts_rect = pts_lidar[:, 0:3]
        valid_mask = self.get_valid_flag(pts_rect)
        pts_rect = pts_rect[valid_mask]
        # the intensity channel is replaced here by a running point index (placeholder)
        pts_intensity = np.arange(pts_lidar.shape[0])

        # generate inputs
        if self.mode == 'TRAIN' or self.random_select:

            # Check whether the scene has more points than the network's input budget
            # max_points < total_points
            if self.npoints < len(pts_rect):
                # use the third coordinate as the depth
                pts_depth = pts_rect[:, 2]
                # mask for the points within a 60.0 m radius
                pts_near_flag = np.abs(pts_depth) < 60.0
                # complementary indices for the far points
                far_idxs_choice = np.where(pts_near_flag == 0)[0]
                # indices of the near points
                near_idxs = np.where(pts_near_flag == 1)[0]
                # randomly select near-point indices so that near + far
                # sums to self.npoints
                near_idxs_choice = np.random.choice(near_idxs,
                                                    self.npoints -
                                                    len(far_idxs_choice),
                                                    replace=True)
                # keep all far points together with the chosen near points
                choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
                    if len(far_idxs_choice) > 0 else near_idxs_choice
                np.random.shuffle(choice)

            # max_points >= total_points
            else:
                # self.npoints == len(pts_rect): keep every point
                choice = np.arange(0, len(pts_rect), dtype=np.int32)
                # self.npoints > len(pts_rect): pad by resampling with replacement
                if self.npoints > len(pts_rect):
                    extra_choice = np.random.choice(choice,
                                                    self.npoints -
                                                    len(pts_rect),
                                                    replace=True)
                    choice = np.concatenate((choice, extra_choice), axis=0)
                np.random.shuffle(choice)

            ret_pts_rect = pts_rect[choice, :]
            # shift by -0.5, as is done for real [0, 1] intensities
            ret_pts_intensity = pts_intensity[choice] - 0.5
        else:
            ret_pts_rect = pts_rect
            ret_pts_intensity = pts_intensity - 0.5

        pts_features = [ret_pts_intensity.reshape(-1, 1)]
        ret_pts_features = np.concatenate(
            pts_features,
            axis=1) if len(pts_features) > 1 else pts_features[0]

        sample_info = {
            'sample_id': sample_id,
            'random_select': self.random_select
        }

        if self.mode == 'TEST':
            # if cfg.RPN.USE_INTENSITY:
            #     pts_input = np.concatenate((ret_pts_rect, ret_pts_features), axis=1)  # (N, C)
            # else:
            pts_input = ret_pts_rect
            sample_info['pts_input'] = pts_input
            sample_info['pts_rect'] = ret_pts_rect
            sample_info['pts_features'] = ret_pts_features
            return sample_info

        gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
        gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)

        gt_alpha = np.zeros(len(gt_obj_list), dtype=np.float32)
        for k, obj in enumerate(gt_obj_list):
            gt_alpha[k] = obj.alpha

        # data augmentation
        aug_pts_rect = ret_pts_rect.copy()
        aug_gt_boxes3d = gt_boxes3d.copy()

        # prepare input
        pts_input = aug_pts_rect

        if cfg.RPN.FIXED:
            sample_info['pts_input'] = pts_input
            sample_info['pts_rect'] = aug_pts_rect
            sample_info['pts_features'] = ret_pts_features
            sample_info['gt_boxes3d'] = aug_gt_boxes3d
            return sample_info

        # generate training labels
        rpn_cls_label, rpn_reg_label = self.generate_rpn_training_labels(
            aug_pts_rect, aug_gt_boxes3d)
        sample_info['pts_input'] = pts_input
        sample_info['pts_rect'] = aug_pts_rect
        sample_info['pts_features'] = ret_pts_features
        sample_info['rpn_cls_label'] = rpn_cls_label
        sample_info['rpn_reg_label'] = rpn_reg_label
        sample_info['gt_boxes3d'] = aug_gt_boxes3d
        return sample_info
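
# Standalone sketch of the sampling policy above: keep every far point, fill
# the remaining budget from the near points, and pad by resampling when the
# cloud holds fewer than npoints points (the 60 m radius follows Example #6):
import numpy as np

def sample_point_indices(pts_depth, npoints, near_radius=60.0):
    if npoints < len(pts_depth):
        near_idxs = np.where(np.abs(pts_depth) < near_radius)[0]
        far_idxs = np.where(np.abs(pts_depth) >= near_radius)[0]
        # assumes the far points alone never exceed the budget
        near_choice = np.random.choice(near_idxs, npoints - len(far_idxs), replace=True)
        choice = np.concatenate((near_choice, far_idxs)) if len(far_idxs) > 0 else near_choice
    else:
        choice = np.arange(len(pts_depth), dtype=np.int64)
        if npoints > len(pts_depth):
            extra = np.random.choice(choice, npoints - len(pts_depth), replace=True)
            choice = np.concatenate((choice, extra))
    np.random.shuffle(choice)
    return choice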
Example #7
    def get_rpn_sample(self, index):

        # sample data loading
        sample_id = int(self.sample_id_list[index])
        calib = self.get_calib(sample_id)
        # img = self.get_image(sample_id)
        img_shape = self.get_image_shape(sample_id)
        pts_lidar = self.get_lidar(sample_id)
        # sort the points by descending third coordinate
        pts_lidar = pts_lidar[np.argsort(-pts_lidar[:, 2]), :]
        # get valid points (projected points should be inside the image)
        pts_rect = calib.lidar_to_rect(pts_lidar[:, 0:3])
        pts_intensity = pts_lidar[:, 3]

        # scene augmentation
        if cfg.GT_AUG_ENABLED and self.mode == 'TRAIN':
            # all labels for checking overlapping
            all_gt_obj_list = self.filtrate_objects(self.get_noise_label(sample_id))
            all_gt_boxes3d = kitti_utils.objs_to_boxes3d(all_gt_obj_list)

            gt_aug_flag = False
            if np.random.rand() < cfg.GT_AUG_APPLY_PROB:
                # augment one scene
                gt_aug_flag, pts_rect, pts_intensity, extra_gt_boxes3d, extra_gt_obj_list = \
                    self.apply_gt_aug_to_one_scene(sample_id, pts_rect, pts_intensity, all_gt_boxes3d)

        # get depth and valid points
        pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
        pts_valid_flag = self.get_valid_flag(pts_rect, pts_img, pts_rect_depth, img_shape)
        pts_rect = pts_rect[pts_valid_flag][:, 0:3]
        pts_intensity = pts_intensity[pts_valid_flag]
        pts_depth = pts_rect_depth[pts_valid_flag]

        # generate inputs
        if self.mode == 'TRAIN' or self.random_select:
            if self.npoints < len(pts_rect):
                pts_near_flag = pts_depth < 40.0
                far_idxs_choice = np.where(pts_near_flag == 0)[0]
                near_idxs = np.where(pts_near_flag == 1)[0]
                near_idxs_choice = np.random.choice(near_idxs, self.npoints - len(far_idxs_choice), replace=False)

                choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
                    if len(far_idxs_choice) > 0 else near_idxs_choice
                np.random.shuffle(choice)
            else:
                choice = np.arange(0, len(pts_rect), dtype=np.int32)
                extra_choice = np.arange(0, len(pts_rect), dtype=np.int32)
                # tile the indices until at least self.npoints are available,
                # then subsample without replacement
                while self.npoints > len(choice):
                    choice = np.concatenate((choice, extra_choice), axis=0)
                choice = np.random.choice(choice, self.npoints, replace=False)
                np.random.shuffle(choice)

            ret_pts_rect = pts_rect[choice, :]
            ret_pts_intensity = pts_intensity[choice] - 0.5  # translate intensity to [-0.5, 0.5]
        else:
            ret_pts_rect = pts_rect
            ret_pts_intensity = pts_intensity - 0.5

        pts_features = [ret_pts_intensity.reshape(-1, 1)]
        ret_pts_features = np.concatenate(pts_features, axis=1) if len(pts_features) > 1 else pts_features[0]
        pts_input = np.concatenate((ret_pts_rect, ret_pts_features), axis=1)  # (N, C)
        # return if test
        if self.mode == 'TEST':
            sample_info = {'sample_id': sample_id,
                           'random_select': self.random_select,
                           'pts_input': pts_input,
                           }
            return sample_info

        # reload labels here
        noise_gt_obj_list = self.filtrate_objects(self.get_noise_label(sample_id))
        if cfg.GT_AUG_ENABLED and self.mode == 'TRAIN' and gt_aug_flag:
            noise_gt_obj_list.extend(extra_gt_obj_list)
        noise_gt_boxes3d = kitti_utils.objs_to_boxes3d(noise_gt_obj_list)

        # data augmentation
        aug_pts_input = pts_input.copy()
        aug_gt_boxes3d = noise_gt_boxes3d.copy()
        aug_method = []  # stays empty when no augmentation is applied (avoids a NameError below)
        if cfg.AUG_DATA and self.mode == 'TRAIN':
            aug_pts_rect, aug_gt_boxes3d, aug_method = self.data_augmentation(aug_pts_input[:, :3], aug_gt_boxes3d)
            aug_pts_input[:, :3] = aug_pts_rect

        # generate weakly-supervised training labels
        if self.mode == 'TRAIN':
            if cfg.RPN.FIXED:
                sample_info = {'sample_id': sample_id,
                               'random_select': self.random_select,
                               'pts_input': aug_pts_input,
                               'gt_centers': aug_gt_boxes3d[:, :7],
                               'aug_method': aug_method
                               }
            else:
                rpn_cls_label, rpn_reg_label = self.generate_gaussian_training_labels(aug_pts_input[:, :3], aug_gt_boxes3d[:, :3])
                # return dictionary
                sample_info = {'sample_id': sample_id,
                               'random_select': self.random_select,
                               'pts_input': aug_pts_input,
                               'rpn_cls_label': rpn_cls_label,
                               'rpn_reg_label': rpn_reg_label,
                               'gt_centers': aug_gt_boxes3d[:, :3],
                               'aug_method': aug_method
                               }

        else:
            gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
            gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)
            rpn_cls_label, rpn_reg_label = self.generate_rpn_training_labels(aug_pts_input[:, :3], aug_gt_boxes3d)
            # return dictionary
            sample_info = {'sample_id': sample_id,
                           'random_select': self.random_select,
                           'pts_input': aug_pts_input,
                           'rpn_cls_label': rpn_cls_label,
                           'rpn_reg_label': rpn_reg_label,
                           'gt_boxes3d': gt_boxes3d,
                           'gt_centers': aug_gt_boxes3d[:, :3],
                           }

        return sample_info
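
# get_valid_flag is called with four arguments in Example #7 (and with one in
# Example #6) but never shown. In PointRCNN-style loaders the full variant keeps
# points that project inside the image and lie in front of the camera; a
# minimal sketch, assuming img_shape is ordered (height, width, ...):
import numpy as np

def get_valid_flag_sketch(pts_rect, pts_img, pts_rect_depth, img_shape):
    in_x = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
    in_y = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
    inside_img = np.logical_and(in_x, in_y)
    # also require non-negative rect-frame depth (point in front of the camera)
    return np.logical_and(inside_img, pts_rect_depth >= 0)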