Example no. 1
0
def global_rotation_v2(gt_boxes, points, min_rad=-np.pi / 4,
                       max_rad=np.pi / 4):
    """Apply one shared random yaw rotation to the scene.

    A single angle is drawn uniformly from [min_rad, max_rad] and applied
    around the z axis to both the point cloud and the box centers; the box
    heading (column 6) is shifted by the same angle. Both arrays are
    modified in place and also returned.
    """
    angle = np.random.uniform(min_rad, max_rad)
    # rotate point xyz first, then box centers, exactly as drawn
    for arr in (points, gt_boxes):
        arr[:, :3] = box_np_ops.rotation_points_single_angle(
            arr[:, :3], angle, axis=2)
    gt_boxes[:, 6] += angle
    return gt_boxes, points
Example no. 2
0
def global_rotation(gt_boxes, points, rotation=np.pi / 4):
    """Apply one shared random yaw rotation to points and boxes.

    `rotation` is either a scalar r (angle drawn from [-r, r]) or a
    two-element list [low, high]. The same angle rotates point xyz and box
    centers around the z axis and is added to the box heading (column 6).
    Mutates both arrays in place and returns them.
    """
    if isinstance(rotation, list):
        low, high = rotation[0], rotation[1]
    else:
        low, high = -rotation, rotation
    angle = np.random.uniform(low, high)
    for arr in (points, gt_boxes):
        arr[:, :3] = box_np_ops.rotation_points_single_angle(
            arr[:, :3], angle, axis=2)
    gt_boxes[:, 6] += angle
    return gt_boxes, points
Example no. 3
0
def _get_depth_idx(calib, f_view, img_idx, rot_noise, scal_noise):
    """Back-project a cached depth map into LiDAR space and map it to BEV.

    Loads a precomputed per-frame depth map (.npy), back-projects each
    (u, v, depth) pixel through the camera intrinsics to camera-rect
    coordinates, transforms to the velodyne frame, re-applies the frame's
    global rotation/scaling augmentation, and finally rescales x/y to a
    176x200 bird's-eye-view grid.

    Args:
        calib: dict holding 'Trv2c' (velodyne->camera), 'P2' (projection)
            and 'rect' (rectification) matrices.
        f_view: unused; kept only for signature compatibility with callers.
        img_idx: integer frame index locating the cached depth map on disk.
        rot_noise: global-rotation augmentation angle (radians) to re-apply.
        scal_noise: global-scaling augmentation factor to re-apply.

    Returns:
        torch.Tensor of shape (N, 3) where column 0 is in [0, 176) and
        column 1 in [0, 200) BEV grid units; out-of-range points are zeroed
        rather than removed, so N stays fixed.
    """
    d_path = '/mnt/sdb/jhyoo/new_15_second/second.pytorch/second/dataset/depth_maps/trainval'
    depth = np.load(os.path.join(d_path, '{0:06d}'.format(img_idx)+'.npy'))
    # cv2.resize takes (width, height): 156x48 is the 8x-downsampled image
    r_depth = np.asarray(cv2.resize(depth, (156,48)))

    c, r = n_mesh(np.arange(156), np.arange(48))
    # NOTE(review): this overwrites the resized depth map with a constant
    # 30 m plane, discarding the data loaded above — looks like leftover
    # debug code; confirm before relying on real depth values here.
    r_depth = np.ones([48,156])*30.
    # pixel centers of the downsampled grid in full-image coordinates
    points = np.stack([c*8+4, r*8+4, r_depth])
    points = points.reshape((3, -1))
    points = points.T
    V2C = calib['Trv2c'][:3,:]
    C2V = inverse_rigid_trans(V2C)
    P = calib['P2']
    c_u = P[0, 2]
    c_v = P[1, 2]
    f_u = P[0, 0]
    f_v = P[1, 1]
    b_x = P[0, 3] / (-f_u)  # relative baseline offsets from P2
    b_y = P[1, 3] / (-f_v)
    # back-project (u, v, depth) to 3D camera-rect coordinates
    uv_depth = points
    n = uv_depth.shape[0]
    x = ((uv_depth[:, 0] - c_u) * uv_depth[:, 2]) / f_u + b_x
    y = ((uv_depth[:, 1] - c_v) * uv_depth[:, 2]) / f_v + b_y
    pts_3d_rect = np.zeros((n, 3))
    pts_3d_rect[:, 0] = x
    pts_3d_rect[:, 1] = y
    pts_3d_rect[:, 2] = uv_depth[:, 2]
    # undo rectification, then transform camera -> velodyne (homogeneous)
    R0 = calib['rect'][:3, :3]
    pts_3d_rect = np.transpose(np.dot(np.linalg.inv(R0), np.transpose(pts_3d_rect)))
    n = pts_3d_rect.shape[0]
    pts_3d_rect = np.hstack((pts_3d_rect, np.ones((n, 1))))
    cloud = np.dot(pts_3d_rect, np.transpose(C2V))

    # re-apply the same global augmentation used on the frame's point cloud
    cloud = box_np_ops.rotation_points_single_angle(cloud, rot_noise, axis=2)
    cloud *= scal_noise

    # keep points inside the detection range; the z window selects a thin
    # slab — presumably near the ground plane; TODO confirm the bounds
    validx = (cloud[:, 0] >= 0) & (cloud[:, 0] <= 70.4)
    validy = (cloud[:, 1] >= -40.0) & (cloud[:, 1] <= 40.0)
    validz = (cloud[:, 2] >= -2.0) & (cloud[:, 2] <= -1.4)
    valid = validx & validy & validz
    # zero out invalid rows instead of dropping them (keeps length fixed)
    clouds = cloud * valid.reshape(-1,1)
    clouds = torch.tensor(clouds)
    # rescale metric x in [0, 70.4] -> [0, 176] and y in [-40, 40] -> [0, 200]
    clouds[:,0] = clouds[:,0]/70.4*176
    clouds[:,1] = (clouds[:,1]+40)/80*200
    return clouds
Example no. 4
0
def global_rotation_v2(gt_boxes, points, min_rad=-np.pi / 4,
                       max_rad=np.pi / 4):
    """Apply one shared random yaw rotation to points, boxes and velocities.

    Draws a single angle uniformly from [min_rad, max_rad], rotates point
    xyz and box centers around the z axis, shifts the box heading (column
    6), and — for 9-column boxes — rotates the 2-D velocity vector stored
    in columns 7:9 by the same angle. Mutates both arrays in place and
    returns them.
    """
    angle = np.random.uniform(min_rad, max_rad)
    points[:, :3] = box_np_ops.rotation_points_single_angle(
        points[:, :3], angle, axis=2)
    gt_boxes[:, :3] = box_np_ops.rotation_points_single_angle(
        gt_boxes[:, :3], angle, axis=2)
    gt_boxes[:, 6] += angle
    if gt_boxes.shape[1] == 9:
        # rotate the (vx, vy) velocity vector by the same yaw angle
        cos_a = np.cos(angle)
        sin_a = np.sin(angle)
        rot_T = np.array([[cos_a, -sin_a], [sin_a, cos_a]],
                         dtype=points.dtype)
        gt_boxes[:, 7:9] = gt_boxes[:, 7:9] @ rot_T
    return gt_boxes, points
Example no. 5
0
    def sample_all(self,
                   root_path,
                   gt_boxes,
                   gt_names,
                   num_point_features,
                   random_crop=False,
                   gt_group_ids=None,
                   rect=None,
                   Trv2c=None,
                   P2=None):
        """Sample extra ground-truth objects from the database into a frame.

        For each configured class, samples enough database objects to reach
        the per-class target count, rejecting samples whose boxes collide
        with existing (or previously sampled) boxes, then loads each
        sample's points from disk and shifts them to the sampled box pose.

        Args:
            root_path: dataset root; sample point files are relative to it.
            gt_boxes: (N, 7+) ground-truth boxes already in the frame.
            gt_names: class name per ground-truth box.
            num_point_features: number of features per point in the files.
            random_crop: if True, frustum-crop the sampled points.
            gt_group_ids: group ids, required when group sampling is on.
            rect, Trv2c, P2: calibration matrices, used only by random_crop.

        Returns:
            dict with sampled "gt_names", "difficulty", "gt_boxes",
            "points", "gt_masks" and "group_ids", or None if nothing
            was sampled.
        """
        sampled_num_dict = {}
        sample_num_per_class = []
        for class_name, max_sample_num in zip(
                self._sample_classes,  # classes to sample and target counts, e.g. Car, 15
                self._sample_max_nums):
            sampled_num = int(
                max_sample_num -
                np.sum([n == class_name
                        for n in gt_names]))  # frame already has n objects; sample (target - n) more
            sampled_num = np.round(self._rate * sampled_num).astype(np.int64)
            sampled_num_dict[class_name] = sampled_num  # {class name: number to sample}
            sample_num_per_class.append(sampled_num)  # number to sample for each class
        sampled_groups = self._sample_classes  # e.g. Car
        if self._use_group_sampling:  # False
            assert gt_group_ids is not None
            sampled_groups = []
            sample_num_per_class = []
            for group_name, class_names in self._group_name_to_names:
                sampled_nums_group = [sampled_num_dict[n] for n in class_names]
                sampled_num = np.max(sampled_nums_group)
                sample_num_per_class.append(sampled_num)
                sampled_groups.append(group_name)
            total_group_ids = gt_group_ids
        sampled = []
        sampled_gt_boxes = []
        avoid_coll_boxes = gt_boxes  # boxes to avoid colliding with; starts as all GT boxes

        for class_name, sampled_num in zip(
                sampled_groups,  # (target class, number to sample)
                sample_num_per_class):
            if sampled_num > 0:
                if self._use_group_sampling:  # False
                    sampled_cls = self.sample_group(class_name, sampled_num,
                                                    avoid_coll_boxes,
                                                    total_group_ids)
                else:
                    sampled_cls = self.sample_class_v2(
                        class_name,
                        sampled_num,  # sample as requested; returns only non-colliding samples
                        avoid_coll_boxes)

                sampled += sampled_cls
                if len(sampled_cls) > 0:  # stack sampled 3D boxes row-wise
                    if len(sampled_cls) == 1:
                        sampled_gt_box = sampled_cls[0]["box3d_lidar"][
                            np.newaxis, ...]
                    else:
                        sampled_gt_box = np.stack(
                            [s["box3d_lidar"] for s in sampled_cls], axis=0)

                    sampled_gt_boxes += [sampled_gt_box]  # all newly added sampled boxes
                    avoid_coll_boxes = np.concatenate(
                        [avoid_coll_boxes, sampled_gt_box],
                        axis=0)  # all boxes (original + sampled) after this round
                    if self._use_group_sampling:  # False
                        if len(sampled_cls) == 1:
                            sampled_group_ids = np.array(
                                sampled_cls[0]["group_id"])[np.newaxis, ...]
                        else:
                            sampled_group_ids = np.stack(
                                [s["group_id"] for s in sampled_cls], axis=0)
                        total_group_ids = np.concatenate(
                            [total_group_ids, sampled_group_ids], axis=0)

        if len(sampled) > 0:
            sampled_gt_boxes = np.concatenate(sampled_gt_boxes,
                                              axis=0)  # all sampled boxes, one per row
            num_sampled = len(sampled)
            s_points_list = []
            for info in sampled:  # load each sampled object's points and add them to this frame
                s_points = np.fromfile(str(
                    pathlib.Path(root_path) / info["path"]),
                                       dtype=np.float32)
                s_points = s_points.reshape([-1, num_point_features])
                # if not add_rgb_to_points:
                #     s_points = s_points[:, :4]
                if "rot_transform" in info:  # False
                    rot = info["rot_transform"]
                    s_points[:, :3] = box_np_ops.rotation_points_single_angle(
                        s_points[:, :3], rot, axis=2)
                s_points[:, :3] += info["box3d_lidar"][:
                                                       3]  # shift box-centered points back to world coordinates
                s_points_list.append(s_points)  # collect sampled point arrays (numpy) in a list
                # print(pathlib.Path(info["path"]).stem)
            # gt_bboxes = np.stack([s["bbox"] for s in sampled], axis=0)
            # if np.random.choice([False, True], replace=False, p=[0.3, 0.7]):
            # do random crop.
            if random_crop:  # False
                s_points_list_new = []
                gt_bboxes = box_np_ops.box3d_to_bbox(sampled_gt_boxes, rect,
                                                     Trv2c, P2)
                crop_frustums = prep.random_crop_frustum(
                    gt_bboxes, rect, Trv2c, P2)
                for i in range(crop_frustums.shape[0]):
                    s_points = s_points_list[i]
                    mask = prep.mask_points_in_corners(
                        s_points, crop_frustums[i:i + 1]).reshape(-1)
                    num_remove = np.sum(mask)
                    if num_remove > 0 and (s_points.shape[0] -
                                           num_remove) > 15:
                        s_points = s_points[np.logical_not(mask)]
                    s_points_list_new.append(s_points)
                s_points_list = s_points_list_new
            ret = {
                "gt_names":
                np.array([s["name"] for s in sampled]),  # sampled object classes, 1-D array
                "difficulty":
                np.array([s["difficulty"] for s in sampled]),  # difficulty of each sample, 1-D array
                "gt_boxes": sampled_gt_boxes,  # sampled GT boxes, one per row
                "points": np.concatenate(s_points_list,
                                         axis=0),  # points are unordered, so per-object clouds can be merged
                "gt_masks": np.ones((num_sampled, ), dtype=np.bool_)  # flags marking sampled boxes
            }
            if self._use_group_sampling:  # False
                ret["group_ids"] = np.array([s["group_id"] for s in sampled])
            else:
                ret["group_ids"] = np.arange(gt_boxes.shape[0],
                                             gt_boxes.shape[0] +
                                             len(sampled))  # ids for the sampled objects
        else:
            ret = None
        return ret
Example no. 6
0
    def sample_all(self,
                   root_path,
                   gt_boxes,
                   gt_names,
                   num_point_features,
                   random_crop=False,
                   gt_group_ids=None,
                   rect=None,
                   Trv2c=None,
                   P2=None):
        """Sample database objects into the current frame (GT augmentation).

        For each configured class, computes how many extra objects are
        needed to reach the per-class target, samples that many from the
        database while rejecting boxes that collide with existing or
        previously sampled boxes, then loads the sampled points from disk
        and translates them to each sampled box position.

        Args:
            root_path: dataset root; sample point files are relative to it.
            gt_boxes: (N, 7+) ground-truth boxes already in the frame.
            gt_names: class name per ground-truth box.
            num_point_features: number of features per point in the files.
            random_crop: if True, frustum-crop the sampled points.
            gt_group_ids: group ids, required when group sampling is on.
            rect, Trv2c, P2: calibration matrices, used only by random_crop.

        Returns:
            dict with "gt_names", "difficulty", "gt_boxes", "points",
            "gt_masks" and "group_ids" for the sampled objects, or None
            if nothing was sampled.
        """
        sampled_num_dict = {}
        sample_num_per_class = []
        # per-class deficit: target count minus objects already present
        for class_name, max_sample_num in zip(self._sample_classes,
                                              self._sample_max_nums):
            sampled_num = int(max_sample_num -
                              np.sum([n == class_name for n in gt_names]))
            sampled_num = np.round(self._rate * sampled_num).astype(np.int64)
            sampled_num_dict[class_name] = sampled_num
            sample_num_per_class.append(sampled_num)

        sampled_groups = self._sample_classes
        if self._use_group_sampling:
            assert gt_group_ids is not None
            sampled_groups = []
            sample_num_per_class = []
            # group mode: sample the max deficit over the group's classes
            for group_name, class_names in self._group_name_to_names:
                sampled_nums_group = [sampled_num_dict[n] for n in class_names]
                sampled_num = np.max(sampled_nums_group)
                sample_num_per_class.append(sampled_num)
                sampled_groups.append(group_name)
            total_group_ids = gt_group_ids
        sampled = []
        sampled_gt_boxes = []
        # collision set starts as the frame's own boxes and grows as we sample
        avoid_coll_boxes = gt_boxes

        for class_name, sampled_num in zip(sampled_groups,
                                           sample_num_per_class):
            if sampled_num > 0:
                if self._use_group_sampling:
                    sampled_cls = self.sample_group(class_name, sampled_num,
                                                    avoid_coll_boxes,
                                                    total_group_ids)
                else:
                    sampled_cls = self.sample_class_v2(class_name, sampled_num,
                                                       avoid_coll_boxes)

                sampled += sampled_cls
                if len(sampled_cls) > 0:
                    # stack the accepted boxes row-wise
                    if len(sampled_cls) == 1:
                        sampled_gt_box = sampled_cls[0]["box3d_lidar"][
                            np.newaxis, ...]
                    else:
                        sampled_gt_box = np.stack(
                            [s["box3d_lidar"] for s in sampled_cls], axis=0)

                    sampled_gt_boxes += [sampled_gt_box]
                    # later classes must also avoid these new boxes
                    avoid_coll_boxes = np.concatenate(
                        [avoid_coll_boxes, sampled_gt_box], axis=0)
                    if self._use_group_sampling:
                        if len(sampled_cls) == 1:
                            sampled_group_ids = np.array(
                                sampled_cls[0]["group_id"])[np.newaxis, ...]
                        else:
                            sampled_group_ids = np.stack(
                                [s["group_id"] for s in sampled_cls], axis=0)
                        total_group_ids = np.concatenate(
                            [total_group_ids, sampled_group_ids], axis=0)

        if len(sampled) > 0:
            sampled_gt_boxes = np.concatenate(sampled_gt_boxes, axis=0)
            num_sampled = len(sampled)
            s_points_list = []
            # load each sampled object's cropped point cloud from disk
            for info in sampled:
                s_points = np.fromfile(str(
                    pathlib.Path(root_path) / info["path"]),
                                       dtype=np.float32)
                s_points = s_points.reshape([-1, num_point_features])
                # if not add_rgb_to_points:
                #     s_points = s_points[:, :4]
                if "rot_transform" in info:
                    rot = info["rot_transform"]
                    s_points[:, :3] = box_np_ops.rotation_points_single_angle(
                        s_points[:, :3], rot, axis=2)
                # shift box-centered points to the sampled box position
                s_points[:, :3] += info["box3d_lidar"][:3]
                s_points_list.append(s_points)
                # print(pathlib.Path(info["path"]).stem)
            # gt_bboxes = np.stack([s["bbox"] for s in sampled], axis=0)
            # if np.random.choice([False, True], replace=False, p=[0.3, 0.7]):
            # do random crop.
            if random_crop:
                s_points_list_new = []
                gt_bboxes = box_np_ops.box3d_to_bbox(sampled_gt_boxes, rect,
                                                     Trv2c, P2)
                crop_frustums = prep.random_crop_frustum(
                    gt_bboxes, rect, Trv2c, P2)
                for i in range(crop_frustums.shape[0]):
                    s_points = s_points_list[i]
                    mask = prep.mask_points_in_corners(
                        s_points, crop_frustums[i:i + 1]).reshape(-1)
                    num_remove = np.sum(mask)
                    # only crop when enough points remain to stay useful
                    if num_remove > 0 and (s_points.shape[0] -
                                           num_remove) > 15:
                        s_points = s_points[np.logical_not(mask)]
                    s_points_list_new.append(s_points)
                s_points_list = s_points_list_new
            ret = {
                "gt_names": np.array([s["name"] for s in sampled]),
                "difficulty": np.array([s["difficulty"] for s in sampled]),
                "gt_boxes": sampled_gt_boxes,
                "points": np.concatenate(s_points_list, axis=0),
                "gt_masks": np.ones((num_sampled, ), dtype=np.bool_)
            }
            if self._use_group_sampling:
                ret["group_ids"] = np.array([s["group_id"] for s in sampled])
            else:
                ret["group_ids"] = np.arange(gt_boxes.shape[0],
                                             gt_boxes.shape[0] + len(sampled))
        else:
            ret = None
        return ret
Example no. 7
0
def get_projected_idx(input_size,
                      calib,
                      img_shape,
                      z_sel,
                      rot_noise,
                      scal_noise,
                      grid_size=4.,
                      right=False):
    '''Project BEV feature-map cell centers into the camera image.

    Builds one 3D point per feature-map cell (x, y from the cell center,
    z fixed by `z_sel`), undoes the frame's rotation/scaling augmentation,
    transforms the points from LiDAR to camera coordinates and projects
    them onto the image plane.

    Args:
        input_size: (w, h) of the BEV input; divided by `grid_size` to get
            the feature-map resolution.
        calib: dict with 'rect', 'Trv2c', 'P2' (and 'P3' for right camera).
        img_shape: unused here; the image size is hard-coded to 1248x384.
        z_sel: z value assigned to every cell center (in grid units).
        rot_noise: augmentation rotation to invert (radians).
        scal_noise: augmentation scale factor to invert.
        grid_size: BEV cells per feature-map cell (default 4).
        right: if True, project with the right camera matrix P3 instead
            of P2.

    Returns:
        (idxs, idxs_norm): pixel coordinates per cell, and the same
        coordinates normalized by the (1248, 384) image size.
    '''
    ## for FPN50 ##
    #        fm_sizes = [(input_size/pow(2.,i+3)).ceil() for i in range(self.num_fms)]
    #        grid_size = [8., 16., 32., 64., 128.]
    ## for PIXOR ##
    fm_size = input_size

    # feature-map resolution, then cell centers back in input coordinates
    fm_w, fm_h = int(fm_size[0] / grid_size), int(fm_size[1] / grid_size)
    xy2 = meshgrid(fm_w, fm_h).to(torch.float64) + 0.5
    xy = (xy2 * grid_size).view(fm_w, fm_h, 1, 2).expand(fm_w, fm_h, 1, 2)

    xy = xy.to(torch.float32)
    # constant z for every cell
    z = torch.Tensor([z_sel]).view(1, 1, 1, 1).expand(fm_w, fm_h, 1, 1)
    z = z.to(torch.float32)

    box = torch.cat([xy, z], 3)
    anchor_boxes = box.view(-1, 3)
    # Calculate Anchor Center
    anchor_center = torch.zeros(anchor_boxes.shape[0], 3, dtype=torch.float64)
    # anchor_center[:, 0] = 70.4 - (anchor_boxes[:, 0] / 10) ## x
    # /10 presumably converts grid units to meters (0.1 m cells) — TODO confirm
    anchor_center[:, 0] = anchor_boxes[:, 0] / 10
    anchor_center[:, 1] = (anchor_boxes[:, 1] / 10) - 40.  ##y
    anchor_center[:, 2] = anchor_boxes[:, 2] / 10

    # Convert to velodyne coordinates
    # anchor_center[:, 1] = -1 * anchor_center[:, 0]

    # Adjust center_z to center from bottom (1.52 looks like a typical car
    # height — TODO confirm)
    anchor_center[:, 2] += (1.52) / 2

    # Apply inverse augmentation: rotate back by -rot_noise, divide by scale
    # import pdb; pdb.set_trace()
    anchor_center_np = anchor_center.numpy()
    anchor_center_np = box_np_ops.rotation_points_single_angle(
        anchor_center_np, -rot_noise, axis=2)
    anchor_center_np *= 1. / scal_noise

    # anchor_center_np = box_np_ops.rotation_points_single_angle(anchor_center_np, 1/scal_noise, axis=2)

    # import pdb; pdb.set_trace()
    anchor_center = torch.tensor(anchor_center_np, dtype=torch.float64)

    # # Get GT height
    # mask = ((max_ious>0.5)[0::2, ...].nonzero()*2).squeeze()
    # anchor_center[mask, 2] = -1 * boxes_[max_ids[mask], 2].to(torch.float64)
    # anchor_center[mask, 2] += (boxes_[max_ids[mask], 5].to(torch.float64)) / 2
    # anchor_center = anchor_center[0::2, ...]

    # Project to image space
    # pts_2d, pts_2d_norm = anchor_projector.point_to_image(anchor_center, data_dir)
    r_rect = torch.tensor(calib['rect'],
                          dtype=torch.float32,
                          device=torch.device("cpu")).to(torch.float64)
    if right:
        P2 = torch.tensor(calib['P3'],
                          dtype=torch.float32,
                          device=torch.device("cpu")).to(torch.float64)
    else:
        P2 = torch.tensor(calib['P2'],
                          dtype=torch.float32,
                          device=torch.device("cpu")).to(torch.float64)
    velo2cam = torch.tensor(calib['Trv2c'],
                            dtype=torch.float32,
                            device=torch.device("cpu")).to(torch.float64)

    # anchor_center = anchor_center[:,[1,0,2]]
    anchor_center2 = box_torch_ops.lidar_to_camera(anchor_center, r_rect,
                                                   velo2cam)
    idxs = box_torch_ops.project_to_image(anchor_center2, P2)
    # image_h = img_shape[2] ##
    # image_w = img_shape[1]
    # img_shape_torch = torch.tensor([2496, 768]).to(torch.float64).view(1,2)
    # NOTE(review): image size is hard-coded rather than taken from img_shape
    img_shape_torch = torch.tensor([1248, 384]).to(torch.float64).view(1, 2)
    idxs_norm = idxs / img_shape_torch
    # import pdb; pdb.set_trace()
    # idx = idxs_norm
    # # Filtering idx
    # mask = torch.mul(idx > 0, idx < 1).sum(dim=1) == 2
    # mask = mask.view(-1,1)

    # import pdb; pdb.set_trace()
    return idxs, idxs_norm
Example no. 8
0
    def sample_all(self,
                   points,
                   voxel_size,
                   point_cloud_range,
                   voxel_grids,
                   root_path,
                   gt_boxes,
                   gt_names,
                   num_point_features,
                   random_crop=False,
                   gt_group_ids=None,
                   rect=None,
                   Trv2c=None,
                   P2=None):
        """Sample database objects into the frame, with optional ground
        filtering and z alignment of the sampled boxes.

        Like the classic GT-sampling routine, but can additionally
        (a) pre-filter candidate samples against a ground grid mask built
        from the frame's own points (disabled by the hard-coded
        `finetune_by_grd = False`), and (b) shift each sampled box and its
        points in z so the box bottom matches the frame's average
        ground-truth bottom (enabled via `finetune_samples_axis_z`).

        Args:
            points: the frame's point cloud (used only for ground filtering).
            voxel_size, point_cloud_range, voxel_grids: voxelization
                parameters for the ground grid mask.
            root_path: dataset root; sample point files are relative to it.
            gt_boxes: (N, 7+) ground-truth boxes already in the frame.
            gt_names: class name per ground-truth box.
            num_point_features: number of features per point in the files.
            random_crop: if True, frustum-crop the sampled points.
            gt_group_ids: group ids, required when group sampling is on.
            rect, Trv2c, P2: calibration matrices, used only by random_crop.

        Returns:
            dict with "gt_names", "difficulty", "gt_boxes", "points",
            "gt_masks" and "group_ids" for the sampled objects, or None
            if nothing was sampled.
        """
        sampled_num_dict = {}
        sample_num_per_class = []
        # per-class deficit: target count minus objects already present
        for class_name, max_sample_num in zip(self._sample_classes,
                                              self._sample_max_nums):
            sampled_num = int(max_sample_num -
                              np.sum([n == class_name for n in gt_names]))
            sampled_num = np.round(self._rate * sampled_num).astype(np.int64)
            sampled_num_dict[class_name] = sampled_num
            sample_num_per_class.append(sampled_num)

        sampled_groups = self._sample_classes
        if self._use_group_sampling:
            assert gt_group_ids is not None
            sampled_groups = []
            sample_num_per_class = []
            for group_name, class_names in self._group_name_to_names:
                sampled_nums_group = [sampled_num_dict[n] for n in class_names]
                sampled_num = np.max(sampled_nums_group)
                sample_num_per_class.append(sampled_num)
                sampled_groups.append(group_name)
            total_group_ids = gt_group_ids
        sampled = []
        sampled_gt_boxes = []
        # collision set starts as the frame's own boxes and grows as we sample
        avoid_coll_boxes = gt_boxes

        # optional ground-aware pre-filtering of candidates (currently off)
        finetune_by_grd = False
        voxel_scale = 3
        voxel_size_scaled = voxel_size * voxel_scale
        voxel_grids_scale = voxel_grids // voxel_scale
        if finetune_by_grd:
            grd_filter = ground_filter()
            grd_filter.filter(points)
            grd_gridmask = _points_to_gridmask_2d(grd_filter.ground_pc,
                                                  voxel_size_scaled,
                                                  point_cloud_range[:3],
                                                  voxel_grids_scale)
        else:
            grd_gridmask = None

        for class_name, sampled_num in zip(sampled_groups,
                                           sample_num_per_class):
            if sampled_num > 0:
                if finetune_by_grd:
                    assert self._use_group_sampling is not True
                    # oversample 15x, then keep only candidates on ground
                    all_samples = self._sampler_dict[class_name].sample(
                        sampled_num * 15)
                    all_samples = copy.deepcopy(all_samples)
                    all_samples = _fine_sample_by_grd(all_samples,
                                                      grd_gridmask,
                                                      voxel_size_scaled,
                                                      point_cloud_range[:3],
                                                      voxel_grids_scale)
                    if len(all_samples) > sampled_num:
                        #print(len(all_samples), '>', sampled_num)
                        all_samples = all_samples[:sampled_num]
                else:
                    all_samples = self._sampler_dict[class_name].sample(
                        sampled_num)
                    all_samples = copy.deepcopy(all_samples)

                if self._use_group_sampling:
                    sampled_cls = self.sample_group(class_name, sampled_num,
                                                    avoid_coll_boxes,
                                                    total_group_ids)
                else:
                    if len(all_samples) > 0:
                        # NOTE: this variant passes pre-drawn samples to
                        # sample_class_v2 instead of (class_name, num)
                        sampled_cls = self.sample_class_v2(
                            all_samples, avoid_coll_boxes)
                    else:
                        sampled_cls = []

                sampled += sampled_cls
                if len(sampled_cls) > 0:
                    # stack the accepted boxes row-wise
                    if len(sampled_cls) == 1:
                        sampled_gt_box = sampled_cls[0]["box3d_lidar"][
                            np.newaxis, ...]
                    else:
                        sampled_gt_box = np.stack(
                            [s["box3d_lidar"] for s in sampled_cls], axis=0)

                    sampled_gt_boxes += [sampled_gt_box]
                    # later classes must also avoid these new boxes
                    avoid_coll_boxes = np.concatenate(
                        [avoid_coll_boxes, sampled_gt_box], axis=0)
                    if self._use_group_sampling:
                        if len(sampled_cls) == 1:
                            sampled_group_ids = np.array(
                                sampled_cls[0]["group_id"])[np.newaxis, ...]
                        else:
                            sampled_group_ids = np.stack(
                                [s["group_id"] for s in sampled_cls], axis=0)
                        total_group_ids = np.concatenate(
                            [total_group_ids, sampled_group_ids], axis=0)

        if len(sampled) > 0:
            sampled_gt_boxes = np.concatenate(sampled_gt_boxes, axis=0)
            num_sampled = len(sampled)
            s_points_list = []
            # load each sampled object's cropped point cloud from disk
            for info in sampled:
                s_points = np.fromfile(str(
                    pathlib.Path(root_path) / info["path"]),
                                       dtype=np.float32)
                s_points = s_points.reshape([-1, num_point_features])
                # if not add_rgb_to_points:
                #     s_points = s_points[:, :4]
                if "rot_transform" in info:
                    rot = info["rot_transform"]
                    s_points[:, :3] = box_np_ops.rotation_points_single_angle(
                        s_points[:, :3], rot, axis=2)
                # shift box-centered points to the sampled box position
                s_points[:, :3] += info["box3d_lidar"][:3]
                s_points_list.append(s_points)
                # print(pathlib.Path(info["path"]).stem)
            # gt_bboxes = np.stack([s["bbox"] for s in sampled], axis=0)
            # if np.random.choice([False, True], replace=False, p=[0.3, 0.7]):
            # do random crop.
            if random_crop:
                s_points_list_new = []
                gt_bboxes = box_np_ops.box3d_to_bbox(sampled_gt_boxes, rect,
                                                     Trv2c, P2)
                crop_frustums = prep.random_crop_frustum(
                    gt_bboxes, rect, Trv2c, P2)
                for i in range(crop_frustums.shape[0]):
                    s_points = s_points_list[i]
                    mask = prep.mask_points_in_corners(
                        s_points, crop_frustums[i:i + 1]).reshape(-1)
                    num_remove = np.sum(mask)
                    # only crop when enough points remain to stay useful
                    if num_remove > 0 and (s_points.shape[0] -
                                           num_remove) > 15:
                        s_points = s_points[np.logical_not(mask)]
                    s_points_list_new.append(s_points)
                s_points_list = s_points_list_new

            # align each sampled box bottom with the frame's average GT
            # bottom z, shifting the box and its points by the same delta
            finetune_samples_axis_z = True
            if finetune_samples_axis_z:
                gt_box_bottom_avg = np.mean(gt_boxes[:, 2])
                # finetune boxes
                sampled_gt_boxes_bottom = sampled_gt_boxes[:, 2]
                delta = sampled_gt_boxes_bottom - gt_box_bottom_avg
                sampled_gt_boxes[:, 2] -= delta
                # finetune points
                for i in range(len(s_points_list)):
                    s_points_list[i][:, 2] -= delta[i]

            ret = {
                "gt_names": np.array([s["name"] for s in sampled]),
                "difficulty": np.array([s["difficulty"] for s in sampled]),
                "gt_boxes": sampled_gt_boxes,
                "points": np.concatenate(s_points_list, axis=0),
                "gt_masks": np.ones((num_sampled, ), dtype=np.bool_)
            }
            if self._use_group_sampling:
                ret["group_ids"] = np.array([s["group_id"] for s in sampled])
            else:
                ret["group_ids"] = np.arange(gt_boxes.shape[0],
                                             gt_boxes.shape[0] + len(sampled))
        else:
            ret = None
        return ret