Example #1
import glob
import os
from collections import defaultdict

import numpy as np

# Bbox3D, box3d_t_2d, get_box3d_cam and get_alpha are project-specific helpers
# assumed to be in scope (imported elsewhere in the module).

def get_sung_info(data_path, house_names0):
  # Keep only the requested houses that actually exist under <data_path>/houses.
  data_path = os.path.join(data_path, 'houses')
  house_names1 = os.listdir(data_path)
  house_names = [h for h in house_names1 if h in house_names0]

  infos = []
  for house in house_names:
    house_path = os.path.join(data_path, house)
    object_path = os.path.join(house_path, 'objects')

    # One point-cloud file (pcl_*.bin) per frame in this house.
    pcl_fns = glob.glob(os.path.join(house_path, 'pcl*.bin'))
    pcl_fns.sort()
    #pcl_fns = [pcl_fn.split('houses')[1] for pcl_fn in pcl_fns]
    pcl_num = len(pcl_fns)

    # Box files are named <object_class>_<frame_idx>.bin; collect the set of classes.
    box_fns = glob.glob(os.path.join(object_path, '*.bin'))
    box_fns.sort()
    objects = set([os.path.basename(fn).split('_')[0] for fn in box_fns])

    for i in range(pcl_num):
      info = {}
      info['velodyne_path'] = pcl_fns[i]
      info['pointcloud_num_features'] = 6

      # Dummy image / calibration entries so the info dict matches the KITTI-style layout.
      info['image_idx'] = '0'
      info['image_path'] = 'empty'
      info['calib/R0_rect'] = np.eye(4)
      info['calib/Tr_velo_to_cam'] = np.eye(4) # np.array([[0,-1,0,0], [0,0,-1,0], [1,0,0,0], [0,0,0,1]], dtype=np.float32)
      info['calib/P2'] = np.eye(4)

      # The frame index is encoded at the end of the point-cloud file name.
      base_name = os.path.splitext( os.path.basename(pcl_fns[i]) )[0]
      idx = int(base_name.split('_')[-1])

      annos = defaultdict(list)
      for obj in objects:
        # Each box file stores 7 float32 values per box; after conversion the
        # columns are [location(3), dimensions(3), rotation_y(1)].
        box_fn = os.path.join(object_path, obj+'_'+str(idx)+'.bin')
        box = np.fromfile(box_fn, np.float32)
        box = box.reshape([-1,7])
        box = Bbox3D.convert_to_yx_zb_boxes(box)
        box_num = box.shape[0]
        annos['location'].append(box[:,0:3])
        annos['dimensions'].append(box[:,3:6])
        annos['rotation_y'].append(box[:,6])
        annos['name'].append( np.array([obj]*box_num) )

        annos['difficulty'].append(np.array(['A']*box_num))
        annos['bbox'].append( box3d_t_2d(box, info['calib/P2'] ) )
        annos['box3d_camera'].append( get_box3d_cam(box, info['calib/R0_rect'], info['calib/Tr_velo_to_cam']) )
        annos["truncated"].append(np.array([0.0]*box_num))
        annos["occluded"].append(np.array([0.0]*box_num))
        annos["alpha"].append( get_alpha(box) )

      # Stack the per-class lists into flat per-frame arrays.
      for key in annos:
        annos[key] = np.concatenate(annos[key], 0)

      info['annos'] = annos

      infos.append(info)
  return infos
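
A minimal usage sketch for get_sung_info; the dataset root, the house names and the pickle step are illustrative assumptions, not part of the original code.

import pickle

# Hypothetical dataset root and house list; adjust to the actual layout on disk.
data_path = '/path/to/suncg'
house_names = ['house_0001', 'house_0002']

infos = get_sung_info(data_path, house_names)
print(f'{len(infos)} frames indexed')

# One possible way to persist the info list for later training runs.
with open('suncg_infos.pkl', 'wb') as f:
  pickle.dump(infos, f)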
Example #2
    # Assumes module-level imports of os, math, numpy as np and torch, plus the
    # project helpers Bbox3D, elastic, bbox_dic_to_BoxList3D and show_pcl_boxdic.
    def __getitem__(self, index):
        is_train = self.is_train
        scale = self.scale
        full_scale = self.full_scale
        objects_to_detect = self.objects_to_detect

        # Augmentation switches: all deliberately disabled here
        # (factors multiplied by 0, flags forced to False).
        zoom_rate = 0.1 * 0
        flip_x = False and is_train
        random_rotate = False and is_train
        distortion = False and is_train
        origin_offset = False and is_train
        norm_noise = 0.01 * int(is_train) * 0

        fn = self.files[index]
        print(f'Loading {fn} in SUNCGDataset')
        hn = os.path.basename(os.path.dirname(fn))
        #if self.is_train:
        #  print(f'\n(suncg_dataset.py) train {index}-th   {hn}\n')
        #else:
        #  print(f'\n(suncg_dataset.py) test  {index}-th  {hn}\n')
        pcl_i, bboxes_dic_i_0 = torch.load(fn)
        #points_sample(pcl_i)
        # Convert any tensor-valued ground-truth boxes to numpy arrays.
        for obj in bboxes_dic_i_0:
            if isinstance(bboxes_dic_i_0[obj], torch.Tensor):
                bboxes_dic_i_0[obj] = bboxes_dic_i_0[obj].data.numpy()

        a = pcl_i[:, 0:3].copy()  # xyz coordinates (will be augmented)
        b = pcl_i                 # full per-point features
        bboxes_dic_i = {}
        for obj in objects_to_detect:
            if not (obj in bboxes_dic_i_0 or obj == 'background'):
                print(f"unknown class {obj}")
                import pdb
                pdb.set_trace()  # XXX BREAKPOINT
                assert False
        # Keep only the requested classes and convert them to yx_zb boxes;
        # yaw is forced to zero for the axis-aligned categories.
        for obj in bboxes_dic_i_0:
            if ('all' in objects_to_detect) or (obj in objects_to_detect):
                bboxes_dic_i[obj] = Bbox3D.convert_to_yx_zb_boxes(
                    bboxes_dic_i_0[obj])
                if obj in ['ceiling', 'floor', 'room']:
                    bboxes_dic_i[obj] = Bbox3D.set_yaw_zero(bboxes_dic_i[obj])
        if SHOW_RAW_INPUT:
            show_pcl_boxdic(pcl_i, bboxes_dic_i)

        #---------------------------------------------------------------------
        # augmentation of xyz
        m = np.eye(3) + np.random.randn(3, 3) * zoom_rate  # aug: zoom
        if flip_x:
            m[0][0] *= np.random.randint(0, 2) * 2 - 1  # aug: x flip
        m *= scale
        if random_rotate:
            theta = np.random.rand() * 2 * math.pi  # rotation aug
            m = np.matmul(
                m, [[math.cos(theta), math.sin(theta), 0],
                    [-math.sin(theta), math.cos(theta), 0], [0, 0, 1]])
        a = np.matmul(a, m)
        if distortion:
            a = elastic(a, 6 * scale // 50, 40 * scale / 50)
            a = elastic(a, 20 * scale // 50, 160 * scale / 50)
        m = a.min(0)
        M = a.max(0)
        q = M - m
        # aug: the centroid between [0,full_scale]
        offset = -m
        if origin_offset:
            offset += np.clip(full_scale - M + m - 0.001, 0,
                              None) * np.random.rand(3) + np.clip(
                                  full_scale - M + m + 0.001, None,
                                  0) * np.random.rand(3)
        a += offset

        xyz_min = a.min(0) / scale
        xyz_max = a.max(0) / scale
        size3d = np.expand_dims(np.concatenate([xyz_min, xyz_max], 0),
                                0).astype(np.float32)
        size3d = torch.from_numpy(size3d)
        #---------------------------------------------------------------------
        # augmentation of feature
        # add gaussian noise to feature channels 3:6 (disabled here: norm_noise == 0)
        b[:, 3:6] += np.random.randn(3) * norm_noise

        #---------------------------------------------------------------------
        # get elements
        b = b[:, self.elements_ids]
        if 'xyz' in self.elements:
            # write the augmented xyz back into the xyz feature columns
            b[:, 0:3] = a / scale

        #---------------------------------------------------------------------
        # augment gt boxes
        for obj in bboxes_dic_i:
            # shift the gt box centers by the point-cloud offset, converted back to original units
            bboxes_dic_i[obj][:, 0:3] += np.expand_dims(offset, 0) / scale

        #---------------------------------------------------------------------
        assert a.min() >= 0, f"point location should not < 0: {a.min()}"
        up_check = np.all(a < full_scale[np.newaxis, :], 1)
        if not np.all(up_check):
            max_scale = a.max(0)
            print(f'file: {self.files[index]}')
            print(
                f'\nmax scale: {max_scale} > full_scale: {full_scale}, some points will be missed\n'
            )
            if not ENABLE_POINTS_MISSED:
                import pdb
                pdb.set_trace()  # XXX BREAKPOINT
                assert False

        # Drop points that fall outside [0, full_scale) after the offset.
        idxs = (a.min(1) >= 0) * (up_check)
        a = a[idxs]
        b = b[idxs]
        #c=c[idxs]
        a = torch.from_numpy(a).long()  # integer point coordinates used as locations
        #locs = torch.cat([a,torch.LongTensor(a.shape[0],1).fill_(index)],1)
        locs = a
        feats = torch.from_numpy(b)

        #---------------------------------------------------------------------
        # Pack the ground-truth boxes into a BoxList3D, with size3d as the scene extent.
        bboxlist3d = bbox_dic_to_BoxList3D(bboxes_dic_i, size3d,
                                           self.dset_metas)
        labels = bboxlist3d
        if SHOW_AUG_INPUT:
            show_pcl_boxdic(pcl_i, bboxes_dic_i)
            bboxlist3d.show()
            import pdb
            pdb.set_trace()  # XXX BREAKPOINT
            pass

        #batch_scopes(locs, scale)
        data = {'x': [locs, feats], 'y': labels, 'id': index, 'fn': fn}
        return data
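
A minimal sketch of how this __getitem__ could be driven through a PyTorch DataLoader; the SUNCGDataset constructor arguments and the identity collate_fn are assumptions, since neither appears in the snippet above.

from torch.utils.data import DataLoader

# Hypothetical construction; the real constructor signature is not shown above.
dataset = SUNCGDataset(split='train')

# batch_size=1 with an identity collate keeps the per-sample dict untouched.
loader = DataLoader(dataset, batch_size=1, shuffle=True,
                    collate_fn=lambda batch: batch[0])

for data in loader:
    locs, feats = data['x']
    labels = data['y']
    print(data['fn'], locs.shape, feats.shape)
    break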