Example #1
File: data.py  Project: SilvesterHsu/fiery
    def _get_poly_region_in_image(self, instance_annotation, ego_translation,
                                  ego_rotation):
        # Build the annotation's 3D box in global coordinates, then move it
        # into the ego frame (ego_translation / ego_rotation are expected to
        # be the inverse of the ego pose).
        box = Box(instance_annotation['translation'],
                  instance_annotation['size'],
                  Quaternion(instance_annotation['rotation']))
        box.translate(ego_translation)
        box.rotate(ego_rotation)

        # Quantise the footprint (bottom corners) to integer BEV grid indices.
        pts = box.bottom_corners()[:2].T
        pts = np.round(
            (pts - self.bev_start_position[:2] + self.bev_resolution[:2] / 2.0)
            / self.bev_resolution[:2]).astype(np.int32)
        # Swap the two columns so the point order matches the image axes.
        pts[:, [1, 0]] = pts[:, [0, 1]]

        # Height of the box bottom in the ego frame.
        z = box.bottom_corners()[2, 0]
        return pts, z
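The pixel mapping above is a plain uniform-grid quantisation: shift by the grid origin, add half a cell so np.round snaps to cell centres, then divide by the cell size. Below is a minimal sketch of the same arithmetic with made-up grid parameters (the 0.5 m resolution, 100 m extent, and corner coordinates are assumptions for illustration, not values from the project):

import numpy as np

# Hypothetical 100 m x 100 m BEV grid at 0.5 m / pixel (200 x 200 cells),
# centred on the ego vehicle; these values are illustrative only.
bev_resolution = np.array([0.5, 0.5])
bev_start_position = np.array([-49.75, -49.75])  # centre of the first cell

# Footprint corners in ego coordinates (metres), shaped like the output of
# box.bottom_corners()[:2].T for a 4 m x 2 m box ahead of the ego.
pts = np.array([[8.0, -1.0], [12.0, -1.0], [12.0, 1.0], [8.0, 1.0]])

# Same arithmetic as _get_poly_region_in_image: shift to the grid origin,
# add half a cell so rounding snaps to cell centres, divide by cell size.
idx = np.round((pts - bev_start_position + bev_resolution / 2.0)
               / bev_resolution).astype(np.int32)
idx[:, [1, 0]] = idx[:, [0, 1]]  # swap to image (row/col) ordering
print(idx)  # e.g. [[ 98 116] [ 98 124] [102 124] [102 116]]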
Example #2
    def get_binimg(self, rec):
        # Inverse ego pose at the LIDAR_TOP timestamp: translating by -t and
        # rotating by q^-1 maps global coordinates into the ego frame.
        egopose = self.nusc.get(
            'ego_pose',
            self.nusc.get('sample_data',
                          rec['data']['LIDAR_TOP'])['ego_pose_token'])
        trans = -np.array(egopose['translation'])
        rot = Quaternion(egopose['rotation']).inverse
        img = np.zeros((self.nx[0], self.nx[1]))
        for tok in rec['anns']:
            inst = self.nusc.get('sample_annotation', tok)
            # Keep only vehicle annotations (the category prefix check also
            # works for Lyft category names).
            if inst['category_name'].split('.')[0] != 'vehicle':
                continue
            box = Box(inst['translation'], inst['size'],
                      Quaternion(inst['rotation']))
            box.translate(trans)
            box.rotate(rot)

            # Quantise the footprint to BEV grid indices and rasterise it.
            pts = box.bottom_corners()[:2].T
            pts = np.round((pts - self.bx[:2] + self.dx[:2] / 2.) /
                           self.dx[:2]).astype(np.int32)
            pts[:, [1, 0]] = pts[:, [0, 1]]
            cv2.fillPoly(img, [pts], 1.0)

        return torch.Tensor(img).unsqueeze(0)
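One detail worth noting: cv2.fillPoly reads each point as (x, y) = (column, row), while the indices computed above come out in grid (x, y) order; the pts[:, [1, 0]] swap reorders them to match. Below is a standalone sketch with an invented footprint (the grid size and corner values are assumptions):

import numpy as np
import cv2

img = np.zeros((200, 200))  # (rows, cols), matching np.zeros((nx[0], nx[1]))

# Invented integer grid indices for one footprint, in (grid-x, grid-y) order.
pts = np.array([[116, 98], [124, 98], [124, 102], [116, 102]], dtype=np.int32)

# fillPoly reads points as (x, y) = (column, row); swapping the columns
# makes grid-x index the rows and grid-y the columns, as intended.
pts[:, [1, 0]] = pts[:, [0, 1]]
cv2.fillPoly(img, [pts], 1.0)
print(int(img.sum()))  # area of the filled footprint in cells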
Example #3
    def get_binimg(self, rec):
        # Inverse ego pose at the LIDAR_TOP timestamp, used to move boxes
        # from global coordinates into the ego frame.
        egopose = self.nusc.get(
            'ego_pose',
            self.nusc.get('sample_data',
                          rec['data']['LIDAR_TOP'])['ego_pose_token'])
        trans = -np.array(egopose['translation'])
        rot = Quaternion(egopose['rotation']).inverse

        # Vehicle label: binary BEV channel containing all vehicle footprints.
        img_vehicle = np.zeros((self.nx[0], self.nx[1]))
        for tok in rec['anns']:
            inst = self.nusc.get('sample_annotation', tok)
            # Keep only vehicle annotations (the category prefix check also
            # works for Lyft category names).
            if inst['category_name'].split('.')[0] != 'vehicle':
                continue
            box = Box(inst['translation'], inst['size'],
                      Quaternion(inst['rotation']))
            box.translate(trans)
            box.rotate(rot)
            pts = box.bottom_corners()[:2].T
            pts = np.round((pts - self.bx[:2] + self.dx[:2] / 2.) /
                           self.dx[:2]).astype(np.int32)
            pts[:, [1, 0]] = pts[:, [0, 1]]
            cv2.fillPoly(img_vehicle, [pts], 1.0)

        # Map label: look up which map this scene belongs to, then query the
        # local map layers around the ego pose.
        map_name = self.scene2map[self.nusc.get('scene',
                                                rec['scene_token'])['name']]
        # Ego yaw, recovered from the rotation matrix.
        rot_map = Quaternion(egopose['rotation']).rotation_matrix
        rot_map = np.arctan2(rot_map[1, 0], rot_map[0, 0])
        # get_local_map takes the pose as (x, y, cos(yaw), sin(yaw)).
        center = np.array([
            egopose['translation'][0], egopose['translation'][1],
            np.cos(rot_map),
            np.sin(rot_map)
        ])
        lmap = get_local_map(self.nusc_maps[map_name], center, 50.0,
                             ['road_segment', 'lane'],
                             ['lane_divider', 'road_divider'])
        # road_segment: fill drivable-area polygons into a binary channel.
        img_road_segment = np.zeros((self.nx[0], self.nx[1]))
        arr_pts = []
        for pts in lmap['road_segment']:
            pts = np.round((pts - self.bx[:2] + self.dx[:2] / 2.) /
                           self.dx[:2]).astype(np.int32)
            pts[:, [1, 0]] = pts[:, [0, 1]]
            arr_pts.append(pts)
        cv2.fillPoly(img_road_segment, arr_pts, 1.0)
        # lane: filled into the same channel as road_segment.
        arr_pts = []
        for pts in lmap['lane']:
            pts = np.round((pts - self.bx[:2] + self.dx[:2] / 2.) /
                           self.dx[:2]).astype(np.int32)
            pts[:, [1, 0]] = pts[:, [0, 1]]
            arr_pts.append(pts)
        cv2.fillPoly(img_road_segment, arr_pts, 1.0)

        # road_divider is not rasterised; only lane_divider is drawn, as
        # polylines rather than filled polygons.
        img_lane_divider = np.zeros((self.nx[0], self.nx[1]))
        arr_pts = []
        for pts in lmap['lane_divider']:
            pts = np.round((pts - self.bx[:2] + self.dx[:2] / 2.) /
                           self.dx[:2]).astype(np.int32)
            pts[:, [1, 0]] = pts[:, [0, 1]]
            arr_pts.append(pts)

        cv2.polylines(img_lane_divider, arr_pts, False, 1.0, 2)

        # Channel order: vehicle, road (road_segment + lane), lane_divider.
        return torch.Tensor(
            np.stack([img_vehicle, img_road_segment, img_lane_divider]))
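The return value is a 3-channel tensor of binary masks. A hypothetical consumer (the shapes and loss choice below are assumptions, not code from this project) could train a network against each channel as an independent segmentation target:

import torch
import torch.nn.functional as F

binimg = torch.zeros(3, 200, 200)     # stand-in for get_binimg(rec)
logits = torch.randn(1, 3, 200, 200)  # hypothetical network output

# One binary cross-entropy term per channel:
# vehicle, road_segment(+lane), lane_divider.
loss = F.binary_cross_entropy_with_logits(logits, binimg.unsqueeze(0))
print(loss.item())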