def __init__(self, image_set, year):
    db.__init__(self, 'coco_' + year + '_' + image_set)
    # COCO specific config options
    self.config = {'use_salt': True, 'cleanup': True}
    # name, paths
    self._year = year
    self._image_set = image_set
    self._data_path = osp.join(cfg.DATA_DIR, 'coco')
    # load COCO API, classes, class <-> id mappings
    self._COCO = COCO(self._get_ann_file())
    cats = self._COCO.loadCats(self._COCO.getCatIds())
    self._classes = tuple(['__background__'] + [c['name'] for c in cats])
    self._class_to_ind = dict(
        list(zip(self.classes, list(range(self.num_classes)))))
    self._class_to_coco_cat_id = dict(
        list(zip([c['name'] for c in cats], self._COCO.getCatIds())))
    self._image_index = self._load_image_set_index()
    # Default to roidb handler
    self.set_proposal_method('gt')
    self.competition_mode(False)

    # Some image sets are "views" (i.e. subsets) into others.
    # For example, minival2014 is a random 5000 image subset of val2014.
    # This mapping tells us where the view's images and proposals come from.
    self._view_map = {
        'minival2014': 'val2014',          # 5k val2014 subset
        'valminusminival2014': 'val2014',  # val2014 \setminus minival2014
        'test-dev2015': 'test2015',
    }
    coco_name = image_set + year  # e.g., "val2014"
    self._data_name = (self._view_map[coco_name]
                       if coco_name in self._view_map else coco_name)
    # Dataset splits that have ground-truth annotations (test splits
    # do not have gt annotations)
    self._gt_splits = ('train', 'val', 'minival')
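# A minimal, standalone sketch (not part of the class) of how the view-map
# lookup above resolves a split name to the split whose images and proposals
# it reuses; the dictionary literal simply mirrors self._view_map and the
# helper name is illustrative only.
def _resolve_coco_data_name(image_set, year):
    view_map = {
        'minival2014': 'val2014',
        'valminusminival2014': 'val2014',
        'test-dev2015': 'test2015',
    }
    coco_name = image_set + year
    # Views fall back to their parent split; everything else maps to itself.
    return view_map.get(coco_name, coco_name)

# _resolve_coco_data_name('minival', '2014') -> 'val2014'
# _resolve_coco_data_name('train', '2014')   -> 'train2014'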
def __init__(self, mode='test', limiter=0, shuffle_en=False):
    name = 'waymo'
    self.type = 'lidar'
    db.__init__(self, name, mode)
    self._train_scenes = []
    self._val_scenes = []
    self._test_scenes = []
    self._test_index = []
    if (mode == 'test'):
        self._tod_filter_list = cfg.TEST.TOD_FILTER_LIST
    else:
        self._tod_filter_list = cfg.TRAIN.TOD_FILTER_LIST
    self._uncertainty_sort_type = cfg.UC.SORT_TYPE
    # BEV canvas size in voxels, derived from the configured lidar range.
    self._draw_width = int((cfg.LIDAR.X_RANGE[1] - cfg.LIDAR.X_RANGE[0]) *
                           (1 / cfg.LIDAR.VOXEL_LEN))
    self._draw_height = int((cfg.LIDAR.Y_RANGE[1] - cfg.LIDAR.Y_RANGE[0]) *
                            (1 / cfg.LIDAR.VOXEL_LEN))
    self._num_slices = cfg.LIDAR.NUM_SLICES
    self._bev_slice_locations = [1, 2, 3, 4, 5, 7]
    self._filetype = 'npy'
    self._imtype = 'PNG'
    self._scene_sel = True
    # For now one large cache file is OK, but ideally just take the subset of
    # actually needed data and cache that, so the full dataset does not have
    # to be loaded every time.
    self._classes = (
        'dontcare',  # always index 0
        'vehicle.car')
        # 'human.pedestrian',
        # 'vehicle.bicycle')
    self.config = {'cleanup': True, 'matlab_eval': False, 'rpn_file': None}
    self._class_to_ind = dict(
        list(zip(self.classes, list(range(self.num_classes)))))
    self._train_index = os.listdir(
        os.path.join(self._devkit_path, 'train', 'point_clouds'))
    self._val_index = os.listdir(
        os.path.join(self._devkit_path, 'val', 'point_clouds'))
    self._val_index.sort(key=natural_keys)
    rand = SystemRandom()
    if (shuffle_en):
        print('shuffling pc indices')
        rand.shuffle(self._train_index)
        rand.shuffle(self._val_index)
    if (limiter != 0):
        if (limiter < len(self._val_index)):
            self._val_index = self._val_index[:limiter]
        if (limiter < len(self._train_index)):
            self._train_index = self._train_index[:limiter]
        if (limiter < len(self._test_index)):
            self._test_index = self._test_index[:limiter]
    #if(18000 < len(self._val_index)):
    #    self._val_index = self._val_index[:18000]
    assert os.path.exists(
        self._devkit_path), 'waymo dataset path does not exist: {}'.format(
            self._devkit_path)
def __init__(self, image_set, year, use_diff=False):
    name = 'voc_' + year + '_' + image_set
    if use_diff:
        name += '_diff'
    db.__init__(self, name)
    self._year = year
    self._image_set = image_set
    self._devkit_path = self._get_default_path()
    self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
    self._classes = (
        '__background__',  # always index 0
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
        'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
        'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
    self._class_to_ind = dict(
        list(zip(self.classes, list(range(self.num_classes)))))
    self._image_ext = '.jpg'
    self._image_index = self._load_image_set_index()
    self._salt = str(uuid.uuid4())
    self._comp_id = 'comp4'
    # PASCAL specific config options
    self.config = {
        'cleanup': True,
        'use_salt': True,
        'use_diff': use_diff,
        'matlab_eval': False,
        'rpn_file': None
    }
    assert os.path.exists(self._devkit_path), \
        'VOCdevkit path does not exist: {}'.format(self._devkit_path)
    assert os.path.exists(self._data_path), \
        'Path does not exist: {}'.format(self._data_path)
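# A hedged, standalone sketch of how the salted competition id stored above is
# typically used when naming result files. The real accessor lives elsewhere
# in the class and is not shown here; the helper name and signature below are
# illustrative assumptions, not the actual API.
def _salted_comp_id(comp_id, salt, use_salt):
    # Appending the per-run uuid salt keeps parallel evaluation runs from
    # overwriting each other's result files.
    return comp_id + '_' + salt if use_salt else comp_id

# e.g. _salted_comp_id('comp4', '1b9e3c', True) -> 'comp4_1b9e3c'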
def __init__(self, mode='test', limiter=0, shuffle_en=True):
    name = 'cadc'
    db.__init__(self, name, mode)
    self._devkit_path = self._get_default_path()
    self._data_path = self._devkit_path
    self._mode = mode
    self._uncertainty_sort_type = cfg.UC.SORT_TYPE
    self._frame_sub_dir = 'image_00'
    if (mode == 'test'):
        self._tod_filter_list = cfg.TEST.CADC_FILTER_LIST
    else:
        self._tod_filter_list = cfg.TRAIN.CADC_FILTER_LIST
    scene_desc_filename = os.path.join(self._data_path,
                                       'cadc_scene_description.csv')
    self._load_scene_meta(scene_desc_filename)
    self._train_dir = os.path.join(self._data_path, 'train',
                                   self._frame_sub_dir)
    self._val_dir = os.path.join(self._data_path, 'val', self._frame_sub_dir)
    #self._test_dir = os.path.join(self._data_path, 'testing', self._frame_sub_dir)
    crop_top = 150
    crop_bottom = 250
    self._imwidth = 1280
    self._imheight = 1024 - crop_top - crop_bottom
    self._imtype = 'png'
    self._filetype = 'png'
    self.type = 'image'
    self._mode = mode
    # Backwards compatibility
    #self._train_sub_folder = 'training'
    #self._val_sub_folder = 'evaluation'
    #self._test_sub_folder = 'testing'
    self._classes = (
        'dontcare',  # always index 0
        #'Pedestrian',
        #'Cyclist',
        'Car')
    self.config = {'cleanup': True, 'matlab_eval': False, 'rpn_file': None}
    self._class_to_ind = dict(
        list(zip(self.classes, list(range(self.num_classes)))))
    self._train_index = sorted(
        [d for d in os.listdir(self._train_dir) if d.endswith('.png')])
    self._val_index = sorted(
        [d for d in os.listdir(self._val_dir) if d.endswith('.png')])
    #self._test_index = sorted([d for d in os.listdir(self._test_dir) if d.endswith('.png')])
    # Limiter
    if (limiter != 0):
        if (limiter < len(self._val_index)):
            self._val_index = self._val_index[:limiter]
        if (limiter < len(self._train_index)):
            self._train_index = self._train_index[:limiter]
        #if(limiter < len(self._test_index)):
        #    self._test_index = self._test_index[:limiter]
    rand = SystemRandom()
    if (shuffle_en):
        print('shuffling image indices')
        rand.shuffle(self._val_index)
        rand.shuffle(self._train_index)
        #rand.shuffle(self._test_index)
    assert os.path.exists(self._devkit_path), \
        'cadc dataset path does not exist: {}'.format(self._devkit_path)
    assert os.path.exists(self._data_path), \
        'Path does not exist: {}'.format(self._data_path)
def __init__(self, mode='test', limiter=0, shuffle_en=True):
    name = 'kitti'
    db.__init__(self, name, mode)
    self._devkit_path = self._get_default_path()
    self._data_path = self._devkit_path
    self._mode = mode
    self._uncertainty_sort_type = cfg.UC.SORT_TYPE
    self._frame_sub_dir = 'image_2'
    self._train_dir = os.path.join(self._data_path, 'training',
                                   self._frame_sub_dir)
    self._val_dir = os.path.join(self._data_path, 'training',
                                 self._frame_sub_dir)
    self._test_dir = os.path.join(self._data_path, 'testing',
                                  self._frame_sub_dir)
    self._split_dir = os.path.join(self._data_path, 'splits')
    self._imwidth = 1242
    self._imheight = 375
    self._imtype = 'png'
    self._filetype = 'png'
    self.type = 'image'
    self._mode = mode
    # Backwards compatibility
    self._train_sub_folder = 'training'
    self._val_sub_folder = 'training'
    self._test_sub_folder = 'testing'
    self._classes = (
        'dontcare',  # always index 0
        #'Pedestrian',
        #'Cyclist',
        'Car')
    self.config = {'cleanup': True, 'matlab_eval': False, 'rpn_file': None}
    self._class_to_ind = dict(
        list(zip(self.classes, list(range(self.num_classes)))))
    # Frame indices come from the split files rather than directory listings.
    with open(os.path.join(self._split_dir, 'test.txt')) as f:
        self._test_index = f.read().splitlines()
    with open(os.path.join(self._split_dir, 'train.txt')) as f:
        self._train_index = f.read().splitlines()
    with open(os.path.join(self._split_dir, 'val.txt')) as f:
        self._val_index = f.read().splitlines()
    #self._train_index = self._train_index + self._val_index[250:]
    #self._val_index = self._val_index[:250]
    #self._train_index = sorted([d for d in os.listdir(self._train_dir) if d.endswith('.png')])
    #self._val_index = sorted([d for d in os.listdir(self._val_dir) if d.endswith('.png')])
    #self._test_index = sorted([d for d in os.listdir(self._test_dir) if d.endswith('.png')])
    # Limiter
    if (limiter != 0):
        if (limiter < len(self._val_index)):
            self._val_index = self._val_index[:limiter]
        if (limiter < len(self._train_index)):
            self._train_index = self._train_index[:limiter]
        if (limiter < len(self._test_index)):
            self._test_index = self._test_index[:limiter]
    rand = SystemRandom()
    if (shuffle_en):
        print('shuffling image indices')
        rand.shuffle(self._val_index)
        rand.shuffle(self._train_index)
        rand.shuffle(self._test_index)
    assert os.path.exists(self._devkit_path), \
        'Kitti dataset path does not exist: {}'.format(self._devkit_path)
    assert os.path.exists(self._data_path), \
        'Path does not exist: {}'.format(self._data_path)
def __init__(self, mode='test', limiter=0, shuffle_en=True):
    name = 'cadc'
    db.__init__(self, name, mode)
    self._devkit_path = self._get_default_path()
    self._data_path = self._devkit_path
    self._mode = mode
    if (mode == 'test'):
        self._tod_filter_list = cfg.TEST.CADC_FILTER_LIST
    else:
        self._tod_filter_list = cfg.TRAIN.CADC_FILTER_LIST
    scene_desc_filename = os.path.join(self._data_path,
                                       'cadc_scene_description.csv')
    self._load_scene_meta(scene_desc_filename)
    self._uncertainty_sort_type = cfg.UC.SORT_TYPE
    self._draw_width = int((cfg.LIDAR.X_RANGE[1] - cfg.LIDAR.X_RANGE[0]) *
                           (1 / cfg.LIDAR.VOXEL_LEN))
    self._draw_height = int((cfg.LIDAR.Y_RANGE[1] - cfg.LIDAR.Y_RANGE[0]) *
                            (1 / cfg.LIDAR.VOXEL_LEN))
    self._num_slices = cfg.LIDAR.NUM_SLICES
    self._frame_sub_dir = 'point_clouds'
    self._annotation_sub_dir = 'annotation_00'
    self._calib_sub_dir = 'calib'
    self._train_dir = os.path.join(self._data_path, 'train',
                                   self._frame_sub_dir)
    self._val_dir = os.path.join(self._data_path, 'val', self._frame_sub_dir)
    #self._test_dir = os.path.join(self._data_path, 'testing', self._frame_sub_dir)
    self._filetype = 'bin'
    self._imtype = 'PNG'
    self.type = 'lidar'
    self._bev_slice_locations = [1, 2, 3, 4, 5, 7]
    self._mode = mode
    # Backwards compatibility
    #self._train_sub_folder = 'training'
    #self._val_sub_folder = 'evaluation'
    #self._test_sub_folder = 'testing'
    self._classes = (
        'dontcare',  # always index 0
        #'Pedestrian',
        #'Cyclist',
        'Car')
    self.config = {'cleanup': True, 'matlab_eval': False, 'rpn_file': None}
    self._class_to_ind = dict(
        list(zip(self.classes, list(range(self.num_classes)))))
    self._train_index = sorted(
        [d for d in os.listdir(self._train_dir) if d.endswith('.bin')])
    self._val_index = sorted(
        [d for d in os.listdir(self._val_dir) if d.endswith('.bin')])
    #self._test_index = sorted([d for d in os.listdir(self._test_dir) if d.endswith('.bin')])
    # Limiter
    if (limiter != 0):
        if (limiter < len(self._val_index)):
            self._val_index = self._val_index[:limiter]
        if (limiter < len(self._train_index)):
            self._train_index = self._train_index[:limiter]
        #if(limiter < len(self._test_index)):
        #    self._test_index = self._test_index[:limiter]
    rand = SystemRandom()
    if (shuffle_en):
        print('shuffling frame indices')
        rand.shuffle(self._val_index)
        rand.shuffle(self._train_index)
        #rand.shuffle(self._test_index)
    assert os.path.exists(self._devkit_path), \
        'cadc dataset path does not exist: {}'.format(self._devkit_path)
    assert os.path.exists(self._data_path), \
        'Path does not exist: {}'.format(self._data_path)
def __init__(self, mode='test', limiter=0, shuffle_en=True):
    name = 'kitti'
    db.__init__(self, name, mode)
    self._devkit_path = self._get_default_path()
    self._data_path = self._devkit_path
    self._mode = mode
    self._uncertainty_sort_type = cfg.UC.SORT_TYPE
    self._draw_width = int((cfg.LIDAR.X_RANGE[1] - cfg.LIDAR.X_RANGE[0]) *
                           (1 / cfg.LIDAR.VOXEL_LEN))
    self._draw_height = int((cfg.LIDAR.Y_RANGE[1] - cfg.LIDAR.Y_RANGE[0]) *
                            (1 / cfg.LIDAR.VOXEL_LEN))
    self._num_slices = cfg.LIDAR.NUM_SLICES
    self._frame_sub_dir = 'velodyne'
    self._train_dir = os.path.join(self._data_path, 'training',
                                   self._frame_sub_dir)
    self._val_dir = os.path.join(self._data_path, 'training',
                                 self._frame_sub_dir)
    self._test_dir = os.path.join(self._data_path, 'testing',
                                  self._frame_sub_dir)
    self._split_dir = os.path.join(self._data_path, 'splits')
    # Frame indices come from the split files rather than directory listings.
    with open(os.path.join(self._split_dir, 'test.txt')) as f:
        self._test_index = f.read().splitlines()
    with open(os.path.join(self._split_dir, 'train.txt')) as f:
        self._train_index = f.read().splitlines()
    with open(os.path.join(self._split_dir, 'val.txt')) as f:
        self._val_index = f.read().splitlines()
    self._filetype = 'bin'
    self._imtype = 'PNG'
    self.type = 'lidar'
    self._bev_slice_locations = [1, 2, 3, 4, 5, 7]
    self._mode = mode
    # Backwards compatibility
    self._train_sub_folder = 'training'
    self._val_sub_folder = 'training'
    self._test_sub_folder = 'testing'
    self._classes = (
        'dontcare',  # always index 0
        #'Pedestrian',
        #'Cyclist',
        'Car')
    self.config = {'cleanup': True, 'matlab_eval': False, 'rpn_file': None}
    self._class_to_ind = dict(
        list(zip(self.classes, list(range(self.num_classes)))))
    #self._train_index = sorted([d for d in os.listdir(self._train_dir) if d.endswith('.bin')])
    #self._val_index = sorted([d for d in os.listdir(self._val_dir) if d.endswith('.bin')])
    #self._test_index = sorted([d for d in os.listdir(self._test_dir) if d.endswith('.bin')])
    # Limiter
    if (limiter != 0):
        if (limiter < len(self._val_index)):
            self._val_index = self._val_index[:limiter]
        if (limiter < len(self._train_index)):
            self._train_index = self._train_index[:limiter]
        if (limiter < len(self._test_index)):
            self._test_index = self._test_index[:limiter]
    rand = SystemRandom()
    if (shuffle_en):
        print('shuffling frame indices')
        rand.shuffle(self._val_index)
        rand.shuffle(self._train_index)
        rand.shuffle(self._test_index)
    assert os.path.exists(self._devkit_path), \
        'Kitti dataset path does not exist: {}'.format(self._devkit_path)
    assert os.path.exists(self._data_path), \
        'Path does not exist: {}'.format(self._data_path)
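# A standalone sketch of the BEV canvas-size arithmetic used in the lidar
# constructors above, with assumed (hypothetical) range and voxel values
# standing in for the real cfg.LIDAR settings, which are not shown here.
def _bev_canvas_size(x_range, y_range, voxel_len):
    # One canvas cell per voxel along each axis of the configured lidar range.
    width = int((x_range[1] - x_range[0]) * (1 / voxel_len))
    height = int((y_range[1] - y_range[0]) * (1 / voxel_len))
    return width, height

# e.g. _bev_canvas_size((0.0, 70.0), (-40.0, 40.0), 0.1) -> (700, 800)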
def __init__(self, mode='test', limiter=0):
    name = 'nuscenes'
    db.__init__(self, name)
    self._train_scenes = []
    self._val_scenes = []
    self._test_scenes = []
    self._train_index = []
    self._val_index = []
    self._test_index = []
    self._devkit_path = self._get_default_path()
    self._mode = mode
    self._nusc = None
    self._scene_sel = True
    # For now one large cache file is OK, but ideally just take the subset of
    # actually needed data and cache that. No need to load nusc every time.
    self._classes = (
        'dontcare',  # always index 0
        'vehicle.car',
        'human.pedestrian',
        'vehicle.bicycle')
    self.config = {'cleanup': True, 'matlab_eval': False, 'rpn_file': None}
    self._class_to_ind = dict(
        list(zip(self.classes, list(range(self.num_classes)))))
    self._val_scenes = create_splits_scenes()['val']
    self._train_scenes = create_splits_scenes()['train']
    self._test_scenes = create_splits_scenes()['test']
    #TODO: create custom scene list
    #print(self._train_scenes)
    for rec in self.nusc.sample_data:
        if (rec['channel'] == 'CAM_FRONT' and rec['is_key_frame'] is True):
            rec_tmp = deepcopy(rec)
            # Reverse lookup, getting the overall sample from the picture
            # sample token, to get the scene information.
            scene_name = self.nusc.get(
                'scene',
                self.nusc.get('sample',
                              rec['sample_token'])['scene_token'])['name']
            desc = self.nusc.get(
                'scene',
                self.nusc.get(
                    'sample',
                    rec['sample_token'])['scene_token'])['description'].lower()
            if (self._scene_sel and 'night' not in desc and 'rain' not in desc
                    and 'cones' not in desc):
                sample = self.nusc.get('sample', rec['sample_token'])
                rec_tmp['anns'] = sample['anns']
                rec_tmp['lidar_token'] = sample['data']['LIDAR_TOP']
                if (scene_name in self._train_scenes):
                    self._train_index.append(rec_tmp)
                elif (scene_name in self._val_scenes):
                    self._val_index.append(rec_tmp)
                elif (scene_name in self._test_scenes):
                    self._test_index.append(rec_tmp)
    rand = SystemRandom()
    # Get global image info
    if (mode == 'train'):
        img_index = self._train_index
        rand.shuffle(self._val_index)
    elif (mode == 'val'):
        img_index = self._val_index
    elif (mode == 'test'):
        img_index = self._test_index
    self._imwidth = img_index[0]['width']
    self._imheight = img_index[0]['height']
    self._imtype = img_index[0]['fileformat']
    rand.shuffle(img_index)
    if (limiter != 0):
        img_index = img_index[:limiter]
    if (mode == 'train'):
        self._train_index = img_index
    elif (mode == 'val'):
        self._val_index = img_index
    elif (mode == 'test'):
        self._test_index = img_index
    assert os.path.exists(
        self._devkit_path), 'nuscenes dataset path does not exist: {}'.format(
            self._devkit_path)
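# A standalone sketch of the scene-selection predicate applied in the loop
# above: front-camera key frames are kept only when scene selection is enabled
# and the scene description mentions none of the filtered conditions. The
# helper name and defaults are illustrative, not part of the class.
def _keep_scene(description, scene_sel=True,
                filtered_terms=('night', 'rain', 'cones')):
    desc = description.lower()
    return scene_sel and all(term not in desc for term in filtered_terms)

# _keep_scene('Rain, parked cars')           -> False
# _keep_scene('Clear day, many pedestrians') -> True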