def __init__(self, data_dir, dictionary_file, split, split_ratio=1.0, img_size=512, padding=31):
    super(COCOSEGMSNAKE, self).__init__()
    self.num_classes = 80
    self.class_name = COCO_NAMES
    self.valid_ids = COCO_IDS
    self.cat_ids = {v: i for i, v in enumerate(self.valid_ids)}

    self.data_rng = np.random.RandomState(99)
    self.eig_val = np.array(COCO_EIGEN_VALUES, dtype=np.float32)
    self.eig_vec = np.array(COCO_EIGEN_VECTORS, dtype=np.float32)
    self.mean = np.array(COCO_MEAN, dtype=np.float32)[None, None, :]
    self.std = np.array(COCO_STD, dtype=np.float32)[None, None, :]

    self.split = split
    self.dictionary_file = dictionary_file
    self.data_dir = data_dir
    self.img_dir = os.path.join(self.data_dir, '%s2017' % split)
    if split == 'test':
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'image_info_test-dev2017.json')
    else:
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'instances_%s2017.json' % split)

    self.max_objs = 128
    self.padding = padding
    self.down_ratio = 4
    self.img_size = {'h': img_size, 'w': img_size}
    self.fmap_size = {'h': img_size // self.down_ratio,
                      'w': img_size // self.down_ratio}
    self.rand_scales = np.arange(0.6, 1.3, 0.1)
    self.gaussian_iou = 0.7
    self.n_vertices = 32
    self.n_codes = 64
    self.sparse_alpha = 0.01

    print('==> initializing coco 2017 %s data.' % split)
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.dictionary = np.load(self.dictionary_file)  # ndarray, shape (n_coeffs, n_vertices * 2)

    if 0 < split_ratio < 1:
        split_size = int(np.clip(split_ratio * len(self.images), 1, len(self.images)))
        self.images = self.images[:split_size]

    self.num_samples = len(self.images)
    print('Loaded %d %s samples' % (self.num_samples, split))
def __init__(self, opt, split):
    super(COCO, self).__init__()
    self.data_dir = os.path.join(opt.data_dir, 'coco')
    self.img_dir = os.path.join(self.data_dir, "images", '{}2014'.format(split))
    if split == 'test':
        self.annot_path = os.path.join(
            self.data_dir, 'annotations',
            'image_info_test-dev2017.json').format(split)
    else:
        self.annot_path = os.path.join(
            self.data_dir, 'annotations',
            'instances_{}2014.json').format(split)
    self.max_objs = 128
    self.class_name = [
        '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
        'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
        'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
        'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
        'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
        'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
        'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
        'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
        'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
        'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet',
        'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
        'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]
    self._valid_ids = [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
        22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
        43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
        62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84,
        85, 86, 87, 88, 89, 90
    ]
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
    # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
    self.split = split
    self.opt = opt

    print('==> initializing coco 2017 {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def evaluate_coco():
    c = coco.COCO(minival_gt_file)
    cocoDt = c.loadRes(minival_det_file)
    cocoEval = COCOeval(c, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    print(cocoEval.stats[0])
def __init__(self, cfg, split='train', augment=True):
    super(COCO, self).__init__()
    self.data_dir = cfg.data_dir
    self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
    self.annot_path = os.path.join(self.data_dir, 'annotations',
                                   'instances_{}2017.json').format(split)
    self.split = split

    print('==> initializing coco 2017 {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)

    self.class_name = [
        '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
        'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
        'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
        'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
        'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
        'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
        'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
        'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
        'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
        'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet',
        'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
        'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]
    self._valid_ids = [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
        22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
        43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
        62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84,
        85, 86, 87, 88, 89, 90
    ]
    if cfg.class_name != '*':
        self._valid_ids = [self.class_name.index(cfg.class_name)]
        self.class_name = [cfg.class_name]
        catIds = self.coco.getCatIds(self.class_name[-1])
        assert catIds == self._valid_ids
        self.images = self.coco.getImgIds(self.images, catIds)
        self.num_samples = len(self.images)
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}

    self.input_w = cfg.input_w
    self.input_h = cfg.input_h
    self.base_stride = cfg.base_stride
    self.base_window = cfg.base_window
    self.k = cfg.k
    self.num_class = len(self.class_name)
    self.augment = augment
    self.max_objs = cfg.max_objs
    self.jitter = cfg.jitter
    self.cfg = cfg
    if not self.augment:
        self.jitter = 0
    print('Loaded {} {} samples'.format(split, self.num_samples))
def __init__(self, opt, split):
    super(KITTI, self).__init__()
    self.data_dir = os.path.join(opt.data_dir, 'kitti')
    # todo: check if this works
    self.img_dir = os.path.join(self.data_dir, 'training', 'image_2')
    if opt.trainval:
        split = 'trainval' if split == 'train' else 'test'
        self.img_dir = os.path.join(self.data_dir, 'images', split)
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'kitti_{}.json').format(split)
    elif split == 'test':
        self.img_dir = os.path.join(self.data_dir, 'testing', 'image_2')
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'kitti_{}.json').format(split)
    elif split == "video":
        self.img_dir = os.path.join(self.data_dir,
                                    '2011_09_30_drive_0027_sync', 'data')
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'kitti_2011_09_30_drive_0027_sync.json')
    else:
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'kitti_{}_{}.json').format(
                                           opt.kitti_split, split)
    self.max_objs = 30
    self.class_name = ['__background__', 'Car', 'Pedestrian', 'Cyclist']
    # Pedestrian: 1, Car: 2, Cyclist: 3
    self.cat_ids = {1: 1, 2: 0, 3: 2, 4: -3, 5: -3, 6: -2, 7: -99, 8: -99, 9: -1}

    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    self.split = split
    self.opt = opt
    self.alpha_in_degree = False

    print('==> initializing kitti {}, {} data.'.format(opt.kitti_split, split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def __init__(self, opt, split):
    super(FoggyCityscapes, self).__init__()
    self.data_dir = os.path.join(
        opt.data_dir, 'foggy_cityscapes_data/coco_foggy_cityscapes')  # check
    self.img_dir = os.path.join(self.data_dir, 'images')  # check
    if split == 'val':
        self.annot_path = os.path.join(
            self.data_dir, 'annotations',
            'foggy_instancesonly_filtered_gtFine_val.json')  # check
    elif split == 'test':
        self.annot_path = os.path.join(
            self.data_dir, 'annotations',
            'foggy_instancesonly_filtered_gtFine_val.json')  # check
    else:
        if opt.task == 'exdet':
            self.annot_path = os.path.join(
                self.data_dir, 'annotations',
                'foggy_instancesonly_filtered_gtFine_train.json')  # check
        else:
            self.annot_path = os.path.join(
                self.data_dir, 'annotations',
                'foggy_instancesonly_filtered_gtFine_train.json')  # check
    self.max_objs = 128
    self.class_name = [  # check
        'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
        'bicycle',
    ]
    self._valid_ids = [1, 2, 3, 4, 5, 6, 7, 8]  # check
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
    # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
    self.split = split
    self.opt = opt

    print('==> initializing coco_foggy_cityscapes {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def test_single_scale():
    cocoGt = cc.COCO("annotations/person_keypoints_val2017.json")
    cocoDt = cocoGt.loadRes("coco_result.json")
    cocoEval = ce.COCOeval(cocoGt, cocoDt, 'keypoints')
    cocoEval.params.imgIds = cocoGt.getImgIds()
    cocoEval.evaluate()
    cocoEval.accumulate()
    print("Single Scale")
    cocoEval.summarize()
def init_coco(self):
    # only import this dependency on demand
    import pycocotools.coco as coco
    self.coco = coco.COCO(self.annotation_file)
    ann_ids = self.coco.getAnnIds([])
    self.anns = self.coco.loadAnns(ann_ids)
    self.label_map = {k - 1: v for k, v in self.coco.cats.items()}
    self.filename_to_anns = dict()
    self.build_filename_to_anns_dict()
def coco_bbox_eval(result_file, annotation_file):
    ann_type = 'bbox'
    coco_gt = COCO.COCO(annotation_file)
    coco_dt = coco_gt.loadRes(result_file)
    cocoevaler = COCOeval.COCOeval(coco_gt, coco_dt, ann_type)
    cocoevaler.evaluate()
    cocoevaler.accumulate()
    cocoevaler.summarize()
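Several snippets in this collection (evaluate_coco, coco_bbox_eval, eval, evaluate) run the same pycocotools box-evaluation loop under different import aliases. For reference, a minimal self-contained sketch of that flow with explicit imports; the default file paths below are hypothetical placeholders, not from any of the snippets.

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

def evaluate_bbox(annotation_file="instances_val2017.json",
                  result_file="detections.json"):
    # Ground truth plus detections in the standard COCO result format.
    coco_gt = COCO(annotation_file)
    coco_dt = coco_gt.loadRes(result_file)
    coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()      # prints the twelve standard COCO metrics
    return coco_eval.stats     # stats[0] is AP at IoU=0.50:0.95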
def adjust_detections():
    c = coco.COCO(minival_gt_file)
    keys = list(c.cats.keys())
    detections_list = json.load(open(minival_det_file))
    for det in detections_list:
        det['category_id'] = keys[det['category_id']]
    json.dump(
        detections_list,
        open("/home/krause/vision/savitar2/forwarded/temp_edited.json", 'w'))
def __init__(self, opt, split):
    super(BDD_Daytime, self).__init__()
    self.data_dir = os.path.join(opt.data_dir,
                                 'bdd_data\\bdd_daytime_city')  # check
    self.img_dir = os.path.join(self.data_dir, 'images')  # check
    if split == 'val':
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'bdd_daytime_val.json')  # check
    elif split == 'test':
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'bdd_daytime_val.json')  # check
    else:
        if opt.task == 'exdet':
            self.annot_path = os.path.join(self.data_dir, 'annotations',
                                           'bdd_daytime_train.json')  # check
        else:
            self.annot_path = os.path.join(self.data_dir, 'annotations',
                                           'bdd_daytime_train.json')  # check
    self.max_objs = 128
    self.class_name = [  # check
        "person", "rider", "car", "bus", "truck", "bike", "motor",
        "traffic light", "traffic sign", "train",
    ]
    self._valid_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]  # check
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
    # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
    self.split = split
    self.opt = opt

    print('==> initializing coco_bdd_daytime {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def __init__(self, opt, split):
    super(CAMERA, self).__init__()
    self.data_dir = opt.data_dir
    self.img_dir = {}
    for sensor in self.sensor_list:
        self.img_dir[sensor] = os.path.join(self.data_dir, sensor, 'images')
    if split == 'val':
        self.annot_path = os.path.join(self.data_dir, 'annotations', 'val.json')
    else:
        if opt.task == 'exdet':
            self.annot_path = os.path.join(self.data_dir, 'annotations',
                                           'train.json')
        if split == 'test':
            self.annot_path = os.path.join(self.data_dir, 'annotations',
                                           opt.test_dataset + '.json')
        else:
            self.annot_path = os.path.join(self.data_dir, 'annotations',
                                           'train.json')
    self.max_objs = 100
    if len(cf.categories) == 5:
        self.class_name = ['__background__', 'bike', 'car', 'car_stop',
                           'color_cone', 'person']
        self._valid_ids = [0, 1, 2, 3, 4, 5]
    else:
        self.class_name = ['__background__', 'bike', 'car', 'color_cone',
                           'person']
        self._valid_ids = [0, 1, 2, 3, 4]
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    self.split = split
    self.opt = opt

    print('==> initializing coco 2017 {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def __init__(self, opt, split):
    super(MHP, self).__init__()
    self.data_dir = os.path.join(opt.data_dir, 'coco')
    self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
    if split == 'test':
        self.annot_path = os.path.join(
            self.data_dir, 'annotations',
            'image_info_test-dev2017.json').format(split)
    else:
        if opt.task == 'exdet':
            self.annot_path = os.path.join(
                self.data_dir, 'annotations',
                'instances_extreme_{}2017.json').format(split)
        else:
            self.annot_path = os.path.join(
                self.data_dir, 'annotations',
                'instances_{}2017.json').format(split)
    self.max_objs = 128
    self.class_name = [
        "cap/hat", "helmet", "face", "hair", "left-arm", "right-arm",
        "left-hand", "right-hand", "protector", "bikini-bra",
        "jacket-windbreaker-hoodie", "t-shirt", "polo-shirt", "sweater",
        "sin-glet", "torso-skin", "pants", "shorts-swim-shorts", "skirt",
        "stock-ings", "socks", "left-boot", "right-boot", "left-shoe",
        "right-shoe", "left-highheel", "right-highheel", "left-sandal",
        "right-sandal", "left-leg", "right-leg", "left-foot", "right-foot",
        "coat", "dress", "robe", "jumpsuits", "other-full-body-clothes",
        "headwear", "backpack", "ball", "bats", "belt", "bottle", "carrybag",
        "cases", "sunglasses", "eyewear", "gloves", "scarf", "umbrella",
        "wallet-purse", "watch", "wristband", "tie", "other-accessaries",
        "other-upper-body-clothes", "other-lower-body-clothes"
    ]
    self._valid_ids = range(1, 59)
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
    # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
    self.split = split
    self.opt = opt

    print('==> initializing coco 2017 {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def __init__(self, opt, split):
    super(Graduation, self).__init__()
    self.data_dir = os.path.join(opt.data_dir, 'graduation')
    self.img_dir = os.path.join(self.data_dir, 'images')
    if split == 'val':
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'train.json').format(split)
    else:
        if opt.task == 'exdet':
            self.annot_path = os.path.join(self.data_dir, 'annotations',
                                           'train.json').format(split)
        else:
            self.annot_path = os.path.join(self.data_dir, 'annotations',
                                           'train.json').format(split)
    self.max_objs = 128
    self.class_name = [
        '__background__', 'fishing_boat', 'river_boat', 'container_ship',
        'speedboat', 'official_ship', 'bulker', 'cruise', 'ferry', 'tug',
        'tanker', 'engineering_ship', 'RoRo_ship', 'timber_ship', 'LPG_ship',
    ]
    self._valid_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
    # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
    self.split = split
    self.opt = opt

    print('==> initializing coco 2017 {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def __init__(self, config, split):
    super(COCO, self).__init__()
    config = config.DATASET
    self.data_dir = config.DATA_DIR
    self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
    self.annot_path = os.path.join(self.data_dir, 'annotations',
                                   'instances_{}2017.json').format(split)
    self.class_name = [
        '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
        'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
        'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
        'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
        'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
        'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
        'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
        'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
        'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
        'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet',
        'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
        'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]
    self._valid_ids = [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
        22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
        43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
        62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84,
        85, 86, 87, 88, 89, 90
    ]
    self.max_objs = 80
    self.num_classes = 80
    self.mean = np.array([0.40789654, 0.44719302, 0.47026115],
                         dtype=np.float32).reshape(1, 1, 3)
    self.std = np.array([0.28863828, 0.27408164, 0.27809835],
                        dtype=np.float32).reshape(1, 1, 3)
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self.split = split
    self.config = config

    print('==> initializing coco 2017 {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def __init__(self, config, subset, coord, fraction=1.0,
             ignore_classes=IGNORE_CLASSES, num_classes=NUM_CLASSES):
    super(COCODataset, self).__init__("coco", COCO_DEFAULT_PATH, num_classes,
                                      config, subset, coord, INPUT_SIZE,
                                      COCO_VOID_LABEL, fraction,
                                      label_load_fn=self.label_load_fn,
                                      img_load_fn=self.img_load_fn,
                                      ignore_classes=ignore_classes)
    if subset == "train":
        self.data_type = "train2014"
        self.filter_crowd_images = config.bool("filter_crowd_images", False)
        self.min_box_size = config.float("min_box_size", -1.0)
    else:
        self.data_type = "val2014"
        self.filter_crowd_images = False
        self.min_box_size = config.float("min_box_size_val", -1.0)

    # Use the minival split as done in
    # https://github.com/rbgirshick/py-faster-rcnn/blob/master/data/README.md
    self.annotation_file = '%s/annotations/instances_%s.json' % (self.data_dir,
                                                                 subset)

    self.restricted_image_category_list = config.unicode_list(
        "restricted_image_category_list", [])
    if len(self.restricted_image_category_list) == 0:
        self.restricted_image_category_list = None
    self.restricted_annotations_category_list = config.unicode_list(
        "restricted_annotations_category_list", [])
    if len(self.restricted_annotations_category_list) == 0:
        self.restricted_annotations_category_list = None
    # either both of them or none should be specified for now to avoid
    # unintuitive behaviour
    assert (self.restricted_image_category_list is None and
            self.restricted_annotations_category_list is None) or \
           (self.restricted_image_category_list is not None and
            self.restricted_annotations_category_list is not None), \
        (self.restricted_image_category_list,
         self.restricted_annotations_category_list)

    # only import this dependency on demand
    import pycocotools.coco as coco
    self.coco = coco.COCO(self.annotation_file)
    ann_ids = self.coco.getAnnIds([])
    self.anns = self.coco.loadAnns(ann_ids)
    self.label_map = {k - 1: v for k, v in list(self.coco.cats.items())}
    self.filename_to_anns = dict()
    self.build_filename_to_anns_dict()
def __init__(self, opt, split):
    super(JAC_COCO_36, self).__init__()
    self.data_dir = os.path.join(opt.data_dir, 'Jacquard')
    if split:
        self.img_dir = os.path.join(self.data_dir, 'coco/512_cnt_angle', split,
                                    'grasps_{}2018'.format(split))
        if opt.flag_test:
            self.annot_path = os.path.join(
                self.data_dir, 'coco/512_cnt_angle', split,
                'instances_grasps_{}2018.json').format(split)
        else:
            self.annot_path = os.path.join(
                self.data_dir, 'coco/512_cnt_angle', split,
                'instances_grasps_{}2018_filter.json').format(split)
    self.max_objs = 128
    self.avg_h = 20.
    self.class_name = [
        "__background__", "orient01", "orient02", "orient03", "orient04",
        "orient05", "orient06", "orient07", "orient08", "orient09", "orient10",
        "orient11", "orient12", "orient13", "orient14", "orient15", "orient16",
        "orient17", "orient18", "orient19", "orient20", "orient21", "orient22",
        "orient23", "orient24", "orient25", "orient26", "orient27", "orient28",
        "orient29", "orient30", "orient31", "orient32", "orient33", "orient34",
        "orient35", "orient36"
    ]
    self._valid_ids = [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
        21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36
    ]
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}  # rx
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    self.split = split
    self.opt = opt

    print('==> initializing jacquard dataset in coco format {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()[:]
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def process_coco_edge_box():
    pool = Pool(processes=8)
    db = coco.COCO('/media/zawlin/ssd/coco/annotations/captions_train2014.json')
    cnt = 0
    for k in db.imgs:
        cnt += 1
        im = db.imgs[k]
        pool.apply_async(do_one_image, (im['file_name'], ))
    pool.close()
    pool.join()
def __init__(self, data_dir, split, split_ratio=1.0, gaussian=True, img_size=511):
    super(COCO, self).__init__()
    self.split = split
    self.gaussian = gaussian
    self.down_ratio = 4
    self.img_size = {'h': img_size, 'w': img_size}
    self.fmap_size = {'h': (img_size + 1) // self.down_ratio,
                      'w': (img_size + 1) // self.down_ratio}
    self.padding = 128
    self.data_rng = np.random.RandomState(123)
    self.rand_scales = np.arange(0.6, 1.4, 0.1)
    self.gaussian_iou = 0.3

    self.data_dir = os.path.join(data_dir, 'coco')
    self.img_dir = os.path.join(self.data_dir, '%s2017' % split)
    if split == 'test':
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'image_info_test-dev2017.json')
    else:
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       'instances_%s2017.json' % split)

    self.num_classes = 80
    self.class_name = COCO_NAMES
    self.valid_ids = COCO_IDS
    self.cat_ids = {v: i for i, v in enumerate(self.valid_ids)}
    self.max_objs = 128
    self.eig_val = np.array(COCO_EIGEN_VALUES, dtype=np.float32)
    self.eig_vec = np.array(COCO_EIGEN_VECTORS, dtype=np.float32)
    self.mean = np.array(COCO_MEAN, dtype=np.float32)[None, None, :]
    self.std = np.array(COCO_STD, dtype=np.float32)[None, None, :]

    print('==> initializing coco 2017 %s data.' % split)
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()

    if 0 < split_ratio < 1:
        split_size = int(np.clip(split_ratio * len(self.images), 1, len(self.images)))
        self.images = self.images[:split_size]

    self.num_samples = len(self.images)
    print('Loaded %d %s samples' % (self.num_samples, split))
def eval(gt_file, det_file):
    coco_gt = coco.COCO(gt_file)
    coco_dets = coco_gt.loadRes(det_file)
    coco_eval = COCOeval(coco_gt, coco_dets, "bbox")
    coco_eval.params.maxDets = [200]
    coco_eval.params.iouThrs = np.array([0.5])
    coco_eval.params.fppiThrs = np.logspace(-2, 0, 9)
    coco_eval.evaluate()
    accumulate(coco_eval)
    summarize(coco_eval)
    return coco_eval
def LoadAnnotations(self, annotations):
    """Load annotations dictionary into COCO datastructure.

    See http://mscoco.org/dataset/#format for a description of the annotations
    format. As above, this function replicates the default behavior of the API
    but does not require writing to external storage.

    Args:
      annotations: python list holding object detection results where each
        detection is encoded as a dict with required keys ['image_id',
        'category_id', 'score'] and one of ['bbox', 'segmentation'] based on
        `detection_type`.

    Returns:
      a coco.COCO datastructure holding object detection annotations results

    Raises:
      ValueError: if annotations is not a list
      ValueError: if annotations do not correspond to the images contained
        in self.
    """
    results = coco.COCO()
    results.dataset['images'] = [img for img in self.dataset['images']]

    # tf.logging.info('Loading and preparing annotation results...')
    print('Loading and preparing annotation results...')
    tic = time.time()

    if not isinstance(annotations, list):
        raise ValueError('annotations is not a list of objects')
    annotation_img_ids = [ann['image_id'] for ann in annotations]
    if (set(annotation_img_ids) !=
            (set(annotation_img_ids) & set(self.getImgIds()))):
        raise ValueError('Results do not correspond to current coco set')
    results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
    if self._detection_type == 'bbox':
        for idx, ann in enumerate(annotations):
            bb = ann['bbox']
            ann['area'] = bb[2] * bb[3]
            ann['id'] = idx + 1
            ann['iscrowd'] = 0
    elif self._detection_type == 'segmentation':
        for idx, ann in enumerate(annotations):
            ann['area'] = mask.area(ann['segmentation'])
            ann['bbox'] = mask.toBbox(ann['segmentation'])
            ann['id'] = idx + 1
            ann['iscrowd'] = 0
    # tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))
    print('DONE (t=%0.2fs)' % (time.time() - tic))

    results.dataset['annotations'] = annotations
    results.createIndex()
    return results
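The docstring above describes detections as dicts carrying 'image_id', 'category_id', 'score' and either a 'bbox' or a 'segmentation'. A minimal usage sketch follows, assuming LoadAnnotations is defined on a COCO subclass built from the ground-truth dataset with detection_type='bbox'; the gt_wrapper object, the image id, and the box values are illustrative placeholders, not from the source.

from pycocotools.cocoeval import COCOeval

# Hypothetical detections for one image; bbox is [x, y, width, height].
detections = [
    {"image_id": 42, "category_id": 1, "score": 0.93,
     "bbox": [10.0, 20.0, 50.0, 80.0]},
]
# gt_wrapper: instance of the COCO subclass that defines LoadAnnotations.
results = gt_wrapper.LoadAnnotations(detections)   # indexed coco.COCO of results
coco_eval = COCOeval(gt_wrapper, results, iouType="bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()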
def __init__(self, opt, dataset):
    self.images = dataset.images
    self.load_image_func = dataset.coco.loadImgs
    self.img_dir = dataset.img_dir
    self.get_default_calib = dataset.get_default_calib
    self.opt = opt
    split_name = "val"
    data_dir = os.path.join(opt.data_dir, "nuscenes")
    ann_path = os.path.join(data_dir, "annotations",
                            "{}{}.json").format(opt.dataset_version, split_name)
    self.coco = coco.COCO(ann_path)
def _test_dataset(annotation_file):
    coco = cocoapi.COCO(annotation_file)
    # Load categories from annotation file
    cats = [cat['name'] for cat in coco.loadCats(coco.getCatIds())]
    classes = ['__background__'] + cats
    num_classes = len(classes)
    _class_to_ind = dict(zip(classes, range(num_classes)))
    _class_to_coco_ind = dict(zip(cats, coco.getCatIds()))
    _coco_ind_to_class_ind = dict([(_class_to_coco_ind[cls], _class_to_ind[cls])
                                   for cls in classes[1:]])
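To make the three mappings above concrete, here is a toy sketch under the assumption of a hypothetical annotation file with only two categories, 'person' (COCO id 1) and 'dog' (COCO id 18); the category names and ids are illustrative.

# COCO category ids are sparse, so they get remapped to a contiguous
# 1..N index with 0 reserved for the background class.
cats = ['person', 'dog']            # names, in getCatIds() order
coco_cat_ids = [1, 18]              # sparse COCO category ids for those names
classes = ['__background__'] + cats
class_to_ind = dict(zip(classes, range(len(classes))))
class_to_coco_ind = dict(zip(cats, coco_cat_ids))
coco_ind_to_class_ind = {class_to_coco_ind[c]: class_to_ind[c] for c in classes[1:]}
print(class_to_ind)                 # {'__background__': 0, 'person': 1, 'dog': 2}
print(coco_ind_to_class_ind)        # {1: 1, 18: 2}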
def __init__(self, opt, split):
    super(VisDrone, self).__init__()
    self.data_dir = os.path.join(opt.data_dir, 'VISDRONE')
    self.data_dir = os.path.join(self.data_dir, 'Images')
    if split == 'test':
        self.data_dir = os.path.join(self.data_dir, 'VisDrone2018-DET-val')
        self.img_dir = os.path.join(self.data_dir, 'images')
        self.annot_dir = os.path.join(self.data_dir, 'annotations')
        self.annot_path = os.path.join(self.annot_dir,
                                       'instances.json').format(split)
    else:
        self.data_dir = os.path.join(self.data_dir, 'VisDrone2018-DET-train')
        self.img_dir = os.path.join(self.data_dir, 'images')
        self.annot_dir = os.path.join(self.data_dir, 'annotations')
        self.annot_path = os.path.join(self.annot_dir,
                                       'instances.json').format(split)
    self.max_objs = 512
    self.class_name = [
        'Ignored Regions', 'Pedestrian', 'People', 'Bicycle', 'Car', 'Van',
        'Truck', 'Tricycle', 'Awning-tricycle', 'Bus', 'Motorbike', 'Other'
    ]
    self._valid_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
    self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
    self.split = split
    self.opt = opt

    print('==> initializing visdrone {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def __init__(self, opt, split):
    super(COCOHP, self).__init__()
    self.edges = [[0, 1], [1, 2], [2, 3], [3, 0]]
    # self.edges = [[0, 1], [0, 2], [1, 3], [2, 4],
    #               [4, 6], [3, 5], [5, 6],
    #               [5, 7], [7, 9], [6, 8], [8, 10],
    #               [6, 12], [5, 11], [11, 12],
    #               [12, 14], [14, 16], [11, 13], [13, 15]]
    self.acc_idxs = [1, 2, 3, 4]
    self.data_dir = os.path.join(opt.data_dir, 'coco')
    self.img_dir = os.path.join(self.data_dir, 'images', '{}4582'.format(split))
    # self.img_dir = os.path.join(self.data_dir, 'images', '{}2017'.format(split))
    if split == 'test':
        self.annot_path = os.path.join(
            self.data_dir, 'annotations',
            'parking_keypoints_{}4582.json').format(split)
            # 'image_info_test-dev2017.json').format(split)
    else:
        self.annot_path = os.path.join(
            self.data_dir, 'annotations',
            'parking_keypoints_{}4582.json').format(split)
            # 'person_keypoints_{}2017.json').format(split)
    self.max_objs = 64
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    self.split = split
    self.opt = opt

    # print('==> initializing coco 2017 {} data.'.format(split))
    print('==> initializing coco 4582 {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    image_ids = self.coco.getImgIds()

    if split == 'train':
    # if split == 'trainval':
        self.images = []
        for img_id in image_ids:
            idxs = self.coco.getAnnIds(imgIds=[img_id])
            if len(idxs) > 0:
                self.images.append(img_id)
    else:
        self.images = image_ids
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def evaluate(args):
    data_dir = args.dataset_root
    data_type = 'val2014'
    ann_file = "{0}/annotations/{1}_{2}.cars.json".format(data_dir, 'instances',
                                                          data_type)
    results_file = "predictions.json"
    coco_gt = coco.COCO(ann_file)
    coco_det = coco_gt.loadRes(results_file)
    coco_eval = cocoeval.COCOeval(coco_gt, coco_det, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
def __init__(
    self,
    ann_file: str,
    img_dir: str,
    stage: str = "train",
    transforms=A.Compose([A.ToFloat()]),
):
    super().__init__()
    self.coco = coco.COCO(ann_file)
    self.ids = list(sorted(self.coco.imgs.keys()))
    self.img_dir = img_dir
    self.stage = stage
    self.transforms = transforms
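The __init__ above only builds the COCO index and stores the albumentations transforms; the rest of the dataset class is not shown. Below is a hedged sketch of what a companion __getitem__ and __len__ could look like, assuming a torch-style dataset that loads images with PIL and returns the raw COCO annotations; the class name CocoDetectionDataset and the return format are assumptions, not from the source.

import os
import numpy as np
from PIL import Image

class CocoDetectionDataset:
    """Hypothetical companion to the __init__ above (name and return format assumed)."""

    def __getitem__(self, index):
        img_id = self.ids[index]
        # Resolve the image path from the COCO index built in __init__.
        file_name = self.coco.loadImgs(img_id)[0]["file_name"]
        image = np.array(Image.open(os.path.join(self.img_dir, file_name)).convert("RGB"))
        # Load every annotation attached to this image.
        anns = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
        # Albumentations transforms operate on named arrays and return a dict.
        image = self.transforms(image=image)["image"]
        return image, anns

    def __len__(self):
        return len(self.ids)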
def __init__(self, opt, split):
    super(COCO, self).__init__()
    self.data_dir = os.path.join(opt.data_dir, 'coco')
    self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
    if split == 'test':
        self.annot_path = os.path.join(self.data_dir, "validation",
                                       'vis_val.json')
    else:
        if opt.task == 'exdet':
            self.annot_path = os.path.join(
                self.data_dir, 'annotations',
                'instances_extreme_{}2017.json').format(split)
        else:
            self.annot_path = os.path.join(self.data_dir, 'annotations',
                                           'new.json').format(split)
    self.max_objs = 128
    self.class_name = [
        '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
        'bus', 'train', 'truck', 'boat'
    ]
    self._valid_ids = [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
        21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
        42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
        61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
        84, 85, 86, 87, 88, 89, 90
    ]
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
    # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
    self.split = split
    self.opt = opt

    print('==> initializing data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    # print(self.coco.loadImgs([1]))
    print('Loaded {} {} samples'.format(split, self.num_samples))
def __init__(self, opt, split):
    super(Driving, self).__init__()
    self.data_dir = '/scratch/jl5/'
    self.img_dir = os.path.join(self.data_dir, 'driving1000')
    if split == 'test':
        self.annot_path = '/data2/jl5/mmdetect_results/driving1000/fifth_test.json'
    else:
        self.annot_path = '/data2/jl5/mmdetect_results/driving1000/fifth_train.json'
        self.modify_json(self.annot_path, opt.data_thresh)
        self.annot_path = '/scratch/jl5/fifth_train.json'
    self.max_objs = 128
    self.class_name = [
        '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
        'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
        'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
        'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
        'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
        'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
        'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
        'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
        'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
        'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet',
        'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
        'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]
    self._valid_ids = [i for i in range(1, 81)]
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32)
                      for v in range(1, self.num_classes + 1)]
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
    # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
    self.split = split
    self.opt = opt

    print('==> initializing coco 2017 {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
def __init__(self, opt, split):
    super(Cigar, self).__init__()
    dt_name = 'cigar_box'  # todo: rect box
    self.data_dir = os.path.join(opt.data_dir, dt_name)
    self.img_dir = ''  # as file_name is an absolute path
    if split == 'test':
        self.annot_path = os.path.join(self.data_dir, 'annotations',
                                       '{}_{}_{}.json').format(
                                           Cigar.num_classes, dt_name, split)
    else:
        if opt.task == 'exdet':  # train or val
            self.annot_path = os.path.join(self.data_dir, 'annotations',
                                           '{}_{}_{}.json').format(
                                               Cigar.num_classes, dt_name, split)
        else:  # ctdet,..?
            self.annot_path = os.path.join(self.data_dir, 'annotations',
                                           '{}_{}_{}.json').format(
                                               Cigar.num_classes, dt_name, split)
    self.max_objs = 10
    # self.max_objs = 128
    self.class_name = [
        '__background__', 'DaZhongJiu_A', 'YunYan_a', 'JiaoZi_B', 'ZhongHua_B',
        'LiQun_a', 'HuangHeLou_e', 'YunYan_A', 'JiaoZi_F', 'HuangHeLou_h',
        'HuangHeLou_E', 'HuangJinYe_C', '555_a', 'HongTaShan_b', 'YuXi_A',
        'HuangGuoShu_a', 'JiaoZi_K', 'HuangHeLou_A', 'JiaoZi_E', 'TianZi_a',
        'TianZi_A'
    ]
    # note: _valid_ids match the real cat_id values in the annotation json
    self._valid_ids = np.arange(1, 21, dtype=np.int32)
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}  # value -> index

    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([[-0.58752847, -0.69563484, 0.41340352],
                              [-0.5832747, 0.00994535, -0.81221408],
                              [-0.56089297, 0.71832671, 0.41158938]],
                             dtype=np.float32)
    self.split = split
    self.opt = opt

    print('==> initializing cigar {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = sorted(self.coco.getImgIds())
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))