# NOTE(review): tail of a registration lambda whose start lies outside this
# chunk (presumably a pascal_voc factory call); kept verbatim.
split, year, use_diff=True))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
        name = 'coco_{}_{}'.format(year, split)
        # Bind loop variables as lambda defaults so each factory captures its
        # own split/year (late-binding closure pitfall).
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Machine-specific, hard-coded devkit location for the custom 2017 set.
imagenet_devkit_path = '/media/lealex262/OS/Users/Alex Thien An Le/Documents/GeorgiaTech/ClubsnProjects/Hackaton/HomeDepot_DeepLearning_Fall2017/active_window/tf-faster-rcnn/data'

for split in ['train', 'test']:
    name = 'imagenet_{}_{}'.format('2017', split)
    __sets[name] = (
        lambda split=split: imagenet(split, "2017", imagenet_devkit_path))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()


def list_imdbs():
    """List all registered imdbs."""
    return list(__sets.keys())
        # NOTE(review): tail of an evaluation-summary printer; the method
        # header and the computation of `aps` lie outside this chunk.
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')

    def evaluate_detections(self, all_boxes, output_dir):
        # Write per-class ILSVRC-format result files, run the Python eval,
        # then optionally delete the result files afterwards.
        self._write_ILSVRC_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_ILSVRC_results_file_template().format(cls)
                os.remove(filename)


if __name__ == '__main__':
    # Ad-hoc debugging harness: build the train roidb, then drop into IPython.
    from datasets.imagenet import imagenet
    d = imagenet('train', '2015', '/disk2/data/ILSVRC2015')
    res = d.roidb
    from IPython import embed; embed()
from datasets.caltech import caltech from datasets.sun import sun from datasets.cam2 import cam2 from datasets.pascal_voc import pascal_voc from datasets.imagenet import imagenet from datasets.coco import coco import numpy as np # Set up imagenet_<year>_<split> for year in ['2014']: for split in [ 'train', 'val', 'val1', 'val2', 'test', 'short_train', 'very_short_train', 'val1_short' ]: name = 'imagenet_{}'.format(split) __sets[name] = (lambda split=split: imagenet(split)) # Set up voc_<year>_<split> using selective search "fast" mode for year in ['2007', '2012']: for split in ['train', 'val', 'trainval', 'test', 'val_small']: name = 'voc_{}_{}'.format(year, split) __sets[name] = (lambda split=split, year=year: pascal_voc(split, year)) # Set up coco_2014_<split> for year in ['2014']: for split in ['train', 'val', 'minival', 'valminusminival']: name = 'coco_{}_{}'.format(year, split) __sets[name] = (lambda split=split, year=year: coco(split, year)) # Set up coco_2015_<split> for year in ['2015']:
        # NOTE(review): tail of a MATLAB-eval launcher; `cmd` is assembled
        # before this chunk.
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        # Write VOC-format result files, run the Python (and optionally
        # MATLAB) evaluation, then optionally delete the result files.
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        # Competition mode disables the comp-id salt and result-file cleanup
        # so outputs are reproducible and kept on disk.
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    # Ad-hoc debugging harness: build the train roidb, then drop into IPython.
    from datasets.imagenet import imagenet
    d = imagenet('train', '2014')
    res = d.roidb
    from IPython import embed
    embed()
# NOTE(review): this chunk opens mid-statement -- the bracket below closes a
# `for version in [...]` list that starts outside the chunk.
]:
    for split in [
            'minitrain', 'smalltrain', 'train', 'minival', 'smallval', 'val',
            'test'
    ]:
        name = 'vg_{}_{}'.format(version, split)
        # Default-argument binding captures the current split/version.
        __sets[name] = (
            lambda split=split, version=version: vg(version, split))

# set up image net.
for split in [
        'DET_train_30classes', 'VID_train_15frames', 'VID_val_frames',
        'VID_val_videos'
]:
    name = 'imagenet_{}'.format(split)
    data_path = 'data/ILSVRC2015'
    __sets[name] = (
        lambda split=split, data_path=data_path: imagenet(split, data_path))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()


def list_imdbs():
    """List all registered imdbs."""
    return list(__sets.keys())
# Register vg_<version>_<split> factories.
# (A 1600-400-20-only registration used to live here; the loop below
# supersedes it.)
for version in ('150-50-20', '150-50-50', '500-150-80', '750-250-150',
                '1750-700-450', '1600-400-20'):
    for split in ('minitrain', 'smalltrain', 'train', 'minival', 'smallval',
                  'val', 'test'):
        name = 'vg_{}_{}'.format(version, split)
        __sets[name] = (lambda version=version, split=split: vg(version, split))

# set up image net.
for split in ('train', 'val', 'val1', 'val2', 'test'):
    name = 'imagenet_{}'.format(split)
    devkit_path = 'data/imagenet/ILSVRC/devkit'
    data_path = 'data/imagenet/ILSVRC'
    __sets[name] = (
        lambda split=split, devkit_path=devkit_path, data_path=data_path:
        imagenet(split, devkit_path, data_path))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    try:
        factory = __sets[name]
    except KeyError:
        raise KeyError('Unknown dataset: {}'.format(name))
    return factory()


def list_imdbs():
    """List all registered imdbs."""
    return [key for key in __sets]
        # NOTE(review): tail of a MATLAB-eval launcher; `cmd` is assembled
        # before this chunk.
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        # Write per-class Imagenet-format result files, run the Python
        # (and optionally MATLAB) evaluation, then optionally clean up.
        self._write_imagenet_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_imagenet_results_file_template().format(
                    cls)
                os.remove(filename)

    def competition_mode(self, on):
        # Competition mode disables the comp-id salt and result-file cleanup.
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    # Ad-hoc debugging harness: build the train roidb, then drop into IPython.
    from datasets.imagenet import imagenet
    d = imagenet('train', '2016')
    res = d.roidb
    from IPython import embed
    embed()
# Register voc_<year>_<split> factories.
for year in ['2007', '2012']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        # Bind loop variables as lambda defaults so each factory captures its
        # own year/split (late-binding closure pitfall).
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Single default imagenet training registration.
__sets['imagenet'] = lambda: imagenet("train")


def get_imdb(name):
    """Get an imdb (image database) by name."""
    # dict.has_key() was removed in Python 3; use the `in` operator instead.
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()


def list_imdbs():
    """List all registered imdbs."""
    # Materialize the keys view so Python-3 callers also receive a list.
    return list(__sets.keys())
            # NOTE(review): continuation of a MATLAB command-string build;
            # the preceding pieces of `cmd` lie outside this chunk.
            .format(self._devkit_path, self._get_comp_id(), self._image_set,
                    output_dir)
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        # Write per-class Imagenet-format result files, run the Python
        # (and optionally MATLAB) evaluation, then optionally clean up.
        self._write_imagenet_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_imagenet_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        # Competition mode disables the comp-id salt and result-file cleanup.
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    # Ad-hoc debugging harness for the VID split.
    from datasets.imagenet import imagenet
    d = imagenet('train', 'VID')
    res = d.roidb
    from IPython import embed; embed()
# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival']:
        name = 'coco_{}_{}'.format(year, split)
        # Bind loop variables as lambda defaults so each factory captures its
        # own split/year (late-binding closure pitfall).
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up imagenet_2015_<split>
for year in ['2015']:
    for split in ['train', 'val']:
        name = 'imagenet_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: imagenet(split, year))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    # dict.has_key() was removed in Python 3; use the `in` operator instead.
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()


def list_imdbs():
    """List all registered imdbs."""
    # Materialize the keys view so Python-3 callers also receive a list.
    return list(__sets.keys())
        # NOTE(review): tail of a MATLAB-eval launcher; `cmd` is assembled
        # before this chunk.
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        # Write per-class result files, run the Python (and optionally MATLAB)
        # evaluation, then optionally clean up. Cleanup iterates WordNet ids
        # (self._wnid) rather than class names, matching the result-file
        # template used by this variant.
        self._write_imagenet_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._wnid:
                if cls == '__background__':
                    continue
                filename = self._get_imagenet_results_file_template().format(
                    cls)
                os.remove(filename)

    def competition_mode(self, on):
        # Competition mode disables the comp-id salt and result-file cleanup.
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    # Ad-hoc debugging harness: build the trainval roidb, drop into IPython.
    from datasets.imagenet import imagenet
    d = imagenet('trainval')
    res = d.roidb
    from IPython import embed
    embed()
class imagenet(imdb):
    """Single-class ('active_window') detection imdb over an ILSVRC-style
    directory layout: <devkit_path>/data/{Images,ImageSets,Annotations}.

    Annotation files hold one object per line; fields 1-4 are parsed as
    1-based x1 y1 x2 y2 (field 0 is ignored -- presumably a class label;
    TODO confirm against the data).
    """

    def __init__(self, image_set, year, devkit_path):
        """
        Args:
            image_set: split name; selects <data>/ImageSets/<image_set>.txt.
            year: year string; used in the imdb name and eval metric choice.
            devkit_path: root directory containing a 'data' subdirectory.
        """
        imdb.__init__(self, 'imagenet_' + year + '_' + image_set)
        self._year = year
        self._image_set = image_set
        self._devkit_path = devkit_path
        self._data_path = os.path.join(self._devkit_path, "data")
        self._classes = (
            '__background__',  # always index 0
            'active_window')
        self._class_to_ind = dict(
            list(zip(self.classes, list(range(self.num_classes)))))
        # Extensions tried in order when resolving an image path.
        self._image_ext = ['.jpg', '.png', '.jpeg', '.JPEG']
        self._image_index = self._load_image_set_index()
        # Default to the ground-truth roidb handler.
        self._roidb_handler = self.gt_roidb
        self._salt = str(uuid.uuid4())
        self._comp_id = 'comp4'
        # Specific config options.
        self.config = {'cleanup': True, 'use_salt': True, 'top_k': 2000}
        assert os.path.exists(self._devkit_path), \
            'Devkit path does not exist: {}'.format(self._devkit_path)
        assert os.path.exists(self._data_path), \
            'Path does not exist: {}'.format(self._data_path)

    def image_path_at(self, i):
        """Return the absolute path to image i in the image sequence."""
        return self.image_path_from_index(self._image_index[i])

    def image_path_from_index(self, index):
        """Construct an image path from the image's "index" identifier.

        Tries each extension in self._image_ext and returns the first
        existing file; asserts if none exists.
        """
        for ext in self._image_ext:
            image_path = os.path.join(self._data_path, 'Images', index + ext)
            if os.path.exists(image_path):
                break
        assert os.path.exists(image_path), \
            'Path does not exist: {}'.format(image_path)
        return image_path

    def _load_image_set_index(self):
        """Load the indexes listed in this dataset's image set file.

        Example path: <data>/ImageSets/val.txt (one index per line).
        """
        image_set_file = os.path.join(self._data_path, 'ImageSets',
                                      self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
            'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_index = [x.strip() for x in f.readlines()]
        return image_index

    def gt_roidb(self):
        """Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future
        calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                try:
                    roidb = pickle.load(fid)
                except Exception:
                    # Cache likely written by Python 2: rewind (the failed
                    # load consumed part of the stream) and re-read with
                    # byte-string keys.
                    fid.seek(0)
                    roidb = pickle.load(fid, encoding='bytes')
            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
            return roidb

        gt_roidb = [
            self._load_imagenet_annotation(index) for index in self.image_index
        ]
        with open(cache_file, 'wb') as fid:
            pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb

    def _load_imagenet_annotation(self, index):
        """Load image and bounding boxes info from a txt annotation file."""
        filename = os.path.join(self._data_path, 'Annotations', index + '.txt')
        with open(filename) as f:
            data = f.read()

        lines = data.splitlines()
        num_objs = len(lines)

        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        # "Seg" area for pascal is just the box area.
        seg_areas = np.zeros((num_objs), dtype=np.float32)

        # Load object bounding boxes into a data frame.
        for ix, line in enumerate(lines):
            fields = line.split(" ")  # was `object`, which shadows a builtin
            # Make pixel indexes 0-based.
            x1 = float(fields[1]) - 1
            y1 = float(fields[2]) - 1
            x2 = float(fields[3]) - 1
            y2 = float(fields[4]) - 1
            cls = 1  # single foreground class: 'active_window'
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0
            seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)

        overlaps = scipy.sparse.csr_matrix(overlaps)

        return {
            'boxes': boxes,
            'gt_classes': gt_classes,
            'gt_overlaps': overlaps,
            'flipped': False,
            'seg_areas': seg_areas
        }

    def _get_comp_id(self):
        # Append a per-run salt so concurrent runs don't clobber each other's
        # result files.
        comp_id = (self._comp_id + '_' + self._salt
                   if self.config['use_salt'] else self._comp_id)
        return comp_id

    def _get_imagenet_results_file_template(self):
        # e.g. <devkit>/results/<comp_id>_det_<image_set>_<class>.txt
        filename = self._get_comp_id(
        ) + '_det_' + self._image_set + '_{:s}.txt'
        path = os.path.join(self._devkit_path, 'results', filename)
        return path

    def _write_imagenet_results_file(self, all_boxes):
        """Write one VOC-style detection file per foreground class."""
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print('Writing {} Imagenet results file'.format(cls))
            filename = self._get_imagenet_results_file_template().format(cls)
            with open(filename, 'wt') as f:
                for im_ind, index in enumerate(self.image_index):
                    dets = all_boxes[cls_ind][im_ind]
                    # `dets == []` is an elementwise comparison when dets is
                    # an ndarray; len() is a real emptiness test for both
                    # lists and arrays.
                    if len(dets) == 0:
                        continue
                    # the VOCdevkit expects 1-based indices
                    for k in range(dets.shape[0]):
                        f.write(
                            '{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
                                index, dets[k, -1], dets[k, 0] + 1,
                                dets[k, 1] + 1, dets[k, 2] + 1,
                                dets[k, 3] + 1))

    def _do_python_eval(self, output_dir='output'):
        """Run the Python AP evaluation and pickle per-class PR curves."""
        annopath = os.path.join(self._devkit_path, 'data', 'Annotations',
                                '{:s}.txt')
        imagesetfile = os.path.join(self._devkit_path, 'data', 'ImageSets',
                                    self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010.
        use_07_metric = int(self._year) < 2010
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            filename = self._get_imagenet_results_file_template().format(cls)
            rec, prec, ap = imagenet_eval(filename,
                                          annopath,
                                          imagesetfile,
                                          cls,
                                          cachedir,
                                          ovthresh=0.5,
                                          use_07_metric=use_07_metric)
            aps += [ap]
            print(('AP for {} = {:.4f}'.format(cls, ap)))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        print(('Mean AP = {:.4f}'.format(np.mean(aps))))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print(('{:.3f}'.format(ap)))
        print(('{:.3f}'.format(np.mean(aps))))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')

    def _do_matlab_eval(self, output_dir='output'):
        """Shell out to the official MATLAB evaluation wrapper."""
        print('-----------------------------------------------------')
        print('Computing results with the official MATLAB eval code.')
        print('-----------------------------------------------------')
        path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
                            'VOCdevkit-matlab-wrapper')
        cmd = 'cd {} && '.format(path)
        cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
        cmd += '-r "dbstop if error; '
        cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
            .format(self._devkit_path, self._get_comp_id(), self._image_set,
                    output_dir)
        print(('Running:\n{}'.format(cmd)))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        """Write result files and run the Python evaluation.

        NOTE: MATLAB eval and result-file cleanup are intentionally disabled
        in this variant (the original kept them as commented-out code).
        """
        self._write_imagenet_results_file(all_boxes)
        self._do_python_eval(output_dir)

    def competition_mode(self, on):
        """Toggle competition mode: no comp-id salt and no cleanup when on."""
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    # Ad-hoc debugging harness: build the train roidb, then drop into IPython.
    from datasets.imagenet import imagenet
    d = imagenet('train', '2017', ' ')
    res = d.roidb
    from IPython import embed
    embed()
def __init__(self, image_set, year, devkit_path=None, shuffled=None): imdb.__init__(self, 'merged_' + image_set) self._year = year self._image_set = image_set self._anno_set_dir = image_set if "val" in image_set: self._image_set_dir = "val" if "val1" in image_set: self._anno_set_dir = "val1" if "val2" in image_set: self._anno_set_dir = "val2" elif "train" in image_set: self._anno_set_dir = "train" elif "test" in image_set: self._anno_set_dir = "test" if image_set == "train": self.imdbs = [imagenet(image_set),\ coco(image_set, '2015'),\ cam2(image_set,'2017'),\ sun(image_set,'2012'),\ caltech(image_set,'2009'),\ kitti(image_set,'2013'),\ inria(image_set,'2005'),\ pascal_voc(image_set,'2007'),\ pascal_voc(image_set,'2012')] elif image_set == "test": self.imdbs = [imagenet('val'),\ coco('test-dev', '2015'),\ cam2('all','2017'),\ sun('test','2012'),\ caltech('test','2009'),\ kitti('val','2013'),\ inria('all','2005'),\ pascal_voc('test','2007')] self.roidbs = [None for _ in range(len(self.datasets))] for idx,imdb in enumerate(self.imdbs): self.roidbs[idx] = get_training_roidb(imdb) self._devkit_path = self._get_default_path() if devkit_path is None \ else devkit_path self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year) self._classes = ('__background__', # always index 0 'voc', 'imagenet', 'caltech', 'coco', 'sun', 'kitti', 'inria', 'cam2') self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes))) self._image_ext = '.jpg' self._image_index = self._load_image_set_index() # Default to roidb handler self._roidb_handler = self.selective_search_roidb self._salt = str(uuid.uuid4()) self._comp_id = 'comp4' # PASCAL specific config options self.config = {'cleanup' : True, 'use_salt' : True, 'use_diff' : False, 'matlab_eval' : False, 'rpn_file' : None, 'min_size' : 2} assert os.path.exists(self._devkit_path), \ 'VOCdevkit path does not exist: {}'.format(self._devkit_path) assert os.path.exists(self._data_path), \ 'Path does not exist: 
{}'.format(self._data_path)
from datasets.imagenet import imagenet
from datasets.vg import vg
import numpy as np

# Set up voc_<year>_<split> using selective search "fast" mode.
for year in ('2007', '2012', '0712'):
    for split in ('train', 'val', 'trainval', 'test'):
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda year=year, split=split: pascal_voc(split, year))

# Register imagenet_<split> with hard-coded scratch paths.
for split in ('train', 'val'):
    name = 'imagenet_{}'.format(split)
    devkit_path = '/scratch0/ILSVRC/devkit/'
    data_path = '/scratch0/ILSVRC2015/'
    __sets[name] = (
        lambda split=split, devkit_path=devkit_path, data_path=data_path:
        imagenet(split, devkit_path, data_path))
    # Debug trace kept in place to preserve observable behavior.
    print(name)
    print(__sets[name])

# Set up coco_2014_<split>.
for year in ('2014',):
    for split in ('train', 'val', 'minival', 'valminusminival'):
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda year=year, split=split: coco(split, year))

# Set up coco_2015_<split>.
for year in ('2015',):
    for split in ('test', 'test-dev'):
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda year=year, split=split: coco(split, year))
# NOTE(review): these two statements are the body of a coco registration
# loop whose header lies outside this chunk.
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up vg_<split>
# for version in ['1600-400-20']:
#     for split in ['minitrain', 'train', 'minival', 'val', 'test']:
#         name = 'vg_{}_{}'.format(version,split)
#         __sets[name] = (lambda split=split, version=version: vg(version, split))
for version in ['150-50-20', '150-50-50', '500-150-80', '750-250-150',
                '1750-700-450', '1600-400-20']:
    for split in ['minitrain', 'smalltrain', 'train', 'minival', 'smallval',
                  'val', 'test']:
        name = 'vg_{}_{}'.format(version,split)
        # Default-argument binding captures the current split/version.
        __sets[name] = (lambda split=split, version=version: vg(version, split))

# set up image net.
for split in ['train', 'val', 'val1', 'val2', 'test']:
    name = 'imagenet_{}'.format(split)
    devkit_path = 'data/imagenet/ILSVRC/devkit'
    data_path = 'data/imagenet/ILSVRC'
    __sets[name] = (lambda split=split, devkit_path=devkit_path,
                    data_path=data_path: imagenet(split,devkit_path,data_path))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()


def list_imdbs():
    """List all registered imdbs."""
    return list(__sets.keys())
# Register vg_<version>_<split> factories.
# (A 1600-400-20-only registration used to live here; the loop below
# supersedes it.)
for version in ('150-50-20', '150-50-50', '500-150-80', '750-250-150',
                '1750-700-450', '1600-400-20'):
    for split in ('minitrain', 'smalltrain', 'train', 'minival', 'smallval',
                  'val', 'test'):
        name = 'vg_{}_{}'.format(version, split)
        __sets[name] = (lambda version=version, split=split: vg(version, split))

# set up image net.
for split in ('train', 'val', 'val1', 'val2', 'test'):
    name = 'imagenet_{}'.format(split)
    devkit_path = 'data/imagenet/ILSVRC/devkit'
    data_path = 'data/imagenet/ILSVRC'
    __sets[name] = (
        lambda split=split, devkit_path=devkit_path, data_path=data_path:
        imagenet(split, devkit_path, data_path))

# Wider Face splits.
for split in ('train', 'val', 'test'):
    name = 'wider_face_{}'.format(split)
    __sets[name] = (lambda split=split: wider_face(split))

# MI3 splits.
for split in ('train', 'val', 'test'):
    name = 'MI3_{}'.format(split)
    __sets[name] = (lambda split=split: mi3(split))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    try:
        factory = __sets[name]
    except KeyError:
        raise KeyError('Unknown dataset: {}'.format(name))
    return factory()
# Set up voc_<year>_<split> using selective search "fast" mode
for year in ['2007', '2012']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        # Bind loop variables as lambda defaults so each factory captures its
        # own year/split (late-binding closure pitfall).
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Single default imagenet training registration.
__sets['imagenet'] = lambda: imagenet("train")


def get_imdb(name):
    """Get an imdb (image database) by name."""
    # dict.has_key() was removed in Python 3; use the `in` operator instead.
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()


def list_imdbs():
    """List all registered imdbs."""
    # Materialize the keys view so Python-3 callers also receive a list.
    return list(__sets.keys())