               .format(self._devkit_path, self._get_comp_id(),
                       self._image_set, output_dir)
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    from datasets.wider import wider
    d = wider()
    res = d.roidb
    from IPython import embed; embed()
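# ---------------------------------------------------------------------------
# Usage sketch (added, not part of the original file): how a test script would
# typically feed detections into evaluate_detections(). The all_boxes layout
# (all_boxes[class_index][image_index] = N x 5 array of [x1, y1, x2, y2, score])
# follows the py-faster-rcnn convention; the 'val' split and the output
# directory below are assumptions.
#
#   import numpy as np
#   from datasets.wider import wider
#
#   imdb = wider('val')
#   all_boxes = [[np.empty((0, 5), dtype=np.float32)
#                 for _ in range(imdb.num_images)]
#                for _ in range(imdb.num_classes)]
#   # ... fill all_boxes[cls][img] with detector output ...
#   imdb.evaluate_detections(all_boxes, output_dir='./output/wider_val')
# ---------------------------------------------------------------------------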
                                                 split, year, use_diff=True))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up wider dataset
for split in ['train', 'val', 'test']:
    name = 'WIDER_{}'.format(split)
    __sets[name] = (lambda split=split: wider(split))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()


def list_imdbs():
    """List all registered imdbs."""
    return list(__sets.keys())
"""Factory method for easily getting imdbs by name.""" from datasets.wider import wider import numpy as np __sets = {} for split in ['train', 'val', 'test', 'train_val']: name = 'wider_{}'.format(split) __sets[name] = ( lambda split=split: wider(split, wider_path='./data/wider')) def get_imdb(name): """Get an imdb (image database) by name.""" if not __sets.has_key(name): raise KeyError('Unknown dataset: {}'.format(name)) return __sets[name]() def list_imdbs(): """List all registered imdbs.""" return __sets.keys()
    for split in ['train', 'val', 'trainval', 'test', 'val1', 'val2']:
        name = 'psdbCrop_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: psdbCrop(split, year))

# Set up psdb_<year>_<split> using selective search "fast" mode
for year in ['2015']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'psdb_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: psdb(split, year))

# Set up wider_<year>_<split> using selective search "fast" mode
for year in ['2015']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'wider_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: wider(split, year))

# Set up aichal_<year>_<split> using selective search "fast" mode
for year in ['2017']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'aichal_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: aichal(split, year))

# Set up aichalCrop_<year>_<split> using selective search "fast" mode
for year in ['2017']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'aichalCrop_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: aichalCrop(split, year))
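# ---------------------------------------------------------------------------
# Note (added): the 'split=split, year=year' default arguments freeze the loop
# values at definition time. A plain closure would late-bind, so every factory
# entry would construct its dataset with the final loop values. Minimal
# standalone illustration (not tied to this file):
#
#   fns_late  = [lambda: s for s in ['a', 'b']]      # both return 'b'
#   fns_bound = [lambda s=s: s for s in ['a', 'b']]  # return 'a' and 'b'
#   assert fns_late[0]() == 'b' and fns_bound[0]() == 'a'
# ---------------------------------------------------------------------------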
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    from datasets.wider import wider
    d = wider()
    res = d.roidb
    from IPython import embed
    embed()
# ------------------------------------------------------------------------------------------------
# This file is a modified version of https://github.com/rbgirshick/py-faster-rcnn by Ross Girshick
# Modified by Mahyar Najibi
# ------------------------------------------------------------------------------------------------
from datasets.wider import wider

__sets = {}

for split in ['train', 'val', 'test']:
    name = 'wider_{}'.format(split)
    __sets[name] = (lambda split=split: wider(split))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:  # dict.has_key() no longer exists in Python 3
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()
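# ---------------------------------------------------------------------------
# Extension sketch (added, hypothetical): registering another dataset follows
# the same pattern. 'my_faces' and the MyFaces class below do not exist in
# this repository; they only illustrate the registration convention.
#
#   from datasets.my_faces import MyFaces   # hypothetical module/class
#
#   for split in ['train', 'val']:
#       __sets['my_faces_{}'.format(split)] = (
#           lambda split=split: MyFaces(split))
# ---------------------------------------------------------------------------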
    #
    # def evaluate_detections(self, all_boxes, output_dir):
    #     self._write_voc_results_file(all_boxes)
    #     self._do_python_eval(output_dir)
    #     if self.config['matlab_eval']:
    #         self._do_matlab_eval(output_dir)
    #     if self.config['cleanup']:
    #         for cls in self._classes:
    #             if cls == '__background__':
    #                 continue
    #             filename = self._get_voc_results_file_template().format(cls)
    #             os.remove(filename)

    def competition_mode(self, on):
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    from datasets.wider import wider
    d = wider('train')
    res = d.roidb
    from IPython import embed
    embed()
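# ---------------------------------------------------------------------------
# Usage note (added): a test script would typically call
# competition_mode(True) before evaluation so that result files are written
# without a random salt and are not deleted afterwards, e.g.:
#
#   imdb = wider('val')
#   imdb.competition_mode(True)   # sets use_salt=False, cleanup=False
#
# The 'val' split here is only an example.
# ---------------------------------------------------------------------------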