Code Example #1
    def __init__(self, image_set, year, test=False, devkit_path=None):
        imdb.__init__(self, 'voc_' + year + '_' + image_set)
        self._year = year
        self._image_set = image_set
        #os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
        self._devkit_path = self._get_default_path() if devkit_path is None \
                            else devkit_path
        self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)

        if not test:
            options = config(section_name='all_categories_train',
                             conf_file='lib/cfgs/detection.cfg')
        else:
            options = config(section_name='all_categories_test',
                             conf_file='lib/cfgs/detection.cfg')

        self._classes = eval(options['classes'])

        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        self._image_ext = '.jpg'

        # Default to roidb handler
        self._roidb_handler = self.selective_search_roidb
        self._salt = str(uuid.uuid4())
        self._comp_id = 'comp4'

        ########### Begin my own implementation
        # self._index_to_path = self.index_to_fullPath_dict('/home/liuhuawei/detection/annotate_data.txt')
        ## index to image absolute path
        self._index_to_path = {}
        ## index to image id
        self._index_to_id = {}
        ## image id to index
        self._id_to_index = {}
        ## absolute path to json file
        self._json_file = options['input_json_file']
        self._output_json_file = options['ouput_json_file']
        ## main function to convert json file into xml format
        self._json_to_xml(self._json_file)
        ## the train/test txt files are generated while converting json to xml, so the image index is loaded afterwards
        self._image_index = self._load_image_set_index()
        ########### End of my own implementation

        # PASCAL specific config options
        self.config = {
            'cleanup': True,
            'use_salt': True,
            'use_diff': False,
            'matlab_eval': False,
            'rpn_file': None,
            'min_size': 2
        }

        assert os.path.exists(self._devkit_path), \
                'VOCdevkit path does not exist: {}'.format(self._devkit_path)
        assert os.path.exists(self._data_path), \
                'Path does not exist: {}'.format(self._data_path)
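
Code Example #1 relies on a config() helper and an INI-style lib/cfgs/detection.cfg, neither of which appears above. A minimal sketch of what they might look like, assuming the helper is a thin wrapper around Python 2's ConfigParser; the section contents shown in the comments are placeholders, not the project's real values:

# Sketch of the assumed config() helper: read one section of an INI file and
# return its options as a plain dict (the signature is inferred from the call sites above).
from ConfigParser import ConfigParser   # Python 2, matching the xrange usage above

def config(section_name, conf_file):
    parser = ConfigParser()
    parser.read(conf_file)
    return dict(parser.items(section_name))

# Hypothetical section of lib/cfgs/detection.cfg, consistent with the keys read
# above ('classes', 'input_json_file', 'ouput_json_file'); all values are placeholders:
#
# [all_categories_train]
# classes = ('__background__', 'person', 'dog')
# input_json_file = /path/to/train_annotations.json
# ouput_json_file = /path/to/train_detections.json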
Code Example #2
    def __init__(self, task_name, image_set, year, test=False):
        imdb.__init__(self, 'coco_' + year + '_' + image_set)
        # COCO specific config options
        self.config = {
            'top_k': 2000,
            'use_salt': True,
            'cleanup': True,
            'crowd_thresh': 0.7,
            'min_size': 2
        }
        # name, paths
        self._year = year
        self._image_set = image_set
        self._data_path = osp.join(cfg.DATA_DIR, 'coco')
        # load COCO API, classes, class <-> id mappings
        #########liu
        if not test:
            options = config(section_name=task_name + '_train',
                             conf_file='lib/cfgs/detection.cfg')
        else:
            options = config(section_name=task_name + '_test',
                             conf_file='lib/cfgs/detection.cfg')
            self._res_file = options['ouput_json_file']
        self._ann_file = options['input_json_file']
        self._COCO = COCO(self._ann_file)
        #########end
        ## cats: list of COCO category dicts with keys 'name', 'id', etc.
        cats = self._COCO.loadCats(self._COCO.getCatIds())

        #### Read the class list from the config file instead of from the COCO categories
        ####    directly, so that results can be shared between the voc model and the coco
        ####    model; the pascal voc code reads the same option with 'eval', and the voc
        ####    code can also be adapted to support the coco style.
        # self._classes = tuple(['__background__'] + [c['name'] for c in cats])
        self._classes = eval(options['classes'])
        ## note: class_to_ind maps to the label index used inside caffe, while class_to_coco_cat_id maps to the COCO category id
        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        self._class_to_coco_cat_id = dict(
            zip([c['name'] for c in cats], self._COCO.getCatIds()))
        #########################
        self._coco_ind_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
                                             self._class_to_ind[cls])
                                            for cls in self.classes[1:]])
        self._class_ind_to_coco_ind = dict([(self._class_to_ind[cls],
                                             self._class_to_coco_cat_id[cls])
                                            for cls in self.classes[1:]])
        ##########################
        ## use img_id as the index here, instead of img_name as in pascal voc
        self._image_index = self._load_image_set_index()
        #########liu
        self._image_index_to_path = self._load_imgid_to_path_dict(
            self._ann_file)
        #########end
        # Default to roidb handler
        self.set_proposal_method('selective_search')
        self.competition_mode(False)

        # Some image sets are "views" (i.e. subsets) into others.
        # For example, minival2014 is a random 5000 image subset of val2014.
        # This mapping tells us where the view's images and proposals come from.
        self._view_map = {
            'minival2014': 'val2014',  # 5k val2014 subset
            'valminusminival2014': 'val2014',  # val2014 \setminus minival2014
        }
        coco_name = image_set + year  # e.g., "trainval2014" or "test2014"
        self._data_name = (self._view_map[coco_name]
                           if coco_name in self._view_map else coco_name)
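
Both constructors build the class list with eval(options['classes']), which implies the classes option stores a Python literal such as a tuple of names. A safer drop-in reading of the same option is ast.literal_eval, which only accepts literals; the helper below (load_classes) is a hypothetical sketch, not part of the original code:

import ast

def load_classes(options):
    # options['classes'] is expected to hold something like
    # "('__background__', 'person', 'dog')"
    classes = tuple(ast.literal_eval(options['classes']))
    # faster-rcnn conventionally keeps '__background__' as class 0
    assert classes[0] == '__background__'
    return classes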
Code Example #3
    f.write('export PYTHONUNBUFFERED="True"\n')
    f.write(log_str + '\n')
    f.write('exec &> >(tee -a "$LOG") \n')
    f.write('echo Logging output to {} \n'.format('"$LOG"'))

    f.write('time ./tools/train_net.py --gpu {} \\\n'.format(GPU_ID))
    f.write('--solver {} \\\n'.format(solver_file))
    f.write('--weights {} \\\n'.format(weights))
    # f.write('--weights data/imagenet_models/{}.v2.caffemodel \\\n'.format(NET))
    f.write('--imdb {} \\\n'.format(TRAIN_LMDB))
    f.write('--iters {} \\\n'.format(exp_ITERS))
    f.write('--cfg {} \n'.format(
        os.path.join(cfgs_dir, 'faster_rcnn_end2end.yml')))
os.chmod(os.path.join(scripts_dir, 'faster_rcnn_end2end.sh'), stat.S_IRWXU)
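
Since the generated script is marked executable with stat.S_IRWXU, it can be launched right after generation; a short usage sketch (this call is not part of the original script):

import subprocess
# run the freshly generated training script and raise on a non-zero exit code
subprocess.check_call(['bash', os.path.join(scripts_dir, 'faster_rcnn_end2end.sh')])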

options = config(section_name='{}_test'.format(dataset),
                 conf_file='lib/cfgs/detection.cfg')
annFile = options['input_json_file']
resFile = options['ouput_json_file']
class_names = ','.join(eval(options['classes']))
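
annFile and resFile here mirror the input_json_file / ouput_json_file options used by the imdb classes above; presumably the result file is scored against the annotation file with the COCO evaluation API. A minimal sketch of that step with pycocotools, as an assumption since the call does not appear in this excerpt:

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO(annFile)                 # ground-truth annotations
coco_dt = coco_gt.loadRes(resFile)      # detections written by the test run
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()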

## generate the testing script
with open(os.path.join(scripts_dir, 'test.sh'), 'w') as f:
    f.write('time ./tools/test_net.py --gpu {} \\\n'.format(GPU_ID))
    f.write('--def models/{}/{}/faster_rcnn_end2end/test.prototxt \\\n'.format(
        dataset,
        NET + ft_flag,
    ))
    f.write('--net output/faster_rcnn_end2end/2007_trainval_{}/{}_iter_{}.caffemodel \\\n'.format(
        dataset, solver_param_snapshot_prefix, exp_ITERS))
    f.write('--imdb {} \\\n'.format(TEST_LMDB))
    f.write('--cfg {} \n'.format(