def get_imdb(dataset, splitName):
    # Build the coco_voc imdb for the given dataset split, using the image
    # directory and caption JSON resolved by getImdbPaths.
    imdbPaths = getImdbPaths(dataset, splitName)
    imdb = coco_voc.coco_voc(dataset, splitName,
                             image_path=imdbPaths['imageDir'],
                             captionJsonPath=imdbPaths['jsonPath'])
    return imdb
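# A minimal usage sketch for get_imdb. The dataset and split names below are
# hypothetical; substitute whatever getImdbPaths actually resolves. num_images
# and image_path_at are attributes the rest of this repo relies on:
#
#   imdb = get_imdb('coco', 'val')
#   print imdb.num_images, imdb.image_path_at(0)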
import os
import shutil

import sg_utils as utils
import coco_voc

# Make the bucketed image directories ../data/images/00 through .../59.
for i in xrange(60):
    utils.mkdir_if_missing(os.path.join('..', 'data', 'images', '{:02d}'.format(i)))

# Copy the COCO images for each split into the bucketed layout.
sets = ['train', 'val', 'test']
for set_ in sets:
    imdb = coco_voc.coco_voc(set_)
    for i in xrange(imdb.num_images):
        in_file = os.path.join('../data', set_ + '2014',
                               'COCO_{}2014_{:012d}.jpg'.format(set_, imdb.image_index[i]))
        out_file = imdb.image_path_at(i)
        shutil.copyfile(in_file, out_file)
        utils.tic_toc_print(1, ' Copying images [{}]: {:06d} / {:06d}\n'.format(
            set_, i, imdb.num_images))
import os
import shutil

import sg_utils as utils
import coco_voc

# Same copy script as above, but with all paths resolved relative to the
# current working directory (no '../' prefix).
for i in xrange(60):
    utils.mkdir_if_missing(os.path.join('data', 'images', '{:02d}'.format(i)))

sets = ['train', 'val', 'test']
for set_ in sets:
    imdb = coco_voc.coco_voc(set_)
    for i in xrange(imdb.num_images):
        in_file = os.path.join(set_ + '2014',
                               'COCO_{}2014_{:012d}.jpg'.format(set_, imdb.image_index[i]))
        out_file = imdb.image_path_at(i)
        shutil.copyfile(in_file, out_file)
        utils.tic_toc_print(1, ' Copying images [{}]: {:06d} / {:06d}\n'.format(
            set_, i, imdb.num_images))
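# The {:02d} buckets above pair with the index scheme used elsewhere in this
# repo: image index ind lands in bucket floor(ind / 1e4), so 60 buckets cover
# all COCO image ids (which stay below 600000). A minimal sketch of the
# mapping; bucket_path_for is a hypothetical helper, and the '<ind>.jpg'
# filename is an assumption about what image_path_at returns:
def bucket_path_for(ind, root=os.path.join('data', 'images')):
    return os.path.join(root, '{:02d}'.format(int(ind / 1e4)),
                        '{:d}.jpg'.format(ind))

# e.g. bucket_path_for(581921) -> 'data/images/58/581921.jpg'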
import os
import sys
import argparse

import sg_utils as utils
import coco_voc
import preprocess


def parse_args():
    parser = argparse.ArgumentParser(description='Benchmark word detection outputs.')
    # The argument definitions are truncated in the source; the flags below
    # are inferred from how args is used further down.
    parser.add_argument('--vocab_file', type=str)
    parser.add_argument('--det_file', type=str)
    parser.add_argument('--map_file', type=str)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)

    imdb = coco_voc.coco_voc('test')
    vocab = utils.load_variables(args.vocab_file)
    gt_label = preprocess.get_vocab_counts(
        imdb.image_index, imdb.coco_caption_data, 5, vocab)

    det_file = args.det_file
    det_dir = os.path.dirname(det_file)  # root dir of det_file
    eval_file = os.path.join(det_dir, imdb.name + '_eval.pkl')
    # benchmark is assumed to be defined earlier in this file.
    benchmark(imdb, vocab, gt_label, 5, det_file, eval_file=eval_file)

    map_file = args.map_file
    gt_label_det = preprocess.get_vocab_counts_det(
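# Example invocation (a sketch; the script name and file paths below are
# placeholders, not taken from the repo):
#
#   python benchmark.py --vocab_file vocab/vocab_train.pkl \
#       --det_file output/test_dets.pkl --map_file output/test_map.pkl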
import os
import math

import h5py
import numpy as np

import caffe
import sg_utils as utils
import coco_voc
import preprocess

# Set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
    caffe.set_device(args.gpu_id)

# Load the vocabulary
vocab = utils.load_variables(args.vocab_file)

if args.task == 'compute_targets':
    imdb = []
    output_dir = args.train_dir
    sets = ['train', 'val']
    for i, imset in enumerate([args.train_set, args.val_set]):
        imdb.append(coco_voc.coco_voc(imset))
        print 'Loaded dataset {:s}'.format(imdb[i].name)

        # Compute the per-word reference-caption counts for this split.
        counts = preprocess.get_vocab_counts(imdb[i].image_index,
                                             imdb[i].coco_caption_data, 5, vocab)

        if args.write_labels:
            label_file = os.path.join(output_dir, 'labels_' + sets[i] + '.h5')
            print 'Writing labels to {}'.format(label_file)
            with h5py.File(label_file, 'w') as f:
                for j in xrange(imdb[i].num_images):
                    ind = imdb[i].image_index[j]
                    ind_str = '{:02d}/{:d}'.format(int(math.floor(ind) / 1e4), ind)
                    l = f.create_dataset('/labels-{}'.format(ind_str),
                                         (1, 1, counts.shape[1], 1), dtype='f')
                    # Binarize the counts: a word is a positive label if it
                    # appears in at least one reference caption.
                    c = counts[j, :].copy()
                    c = c > 0
                    c = c.astype(np.float32)
                    c = c.reshape((1, 1, c.size, 1))
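# A hypothetical helper (not part of the original script) showing how one of
# these labels can be read back, mirroring the '/labels-{bucket}/{ind}'
# dataset naming used when writing above:
def read_label(label_file, ind):
    # Returns the (1, 1, vocab_size, 1) binary label tensor for image ind.
    ind_str = '{:02d}/{:d}'.format(int(math.floor(ind) / 1e4), ind)
    with h5py.File(label_file, 'r') as f:
        return f['/labels-{}'.format(ind_str)][...]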