def rpn_generate_single_gpu(prototxt, caffemodel, imdb, rank, gpus, output_dir):
    """Generate RPN proposals for one worker's share of `imdb` on one GPU.

    Args:
        prototxt: path to the test-time network definition.
        caffemodel: path to the trained RPN weights.
        imdb: dataset object to generate proposals for.
        rank: index of this worker; selects its GPU from `gpus`.
        gpus: list of GPU ids, one per worker.
        output_dir: directory passed through to `imdb_proposals`
            (presumably where per-worker results land -- confirm against
            `imdb_proposals`).

    Returns:
        Whatever `imdb_proposals` returns for this worker's share.
    """
    cfg.GPU_ID = gpus[rank]
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    # Fix: the result used to be assigned to a local and silently
    # discarded; return it so callers can use the boxes directly
    # (backward compatible -- callers ignoring the return are unaffected).
    imdb_boxes = imdb_proposals(net, imdb, rank, len(gpus), output_dir)
    return imdb_boxes
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.

    Runs the RPN over every image in `imdb_name`, pickles the proposals
    next to the model's output directory, and reports the pickle path
    both via the return value and -- when running as a worker process --
    via `queue`.

    Args:
        queue: optional multiprocessing queue; receives
            {'proposal_path': path} when provided.
        imdb_name: dataset name understood by `get_imdb`.
        rpn_model_path: path to the trained RPN caffemodel.
        cfg: configuration object (mutated: test-time NMS settings).
        rpn_test_prototxt: path to the test-time network definition.

    Returns:
        dict with key 'proposal_path' pointing at the written pickle.
    """
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print('RPN model: {}'.format(rpn_model_path))
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped
    # images, too. We compute them on the image once and then flip the
    # already computed proposals. This might cause a minor loss in mAP
    # (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print('Loaded dataset `{:s}` for proposal generation'.format(imdb.name))

    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb, None)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)

    # Write proposals to disk
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print('Wrote RPN proposals to {}'.format(rpn_proposals_path))

    result = {'proposal_path': rpn_proposals_path}
    # Fix: `queue` was accepted but never used, so a caller running this
    # as a multiprocessing worker never received the path. Send it when
    # a queue is supplied; keep the return for direct callers.
    if queue is not None:
        queue.put(result)
    return result
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.

    Runs the RPN over every image in `imdb_name`, pickles the proposals
    into the dataset's output directory, and reports the pickle path via
    `queue` (when given) and the return value.

    Args:
        queue: optional multiprocessing queue; receives
            {'proposal_path': path} when provided.
        imdb_name: dataset name understood by `get_imdb`.
        rpn_model_path: path to the trained RPN caffemodel.
        cfg: configuration object (mutated: test-time NMS settings).
        rpn_test_prototxt: path to the test-time network definition.

    Returns:
        dict with key 'proposal_path' pointing at the written pickle.
    """
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print('RPN model: {}'.format(rpn_model_path))
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped
    # images, too. We compute them on the image once and then flip the
    # already computed proposals. This might cause a minor loss in mAP
    # (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print('Loaded dataset `{:s}` for proposal generation'.format(imdb.name))

    # Load RPN and configure output directory.
    # NOTE(review): this variant calls get_output_dir(imdb) with one
    # argument while sibling variants pass (imdb, None) -- presumably a
    # different helper signature in this fork; confirm before unifying.
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)

    # Write proposals to disk
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print('Wrote RPN proposals to {}'.format(rpn_proposals_path))

    result = {'proposal_path': rpn_proposals_path}
    # Fix: queue.put on the default queue=None raised AttributeError
    # when the function was called directly; guard it and also return
    # the result for direct callers.
    if queue is not None:
        queue.put(result)
    return result
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals, for one or more datasets.

    `imdb_name` may be a single dataset name or several joined with '+'.
    One proposals pickle is written per dataset; already-existing pickles
    are not recomputed. The resulting path (or list of paths) is sent
    through `queue` when one is provided.

    Args:
        queue: optional multiprocessing queue; receives
            {'proposal_path': path_or_list} when provided.
        imdb_name: dataset name, or names joined with '+'.
        rpn_model_path: path to the trained RPN caffemodel.
        cfg: configuration object (mutated: test-time NMS settings).
        rpn_test_prototxt: path to the test-time network definition.
    """
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print('RPN model: {}'.format(rpn_model_path))
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    if '+' in imdb_name:
        # Composite dataset: use a single output dir for all parts.
        imdbs_list = imdb_name.split('+')
        imdb = datasets.imdb.imdb(imdb_name)
        output_dir = get_output_dir(imdb, None)
        print('Output will be saved to `{:s}`'.format(output_dir))
    else:
        imdbs_list = [imdb_name]
        output_dir = None  # Gets set later for single database case

    # Fix: the network and its derived name are loop-invariant; the
    # original reloaded the (expensive) caffe.Net on every iteration.
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]

    rpn_proposals_path = [None] * len(imdbs_list)
    for i, imdb_name in enumerate(imdbs_list):
        # NOTE: the matlab implementation computes proposals on flipped
        # images, too. We compute them on the image once and then flip
        # the already computed proposals. This might cause a minor loss
        # in mAP (less proposal jittering).
        imdb = get_imdb(imdb_name)
        print('Loaded dataset `{:s}` for proposal generation'.format(imdb.name))
        if output_dir is None:
            output_dir = get_output_dir(imdb, None)
            print('Output will be saved to `{:s}`'.format(output_dir))

        rpn_proposals_path[i] = os.path.join(
            output_dir, rpn_net_name + '_' + imdb_name + '_proposals.pkl')

        # Check if rpn proposals have already been computed;
        # if so, don't recompute.
        if not os.path.isfile(rpn_proposals_path[i]):
            # Generate proposals on the imdb and write them to disk
            rpn_proposals = imdb_proposals(rpn_net, imdb)
            with open(rpn_proposals_path[i], 'wb') as f:
                cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
            print('Wrote RPN proposals to {}'.format(rpn_proposals_path[i]))
        else:
            print("Proposals for " + imdb_name + " exist already.")

    # Single dataset: report a bare path instead of a one-element list.
    if len(rpn_proposals_path) == 1:
        rpn_proposals_path = rpn_proposals_path[0]
    # Send the proposal file path through the multiprocessing queue.
    # Fix: guard against the default queue=None (AttributeError before).
    if queue is not None:
        queue.put({'proposal_path': rpn_proposals_path})
def rpn_generate_demo(args):
    """Generate RPN proposals for `args.imdb_name` and pickle them.

    Initializes the network from `args` via `_init_net`, runs proposal
    generation over the whole dataset, and writes
    `<net.name>_rpn_proposals.pkl` into the dataset's output directory
    (created if missing).

    Fixes over the original: the docstring was empty (``''''''``) and a
    dead commented-out `output_dir` line has been removed.
    """
    net = _init_net(args)
    imdb = get_imdb(args.imdb_name)
    imdb_boxes = imdb_proposals(net, imdb)
    output_dir = get_output_dir(imdb, net)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    rpn_file = os.path.join(output_dir, net.name + '_rpn_proposals.pkl')
    with open(rpn_file, 'wb') as f:
        cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
    print('Wrote RPN proposals to {}'.format(rpn_file))
def roi_generate(queue=None, imdb_name=None, roi_conv_model_path=None,
                 cfg=None, test_prototxt=None, overwrite=None):
    """Use a trained network to generate proposals for `imdb_name`.

    (Docstring fixed: the previous one was copied verbatim from
    `rpn_generate` and described the wrong function.)

    The output path is announced through `queue` up front; if the output
    directory already exists and `overwrite` is falsy, generation is
    skipped entirely.

    Args:
        queue: optional multiprocessing queue; receives
            {'proposal_path': path} when provided.
        imdb_name: dataset name understood by `get_imdb`.
        roi_conv_model_path: path to the trained caffemodel.
        cfg: configuration object (RNG_SEED and GPU_ID are read).
        test_prototxt: path to the test-time network definition.
        overwrite: if truthy, wipe and recreate an existing output dir.
    """
    output_dir = './output/'
    net_name = os.path.splitext(os.path.basename(roi_conv_model_path))[0]
    output_path_name = os.path.join(output_dir, net_name)
    # Announce the path before doing any work so the parent process can
    # read it even when we early-return below. Fix: guard against the
    # default queue=None (AttributeError before).
    if queue is not None:
        queue.put({'proposal_path': output_path_name})
    if not os.path.exists(output_path_name):
        os.makedirs(output_path_name)
    elif overwrite:
        shutil.rmtree(output_path_name)
        os.makedirs(output_path_name)
    else:
        # Results already present and overwrite not requested.
        return

    print('RPN model: {}'.format(roi_conv_model_path))
    print('Using config:')
    pprint.pprint(cfg)

    # fix the random seeds (numpy and caffe) for reproducibility
    import caffe
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)

    # NOTE: the matlab implementation computes proposals on flipped
    # images, too. We compute them on the image once and then flip the
    # already computed proposals. This might cause a minor loss in mAP
    # (less proposal jittering).
    from datasets.factory import get_imdb
    imdb = get_imdb(imdb_name)
    print('Loaded dataset `{:s}` for bbox generation'.format(imdb.name))

    # Load the network and configure the output directory
    roi_net = caffe.Net(test_prototxt, roi_conv_model_path, caffe.TEST)
    roi_net.name = net_name
    print('Output will be saved to `{:s}`'.format(output_path_name))
    print('roinet.name: ' + roi_net.name)

    # Generate proposals on the imdb; presumably imdb_proposals writes
    # them under output_path_name (return value is unused here).
    roi_proposals = imdb_proposals(roi_net, imdb, output_path_name)
def rpn_generate_signle_gpu(rank):
    # NOTE(review): "signle" is a typo for "single"; kept because callers
    # reference this name.
    # Worker entry point: generates RPN proposals for this rank's share
    # of the dataset. Relies on enclosing/global scope for `gpus`,
    # `rpn_test_prototxt`, `rpn_model_path`, `imdb` and `output_dir` --
    # presumably bound by the surrounding driver (not visible here);
    # confirm before reusing this function elsewhere.
    cfg.GPU_ID = gpus[rank]
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    # Fix the random seeds (numpy and caffe) for reproducibility.
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)

    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)

    # Generate proposals on the imdb; result is unused here --
    # presumably imdb_proposals writes into output_dir (TODO confirm).
    rpn_proposals = imdb_proposals(rpn_net, imdb, rank, len(gpus), output_dir)
# Script body: load config overrides, wait for the trained model to
# appear, run RPN proposal generation over the dataset, and pickle the
# results. Assumes `args` was produced by an argument parser earlier in
# the file (not visible here).
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id

# RPN test settings
cfg.TEST.RPN_PRE_NMS_TOP_N = -1    # no pre-NMS filtering
cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit boxes kept after NMS

print('Using config:')
pprint.pprint(cfg)

# Optionally block until the caffemodel exists (e.g. while training is
# still running in another process).
while not os.path.exists(args.caffemodel) and args.wait:
    print('Waiting for {} to exist...'.format(args.caffemodel))
    time.sleep(10)

caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
# Net name = caffemodel filename without extension; used in the output
# filename below.
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]

imdb = get_imdb(args.imdb_name)
imdb_boxes = imdb_proposals(net, imdb)

output_dir = get_output_dir(imdb, net)
rpn_file = os.path.join(output_dir, net.name + '_rpn_proposals_my.pkl')
with open(rpn_file, 'wb') as f:
    cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_file)
pprint.pprint(cfg) ''' while not os.path.exists(args.caffemodel) and args.wait: print('Waiting for {} to exist...'.format(args.caffemodel)) time.sleep(10) ''' caffe.set_mode_gpu() caffe.set_device(args.gpu_id) net = caffe.Net(prototxt, caffemodel, caffe.TEST) print '\n\nLoaded network {:s}'.format(caffemodel) net.name = os.path.splitext(os.path.basename(caffemodel))[0] imdb = get_imdb(args.imdb_name) #imdb.competition_mode(args.comp_mode) boxes, score, image = imdb_proposals(net, imdb) print 'finish detection' ''' rpn_file = 'rpn_proposals.pkl' with open(rpn_file, 'wb') as f: cPickle.dump(boxes, f, cPickle.HIGHEST_PROTOCOL) cPickle.dump(score, f, cPickle.HIGHEST_PROTOCOL) cPickle.dump(image, f, cPickle.HIGHEST_PROTOCOL) ''' filename = '/home/shouyang/kitti/results/comp4-guo_det_kitti_test_car.txt' with open(filename, 'wt') as f: for i in range(len(image)): for j in range(len(score[i])): f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format( image[i], score[i][j][0], boxes[i][j][0] + 1, boxes[i][j][1] + 1, boxes[i][j][2] + 1,
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None, \
                 rpn_test_prototxt=None, rpn_cache_path=None):
    """Use a trained RPN to generate proposals.

    Two strategies, selected by `rpn_cache_path`:
      * non-empty: one proposals pickle per image is written under
        `rpn_cache_path` (via `imdb_proposals2pkls`) and that directory
        is reported through `queue`;
      * otherwise: a single pickle for the whole dataset is written into
        the output directory and its path is reported through `queue`.

    NOTE(review): `queue.put` is called unconditionally, so the default
    `queue=None` would raise AttributeError -- confirm this is always
    invoked as a worker with a real queue.
    """
    print 'RPN model: {}'.format(rpn_model_path)
    print

    # no pre NMS filtering
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1
    # limit top boxes after NMS
    cfg.TEST.RPN_POST_NMS_TOP_N = cfg.TEST.RPN_POST_NMS_TOP_N_DEFAULT_VAL
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped
    # images, too. We compute them on the image once and then flip the
    # already computed proposals. This might cause a minor loss in mAP
    # (less proposal jittering).

    # Pull dataset-location settings out of the config; these are
    # forwarded verbatim to get_imdb below.
    data = cfg.TRAIN.DATA
    cache = cfg.TRAIN.CACHE
    D_INPUT_DIR = cfg.TRAIN.D_INPUT_DIR
    D_INPUT_FILE = cfg.TRAIN.D_INPUT_FILE
    D_INPUT_LAB_DIR = cfg.TRAIN.D_INPUT_LAB_DIR
    D_INPUT_IMG_DIR = cfg.TRAIN.D_INPUT_IMG_DIR
    stage_flag = "GENERATE RPN PROPOSALS"
    print
    print "data:", data
    print "cache:", cache
    print "stage_flag:", stage_flag
    print
    # The sleep(3) calls appear to be deliberate pauses so the operator
    # can read the banner output; confirm before removing.
    sleep(3)

    # Instance
    imdb = get_imdb(imdb_name, D_INPUT_DIR, D_INPUT_IMG_DIR, \
                    D_INPUT_LAB_DIR, D_INPUT_FILE, data, cache)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)

    # Output directory
    output_dir = get_output_dir(imdb, None)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    print "\n\n"

    # Network -> Load RPN & Configurations
    print "rpn generate -> load trained model from:"
    print "  pt:", rpn_test_prototxt
    print "  model:", rpn_model_path
    print "\n\n"
    sleep(3)
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    sleep(3)

    # Get proposals from rpn network and write them into files
    if rpn_cache_path is not None and len(rpn_cache_path) > 0:
        # Per-image cache strategy: one pickle file per image.
        print "use cache strategy for generating props -- per image per props file"
        print "start writing props into a file for each image"
        imdb_proposals2pkls(rpn_net, imdb, rpn_cache_path)
        print "finish writing props into a file for each image"
        print "and set rpn proposals path into queue"
        rpn_proposals_path = rpn_cache_path
        queue.put({'proposal_path': rpn_proposals_path})
    else:
        # Whole-dataset strategy: one pickle for all images.
        print "use whole strategy for generating props -- all images one props file"
        print "generate proposals on the imdb"
        rpn_proposals = imdb_proposals(rpn_net, imdb)
        print "write proposals to disk and send the proposal file path through the"
        print "multiprocessing queue"
        rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
        rpn_proposals_path = os.path.join(output_dir, rpn_net_name + '_proposals.pkl')
        # write into file
        with open(rpn_proposals_path, 'wb') as f:
            cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
        print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
        # put into queue
        queue.put({'proposal_path': rpn_proposals_path})
    sleep(3)
cfg.GPU_ID = args.gpu_id # RPN test settings cfg.TEST.RPN_PRE_NMS_TOP_N = -1 cfg.TEST.RPN_POST_NMS_TOP_N = 2000 print('Using config:') pprint.pprint(cfg) while not os.path.exists(args.caffemodel) and args.wait: print('Waiting for {} to exist...'.format(args.caffemodel)) time.sleep(10) caffe.set_mode_gpu() caffe.set_device(args.gpu_id) net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(args.caffemodel))[0] imdb = get_imdb(args.imdb_name) imdb_boxes, imdb_scores = imdb_proposals(net, imdb) # output_dir = os.path.dirname(args.caffemodel) output_dir = get_output_dir(imdb, net) if not os.path.exists(output_dir): os.makedirs(output_dir) rpn_file = os.path.join(output_dir, net.name + '_rpn_proposals.pkl') with open(rpn_file, 'wb') as f: cPickle.dump([imdb_boxes, imdb_scores], f, cPickle.HIGHEST_PROTOCOL) print 'Wrote RPN proposals to {}'.format(rpn_file)
if __name__ == '__main__':
    # Entry point: generate RPN proposals for args.imdb_name and write
    # them to args.output_dir/args.output_file_name.
    args = parse_args()
    if args.path_cfg is not None:
        cfg_from_file(args.path_cfg)
    cfg.GPU_ID = args.gpu_id
    print 'RPN model: {}'.format(args.path_net_weights)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    imdb = get_imdb(args.imdb_name)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)

    # Load RPN and configure output directory
    rpn_net = caffe.Net(args.path_net_proto, args.path_net_weights, caffe.TEST)
    # NOTE(review): `output_dir` is computed but never used below -- the
    # pickle goes to args.output_dir instead. Not removed here because
    # get_output_dir may create directories as a side effect; confirm
    # and clean up.
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(args.output_dir)

    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)

    # Write proposals to disk
    rpn_proposals_path = os.path.join(args.output_dir, args.output_file_name)
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
cfg.GPU_ID = args.gpu_id # RPN test settings cfg.TEST.RPN_PRE_NMS_TOP_N = -1 cfg.TEST.RPN_POST_NMS_TOP_N = 2000 print('Using config:') pprint.pprint(cfg) while not os.path.exists(args.caffemodel) and args.wait: print('Waiting for {} to exist...'.format(args.caffemodel)) time.sleep(10) caffe.set_mode_gpu() caffe.set_device(args.gpu_id) net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(args.caffemodel))[0] imdb = get_imdb(args.imdb_name) imdb_boxes = imdb_proposals(net, imdb) # output_dir = os.path.dirname(args.caffemodel) output_dir = get_output_dir(imdb, net) if not os.path.exists(output_dir): os.makedirs(output_dir) rpn_file = os.path.join(output_dir, net.name + '_rpn_proposals.pkl') with open(rpn_file, 'wb') as f: cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL) print 'Wrote RPN proposals to {}'.format(rpn_file)
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals, in resumable chunks.

    Proposals are computed `interval` images at a time and each chunk is
    checkpointed to its own pickle; a crashed run can be resumed by
    setting `start` to the first unprocessed index, in which case the
    previously written chunk pickles are reloaded and merged. The merged
    list is pickled and its path sent through `queue`.
    """
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1  # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print 'RPN model: {}'.format(rpn_model_path)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped
    # images, too. We compute them on the image once and then flip the
    # already computed proposals. This might cause a minor loss in mAP
    # (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)

    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb, None)
    print 'Output will be saved to `{:s}`'.format(output_dir)

    # Generate proposals on the imdb, `interval` images per chunk.
    # NOTE(review): `start` is edited by hand to resume a crashed run.
    start = 0
    interval = 10000
    all_proposals = []
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    for i in range(start, imdb.num_images, interval):
        # Write this chunk's proposals to its own checkpoint pickle.
        end = min(imdb.num_images, i + interval)
        rpn_proposals_path = os.path.join(
            output_dir, rpn_net_name + '_proposals_{}_{}.pkl'.format(i, end))
        rpn_proposals = imdb_proposals(rpn_net, imdb, i, end)
        all_proposals += rpn_proposals
        with open(rpn_proposals_path, 'wb') as f:
            cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
        print 'Wrote RPN proposals [{},{}) to {}'.format(
            i, end, rpn_proposals_path)

    # Reload chunks [0, start) that a previous (interrupted) run wrote.
    for i in range(0, start, interval):
        end = min(imdb.num_images, i + interval)
        # NOTE(review): hard-coded boundary from one specific past run;
        # this only makes sense for a dataset of 333474 images.
        if i == 330000:
            end = 333474
        rpn_proposals_path = os.path.join(
            output_dir, rpn_net_name + '_proposals_{}_{}.pkl'.format(i, end))
        print 'Loading previous RPN proposals from {}'.format(
            rpn_proposals_path)
        with open(rpn_proposals_path, 'rb') as f:
            this_proposal = cPickle.load(f)
        # NOTE(review): prepending each successive chunk reverses the
        # order of the reloaded chunks relative to image index order --
        # verify downstream consumers do not depend on ordering.
        all_proposals = this_proposal + all_proposals

    # Merge everything into the final proposals pickle and announce it.
    # NOTE(review): queue.put is unguarded; the default queue=None would
    # raise AttributeError -- confirm this always runs as a worker.
    rpn_proposals_path = os.path.join(output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(all_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
    queue.put({'proposal_path': rpn_proposals_path})
pprint.pprint(cfg) ''' while not os.path.exists(args.caffemodel) and args.wait: print('Waiting for {} to exist...'.format(args.caffemodel)) time.sleep(10) ''' caffe.set_mode_gpu() caffe.set_device(args.gpu_id) net = caffe.Net(prototxt,caffemodel, caffe.TEST) print '\n\nLoaded network {:s}'.format(caffemodel) net.name = os.path.splitext(os.path.basename(caffemodel))[0] imdb = get_imdb(args.imdb_name) #imdb.competition_mode(args.comp_mode) boxes,score,image = imdb_proposals(net, imdb) print 'finish detection' ''' rpn_file = 'rpn_proposals.pkl' with open(rpn_file, 'wb') as f: cPickle.dump(boxes, f, cPickle.HIGHEST_PROTOCOL) cPickle.dump(score, f, cPickle.HIGHEST_PROTOCOL) cPickle.dump(image, f, cPickle.HIGHEST_PROTOCOL) ''' filename='/home/shouyang/kitti/results/comp4-guo_det_kitti_test_car.txt' with open(filename, 'wt') as f: for i in range(len(image)): for j in range(len(score[i])): f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'. format(image[i], score[i][j][0], boxes[i][j][0] + 1, boxes[i][j][1] + 1,