def main(args_list):
    """Train a Fast/Faster R-CNN network.

    Parses command-line arguments, applies config overrides, optionally fixes
    the random seeds for reproducibility, sets up caffe in GPU mode, loads the
    training roidb and runs ``train_net``.

    Args:
        args_list: list of command-line argument strings passed to parse_args.
    """
    # FIX: the original mixed Python-2 `print` statements with `print()` calls;
    # normalized to the function form (all calls are single-argument, so output
    # is identical under both Python 2 and Python 3).
    args = parse_args(args_list)

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    # NOTE(review): this script's parser apparently exposes `GPU_ID` (upper
    # case) while sibling scripts use `gpu_id` — confirm against parse_args.
    cfg.GPU_ID = args.GPU_ID

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    print('Setting GPU device %d for training' % cfg.GPU_ID)
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)

    imdb, roidb = combined_roidb(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))

    train_net(args.solver, roidb, output_dir,
              pretrained_model=args.pretrained_model,
              max_iters=args.max_iters)
def main(args_list):
    """Evaluate a trained detection network on an imdb.

    Applies config overrides, optionally waits for the snapshot file to
    appear, loads the net in GPU test mode and hands it to ``test_net``.

    Args:
        args_list: list of command-line argument strings passed to parse_args.
    """
    args = parse_args(args_list)

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    # Optionally block until the caffemodel snapshot shows up on disk
    # (useful when testing runs concurrently with training).
    while not os.path.exists(args.caffemodel) and args.wait:
        print('Waiting for {} to exist...'.format(args.caffemodel))
        time.sleep(10)

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    # Load the net and name it after the snapshot file (used in output paths).
    model = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
    model.name = os.path.splitext(os.path.basename(args.caffemodel))[0]

    dataset = get_imdb(args.imdb_name)
    dataset.competition_mode(args.comp_mode)
    if not cfg.TEST.HAS_RPN:
        dataset.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)

    test_net(model, dataset, max_per_image=args.max_per_image, vis=args.vis)
roidbs = [get_roidb(s) for s in imdb_names.split('+')] roidb = roidbs[0] if len(roidbs) > 1: for r in roidbs[1:]: roidb.extend(r) imdb = datasets.imdb.imdb(imdb_names) else: imdb = get_imdb(imdb_names) return imdb, roidb if __name__ == '__main__': if CFG_FILE is not None: cfg_from_file(CFG_FILE) if SET_CFGS is not None: cfg_from_list(SET_CFGS) print('Using config:') pprint.pprint(cfg) if not RANDOMIZE: # fix the random seeds (numpy and caffe) for reproducibility np.random.seed(cfg.RNG_SEED) caffe.set_random_seed(cfg.RNG_SEED) # set up caffe caffe.set_mode_cpu() imdb, roidb = combined_roidb(IMDB_NAME) print '{:d} roidb entries'.format(len(roidb))
roidb.extend(r) imdb = datasets.imdb.imdb(imdb_names) else: imdb = get_imdb(imdb_names) return imdb, roidb if __name__ == '__main__': args = parse_args() print('Called with args:') print(args) if args.cfg_file is not None: cfg_from_file(args.cfg_file) if args.set_cfgs is not None: cfg_from_list(args.set_cfgs) cfg.GPU_ID = args.gpu_id print('Using config:') pprint.pprint(cfg) if not args.randomize: # fix the random seeds (numpy and caffe) for reproducibility np.random.seed(cfg.RNG_SEED) caffe.set_random_seed(cfg.RNG_SEED) # set up caffe #caffe.set_mode_gpu() caffe.set_mode_cpu() # caffe.set_device(args.gpu_id)
def main(args):
    """Person-search evaluation: detect gallery people, extract features,
    extract probe features, then evaluate detection and search performance.

    Runs under MPI; each rank processes a slice of the images on its own GPU,
    rank 0 collects results, saves pickles and performs the evaluation.

    Args:
        args: parsed command-line namespace (cfg_file, gpus, caffemodel, ...).
    """
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    # Parse the GPU list and pick the one assigned to this MPI rank.
    # FIX: map() returns a lazy, non-indexable iterator on Python 3; wrap in
    # list() so `gpus[mpi_rank]` works (behavior-identical on Python 2).
    gpus = list(map(int, args.gpus.split(',')))
    assert len(gpus) >= mpi_size, "Number of GPUs must be >= MPI size"
    cfg.GPU_ID = gpus[mpi_rank]

    # Feature blob names to extract from the net.
    blob_names = args.blob_names.split(',')

    print('Using config:')
    pprint.pprint(cfg)

    # Optionally block until the snapshot exists on disk.
    while not osp.exists(args.caffemodel) and args.wait:
        print('Waiting for {} to exist...'.format(args.caffemodel))
        time.sleep(10)

    # Load imdb.
    imdb = get_imdb(args.imdb_name)
    root_dir = imdb._root_dir        # NOTE(review): unused in this function
    images_dir = imdb._data_path     # NOTE(review): unused in this function
    output_dir = get_output_dir(imdb.name,
                                osp.splitext(osp.basename(args.caffemodel))[0])

    if args.eval_only:
        # Re-use previously extracted detections/features from disk.
        def _load(fname):
            fpath = osp.join(output_dir, fname)
            assert osp.isfile(fpath), "Must have extracted detections and " \
                                      "features first before evaluation"
            return unpickle(fpath)
        if mpi_rank == 0:
            gboxes = _load('gallery_detections.pkl')
            gfeatures = _load('gallery_features.pkl')
            pfeatures = _load('probe_features.pkl')
    else:
        # Setup caffe.
        caffe.mpi_init()
        caffe.set_mode_gpu()
        caffe.set_device(cfg.GPU_ID)

        # 1. Detect and extract features from all the gallery images in the imdb.
        start, end = mpi_dispatch(len(imdb.image_index), mpi_size, mpi_rank)
        if args.use_gt:
            net = caffe.Net(args.probe_def, args.caffemodel, caffe.TEST)
            gboxes, gfeatures = usegt_and_exfeat(net, imdb,
                                                 start=start, end=end,
                                                 blob_names=blob_names)
        else:
            net = caffe.Net(args.gallery_def, args.caffemodel, caffe.TEST)
            gboxes, gfeatures = detect_and_exfeat(net, imdb,
                                                  start=start, end=end,
                                                  blob_names=blob_names)
        gboxes = mpi_collect(mpi_comm, mpi_rank, gboxes)
        gfeatures = mpi_collect(mpi_comm, mpi_rank, gfeatures)
        del net  # to release the cudnn conv static workspace

        # 2. Only extract features from given probe rois.
        start, end = mpi_dispatch(len(imdb.probes), mpi_size, mpi_rank)
        net = caffe.Net(args.probe_def, args.caffemodel, caffe.TEST)
        pfeatures = exfeat(net, imdb.probes,
                           start=start, end=end, blob_names=blob_names)
        pfeatures = mpi_collect(mpi_comm, mpi_rank, pfeatures)
        del net

        # Save (rank 0 holds the collected results).
        if mpi_rank == 0:
            pickle(gboxes, osp.join(output_dir, 'gallery_detections.pkl'))
            pickle(gfeatures, osp.join(output_dir, 'gallery_features.pkl'))
            pickle(pfeatures, osp.join(output_dir, 'probe_features.pkl'))

    # Evaluate.
    if mpi_rank == 0:
        imdb.evaluate_detections(gboxes, det_thresh=args.det_thresh)
        imdb.evaluate_detections(gboxes, det_thresh=args.det_thresh,
                                 labeled_only=True)
        imdb.evaluate_search(gboxes, gfeatures['feat'], pfeatures['feat'],
                             det_thresh=args.det_thresh,
                             gallery_size=args.gallery_size,
                             dump_json=osp.join(output_dir, 'results.json'))
def main(args):
    """Person-search demo: match a fixed query person against gallery images.

    Extracts a feature for the hard-coded query crop, detects people in every
    ``demo/gallery*.jpg`` image, scores them by cosine similarity to the query
    and saves/visualizes annotated result images.

    Args:
        args: parsed command-line namespace (gpu, probe_def, gallery_def, ...).
    """
    # FIX: the original mixed Python-2 comma-style `print a, b` statements with
    # `print()` calls; rewritten with .format() so the emitted text is
    # byte-identical under both Python 2 and Python 3.
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    # Setup caffe
    if args.gpu >= 0:
        caffe.mpi_init()
        caffe.set_mode_gpu()
        # NOTE(review): device comes from cfg.GPU_ID rather than args.gpu —
        # confirm cfg.GPU_ID is set elsewhere from the command line.
        caffe.set_device(cfg.GPU_ID)
    else:
        caffe.mpi_init()
        caffe.set_mode_cpu()

    # Get query image and roi
    query_img = 'demo/query.jpg'
    query_roi = [0, 0, 466, 943]  # [x1, y1, x2, y2]

    # Extract feature of the query person
    net = caffe.Net(args.probe_def, args.caffemodel, caffe.TEST)
    query_feat = demo_exfeat(net, query_img, query_roi)
    del net  # Necessary to release cuDNN conv static workspace

    # Get gallery images
    gallery_imgs = sorted(glob('demo/gallery*.jpg'))

    # Detect and extract feature of persons in each gallery image
    net = caffe.Net(args.gallery_def, args.caffemodel, caffe.TEST)

    # Necessary to warm-up the net, otherwise the first image results are wrong
    # Don't know why. Possibly a bug in caffe's memory optimization.
    # Nevertheless, the results are correct after this warm-up.
    demo_detect(net, query_img)

    for gallery_img in gallery_imgs:
        print('{} ...'.format(gallery_img))
        boxes, features = demo_detect(net, gallery_img,
                                      threshold=args.det_thresh)
        if boxes is None:
            print('{} no detections'.format(gallery_img))
            continue

        # Compute pairwise cosine similarities,
        # equals to inner-products, as features are already L2-normed
        similarities = features.dot(query_feat)

        # Visualize the results
        fig, ax = plt.subplots(figsize=(16, 9))
        ax.imshow(plt.imread(gallery_img))
        plt.axis('off')
        for box, sim in zip(boxes, similarities):
            x1, y1, x2, y2, _ = box
            # Green box with a thin white inner border for visibility.
            ax.add_patch(
                plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                              fill=False, edgecolor='#4CAF50', linewidth=3.5))
            ax.add_patch(
                plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                              fill=False, edgecolor='white', linewidth=1))
            ax.text(x1 + 5, y1 - 18, '{:.2f}'.format(sim),
                    bbox=dict(facecolor='#4CAF50', linewidth=0),
                    fontsize=20, color='white')
        plt.tight_layout()
        fig.savefig(gallery_img.replace('gallery', 'result'))
        plt.show()
        plt.close(fig)
    del net
sys.exit(1) args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() print('Called with args:') print(args) if args.cfg_file is not None: cfg_from_file(args.cfg_file) if args.set_cfgs is not None: cfg_from_list(args.set_cfgs) cfg.GPU_ID = args.gpu_id print('Using config:') pprint.pprint(cfg) while not os.path.exists(args.caffemodel) and args.wait: print('Waiting for {} to exist...'.format(args.caffemodel)) time.sleep(10) caffe.set_mode_gpu() caffe.set_device(args.gpu_id) net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
'fetch_faster_rcnn_models.sh?').format(caffemodel)) if args.cpu_mode: caffe.set_mode_cpu() else: caffe.set_mode_gpu() caffe.set_device(args.gpu_id) cfg.GPU_ID = args.gpu_id net = caffe.Net(prototxt, caffemodel, caffe.TEST) print '\n\nLoaded network {:s}'.format(caffemodel) import datetime nowStr = datetime.datetime.now().strftime('%Y-%m-%d_%Hh%Mm') resDir = os.path.join(cfg.ROOT_DIR, 'output', 'kaistv2_2015_test20', nowStr, caffemodel.split('/')[-1].split('.')[0]) if not os.path.exists(resDir): os.makedirs(resDir) from datasets.kaistv2 import kaistv2 imdb = kaistv2('test20', '2015') roidb = imdb.roidb cfg_from_list(['TEST.SCALES', '[960]']) # for ii in range(5): import numpy.random as npr for ii in npr.choice(imdb.num_images, size=(20), replace=False): roidb[ii]['image'] = imdb.image_path_at(ii) demo(net, roidb[ii], args.conf_thres, args.nms_thres, resDir)
def _print_stage_banner(title):
    """Print a stage banner in the same ~~~ format as the original script."""
    sep = '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print(sep)
    print(title)
    print(sep)


def _run_stage(target, mp_kwargs):
    """Run *target* in a separate process and return the message it queues.

    Pycaffe doesn't reliably free GPU memory when instantiated nets are
    discarded (e.g. "del net" in Python code). To work around this issue, each
    training stage is executed in a separate process using
    multiprocessing.Process; the result comes back via mp_kwargs['queue'].
    """
    p = mp.Process(target=target, kwargs=mp_kwargs)
    p.start()
    result = mp_kwargs['queue'].get()
    p.join()
    return result


def main():
    """Run the 4-stage Faster R-CNN alternating-optimization training pipeline.

    Stages: 1) train RPN, generate proposals; 2) train Fast R-CNN on them;
    3) retrain RPN from the Fast R-CNN weights, regenerate proposals;
    4) retrain Fast R-CNN; finally copy the last snapshot as the final model.
    """
    # FIX: Python-2 `print` statements normalized to print() calls; the
    # heavily duplicated spawn/get/join boilerplate is factored into
    # _run_stage; the unused `netdir` local is dropped via os.path.basename.
    args = parse_args()

    print('Called with args:')
    print(args)

    # NOTE(review): unlike sibling scripts there is no None-check here, so a
    # cfg file is effectively required — confirm parse_args enforces it.
    cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    # Final model is named after the network directory's basename.
    net_name = os.path.basename(args.net)

    # queue for communicated results between processes
    mp_queue = mp.Queue()
    # solvers, iters, etc. for each training stage
    solvers, max_iters, rpn_test_prototxt = get_solvers(args.net)

    _print_stage_banner('Stage 1 RPN, init from ImageNet model')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    rpn_stage1_out = _run_stage(train_rpn, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[0],
        max_iters=max_iters[0],
        cfg=cfg))

    _print_stage_banner('Stage 1 RPN, generate proposals')
    rpn_stage1_out['proposal_path'] = _run_stage(rpn_generate, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        rpn_model_path=str(rpn_stage1_out['model_path']),
        cfg=cfg,
        rpn_test_prototxt=rpn_test_prototxt))['proposal_path']

    _print_stage_banner('Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    fast_rcnn_stage1_out = _run_stage(train_fast_rcnn, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[1],
        max_iters=max_iters[1],
        cfg=cfg,
        rpn_file=rpn_stage1_out['proposal_path']))

    _print_stage_banner('Stage 2 RPN, init from stage 1 Fast R-CNN model')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    rpn_stage2_out = _run_stage(train_rpn, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(fast_rcnn_stage1_out['model_path']),
        solver=solvers[2],
        max_iters=max_iters[2],
        cfg=cfg))

    _print_stage_banner('Stage 2 RPN, generate proposals')
    rpn_stage2_out['proposal_path'] = _run_stage(rpn_generate, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        rpn_model_path=str(rpn_stage2_out['model_path']),
        cfg=cfg,
        rpn_test_prototxt=rpn_test_prototxt))['proposal_path']

    _print_stage_banner('Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    fast_rcnn_stage2_out = _run_stage(train_fast_rcnn, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(rpn_stage2_out['model_path']),
        solver=solvers[3],
        max_iters=max_iters[3],
        cfg=cfg,
        rpn_file=rpn_stage2_out['proposal_path']))

    # Create final model (just a copy of the last stage)
    final_path = os.path.join(
        os.path.dirname(fast_rcnn_stage2_out['model_path']),
        net_name + '_faster_rcnn_final.caffemodel')
    print('cp {} -> {}'.format(fast_rcnn_stage2_out['model_path'], final_path))
    shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
    print('Final model: {}'.format(final_path))
def _update_train_status(sqlstr):
    """Open a fresh MySQL connection, run one statement, commit and close.

    A new connection is opened per statement because a long-running training
    job can outlive the server-side connection timeout.
    """
    # NOTE(review): host/credentials are hard-coded; move to configuration.
    dbconn = sql.MySQLConnection('192.168.1.90', 'test', 'test', 'zb_label')
    dbconn.connect()
    dbconn.query(sqlstr)
    dbconn.commit()
    dbconn.close()


def main():
    """Prepare one training instance, launch training and record its status
    (2 = running, 3 = finished, -1 = failed) in the zb_train table."""
    # FIX: the original except-handler built `sqlerrstr` (status = -1) but
    # executed `sqlstr` (status = 3), so failed runs were recorded as
    # successes. Also replaced Python-2-only `except Exception, e`, `!= None`
    # and `== False` with forms valid on both Python 2 and 3, and factored
    # the repeated connect/query/commit/close sequence into a helper.
    args = parse_args()
    ins_id = args.id
    instance = getTrainingInstance(ins_id)
    assert instance is not None
    generateCFG(instance)
    cfg_from_file(instance.cfg_file)
    if instance.set_cfgs is not None:
        cfg_from_list(instance.set_cfgs)

    # Generate model if it does not exist yet.
    # NOTE(review): nesting reconstructed from a collapsed source — confirm
    # which of these statements are conditional on `instance.net == ''`.
    if instance.net == '':
        instance.setDefaultNet()
    models_path = os.path.join(instance.devkit, 'models')
    if os.path.exists(models_path):
        os.popen('rm -rf {}'.format(models_path))
    print('Generating model ' + models_path)
    generate_custom_net.main(instance.cls_num, models_path,
                             instance.steps, instance.lr)

    # Copy net_def to devkit
    net_src = os.path.join(instance.devkit, instance.net,
                           "faster_rcnn_alt_opt", "faster_rcnn_test.pt")
    net_dst = os.path.join(instance.devkit, "results")
    if not os.path.exists(net_dst):
        os.makedirs(net_dst)
    print('Copying {} to {}'.format(net_src, net_dst))
    shutil.copy(net_src, net_dst)

    # Generate factory.py
    generateFactory(instance)

    # Generate train.sh
    import generate_train_sh
    generate_train_sh.main(instance)

    # Make symbolic link to VOCCode
    generateVOCCode(instance.devkit)

    if not instance.validate():
        print('Error in training instance.')
        exit(1)

    # NOTE(review): SQL is built by string formatting; ins_id comes from the
    # command line — ensure it is an integer or use parameterized queries.
    _update_train_status(
        'update zb_train set status = 2 where id = {}'.format(ins_id))

    # Start training
    try:
        os.system('experiments/scripts/train.sh')
        acc_rate = getAccuracy(ins_id)
        acc_str = json.dumps(acc_rate)  # currently unused; kept for parity
        # prev dbconn may be timed out and closed by the server, so the helper
        # reconnects for every statement.
        _update_train_status(
            'update zb_train set status = 3 where id = {}'.format(ins_id))
    except Exception as e:
        # Record the failure (status = -1) — see BUG FIX note above.
        _update_train_status(
            'update zb_train set status = -1 where id = {}'.format(ins_id))
        print(e)
imdb = datasets.imdb.imdb(imdb_names) else: imdb = get_imdb(imdb_names) return imdb, roidb if __name__ == '__main__': cfg_file = os.path.join(cfg.ROOT_DIR, 'experiments/cfgs/faster_rcnn_end2end.yml') set_cfgs = None imdb_name = 'voc_2007_trainval' if cfg_file is not None: cfg_from_file(cfg_file) if set_cfgs is not None: cfg_from_list(set_cfgs) np.random.seed(cfg.RNG_SEED) caffe.set_random_seed(cfg.RNG_SEED) # set up caffe caffe.set_mode_cpu() #caffe.set_device(0) imdb, roidb = combined_roidb(imdb_name) print '{:d} roidb entries'.format(len(roidb)) output_dir = 'output/my_output' print 'Output will be saved to `{:s}`'.format(output_dir) solver = os.path.join(cfg.ROOT_DIR,
def _stage_banner(title):
    """Print a stage banner in the same ~~~ format as the original script."""
    sep = '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print(sep)
    print(title)
    print(sep)


def _run_in_process(target, mp_kwargs):
    """Run *target* in its own process and return the message it queues.

    Pycaffe doesn't reliably free GPU memory when instantiated nets are
    discarded (e.g. "del net" in Python code). To work around this issue, each
    training stage is executed in a separate process using
    multiprocessing.Process; the result comes back via mp_kwargs['queue'].
    """
    p = mp.Process(target=target, kwargs=mp_kwargs)
    p.start()
    result = mp_kwargs['queue'].get()
    p.join()
    return result


def main(args_list):
    """Run the 4-stage Faster R-CNN alternating-optimization pipeline.

    Args:
        args_list: list of command-line argument strings passed to parse_args.
    """
    # FIX: Python-2 `print` statements normalized to print() calls; the
    # duplicated spawn/get/join boilerplate is factored into _run_in_process.
    args = parse_args(args_list)

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id

    # queue for communicated results between processes
    mp_queue = mp.Queue()
    # solvers, iters, etc. for each training stage
    assert args.net_name is not None
    solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)

    _stage_banner('Stage 1 RPN, init from ImageNet model')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    rpn_stage1_out = _run_in_process(train_rpn, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[0],
        max_iters=max_iters[0],
        cfg=cfg))

    _stage_banner('Stage 1 RPN, generate proposals')
    rpn_stage1_out['proposal_path'] = _run_in_process(rpn_generate, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        rpn_model_path=str(rpn_stage1_out['model_path']),
        cfg=cfg,
        rpn_test_prototxt=rpn_test_prototxt))['proposal_path']

    _stage_banner('Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    fast_rcnn_stage1_out = _run_in_process(train_fast_rcnn, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[1],
        max_iters=max_iters[1],
        cfg=cfg,
        rpn_file=rpn_stage1_out['proposal_path']))

    _stage_banner('Stage 2 RPN, init from stage 1 Fast R-CNN model')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    rpn_stage2_out = _run_in_process(train_rpn, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(fast_rcnn_stage1_out['model_path']),
        solver=solvers[2],
        max_iters=max_iters[2],
        cfg=cfg))

    _stage_banner('Stage 2 RPN, generate proposals')
    rpn_stage2_out['proposal_path'] = _run_in_process(rpn_generate, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        rpn_model_path=str(rpn_stage2_out['model_path']),
        cfg=cfg,
        rpn_test_prototxt=rpn_test_prototxt))['proposal_path']

    _stage_banner('Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    fast_rcnn_stage2_out = _run_in_process(train_fast_rcnn, dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(rpn_stage2_out['model_path']),
        solver=solvers[3],
        max_iters=max_iters[3],
        cfg=cfg,
        rpn_file=rpn_stage2_out['proposal_path']))

    # Create final model (just a copy of the last stage)
    final_path = os.path.join(
        os.path.dirname(fast_rcnn_stage2_out['model_path']),
        args.net_name + '_faster_rcnn_final.caffemodel')
    print('cp {} -> {}'.format(fast_rcnn_stage2_out['model_path'], final_path))
    shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
    print('Final model: {}'.format(final_path))
def get_dimensions(file_):
    """Return the (width, height) size tuple of the image at *file_*."""
    with Image.open(file_) as im:
        return im.size


if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    # Apply config overrides from file / command line.
    if args.cfg_file1 is not None:
        cfg_from_file(args.cfg_file1)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id

    # Optionally block until the caffemodel snapshot appears on disk.
    while not os.path.exists(args.caffemodel1) and args.wait:
        print('Waiting for {} to exist...'.format(args.caffemodel1))
        time.sleep(10)

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    #get test image filenames
    imageSet_test = args.images
    test_files = get_files( imageSet_test ) #['/home/ubuntu/try1/data/Images/g8_t1_up/g6010001.jpg']
raise IOError(('{:s} not found.\nDid you run ./data/script/' 'fetch_faster_rcnn_models.sh?').format(caffemodel)) if args.cpu_mode: caffe.set_mode_cpu() else: caffe.set_mode_gpu() caffe.set_device(args.gpu_id) cfg.GPU_ID = args.gpu_id net = caffe.Net(prototxt, caffemodel, caffe.TEST) print '\n\nLoaded network {:s}'.format(caffemodel) import datetime nowStr = datetime.datetime.now().strftime('%Y-%m-%d_%Hh%Mm') resDir = os.path.join(cfg.ROOT_DIR, 'output', 'kaist_2015_test20', nowStr, caffemodel.split('/')[-1].split('.')[0]) if not os.path.exists(resDir): os.makedirs(resDir) from datasets.kaist import kaist imdb = kaist('test20', '2015') roidb = imdb.roidb cfg_from_list(['TEST.SCALES', '[1024]']) # for ii in range(5): import numpy.random as npr for ii in npr.choice(imdb.num_images, size=(5), replace=False): roidb[ii]['image'] = imdb.image_path_at(ii) demo(net, roidb[ii], args.conf_thres, args.nms_thres, resDir)