def multi_gpu_generate_rpn_on_dataset(
    weights_file, dataset_name, _proposal_file_ignored, num_images, output_dir
):
    """Generate RPN proposals over a dataset using multiple GPUs.

    Fans inference out to one 'test_net' subprocess per GPU, collates the
    per-process proposal dicts, and pickles the merged result (plus the
    config dump) to <output_dir>/rpn_proposals.pkl.

    Args:
        weights_file: model weights path, forwarded via TEST.WEIGHTS.
        dataset_name: dataset identifier, forwarded via TEST.DATASETS.
        _proposal_file_ignored: unused; kept for signature parity with the
            detection variant.
        num_images: total image count to split across subprocesses.
        output_dir: directory for subprocess outputs and the merged pickle.

    Returns:
        Tuple (boxes, scores, ids, rpn_file) of collated lists and the path
        of the saved pickle.
    """
    # Locate the 'test_net' executable under the runtime directory.
    runtime_dir = envu.get_runtime_dir()
    bin_path = os.path.join(runtime_dir, 'test_net' + envu.get_py_bin_ext())
    assert os.path.exists(bin_path), 'Binary \'{}\' not found'.format(bin_path)
    # Forward the target dataset and weights on the subprocess command line.
    cmdline_opts = [
        'TEST.DATASETS', '("{}",)'.format(dataset_name),
        'TEST.WEIGHTS', weights_file,
    ]
    # One subprocess per GPU; each returns a dict of proposals.
    outputs = subprocess_utils.process_in_parallel(
        'rpn_proposals', num_images, bin_path, output_dir, cmdline_opts
    )
    # Merge the per-subprocess results into flat lists.
    boxes, scores, ids = [], [], []
    for rpn_data in outputs:
        boxes.extend(rpn_data['boxes'])
        scores.extend(rpn_data['scores'])
        ids.extend(rpn_data['ids'])
    rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')
    # Persist proposals alongside the config they were produced with.
    save_object(
        dict(boxes=boxes, scores=scores, ids=ids, cfg=envu.yaml_dump(cfg)),
        rpn_file
    )
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
def multi_gpu_generate_rpn_on_dataset(
    weights_file, dataset_name, _proposal_file_ignored, num_images, output_dir
):
    """Multi-GPU RPN proposal generation over a dataset.

    Splits *num_images* across parallel 'test_net' subprocesses, gathers
    each subprocess's proposal dict, and writes the combined proposals
    (with a YAML dump of the config) to <output_dir>/rpn_proposals.pkl.

    Returns:
        (boxes, scores, ids, rpn_file): the collated proposal lists and the
        path of the pickle written.
    """
    # Resolve the path of the 'test_net' binary and make sure it exists.
    exe = os.path.join(
        envu.get_runtime_dir(), 'test_net' + envu.get_py_bin_ext()
    )
    assert os.path.exists(exe), 'Binary \'{}\' not found'.format(exe)
    # Dataset and weights are passed to each subprocess via CLI overrides.
    overrides = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    overrides += ['TEST.WEIGHTS', weights_file]
    # Launch the parallel workers and collect their outputs.
    worker_outputs = subprocess_utils.process_in_parallel(
        'rpn_proposals', num_images, exe, output_dir, overrides
    )
    # Concatenate the per-worker lists.
    boxes = []
    scores = []
    ids = []
    for chunk in worker_outputs:
        boxes += chunk['boxes']
        scores += chunk['scores']
        ids += chunk['ids']
    rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file
    )
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
def multi_gpu_test_net_on_dataset(
    weights_file, dataset_name, proposal_file, num_images, output_dir
):
    """Multi-gpu detection inference on a dataset.

    If <output_dir>/detections.pkl already exists, the cached results are
    loaded and returned without re-running inference. Otherwise inference
    is fanned out to parallel 'test_net' subprocesses, the per-class
    results are collated, and the merged dict is saved to detections.pkl.

    Args:
        weights_file: model weights path, forwarded via TEST.WEIGHTS.
        dataset_name: dataset identifier, forwarded via TEST.DATASETS.
        proposal_file: optional proposals path, forwarded via
            TEST.PROPOSAL_FILES when truthy.
        num_images: total image count to split across subprocesses.
        output_dir: directory for subprocess outputs and detections.pkl.

    Returns:
        Tuple (all_boxes, all_segms, all_keyps, all_bodys); each is a
        per-class list indexed by class id (index 0, background, stays
        empty — collation starts at class 1).
    """
    binary_dir = envu.get_runtime_dir()
    binary_ext = envu.get_py_bin_ext()
    binary = os.path.join(binary_dir, 'test_net' + binary_ext)
    assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
    # Pass the target dataset and proposal file (if any) via the command line
    opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    opts += ['TEST.WEIGHTS', weights_file]
    if proposal_file:
        opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
    # Reuse cached detections when present. Fix: the original did
    # cPickle.load(open(path)) — a text-mode handle that was never closed;
    # open in binary mode under a context manager instead.
    det_file = os.path.join(output_dir, 'detections.pkl')
    if os.path.exists(det_file):
        with open(det_file, 'rb') as f:
            out_result = cPickle.load(f)
        return (
            out_result['all_boxes'],
            out_result['all_segms'],
            out_result['all_keyps'],
            out_result['all_bodys'],
        )
    # Run inference in parallel in subprocesses. Outputs is a list of the
    # dictionaries saved by test_net() in each subprocess.
    outputs = subprocess_utils.process_in_parallel(
        'detection', num_images, binary, output_dir, opts
    )
    # Collate the per-class results from each subprocess.
    all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_bodys = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    for det_data in outputs:
        all_boxes_batch = det_data['all_boxes']
        all_segms_batch = det_data['all_segms']
        all_keyps_batch = det_data['all_keyps']
        all_bodys_batch = det_data['all_bodys']
        for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
            all_boxes[cls_idx] += all_boxes_batch[cls_idx]
            all_segms[cls_idx] += all_segms_batch[cls_idx]
            all_keyps[cls_idx] += all_keyps_batch[cls_idx]
            all_bodys[cls_idx] += all_bodys_batch[cls_idx]
    # Save the merged detections together with the config used.
    cfg_yaml = yaml.dump(cfg)
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            all_bodys=all_bodys,
            cfg=cfg_yaml
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps, all_bodys
def multi_gpu_test_net_on_dataset(
    weights_file, dataset_name, proposal_file, num_images, output_dir
):
    """Multi-GPU detection inference on a dataset.

    Launches parallel 'test_net' subprocesses, collates their per-class
    detection outputs, and saves the merged dict (boxes/segms/keyps/bodys
    plus a YAML dump of the config) to <output_dir>/detections.pkl.

    Returns:
        (all_boxes, all_segms, all_keyps, all_bodys); each is indexed by
        class id, with index 0 (background) left empty since collation
        starts at class 1.
    """
    # Find the 'test_net' executable in the runtime directory.
    exe = os.path.join(
        envu.get_runtime_dir(), 'test_net' + envu.get_py_bin_ext()
    )
    assert os.path.exists(exe), 'Binary \'{}\' not found'.format(exe)
    # Build the CLI config overrides for the subprocesses.
    overrides = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
    overrides += ['TEST.WEIGHTS', weights_file]
    if proposal_file:
        overrides += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
    # Each subprocess returns the dictionary that test_net() saved.
    worker_outputs = subprocess_utils.process_in_parallel(
        'detection', num_images, exe, output_dir, overrides
    )
    # Accumulate per-class lists; one bucket per class for each result kind.
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes = [[] for _ in range(num_classes)]
    all_segms = [[] for _ in range(num_classes)]
    all_keyps = [[] for _ in range(num_classes)]
    all_bodys = [[] for _ in range(num_classes)]
    for chunk in worker_outputs:
        for j in range(1, num_classes):
            all_boxes[j] += chunk['all_boxes'][j]
            all_segms[j] += chunk['all_segms'][j]
            all_keyps[j] += chunk['all_keyps'][j]
            all_bodys[j] += chunk['all_bodys'][j]
    det_file = os.path.join(output_dir, 'detections.pkl')
    save_object(
        dict(
            all_boxes=all_boxes,
            all_segms=all_segms,
            all_keyps=all_keyps,
            all_bodys=all_bodys,
            cfg=yaml.dump(cfg)
        ), det_file
    )
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps, all_bodys
def multi_gpu_test_cls_net_on_dataset(num_images, output_dir):
    """Multi-GPU classification inference on a dataset.

    Runs parallel 'test_net' subprocesses and collates their per-class
    score lists ('all_scores') into one list per class.

    Args:
        num_images: total image count to split across subprocesses.
        output_dir: directory for subprocess outputs.

    Returns:
        all_cls: per-class list of scores, indexed by class id; index 0
        (background) stays empty since collation starts at class 1.
    """
    # Resolve and verify the 'test_net' binary.
    exe = os.path.join(
        envu.get_runtime_dir(), 'test_net' + envu.get_py_bin_ext()
    )
    assert os.path.exists(exe), 'Binary \'{}\' not found'.format(exe)
    logger.info('start to classify')
    # NOTE(review): unlike the detection/RPN variants, no CLI `opts` are
    # passed here — confirm process_in_parallel defaults them.
    worker_outputs = subprocess_utils.process_in_parallel(
        'detection', num_images, exe, output_dir
    )
    # Merge per-class score lists from each subprocess.
    num_classes = cfg.MODEL.NUM_CLASSES
    all_cls = [[] for _ in range(num_classes)]
    for chunk in worker_outputs:
        scores_batch = chunk['all_scores']
        for j in range(1, num_classes):
            all_cls[j] += scores_batch[j]
    return all_cls