def extract_feat_faster_start(args, cfg):
    """Launch multi-GPU feature extraction using the 'faster' extractor.

    Splits the images found in ``args.image_dir`` round-robin across the
    GPUs listed in ``args.gpu_id`` and runs one ``extract_feat_faster``
    ray task per GPU, driving a shared progress-bar actor until done.

    Args:
        args: parsed CLI namespace; reads ``gpu_id`` (comma-separated GPU
            ids), ``num_cpus`` (0 means "no ray CPU limit") and
            ``image_dir``.
        cfg: frozen detectron2-style config node, forwarded to the workers.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    num_gpus = len(args.gpu_id.split(','))
    # NOTE(review): MIN_BOXES / MAX_BOXES / CONF_THRESH were read from cfg
    # here but never used in this function; removed as dead locals (the
    # workers read them from cfg themselves).

    # Extract features.
    imglist = os.listdir(args.image_dir)
    num_images = len(imglist)
    print('Number of images: {}.'.format(num_images))

    if args.num_cpus != 0:
        ray.init(num_cpus=args.num_cpus)
    else:
        ray.init()

    # Round-robin split so each GPU receives an (almost) equal share.
    img_lists = [imglist[i::num_gpus] for i in range(num_gpus)]

    pb = ProgressBar(len(imglist))
    actor = pb.actor

    print('Number of GPUs: {}.'.format(num_gpus))
    extract_feat_list = [
        extract_feat_faster.remote(i, img_lists[i], cfg, args, actor)
        for i in range(num_gpus)
    ]

    pb.print_until_done()
    ray.get(extract_feat_list)
    ray.get(actor.get_counter.remote())
def extract_image_feature(args):
    """Set up the config for image-feature extraction and run it on all GPUs.

    Builds the config from ``args``, points the model weights at
    ``args.image_model``, then splits the images in ``args.image_dir``
    across the GPUs in ``args.gpu_id`` and runs one ``extract_feat`` ray
    task per GPU with a shared progress-bar actor.

    Args:
        args: parsed CLI namespace; reads ``output_dir``, ``image_model``,
            ``gpu_id``, ``num_cpus`` and ``image_dir``.
    """
    # Change configs to be used.
    check_dirs(args.output_dir)
    cfg = setup(args)
    cfg.defrost()
    cfg.MODEL.WEIGHTS = args.image_model
    # The logger path is set up by default_setup(cfg, args) inside
    # setup(args), so cfg.OUTPUT_DIR does not need overriding here.
    cfg.freeze()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    num_gpus = len(args.gpu_id.split(','))
    # NOTE(review): MIN_BOXES / MAX_BOXES / CONF_THRESH were read from cfg
    # here but never used in this function; removed as dead locals.

    # Extract features.
    imglist = os.listdir(args.image_dir)
    num_images = len(imglist)
    print('Number of images: {}.'.format(num_images))

    if args.num_cpus != 0:
        ray.init(num_cpus=args.num_cpus)
    else:
        ray.init()

    # Round-robin split so each GPU receives an (almost) equal share.
    img_lists = [imglist[i::num_gpus] for i in range(num_gpus)]

    pb = ProgressBar(len(imglist))
    actor = pb.actor

    print('Number of GPUs: {}.'.format(num_gpus))
    extract_feat_list = [
        extract_feat.remote(i, img_lists[i], cfg, args, actor)
        for i in range(num_gpus)
    ]

    pb.print_until_done()
    ray.get(extract_feat_list)
    ray.get(actor.get_counter.remote())
def main():
    """CLI entry point: parse arguments and run multi-GPU feature extraction.

    Parses the bottom-up-attention extraction options, prepares the output
    directory, splits the input images across the GPUs in ``--gpus`` and
    launches one ``extract_feat`` ray task per GPU with a shared
    progress-bar actor.
    """
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection2 Inference")
    parser.add_argument(
        "--config-file",
        default="configs/bua-caffe/extract-bua-caffe-r101.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument('--num-cpus', default=1, type=int,
                        help='number of cpus to use for ray, 0 means no limit')
    parser.add_argument('--gpus', dest='gpu_id', help='GPU id(s) to use',
                        default='0', type=str)
    parser.add_argument("--mode", default="caffe", type=str,
                        help="bua_caffe, ...")
    parser.add_argument(
        '--extract-mode', default='roi_feats', type=str,
        help="'roi_feats', 'bboxes' and 'bbox_feats' indicates \
                'extract roi features directly', 'extract bboxes only' and \
                'extract roi features with pre-computed bboxes' respectively"
    )
    parser.add_argument('--min-max-boxes', default='min_max_default', type=str,
                        help='the number of min-max boxes of extractor')
    parser.add_argument('--out-dir', dest='output_dir',
                        help='output directory for features',
                        default="features")
    parser.add_argument('--image-dir', dest='image_dir',
                        help='directory with images', default="image")
    parser.add_argument('--bbox-dir', dest='bbox_dir',
                        help='directory with bbox', default="bbox")
    parser.add_argument(
        "--resume",
        action="store_true",
        help="whether to attempt to resume from the checkpoint directory",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    cfg = setup(args)
    os.makedirs(args.output_dir, exist_ok=True)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    num_gpus = len(args.gpu_id.split(','))
    # NOTE(review): MIN_BOXES / MAX_BOXES / CONF_THRESH were read from cfg
    # here but never used in this function; removed as dead locals.

    # Extract features.
    imglist = os.listdir(args.image_dir)
    num_images = len(imglist)
    print('Number of images: {}.'.format(num_images))

    if args.num_cpus != 0:
        ray.init(num_cpus=args.num_cpus)
    else:
        ray.init()

    # Round-robin split so each GPU receives an (almost) equal share.
    img_lists = [imglist[i::num_gpus] for i in range(num_gpus)]

    pb = ProgressBar(len(imglist))
    actor = pb.actor

    print('Number of GPUs: {}.'.format(num_gpus))
    extract_feat_list = [
        extract_feat.remote(i, img_lists[i], cfg, args, actor)
        for i in range(num_gpus)
    ]

    pb.print_until_done()
    ray.get(extract_feat_list)
    ray.get(actor.get_counter.remote())
def main():
    """CLI entry point: parse arguments, save vocab metadata, extract features.

    Parses the bottom-up-attention extraction options, loads the object and
    attribute vocabularies shipped next to this script, saves them (plus the
    config and args) to ``info.npz`` in the output directory, then splits
    the input images across the GPUs in ``--gpus`` and launches one
    ``extract_feat`` ray task per GPU with a shared progress-bar actor.
    """
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection2 Inference")
    parser.add_argument(
        "--config-file",
        default="configs/bua-caffe/extract-bua-caffe-r101.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument('--num-cpus', default=1, type=int,
                        help='number of cpus to use for ray, 0 means no limit')
    parser.add_argument('--gpus', dest='gpu_id', help='GPU id(s) to use',
                        default='0', type=str)
    parser.add_argument("--mode", default="caffe", type=str,
                        help="bua_caffe, ...")
    parser.add_argument(
        '--extract-mode', default='roi_feats', type=str,
        help="'roi_feats', 'bboxes' and 'bbox_feats' indicates \
                'extract roi features directly', 'extract bboxes only' and \
                'extract roi features with pre-computed bboxes' respectively")
    parser.add_argument('--min-max-boxes', default='min_max_default', type=str,
                        help='the number of min-max boxes of extractor')
    parser.add_argument('--out-dir', dest='output_dir',
                        help='output directory for features',
                        default="features")
    parser.add_argument('--image-dir', dest='image_dir',
                        help='directory with images', default="image")
    parser.add_argument('--bbox-dir', dest='bbox_dir',
                        help='directory with bbox', default="bbox")
    parser.add_argument(
        "--resume",
        action="store_true",
        help="whether to attempt to resume from the checkpoint directory",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    cfg = setup(args)
    os.makedirs(args.output_dir, exist_ok=True)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    num_gpus = len(args.gpu_id.split(','))

    # Vocab files live in the "evaluation" directory next to this script.
    eval_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "evaluation")

    # Load classes.  Each line is "name[,alias...]"; keep the first name.
    # (The original '__background__' entry is intentionally omitted.)
    with open(os.path.join(eval_dir, 'objects_vocab.txt')) as f:
        classes = [line.split(',')[0].lower().strip() for line in f]

    # Load attributes ('__no_attribute__' likewise omitted).
    with open(os.path.join(eval_dir, 'attributes_vocab.txt')) as f:
        attributes = [line.split(',')[0].lower().strip() for line in f]

    # Save class and attribute names (plus cfg/args) alongside the features.
    np.savez_compressed(
        os.path.join(args.output_dir, "info.npz"),
        classes=classes,
        attributes=attributes,
        cfg=cfg,
        args=args)

    # Extract features.
    imglist = os.listdir(args.image_dir)
    num_images = len(imglist)
    print('Number of images: {}.'.format(num_images))

    if args.num_cpus != 0:
        ray.init(num_cpus=args.num_cpus)
    else:
        ray.init()

    # Round-robin split so each GPU receives an (almost) equal share.
    img_lists = [imglist[i::num_gpus] for i in range(num_gpus)]

    pb = ProgressBar(len(imglist))
    actor = pb.actor

    print('Number of GPUs: {}.'.format(num_gpus))
    extract_feat_list = [
        extract_feat.remote(i, img_lists[i], cfg, args, actor)
        for i in range(num_gpus)
    ]

    pb.print_until_done()
    ray.get(extract_feat_list)
    ray.get(actor.get_counter.remote())