def main():
    """Evaluation entry point (MXNet path).

    Parses the command line, loads/overrides the config, builds the test
    roidb/imdb, restores the trained symbol parameters for
    ``config.TEST.TEST_EPOCH`` and dispatches to either proposal extraction
    or full detection depending on ``config.TEST.EXTRACT_PROPOSALS``.
    """
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    # One MXNet device context per GPU id listed in config.gpus (comma-separated).
    context = [mx.gpu(int(gpu)) for gpu in config.gpus.split(',')]

    # makedirs(exist_ok=True) tolerates missing parent directories and
    # concurrent creation, unlike the bare os.mkdir it replaces.
    os.makedirs(config.output_path, exist_ok=True)

    # Create roidb: ground truth only, no flipping, for evaluation.
    roidb, imdb = load_proposal_roidb(
        config.dataset.dataset, config.dataset.test_image_set,
        config.dataset.root_path, config.dataset.dataset_path,
        proposal=config.dataset.proposal, only_gt=True, flip=False,
        result_path=config.output_path, proposal_path=config.proposal_path,
        get_imdb=True)

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg,
                                        config.dataset.image_set)
    print(output_path)

    # Restore trained weights for the configured test epoch.
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix, config.TEST.TEST_EPOCH,
                                        convert=True, process=True)

    # NOTE(review): eval() on a config-supplied string is only safe if the
    # config file is trusted; consider getattr() on the symbol module instead.
    sym_inst = eval('{}.{}'.format(config.symbol, config.symbol))

    if config.TEST.EXTRACT_PROPOSALS:
        imdb_proposal_extraction_wrapper(sym_inst, config, imdb, roidb, context,
                                         arg_params, aux_params, args.vis)
    else:
        imdb_detection_wrapper(sym_inst, config, imdb, roidb, context,
                               arg_params, aux_params, args.vis)
def main():
    """Evaluation entry point (PyTorch faster-RCNN path).

    Parses the command line, loads/overrides the config, builds the test
    roidb/imdb, restores a ResNet-101 faster-RCNN checkpoint, wraps it in
    DataParallel and runs detection over the imdb.
    """
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    # makedirs(exist_ok=True) tolerates missing parent directories and
    # concurrent creation, unlike the bare os.mkdir it replaces.
    os.makedirs(config.output_path, exist_ok=True)

    # Create roidb: ground truth only, no flipping, for evaluation.
    roidb, imdb = load_proposal_roidb(
        config.dataset.dataset, config.dataset.test_image_set,
        config.dataset.root_path, config.dataset.dataset_path,
        proposal=config.dataset.proposal, only_gt=True, flip=False,
        result_path=config.output_path, proposal_path=config.proposal_path,
        get_imdb=True)

    # NOTE(review): hard-coded checkpoint path — should come from the config
    # or command line; confirm before shipping.
    load_name = 'output/nofix_3_15000.pth'

    # Build the faster-RCNN (ResNet-101 backbone) and initialize weights.
    fasterRCNN = resnet(config.dataset.NUM_CLASSES, 101, pretrained=True,
                        class_agnostic=config.CLASS_AGNOSTIC)
    fasterRCNN.create_architecture()

    print("load checkpoint %s" % (load_name))
    checkpoint = torch.load(load_name)

    # Checkpoints saved from an nn.DataParallel model prefix every key with
    # 'module.'; strip it so the state dict matches the unwrapped model.
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in checkpoint['model'].items():
        name = k[7:] if k.startswith('module') else k  # remove `module.`
        new_state_dict[name] = v
    fasterRCNN.load_state_dict(new_state_dict)

    fasterRCNN.cuda()
    fasterRCNN = nn.DataParallel(fasterRCNN)
    fasterRCNN.eval()

    if config.TEST.EXTRACT_PROPOSALS:
        # BUG FIX: the original branch referenced sym_inst/context/arg_params/
        # aux_params, none of which exist on this PyTorch path, and would have
        # died with a NameError. Fail explicitly instead.
        raise NotImplementedError(
            'EXTRACT_PROPOSALS is not supported by the PyTorch evaluation path.')
    imdb_detection_wrapper(fasterRCNN, config, imdb, roidb)
# NOTE(review): fragment of a training-script setup — `args` is defined
# before this chunk, and roidb/train_iter/bbox_means appear to be consumed
# by code after it (outside this view); confirm against the full file.
if args.set_cfg_list:
    update_config_from_list(args.set_cfg_list)
# One MXNet device context per GPU id listed in config.gpus (comma-separated).
context = [mx.gpu(int(gpu)) for gpu in config.gpus.split(',')]
nGPUs = len(context)
# Effective batch size = images per GPU times number of GPUs.
batch_size = nGPUs * config.TRAIN.BATCH_IMAGES
print("batch size is", batch_size)
if not os.path.isdir(config.output_path):
    os.mkdir(config.output_path)
# Create roidb: one per '+'-separated image set, with ground truth appended
# and optional flipping/masks/negative chips per the TRAIN config.
image_sets = [iset for iset in config.dataset.image_set.split('+')]
roidbs = [load_proposal_roidb(config.dataset.dataset, image_set, config.dataset.root_path,
                              config.dataset.dataset_path, proposal=config.dataset.proposal,
                              append_gt=True, flip=config.TRAIN.FLIP,
                              result_path=config.output_path, proposal_path=config.proposal_path,
                              load_mask=config.TRAIN.WITH_MASK,
                              only_gt=not config.TRAIN.USE_NEG_CHIPS)
          for image_set in image_sets]
# Merge the per-set roidbs, drop unusable entries, and attach bbox
# regression targets (means/stds are presumably used later to (un)normalize
# the regression head — TODO confirm against the rest of the file).
roidb = merge_roidb(roidbs)
roidb = filter_roidb(roidb, config)
bbox_means, bbox_stds = add_bbox_regression_targets(roidb, config)
print('Creating Iterator with {} Images'.format(len(roidb)))
train_iter = MNIteratorE2E(roidb=roidb, config=config, batch_size=batch_size, nGPUs=nGPUs,
                           threads=config.TRAIN.NUM_THREAD,
                           pad_rois_to=400)  # , crop_size=(config.TRAIN.SCALES[-1],config.TRAIN.SCALES[-1])
print('The Iterator has {} samples!'.format(len(train_iter)))
# Training-script entry: parse args, build the merged training roidb and the
# multi-GPU data iterator. NOTE(review): training code likely continues after
# this chunk (train_iter/bbox_means are otherwise unused) — confirm against
# the full file.
if __name__ == '__main__':
    args = parser()
    update_config(args.cfg)
    # One MXNet device context per GPU id listed in config.gpus (comma-separated).
    context = [mx.gpu(int(gpu)) for gpu in config.gpus.split(',')]
    nGPUs = len(context)
    # Effective batch size = images per GPU times number of GPUs.
    batch_size = nGPUs * config.TRAIN.BATCH_IMAGES
    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)
    # Create roidb: one per '+'-separated image set, with ground truth
    # appended and optional flipping/masks per the TRAIN config.
    image_sets = [iset for iset in config.dataset.image_set.split('+')]
    roidbs = [load_proposal_roidb(config.dataset.dataset, image_set, config.dataset.root_path,
                                  config.dataset.dataset_path, proposal=config.dataset.proposal,
                                  append_gt=True, flip=config.TRAIN.FLIP,
                                  result_path=config.output_path, proposal_path=config.proposal_path,
                                  load_mask=config.TRAIN.WITH_MASK)
              for image_set in image_sets]
    # Merge the per-set roidbs, drop unusable entries, and attach bbox
    # regression targets.
    roidb = merge_roidb(roidbs)
    roidb = filter_roidb(roidb, config)
    bbox_means, bbox_stds = add_bbox_regression_targets(roidb, config)
    print('Creating Iterator with {} Images'.format(len(roidb)))
    train_iter = MNIteratorE2E(roidb=roidb, config=config, batch_size=batch_size, nGPUs=nGPUs,
                               threads=config.TRAIN.NUM_THREAD, pad_rois_to=400)
    print('The Iterator has {} samples!'.format(len(train_iter)))