def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    cfg.data.videos_per_gpu = 1

    net = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    net.eval()
    load_checkpoint(net, args.checkpoint, force_matching=True)

    input_time_size = cfg.input_clip_length
    input_image_size = (tuple(cfg.input_img_size)
                        if isinstance(cfg.input_img_size, (list, tuple))
                        else (cfg.input_img_size, cfg.input_img_size))
    input_size = (3, input_time_size) + input_image_size

    output_path = args.output_name
    if not output_path.endswith('.onnx'):
        output_path = '{}.onnx'.format(output_path)

    # Guard against an empty dirname when the output path has no directory part.
    base_output_dir = dirname(output_path)
    if base_output_dir and not exists(base_output_dir):
        makedirs(base_output_dir)

    # Use the single-clip inference branch for a static ONNX graph.
    if hasattr(net, 'forward_inference'):
        net.forward = net.forward_inference

    # Export the normalized path, so the '.onnx' suffix fix above takes effect.
    convert_to_onnx(net, input_size, output_path, check=args.check)
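
# A minimal sketch of the `parse_args` helper the function above assumes.
# The real flag names are not shown in this file; only the attributes that
# `main` reads (config, checkpoint, output_name, check) are known.
from argparse import ArgumentParser

def parse_args():
    parser = ArgumentParser()
    parser.add_argument('config', help='Config file path')
    parser.add_argument('checkpoint', help='Checkpoint file to export')
    parser.add_argument('output_name', help='Output ONNX model path')
    parser.add_argument('--check', action='store_true',
                        help='Verify the exported model after conversion')
    return parser.parse_args()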
def main():
    parser = ArgumentParser()
    parser.add_argument('config', help='Config file path')
    parser.add_argument('--load_from',
                        help='the checkpoint file to init weights from')
    parser.add_argument('--load2d_from',
                        help='the checkpoint file to init 2D weights from')
    parser.add_argument('--update_config', nargs='+', action=ExtendedDictAction,
                        help='arguments in dict')
    args = parser.parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg = update_config(cfg, args)

    net = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    net.eval()

    if cfg.load_from:
        logger = get_root_logger(log_level=cfg.log_level)
        load_checkpoint(net, cfg.load_from, strict=False, logger=logger,
                        show_converted=True, force_matching=True)

    conv_layers = collect_conv_layers(net)
    show_stat(conv_layers)
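
# Standard module entry point (assumed; not shown in the original snippet).
if __name__ == '__main__':
    main()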
def main(args):
    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg.data.videos_per_gpu = 1

    if cfg.get('seed'):
        print(f'Set random seed to {cfg.seed}')
        set_random_seed(cfg.seed)

    class_maps = None
    if cfg.get('classes'):
        class_maps = {0: {k: v for k, v in enumerate(sorted(cfg.classes))}}

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg,
                             class_maps=class_maps)
    model.eval()
    load_checkpoint(model, args.checkpoint, force_matching=True)

    if hasattr(model, 'forward_inference'):
        model.forward = model.forward_inference

    input_time_size = cfg.input_clip_length
    input_image_size = (tuple(cfg.input_img_size)
                        if isinstance(cfg.input_img_size, (list, tuple))
                        else (cfg.input_img_size, cfg.input_img_size))
    input_size = (3, input_time_size) + input_image_size

    onnx_model_path = join(args.output_dir,
                           splitext(basename(args.config))[0] + '.onnx')
    base_output_dir = dirname(onnx_model_path)
    if not exists(base_output_dir):
        makedirs(base_output_dir)

    convert_to_onnx(model, input_size, onnx_model_path, opset=args.opset,
                    check=True)

    if args.target == 'openvino':
        input_shape = (1,) + input_size
        export_to_openvino(cfg, onnx_model_path, args.output_dir, input_shape,
                           args.input_format)

    meta = {'model_classes': model.CLASSES[0]}
    with open(args.meta_info, 'w') as output_meta_stream:
        json.dump(meta, output_meta_stream)
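
# A hypothetical CLI wrapper for the export entry point above. The argument
# names are inferred from the attributes `main` reads (output_dir, opset,
# target, input_format, meta_info, update_config); the real script's flags
# and defaults may differ.
def parse_args():
    parser = ArgumentParser()
    parser.add_argument('config', help='Config file path')
    parser.add_argument('checkpoint', help='Checkpoint file to export')
    parser.add_argument('output_dir', help='Directory for the exported models')
    parser.add_argument('--opset', type=int, default=11, help='ONNX opset')
    parser.add_argument('--target', choices=['onnx', 'openvino'],
                        default='onnx')
    parser.add_argument('--input_format', default='rgb')
    parser.add_argument('--meta_info', default='meta_info.json')
    parser.add_argument('--update_config', nargs='+',
                        action=ExtendedDictAction, help='arguments in dict')
    return parser.parse_args()


if __name__ == '__main__':
    main(parse_args())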
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    cfg.data.videos_per_gpu = 1

    # Switch the model components to their inference-only variants.
    cfg.model.type += '_Inference'
    cfg.model.backbone.type += '_Inference'
    cfg.model.backbone.inference = True
    cfg.model.cls_head.type += '_Inference'

    if args.num_classes is not None and args.num_classes > 0:
        cfg.num_test_classes = args.num_classes
        cfg.model.cls_head.num_classes = args.num_classes

    net = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    net.eval()
    load_checkpoint(net, args.checkpoint, force_matching=True)

    time_length = (cfg.data.test.out_length
                   if hasattr(cfg.data.test, 'out_length')
                   else cfg.data.test.new_length)

    # Normalize the image size the same way the other export scripts do:
    # the config may store a single int instead of an (H, W) pair.
    image_size = cfg.data.test.input_size
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    input_size = (3, time_length) + tuple(image_size)

    convert_to_onnx(net, input_size, args.output_name, check=args.check)
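
# A minimal sketch of the assumed `parse_args` helper; only the attributes
# consumed above (config, checkpoint, output_name, check, num_classes) are
# known from this file.
def parse_args():
    parser = ArgumentParser()
    parser.add_argument('config', help='Config file path')
    parser.add_argument('checkpoint', help='Checkpoint file to export')
    parser.add_argument('output_name', help='Output ONNX model path')
    parser.add_argument('--check', action='store_true')
    parser.add_argument('--num_classes', type=int, default=None,
                        help='Override the number of test classes')
    return parser.parse_args()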
def main():
    parser = ArgumentParser()
    parser.add_argument('--config', type=str, required=True,
                        help='Test config file path')
    parser.add_argument('--checkpoint', type=str, required=True,
                        help='Checkpoint file')
    parser.add_argument('--data_dir', type=str, required=True,
                        help='The dir with dataset')
    parser.add_argument('--out_dir', type=str, required=True,
                        help='Output directory')
    parser.add_argument('--dataset', type=str, required=True,
                        help='Dataset name')
    parser.add_argument('--gpus', default=1, type=int,
                        help='GPU number used for annotating')
    parser.add_argument('--proc_per_gpu', default=2, type=int,
                        help='Number of processes per GPU')
    parser.add_argument('--mode', choices=['train', 'val', 'test'],
                        default='train')
    args = parser.parse_args()

    assert exists(args.config)
    assert exists(args.checkpoint)
    assert exists(args.data_dir)

    cfg = Config.fromfile(args.config)
    cfg = update_config(cfg, args, trg_name=args.dataset)
    cfg = propagate_root_dir(cfg, args.data_dir)

    dataset = build_dataset(cfg.data, args.mode, dict(test_mode=True))
    data_pipeline = Compose(dataset.pipeline.transforms[1:])
    print('{} dataset:\n'.format(args.mode) + str(dataset))

    tasks = prepare_tasks(dataset, cfg.input_clip_length)
    print('Prepared tasks: {}'.format(sum(len(v) for v in tasks.values())))

    if not exists(args.out_dir):
        makedirs(args.out_dir)

    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, strict=False)

    batch_size = 4 * cfg.data.videos_per_gpu
    if args.gpus == 1:
        model = MMDataParallel(model, device_ids=[0])
        model.eval()

        process_tasks(tasks, dataset, model, args.out_dir, batch_size,
                      cfg.input_clip_length, data_pipeline)
    else:
        raise NotImplementedError
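
# Standard module entry point (assumed; not shown in the original snippet).
if __name__ == '__main__':
    main()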
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    if args.num_classes is not None and args.num_classes > 0:
        cfg.num_test_classes = args.num_classes

    if args.data_dir is not None:
        cfg = update_data_paths(cfg, args.data_dir)

    assert args.mode in cfg.data
    data_cfg = getattr(cfg.data, args.mode)
    data_cfg.test_mode = True
    dataset = obj_from_dict(data_cfg, datasets, dict(test_mode=True))

    if args.gpus == 1:
        model = build_recognizer(cfg.model, train_cfg=None,
                                 test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=False)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(model_type, model_args, args.checkpoint,
                                dataset, _data_func, range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    results = np.array([res.cpu().numpy().mean(axis=0) for res in outputs],
                       dtype=np.float32)
    if cfg.data.num_test_classes is not None and cfg.data.num_test_classes > 0:
        results = results[:, :cfg.data.num_test_classes]

    top1_value = mean_top_k_accuracy(results, gt_labels, k=1)
    top5_value = mean_top_k_accuracy(results, gt_labels, k=5)
    print("\nMean Top-1 Accuracy = {:.03f}%".format(top1_value * 100))
    print("Mean Top-5 Accuracy = {:.03f}%".format(top5_value * 100))

    map_value = mean_average_precision(results, gt_labels)
    print("mAP = {:.03f}%".format(map_value * 100))

    invalid_ids = invalid_filtered(results, gt_labels)
    print('\nNum invalid classes: {} / {}'.format(len(invalid_ids),
                                                  cfg.data.num_test_classes))

    num_invalid_samples = sum(len(ids) for ids in invalid_ids.values())
    print('Num invalid samples: {} / {}'.format(num_invalid_samples,
                                                len(gt_labels)))
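
# A hypothetical `parse_args` for the evaluation script above, inferred from
# the attributes it reads (config, checkpoint, num_classes, data_dir, mode,
# gpus, proc_per_gpu); real flag names and defaults may differ.
def parse_args():
    parser = ArgumentParser()
    parser.add_argument('config', help='Test config file path')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--data_dir', default=None, help='Root data dir')
    parser.add_argument('--mode', choices=['train', 'val', 'test'],
                        default='test')
    parser.add_argument('--gpus', type=int, default=1)
    parser.add_argument('--proc_per_gpu', type=int, default=2)
    parser.add_argument('--num_classes', type=int, default=None)
    return parser.parse_args()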
def main(args):
    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg.data.videos_per_gpu = 1

    if cfg.get('seed'):
        print(f'Set random seed to {cfg.seed}')
        set_random_seed(cfg.seed)

    class_maps = None
    if cfg.get('classes'):
        class_maps = {0: {k: v for k, v in enumerate(sorted(cfg.classes))}}

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg,
                             class_maps=class_maps)
    model.eval()
    load_checkpoint(model, args.checkpoint, force_matching=True)

    if hasattr(model, 'forward_inference'):
        model.forward = model.forward_inference

    input_time_size = cfg.input_clip_length
    input_image_size = (tuple(cfg.input_img_size)
                        if isinstance(cfg.input_img_size, (list, tuple))
                        else (cfg.input_img_size, cfg.input_img_size))
    input_size = (3, input_time_size) + input_image_size

    # BEGIN nncf part
    was_model_compressed = is_checkpoint_nncf(args.checkpoint)
    cfg_contains_nncf = cfg.get('nncf_config')
    if cfg_contains_nncf and not was_model_compressed:
        raise RuntimeError('Trying to export with NNCF compression a model '
                           'snapshot that was NOT trained with NNCF')
    if was_model_compressed and not cfg_contains_nncf:
        # reading NNCF config from checkpoint
        nncf_part = get_nncf_config_from_meta(args.checkpoint)
        for k, v in nncf_part.items():
            cfg[k] = v
    if cfg.get('nncf_config'):
        if torch.cuda.is_available():
            model.cuda()
        check_nncf_is_enabled()
        cfg.load_from = args.checkpoint
        cfg.resume_from = None
        compression_ctrl, model = wrap_nncf_model(model, cfg, None,
                                                  get_fake_input, export=True)
        compression_ctrl.prepare_for_export()
    # END nncf part

    onnx_model_path = join(args.output_dir,
                           splitext(basename(args.config))[0] + '.onnx')
    base_output_dir = dirname(onnx_model_path)
    if not exists(base_output_dir):
        makedirs(base_output_dir)

    convert_to_onnx(model, input_size, onnx_model_path, opset=args.opset,
                    check=True)

    if args.target == 'openvino':
        input_shape = (1,) + input_size
        export_to_openvino(cfg, onnx_model_path, args.output_dir, input_shape,
                           args.input_format)

    meta = {'model_classes': model.CLASSES[0]}
    with open(args.meta_info, 'w') as output_meta_stream:
        json.dump(meta, output_meta_stream)
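
# An optional post-export sanity check (an assumption, not part of the
# original script): run the exported graph on a dummy clip with onnxruntime.
# The model path and the 1 x 3 x 16 x 224 x 224 input shape are placeholders;
# the real shape is (1,) + input_size computed above.
import numpy as np
import onnxruntime

session = onnxruntime.InferenceSession('model.onnx')
input_name = session.get_inputs()[0].name
dummy_clip = np.random.rand(1, 3, 16, 224, 224).astype(np.float32)
logits = session.run(None, {input_name: dummy_clip})[0]
print(logits.shape)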
def main():
    parser = ArgumentParser()
    parser.add_argument('--config', '-c', type=str, required=True)
    parser.add_argument('--checkpoint', '-w', type=str, required=True)
    parser.add_argument('--dataset_name', '-n', type=str, required=True)
    parser.add_argument('--data_dir', '-d', type=str, required=True)
    parser.add_argument('--predictions', '-p', type=str, required=True)
    parser.add_argument('--movements', '-m', type=str, required=True)
    parser.add_argument('--keypoints', '-k', type=str, required=True)
    parser.add_argument('--out_annotation', '-o', type=str, required=True)
    args = parser.parse_args()

    assert exists(args.config)
    assert exists(args.checkpoint)
    assert exists(args.data_dir)
    assert exists(args.predictions)
    assert exists(args.movements)
    assert exists(args.keypoints)
    assert args.dataset_name is not None and args.dataset_name != ''
    assert args.out_annotation is not None and args.out_annotation != ''

    cfg = Config.fromfile(args.config)
    cfg = update_config(cfg, args, trg_name=args.dataset_name)
    cfg = propagate_root_dir(cfg, args.data_dir)

    # This script always works on the train split; there is no --mode flag.
    dataset = build_dataset(cfg.data, 'train', dict(test_mode=True))
    data_pipeline = Compose(dataset.pipeline.transforms[1:])
    print('train dataset:\n' + str(dataset))

    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, strict=False)
    model = MMDataParallel(model, device_ids=[0])
    model.eval()

    annotation_path = join(args.data_dir, cfg.data.train.sources[0],
                           cfg.data.train.ann_file)
    records = load_annotation(annotation_path)
    predictions = load_distributed_data(args.predictions,
                                        parse_predictions_file, 'txt')
    movements = load_distributed_data(args.movements,
                                      parse_movements_file, 'txt')
    hand_kpts = load_distributed_data(args.keypoints, parse_kpts_file, 'json')
    print('Loaded records: {}'.format(len(records)))

    invalid_stat = dict()
    all_candidates = []

    ignore_candidates = get_ignore_candidates(records, IGNORE_LABELS)
    all_candidates += ignore_candidates

    static_candidates, static_invalids = get_regular_candidates(
        records, predictions, movements, hand_kpts, cfg.data.output.length,
        False, STATIC_LABELS, NEGATIVE_LABEL, NO_MOTION_LABEL,
        min_score=0.9, min_length=4, max_distance=1)
    all_candidates += static_candidates
    invalid_stat = update_stat(invalid_stat, static_invalids)
    print('Static candidates: {}'.format(len(static_candidates)))

    if len(invalid_stat) > 0:
        print('Ignored records after static analysis:')
        for ignore_label, ignore_values in invalid_stat.items():
            print(' - {}: {}'.format(ignore_label.replace('_', ' '),
                                     len(ignore_values)))

    dynamic_candidates, dynamic_invalids = get_regular_candidates(
        records, predictions, movements, hand_kpts, cfg.data.output.length,
        True, DYNAMIC_LABELS, NEGATIVE_LABEL, NO_MOTION_LABEL,
        min_score=0.9, min_length=4, max_distance=1)
    all_candidates += dynamic_candidates
    invalid_stat = update_stat(invalid_stat, dynamic_invalids)
    print('Dynamic candidates: {}'.format(len(dynamic_candidates)))

    if len(invalid_stat) > 0:
        print('Ignored records after dynamic analysis:')
        for ignore_label, ignore_values in invalid_stat.items():
            print(' - {}: {}'.format(ignore_label.replace('_', ' '),
                                     len(ignore_values)))

    fixed_records, fix_stat = find_best_match(all_candidates, model, dataset,
                                              NEGATIVE_LABEL)
    invalid_stat = update_stat(invalid_stat, fix_stat)
    print('Final records: {}'.format(len(fixed_records)))

    if len(invalid_stat) > 0:
        print('Final ignored records:')
        for ignore_label, ignore_values in invalid_stat.items():
            print(' - {}: {}'.format(ignore_label.replace('_', ' '),
                                     len(ignore_values)))
            for ignored_record in ignore_values:
                print('    - {}'.format(ignored_record.path))

    dump_records(fixed_records, args.out_annotation)
    print('Fixed annotation has been stored at: {}'.format(args.out_annotation))
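
# Standard module entry point (assumed; not shown in the original snippet).
if __name__ == '__main__':
    main()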