def main():
    """Validate a pose-estimation model on the configured test split."""
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Model constructor is resolved dynamically from the configured name.
    model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
        cfg, is_train=False
    )

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        # NOTE(review): strict=False silently ignores missing/unexpected keys —
        # confirm this is intentional and not masking a bad checkpoint.
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        model_state_file = os.path.join(
            final_output_dir, 'final_state.pth'
        )
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    # NOTE(review): DataParallel/.cuda() is commented out, so the model stays
    # on CPU while the criterion below is moved to GPU — verify validate()
    # handles device placement.
    #model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT
    ).cuda()

    # Data loading code (ImageNet normalisation statistics)
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    valid_dataset = eval('dataset.'+cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU*len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=True
    )

    # evaluate on validation set
    validate(cfg, valid_loader, valid_dataset, model, criterion,
             final_output_dir, tb_log_dir)
def main():
    """Evaluate a trained GAN generator checkpoint (Inception score / FID)."""
    args = cfg.parse_args()

    # Seed every RNG source up front for reproducible evaluation.
    torch.manual_seed(args.random_seed)
    random.seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True

    assert args.exp_name
    assert args.load_path.endswith('.pth')
    assert os.path.exists(args.load_path)
    args.path_helper = set_log_dir('logs_eval', args.exp_name)
    logger = create_logger(args.path_helper['log_path'], phase='test')

    # set tf env (inception graph is used for IS/FID computation)
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # import network
    gen_net = eval('models.' + args.model + '.Generator')(args=args).cuda()

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)

    # initial: fixed latent batch used for sample visualisation
    np.random.seed(args.random_seed)
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))

    # set writer
    logger.info(f'=> resuming from {args.load_path}')
    checkpoint_file = args.load_path
    assert os.path.exists(checkpoint_file)
    checkpoint = torch.load(checkpoint_file)
    # BUG FIX: `epoch` was only bound inside the first branch, so loading a
    # raw state-dict checkpoint crashed with NameError at the validate() call.
    epoch = 0
    if 'avg_gen_state_dict' in checkpoint:
        gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        epoch = checkpoint['epoch']
        logger.info(f'=> loaded checkpoint {checkpoint_file} (epoch {epoch})')
    else:
        gen_net.load_state_dict(checkpoint)
        logger.info(f'=> loaded checkpoint {checkpoint_file}')

    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'valid_global_steps': 0,
    }
    inception_score, fid_score = validate(args, fixed_z, fid_stat, gen_net,
                                          writer_dict, epoch)
    logger.info(f'Inception score: {inception_score}, FID score: {fid_score}.')
    writer_dict['writer'].close()
def main():
    """Build the segmentation model, load a checkpoint and dump its weights
    to a TensorRT-style plain-text .wts file (hex-encoded big-endian float32).
    """
    args = parse_args()
    logger, final_output_dir, _ = create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    # build model
    if torch.__version__.startswith('1'):
        module = eval('models.' + config.MODEL.NAME)
        module.BatchNorm2d_class = module.BatchNorm2d = torch.nn.BatchNorm2d
    model = eval('models.' + config.MODEL.NAME + '.get_seg_model')(config)

    dump_input = torch.rand(
        (1, 3, config.TRAIN.IMAGE_SIZE[1], config.TRAIN.IMAGE_SIZE[0]))
    logger.info(get_model_summary(model.cuda(), dump_input.cuda()))

    if config.TEST.MODEL_FILE:
        model_state_file = config.TEST.MODEL_FILE
    else:
        # model_state_file = os.path.join(final_output_dir, 'best_0.7589.pth')
        model_state_file = os.path.join(final_output_dir, 'best.pth')
    # NOTE(review): the original logged `model_state_file` but then loaded a
    # hard-coded 'model_best_bacc.pth.tar'. The hard-coded path is kept (it
    # may be deliberate) but the log now names the file actually read.
    checkpoint_path = 'model_best_bacc.pth.tar'
    logger.info('=> loading model from {}'.format(checkpoint_path))
    pretrained_dict = torch.load(checkpoint_path)
    if 'state_dict' in pretrained_dict:
        pretrained_dict = pretrained_dict['state_dict']
    # Keep only the weights whose names exist in the freshly built model.
    newstate_dict = {
        k: v for k, v in pretrained_dict.items() if k in model.state_dict()
    }
    model.load_state_dict(newstate_dict)
    model = model.cuda()

    print(model)
    # BUG FIX: the original `mean = [...], std = [...]` one-liner bound `mean`
    # to a tuple via the trailing comma and neither name was used — removed.
    save_wts = True  # previously wrapped in a dead `if True:` block
    if save_wts:
        # `with` guarantees the file is flushed and closed (it was leaked).
        with open('DDRNet_CS.wts', 'w') as f:
            f.write('{}\n'.format(len(model.state_dict().keys())))
            for k, v in model.state_dict().items():
                print("Layer {} ; Size {}".format(k, v.cpu().numpy().shape))
                vr = v.reshape(-1).cpu().numpy()
                f.write('{} {} '.format(k, len(vr)))
                for vv in vr:
                    f.write(' ')
                    # one float32 per token, big-endian hex
                    f.write(struct.pack('>f', float(vv)).hex())
                f.write('\n')
def main():
    """Export the trained segmentation network to an ONNX file."""
    cli_args = parse_args()
    logger, final_output_dir, _ = create_logger(config, cli_args.cfg, 'test')

    logger.info(pprint.pformat(cli_args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    # build model
    if torch.__version__.startswith('1'):
        module = eval('models.' + config.MODEL.NAME)
        module.BatchNorm2d_class = module.BatchNorm2d = torch.nn.BatchNorm2d
    model = eval('models.' + config.MODEL.NAME + '.get_seg_model')(config)

    model_state_file = (config.TEST.MODEL_FILE
                        if config.TEST.MODEL_FILE
                        else os.path.join(final_output_dir, 'best_0.7589.pth'))
    logger.info('=> loading model from {}'.format(model_state_file))

    pretrained_dict = torch.load(model_state_file, map_location='cpu')
    if 'state_dict' in pretrained_dict:
        pretrained_dict = pretrained_dict['state_dict']
    model_dict = model.state_dict()
    # Strip the first 6 characters ('model.') and keep only matching keys.
    pretrained_dict = {k[6:]: v for k, v in pretrained_dict.items()
                       if k[6:] in model_dict.keys()}
    for key in pretrained_dict:
        logger.info('=> loading {} from pretrained model'.format(key))
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

    net = onnx_net(model).eval()
    # x = torch.randn((1, 3, 512, 384))
    dummy = torch.randn((1, 3, 480, 640))
    torch_out = net(dummy)

    output_path = "output/ddrnet23.onnx"
    torch.onnx.export(
        net,                       # model being run
        dummy,                     # model input
        output_path,               # where to save the model
        export_params=True,        # bake trained weights into the graph
        opset_version=11,          # target ONNX opset
        do_constant_folding=True,  # fold constants for optimisation
        input_names=['inputx'],
        output_names=['outputy'],
        verbose=True,
    )
def main():
    """Validate an image-classification model on the configured test set."""
    args = parse_args()
    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    model = eval('models.' + config.MODEL.NAME + '.get_cls_net')(config)

    dump_input = torch.rand(
        (1, 3, config.MODEL.IMAGE_SIZE[1], config.MODEL.IMAGE_SIZE[0]))
    logger.info(get_model_summary(model, dump_input))

    if config.TEST.MODEL_FILE:
        model_state_file = config.TEST.MODEL_FILE
    else:
        model_state_file = os.path.join(final_output_dir,
                                        'final_state.pth.tar')
    logger.info('=> loading model from {}'.format(model_state_file))
    model.load_state_dict(torch.load(model_state_file))

    gpus = list(config.GPUS)
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss().cuda()

    # Data loading code: resize so the centre crop covers 87.5% of the image
    valdir = os.path.join(config.DATASET.ROOT, config.DATASET.TEST_SET)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    eval_transform = transforms.Compose([
        transforms.Resize(int(config.MODEL.IMAGE_SIZE[0] / 0.875)),
        transforms.CenterCrop(config.MODEL.IMAGE_SIZE[0]),
        transforms.ToTensor(),
        normalize,
    ])
    valid_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, eval_transform),
        batch_size=config.TEST.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True)

    # evaluate on validation set
    validate(config, valid_loader, model, criterion, final_output_dir,
             tb_log_dir, None)
def main():
    """Validate a set-prediction (matcher/criterion) pose model."""
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg, is_train=False)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        state = torch.load(cfg.TEST.MODEL_FILE)
        # Training checkpoints may wrap the weights under 'best_state_dict';
        # unwrap before key normalisation.
        if 'best_state_dict' in state.keys():
            state = state['best_state_dict']
        state = model_key_helper(state)
        model.load_state_dict(state)
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(model_key_helper(torch.load(model_state_file)))

    # define loss function (criterion) and optimizer
    # The matcher pairs predictions with ground truth; the criterion combines
    # classification ('loss_ce') and keypoint ('loss_kpts') losses with the
    # configured weights.
    matcher = build_matcher(cfg.MODEL.NUM_JOINTS)
    weight_dict = {'loss_ce': 1, 'loss_kpts': cfg.MODEL.EXTRA.KPT_LOSS_COEF}
    criterion = SetCriterion(model.num_classes, matcher, weight_dict,
                             cfg.MODEL.EXTRA.EOS_COEF,
                             ['labels', 'kpts', 'cardinality']).cuda()

    # Wrap AFTER reading model.num_classes above.
    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # Data loading code (ImageNet normalisation statistics)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=True)

    # evaluate on validation set
    validate(cfg, valid_loader, valid_dataset, model, criterion,
             final_output_dir, tb_log_dir)
def main():
    """Evaluate a GAN generator checkpoint and report IS / FID."""
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)
    assert args.exp_name
    assert args.load_path.endswith(".pth")
    assert os.path.exists(args.load_path)
    args.path_helper = set_log_dir("logs_eval", args.exp_name)
    logger = create_logger(args.path_helper["log_path"], phase="test")

    # set tf env (inception graph backs the IS/FID metrics)
    _init_inception()
    create_inception_graph(check_or_download_inception(None))

    # import network
    gen_net = eval("models." + args.gen_model + ".Generator")(args=args).cuda()

    # fid stat
    dataset_name = args.dataset.lower()
    if dataset_name == "cifar10":
        fid_stat = "fid_stat/fid_stats_cifar10_train.npz"
    elif dataset_name == "stl10":
        fid_stat = "fid_stat/stl10_train_unlabeled_fid_stats_48.npz"
    else:
        raise NotImplementedError(f"no fid stat for {dataset_name}")
    assert os.path.exists(fid_stat)

    # initial: fixed latent batch for sample grids
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))

    # set writer
    logger.info(f"=> resuming from {args.load_path}")
    checkpoint_file = args.load_path
    assert os.path.exists(checkpoint_file)
    checkpoint = torch.load(checkpoint_file)
    if "avg_gen_state_dict" in checkpoint:
        gen_net.load_state_dict(checkpoint["avg_gen_state_dict"])
        epoch = checkpoint["epoch"]
        logger.info(f"=> loaded checkpoint {checkpoint_file} (epoch {epoch})")
    else:
        gen_net.load_state_dict(checkpoint)
        logger.info(f"=> loaded checkpoint {checkpoint_file}")

    logger.info(args)
    writer_dict = {
        "writer": SummaryWriter(args.path_helper["log_path"]),
        "valid_global_steps": 0,
    }
    inception_score, fid_score = validate(
        args, fixed_z, fid_stat, gen_net, writer_dict, clean_dir=False)
    logger.info(f"Inception score: {inception_score}, FID score: {fid_score}.")
def main():
    """Shard the test set across worker processes, gather their predictions
    and run COCO-style keypoint evaluation on the merged results."""
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, "valid")

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    _, dataset = make_test_dataloader(cfg)
    total_size = len(dataset)
    pred_queue = Queue(100)
    workers = []
    for i in range(args.world_size):
        # Round-robin shard: worker i handles indices i, i+W, i+2W, ...
        indices = list(range(i, total_size, args.world_size))
        p = Process(target=worker,
                    args=(i, dataset, indices, cfg, logger, final_output_dir,
                          pred_queue))
        p.start()
        workers.append(p)
        logger.info("==>" +
                    " Worker {} Started, responsible for {} images".format(
                        i, len(indices)))

    # Drain the queue BEFORE join(): a worker blocked on a full queue would
    # otherwise never exit and the joins below would deadlock.
    all_preds = []
    for idx in range(args.world_size):
        all_preds += pred_queue.get()
    for p in workers:
        p.join()

    res_folder = os.path.join(final_output_dir, "results")
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)
    res_file = os.path.join(res_folder,
                            "keypoints_%s_results.json" % dataset.dataset)
    # BUG FIX: the file handle passed to json.dump was never closed; use a
    # context manager so the results file is always flushed.
    with open(res_file, 'w') as fh:
        json.dump(all_preds, fh)

    info_str = dataset._do_python_keypoint_eval(res_file, res_folder)
    name_values = OrderedDict(info_str)
    # NOTE(review): name_values is always an OrderedDict at this point, so the
    # list branch is effectively dead; kept for safety.
    if isinstance(name_values, list):
        for name_value in name_values:
            _print_name_value(logger, name_value, cfg.MODEL.NAME)
    else:
        _print_name_value(logger, name_values, cfg.MODEL.NAME)
def main():
    """Validate the multiview pose network on the configured test subset."""
    args = parse_args()
    reset_config(config, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'valid')
    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    # Build the 2-D backbone first, then the multiview fusion net on top.
    backbone = eval('models.' + config.BACKBONE_MODEL + '.get_pose_net')(
        config, is_train=False)
    model = eval('models.' + config.MODEL + '.get_multiview_pose_net')(
        backbone, config)

    if config.TEST.MODEL_FILE:
        checkpoint = config.TEST.MODEL_FILE
    else:
        name = ('model_best.pth.tar'
                if config.TEST.STATE == 'best' else 'final_state.pth.tar')
        checkpoint = os.path.join(final_output_dir, name)
    logger.info('=> loading model from {}'.format(checkpoint))
    model.load_state_dict(torch.load(checkpoint))

    gpus = [int(g) for g in config.GPUS.split(',')]
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=config.LOSS.USE_TARGET_WEIGHT).cuda()

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    valid_dataset = eval('dataset.' + config.DATASET.TEST_DATASET)(
        config, config.DATASET.TEST_SUBSET, False,
        transforms.Compose([transforms.ToTensor(), normalize]))
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True)

    # evaluate on validation set
    validate(config, valid_loader, valid_dataset, model, criterion,
             final_output_dir, tb_log_dir)
def main():
    """Benchmark an OpenVINO IR pose model on the validation set.

    Runs synchronous inference on up to 101 samples, saves debug images and
    logs the average per-image processing time.
    """
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    exec_net, net_input_shape = load_to_IE(args.model)
    # We need dynamically generated key for fetching output tensor
    output_key = list(exec_net.outputs.keys())[0]

    # Data loading code (ImageNet normalisation statistics)
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    valid_dataset = eval('dataset.'+cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=1,  # IE sync inference processes one image at a time
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=True
    )

    process_time = AverageMeter()
    with torch.no_grad():
        for i, (input, target, target_weight, meta) in enumerate(valid_loader):
            start_time = time.time()
            # compute output (re-add the batch dim expected by the IE blob)
            output = sync_inference(
                exec_net, image=np.expand_dims(input[0].numpy(), 0))
            batch_heatmaps = output[output_key]
            coords, maxvals = get_max_preds(batch_heatmaps)

            # measure elapsed time
            process_time.update(time.time() - start_time)

            prefix = '{}_{}'.format(os.path.join(final_output_dir, 'val'), i)
            # coords are in heatmap space; *4 rescales to input-image space —
            # assumes a fixed 4x heatmap stride, TODO confirm.
            save_debug_images(cfg, input, meta, target, coords * 4,
                              torch.from_numpy(batch_heatmaps), prefix)
            if i == 100:  # benchmark only the first 101 images
                break

    # BUG FIX: the original message was garbled
    # ('OpenVINO IE: Inference EngineAverage processing time of model:...').
    logger.info(
        f'OpenVINO Inference Engine: average processing time of model: '
        f'{process_time.avg}')
def main():
    """Script/quantise a DDRNet segmentation model and save a mobile-optimised
    TorchScript file (ddrnetint8.pt)."""
    args = parse_args()
    logger, final_output_dir, _ = create_logger(
        config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    # build model
    if torch.__version__.startswith('1'):
        module = eval('models.'+config.MODEL.NAME)
        module.BatchNorm2d_class = module.BatchNorm2d = torch.nn.BatchNorm2d
    model = eval('models.'+config.MODEL.NAME + '.get_seg_model')(config)

    dump_input = torch.rand(
        (1, 3, config.TRAIN.IMAGE_SIZE[1], config.TRAIN.IMAGE_SIZE[0])
    )
    logger.info(get_model_summary(model.cuda(), dump_input.cuda()))

    if config.TEST.MODEL_FILE:
        model_state_file = config.TEST.MODEL_FILE
    else:
        # model_state_file = os.path.join(final_output_dir, 'best_0.7589.pth')
        model_state_file = os.path.join(final_output_dir, 'best.pth')
    # NOTE(review): a hard-coded absolute checkpoint path was loaded while the
    # log claimed `model_state_file`; the hard-coded path is kept but the log
    # now names the file actually read.
    checkpoint_path = '/home/kong/Documents/DDRNet.Pytorch/DDRNet.Pytorch/output/face/ddrnet23_slim/checkpoint.pth.tar'
    logger.info('=> loading model from {}'.format(checkpoint_path))
    pretrained_dict = torch.load(checkpoint_path)
    if 'state_dict' in pretrained_dict:
        pretrained_dict = pretrained_dict['state_dict']
    # Keep only weights whose names exist in the freshly built model.
    newstate_dict = {k: v for k, v in pretrained_dict.items()
                     if k in model.state_dict()}
    model.load_state_dict(newstate_dict)

    model = model.to("cpu")
    print(model)
    model.eval()

    # BUG FIX: `mean=[...], std=[...]` on one line bound `mean` to a tuple via
    # the trailing comma; both names were unused — removed.
    # NOTE(review): convert() without a prior prepare()/calibration pass only
    # produces a quantised model if observers are already present — confirm.
    model = torch.quantization.convert(model)
    scriptedm = torch.jit.script(model)
    # BUG FIX: optimize_for_mobile lives in torch.utils.mobile_optimizer, not
    # torch.utils — the original call raised AttributeError.
    opt_model = torch.utils.mobile_optimizer.optimize_for_mobile(scriptedm)
    torch.jit.save(opt_model, "ddrnetint8.pt")
def logger_config():
    """Initialise the module-level `logger`, writing to ./log/<timestamp>.log."""
    global logger
    # log folder path, next to this source file
    LOG_FOLDER = os.path.join(os.path.dirname(__file__), "log/")
    # BUG FIX: the exists()-then-mkdir() pair was racy (another process could
    # create the folder in between) and used the non-idiomatic `is False`.
    os.makedirs(LOG_FOLDER, exist_ok=True)
    logger = create_logger(
        (LOG_FOLDER + datetime.now().strftime("%Y-%m-%d--%H-%M-%S") + ".log"))
def main():
    """Classify one test image with the configured classifier; optionally dump
    the weights to a TensorRT-style .wts file first (set savewts=True)."""
    savewts = False
    args = parse_args()
    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'demo')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    # eval() executes a string expression and returns its value — used to
    # resolve the model constructor from its configured name.
    model = eval('models.'+config.MODEL.NAME+'.get_cls_net')(
        config)
    model.load_state_dict(torch.load(args.testModel))

    if savewts:
        f = open('HRNetClassify.wts', 'w')
        f.write('{}\n'.format(len(model.state_dict().keys())))
        for k, v in model.state_dict().items():
            vr = v.reshape(-1).cpu().numpy()
            f.write('{} {} '.format(k, len(vr)))
            for vv in vr:
                f.write(' ')
                # one float32 per token, big-endian hex
                f.write(struct.pack('>f', float(vv)).hex())
            f.write('\n')
        exit(0)  # weight dump only — skip inference entirely

    # load img
    image = cv2.imread(args.testImg)  # BGR 0-255 hwc
    #im = Image.open(args.testImg)
    #print(im.getpixel((0,0)))  ## 0-255

    # resize to the configured model input size, then convert BGR -> RGB
    resized_img = cv2.resize(image,
                             (config.MODEL.IMAGE_SIZE[0],
                              config.MODEL.IMAGE_SIZE[1]))
    resized_img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB)  # RGB

    # normalize with ImageNet statistics
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    inp_image = ((resized_img/255. - mean) / std).astype(np.float32)

    inp_image = inp_image.transpose(2, 0, 1)  # chw
    inp_image = torch.from_numpy(inp_image).unsqueeze(0)  # to_tensor + batch dim

    model.eval()
    output = model(inp_image)
    #print(output)
    # top-1 predicted class index
    _, pred = output.topk(1)
    pred = pred.t()
    print(pred)
def main():
    """Run inference of a pose model over the configured validation set."""
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'inference')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(cfg))

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([transforms.ToTensor(), normalize]))
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=True)

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        state_dict = torch.load(cfg.TEST.MODEL_FILE)
        # Strip the 'module.' prefix carried by DataParallel checkpoints.
        renamed_state_dict = {
            (key[len('module.'):] if key.startswith('module.') else key): value
            for key, value in state_dict.items()
        }
        model.load_state_dict(renamed_state_dict)
    else:
        model_state_file = os.path.join(final_output_dir,
                                        'final_state.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # evaluate on validation set
    inference(cfg, valid_loader, valid_dataset, model, final_output_dir)
def main():
    """Smoke-test a pose model on a single hard-coded test image."""
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg, is_train=False)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        # NOTE(review): strict=False ignores key mismatches — confirm intended.
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    # model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    image = cv2.imread('resource/testdata/IMG_20210208_135527.jpg')
    #resized = cv2.resize(image, (192,256))
    resized = cv2.resize(image, (256, 256))
    # NOTE(review): the float32 normalisation path below is disabled and the
    # model is fed raw uint8 BGR HWC values — presumably the network does its
    # own preprocessing internally; TODO confirm.
    #img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    #img_in = img_in.astype(np.float32)
    #img_in /= 255.0
    #mean=[0.485, 0.456, 0.406]
    #std=[0.229, 0.224, 0.225]
    #img_in /= mean
    #img_in -= std
    #img_in = np.transpose(img_in, (2, 0, 1))
    tensorImage = torch.tensor(resized).byte()
    #tensorImage = torch.tensor(img_in, dtype=torch.float)
    tensorImage = tensorImage.unsqueeze(0)  # add batch dimension
    outputs = model(tensorImage)
    #for i in range(17):
    #    sample = outputs[0][i].to('cpu').detach().numpy().copy()
    #    cv2.imshow('output', sample)
    #    cv2.waitKey(0)
    print(outputs[0])
    print(outputs[1])
def main():
    """Evaluate 3-D pose via recursive pictorial structures (RPSM).

    For each multi-view group: collect per-view heatmaps, boxes and cameras,
    run RPSM seeded with the GT root location and limb lengths, and print the
    mean MPJPE over the whole test set.
    """
    args = parse_args()
    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'test3d')

    prediction_path = os.path.join(final_output_dir,
                                   config.TEST.HEATMAP_LOCATION_FILE)
    test_dataset = eval('dataset.' + config.DATASET.TEST_DATASET)(
        config, config.DATASET.TEST_SUBSET, False)

    pairwise_file = config.PICT_STRUCT.PAIRWISE_FILE
    with open(pairwise_file, 'rb') as f:
        pairwise = pickle.load(f)['pairwise_constrain']

    cnt = 0
    grouping = test_dataset.grouping
    mpjpes = []
    # BUG FIX: open the HDF5 file read-only with an explicit mode and close it
    # when done (the original handle was leaked).
    with h5py.File(prediction_path, 'r') as h5:
        all_heatmaps = h5['heatmaps']
        for items in grouping:
            heatmaps = []
            boxes = []
            poses = []
            cameras = []

            for idx in items:
                datum = test_dataset.db[idx]
                camera = datum['camera']
                cameras.append(camera)
                # ground-truth pose in world coordinates, for error measurement
                poses.append(
                    camera_to_world_frame(datum['joints_3d'], camera['R'],
                                          camera['T']))
                box = {}
                box['scale'] = np.array(datum['scale'])
                box['center'] = np.array(datum['center'])
                boxes.append(box)
                # heatmaps are stored flat in dataset order; cnt tracks them
                heatmaps.append(all_heatmaps[cnt])
                cnt += 1
            heatmaps = np.array(heatmaps)

            # This demo uses GT root locations and limb length; but can be
            # replaced by statistics
            grid_center = poses[0][0]
            body = HumanBody()
            limb_length = compute_limb_length(body, poses[0])
            prediction = rpsm(cameras, heatmaps, boxes, grid_center,
                              limb_length, pairwise, config)
            mpjpe = np.mean(
                np.sqrt(np.sum((prediction - poses[0])**2, axis=1)))
            mpjpes.append(mpjpe)
    print(np.mean(mpjpes))
def main():
    """Two-stream (RGB + optical-flow) test: run both models and compute mAP."""
    # convert to test mode
    config.MODE = 'test'
    extra()

    # create a logger and dump the configuration
    logger = create_logger(config, 'test')
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    # restrict the visible devices, then address them by local index
    os.environ["CUDA_VISIBLE_DEVICES"] = config.GPUS
    gpus = range(len(config.GPUS.split(',')))

    model_rgb = create_model()
    model_rgb.my_load_state_dict(torch.load(config.TEST.STATE_DICT_RGB),
                                 strict=True)
    model_rgb = model_rgb.cuda(gpus[0])

    model_flow = create_model()
    model_flow.my_load_state_dict(torch.load(config.TEST.STATE_DICT_FLOW),
                                  strict=True)
    model_flow = model_flow.cuda(gpus[0])

    # load data
    test_dataset_rgb = get_dataset(mode='test', modality='rgb')
    test_dataset_flow = get_dataset(mode='test', modality='flow')

    def _make_loader(ds):
        # identical loader settings for both modalities
        return torch.utils.data.DataLoader(
            ds,
            batch_size=config.TEST.BATCH_SIZE * len(gpus),
            shuffle=False,
            num_workers=config.WORKERS,
            pin_memory=True)

    test_loader_rgb = _make_loader(test_dataset_rgb)
    test_loader_flow = _make_loader(test_dataset_flow)

    result_file_path = test_final(test_dataset_rgb, model_rgb,
                                  test_dataset_flow, model_flow)
    eval_mAP(config.DATASET.GT_JSON_PATH, result_file_path)
def main():
    """Entry point for (optionally distributed) training: parse config, then
    either spawn one process per GPU or run a single worker."""
    args = parse_args()
    update_config(cfg, args)

    # After all config updates, record this node's rank in the frozen config.
    cfg.defrost()
    cfg.RANK = args.rank
    cfg.freeze()

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'train'
    )

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    # Resolve the total number of nodes (from the environment if requested).
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or cfg.MULTIPROCESSING_DISTRIBUTED

    # Count the GPUs on this machine to size the process group; use
    # CUDA_VISIBLE_DEVICES to restrict training to specific devices.
    ngpus_per_node = torch.cuda.device_count()
    if cfg.MULTIPROCESSING_DISTRIBUTED:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(
            main_worker,
            nprocs=ngpus_per_node,
            args=(ngpus_per_node, args, final_output_dir, tb_log_dir)
        )
    else:
        # Simply call main_worker function
        main_worker(
            ','.join([str(i) for i in cfg.GPUS]),
            ngpus_per_node,
            args,
            final_output_dir,
            tb_log_dir
        )
def main():
    """Export a pose model checkpoint to an ONNX-runtime-compatible file."""
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg, is_train=False)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(
            torch.load(cfg.TEST.MODEL_FILE, map_location=torch.device('cpu')),
            strict=False,
        )

    #model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # EXPORT
    # Standardised in/out tensor names (required by the runtime).
    input_names = ["input:0"]
    output_names = ["output:0"]
    dummy_input = torch.randn([1] + args.ONNX_resolution)

    # keep_initializers_as_inputs=True is required by the onnx optimizer.
    # (A CAFFE2-compatible export would additionally need
    # operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK —
    # see https://github.com/pytorch/pytorch/issues/41848.)
    torch.onnx.export(model, dummy_input, args.outfile,
                      input_names=input_names,
                      output_names=output_names,
                      keep_initializers_as_inputs=True,
                      opset_version=11)
def main():
    """Convert a pose-estimation checkpoint to ONNX under ./models/onnx/."""
    args = parse_args()

    # update config
    update_config(args.cfg)
    reset_config(config, args)

    # output dir path
    onnx_path = './models/onnx/'
    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'convert')
    if not os.path.isdir(onnx_path):
        logger.info('Creating ' + onnx_path + 'folder...')
        os.makedirs(onnx_path)

    # read the model input resolution straight from the yaml config file
    with open(args.cfg) as cfg_file:
        documents = yaml.load(cfg_file, Loader=yaml.FullLoader)
    height = documents['MODEL']['IMAGE_SIZE'][0]
    width = documents['MODEL']['IMAGE_SIZE'][1]

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    model = eval('models.' + config.MODEL.NAME + '.get_pose_net')(
        config, is_train=False)

    logger.info('=> loading model from {}'.format(config.TEST.MODEL_FILE))
    filename = config.TEST.MODEL_FILE
    cuda_available = torch.cuda.is_available()
    map_location = None if cuda_available else torch.device('cpu')
    model.load_state_dict(
        torch.load(config.TEST.MODEL_FILE, map_location=map_location))

    logger.info('=> Converting...')
    data = torch.zeros((1, 3, height, width))
    if cuda_available:
        model.cuda()
        data = data.cuda()
    model.float()

    head, filename = os.path.split(filename)
    if 'pth.tar' in filename:
        filename = filename[:-8]  # drop the trailing '.pth.tar'
    torch.onnx.export(model, data, onnx_path + filename + '.onnx')
    logger.info('=> Model saved as: ' + onnx_path + filename + '.onnx')
    logger.info('=> Done.')
def main():
    """Shard a folder of images across worker processes for pose inference
    and merge their keypoint predictions into one json file."""
    args = parse_args()
    # args.world_size = 8
    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(cfg, args.cfg,
                                                         "inference")
    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    img_dir = args.img_dir
    save_dir = args.save_dir
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    dataset = glob(os.path.join(img_dir, "*"))

    pred_queue = Queue(100)
    workers = []
    for i in range(args.world_size):
        # round-robin shard of the image list for this worker
        sub_img_list = dataset[i::args.world_size]
        p = Process(
            target=worker,
            args=(i, sub_img_list, cfg, logger, final_output_dir, save_dir,
                  pred_queue))
        p.start()
        workers.append(p)
        logger.info("==>" + " Worker {} Started, responsible for {} images".format(i, len(sub_img_list)))

    # Drain the queue BEFORE join(): a worker blocked on a full queue would
    # otherwise never exit, deadlocking the joins below.
    all_preds = []
    for idx in range(args.world_size):
        all_preds += pred_queue.get()
    for p in workers:
        p.join()

    res_file = os.path.join(save_dir, "keypoints_results.json")
    # BUG FIX: the file handle passed to json.dump was never closed; use a
    # context manager so the results file is always flushed.
    with open(res_file, 'w') as fh:
        json.dump(all_preds, fh)
def main():
    """Validate a DARTS-searched pose network on the configured test set."""
    args = parse_args()
    reset_config(config, args)

    # tensorboard
    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'test', 'valid')

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED
    torch.backends.cudnn.benchmark = True

    model = Network(config, gt.DARTS)

    if config.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(config.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(config.TEST.MODEL_FILE),
                              strict=False)
    else:
        weight_file = os.path.join(final_output_dir, 'final_state.pth')
        logger.info('=> loading model from {}'.format(weight_file))
        model.load_state_dict(torch.load(weight_file))

    gpus = [int(g) for g in config.GPUS.split(',')]
    criterion = JointsMSELoss(
        use_target_weight=config.LOSS.USE_TARGET_WEIGHT).to(device)
    model = nn.DataParallel(model, device_ids=gpus).to(device)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    valid_dataset = eval('dataset.' + config.DATASET.DATASET)(
        config, config.DATASET.ROOT, config.TRAIN.TEST_SET, False,
        transforms.Compose([transforms.ToTensor(), normalize]))
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True)

    validate(config, valid_loader, valid_dataset, model, criterion,
             final_output_dir, tb_log_dir)
def main():
    """Evaluate a multi-person 3D pose network on the configured test set."""
    args = parse_args()
    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'validate')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    gpus = [int(i) for i in config.GPUS.split(',')]
    print('=> Loading data ..')
    # ImageNet normalization applied after ToTensor.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    test_dataset = eval('dataset.' + config.DATASET.TEST_DATASET)(
        config, config.DATASET.TEST_SUBSET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=config.TEST.BATCH_SIZE * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True)

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    print('=> Constructing models ..')
    # NOTE(review): is_train=True at evaluation time looks suspicious --
    # confirm the flag only affects module construction, not eval behavior.
    model = eval('models.' + config.MODEL + '.get_multi_person_pose_net')(
        config, is_train=True)
    # Fix: removed the `with torch.no_grad():` that wrapped only the
    # DataParallel construction and load_state_dict below -- neither
    # builds an autograd graph, so the context was a misleading no-op.
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    test_model_file = os.path.join(final_output_dir, config.TEST.MODEL_FILE)
    if config.TEST.MODEL_FILE and os.path.isfile(test_model_file):
        logger.info('=> load models state {}'.format(test_model_file))
        model.module.load_state_dict(torch.load(test_model_file))
    else:
        raise ValueError('Check the model file for testing!')

    validate_3d(config, model, test_loader, final_output_dir)
def main():
    """Train a model, logging/visualizing/saving at fixed iteration intervals."""
    args = TrainOptions().parse()  # get training options
    torch.cuda.manual_seed(args.random_seed)
    args.path_helper = set_log_dir('logs', args.name)
    logger = create_logger(args.path_helper['log_path'])
    args.logger = logger

    # Dataset given opt.dataset_mode and other options.
    dataset = create_dataset(args)
    dataset_size = len(dataset)  # number of images in the dataset
    logger.info('The number of training images = %d' % dataset_size)

    # Model given opt.model; setup loads/prints networks and creates schedulers.
    model = create_model(args)
    model.setup(args)

    seen_iters = 0  # global iteration counter across all epochs
    for epoch in tqdm(range(0, args.n_epochs + args.n_epochs_decay + 1)):
        # Inner loop: one full pass over the dataset.
        for batch_idx, data in enumerate(dataset):
            tic = time.time()
            model.set_input(data)
            model.optimize_parameters()

            if (seen_iters + 1) % args.print_freq == 0:
                losses = model.get_current_losses()
                elapsed = time.time() - tic
                parts = ["[Batch: %d/%d][time: %.3f]" % (batch_idx, len(dataset), elapsed)]
                parts.extend('[%s: %.3f]' % (name, value)
                             for name, value in losses.items())
                message = ''.join(parts)
                logger.info(message)
                tqdm.write(message)

            if (seen_iters + 1) % args.display_freq == 0:
                model.compute_visuals()
                utils.save_current_results(args, model.get_current_visuals(), epoch)

            if (seen_iters + 1) % args.save_epoch_freq == 0:
                logger.info('saving the model at the end of epoch %d' % (epoch))
                model.save_networks('latest')
                model.save_networks(epoch)

            seen_iters += 1

        model.update_learning_rate()
def main():
    """Time an OpenVINO pose model on validation images and dump debug images."""
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    exec_net = load_to_IE(args.model)

    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([transforms.ToTensor()]))
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=True)

    process_time = AverageMeter()
    with torch.no_grad():
        for i, (input, target, target_weight, meta) in enumerate(valid_loader):
            tic = time.time()
            # compute output (single-image batch)
            blob = np.expand_dims(input[0].numpy(), 0)
            output = sync_inference(exec_net, image=blob)
            # measure elapsed time
            process_time.update(time.time() - tic)

            batch_heatmaps = output['Conv_746']
            coords = output['Mul_772']
            prefix = '{}_{}'.format(os.path.join(final_output_dir, 'val'), i)
            save_debug_images(cfg, input, meta, target, coords * 4,
                              torch.from_numpy(batch_heatmaps), prefix)
            if i == 100:
                break

    logger.info(
        f'OpenVINO IE: Average processing time of model with merged pre- and post-processing:{process_time.avg}'
    )
def main():
    """Extract features with a Cityscapes-pretrained HRNet on a hash list of images."""
    cfg = "experiments/bdd100k/bdd100k_resize.yaml"
    model_state_file = "data/pretrained_models/hrnet_w48_cityscapes_cls19_1024x2048_trainset.pth"
    logger, final_output_dir, _ = create_logger(config, cfg, 'test')
    ext_update_config(config, cfg, [])

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    # build model
    model = models.seg_hrnet.get_seg_model(config)
    image_size = config.TEST.IMAGE_SIZE
    dump_input = torch.rand((1, 3, image_size[1], image_size[0]))

    # Load pretrained weights: drop the first 6 chars of each checkpoint
    # key and keep only the keys this model actually defines.
    pretrained_dict = torch.load(model_state_file)
    model_dict = model.state_dict()
    pretrained_dict = {
        key[6:]: weight
        for key, weight in pretrained_dict.items()
        if key[6:] in model_dict
    }
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    model.eval()
    with torch.no_grad():
        seg_hash_list = hr.get_train_list("../seg_hash_list.txt")
        gen_feature_list(model, image_size, seg_hash_list)
def predict(cfg_path, img_dir, bbox_dir, out_file, param_overrides=None):
    """Run HRNet pose prediction over the images in *img_dir* using external boxes.

    Args:
        cfg_path: path to the experiment yaml config.
        img_dir: directory of input images (converted to a glob below).
        bbox_dir: directory holding detection boxes for the images.
        out_file: destination file for the predicted keypoints.
        param_overrides: optional list of config override tokens (cfg opts).
    """
    # Fix: the original used a mutable default argument ([]); None sentinel
    # is the safe, backward-compatible idiom.
    if param_overrides is None:
        param_overrides = []

    # update_config needs some hardcoded params, fake them here
    class args:
        cfg = cfg_path
        opts = param_overrides
        modelDir = ''
        logDir = ''
        dataDir = ''

    update_config(cfg, args)
    cfg.defrost()
    cfg.TEST.MODEL_FILE = '../hrnet/pose_hrnet_w32_256x192.pth'
    cfg.TEST.USE_GT_BBOX = False
    cfg.TEST.BATCH_SIZE_PER_GPU = 64
    cfg.GPUS = (0, )
    cfg.freeze()

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, cfg_path, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                              is_train=False)
    model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # Data loading code: ImageNet normalization after ToTensor.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    normalize = transforms.Compose([transforms.ToTensor(), normalize])
    detection_thresh = 0.8
    img_dir = os.path.join(img_dir, '*')  # Dataset requires a glob format

    predict_imgs(model, img_dir, bbox_dir, out_file, normalize,
                 detection_thresh)
def main():
    """Evaluate a GAN generator checkpoint (IS / FID) on fixed random latents."""
    args = cfg.parse_args()
    # Fix: only the CUDA generator was seeded, so the np.random.normal call
    # below (fixed_z) ignored --random_seed and was not reproducible.
    # Seed every RNG the script draws from, matching the sibling eval script.
    torch.manual_seed(args.random_seed)
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)

    assert args.exp_name
    assert args.load_path.endswith('.pth')
    assert os.path.exists(args.load_path)
    args.path_helper = set_log_dir('logs_eval', args.exp_name)
    logger = create_logger(args.path_helper['log_path'], phase='test')

    # set tf env
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # import network
    gen_net = eval('models.' + args.model + '.Generator')(args=args).cuda()

    # initial: a fixed 5x5 grid of latent vectors for sample visualization.
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))

    # set writer
    logger.info(f'=> resuming from {args.load_path}')
    checkpoint_file = args.load_path
    assert os.path.exists(checkpoint_file)
    checkpoint = torch.load(checkpoint_file)

    if 'avg_gen_state_dict' in checkpoint:
        # Full training checkpoint: restore the EMA generator weights.
        gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        epoch = checkpoint['epoch']
        logger.info(f'=> loaded checkpoint {checkpoint_file} (epoch {epoch})')
    else:
        # Bare state dict.
        gen_net.load_state_dict(checkpoint)
        logger.info(f'=> loaded checkpoint {checkpoint_file}')

    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'valid_global_steps': 0,
    }
    inception_score, fid_score = validate(args, fixed_z, gen_net, writer_dict)
    logger.info(f'Inception score: {inception_score}, FID score: {fid_score}.')
def main():
    """Log the architecture, parameter count, FLOPs and activations of a pose model."""
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'trace')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    pose_net = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
        cfg, is_train=False).cuda()

    logger.info("Model:\n{}".format(pose_net))
    logger.info("Params: {:,}".format(params_count(pose_net)))
    logger.info("Flops: {:,} G".format(get_model_stats(pose_net, cfg, "flop")))
    logger.info("Activations: {:,} M".format(
        get_model_stats(pose_net, cfg, "activation")))
def main():
    """Smoke-test a trained pose network on the first image in --image_dir."""
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                              is_train=False)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    # Only the first globbed image is processed (break below).
    for image_path in glob.glob(os.path.join(args.image_dir, "*")):
        with torch.no_grad():
            rgb = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
            preprocess = get_preprocessing()
            tensor_im = preprocess(image=rgb)['image']
            batch = torch.unsqueeze(tensor_im, 0).to('cuda')
            ret = model(batch)
            print(ret)
            print(ret.shape)
        break