def read_frames_split_file(self, frames_split_file=None):
        if frames_split_file is None:
            print(f"[Note] No frame split will be specified.")
            return None
        
        try:
            frames_split = cfg.load_config(frames_split_file)

            assert 'train' in frames_split and 'val' in frames_split
        except FileNotFoundError:
            print(
                f"[Warning] File {frames_split_file} not found. No frame split will be specified.")
            return None
        except AssertionError:
            print(
                f"[Warning] Invalid train/val frame splits in {frames_split_file}. No frame split will be specified.")
            return None

        # Reorganise frames split: 'train' and 'val' are lists of single-key
        # dicts; flatten each into one dict keyed by the stringified key.
        for split in ('train', 'val'):
            new_dict = {}
            for d in frames_split[split]:
                for k in d.keys():
                    new_dict[str(k)] = d[k]
            frames_split[split] = new_dict

        return frames_split
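
# A hedged sketch of the frames-split file this method expects: cfg.load_config
# must yield 'train' and 'val' keys, each a list of single-key dicts (the
# subject names below are illustrative assumptions, not from the original):
#
#   frames_split = {
#       'train': [{'S1': [0, 1, 2]}, {'S5': [10, 11]}],
#       'val':   [{'S9': [3, 4]}],
#   }
#
# The reorganise loop flattens each list into {'S1': [0, 1, 2], 'S5': [10, 11]}.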
def infer(model_type="alg",max_num=5, save_images_instead=1, crop=True):

    if model_type == "alg":
        config = cfg.load_config("./experiments/human36m/train/human36m_alg.yaml")
    elif model_type == "vol":
        config = cfg.load_config("./experiments/human36m/train/human36m_vol_softmax.yaml")
        pelvis3d = loadPrePelvis(config.dataset.train.pred_results_path)

    device = torch.device(0)
    labels = loadHuman36mLabel(config.dataset.train.labels_path)
    detector = Detector(config, device=device)
    for idx in range(max_num):
        sample = [prepareSample(100 + idx, labels, config.dataset.train.h36m_root,
                                keyPoint3d=None, crop=crop, imageShape=config.image_shape)]
        viewSample(sample[0], idx)
        prediction, inputBatch = detector.inferHuman36Data(sample, model_type, device, config,
                                                           randomize_n_views=config.dataset.val.randomize_n_views,
                                                           min_n_views=config.dataset.val.min_n_views,
                                                           max_n_views=config.dataset.val.max_n_views)
        viewResult(sample[0], idx, prediction, config, save_images_instead=save_images_instead)
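
# A minimal usage sketch for infer(), assuming the YAML config paths referenced
# above exist and a GPU is available:
#
#   infer(model_type="alg", max_num=5, save_images_instead=True, crop=True)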
Example #3
def main(args):
    print("Number of available GPUs: {}".format(torch.cuda.device_count()))

    is_distributed = init_distributed(args)
    master = True
    if is_distributed and os.environ["RANK"]:
        master = int(os.environ["RANK"]) == 0

    if is_distributed:
        device = torch.device(args.local_rank)
    else:
        device = torch.device(0)

    # config
    config = cfg.load_config(args.config)
    config.opt.n_iters_per_epoch = config.opt.n_objects_per_epoch // config.opt.batch_size

    model = {
        "ransac": RANSACTriangulationNet,
        "alg": AlgebraicTriangulationNet,
        "vol": VolumetricTriangulationNet
    }[config.model.name](config, device=device).to(device)

    if config.model.init_weights:
        state_dict = torch.load(config.model.checkpoint)
        for key in list(state_dict.keys()):
            new_key = key.replace("module.", "")
            state_dict[new_key] = state_dict.pop(key)

        model.load_state_dict(state_dict, strict=True)
        print("Successfully loaded pretrained weights for whole model")

    # criterion
    criterion_class = {
        "MSE": KeypointsMSELoss,
        "MSESmooth": KeypointsMSESmoothLoss,
        "MAE": KeypointsMAELoss
    }[config.opt.criterion]

    if config.opt.criterion == "MSESmooth":
        criterion = criterion_class(config.opt.mse_smooth_threshold)
    else:
        criterion = criterion_class()

    # optimizer
    opt = None
    if not args.eval:
        if config.model.name == "vol":
            opt = torch.optim.Adam(
                [{'params': model.backbone.parameters()},
                 {'params': model.process_features.parameters(), 'lr': config.opt.process_features_lr if hasattr(config.opt, "process_features_lr") else config.opt.lr},
                 {'params': model.volume_net.parameters(), 'lr': config.opt.volume_net_lr if hasattr(config.opt, "volume_net_lr") else config.opt.lr}
                ],
                lr=config.opt.lr
            )
        else:
            opt = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.opt.lr)


    # datasets
    print("Loading data...")
    train_dataloader, val_dataloader, train_sampler = setup_dataloaders(config, distributed_train=is_distributed)

    # experiment
    experiment_dir, writer = None, None
    if master:
        experiment_dir, writer = setup_experiment(config, type(model).__name__, is_train=not args.eval)

    # multi-gpu
    if is_distributed:
        model = DistributedDataParallel(model, device_ids=[device])

    if not args.eval:
        print('training process')
        # train loop
        n_iters_total_train, n_iters_total_val = 0, 0
        pbar = tqdm(total=config.opt.n_epochs, desc='training process')
        for epoch in range(config.opt.n_epochs):
            if train_sampler is not None:
                train_sampler.set_epoch(epoch)

            n_iters_total_train = one_epoch(model, criterion, opt, config, train_dataloader, device, epoch, n_iters_total=n_iters_total_train, is_train=True, master=master, experiment_dir=experiment_dir, writer=writer)
            n_iters_total_val = one_epoch(model, criterion, opt, config, val_dataloader, device, epoch, n_iters_total=n_iters_total_val, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)

            if master:
                checkpoint_dir = os.path.join(experiment_dir, "checkpoints", "{:04}".format(epoch))
                os.makedirs(checkpoint_dir, exist_ok=True)

                torch.save(model.state_dict(), os.path.join(checkpoint_dir, "weights.pth"))

            print(f"{n_iters_total_train} iters done.")
            pbar.update(1)
        pbar.close()
    else:
        print('evaluation process')
        if args.eval_dataset == 'train':
            one_epoch(model, criterion, opt, config, train_dataloader, device, 0, n_iters_total=0, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)
        else:
            one_epoch(model, criterion, opt, config, val_dataloader, device, 0, n_iters_total=0, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)

    print("Done.")
def main():
    #device = torch.device('cpu')
    device = torch.device('cuda')

    #config = cfg.load_config("experiments/human36m/eval/human36m_vol_softmax.yaml")
    config = cfg.load_config("experiments/human36m/eval/human36m_alg.yaml")

    #model = VolumetricTriangulationNet2(config, device=device).to(device)
    #model = BaselinePose2d(config, device=device).to(device)
    #model = AlgebraicTriangulationNet2(config, device=device).to(device)
    #model = VolPose2dFeatureOnly(config, device=device).to(device)
    #model = VolPose2dSpine2dAndFeatures(config, device=device).to(device)
    #model = AlgPose3dTriangulation(config, device=device).to(device)
    #model = VolPose3d(config, device=device).to(device)
    #model = AlgPose3dPreTriangulation(config, device=device).to(device)
    #model = AlgebraicTriangulationNetPermute(config, device=device).to(device)
    #model = AlgebraicHeatmap(config, device=device).to(device)
    model = AlgebraicTriangulationNetPreprocess(config, device=device).to(device)

    if config.model.init_weights:
        # torch.device never compares equal to a plain string, so check .type
        if device.type == 'cpu':
            state_dict = torch.load(config.model.checkpoint, map_location='cpu')
        else:
            state_dict = torch.load(config.model.checkpoint)
        for key in list(state_dict.keys()):
            new_key = key.replace("module.", "")
            state_dict[new_key] = state_dict.pop(key)

        model.load_state_dict(state_dict, strict=True)
        print("Successfully loaded pretrained weights for whole model")
    model.eval()

    #input_image = torch.rand(2, 2, 3, 384, 384).cpu() #.cpu() .cuda()
    #input_projMat = torch.rand(2, 2, 3, 4).cpu() #.cpu() .cuda()

    #input = torch.rand(2, 3, 384, 384).to(device)
    #input = torch.rand(1, 384, 384, 3).to(device)
    #input_projMat = torch.rand(5, 3, 4).to(device)

    #input_2dpoints = torch.rand(2, 2).to(device)
    #input_confidence = torch.rand(2).to(device)

    #input_volumes = torch.rand(1, 32,64,64,64).to(device)
    #input_coords = createCoordinateVolume(batch_size=1, device=device)

    ##########################################
    ######## Algebraic triangulation #########
    ### baseline_pose2d_withConf_2x384x384xBGRxByte (BGR2RGB and Byte2Float included in onnx)
    ##########################################
    input = torch.rand(2, 384, 384, 3).byte().to(device)
    ### Export the model
    torch.onnx.export(
        model,                     # model being run
        (input,),                  # model input (or a tuple for multiple inputs)
        "baseline_pose2d_withConf_2x384x384xBGRxByte.onnx",  # where to save the model (can be a file or file-like object)
        export_params=True,        # store the trained parameter weights inside the model file
        opset_version=12,          # the ONNX version to export the model to
        do_constant_folding=True,  # whether to execute constant folding for optimization
        input_names=['images'],    # the model's input names  # [n*384*384*3]
        output_names=['joints2d', 'confidence'],  # the model's output names  # [n*17*2], [n*17]
        # dynamic_axes={'images': {0: 'batch_size'},      # variable length axes: this makes dim=0 of the
        #               'joints2d': {0: 'batch_size'},    # input/output tensors dynamic; all other
        #               'confidence': {0: 'batch_size'}}  # dimensions stay fixed
    )
    onnx_model = onnx.load("baseline_pose2d_withConf_2x384x384xBGRxByte.onnx")
    onnx.checker.check_model(onnx_model)
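
# To sanity-check the export end to end, the graph can also be run through
# onnxruntime and the output shapes compared against the declared names. A
# hedged sketch, assuming onnxruntime is installed and main() has produced the
# file (the tensor names come from the export call above):

import numpy as np
import onnxruntime as ort

def check_exported_model(path="baseline_pose2d_withConf_2x384x384xBGRxByte.onnx"):
    sess = ort.InferenceSession(path)
    dummy = np.random.randint(0, 256, size=(2, 384, 384, 3), dtype=np.uint8)  # uint8 BGR batch
    joints2d, confidence = sess.run(['joints2d', 'confidence'], {'images': dummy})
    print(joints2d.shape, confidence.shape)  # expected: (2, 17, 2) and (2, 17)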
def main(args):
    print("Number of available GPUs: {}".format(torch.cuda.device_count()))

    # Attempt to fix overflow error with pickle
    # See https://stackoverflow.com/questions/51562221/python-multiprocessing-overflowerrorcannot-serialize-a-bytes-object-larger-t
    ctx = torch.multiprocessing.get_context()
    ctx.reducer = pickle4reducer.Pickle4Reducer()

    config = cfg.load_config(args.config)

    global DEBUG
    DEBUG = config.debug_mode if hasattr(config, "debug_mode") else False
    print("Debugging Mode: ", DEBUG)

    is_distributed = init_distributed(args)
    print("Using distributed:", is_distributed)

    master = True
    if is_distributed and os.environ["RANK"]:
        master = int(os.environ["RANK"]) == 0

    if is_distributed:
        print("Rank:", args.local_rank)
        device = torch.device(args.local_rank)
    else:
        device = torch.device(0)

    # config
    config.opt.n_iters_per_epoch = config.opt.n_objects_per_epoch // config.opt.batch_size
    
    if hasattr(config.opt, "n_objects_per_epoch_val"):
        config.opt.n_iters_per_epoch_val = config.opt.n_objects_per_epoch_val // config.opt.val_batch_size
    else:
        config.opt.n_iters_per_epoch_val = None

    model = {
        "ransac": RANSACTriangulationNet,
        "alg": AlgebraicTriangulationNet,
        "vol": VolumetricTriangulationNet
    }[config.model.name](config, device=device).to(device)

    # NOTE: May be a bad idea to share memory since NCCL used
    # https://pytorch.org/docs/stable/distributed.html#torch.distributed.Backend
    # model.share_memory()

    if config.model.init_weights:
        state_dict = torch.load(config.model.checkpoint)
        for key in list(state_dict.keys()):
            new_key = key.replace("module.", "")
            state_dict[new_key] = state_dict.pop(key)

        model.load_state_dict(state_dict, strict=False)
        print("Successfully loaded pretrained weights for whole model")

    # criterion
    criterion_class = {
        "MSE": KeypointsMSELoss,
        "MSESmooth": KeypointsMSESmoothLoss,
        "MAE": KeypointsMAELoss
    }[config.opt.criterion]

    if config.opt.criterion == "MSESmooth":
        criterion = criterion_class(config.opt.mse_smooth_threshold)
    else:
        criterion = criterion_class()

    # optimizer
    opt = None
    if not args.eval:
        print("Optimising model...")
        if config.model.name == "vol":
            opt = torch.optim.Adam(
                [{'params': model.backbone.parameters()},
                 {'params': model.process_features.parameters(), 'lr': config.opt.process_features_lr if hasattr(config.opt, "process_features_lr") else config.opt.lr},
                 {'params': model.volume_net.parameters(), 'lr': config.opt.volume_net_lr if hasattr(config.opt, "volume_net_lr") else config.opt.lr}
                ],
                lr=config.opt.lr
            )
        else:
            opt = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.opt.lr)


    # datasets
    print("Loading data...")
    train_dataloader, val_dataloader, train_sampler = setup_dataloaders(config, distributed_train=is_distributed)

    # experiment
    experiment_dir, writer = None, None
    if master:
        experiment_dir, writer = setup_experiment(config, type(model).__name__, is_train=not args.eval)

    # multi-gpu
    if is_distributed:
        model = DistributedDataParallel(model, device_ids=[device])

    if not args.eval:
        print(f"Performing training with {config.opt.n_epochs} total epochs...")

        # train loop
        n_iters_total_train, n_iters_total_val = 0, 0
        for epoch in range(config.opt.n_epochs):
            if train_sampler is not None:
                train_sampler.set_epoch(epoch)

            if DEBUG:
                print(f"Training epoch {epoch}...")

            # Cache needs to be emptied first
            # torch.cuda.empty_cache()
            # print("CUDA Cache Empty!")

            n_iters_total_train = one_epoch(model, criterion, opt, config, train_dataloader, device, epoch, n_iters_total=n_iters_total_train, is_train=True, master=master, experiment_dir=experiment_dir, writer=writer)

            if DEBUG:
                print(f"Epoch {epoch} training complete!")

                # torch.cuda.empty_cache()

                print(f"Evaluating epoch {epoch}...")

            n_iters_total_val = one_epoch(model, criterion, opt, config, val_dataloader, device, epoch, n_iters_total=n_iters_total_val, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)

            if DEBUG:
                print(f"Epoch {epoch} evaluation complete!")

            if master:
                checkpoint_dir = os.path.join(experiment_dir, "checkpoints", "{:04}".format(epoch))
                os.makedirs(checkpoint_dir, exist_ok=True)

                if DEBUG:
                    print(f"Saving checkpoints to {checkpoint_dir}/weights.pth... ", end="")

                torch.save(model.state_dict(), os.path.join(checkpoint_dir, "weights.pth"))

                if DEBUG:
                    print("Checkpoint saved!")

            print(f"{n_iters_total_train} iters done.")
    else:
        if args.eval_dataset == 'train':
            one_epoch(model, criterion, opt, config, train_dataloader, device, 0, n_iters_total=0, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)
        else:
            one_epoch(model, criterion, opt, config, val_dataloader, device, 0, n_iters_total=0, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)

    print("Done.")
def infer_videos(model_type="alg",subject="S1", action="Sitting-1", max_num=5, save_images_instead=True, crop=True):

    if model_type == "alg":
        config = cfg.load_config("./experiments/human36m/train/human36m_alg.yaml")
    elif model_type == "vol":
        config = cfg.load_config("./experiments/human36m/train/human36m_vol_softmax.yaml")
        pelvis3d = loadPrePelvis(config.dataset.train.pred_results_path)

    device = torch.device(0)
    detector = Detector(config, device=device)

    bboxes = fill_bbox_subject_action(bbox_file, subject, action)
    cameras = fill_cameras_subject(h5_file,subject)
    cap = {}
    wri = None
    human36mRoot = "/dataset/experiment-dataset/extracted/"
    video_path = os.path.join(human36mRoot, subject, 'Videos')

    for (camera_idx, camera) in enumerate(retval['camera_names']):
        video_name = os.path.join(video_path, action.replace("-", " ") + '.' + camera + '.mp4')
        assert os.path.isfile(video_name), '%s doesn\'t exist' % video_name
        cap[camera] = cv2.VideoCapture(video_name)
        size = (int(cap[camera].get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap[camera].get(cv2.CAP_PROP_FRAME_HEIGHT)))

    if save_images_instead:
        wri = cv2.VideoWriter(
            f'./result/result-{subject}-{action}.mp4',
            cv2.VideoWriter_fourcc('m', 'p', '4', 'v'),
            30, (1920, 384))
    idx = 0
    while True:
        frames = {}
        read_ok = True
        for (camera_idx, camera) in enumerate(retval['camera_names']):
            success, frames[camera] = cap[camera].read()
            if not success:
                read_ok = False  # previously this break only exited the inner loop
                break
        if not read_ok:
            break

        bbox = get_bbox_subject_action(bboxes, idx)
        sample = prepareVideoSample(info=retval, images=frames, cameras=cameras, bboxes=bbox,
                                    subject=subject, imageShape=[384, 384], scaleBox=1.0,
                                    crop=crop, normImage=False)
        prediction, inputBatch = detector.infer(sample, model_type, device, config)

        combined = viewVideoResult(sample, idx, prediction, config)
        #combined = viewVideo(sample)
        idx += 1
        if save_images_instead:
            if idx < max_num:
                #file = f"./result/result-video-{subject}-{action}-{camera}-{idx}.png"
                #cv2.imwrite(file, combined)
                wri.write(combined)
            else:
                break
        else:
            cv2.imshow('w', combined)
            cv2.setWindowTitle('w', f"Index {idx}")

            c = cv2.waitKey(0) % 256
            if c == ord('q') or c == 27:
                print('Quitting...')
                break

    cv2.destroyAllWindows()
    for (camera_idx, camera) in enumerate(retval['camera_names']):
        cap[camera].release()
    if save_images_instead:
        wri.release()
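
# infer_videos() relies on module-level globals defined elsewhere in the
# original script: bbox_file, h5_file, and retval (which must provide
# retval['camera_names']). A hedged usage sketch, assuming those are set up --
# the camera IDs below are the standard Human3.6M camera names:
#
#   retval = {'camera_names': ['54138969', '55011271', '58860488', '60457274']}
#   infer_videos(model_type="alg", subject="S1", action="Sitting-1",
#                max_num=100, save_images_instead=True)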
Example #7
def main(args):
    #print("Number of available GPUs: {}".format(torch.cuda.device_count()))

    #is_distributed = init_distributed(args)
    #master = True
    #if is_distributed and os.environ["RANK"]:
    #    master = int(os.environ["RANK"]) == 0

    #if is_distributed:
    #    device = torch.device(args.local_rank)
    #else:
    #    device = torch.device(0)
    device = torch.device('cpu')  ######################################################

    # config
    config = cfg.load_config(args.config)
    #configAlg = cfg.load_config(args.configAlg)######### ALG ##########
    config.opt.n_iters_per_epoch = config.opt.n_objects_per_epoch // config.opt.batch_size

    #model = {
    #    "ransac": RANSACTriangulationNet,
    #    "alg": AlgebraicTriangulationNet,
    #    "vol": VolumetricTriangulationNet
    #}[config.model.name](config, device=device).to(device)
    #modelAlg = AlgebraicTriangulationNet(configAlg, device=device).to(device)######### ALG ##########
    model = VolumetricTriangulationNet(config, device=device).to(device)

    if config.model.init_weights:
        state_dict = torch.load(
            config.model.checkpoint, map_location='cpu'
        )  ######################################################
        for key in list(state_dict.keys()):
            new_key = key.replace("module.", "")
            state_dict[new_key] = state_dict.pop(key)

        model.load_state_dict(state_dict, strict=True)
        print("Successfully loaded pretrained weights for whole model")

    ######### ALG ##########
    #if configAlg.model.init_weights:
    #    state_dict = torch.load(configAlg.model.checkpoint, map_location='cpu') ######################################################
    #    for key in list(state_dict.keys()):
    #        new_key = key.replace("module.", "")
    #        state_dict[new_key] = state_dict.pop(key)

    #    modelAlg.load_state_dict(state_dict, strict=True)
    #    print("Successfully loaded pretrained weights for whole model")
    ######### ALG ##########

    # criterion
    #criterion_class = {
    #    "MSE": KeypointsMSELoss,
    #    "MSESmooth": KeypointsMSESmoothLoss,
    #    "MAE": KeypointsMAELoss
    #}[config.opt.criterion]

    #if config.opt.criterion == "MSESmooth":
    #    criterion = criterion_class(config.opt.mse_smooth_threshold)
    #else:
    #    criterion = criterion_class()

    # optimizer
    #opt = None
    #if not args.eval:
    #    if config.model.name == "vol":
    #        opt = torch.optim.Adam(
    #            [{'params': model.backbone.parameters()},
    #             {'params': model.process_features.parameters(), 'lr': config.opt.process_features_lr if hasattr(config.opt, "process_features_lr") else config.opt.lr},
    #             {'params': model.volume_net.parameters(), 'lr': config.opt.volume_net_lr if hasattr(config.opt, "volume_net_lr") else config.opt.lr}
    #            ],
    #            lr=config.opt.lr
    #        )
    #    else:
    #        opt = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.opt.lr)

    ## datasets
    #print("Loading data...")
    #train_dataloader, val_dataloader, train_sampler = setup_dataloaders(config, distributed_train=is_distributed)

    # experiment
    #experiment_dir, writer = None, None
    #if master:
    #    experiment_dir, writer = setup_experiment(config, type(model).__name__, is_train=not args.eval)
    experiment_dir, writer = setup_experiment(config,
                                              type(model).__name__,
                                              is_train=not args.eval)

    # multi-gpu
    #if is_distributed:
    #    model = DistributedDataParallel(model, device_ids=[device])

    #if not args.eval:
    #    # train loop
    #    n_iters_total_train, n_iters_total_val = 0, 0
    #    for epoch in range(config.opt.n_epochs):
    #        if train_sampler is not None:
    #            train_sampler.set_epoch(epoch)

    #        n_iters_total_train = one_epoch(model, criterion, opt, config, train_dataloader, device, epoch, n_iters_total=n_iters_total_train, is_train=True, master=master, experiment_dir=experiment_dir, writer=writer)
    #        n_iters_total_val = one_epoch(model, criterion, opt, config, val_dataloader, device, epoch, n_iters_total=n_iters_total_val, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)

    #        if master:
    #            checkpoint_dir = os.path.join(experiment_dir, "checkpoints", "{:04}".format(epoch))
    #            os.makedirs(checkpoint_dir, exist_ok=True)

    #            torch.save(model.state_dict(), os.path.join(checkpoint_dir, "weights.pth"))

    #        print(f"{n_iters_total_train} iters done.")
    #else:
    #    if args.eval_dataset == 'train':
    #        one_epoch(model, criterion, opt, config, train_dataloader, device, 0, n_iters_total=0, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)
    #    else:
    #        one_epoch(model, criterion, opt, config, val_dataloader, device, 0, n_iters_total=0, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)

    one_epoch(model,
              config,
              device,
              0,
              n_iters_total=0,
              is_train=False,
              experiment_dir=experiment_dir,
              writer=writer)
    #one_epoch(model, config, modelAlg, configAlg, device, 0, n_iters_total=0, is_train=False, experiment_dir=experiment_dir, writer=writer)

    print("Done.")
def main(args):
    print("Number of available GPUs: {}".format(torch.cuda.device_count()))

    # Attempt to fix overflow error with pickle
    # See https://stackoverflow.com/questions/51562221/python-multiprocessing-overflowerrorcannot-serialize-a-bytes-object-larger-t
    ctx = torch.multiprocessing.get_context()
    ctx.reducer = pickle4reducer.Pickle4Reducer()

    config = cfg.load_config(args.config)

    global DEBUG
    DEBUG = config.debug_mode if hasattr(config, "debug_mode") else False
    print("Debugging Mode: ", DEBUG)

    is_distributed = init_distributed(args)
    print("Using distributed:", is_distributed)

    master = True
    if is_distributed and os.environ["RANK"]:
        master = int(os.environ["RANK"]) == 0

    if is_distributed:
        print("Rank:", args.local_rank)
        device = torch.device(args.local_rank)
    else:
        device = torch.device(0)

    # config
    config.opt.n_iters_per_epoch = config.opt.n_objects_per_epoch // config.opt.batch_size

    if hasattr(config.opt, "n_objects_per_epoch_val"):
        config.opt.n_iters_per_epoch_val = config.opt.n_objects_per_epoch_val // config.opt.val_batch_size
    else:
        config.opt.n_iters_per_epoch_val = None

    model = {
        "ransac": RANSACTriangulationNet,
        "alg": AlgebraicTriangulationNet,
        "vol": VolumetricTriangulationNet
    }[config.model.name](config, device=device).to(device)

    # NOTE: May be a bad idea to share memory since NCCL used
    # https://pytorch.org/docs/stable/distributed.html#torch.distributed.Backend
    # model.share_memory()

    if config.model.init_weights:
        state_dict = torch.load(config.model.checkpoint)
        for key in list(state_dict.keys()):
            new_key = key.replace("module.", "")
            state_dict[new_key] = state_dict.pop(key)

        model.load_state_dict(state_dict, strict=True)
        print("Successfully loaded pretrained weights for whole model")

    # criterion
    criterion_class = {
        "MSE": KeypointsMSELoss,
        "MSESmooth": KeypointsMSESmoothLoss,
        "MAE": KeypointsMAELoss
    }[config.opt.criterion]

    if config.opt.criterion == "MSESmooth":
        criterion = criterion_class(config.opt.mse_smooth_threshold)
    else:
        criterion = criterion_class()

    print("Loading data...")
    val_dataloader = setup_dataloaders(config)

    # experiment
    experiment_dir, writer = None, None
    if master:
        experiment_dir, writer = setup_experiment(config, type(model).__name__)

    one_epoch(model,
              criterion,
              opt,
              config,
              val_dataloader,
              device,
              0,
              n_iters_total=0,
              master=master,
              experiment_dir=experiment_dir,
              writer=writer)

    print("Done.")
Example #9
    new_cx = cx * (new_w / w)
    new_cy = cy * (new_h / h)
    K[0, 0], K[1, 1], K[0, 2], K[1, 2] = new_fx, new_fy, new_cx, new_cy
    return K
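
# Only the tail of the intrinsics-rescaling helper survives above. A
# self-contained sketch of the full operation it implements -- the function
# name and signature here are assumptions:

import numpy as np

def rescale_intrinsics(K, old_size, new_size):
    """Rescale a 3x3 pinhole intrinsics matrix K when the image is resized."""
    w, h = old_size
    new_w, new_h = new_size
    new_fx = K[0, 0] * (new_w / w)  # focal lengths scale with each axis
    new_fy = K[1, 1] * (new_h / h)
    new_cx = K[0, 2] * (new_w / w)  # the principal point scales the same way
    new_cy = K[1, 2] * (new_h / h)
    K = K.copy()
    K[0, 0], K[1, 1], K[0, 2], K[1, 2] = new_fx, new_fy, new_cx, new_cy
    return K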


if __name__ == "__main__":
    bbox = [420, 0, 1500, 1080]
    size = (480, 480)

    folder = os.path.join("data", "real", "single_human")
    output_folder = os.path.join(folder, "results")

    device = torch.device(0)

    config = cfg.load_config(
        'experiments/syn_data/multiview_data_alg_test_17jnts.yaml')

    model = AlgebraicTriangulationNet(config, device=device).to(device)

    state_dict = torch.load(config.model.checkpoint)
    for key in list(state_dict.keys()):
        new_key = key.replace("module.", "")
        state_dict[new_key] = state_dict.pop(key)

    model.load_state_dict(state_dict, strict=True)

    # load camera
    proj_mats = torch.empty((1, 2, 3, 4))
    with open(os.path.join(folder, "cam0.json"), 'r') as json_file:
        cam0 = json.load(json_file)
    with open(os.path.join(folder, "cam1.json"), 'r') as json_file:
    18: (BasicBlock, [2, 2, 2, 2]),
    34: (BasicBlock, [3, 4, 6, 3]),
    50: (Bottleneck, [3, 4, 6, 3]),
    101: (Bottleneck, [3, 4, 23, 3]),
    152: (Bottleneck, [3, 8, 36, 3])
}

if __name__ == '__main__':
    # quick and dirty hardcoded inference

    # step 0 load the sample data
    image_names = glob.glob('/home/colin/panoptic/images/*.png')
    images = [cv.imread(image_name) for image_name in image_names]
    # step 1 load the 2D backbone model and do inference on the images
    config = load_config(
        '/home/colin/vpose/learnable-triangulation/pytorch/experiments/human36m/eval/test_vol.yaml'
    )
    num_layers = 152
    resnet_weight_path = '/home/colin/vpose/pose_resnet_4.5_pixels_human36m.pth'
    resnet_weights = torch.load(resnet_weight_path,
                                map_location=torch.device('cpu'))
    block_class, layers = resnet_spec[num_layers]
    model_2d = PoseResNet(block_class,
                          layers,
                          17,
                          num_input_channels=3,
                          deconv_with_bias=False,
                          num_deconv_layers=3,
                          num_deconv_filters=(256, 256, 256),
                          num_deconv_kernels=(4, 4, 4),
                          final_conv_kernel=1)
        raise Exception("n_images_step cannot be < 1")
except:
    n_images_step = 1

try:
    save_images_instead = (int(sys.argv[4]) == 1)
except:
    save_images_instead = False

assert os.path.exists(results_file) and os.path.isfile(
    results_file), f"Results file {results_file} does not exist!"
assert os.path.exists(config_file) and os.path.isfile(
    config_file), f"Config file {config_file} does not exist!"

# Load config file and necessary information
config = cfg.load_config(config_file)

if config.kind == "cmu":
    dataset = cmupanoptic.CMUPanopticDataset(
        cmu_root=config.dataset.val.cmu_root,
        pred_results_path=config.dataset.val.pred_results_path if hasattr(
            config.dataset.val, "pred_results_path") else None,
        train=False,
        test=True,
        image_shape=config.image_shape if hasattr(config, "image_shape") else
        (256, 256),
        labels_path=config.dataset.val.labels_path,
        retain_every_n_frames_in_test=config.dataset.val.
        retain_every_n_frames_in_test,
        scale_bbox=config.dataset.val.scale_bbox,
        square_bbox=config.dataset.val.square_bbox if hasattr(