Example 1
def main(argv, common_opts):
    """Entry point for the interactive viewer: optionally load a model,
    build the matching dataset, and start the GUI main loop.

    Args:
        argv: Command-line arguments forwarded to ``parse_args``.
        common_opts: Shared options dict; ``'device'`` selects the compute
            device handed to the GUI.
    """
    args = parse_args(argv)
    # Fix seeds and force deterministic algorithms so sessions are reproducible.
    seed_all(12345)
    init_algorithms(deterministic=True)
    # Inference/visualisation only — gradients are never needed here.
    torch.set_grad_enabled(False)

    device = common_opts['device']

    if args.model:
        # NOTE(review): the model is left on its default device (a previous
        # `.to(device)` call was disabled); `device` is still passed to the
        # GUI separately — confirm placement is handled there.
        model = load_model(args.model).eval()
        # Reuse the specs the model was trained with so preprocessing matches.
        data_specs = model.data_specs
    else:
        # No model: fall back to canonical ImageNet-normalised 3D joint specs.
        model = None
        data_specs = DataSpecs(
            ImageSpecs(224,
                       mean=ImageSpecs.IMAGENET_MEAN,
                       stddev=ImageSpecs.IMAGENET_STDDEV),
            JointsSpecs(CanonicalSkeletonDesc, n_dims=3),
        )

    # Augmentation disabled: the viewer should show unmodified examples.
    dataset = get_dataset(args.dataset, data_specs, use_aug=False)

    app = MainGUIApp(dataset, device, model)
    app.mainloop()
Example 2
def _print_report_sections(sections):
    """Print (title, DataFrame) pairs as pipe-format (Markdown) tables,
    separated by a single blank line (none after the last table)."""
    for index, (title, frame) in enumerate(sections):
        if index:
            print()
        print(title)
        print()
        print(tabulate(frame, headers='keys', tablefmt='pipe'))


def main(argv, common_opts):
    """Evaluate a 3D pose model on a dataset and print per-sequence,
    per-activity, and overall metric tables in Markdown format.

    Args:
        argv: Command-line arguments forwarded to ``parse_args``.
        common_opts: Shared options dict; ``'device'`` selects the compute
            device the model is moved to.
    """
    args = parse_args(argv)
    # Fix seeds and force deterministic algorithms for reproducible numbers.
    seed_all(12345)
    init_algorithms(deterministic=True)
    # Evaluation only — disable autograd globally.
    torch.set_grad_enabled(False)

    device = common_opts['device']

    model = load_model(args.model).to(device).eval()
    dataset = get_dataset(args.dataset, model.data_specs, use_aug=False)

    if args.multicrop:
        # Multi-crop examples have variable shapes, so batching is disabled.
        dataset.multicrop = True
        loader = make_unbatched_dataloader(dataset)
    else:
        loader = make_dataloader(dataset, batch_size=1)

    if args.dataset.startswith('h36m-'):
        # Human3.6M protocol: ground-truth root depth is available and all
        # canonical joints are scored.
        known_depth = True
        included_joints = list(range(CanonicalSkeletonDesc.n_joints))
    else:
        # Other datasets: no ground-truth depth; score only the joints shared
        # with the VNect common skeleton.
        known_depth = False
        included_joints = [
            CanonicalSkeletonDesc.joint_names.index(joint_name)
            for joint_name in VNect_Common_Skeleton
        ]
    print('Use ground truth root joint depth? {}'.format(known_depth))
    print('Number of joints in evaluation: {}'.format(len(included_joints)))

    df = run_evaluation_3d(model,
                           device,
                           loader,
                           included_joints,
                           known_depth=known_depth,
                           print_progress=True)

    # Aggregate the per-example results three ways and print each as a table.
    _print_report_sections([
        ('### By sequence',
         df.drop(columns=['activity_id']).groupby('seq_id').mean()),
        ('### By activity',
         df.drop(columns=['seq_id']).groupby('activity_id').mean()),
        ('### Overall',
         df.drop(columns=['activity_id', 'seq_id']).mean().to_frame().T),
    ])
Example 3
def _create_dataloader(dataset_names, data_specs, batch_size,
                       examples_per_epoch, use_aug):
    """Build a dataloader over one or more named pose datasets.

    A single name yields that dataset directly; multiple names are combined
    into a ``MixedPoseDataset``. Sampling is delegated to the dataset's own
    sampler, sized by ``examples_per_epoch``.
    """
    loaded = [get_dataset(name, data_specs, use_aug=use_aug)
              for name in dataset_names]
    assert len(loaded) > 0, 'at least one dataset must be specified'
    # Avoid the mixing wrapper when there is nothing to mix.
    combined = loaded[0] if len(loaded) == 1 else MixedPoseDataset(loaded)
    sampler = combined.sampler(examples_per_epoch=examples_per_epoch)
    return make_dataloader(
        combined,
        sampler=sampler,
        batch_size=batch_size,
        drop_last=True,
        num_workers=4,
    )