Esempio n. 1
0
def main(argv=None):
    """Convert a saved pose-model checkpoint to another serialisation format.

    Supports re-saving as a PyTorch checkpoint ('pytorch') or exporting to
    ONNX ('onnx'), selected by ``args.format``.

    Args:
        argv: Command-line argument list. Defaults to ``sys.argv`` at call
            time (a ``sys.argv`` default would be bound once at definition
            time and miss later reassignment, e.g. in tests).

    Raises:
        ValueError: If ``args.format`` is neither 'pytorch' nor 'onnx'.
    """
    if argv is None:
        argv = sys.argv
    args = parse_args(argv)
    seed_all(12345)
    init_algorithms(deterministic=True)

    # Load the model into system memory (CPU, not GPU)
    model_state = torch.load(args.input, map_location='cpu')
    model_desc = model_state['model_desc']
    model = create_model(model_desc)
    model.load_state_dict(model_state['state_dict'])

    if args.format == 'pytorch':
        # Re-save the checkpoint, carrying over the training dataset list
        # when the source checkpoint has one.
        new_model_state = {
            'state_dict': model.state_dict(),
            'model_desc': model_desc,
            'train_datasets': model_state.get('train_datasets', []),
        }
        torch.save(new_model_state, args.output)
    elif args.format == 'onnx':
        # Trace the model with a dummy RGB input at its expected resolution.
        image_height = model.data_specs.input_specs.height
        image_width = model.data_specs.input_specs.width
        dummy_input = torch.randn(1, 3, image_height, image_width)
        onnx.export(model, (dummy_input, ), args.output, verbose=False)
    else:
        # ValueError is the precise exception for a bad option value
        # (still caught by callers handling Exception).
        raise ValueError('Unrecognised model format: {}'.format(args.format))
Esempio n. 2
0
def main(argv, common_opts):
    """Run single-image 3D pose inference and plot the predicted skeleton.

    Loads the model, preprocesses the input image to the model's expected
    specs, runs one forward pass, and shows the image alongside a 3D plot
    of the normalised skeleton.

    Args:
        argv: Command-line argument list for ``parse_args``.
        common_opts: Shared options dict; ``common_opts['device']`` selects
            the torch device for inference.

    Raises:
        NotImplementedError: If ``--multicrop`` is requested (unsupported).
    """
    args = parse_args(argv)
    seed_all(12345)
    init_algorithms(deterministic=True)
    torch.set_grad_enabled(False)

    device = common_opts['device']

    # An `assert` here would be silently stripped under `python -O`;
    # raise explicitly so the unsupported option always fails loudly.
    if args.multicrop:
        raise NotImplementedError('TODO: Implement multi-crop for single image inference.')

    model = load_model(args.model).to(device).eval()

    input_specs: ImageSpecs = model.data_specs.input_specs

    # Downscale in place to fit the model's input resolution, then convert
    # to a normalised tensor on the inference device.
    image: PIL.Image.Image = PIL.Image.open(args.image, 'r')
    image.thumbnail((input_specs.width, input_specs.height))
    inp = input_specs.convert(image).to(device, torch.float32)

    # Add and strip a batch dimension around the forward pass.
    output = model(inp[None, ...])[0]

    norm_skel3d = ensure_cartesian(output.to(CPU, torch.float64), d=3)

    # Side-by-side figure: input image (left) and 3D skeleton (right).
    fig = plt.figure(figsize=(16, 8))
    ax1 = fig.add_subplot(1, 2, 1)
    ax2: Axes3D = fig.add_subplot(1, 2, 2, projection='3d')

    ax1.imshow(input_specs.unconvert(inp.to(CPU)))
    plot_skeleton_on_axes3d(norm_skel3d, CanonicalSkeletonDesc, ax2, invert=True)

    plt.show()
Esempio n. 3
0
def main(argv, common_opts):
    """Launch the interactive dataset GUI, optionally with a trained model.

    When ``--model`` is given, the model's own data specs drive dataset
    preprocessing; otherwise a default spec is used and no model is loaded.
    """
    args = parse_args(argv)
    seed_all(12345)
    init_algorithms(deterministic=True)
    torch.set_grad_enabled(False)

    device = common_opts['device']

    model = load_model(args.model).eval() if args.model else None

    if model is not None:
        data_specs = model.data_specs
    else:
        # No model supplied: fall back to ImageNet-normalised 224px inputs
        # paired with canonical 3D joint targets.
        data_specs = DataSpecs(
            ImageSpecs(224,
                       mean=ImageSpecs.IMAGENET_MEAN,
                       stddev=ImageSpecs.IMAGENET_STDDEV),
            JointsSpecs(CanonicalSkeletonDesc, n_dims=3),
        )

    gui_dataset = get_dataset(args.dataset, data_specs, use_aug=False)
    app = MainGUIApp(gui_dataset, device, model)
    app.mainloop()
Esempio n. 4
0
def _print_markdown_table(frame):
    """Print *frame* as a pipe-delimited (Markdown/GitHub) table."""
    print(tabulate(frame, headers='keys', tablefmt='pipe'))


def main(argv, common_opts):
    """Evaluate a 3D pose model on a dataset and print metric tables.

    Prints evaluation results aggregated three ways: per sequence, per
    activity, and overall.

    Args:
        argv: Command-line argument list for ``parse_args``.
        common_opts: Shared options dict; ``common_opts['device']`` selects
            the torch device for evaluation.
    """
    args = parse_args(argv)
    seed_all(12345)
    init_algorithms(deterministic=True)
    torch.set_grad_enabled(False)

    device = common_opts['device']

    model = load_model(args.model).to(device).eval()
    dataset = get_dataset(args.dataset, model.data_specs, use_aug=False)

    if args.multicrop:
        # Multi-crop yields a variable number of crops per example, so an
        # unbatched loader is required.
        dataset.multicrop = True
        loader = make_unbatched_dataloader(dataset)
    else:
        loader = make_dataloader(dataset, batch_size=1)

    if args.dataset.startswith('h36m-'):
        # Human3.6M: ground-truth root depth is available and every
        # canonical joint is evaluated.
        known_depth = True
        included_joints = list(range(CanonicalSkeletonDesc.n_joints))
    else:
        # Other datasets: evaluate only the joints shared with the
        # VNect common skeleton.
        known_depth = False
        included_joints = [
            CanonicalSkeletonDesc.joint_names.index(joint_name)
            for joint_name in VNect_Common_Skeleton
        ]
    print('Use ground truth root joint depth? {}'.format(known_depth))
    print('Number of joints in evaluation: {}'.format(len(included_joints)))

    df = run_evaluation_3d(model,
                           device,
                           loader,
                           included_joints,
                           known_depth=known_depth,
                           print_progress=True)

    # Report the same metrics at three aggregation levels. The repeated
    # tabulate boilerplate lives in _print_markdown_table.
    print('### By sequence')
    print()
    _print_markdown_table(df.drop(columns=['activity_id']).groupby('seq_id').mean())
    print()
    print('### By activity')
    print()
    _print_markdown_table(df.drop(columns=['seq_id']).groupby('activity_id').mean())
    print()
    print('### Overall')
    print()
    _print_markdown_table(df.drop(columns=['activity_id', 'seq_id']).mean().to_frame().T)
Esempio n. 5
0
import json
import torch
from mpl_toolkits.mplot3d import Axes3D
from pose3d_utils.coords import ensure_cartesian
from MayaExporter import MayaExporter
from VideoFrames import VideoFrames

from margipose.cli import Subcommand
from margipose.data.skeleton import CanonicalSkeletonDesc
from margipose.data_specs import ImageSpecs
from margipose.models import load_model
from margipose.utils import seed_all, init_algorithms, plot_skeleton_on_axes3d, plot_skeleton_on_axes, angleBetween

# NOTE(review): despite its name, CPU resolves to the first CUDA device when
# one is available. The name is kept unchanged because other code in this
# file refers to it, but it should eventually be renamed (e.g. DEVICE).
CPU = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
init_algorithms(deterministic=True)
# Inference-only module: disable autograd globally.
torch.set_grad_enabled(False)
# (Removed a bare `torch.no_grad()` call here: invoked as a statement it
# merely constructs and discards a context manager, doing nothing; the
# set_grad_enabled(False) above already disables gradient tracking.)
seed_all(12345)

def parse_args():

    """Parse command-line arguments."""

    parser = argparse.ArgumentParser()

    # NOTE(review): default=argparse.SUPPRESS is moot here — with
    # required=True the option is always present in the parsed namespace,
    # so the SUPPRESS default can never take effect.
    parser.add_argument('--model', type=str, metavar='FILE', default=argparse.SUPPRESS,
                        required=True,
                        help='path to model file')
    # Single-letter mode flags in upper/lower case; presumably
    # image / directory / video — verify against the dispatching caller.
    # NOTE(review): this function appears truncated in this view — no
    # `parser.parse_args()` call or return statement is visible.
    parser.add_argument('--mode', type=str, required=True, choices = ['I', 'D', 'd', 'i', 'v', 'V'],
                        help='infer single image or directory')
Esempio n. 6
0
def sacred_main(_run: Run, seed, showoff, out_dir, batch_size, epochs, tags,
                model_desc, experiment_id, weights, train_examples,
                val_examples, deterministic, train_datasets, val_datasets, lr,
                lr_milestones, lr_gamma, optim_algorithm):
    """Train a 3D pose model for `epochs` epochs as a Sacred experiment.

    The model is either created fresh from `model_desc` or restored from the
    checkpoint at `weights`. Progress is reported to the console, to the
    Sacred run, and (optionally) to a Showoff notebook. After every epoch
    the latest model state is checkpointed under `out_dir`, and at the end
    the final checkpoint is attached to the Sacred run as an artifact.

    Returns:
        The most recent value of the 'train_pck' telemetry metric (also
        stored as `_run.result`).
    """
    seed_all(seed)
    init_algorithms(deterministic=deterministic)

    # Per-experiment output directory (only created when out_dir is set).
    exp_out_dir = None
    if out_dir:
        exp_out_dir = path.join(out_dir, experiment_id)
        makedirs(exp_out_dir, exist_ok=True)
    print(f'Experiment ID: {experiment_id}')

    ####
    # Model
    ####

    if weights is None:
        model = create_model(model_desc)
    else:
        # Resuming from a checkpoint: the checkpoint's own model_desc
        # overrides the one passed as an argument.
        details = torch.load(weights)
        model_desc = details['model_desc']
        model = create_model(model_desc)
        model.load_state_dict(details['state_dict'])
    model.to(global_opts['device'])

    print(json.dumps(model_desc, sort_keys=True, indent=2))

    ####
    # Data
    ####

    train_loader = create_train_dataloader(train_datasets, model.data_specs,
                                           batch_size, train_examples)
    if len(val_datasets) > 0:
        val_loader = create_val_dataloader(val_datasets, model.data_specs,
                                           batch_size, val_examples)
    else:
        # No validation datasets configured: skip validation passes.
        val_loader = None

    ####
    # Reporting
    ####

    reporter = Reporter(with_val=(val_loader is not None))

    reporter.setup_console_output()
    reporter.setup_sacred_output(_run)

    notebook = None
    if showoff:
        title = '3D pose model ({}@{})'.format(model_desc['type'],
                                               model_desc['version'])
        notebook = create_showoff_notebook(title, tags)
        reporter.setup_showoff_output(notebook)

    def set_progress(value):
        # No-op unless a Showoff notebook is attached.
        if notebook is not None:
            notebook.set_progress(value)

    tel = reporter.telemetry

    tel['config'].set_value(_run.config)
    tel['host_info'].set_value(get_host_info())

    ####
    # Optimiser
    ####

    if optim_algorithm == '1cycle':
        from torch import optim
        # lr=0 is a placeholder; the 1cycle scheduler drives the actual
        # learning rate up to lr_max over the schedule.
        optimiser = optim.SGD(model.parameters(), lr=0)
        scheduler = make_1cycle(optimiser,
                                epochs * len(train_loader),
                                lr_max=lr,
                                momentum=0.9)
    else:
        scheduler = learning_schedule(model.parameters(), optim_algorithm, lr,
                                      lr_milestones, lr_gamma)

    ####
    # Training
    ####

    model_file = None
    if exp_out_dir:
        model_file = path.join(exp_out_dir, 'model-latest.pth')
        # Persist the resolved experiment configuration next to the
        # checkpoints for later reference.
        with open(path.join(exp_out_dir, 'config.json'), 'w') as f:
            json.dump(tel['config'].value(), f, sort_keys=True, indent=2)

    for epoch in range(epochs):
        tel['epoch'].set_value(epoch)
        print('> Epoch {:3d}/{:3d}'.format(epoch + 1, epochs))

        def on_train_progress(samples_processed):
            # Overall progress across all epochs, as a fraction in [0, 1].
            so_far = epoch * len(train_loader.dataset) + samples_processed
            total = epochs * len(train_loader.dataset)
            set_progress(so_far / total)

        do_training_pass(epoch, model, tel, train_loader, scheduler,
                         on_train_progress)
        if val_loader:
            do_validation_pass(epoch, model, tel, val_loader)

        _run.result = tel['train_pck'].value()[0]

        # Checkpoint the latest state; the same file is overwritten every
        # epoch, including the optimiser state for resumption.
        if model_file is not None:
            state = {
                'state_dict': model.state_dict(),
                'model_desc': model_desc,
                'train_datasets': train_datasets,
                'optimizer': scheduler.optimizer.state_dict(),
                'epoch': epoch + 1,
            }
            torch.save(state, model_file)

        tel.step()

    # Add the final model as a Sacred artifact
    if model_file is not None and path.isfile(model_file):
        _run.add_artifact(model_file)

    set_progress(1.0)
    return _run.result
Esempio n. 7
0
def sacred_main(_run: Run, seed, showoff, batch_size, model_desc, deterministic, train_datasets,
         lr_min, lr_max, max_iters, ema_beta, weight_decay, momentum):
    """Run a learning-rate range test ("LR finder") as a Sacred experiment.

    Sweeps the learning rate geometrically from `lr_min` to `lr_max` over at
    most `max_iters` SGD iterations, tracking an exponentially smoothed
    training loss, and periodically publishes a loss-vs-learning-rate plot.
    The sweep stops early once the smoothed loss diverges well past its
    best observed value.
    """
    seed_all(seed)
    init_algorithms(deterministic=deterministic)

    model = create_model(model_desc).to(global_opts['device'])
    # Size the epoch so a single pass of the loader covers the whole sweep.
    data_loader = create_train_dataloader(train_datasets, model.data_specs, batch_size,
                                          examples_per_epoch=(max_iters * batch_size))
    data_iter = iter(data_loader)

    print(json.dumps(model_desc, sort_keys=True, indent=2))

    def do_training_iteration(optimiser):
        # Perform one SGD step on the next batch and return the scalar loss.
        batch = next(data_iter)

        in_var = batch['input'].to(global_opts['device'], torch.float32)
        target_var = batch['target'].to(global_opts['device'], torch.float32)
        mask_var = batch['joint_mask'].to(global_opts['device'], torch.float32)

        # Calculate predictions and loss
        out_var = model(in_var)
        loss = forward_loss(model, out_var, target_var, mask_var, batch['valid_depth'])

        # Calculate gradients
        optimiser.zero_grad()
        loss.backward()

        # Update parameters
        optimiser.step()

        return loss.item()

    # lr=1 is a placeholder; the sweep loop overwrites it every iteration.
    optimiser = SGD(model.parameters(), lr=1, weight_decay=weight_decay, momentum=momentum)

    tel = tele.Telemetry({
        'config': ValueMeter(skip_reset=True),
        'host_info': ValueMeter(skip_reset=True),
        'loss_lr_fig': ValueMeter(),
    })

    notebook = None
    if showoff:
        title = 'Hyperparameter search ({}@{})'.format(model_desc['type'], model_desc['version'])
        notebook = create_showoff_notebook(title, ['lrfinder'])

        from tele.showoff import views

        tel.sink(tele.showoff.Conf(notebook), [
            views.Inspect(['config'], 'Experiment configuration', flatten=True),
            views.Inspect(['host_info'], 'Host information', flatten=True),
            views.FrameContent(['loss_lr_fig'], 'Loss vs learning rate graph', 'plotly'),
        ])

    def set_progress(value):
        # No-op unless a Showoff notebook is attached.
        if notebook is not None:
            notebook.set_progress(value)

    tel['config'].set_value(_run.config)
    tel['host_info'].set_value(get_host_info())

    # Log-spaced learning rates covering [lr_min, lr_max].
    lrs = np.geomspace(lr_min, lr_max, max_iters)
    losses = []
    avg_loss = 0
    min_loss = np.inf
    for i, lr in enumerate(tqdm(lrs, ascii=True)):
        set_progress(i / len(lrs))

        for param_group in optimiser.param_groups:
            param_group['lr'] = lr
        loss = do_training_iteration(optimiser)
        # Exponential moving average of the loss...
        avg_loss = ema_beta * avg_loss + (1 - ema_beta) * loss
        # ...with bias correction for the zero initialisation.
        smoothed_loss = avg_loss / (1 - ema_beta ** (i + 1))
        # Stop when the smoothed loss exceeds 4x the best seen so far
        # (the multiplicative test is meaningless for a non-positive best,
        # hence the min_loss > 0 guard).
        if min_loss > 0 and smoothed_loss > 4 * min_loss:
            break
        min_loss = min(smoothed_loss, min_loss)
        losses.append(smoothed_loss)

        # Refresh the published loss-vs-LR figure every 10 iterations.
        if i % 10 == 0:
            fig = go.Figure(
                data=[go.Scatter(x=lrs[:len(losses)].tolist(), y=losses, mode='lines')],
                layout=go.Layout(
                    margin=go.Margin(l=60, r=40, b=80, t=20, pad=4),
                    xaxis=go.XAxis(title='Learning rate', type='log', exponentformat='power'),
                    yaxis=go.YAxis(title='Training loss'),
                )
            )
            tel['loss_lr_fig'].set_value(fig)
            tel.step()

    set_progress(1)