# Example no. 1
def main(opt):
    """Train (or evaluate) a LinearModel for Human3.6M 2D->3D pose lifting.

    Args:
        opt: parsed option namespace; attributes read here include lr, ckpt,
            load, resume, action, data_dir, test, test_batch, train_batch,
            job, use_hg, set_num_samples, procrustes, epochs, lr_decay,
            lr_gamma and max_norm.

    Side effects:
        Writes options, a log file and checkpoints under ``opt.ckpt``.
        When ``opt.test`` is set, prints per-action errors and exits the
        process via ``sys.exit()`` without training.
    """
    start_epoch = 0
    err_best = 1000  # lowest test error seen so far; used to flag best ckpt
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)

    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # optionally restore model/optimizer and training counters from a ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load, encoding='utf-8')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        # append to the existing log file instead of starting a new one
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test'])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))

    # data loading
    print(">>> loading data")
    # normalization statistics for 2D inputs and 3D targets
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    stat_2d = torch.load(os.path.join(opt.data_dir, 'stat_2d.pth.tar'))

    # evaluation-only mode: report per-action test error, then exit
    if opt.test:
        err_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))

            test_loader = DataLoader(dataset=Human36M(
                actions=action,
                data_path=opt.data_dir,
                set_num_samples=opt.set_num_samples,
                use_hg=opt.use_hg,
                is_train=False),
                                     batch_size=opt.test_batch,
                                     shuffle=False,
                                     num_workers=opt.job,
                                     pin_memory=True)

            _, err_test = test(test_loader,
                               model,
                               criterion,
                               stat_2d,
                               stat_3d,
                               procrustes=opt.procrustes)
            err_set.append(err_test)

        print(">>>>>> TEST results:")

        for action in actions:
            print("{}".format(action), end='\t')
        print("\n")

        for err in err_set:
            print("{:.4f}".format(err), end='\t')
        print(">>>\nERRORS: {}".format(np.array(err_set).mean()))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg,
        is_train=False),
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)

    train_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg),
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)

    print(">>> data loaded !")

    cudnn.benchmark = True

    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        # train one epoch; train() owns the lr-decay schedule, hence it
        # returns the updated global step and current learning rate
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              stat_2d,
                                              stat_3d,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)
        # evaluate on the held-out set
        loss_test, err_test = test(test_loader,
                                   model,
                                   criterion,
                                   stat_2d,
                                   stat_3d,
                                   procrustes=opt.procrustes)

        # update log file
        logger.append([epoch + 1, lr_now, loss_train, loss_test, err_test],
                      ['int', 'float', 'float', 'float', 'float'])

        # save ckpt: a single call with the computed flag replaces the
        # former duplicated if/else branches that differed only in is_best
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        log.save_ckpt(
            {
                'epoch': epoch + 1,
                'lr': lr_now,
                'step': glob_step,
                'err': err_best,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            ckpt_path=opt.ckpt,
            is_best=is_best)

    logger.close()
# Example no. 2
def main(opt):
    """Train or evaluate a LinearModel 2D->3D pose lifter on Human3.6M.

    This variant seeds the RNGs, supports CPU fallback via torch.device,
    and in test mode additionally evaluates PCK at several thresholds with
    optional refinement (configured through ``ru.get_refine_config``).

    Args:
        opt: parsed option namespace; attributes read here include lr, ckpt,
            load, resume, action, data_dir, test, test_batch, train_batch,
            job, use_hg, procrustes, epochs, lr_decay, lr_gamma, max_norm,
            noise_level and dump_err.

    Side effects:
        Writes options, a log file and checkpoints under ``opt.ckpt``.
        When ``opt.test`` is set, prints errors/PCKs and exits the process.
    """
    start_epoch = 0
    err_best = 1000  # lowest test error seen so far; used to flag best ckpt
    glob_step = 0
    lr_now = opt.lr

    # fix RNG seeds for reproducibility
    manual_seed = 1234
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)

    # save options
    log.save_options(opt, opt.ckpt)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.to(device)
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    # reduction='mean' replaces the deprecated (and since-removed)
    # size_average=True argument; behavior is identical.
    criterion = nn.MSELoss(reduction='mean').to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # optionally restore model/optimizer and training counters from a ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        # append to the existing log file instead of starting a new one
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        # six column names to match the six values appended per epoch below
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test',
             'pck_test'])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    pprint(actions, indent=4)
    print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    # evaluation-only mode: per-action error and PCK, then exit
    if opt.test:
        refine_dic, refine_per_action, coeff_funs, refine_extra_kwargs = ru.get_refine_config(
            opt)
        pck_thresholds = [50, 100, 150, 200, 250]

        # named function instead of a lambda assignment (PEP 8 E731)
        def noise_fun(x):
            return add_gaussion_noise(x, percent=opt.noise_level)

        err_set = []
        pck_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))
            test_loader = DataLoader(dataset=Human36M(actions=action,
                                                      data_path=opt.data_dir,
                                                      use_hg=opt.use_hg,
                                                      is_train=False),
                                     batch_size=opt.test_batch,
                                     shuffle=False,
                                     pin_memory=True)

            # pick the refinement dictionary/coefficient fn for this action
            refine_idx_action = ru.get_idx_action(action)
            if refine_per_action:
                refine_dic_i = refine_dic[refine_idx_action]
            else:
                refine_dic_i = refine_dic
            coeff_fun_i = coeff_funs[refine_idx_action]
            _, err_test, pck_test = test(
                test_loader,
                model,
                criterion,
                stat_3d,
                device,
                procrustes=opt.procrustes,
                noise_fun=noise_fun,
                pck_thresholds=pck_thresholds,
                refine_dic=refine_dic_i,
                refine_coeff_fun=coeff_fun_i,
                refine_extra_kwargs=refine_extra_kwargs,
                cache_prefix=action if opt.dump_err else None)
            err_set.append(err_test)
            pck_set.append(pck_test)
        print(">>>>>> TEST results:")
        for action in actions:
            print("{}".format(action[:7]), end='\t')
        print("\n")
        for err in err_set:
            print("{:7.4f}".format(err), end='\t')
        print(">>> ERRORS: {}".format(np.array(err_set).mean()))

        # one row of per-action PCK values per threshold
        for i, thres in enumerate(pck_thresholds):
            for pck in pck_set:
                print("{:7.4f}".format(pck[i]), end='\t')
            print(">>> PCKS {}: {}".format(
                thres, np.mean([pck[i] for pck in pck_set])))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(dataset=Human36M(actions=actions,
                                              data_path=opt.data_dir,
                                              use_hg=opt.use_hg,
                                              is_train=False),
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)
    train_loader = DataLoader(dataset=Human36M(actions=actions,
                                               data_path=opt.data_dir,
                                               use_hg=opt.use_hg),
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)
    print(">>> data loaded !")

    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        # train one epoch; train() owns the lr-decay schedule, hence it
        # returns the updated global step and current learning rate
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              device,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)
        loss_test, err_test, pck_test = test(test_loader,
                                             model,
                                             criterion,
                                             stat_3d,
                                             device,
                                             procrustes=opt.procrustes)

        # update log file
        logger.append(
            [epoch + 1, lr_now, loss_train, loss_test, err_test, pck_test],
            ['int', 'float', 'float', 'float', 'float', 'float'])

        # save ckpt: a single call with the computed flag replaces the
        # former duplicated if/else branches that differed only in is_best
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        log.save_ckpt(
            {
                'epoch': epoch + 1,
                'lr': lr_now,
                'step': glob_step,
                'err': err_best,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            ckpt_path=opt.ckpt,
            is_best=is_best)

    logger.close()
def main(opt):
    """Train or evaluate a LinearModel for Human3.6M 2D->3D pose lifting.

    Args:
        opt: parsed option namespace; attributes read here include lr, ckpt,
            load, resume, action, data_dir, test, test_batch, train_batch,
            job, use_hg, procrustes, epochs, lr_decay, lr_gamma, max_norm.

    Side effects:
        Writes options, a log file and checkpoints under ``opt.ckpt``.
        When ``opt.test`` is set, prints per-action errors and exits the
        process via ``sys.exit()`` without training.
    """
    start_epoch = 0
    err_best = 1000  # lowest test error seen so far; used to flag best ckpt
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)

    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    # reduction="mean" replaces the deprecated (and since-removed)
    # size_average=True argument; behavior is identical.
    criterion = nn.MSELoss(reduction="mean").cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # optionally restore model/optimizer and training counters from a ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt["epoch"]
        err_best = ckpt["err"]
        glob_step = ckpt["step"]
        lr_now = ckpt["lr"]
        model.load_state_dict(ckpt["state_dict"])
        optimizer.load_state_dict(ckpt["optimizer"])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        # append to the existing log file instead of starting a new one
        logger = log.Logger(os.path.join(opt.ckpt, "log.txt"), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, "log.txt"))
        logger.set_names(
            ["epoch", "lr", "loss_train", "loss_test", "err_test"])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    pprint(actions, indent=4)
    print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_3d = torch.load(os.path.join(opt.data_dir, "stat_3d.pth.tar"))
    # evaluation-only mode: report per-action test error, then exit
    if opt.test:
        err_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))
            test_loader = DataLoader(
                dataset=Human36M(
                    actions=action,
                    data_path=opt.data_dir,
                    use_hg=opt.use_hg,
                    is_train=False,
                ),
                batch_size=opt.test_batch,
                shuffle=False,
                num_workers=opt.job,
                pin_memory=True,
            )
            _, err_test = test(test_loader,
                               model,
                               criterion,
                               stat_3d,
                               procrustes=opt.procrustes)
            err_set.append(err_test)
        print(">>>>>> TEST results:")
        for action in actions:
            print("{}".format(action), end="\t")
        print("\n")
        for err in err_set:
            print("{:.4f}".format(err), end="\t")
        print(">>>\nERRORS: {}".format(np.array(err_set).mean()))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(
        dataset=Human36M(actions=actions,
                         data_path=opt.data_dir,
                         use_hg=opt.use_hg,
                         is_train=False),
        batch_size=opt.test_batch,
        shuffle=False,
        num_workers=opt.job,
        pin_memory=True,
    )
    train_loader = DataLoader(
        dataset=Human36M(actions=actions,
                         data_path=opt.data_dir,
                         use_hg=opt.use_hg),
        batch_size=opt.train_batch,
        shuffle=True,
        num_workers=opt.job,
        pin_memory=True,
    )
    print(">>> data loaded !")

    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print("==========================")
        print(">>> epoch: {} | lr: {:.5f}".format(epoch + 1, lr_now))

        # train one epoch; train() owns the lr-decay schedule, hence it
        # returns the updated global step and current learning rate
        glob_step, lr_now, loss_train = train(
            train_loader,
            model,
            criterion,
            optimizer,
            lr_init=opt.lr,
            lr_now=lr_now,
            glob_step=glob_step,
            lr_decay=opt.lr_decay,
            gamma=opt.lr_gamma,
            max_norm=opt.max_norm,
        )
        loss_test, err_test = test(test_loader,
                                   model,
                                   criterion,
                                   stat_3d,
                                   procrustes=opt.procrustes)

        # update log file ("flaot" typo in the format list fixed to "float")
        logger.append(
            [epoch + 1, lr_now, loss_train, loss_test, err_test],
            ["int", "float", "float", "float", "float"],
        )

        # save ckpt: a single call with the computed flag replaces the
        # former duplicated if/else branches that differed only in is_best
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        log.save_ckpt(
            {
                "epoch": epoch + 1,
                "lr": lr_now,
                "step": glob_step,
                "err": err_best,
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            ckpt_path=opt.ckpt,
            is_best=is_best,
        )

    logger.close()
# Example no. 4
def main(opt):
    """Train (or evaluate) a LinearModel for Human3.6M 2D->3D pose lifting.

    Args:
        opt: parsed option namespace; attributes read here include lr, ckpt,
            load, resume, action, data_dir, test, test_batch, train_batch,
            job, use_hg, set_num_samples, procrustes, epochs, lr_decay,
            lr_gamma and max_norm.

    Side effects:
        Writes options, a log file and checkpoints under ``opt.ckpt``.
        When ``opt.test`` is set, prints per-action errors and exits the
        process via ``sys.exit()`` without training.
    """
    start_epoch = 0
    err_best = 1000  # lowest test error seen so far; used to flag best ckpt
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)

    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # optionally restore model/optimizer and training counters from a ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load, encoding='utf-8')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        # append to the existing log file instead of starting a new one
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test'])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))

    # data loading
    print(">>> loading data")
    # Normalization statistics. Observed layout of stat_3d (from a dump):
    #   keys: 'std' (96,), 'mean' (96,), 'dim_use' (48,), 'train', 'test'
    #   'train'/'test' map (subject, action, sequence-file) tuples to arrays
    #   of 3D joint coordinates; test subjects are 9 and 11.
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    stat_2d = torch.load(os.path.join(opt.data_dir, 'stat_2d.pth.tar'))

    # evaluation-only mode: report per-action test error, then exit
    if opt.test:
        err_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))

            test_loader = DataLoader(dataset=Human36M(
                actions=action,
                data_path=opt.data_dir,
                set_num_samples=opt.set_num_samples,
                use_hg=opt.use_hg,
                is_train=False),
                                     batch_size=opt.test_batch,
                                     shuffle=False,
                                     num_workers=opt.job,
                                     pin_memory=True)

            _, err_test = test(test_loader,
                               model,
                               criterion,
                               stat_2d,
                               stat_3d,
                               procrustes=opt.procrustes)
            err_set.append(err_test)

        print(">>>>>> TEST results:")

        for action in actions:
            print("{}".format(action), end='\t')
        print("\n")

        for err in err_set:
            print("{:.4f}".format(err), end='\t')
        print(">>>\nERRORS: {}".format(np.array(err_set).mean()))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg,
        is_train=False),
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)

    train_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg),
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)

    print(">>> data loaded !")

    cudnn.benchmark = True

    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        # train one epoch; train() owns the lr-decay schedule, hence it
        # returns the updated global step and current learning rate
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              stat_2d,
                                              stat_3d,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)
        # evaluate on the held-out set
        loss_test, err_test = test(test_loader,
                                   model,
                                   criterion,
                                   stat_2d,
                                   stat_3d,
                                   procrustes=opt.procrustes)

        # update log file
        logger.append([epoch + 1, lr_now, loss_train, loss_test, err_test],
                      ['int', 'float', 'float', 'float', 'float'])

        # save ckpt: a single call with the computed flag replaces the
        # former duplicated if/else branches that differed only in is_best
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        log.save_ckpt(
            {
                'epoch': epoch + 1,
                'lr': lr_now,
                'step': glob_step,
                'err': err_best,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            ckpt_path=opt.ckpt,
            is_best=is_best)

    logger.close()