# Code example #1
# 0
def main(opt):
    """Evaluate a pretrained attention model on the test split and log a CSV.

    Loads the best checkpoint from ``opt.ckpt``, runs a single test pass
    via ``run_model`` and writes every returned metric (prefixed with
    ``test_``) to ``test_bml1.csv``.

    Args:
        opt: parsed option namespace; must provide ``in_features``,
            ``d_model``, ``kernel_size``, ``num_stage``, ``dct_n``,
            ``input_n``, ``output_n``, ``ckpt`` and ``test_batch_size``.
    """
    print('>>> create models')
    net_pred = AttModel.AttModel(in_features=opt.in_features,  # e.g. 54
                                 kernel_size=opt.kernel_size,
                                 d_model=opt.d_model,
                                 num_stage=opt.num_stage,
                                 dct_n=opt.dct_n,
                                 input_n=opt.input_n,
                                 output_n=opt.output_n)
    net_pred = nn.DataParallel(net_pred)
    net_pred.cuda()

    model_path_len = '{}/ckpt_best.pth.tar'.format(opt.ckpt)
    print(">>> loading ckpt len from '{}'".format(model_path_len))
    ckpt = torch.load(model_path_len)
    net_pred.load_state_dict(ckpt['state_dict'])
    print(">>> ckpt len loaded (epoch: {} | err: {})".format(
        ckpt['epoch'], ckpt['err']))
    # Unwrap DataParallel so downstream code sees the bare module.
    net_pred = net_pred.module

    print('>>> loading datasets')
    test_dataset = datasets.Datasets(opt, split=2)  # split=2: test split
    print('>>> Testing dataset length: {:d}'.format(len(test_dataset)))
    test_loader = DataLoader(test_dataset,
                             batch_size=opt.test_batch_size,
                             shuffle=False,
                             num_workers=0,
                             pin_memory=False)

    # is_train=2 presumably selects the test-mode forward path in
    # run_model -- confirm against run_model's implementation.
    ret_test = run_model(net_pred,
                         is_train=2,
                         data_loader=test_loader,
                         opt=opt,
                         epo=0)
    print('testing error: {:.3f}'.format(ret_test['#1']))
    # One CSV row: every metric returned by run_model, prefixed 'test_'.
    ret_log = np.array([])
    head = np.array([])
    for k in ret_test.keys():
        ret_log = np.append(ret_log, [ret_test[k]])
        head = np.append(head, ['test_' + k])
    log.save_csv_log(opt, head, ret_log, is_create=True, file_name='test_bml1')
# Code example #2
# 0
def main(opt):
    """Train or evaluate the attention-based motion-prediction model.

    Behaviour is driven by flags on ``opt``:
      * ``opt.is_eval``: load the best checkpoint and run a single test
        pass, writing per-metric results to ``test_walking.csv``.
      * ``opt.is_load``: resume weights / epoch / learning rate from the
        best checkpoint before training.
      * otherwise: full training loop with per-epoch validation and
        testing, CSV logging, and best-checkpoint saving.
    """
    lr_now = opt.lr_now
    start_epoch = 1
    # opt.is_eval = True
    print('>>> create models')
    in_features = opt.in_features  # 54
    d_model = opt.d_model
    kernel_size = opt.kernel_size
    net_pred = AttModel.AttModel(in_features=in_features,
                                 kernel_size=kernel_size,
                                 d_model=d_model,
                                 num_stage=opt.num_stage,
                                 dct_n=opt.dct_n)
    net_pred.cuda()

    # Optimise only the parameters that require gradients.
    optimizer = optim.Adam(filter(lambda x: x.requires_grad,
                                  net_pred.parameters()),
                           lr=opt.lr_now)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in net_pred.parameters()) / 1000000.0))

    if opt.is_load or opt.is_eval:
        model_path_len = './{}/ckpt_best.pth.tar'.format(opt.ckpt)
        print(">>> loading ckpt len from '{}'".format(model_path_len))
        ckpt = torch.load(model_path_len)
        # Resume bookkeeping from the checkpoint.  The optimizer state is
        # deliberately NOT restored (see the commented-out lines below).
        start_epoch = ckpt['epoch'] + 1
        err_best = ckpt['err']
        lr_now = ckpt['lr']
        net_pred.load_state_dict(ckpt['state_dict'])
        # net.load_state_dict(ckpt)
        # optimizer.load_state_dict(ckpt['optimizer'])
        # lr_now = util.lr_decay_mine(optimizer, lr_now, 0.2)
        print(">>> ckpt len loaded (epoch: {} | err: {})".format(
            ckpt['epoch'], ckpt['err']))

    print('>>> loading datasets')

    if not opt.is_eval:
        # split=0: training split; split=1: validation split.
        dataset = datasets.Datasets(opt, split=0)
        print('>>> Training dataset length: {:d}'.format(dataset.__len__()))
        data_loader = DataLoader(dataset,
                                 batch_size=opt.batch_size,
                                 shuffle=True,
                                 num_workers=0,
                                 pin_memory=False)
        valid_dataset = datasets.Datasets(opt, split=1)
        print('>>> Validation dataset length: {:d}'.format(
            valid_dataset.__len__()))
        valid_loader = DataLoader(valid_dataset,
                                  batch_size=opt.test_batch_size,
                                  shuffle=True,
                                  num_workers=0,
                                  pin_memory=False)

    # split=2: test split -- used in both training and evaluation modes.
    test_dataset = datasets.Datasets(opt, split=2)
    print('>>> Testing dataset length: {:d}'.format(test_dataset.__len__()))
    test_loader = DataLoader(test_dataset,
                             batch_size=opt.test_batch_size,
                             shuffle=False,
                             num_workers=0,
                             pin_memory=False)

    # evaluation
    if opt.is_eval:
        # is_train=3 presumably selects an evaluation forward path in
        # run_model -- confirm against run_model's implementation.
        ret_test = run_model(net_pred,
                             is_train=3,
                             data_loader=test_loader,
                             opt=opt,
                             epo=0)
        ret_log = np.array([])
        head = np.array([])
        for k in ret_test.keys():
            ret_log = np.append(ret_log, [ret_test[k]])
            head = np.append(head, [k])
        log.save_csv_log(opt,
                         head,
                         ret_log,
                         is_create=True,
                         file_name='test_walking')
        # print('testing error: {:.3f}'.format(ret_test['m_p3d_h36']))
    # training
    if not opt.is_eval:
        # NOTE(review): err_best restored from the checkpoint above is
        # discarded here, so a resumed run may overwrite a better
        # checkpoint -- confirm this reset is intended.
        err_best = 1000
        for epo in range(start_epoch, opt.epoch + 1):
            is_best = False
            # if epo % opt.lr_decay == 0:
            # Per-epoch exponential decay: the factor 0.1**(1/opt.epoch)
            # shrinks lr by 10x in total over opt.epoch epochs.
            lr_now = util.lr_decay_mine(optimizer, lr_now,
                                        0.1**(1 / opt.epoch))
            print('>>> training epoch: {:d}'.format(epo))
            ret_train = run_model(net_pred,
                                  optimizer,
                                  is_train=0,
                                  data_loader=data_loader,
                                  epo=epo,
                                  opt=opt)
            print('train error: {:.3f}'.format(ret_train['m_p3d_h36']))
            ret_valid = run_model(net_pred,
                                  is_train=1,
                                  data_loader=valid_loader,
                                  opt=opt,
                                  epo=epo)
            print('validation error: {:.3f}'.format(ret_valid['m_p3d_h36']))
            ret_test = run_model(net_pred,
                                 is_train=2,
                                 data_loader=test_loader,
                                 opt=opt,
                                 epo=epo)
            print('testing error: {:.3f}'.format(ret_test['#1']))
            # Assemble one CSV row: epoch, lr, then train/valid/test metrics.
            ret_log = np.array([epo, lr_now])
            head = np.array(['epoch', 'lr'])
            for k in ret_train.keys():
                ret_log = np.append(ret_log, [ret_train[k]])
                head = np.append(head, [k])
            for k in ret_valid.keys():
                ret_log = np.append(ret_log, [ret_valid[k]])
                head = np.append(head, ['valid_' + k])
            for k in ret_test.keys():
                ret_log = np.append(ret_log, [ret_test[k]])
                head = np.append(head, ['test_' + k])
            log.save_csv_log(opt, head, ret_log, is_create=(epo == 1))
            # Model selection is driven by the validation error.
            if ret_valid['m_p3d_h36'] < err_best:
                err_best = ret_valid['m_p3d_h36']
                is_best = True
            log.save_ckpt(
                {
                    'epoch': epo,
                    'lr': lr_now,
                    'err': ret_valid['m_p3d_h36'],
                    'state_dict': net_pred.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                is_best=is_best,
                opt=opt)
# Code example #3
# 0
def main(opt):
    """Evaluate a pretrained model per H3.6M action and log a CSV table.

    For each of the 15 standard H3.6M actions, the test split is run through
    the network and the per-future-frame errors are collected; a final row
    holds the average over all actions. The table is written to
    ``test_pre_action.csv`` with one column per predicted frame.

    Args:
        opt: parsed option namespace; must provide ``d_model``,
            ``kernel_size``, ``num_stage``, ``dct_n``, ``output_n``,
            ``ckpt`` and ``test_batch_size``.
    """
    print('>>> create models')
    # 66 input features -- presumably 22 joints x 3 coordinates for H3.6M;
    # confirm against the dataset definition.
    in_features = 66
    net_pred = AttModel.AttModel(in_features=in_features,
                                 kernel_size=opt.kernel_size,
                                 d_model=opt.d_model,
                                 num_stage=opt.num_stage,
                                 dct_n=opt.dct_n)
    net_pred.cuda()
    model_path_len = '{}/ckpt_best.pth.tar'.format(opt.ckpt)
    print(">>> loading ckpt len from '{}'".format(model_path_len))
    ckpt = torch.load(model_path_len)
    net_pred.load_state_dict(ckpt['state_dict'])
    print(">>> ckpt len loaded (epoch: {} | err: {})".format(
        ckpt['epoch'], ckpt['err']))

    print('>>> loading datasets')

    # CSV header: action name followed by one column per predicted frame.
    head = np.array(['act'])
    for k in range(1, opt.output_n + 1):
        head = np.append(head, [f'#{k}'])

    acts = [
        "walking", "eating", "smoking", "discussion", "directions", "greeting",
        "phoning", "posing", "purchases", "sitting", "sittingdown",
        "takingphoto", "waiting", "walkingdog", "walkingtogether"
    ]
    # One row per action plus a trailing row for the average.
    errs = np.zeros([len(acts) + 1, opt.output_n])
    for i, act in enumerate(acts):
        test_dataset = datasets.Datasets(opt, split=2, actions=[act])
        print('>>> Testing dataset length: {:d}'.format(len(test_dataset)))
        test_loader = DataLoader(test_dataset,
                                 batch_size=opt.test_batch_size,
                                 shuffle=False,
                                 num_workers=0,
                                 pin_memory=True)

        ret_test = run_model(net_pred,
                             is_train=3,
                             data_loader=test_loader,
                             opt=opt)
        print('testing error: {:.3f}'.format(ret_test['#1']))
        errs[i] = np.array([ret_test[k] for k in ret_test.keys()])
    errs[-1] = np.mean(errs[:-1], axis=0)
    acts_col = np.expand_dims(np.array(acts + ["average"]), axis=1)
    # BUGFIX: np.str (a deprecated alias of the builtin) was removed in
    # NumPy 1.24, so errs.astype(np.str) raises AttributeError -- use str.
    value = np.concatenate([acts_col, errs.astype(str)], axis=1)
    log.save_csv_log(opt,
                     head,
                     value,
                     is_create=True,
                     file_name='test_pre_action')
# Code example #4
# 0
def main(opt):
    """Evaluate the pretrained in50/out10 model on hand-picked sequences.

    A curation CSV of (action, sequence-index) pairs selects which test
    sequences count; per-frame errors are accumulated over those sequences
    and the averaged errors for the two input lengths (50 and 100 frames)
    are written to ``test_in50_in100.csv``.

    Args:
        opt: parsed option namespace; ``opt.ckpt`` is overwritten with the
            bundled pretrained checkpoint directory, and ``opt.output_n``
            sets the number of predicted frames logged.
    """
    # Point opt at the bundled pretrained checkpoint.
    opt.ckpt = './checkpoint/pretrained/h36m_3d_in50_out10_dctn20/'
    print('>>> create models')
    # Fixed architecture matching the pretrained checkpoint:
    # 66 input features, kernel 10, d_model 256, 12 stages, DCT size 20.
    net_pred = AttModel.AttModel(in_features=66,
                                 kernel_size=10,
                                 d_model=256,
                                 num_stage=12,
                                 dct_n=20)
    net_pred.cuda()
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in net_pred.parameters()) / 1000000.0))

    model_path_len = './{}/ckpt_best.pth.tar'.format(opt.ckpt)
    print(">>> loading ckpt len from '{}'".format(model_path_len))
    ckpt = torch.load(model_path_len)
    net_pred.load_state_dict(ckpt['state_dict'])
    print(">>> ckpt len loaded (epoch: {} | err: {})".format(
        ckpt['epoch'], ckpt['err']))

    print('>>> loading datasets')

    acts = [
        "walking", "eating", "smoking", "discussion", "directions", "greeting",
        "phoning", "posing", "purchases", "sitting", "sittingdown",
        "takingphoto", "waiting", "walkingdog", "walkingtogether"
    ]
    # Build action -> list of selected sequence indices from the curation CSV
    # (column 0: action name, column 1: sequence index).
    good_idx = pd.read_csv('./checkpoint/pretrained/seq_selected.csv').values
    sele = {}
    for gi in range(good_idx.shape[0]):
        sele.setdefault(good_idx[gi, 0], []).append(int(good_idx[gi, 1]))

    # Two rows of per-frame errors -- presumably row 0 for 50 input frames
    # and row 1 for 100 (matching the 'in50'/'in100' labels below).
    err = np.zeros([2, opt.output_n])
    n = 0
    for act in acts:
        if act not in sele:
            continue
        test_dataset = datasets.Datasets(opt, split=2, actions=[act])
        print('>>> Testing dataset length: {:d}'.format(len(test_dataset)))
        test_loader = DataLoader(test_dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=0,
                                 pin_memory=True)
        # Evaluate only the curated sequences of this action; run_model
        # returns summed errors plus the sequence count.
        ret, nt = run_model(net_pred,
                            is_train=3,
                            data_loader=test_loader,
                            opt=opt,
                            good_idx=sele[act])
        err += ret
        n += nt
    err = err / n
    head = np.array(['input_n'])
    for k in range(1, opt.output_n + 1):
        head = np.append(head, [f'#{k}'])
    value = np.expand_dims(np.array(['in50', 'in100']), axis=1)
    # BUGFIX: np.str was removed in NumPy 1.24 -- use the builtin str.
    value = np.concatenate([value, err.astype(str)], axis=1)
    log.save_csv_log(opt,
                     head,
                     value,
                     is_create=True,
                     file_name='test_in50_in100')