def __init__(self, pretrained_model_path):
    ckpt = torch.load(pretrained_model_path)
    self.mean_pose = ckpt['mean_pose']
    self.device = torch.device(
        "cuda:0" if torch.cuda.is_available() else "cpu")
    self.joint_num = ckpt['mean_pose'].shape[0]
    self.model = LinearModel(joint_num=self.joint_num)
    self.model.cuda()
    self.model.load_state_dict(ckpt['state_dict'])
    self.model.eval()
Example #2
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)

    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:

        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load, encoding='utf-8')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test'])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    # pprint(actions, indent=4)
    # print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    stat_2d = torch.load(os.path.join(opt.data_dir, 'stat_2d.pth.tar'))

    # test
    if opt.test:
        err_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))

            test_loader = DataLoader(dataset=Human36M(
                actions=action,
                data_path=opt.data_dir,
                set_num_samples=opt.set_num_samples,
                use_hg=opt.use_hg,
                is_train=False),
                                     batch_size=opt.test_batch,
                                     shuffle=False,
                                     num_workers=opt.job,
                                     pin_memory=True)

            _, err_test = test(test_loader,
                               model,
                               criterion,
                               stat_2d,
                               stat_3d,
                               procrustes=opt.procrustes)
            err_set.append(err_test)

        print(">>>>>> TEST results:")

        for action in actions:
            print("{}".format(action), end='\t')
        print("\n")

        for err in err_set:
            print("{:.4f}".format(err), end='\t')
        print(">>>\nERRORS: {}".format(np.array(err_set).mean()))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg,
        is_train=False),
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)

    train_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg),
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)

    print(">>> data loaded !")

    cudnn.benchmark = True

    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        ## per epoch
        # train
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              stat_2d,
                                              stat_3d,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)
        # test
        loss_test, err_test = test(test_loader,
                                   model,
                                   criterion,
                                   stat_2d,
                                   stat_3d,
                                   procrustes=opt.procrustes)
        # loss_test, err_test = test(test_loader, model, criterion, stat_3d, procrustes=True)

        # update log file
        logger.append([epoch + 1, lr_now, loss_train, loss_test, err_test],
                      ['int', 'float', 'float', 'float', 'float'])

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        if is_best:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'err': err_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=True)

        else:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'err': err_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=False)

    logger.close()
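
The train() call above receives glob_step, lr_init, lr_decay and gamma; in 3d-pose-baseline style training loops these typically drive a step-wise exponential learning-rate decay. A minimal sketch of such a helper, as an assumption about what the repo's utilities provide:

def exp_lr_decay(optimizer, step, lr_init, decay_step, gamma):
    # decay the learning rate by `gamma` every `decay_step` optimiser steps
    lr = lr_init * gamma ** (step / decay_step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr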
class PoseBaselineForCOCO:
    def __init__(self, pretrained_model_path):
        ckpt = torch.load(pretrained_model_path)
        self.mean_pose = ckpt['mean_pose']
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.joint_num = ckpt['mean_pose'].shape[0]
        self.model = LinearModel(joint_num=self.joint_num)
        self.model.cuda()
        self.model.load_state_dict(ckpt['state_dict'])
        self.model.eval()

    def read_openpose_json(self, openpose_output_dir):
        # load json files
        json_files = os.listdir(openpose_output_dir)
        # keep only .json files, sorted by name
        json_files = sorted([
            filename for filename in json_files if filename.endswith(".json")
        ])

        pose2d, confs = [], []
        ### split each keypoint triplet into (x, y) coordinates and a confidence value
        for file_name in json_files:
            _file = os.path.join(openpose_output_dir, file_name)
            data = json.load(open(_file))
            if len(data['people']) == 0:
                continue

            # get the frame index from the 12-digit number in the file name
            frame_indx = re.findall(r"(\d{12})", file_name)
            # if int(frame_indx[0]) <= 0:
            # in order to register the first frame as-is, keep the index as-is
            tmp = data["people"][0]["pose_keypoints_2d"]

            # if openpose is 25 joints version -> convert to 18 joints
            if len(tmp) == 75:
                # remove joint index
                remove_idx = [8, 19, 20, 21, 22, 23, 24]
                tmp = np.array(tmp).reshape([-1, 3])
                tmp = np.delete(tmp, remove_idx, axis=0)
                tmp = tmp.reshape(-1)

            pose_frame, conf = [], []
            for i in range(len(tmp)):
                if i % 3 == 2:
                    conf.append(tmp[i])
                else:
                    pose_frame.append(tmp[i])
            confs.append(conf)
            pose2d.append(pose_frame)

        return np.array(pose2d), np.array(confs)
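
    # For reference (an assumption about the OpenPose output layout, not taken from this
    # repo): each frame's JSON stores data['people'][k]['pose_keypoints_2d'] as a flat
    # list of [x0, y0, c0, x1, y1, c1, ...] triplets, so the COCO model yields
    # 18 * 3 = 54 values per person and the BODY_25 model yields 25 * 3 = 75, which is
    # why a length of 75 above triggers the 25-to-18 joint conversion.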

    # COCO-Data(18 Joints) -> CMU-Data(19 Joints)
    def convertCOCO2CMU(self, pose2d, conf_rate):
        pose2d = pose2d.reshape(-1, 18, 2)
        cmu_poses = []
        cmu_confs = []
        for i, pose in enumerate(pose2d):
            cmu_pose = [None] * 19
            cmu_conf = [None] * 19
            for j in range(len(pose)):
                cmu_pose[coco2cmu[j]] = pose[j]
                cmu_conf[coco2cmu[j]] = conf_rate[i][j]
            cmu_pose[2] = (pose[8] + pose[11]) / 2  # MidHip
            cmu_conf[2] = (conf_rate[i][8] + conf_rate[i][11]) / 2  # MidHip
            cmu_poses.append(cmu_pose)
            cmu_confs.append(cmu_conf)
        cmu_poses = np.array(cmu_poses)
        cmu_confs = np.array(cmu_confs)
        return cmu_poses, cmu_confs

    # COCO-Data(18 Joints) -> CMU-Data(15 Joints)
    def convertCOCO2CMU15joints(self, pose2d, conf_rate):
        pose2d = pose2d.reshape(-1, 18, 2)
        cmu_poses = []
        cmu_confs = []
        for i, pose in enumerate(pose2d):
            cmu_pose = [None] * 15
            cmu_conf = [None] * 15
            for j in range(len(coco2joint15)):
                cmu_pose[coco2joint15[j]] = pose[j]
                cmu_conf[coco2joint15[j]] = conf_rate[i][j]
            cmu_pose[2] = (pose[8] + pose[11]) / 2  # MidHip
            cmu_conf[2] = (conf_rate[i][8] + conf_rate[i][11]) / 2  # MidHip
            cmu_poses.append(cmu_pose)
            cmu_confs.append(cmu_conf)
        cmu_poses = np.array(cmu_poses)
        cmu_confs = np.array(cmu_confs)
        return cmu_poses, cmu_confs

    # linearly interpolate joints that were not estimated by OpenPose
    def linearInterpolation(self, skeletons, conf_rate):
        conf_rate = conf_rate.T
        skeletons = skeletons.reshape(
            [-1, skeletons.shape[1] * skeletons.shape[2]]).T

        # First, if the confidence of the first or last frame is 0, fill it with the nearest non-zero frame's value
        for joint_idx in range(len(conf_rate)):
            # First frame
            if conf_rate[joint_idx][0] == 0:
                for i in range(1, len(conf_rate[joint_idx])):
                    if conf_rate[joint_idx][i] != 0:
                        skeletons[joint_idx * 2 +
                                  0][0] = skeletons[joint_idx * 2 + 0][i]
                        skeletons[joint_idx * 2 +
                                  1][0] = skeletons[joint_idx * 2 + 1][i]
                        break
            # End frame
            end_frame = len(conf_rate[joint_idx]) - 1
            if conf_rate[joint_idx][end_frame] == 0:
                for i in range(end_frame - 1, 0, -1):
                    if conf_rate[joint_idx][i] != 0:
                        skeletons[joint_idx * 2 +
                                  0][end_frame] = skeletons[joint_idx * 2 +
                                                            0][i]
                        skeletons[joint_idx * 2 +
                                  1][end_frame] = skeletons[joint_idx * 2 +
                                                            1][i]
                        break

        # Second, detect the frame ranges where each joint has zero confidence
        outliers = []  # frames to interpolate for each joint
        for joint_idx in range(len(conf_rate)):
            outlier = []
            i = 0
            for frame in range(len(conf_rate[joint_idx])):
                # if first or end frame -> cannot interpolate
                if frame < i or frame == 0 or frame == len(
                        conf_rate[joint_idx]) - 1:
                    continue
                if conf_rate[joint_idx][frame] == 0:
                    out = []
                    i = frame
                    skip = False
                    while (conf_rate[joint_idx][i] == 0):
                        out.append(i)
                        i += 1
                        if i >= len(conf_rate[joint_idx]) - 1:
                            break
                    outlier.append([out[0], out[len(out) - 1]])
            outliers.append(outlier)

        # Finally, linearly interpolate across each detected gap
        for joint in range(len(outliers)):
            for frame in outliers[joint]:
                j = 1
                for i in range(frame[0], frame[1] + 1):
                    # Interpolation
                    skeletons[joint * 2 + 0][i] = skeletons[joint * 2 + 0][
                        frame[0] -
                        1] + j * (skeletons[joint * 2 + 0][frame[1] + 1] -
                                  skeletons[joint * 2 + 0][frame[0] - 1]) / (
                                      frame[1] - frame[0] + 2)
                    skeletons[joint * 2 + 1][i] = skeletons[joint * 2 + 1][
                        frame[0] -
                        1] + j * (skeletons[joint * 2 + 1][frame[1] + 1] -
                                  skeletons[joint * 2 + 1][frame[0] - 1]) / (
                                      frame[1] - frame[0] + 2)
                    j += 1
        skeletons = skeletons.T.reshape([skeletons.T.shape[0], -1, 2])
        return skeletons

    # inputs : torch_tensor[batch_size][joints(19*2)]
    # outputs: linearly interpolated 2d-pose, 3d-pose
    # For data that includes only the upper body, fill the lower body with mean_pose.
    def predict(self, openpose_json_dir, mode='joint19'):
        pose2d, confs = self.read_openpose_json(openpose_json_dir)

        # Check if only the upper body is detected
        lower_body_conf = np.array(confs)[:, 8:14]
        if np.mean(lower_body_conf) < 0.5:
            isUpperBody = True
        else:
            isUpperBody = False

        # convert COCO joint indices to CMU joint indices
        if mode == 'joint19':
            pose2d, confs = self.convertCOCO2CMU(pose2d, confs)
        elif mode == 'joint15':
            pose2d, confs = self.convertCOCO2CMU15joints(pose2d, confs)

        # Linearly interpolate joints that were not estimated
        pose2d = self.linearInterpolation(pose2d, confs)

        # Normalize input pose
        inputs, shoulder_len, neck_pos = normalize_skeleton(pose2d,
                                                            mode='openpose')

        # if only the upper body was estimated, fill the lower body with the mean pose
        if isUpperBody:
            upper_joints = [0, 1, 3, 4, 5, 9, 10, 11]
            for i in range(len(inputs)):
                for j in range(len(inputs[i])):
                    if j not in upper_joints:
                        inputs[i][j] = self.mean_pose[j][0:2]

        inputs = Variable(
            torch.tensor(inputs).cuda().type(torch.cuda.FloatTensor))
        inputs = inputs.reshape(inputs.shape[0], -1)
        outputs = self.model(inputs)
        outputs = outputs.cpu().detach().numpy().reshape(-1, self.joint_num, 3)
        # outputs = unNormalize_skeleton(outputs, shoulder_len, neck_pos, mode='cmu')
        return pose2d, outputs
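
A minimal usage sketch for the class above; the checkpoint and OpenPose output paths are hypothetical:

if __name__ == '__main__':
    estimator = PoseBaselineForCOCO('ckpt_best.pth.tar')        # hypothetical checkpoint
    pose2d, pose3d = estimator.predict('openpose_output/json',  # hypothetical directory
                                       mode='joint19')
    print(pose2d.shape, pose3d.shape)  # (frames, joints, 2) and (frames, joints, 3)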
Example #4
from src.train import Trainer
from src.test import Tester
from src.configer import Configer

actions = [
    "Directions", "Discussion", "Eating", "Greeting", "Phoning", "Photo",
    "Posing", "Purchases", "Sitting", "SittingDown", "Smoking", "Waiting",
    "WalkDog", "Walking", "WalkTogether"
]

if __name__ == '__main__':
    cfg = Configer('3d_pose_baseline.cfg')

    lr_now = cfg.get_learning_rate()

    model = LinearModel(cfg)
    model = model.cuda()

    glob_step = 0
    start_epoch = 0
    err_best = 1000
    # load ckpt
    if cfg.is_train() != 1:
        ckpt = torch.load(cfg.get_ckpt())  #'./backup/gt_ckpt_best_old.pth.tar'
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
Example #5
def main_human(opt, save_op=True, return_proc=False):
    start_epoch = 0
    err_test_best = 100000
    glob_step = 0
    lr_now = opt.lr

    # save options
    if save_op:
        log.save_options(opt, opt.ckpt)

    print("\n==================Actions=================")
    actions = define_actions(opt.action)
    print(">>> actions to use: {}".format(len(actions)))
    pprint(actions, indent=4)
    print("==========================================\n")

    print("\n==================Data=================")
    print(">>> loading data")

    # load structure for miscellaneous info
    misc = DatasetMisc(opt.dataset_type)

    # load the data from the h5 annotations
    data_dict_train, cameras_dict_train, data_dict_test, cameras_dict_test, \
        stat_2d, stat_3d  = h36.load_human36(misc, opt, actions)

    # relevant options for creating the train and test data loader
    tol_mm = opt.tolerance_mm
    num_pairs = opt.num_pairs
    amt_train_data = opt.amt_train_data
    amt_test_data = opt.amt_test_data
    train_rel_labels_noise_prob = opt.rel_labels_noise_prob
    test_rel_labels_noise_prob = 0.  #NOTE: hard coded to 0 for the test set
    in_dropout_p = opt.in_dropout_p

    if opt.is_train:
        print("\n>>> creating Train dataset")
        # create dataset of type Human36M
        train_h36 = \
            Human36M( misc, cameras_dict_train, data_dict_train, stat_2d, stat_3d,
                      tol_mm, num_pairs, amt_train_data, train_rel_labels_noise_prob,
                      in_dropout_p, is_train=True )
        # create data loader
        train_loader = DataLoader(dataset=train_h36,
                                  batch_size=opt.train_batch,
                                  shuffle=True,
                                  num_workers=opt.job)
        print(" - number of batches: {}".format(len(train_loader)))

    if opt.is_test:
        print("\n>>> creating Test dataset")
        # create dataset of type Human36M
        test_h36 = \
            Human36M( misc, cameras_dict_test, data_dict_test, stat_2d, stat_3d,
                      tol_mm, num_pairs, amt_test_data, test_rel_labels_noise_prob,
                      in_dropout_p, is_train=False )
        # create data loader
        test_loader = DataLoader(dataset=test_h36,
                                 batch_size=opt.test_batch,
                                 shuffle=False,
                                 num_workers=0,
                                 drop_last=False)
        print(" - number of batches: {}".format(len(test_loader)))

    print("==========================================\n")
    print("\n==================Model=================")
    print(">>> creating model")

    num_2d_coords = misc.NUM_KEYPOINTS_2D * 2
    num_3d_coords = misc.NUM_KEYPOINTS_3D * 3

    model = LinearModel(num_2d_coords,
                        num_3d_coords,
                        linear_size=opt.linear_size,
                        num_stage=opt.num_stage,
                        p_dropout=opt.dropout,
                        predict_scale=opt.predict_scale,
                        scale_range=opt.scale_range,
                        unnorm_op=opt.unnorm_op,
                        unnorm_init=opt.unnorm_init)

    model = model.cuda()
    model.apply(weight_init)
    print(" - total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1e6))
    print("==========================================\n")

    ############################################################################
    # define losses and optimizers
    mse_loss = nn.MSELoss(reduction='mean').cuda()
    # mse_loss = nn.MSELoss(size_average=True)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    cudnn.benchmark = True

    ############################################################################
    # load ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)

        stat_3d = ckpt['stat_3d']
        stat_2d = ckpt['stat_2d']

        err_best = ckpt['err']
        lr_now = ckpt['lr']
        glob_step = ckpt['step']

        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])

        if not opt.resume:
            print(">>> ckpt loaded (epoch: {} | err: {})".format(
                start_epoch, err_best))

    if opt.resume:
        assert opt.load != ''
        start_epoch = ckpt['epoch']
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names([
            'it', 'lr', 'l_train', 'l_test', 'e_test', 'e_test_s', 'e_test_p'
        ])

    ############################################################################
    ## TRAINING LOOP
    overall_train_losses = [[0], [0], [0], [0]]
    loss_lbls = ['sup_loss', 'rel_loss', 'rep_loss', 'cam_loss']
    for epoch in range(start_epoch, opt.epochs):
        print('\n==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        ########################################################################
        ## TRAIN
        avg_loss_train = -1
        if opt.is_train:
            print('\n - Training')
            glob_step, lr_now, avg_loss_train, losses_train = \
                    train_human(
                        train_loader=train_loader, misc=misc,
                        stat_2d=stat_2d, stat_3d=stat_3d,
                        standardize_input_data=opt.standardize_input_data,
                        standardize_output_data=opt.standardize_output_data,
                        use_rel_loss=opt.use_rel_loss,
                        subtract_2d_root=opt.subtract_2d_root,
                        keep_root=opt.keep_root,
                        optimizer=optimizer, model=model, mse_loss=mse_loss,
                        reprojection=opt.reprojection,
                        use_full_intrinsics=opt.use_full_intrinsics,
                        predict_scale=opt.predict_scale, limb_type=opt.limb_type,
                        glob_step=glob_step, lr_init=opt.lr, lr_now=lr_now,
                        lr_decay=opt.lr_decay, gamma=opt.lr_gamma,
                        max_norm=opt.max_norm,
                        distance_multiplier=opt.distance_multiplier,
                        loss_weights=opt.loss_weights)
            for li, l in enumerate(overall_train_losses):
                l.extend(losses_train[li])
            viz.plot_losses(overall_train_losses, loss_lbls,
                            opt.ckpt + '/train_losses.jpg', 'Training Set',
                            'iterations', 'losses')

        ########################################################################
        ## TEST
        loss_test = err_test = err_test_scale = err_test_proc = -1
        if opt.is_test and (glob_step) % opt.test_step == 0:
            print('\n - Testing')
            loss_test, target_poses, out_poses, proc_poses, scaled_poses = \
                    test_human(
                        test_loader=test_loader, misc=misc,
                        stat_2d=stat_2d, stat_3d=stat_3d,
                        standardize_input_data=opt.standardize_input_data,
                        standardize_output_data=opt.standardize_output_data,
                        subtract_2d_root=opt.subtract_2d_root, keep_root=opt.keep_root,
                        model=model, mse_loss=mse_loss, use_rel_loss=opt.use_rel_loss,
                        save_ims=opt.save_ims, epoch=epoch, op_dir=opt.ckpt_ims)

            target_poses = np.vstack(target_poses)
            out_poses = np.vstack(out_poses)
            scaled_poses = np.vstack(scaled_poses)
            proc_poses = np.vstack(proc_poses)

            ####################################################################
            ## compute error in mm for both protocols (with and without procrustes)
            sqerr = (out_poses - target_poses)**2
            sqerr_proc = (proc_poses - target_poses)**2
            sqerr_scaled = (scaled_poses - target_poses)**2

            all_err = np.sqrt(sqerr[:, 0::3] + sqerr[:, 1::3] + sqerr[:, 2::3])
            all_err_scaled = np.sqrt(sqerr_scaled[:, 0::3] +
                                     sqerr_scaled[:, 1::3] +
                                     sqerr_scaled[:, 2::3])
            all_err_proc = np.sqrt(sqerr_proc[:, 0::3] + sqerr_proc[:, 1::3] +
                                   sqerr_proc[:, 2::3])

            err_test = np.mean(all_err)
            err_test_scale = np.mean(all_err_scaled)
            err_test_proc = np.mean(all_err_proc)
            print("> 3d error              {}".format(round(err_test, 3)))
            print("> 3d error (scaled)     {}".format(round(err_test_scale,
                                                            3)))
            print("> 3d error (procrustes) {}".format(round(err_test_proc, 3)))
            print("-" * 25)

            # compute the errors per action
            a_test = data_dict_test['A'][test_h36.inds]
            vals, counts = np.unique(a_test, return_counts=True)
            assert a_test.shape[0] == all_err_proc.shape[0], "Bad shapes."

            err_test_actions_last = []
            for vv, cc in zip(vals, counts):
                action_inds = np.where(a_test == vv)[0]
                action_errs = all_err_proc[action_inds]
                err_test_actions_last.append(action_errs.mean())

            for aa, bb in zip(err_test_actions_last, actions):
                print("> {:<12} 3d error: {:.3f}".format(bb, aa))
            print("> Overall avg : {:.3f}".format(
                np.mean(err_test_actions_last)))
            print("-" * 25)

            # change this line to decide what error to use for storing best
            err_test_last = err_test_proc

            is_best = err_test_last < err_test_best
            err_test_best = min(err_test_last, err_test_best)
            if save_op:
                log.save_ckpt(
                    {
                        'epoch': epoch + 1,
                        'lr': lr_now,
                        'step': glob_step,
                        'err': err_test_last,
                        'stat_2d': stat_2d,
                        'stat_3d': stat_3d,
                        'opt': opt,
                        'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict()
                    },
                    ckpt_path=opt.ckpt,
                    data_split='test',
                    is_best=is_best)

        # update log file
        logs = [
            glob_step, lr_now, avg_loss_train, loss_test, err_test,
            err_test_scale, err_test_proc
        ]
        logs_type = [
            'int', 'float', 'float', 'float', 'float', 'float', 'float'
        ]

        if save_op:
            logger.append(logs, logs_type)

    logger.close()
    if return_proc:
        return err_test_best, err_test_last, err_test_actions_last, all_err_proc, proc_poses, data_dict_test
    else:
        return err_test_best, err_test_last, err_test_actions_last
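
The protocol-2 numbers above (err_test_proc) come from Procrustes-aligned poses. A minimal sketch of that alignment for a single pose, shown only as an illustration of the idea rather than the repo's exact routine:

import numpy as np

def procrustes_align(pred, target):
    # align pred (J, 3) onto target (J, 3) with the best scale, rotation and translation;
    # MPJPE computed on the returned pose is the "procrustes" error
    mu_p, mu_t = pred.mean(axis=0), target.mean(axis=0)
    X, Y = pred - mu_p, target - mu_t
    U, S, Vt = np.linalg.svd(X.T @ Y)      # SVD of the 3x3 cross-covariance
    if np.linalg.det(U @ Vt) < 0:          # avoid an improper rotation (reflection)
        Vt[-1, :] *= -1
        S[-1] *= -1
    R = U @ Vt
    s = S.sum() / (X ** 2).sum()           # optimal isotropic scale
    return s * X @ R + mu_t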
Example #6
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    manual_seed = 1234
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)

    # save options
    log.save_options(opt, opt.ckpt)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.to(device)
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test'])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    pprint(actions, indent=4)
    print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    # test
    if opt.test:
        refine_dic, refine_per_action, coeff_funs, refine_extra_kwargs = ru.get_refine_config(
            opt)
        pck_thresholds = [50, 100, 150, 200, 250]
        noise_fun = lambda x: add_gaussion_noise(x, percent=opt.noise_level)

        err_set = []
        pck_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))
            test_loader = DataLoader(dataset=Human36M(actions=action,
                                                      data_path=opt.data_dir,
                                                      use_hg=opt.use_hg,
                                                      is_train=False),
                                     batch_size=opt.test_batch,
                                     shuffle=False,
                                     pin_memory=True)

            refine_idx_action = ru.get_idx_action(action)
            if refine_per_action:
                refine_dic_i = refine_dic[refine_idx_action]
            else:
                refine_dic_i = refine_dic
            coeff_fun_i = coeff_funs[refine_idx_action]
            _, err_test, pck_test = test(
                test_loader,
                model,
                criterion,
                stat_3d,
                device,
                procrustes=opt.procrustes,
                noise_fun=noise_fun,
                pck_thresholds=pck_thresholds,
                refine_dic=refine_dic_i,
                refine_coeff_fun=coeff_fun_i,
                refine_extra_kwargs=refine_extra_kwargs,
                cache_prefix=action if opt.dump_err else None)
            err_set.append(err_test)
            pck_set.append(pck_test)
        print(">>>>>> TEST results:")
        for action in actions:
            print("{}".format(action[:7]), end='\t')
        print("\n")
        for err in err_set:
            print("{:7.4f}".format(err), end='\t')
        print(">>> ERRORS: {}".format(np.array(err_set).mean()))

        for i, thres in enumerate(pck_thresholds):
            for pck in pck_set:
                print("{:7.4f}".format(pck[i]), end='\t')
            print(">>> PCKS {}: {}".format(
                thres, np.mean([pck[i] for pck in pck_set])))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(dataset=Human36M(actions=actions,
                                              data_path=opt.data_dir,
                                              use_hg=opt.use_hg,
                                              is_train=False),
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)
    train_loader = DataLoader(dataset=Human36M(actions=actions,
                                               data_path=opt.data_dir,
                                               use_hg=opt.use_hg),
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)
    print(">>> data loaded !")

    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        # per epoch
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              device,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)
        loss_test, err_test, pck_test = test(test_loader,
                                             model,
                                             criterion,
                                             stat_3d,
                                             device,
                                             procrustes=opt.procrustes)

        # update log file
        logger.append(
            [epoch + 1, lr_now, loss_train, loss_test, err_test, pck_test],
            ['int', 'float', 'float', 'float', 'float', 'float'])

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        if is_best:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'err': err_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=True)
        else:
            log.save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'err': err_best,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                ckpt_path=opt.ckpt,
                is_best=False)

    logger.close()
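
The pck_test values reported above are, for each threshold in pck_thresholds, the fraction of joints whose 3D error falls under that many millimetres. A minimal sketch of the metric, as an illustration (the repo's test() computes it internally):

import numpy as np

def pck_3d(pred, target, threshold_mm):
    # pred, target: (N, J, 3) poses in millimetres
    per_joint_err = np.linalg.norm(pred - target, axis=-1)  # (N, J)
    return float((per_joint_err <= threshold_mm).mean())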
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)

    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt["epoch"]
        err_best = ckpt["err"]
        glob_step = ckpt["step"]
        lr_now = ckpt["lr"]
        model.load_state_dict(ckpt["state_dict"])
        optimizer.load_state_dict(ckpt["optimizer"])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        logger = log.Logger(os.path.join(opt.ckpt, "log.txt"), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, "log.txt"))
        logger.set_names(
            ["epoch", "lr", "loss_train", "loss_test", "err_test"])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    pprint(actions, indent=4)
    print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_3d = torch.load(os.path.join(opt.data_dir, "stat_3d.pth.tar"))
    # test
    if opt.test:
        err_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))
            test_loader = DataLoader(
                dataset=Human36M(
                    actions=action,
                    data_path=opt.data_dir,
                    use_hg=opt.use_hg,
                    is_train=False,
                ),
                batch_size=opt.test_batch,
                shuffle=False,
                num_workers=opt.job,
                pin_memory=True,
            )
            _, err_test = test(test_loader,
                               model,
                               criterion,
                               stat_3d,
                               procrustes=opt.procrustes)
            err_set.append(err_test)
        print(">>>>>> TEST results:")
        for action in actions:
            print("{}".format(action), end="\t")
        print("\n")
        for err in err_set:
            print("{:.4f}".format(err), end="\t")
        print(">>>\nERRORS: {}".format(np.array(err_set).mean()))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(
        dataset=Human36M(actions=actions,
                         data_path=opt.data_dir,
                         use_hg=opt.use_hg,
                         is_train=False),
        batch_size=opt.test_batch,
        shuffle=False,
        num_workers=opt.job,
        pin_memory=True,
    )
    train_loader = DataLoader(
        dataset=Human36M(actions=actions,
                         data_path=opt.data_dir,
                         use_hg=opt.use_hg),
        batch_size=opt.train_batch,
        shuffle=True,
        num_workers=opt.job,
        pin_memory=True,
    )
    print(">>> data loaded !")

    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print("==========================")
        print(">>> epoch: {} | lr: {:.5f}".format(epoch + 1, lr_now))

        # per epoch
        glob_step, lr_now, loss_train = train(
            train_loader,
            model,
            criterion,
            optimizer,
            lr_init=opt.lr,
            lr_now=lr_now,
            glob_step=glob_step,
            lr_decay=opt.lr_decay,
            gamma=opt.lr_gamma,
            max_norm=opt.max_norm,
        )
        loss_test, err_test = test(test_loader,
                                   model,
                                   criterion,
                                   stat_3d,
                                   procrustes=opt.procrustes)

        # update log file
        logger.append(
            [epoch + 1, lr_now, loss_train, loss_test, err_test],
            ["int", "float", "float", "flaot", "float"],
        )

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        if is_best:
            log.save_ckpt(
                {
                    "epoch": epoch + 1,
                    "lr": lr_now,
                    "step": glob_step,
                    "err": err_best,
                    "state_dict": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                },
                ckpt_path=opt.ckpt,
                is_best=True,
            )
        else:
            log.save_ckpt(
                {
                    "epoch": epoch + 1,
                    "lr": lr_now,
                    "step": glob_step,
                    "err": err_best,
                    "state_dict": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                },
                ckpt_path=opt.ckpt,
                is_best=False,
            )

    logger.close()
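
Each example calls model.apply(weight_init) right after moving the model to the GPU. A plausible sketch of such an initialiser, assuming the usual Kaiming scheme for linear layers (the actual helper lives in the repo's utilities):

import torch.nn as nn

def weight_init(m):
    # Kaiming-initialise every Linear layer's weights and zero its bias
    if isinstance(m, nn.Linear):
        nn.init.kaiming_normal_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)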
Example #8
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.out_dir)

    # create and initialise model
    # parents = [1, 2, 7, 7, 5, 7, 5, -1, 8, 7, 7, 10, 7]
    # assert len(parents) == 13
    # adj = adj_mx_from_skeleton(13, parents)

    model = LinearModel(
        input_size=26,
        output_size=39,
        linear_size=opt.linear_size,
        num_stage=opt.num_stage,
        p_dropout=opt.dropout,
    )
    # groups = [[2, 3], [5, 6], [1, 4], [0, 7], [8, 9], [14, 15], [11, 12], [10, 13]]
    # model = SemGCN(adj, 128, num_layers=4, p_dropout=0.0, nodes_group=None)

    # model = SemGCN()
    model = model.cuda()
    model.apply(weight_init)
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # load pretrained ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt["epoch"]
        err_best = ckpt["err"]
        glob_step = ckpt["step"]
        lr_now = ckpt["lr"]
        model.load_state_dict(ckpt["state_dict"])
        optimizer.load_state_dict(ckpt["optimizer"])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))

    if opt.test:
        log_file = "log_test.txt"
    else:
        log_file = "log_train.txt"
    if opt.resume:
        logger = log.Logger(os.path.join(opt.out_dir, log_file), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.out_dir, log_file))
        logger.set_names(
            ["epoch", "lr", "loss_train", "loss_test", "err_test"])

    # data loading
    print("\n>>> loading data")
    stat_3d = torch.load(os.path.join(opt.data_dir, "stat_3d.pth.tar"))

    # test
    if opt.test:
        test_loader = DataLoader(
            dataset=data_loader(data_path=opt.data_dir, is_train=False),
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=opt.job,
            pin_memory=True,
        )

        loss_test, err_test, joint_err, all_err, outputs, targets, inputs = test(
            test_loader, model, criterion, stat_3d)

        print(os.path.join(opt.out_dir, "test_results.pth.tar"))
        torch.save(
            {
                "loss": loss_test,
                "all_err": all_err,
                "test_err": err_test,
                "joint_err": joint_err,
                "output": outputs,
                "target": targets,
                "input": inputs,
            },
            os.path.join(opt.out_dir, "test_results.pth.tar"),
        )

        # print("train {:.4f}".format(err_train), end="\t")
        print("test {:.4f}".format(err_test), end="\t")
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(
        dataset=data_loader(data_path=opt.data_dir, is_train=False),
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.job,
        pin_memory=True,
    )

    train_loader = DataLoader(
        dataset=data_loader(data_path=opt.data_dir,
                            is_train=True,
                            noise=opt.noise),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.job,
        pin_memory=True,
        drop_last=False,
    )

    # loop through epochs
    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print("==========================")
        print(">>> epoch: {} | lr: {:.10f}".format(epoch + 1, lr_now))

        # train
        glob_step, lr_now, loss_train = train(
            train_loader,
            model,
            criterion,
            optimizer,
            lr_init=opt.lr,
            lr_now=lr_now,
            glob_step=glob_step,
            lr_decay=opt.lr_decay,
            gamma=opt.lr_gamma,
            max_norm=opt.max_norm,
        )

        # evaluating on train_loader here would be redundant: its results are
        # overwritten by the test-set evaluation below
        # loss_test, err_test, _, _, _, _, _ = test(train_loader, model,
        #                                           criterion, stat_3d)

        # test
        loss_test, err_test, _, _, _, _, _ = test(test_loader, model,
                                                  criterion, stat_3d)

        # update log file
        logger.append(
            [epoch + 1, lr_now, loss_train, loss_test, err_test],
            ["int", "float", "float", "float", "float"],
        )

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        log.save_ckpt(
            {
                "epoch": epoch + 1,
                "lr": lr_now,
                "step": glob_step,
                "err": err_best,
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            ckpt_path=opt.out_dir,
            is_best=is_best,
        )

    logger.close()
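
log.save_ckpt is used throughout with an is_best flag. A plausible sketch of what such a helper does, as an assumption rather than the repo's exact code: persist the latest state and copy it to a "best" file whenever the error improved.

import os
import shutil
import torch

def save_ckpt(state, ckpt_path, is_best=False):
    last = os.path.join(ckpt_path, 'ckpt_last.pth.tar')   # hypothetical file names
    torch.save(state, last)
    if is_best:
        shutil.copyfile(last, os.path.join(ckpt_path, 'ckpt_best.pth.tar'))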
Example #9
def main_lsp(opt, save_op=True, return_poses=False):
    start_epoch = 0
    err_test_best = 100000
    glob_step = 0
    lr_now = opt.lr

    # save options
    if save_op:
        log.save_options(opt, opt.ckpt)

    print("==================Data=================")

    # get info related to dataset type
    misc = DatasetMisc(opt.dataset_type)

    ############################################################################
    # create train data loader
    print(">>> creating Train dataset")
    train_dataset_lsp = LSP(misc, opt, is_train=True)
    train_loader = DataLoader(dataset=train_dataset_lsp,
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job)
    stat_2d = {}
    stat_3d = {}
    stat_2d['lsp_mean'] = train_dataset_lsp.stat_2d['mean']
    stat_2d['lsp_std'] = train_dataset_lsp.stat_2d['std']
    print(" - number of batches: {}".format(len(train_loader)))

    ############################################################################
    # create test data loader
    print("\n>>> creating Test dataset")
    test_dataset_lsp = LSP(misc, opt, is_train=False)
    test_loader = DataLoader(dataset=test_dataset_lsp,
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=0,
                             drop_last=False)
    print(" - number of batches: {}".format(len(test_loader)))
    print("==========================================\n")

    print("\n==================Model=================")
    print(">>> creating model")

    num_2d_coords = misc.NUM_KEYPOINTS_2D * 2
    num_3d_coords = misc.NUM_KEYPOINTS_3D * 3

    model = LinearModel(num_2d_coords,
                        num_3d_coords,
                        linear_size=opt.linear_size,
                        num_stage=opt.num_stage,
                        p_dropout=opt.dropout,
                        predict_scale=opt.predict_scale,
                        scale_range=opt.scale_range,
                        unnorm_op=opt.unnorm_op,
                        unnorm_init=opt.unnorm_init)

    model = model.cuda()
    model.apply(weight_init)
    print(" - total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1e6))
    print("==========================================\n")

    # define optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    cudnn.benchmark = True

    # load ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)

        loaded_stat_3d = ckpt['stat_3d']
        loaded_stat_2d = ckpt['stat_2d']
        # add the keys of the loaded stat dicts to the current stat dicts
        for k, v in loaded_stat_2d.items():
            stat_2d[k] = v
        for k, v in loaded_stat_3d.items():
            stat_3d[k] = v

        err_best = ckpt['err']
        lr_now = ckpt['lr']
        glob_step = ckpt['step']

        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])

        if not opt.resume:
            print(">>> ckpt loaded (epoch: {} | err: {})".format(
                start_epoch, err_best))

    if opt.resume:
        assert opt.load != ''
        start_epoch = ckpt['epoch']
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names(['it', 'lr', 'l_train', 'e_train', 'e_test'])

    ############################################################################
    ## TRAINING LOOP
    overall_train_losses = [[0], [0], [0]]
    loss_lbls = ['rel_loss', 'rep_loss', 'cam_loss']
    for epoch in range(start_epoch, opt.epochs):
        print('\n==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        ########################################################################
        ## TRAIN
        err_train = avg_loss_train = -1
        if opt.is_train:
            print('\n - Training')
            glob_step, lr_now, avg_loss_train, losses_train, all_err_train = \
                    train_lsp(train_loader=train_loader, misc=misc,
                              stat_2d=stat_2d, stat_3d=stat_3d, limb_type=opt.limb_type,
                              standardize_input_data=opt.standardize_input_data,
                              standardize_output_data=opt.standardize_output_data,
                              use_loaded_stats=opt.use_loaded_stats,
                              use_rel_loss=opt.use_rel_loss,
                              subtract_2d_root=opt.subtract_2d_root, keep_root=opt.keep_root,
                              optimizer=optimizer, model=model,
                              predict_scale=opt.predict_scale,
                              glob_step=glob_step, lr_init=opt.lr, lr_now=lr_now, lr_decay=opt.lr_decay,
                              gamma=opt.lr_gamma, max_norm=opt.max_norm, distance_multiplier=opt.distance_multiplier,
                              loss_weights=opt.loss_weights)
            for li, l in enumerate(overall_train_losses):
                l.extend(losses_train[li])
            viz.plot_losses(overall_train_losses, loss_lbls,
                            opt.ckpt + '/train_losses.jpg', 'Training Set',
                            'iterations', 'losses')

            err_train = np.mean(np.vstack(all_err_train))
            print("> Rel labels error (train):   {}".format(round(
                err_train, 3)))

        ########################################################################
        ## TEST
        err_test_last = -1
        if opt.is_test and (glob_step) % opt.test_step == 0:
            print('\n - Testing')
            all_err_test, all_poses = test_lsp(
                test_loader=test_loader,
                misc=misc,
                stat_2d=stat_2d,
                stat_3d=stat_3d,
                standardize_input_data=opt.standardize_input_data,
                standardize_output_data=opt.standardize_output_data,
                use_loaded_stats=opt.use_loaded_stats,
                use_rel_loss=opt.use_rel_loss,
                subtract_2d_root=opt.subtract_2d_root,
                keep_root=opt.keep_root,
                model=model,
                save_ims=opt.save_ims,
                epoch=epoch,
                op_dir=opt.ckpt_ims)

            err_test_last = np.mean(np.vstack(all_err_test))
            print("> Rel labels error (test):   {}".format(
                round(err_test_last, 3)))

            is_best = err_test_last < err_test_best
            err_test_best = min(err_test_last, err_test_best)
            if save_op:
                log.save_ckpt(
                    {
                        'epoch': epoch + 1,
                        'lr': lr_now,
                        'step': glob_step,
                        'err': err_test_best,
                        'stat_2d': stat_2d,
                        'stat_3d': stat_3d,
                        'opt': opt,
                        'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict()
                    },
                    ckpt_path=opt.ckpt,
                    data_split='test',
                    is_best=is_best)

        # update log file
        logger.append(
            [glob_step, lr_now, avg_loss_train, err_train, err_test_last],
            ['int', 'float', 'float', 'float', 'float'])

    logger.close()
    if return_poses:
        return err_test_best, err_test_last, np.vstack(
            [i[0] for i in all_poses])[0::5, :]
    else:
        return err_test_best, err_test_last
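
main_lsp trains with relative (ordinal) depth labels, controlled by opt.tolerance_mm and opt.num_pairs. A minimal sketch of how one such label can be derived for a single joint pair, shown only to illustrate the idea (not the dataset's labelling code):

def relative_depth_label(z_i, z_j, tol_mm=100.0):
    # +1 if joint i is farther from the camera than joint j, -1 if closer,
    # 0 if their depths differ by less than the tolerance
    dz = z_i - z_j
    if abs(dz) < tol_mm:
        return 0
    return 1 if dz > 0 else -1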
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # save options
    log.save_options(opt, opt.ckpt)

    # create model
    print(">>> creating model")
    model = LinearModel()
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:

        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load, encoding='utf-8')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))
    if opt.resume:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.ckpt, 'log.txt'))
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test'])

    # list of action(s)
    actions = misc.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    # pprint(actions, indent=4)
    # print(">>>")

    # data loading
    print(">>> loading data")
    # load statistics data
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    stat_2d = torch.load(os.path.join(opt.data_dir, 'stat_2d.pth.tar'))
    """
    stat_3d.keys() =>  dict_keys(['std', 'dim_use', 'train', 'test', 'mean'])
    std => (96., )
    mean => (96.,)
    dim_use => (48, ) ?????
    train => dict{[user, action, camera_id]} ex) dict{[6, 'Walking', 'Walking 1.60457274.h5']} // data = int // len 600 = 15 actions * 8 cameras+extra_actions * 5 users
    test => same as train, user = 9, 11 // len 240
    (7,
     'Photo',
     'Photo 1.58860488.h5'): array([[514.54570615, -606.40670751, 5283.29114444],
                                    [513.19690503, -606.27874917, 5282.94296128],
                                    [511.72623278, -606.3556718, 5282.09161439],
                                    ...,
                                    [660.21544235, -494.87670603, 5111.48298849],
                                    [654.79473179, -497.67942449, 5111.05843265],
                                    [649.61962945, -498.74291164, 5111.91590807]])}

    """
    # actions = ["Directions",
    #            "Discussion",
    #            "Eating",
    #            "Greeting",
    #            "Phoning",
    #            "Photo",
    #            "Posing",
    #            "Purchases",
    #            "Sitting",
    #            "SittingDown",
    #            "Smoking",
    #            "Waiting",
    #            "WalkDog",
    #            "Walking",
    #            "WalkTogether"]
    # actions = ["Photo"]
    # test
    if opt.test:
        err_set = []
        for action in actions:
            print(">>> TEST on _{}_".format(action))

            test_loader = DataLoader(dataset=Human36M(
                actions=action,
                data_path=opt.data_dir,
                set_num_samples=opt.set_num_samples,
                use_hg=opt.use_hg,
                is_train=False),
                                     batch_size=opt.test_batch,
                                     shuffle=False,
                                     num_workers=opt.job,
                                     pin_memory=True)

            _, err_test = test(test_loader,
                               model,
                               criterion,
                               stat_2d,
                               stat_3d,
                               procrustes=opt.procrustes)
            err_set.append(err_test)

        print(">>>>>> TEST results:")

        for action in actions:
            print("{}".format(action), end='\t')
        print("\n")

        for err in err_set:
            print("{:.4f}".format(err), end='\t')
        print(">>>\nERRORS: {}".format(np.array(err_set).mean()))
        sys.exit()

    # load datasets for training
    test_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg,
        is_train=False),
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)

    train_loader = DataLoader(dataset=Human36M(
        actions=actions,
        data_path=opt.data_dir,
        set_num_samples=opt.set_num_samples,
        use_hg=opt.use_hg),
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)

    print(">>> data loaded !")

    cudnn.benchmark = True
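    # let cuDNN auto-tune and cache the fastest kernels for fixed-size inputs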

    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        ## per epoch
        # train
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              stat_2d,
                                              stat_3d,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)
        # test
        loss_test, err_test = test(test_loader,
                                   model,
                                   criterion,
                                   stat_2d,
                                   stat_3d,
                                   procrustes=opt.procrustes)
        # loss_test, err_test = test(test_loader, model, criterion, stat_3d, procrustes=True)

        # update log file
        logger.append([epoch + 1, lr_now, loss_train, loss_test, err_test],
                      ['int', 'float', 'float', 'float', 'float'])

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        log.save_ckpt(
            {
                'epoch': epoch + 1,
                'lr': lr_now,
                'step': glob_step,
                'err': err_best,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            ckpt_path=opt.ckpt,
            is_best=is_best)

    logger.close()
Example #11
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr

    # data loading
    print("\n>>> loading data")
    stat_3d = torch.load(os.path.join(opt.data_dir, 'stat_3d.pth.tar'))
    input_size = stat_3d['input_size']
    output_size = stat_3d['output_size']

    print('\n>>> input dimension: {} '.format(input_size))
    print('>>> output dimension: {} \n'.format(output_size))

    # save options
    log.save_options(opt, opt.out_dir)

    # create and initialise model
    model = LinearModel(input_size=input_size, output_size=output_size)
    model = model.cuda()
    model.apply(weight_init)
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # load pretrained ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))

    if opt.test:
        log_file = 'log_test.txt'
    else:
        log_file = 'log_train.txt'
    if opt.resume:
        logger = log.Logger(os.path.join(opt.out_dir, log_file), resume=True)
    else:
        logger = log.Logger(os.path.join(opt.out_dir, log_file))
        logger.set_names(
            ['epoch', 'lr', 'loss_train', 'loss_test', 'err_test'])

    #loader for testing and prediction
    test_loader = DataLoader(dataset=data_loader(data_path=opt.data_dir,
                                                 is_train=False,
                                                 predict=opt.predict),
                             batch_size=opt.batch_size,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)

    # test
    if opt.test or opt.predict:

        loss_test, err_test, joint_err, all_err, outputs, targets, inputs = \
            test(test_loader, model, criterion, stat_3d)

        print(os.path.join(opt.out_dir, "test_results.pth.tar"))
        torch.save(
            {
                'loss': loss_test,
                'all_err': all_err,
                'test_err': err_test,
                'joint_err': joint_err,
                'output': outputs,
                'target': targets,
                'input': inputs
            }, open(os.path.join(opt.out_dir, "test_results.pth.tar"), "wb"))

        if not opt.predict:
            print("{:.4f}".format(err_test), end='\t')

        sys.exit()

    # loader for training
    train_loader = DataLoader(dataset=data_loader(data_path=opt.data_dir,
                                                  is_train=True,
                                                  noise=opt.noise),
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)

    # loop through epochs
    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))

        # train
        glob_step, lr_now, loss_train = train(train_loader,
                                              model,
                                              criterion,
                                              optimizer,
                                              lr_init=opt.lr,
                                              lr_now=lr_now,
                                              glob_step=glob_step,
                                              lr_decay=opt.lr_decay,
                                              gamma=opt.lr_gamma,
                                              max_norm=opt.max_norm)

        #test
        loss_test, err_test, _, _, _, _, _ = test(test_loader, model,
                                                  criterion, stat_3d)

        # update log file
        logger.append([epoch + 1, lr_now, loss_train, loss_test, err_test],
                      ['int', 'float', 'float', 'float', 'float'])

        # save ckpt
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        log.save_ckpt(
            {
                'epoch': epoch + 1,
                'lr': lr_now,
                'step': glob_step,
                'err': err_best,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            ckpt_path=opt.out_dir,
            is_best=is_best)

    logger.close()
Example #12
def main(opt):
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr
    lr_decay = opt.lr_decay
    lr_init = opt.lr
    lr_gamma = opt.lr_gamma
    start_epoch = 0

    file_path = os.path.join(opt.ckpt, 'opt.json')
    with open(file_path, 'w') as f:
        f.write(json.dumps(vars(opt), sort_keys=True, indent=4))

    # create model
    print(">>> creating model")
    model = LinearModel(opt.batch_size, opt.predict_14)
    # = refine_2d_model(opt.batch_size,opt.predict_14)
    model = model.cuda()
    model.apply(weight_init)

    #refine_2d_model = refine_2d_model.cuda()
    #refine_2d_model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0
    ))  #+ sum(p.numel() for p in refine_2d_model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    #refine_2d_model_optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load checkpoint
    if opt.resume:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        glob_step = ckpt['step']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        #refine_2d_model.load_state_dict[ckpt['refine_state_dict']]
        optimizer.load_state_dict(ckpt['optimizer'])
        #refine_2d_model_optimizer.load_state_dict(ckpt['refine_optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))

    # list of actions
    actions = data_utils.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    pprint(actions, indent=4)
    print(">>>")

    # data loading
    print(">>> loading data")

    # Load camera parameters
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(opt.cameras_path, SUBJECT_IDS)

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, opt.data_dir, opt.camera_frame, rcams, opt.predict_14)

    # Read stacked hourglass 2D predictions if use_sh, otherwise use groundtruth 2D projections
    if opt.use_hg:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
            actions, opt.data_dir)
    else:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data(
            actions, opt.data_dir, rcams)

    #gt_train_set_2d, gt_test_set_2d, gt_data_mean_2d, gt_data_std_2d, gt_dim_to_ignore_2d, gt_dim_to_use_2d = data_utils.create_2d_data( actions, opt.data_dir, rcams )

    print("done reading and normalizing data.")

    step_time, loss = 0, 0
    current_epoch = start_epoch
    log_every_n_batches = 100

    cudnn.benchmark = True
    best_error = 10000
    while current_epoch < opt.epochs:
        current_epoch = current_epoch + 1

        # === Load training batches for one epoch ===
        encoder_inputs, decoder_outputs = get_all_batches(opt,
                                                          train_set_2d,
                                                          train_set_3d,
                                                          training=True)

        nbatches = len(encoder_inputs)
        print("There are {0} train batches".format(nbatches))
        start_time = time.time()

        # === Loop through all the training batches ===
        current_step = 0
        for i in range(nbatches):

            if (i + 1) % log_every_n_batches == 0:
                # Print progress every log_every_n_batches batches
                print("Working on epoch {0}, batch {1} / {2}... \n".format(
                    current_epoch, i + 1, nbatches),
                      end="")

            model.train()

            if glob_step % lr_decay == 0 or glob_step == 1:
                lr_now = utils.lr_decay(optimizer, glob_step, lr_init,
                                        lr_decay, lr_gamma)
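                # (assumption) utils.lr_decay is expected to apply exponential
                # decay, roughly lr_init * gamma ** (glob_step / lr_decay);
                # the exact schedule lives in that helper, not shown here.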
                #utils.lr_decay(refine_2d_model_optimizer, glob_step, lr_init, lr_decay, lr_gamma)

            enc_in = torch.from_numpy(encoder_inputs[i]).float()
            dec_out = torch.from_numpy(decoder_outputs[i]).float()

            inputs = Variable(enc_in.cuda())
            targets = Variable(dec_out.cuda())

            outputs = model(inputs)

            # calculate loss
            optimizer.zero_grad()

            step_loss = criterion(outputs, targets)
            step_loss.backward()

            if opt.max_norm:
                nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
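                # clip_grad_norm_ rescales all gradients in-place so their
                # global L2 norm is at most 1 (guards against exploding gradients)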
                #nn.utils.clip_grad_norm_(refine_2d_model.parameters(), max_norm=1)

            optimizer.step()

            loss += float(step_loss)

            current_step += 1
            glob_step += 1
            # === end looping through training batches ===

        loss = loss / nbatches

        print("=============================\n"
              "Global step:         %d\n"
              "Learning rate:       %.2e\n"
              "Train loss avg:      %.4f\n"
              "=============================" % (glob_step, lr_now, loss))
        # === End training for an epoch ===

        # release unused cached GPU memory
        torch.cuda.empty_cache()

        # === Testing after this epoch ===
        model.eval()
        if opt.evaluateActionWise:
            print("{0:=^12} {1:=^6}".format("Action",
                                            "mm"))  # line of 30 equal signs

            cum_err = 0
            record = ''
            for action in actions:

                print("{0:<12} ".format(action), end="")
                # Get 2d and 3d testing data for this action
                action_test_set_2d = get_action_subset(test_set_2d, action)
                action_test_set_3d = get_action_subset(test_set_3d, action)
                encoder_inputs, decoder_outputs = get_all_batches(
                    opt,
                    action_test_set_2d,
                    action_test_set_3d,
                    training=False)

                total_err, joint_err, step_time = evaluate_batches(
                    opt, criterion, model, data_mean_3d, data_std_3d,
                    dim_to_use_3d, dim_to_ignore_3d, data_mean_2d, data_std_2d,
                    dim_to_use_2d, dim_to_ignore_2d, current_step,
                    encoder_inputs, decoder_outputs)
                cum_err = cum_err + total_err

                print("{0:>6.2f}".format(total_err))

                record = record + "{}   :   {}  (mm) \n".format(
                    action, total_err)
            avg_val = cum_err / float(len(actions))
            print("{0:<12} {1:>6.2f}".format("Average", avg_val))
            print("{0:=^19}".format(''))

            f = open("records.txt", 'a')
            f.write("epoch: {} , avg_error: {}  loss : {} \n".format(
                current_epoch, avg_val, loss))

            if best_error > avg_val:
                print("=============================")
                print("==== save best record   =====")
                print("=============================")
                best_error = avg_val
                # save ckpt
                file_path = os.path.join(opt.ckpt, 'ckpt_last.pth.tar')
                torch.save(
                    {
                        'epoch': current_epoch,
                        'lr': lr_now,
                        'step': glob_step,
                        'err': avg_val,
                        'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict()
                    }, file_path)

                f.write("epoch: {} , avg_error: {} \n".format(
                    current_epoch, best_error))
                f.write(record)

            f.write("=======================================\n")
            f.close()

        else:

            n_joints = 17 if not (opt.predict_14) else 14

            encoder_inputs, decoder_outputs = get_all_batches(opt,
                                                              test_set_2d,
                                                              test_set_3d,
                                                              training=False)

            total_err, joint_err, step_time = evaluate_batches(
                opt, criterion, model, data_mean_3d, data_std_3d,
                dim_to_use_3d, dim_to_ignore_3d, data_mean_2d, data_std_2d,
                dim_to_use_2d, dim_to_ignore_2d, current_step, encoder_inputs,
                decoder_outputs, current_epoch)

            print("=============================\n"
                  "Step-time (ms):      %.4f\n"
                  "Val loss avg:        %.4f\n"
                  "Val error avg (mm):  %.2f\n"
                  "=============================" %
                  (1000 * step_time, loss, total_err))

            # NOTE: 'save_flag' and 'f' were used here without being defined;
            # open the same records file as the action-wise branch (assumption)
            # so the per-joint errors can be written once.
            save_flag = True
            f = open("records.txt", 'a')
            for i in range(n_joints):
                # width 5, right-aligned, 2 decimal places
                print("Error in joint {0:02d} (mm): {1:>5.2f}".format(
                    i + 1, joint_err[i]))

                if save_flag is True:
                    f.write("Error in joint {0:02d} (mm): {1:>5.2f} \n".format(
                        i + 1, joint_err[i]))
            print("=============================")

            save_flag = False
            f.close()

    print("done in {0:.2f} ms".format(1000 * (time.time() - start_time)))
    # Reset global time and loss
    step_time, loss = 0, 0
Example #13
def testFunc(opt):
    start_epoch = 0
    print("procrustes          {}".format(opt.procrustes))
    # create model
    print(">>> creating model")
    model = LinearModel(opt.batch_size, opt.predict_14)
    # = refine_2d_model(opt.batch_size,opt.predict_14)
    model = model.cuda()
    model.apply(weight_init)
    model.eval()
    #refine_2d_model = refine_2d_model.cuda()
    #refine_2d_model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0
    ))  #+ sum(p.numel() for p in refine_2d_model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    #refine_2d_model_optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load checkpoint
    print(">>> loading ckpt from '{}'".format(opt.load))
    ckpt = torch.load(opt.load)
    start_epoch = ckpt['epoch']
    err_best = ckpt['err']
    glob_step = ckpt['step']
    model.load_state_dict(ckpt['state_dict'])
    #refine_2d_model.load_state_dict[ckpt['refine_state_dict']]
    optimizer.load_state_dict(ckpt['optimizer'])
    #refine_2d_model_optimizer.load_state_dict(ckpt['refine_optimizer'])
    print(">>> ckpt loaded (epoch: {} | err: {})".format(
        start_epoch, err_best))

    # list of actions
    actions = data_utils.define_actions(opt.action)
    num_actions = len(actions)
    print(">>> actions to use (total: {}):".format(num_actions))
    pprint(actions, indent=4)
    print(">>>")

    # data loading
    print(">>> loading data")

    # Load camera parameters
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(opt.cameras_path, SUBJECT_IDS)

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, opt.data_dir, opt.camera_frame, rcams, opt.predict_14)

    # Read stacked hourglass 2D predictions if use_sh, otherwise use groundtruth 2D projections
    if opt.use_hg:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
            actions, opt.data_dir)
    else:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data(
            actions, opt.data_dir, rcams)

    #gt_train_set_2d, gt_test_set_2d, gt_data_mean_2d, gt_data_std_2d, gt_dim_to_ignore_2d, gt_dim_to_use_2d = data_utils.create_2d_data( actions, opt.data_dir, rcams )

    print("done reading and normalizing data.")

    cudnn.benchmark = True

    # === Testing after this epoch ===
    if opt.evaluateActionWise:
        print("{0:=^12} {1:=^6}".format("Action",
                                        "mm"))  # line of 30 equal signs

        cum_err = 0
        record = ''
        for action in actions:

            print("{0:<12} ".format(action), end="")
            # Get 2d and 3d testing data for this action
            action_test_set_2d = get_action_subset(test_set_2d, action)
            action_test_set_3d = get_action_subset(test_set_3d, action)
            encoder_inputs, decoder_outputs = get_all_batches(
                opt, action_test_set_2d, action_test_set_3d, rcams)

            total_err, joint_err, step_time = evaluate_batches(
                opt, criterion, model, data_mean_3d, data_std_3d,
                dim_to_use_3d, dim_to_ignore_3d, data_mean_2d, data_std_2d,
                dim_to_use_2d, dim_to_ignore_2d, glob_step, encoder_inputs,
                decoder_outputs)
            cum_err = cum_err + total_err

            print("{0:>6.2f}".format(total_err))

            record = record + "{}   :   {}  (mm) \n".format(action, total_err)
        avg_val = cum_err / float(len(actions))
        print("{0:<12} {1:>6.2f}".format("Average", avg_val))
        print("{0:=^19}".format(''))

        f = open(opt.ckpt + "records.txt", 'a')
        f.write("Test --- epoch: {} , avg_error: {}  loss : {} \n".format(
            start_epoch, avg_val, err_best))
        f.write(record)
        f.write("=======================================\n")
        f.close()

    else:

        n_joints = 17 if not (opt.predict_14) else 14

        encoder_inputs, decoder_outputs = get_all_batches(opt,
                                                          test_set_2d,
                                                          test_set_3d,
                                                          rcams,
                                                          training=False)

        total_err, joint_err, step_time = evaluate_batches(
            opt, criterion, model, data_mean_3d, data_std_3d, dim_to_use_3d,
            dim_to_ignore_3d, data_mean_2d, data_std_2d, dim_to_use_2d,
            dim_to_ignore_2d, glob_step, encoder_inputs, decoder_outputs,
            start_epoch)

        print("=============================\n"
              "Step-time (ms):      %.4f\n"
              "Val loss avg:        %.4f\n"
              "Val error avg (mm):  %.2f\n"
              "=============================" %
              (1000 * step_time, loss, total_err))

        # NOTE: 'save_flag' and 'f' were used here without being defined;
        # open the same records file as the action-wise branch (assumption)
        # so the per-joint errors can be written once.
        save_flag = True
        f = open(opt.ckpt + "records.txt", 'a')
        for i in range(n_joints):
            # width 5, right-aligned, 2 decimal places
            print("Error in joint {0:02d} (mm): {1:>5.2f}".format(
                i + 1, joint_err[i]))

            if save_flag is True:
                f.write("Error in joint {0:02d} (mm): {1:>5.2f} \n".format(
                    i + 1, joint_err[i]))
        print("=============================")

        save_flag = False
        f.close()
Example #14
def sample(opt):
    """Get samples from a model and visualize them"""

    actions = data_utils.define_actions(opt.action)

    # Load camera parameters
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rootPath = os.getcwd()
    rcams = cameras.load_cameras(os.path.join(rootPath, opt.cameras_path),
                                 SUBJECT_IDS)

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, opt.data_dir, opt.camera_frame, rcams, opt.predict_14)

    if opt.use_hg:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
            actions, opt.data_dir)
    else:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data(
            actions, opt.data_dir, rcams)
    print("done reading and normalizing data.")

    # create model
    print(">>> creating model")
    model = LinearModel(opt.batch_size, opt.predict_14)
    model = model.cuda()
    model.apply(weight_init)

    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    print(">>> loading ckpt from '{}'".format(opt.load))

    ckpt = torch.load(opt.load)
    model.load_state_dict(ckpt['state_dict'])
    optimizer.load_state_dict(ckpt['optimizer'])
    print("Model loaded")
    model.eval()

    for key2d in test_set_2d.keys():

        (subj, b, fname) = key2d
        print("Subject: {}, action: {}, fname: {}".format(subj, b, fname))

        # keys should be the same if 3d is in camera coordinates
        key3d = key2d if opt.camera_frame else (subj, b, '{0}.h5'.format(
            fname.split('.')[0]))
        key3d = (subj, b, fname[:-3]
                 ) if (fname.endswith('-sh')) and opt.camera_frame else key3d

        enc_in = test_set_2d[key2d]
        n2d, _ = enc_in.shape
        dec_out = test_set_3d[key3d]
        n3d, _ = dec_out.shape
        assert n2d == n3d

        # Split into about-same-size batches
        enc_in = np.array_split(enc_in, n2d // opt.batch_size)
        dec_out = np.array_split(dec_out, n3d // opt.batch_size)
        all_poses_3d = []

        for bidx in range(len(enc_in)):

            # Dropout probability 0 (keep probability 1) for sampling
            dp = 1.0
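            # dp is not referenced again below; dropout is effectively off
            # anyway because model.eval() was called before this loop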
            ei = torch.from_numpy(enc_in[bidx]).float()
            inputs = Variable(ei.cuda())
            outputs = model(inputs)

            # denormalize
            enc_in[bidx] = data_utils.unNormalizeData(enc_in[bidx],
                                                      data_mean_2d,
                                                      data_std_2d,
                                                      dim_to_ignore_2d)
            dec_out[bidx] = data_utils.unNormalizeData(dec_out[bidx],
                                                       data_mean_3d,
                                                       data_std_3d,
                                                       dim_to_ignore_3d)
            poses3d = data_utils.unNormalizeData(outputs.data.cpu().numpy(),
                                                 data_mean_3d, data_std_3d,
                                                 dim_to_ignore_3d)
            all_poses_3d.append(poses3d)

        # Put all the poses together
        enc_in, dec_out, poses3d = map(np.vstack,
                                       [enc_in, dec_out, all_poses_3d])

        # Convert back to world coordinates
        if opt.camera_frame:
            N_CAMERAS = 4
            N_JOINTS_H36M = 32

            # Add global position back
            dec_out = dec_out + np.tile(test_root_positions[key3d],
                                        [1, N_JOINTS_H36M])

            # Load the appropriate camera
            subj, _, sname = key3d

            cname = sname.split('.')[1]  # <-- camera name
            scams = {(subj, c + 1): rcams[(subj, c + 1)]
                     for c in range(N_CAMERAS)}  # cams of this subject
            scam_idx = [scams[(subj, c + 1)][-1] for c in range(N_CAMERAS)
                        ].index(cname)  # index of camera used
            the_cam = scams[(subj, scam_idx + 1)]  # <-- the camera used
            R, T, f, c, k, p, name = the_cam
            assert name == cname

            def cam2world_centered(data_3d_camframe):
                # apply the inverse camera rotation and translation
                data_3d_worldframe = cameras.camera_to_world_frame(
                    data_3d_camframe.reshape((-1, 3)), R, T)
                data_3d_worldframe = data_3d_worldframe.reshape(
                    (-1, N_JOINTS_H36M * 3))
                # subtract the root translation so poses are root-centred
                return data_3d_worldframe - np.tile(data_3d_worldframe[:, :3],
                                                    (1, N_JOINTS_H36M))

            dec_out = cam2world_centered(dec_out)
            poses3d = cam2world_centered(poses3d)

    # Grab a random batch to visualize
    enc_in, dec_out, poses3d = map(np.vstack, [enc_in, dec_out, poses3d])
    idx = np.random.permutation(enc_in.shape[0])
    enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]

    # Visualize random samples
    import matplotlib.gridspec as gridspec

    # 1080p	= 1,920 x 1,080
    fig = plt.figure(figsize=(19.2, 10.8))

    gs1 = gridspec.GridSpec(5, 9)  # 5 rows, 9 columns
    gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
    plt.axis('off')

    subplot_idx, exidx = 1, 1
    nsamples = 15
    for i in np.arange(nsamples):

        # Plot 2d pose
        ax1 = plt.subplot(gs1[subplot_idx - 1])
        p2d = enc_in[exidx, :]
        viz.show2Dpose(p2d, ax1)
        ax1.invert_yaxis()

        # Plot 3d gt
        ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
        p3d = dec_out[exidx, :]
        viz.show3Dpose(p3d, ax2)

        # Plot 3d predictions
        ax3 = plt.subplot(gs1[subplot_idx + 1], projection='3d')
        p3d = poses3d[exidx, :]
        viz.show3Dpose(p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71")

        exidx = exidx + 1
        subplot_idx = subplot_idx + 3

    plt.show()
def main(opt):
    start_epoch = 0
    err_best = 1000
    glob_step = 0
    lr_now = opt.lr
    
    # create model
    print(">>> creating model")
    model = LinearModel(joint_num=opt.joint_num)
    model = model.cuda()
    model.apply(weight_init)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.MSELoss(reduction='mean').cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # load ckpt
    if opt.load:
        print(">>> loading ckpt from '{}'".format(opt.load))
        ckpt = torch.load(opt.load)
        start_epoch = ckpt['epoch']
        lr_now = ckpt['lr']
        glob_step = ckpt['step']
        err_best = ckpt['err']
        mean_pose = ckpt['mean_pose']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt loaded (epoch: {} | err: {})".format(start_epoch, err_best))
    
    # data loading
    print(">>> loading data")
    train_data = torch.load(opt.data_dir+'_train.pth')
    mean_pose = np.mean(train_data['tgt'], axis=0)
    mean_pose = np.reshape(mean_pose, (opt.joint_num, 3))
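    # mean of all training target poses, reshaped to (joint_num, 3); it is
    # written into the best checkpoint below under the key 'mean_pose'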
    test_data = CMU(data_path=opt.data_dir + '_test.pth', use_hg=opt.use_hg)
    train_data = CMU(data_path=opt.data_dir + '_train.pth', use_hg=opt.use_hg)

    test_loader = DataLoader(dataset=test_data, batch_size=opt.test_batch,
                             shuffle=False, num_workers=opt.job)
    train_loader = DataLoader(dataset=train_data, batch_size=opt.train_batch,
                              shuffle=True, num_workers=opt.job)
    print(">>> data loaded !")
    
    cudnn.benchmark = True
    for epoch in range(start_epoch, opt.epochs):
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch, lr_now))
        
        # train
        glob_step, lr_now, loss_train = train(
            train_loader, model, criterion, optimizer, opt.joint_num, 
            lr_init=opt.lr, lr_now=lr_now, glob_step=glob_step, lr_decay=opt.lr_decay, gamma=opt.lr_gamma,
            max_norm=opt.max_norm)
        print("loss_train:", loss_train)

        # test
        outputs_use, loss_test, err_test = test(test_loader, model, criterion, opt.joint_num, procrustes=opt.procrustes)
        
        # save best checkpoint
        is_best = err_test < err_best
        err_best = min(err_test, err_best)
        save_path = ''
        if is_best:
            print("Saved Check Point (error : {})".format(err_test))
            checkpoint = {'epoch': epoch,
                            'lr': lr_now,
                            'step': glob_step,
                            'err': err_best,
                            'state_dict': model.state_dict(),
                            'optimizer': optimizer.state_dict(),
                            'mean_pose': mean_pose}
            if save_path != '' and os.path.exists(save_path):
                os.remove(save_path)
            save_path = opt.ckpt+'_best.chkpt'
            torch.save(checkpoint, save_path)
        
        # write loss to log file
        log_train_file = opt.log + 'train.log'
        with open(log_train_file, 'a') as log_tr:
            log_tr.write('{},{},{},{},{}\n'.format(epoch, lr_now, loss_train, loss_test, err_test))
Example #16
def run_model(opt):

    # get misc file used for the specified data format
    misc = DatasetMisc(opt['dataset_type'])

    # class that takes care of plotting
    pose_plotter = PosePlotter(pose_config['KEYPOINT_NAMES'],
                               pose_config['SKELETON_NAMES'],
                               pose_config['KEYPOINT_COLORS'],
                               pose_config['SKELETON_COLORS'])

    # load checkpoint file
    ckpt = torch.load(opt['load'])
    stat_2d = ckpt['stat_2d']

    # load the pretrained model
    print("\n==================Model===================")
    print("Loading Pretrained Model:")
    print(" - Linear size: [{}]".format(opt['linear_size']))
    print(" - Num stages:  [{}]".format(opt['linear_size']))
    print("==========================================\n")

    pretrained_model = LinearModel(misc.NUM_KEYPOINTS_2D * 2,
                                   misc.NUM_KEYPOINTS_3D * 3,
                                   opt['linear_size'], opt['num_stage'],
                                   opt['dropout'], opt['predict_scale'],
                                   opt['scale_range'], opt['unnorm_op'],
                                   opt['unnorm_init'])

    pretrained_model = pretrained_model.cuda()
    pretrained_model.load_state_dict(ckpt['state_dict'])
    pretrained_model.eval()

    # load the data from a numpy file
    print("\n==================Data====================")
    print("Loading Data:")
    print(" - Data path:  [{}]".format(opt['data_dir']))
    print(" - Data type:  [{}]".format(opt['dataset_type']))

    # np.load requires a file handle opened in binary mode
    with open(opt['data_dir'], 'rb') as fp:
        data = np.load(fp)
    num_frames, num_coords = data.shape
    num_kpts = int(num_coords / 2)
    print(" - Num frames: [{}]".format(num_frames))
    print(" - Num kpts:   [{}]".format(num_kpts))
    print("==========================================\n")

    # subtract root if specified
    if opt['subtract_2d_root']:
        root_idx_2d, _ = misc.get_skeleton_root_idx()
        # subtract the 2d skeleton center from all coordinates so it is always in 0,0
        data_2d_root = data[:, [2 * root_idx_2d, 2 * root_idx_2d + 1]]
        data -= np.tile(data_2d_root, num_kpts)
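        # np.tile repeats the root (x, y) once per keypoint, so the line above
        # re-centres every 2d keypoint on the skeleton root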

    # normalize the inputs according to the stored mean and std
    data_mean = stat_2d['mean']
    data_std = stat_2d['std']

    norm_data = (data - data_mean[np.newaxis, ...]) / data_std[np.newaxis, ...]
    norm_data[np.isnan(norm_data)] = 0
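    # zero-variance dimensions yield 0/0 = NaN in the division above; reset
    # them to 0 so they do not affect the model input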
    norm_data = norm_data.astype(np.float32)

    seq_dataset = TensorDataset(torch.from_numpy(norm_data),
                                torch.from_numpy(data))
    seq_loader = DataLoader(dataset=seq_dataset,
                            batch_size=100,
                            shuffle=False,
                            num_workers=4,
                            drop_last=False)

    # predict 3d pose using the model
    in_2d_poses = []
    out_3d_poses = []

    for indx, (norm_data, data) in enumerate(seq_loader):

        model_inps = Variable(norm_data.cuda())
        model_outs, model_scale = pretrained_model(model_inps)

        in_2d_poses.append(data.numpy())
        out_3d_poses.append(model_outs.data.cpu().numpy())

    in_2d_poses = np.vstack(in_2d_poses)
    out_3d_poses = np.vstack(out_3d_poses)

    num_frames = out_3d_poses.shape[0]
    num_kpts = int(out_3d_poses.shape[1] / 3)
    print("\n==================Outputs====================")
    print("Predicted Data:")
    print(" - Num frames:    [{}]".format(num_frames))
    print(" - Num keypoints: [{}]".format(num_kpts))

    f_no = np.random.randint(num_frames)

    ########################################################################
    ## load the 2d groundtruth keypoints in the frame
    kpts_2d_x = in_2d_poses[f_no, 0::2]
    kpts_2d_y = in_2d_poses[f_no, 1::2]
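    # rows of in_2d_poses are interleaved as [x0, y0, x1, y1, ...], hence the
    # stride-2 slices above (and the stride-3 slices for the 3d outputs below)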

    ########################################################################
    ## get 3d predicted keypoints in the frame
    kpts_3d_x = out_3d_poses[f_no, 0::3]
    kpts_3d_y = out_3d_poses[f_no, 1::3]
    kpts_3d_z = out_3d_poses[f_no, 2::3]

    ########################################################################
    ## set the visibility flags (currently all keypoints are assumed visible)
    kpts_v = np.ones(np.shape(kpts_2d_x))

    pose_plotter.plot_2d(kpts_2d_x, kpts_2d_y, kpts_v)

    pose_plotter.plot_3d(kpts_3d_x, kpts_3d_y, kpts_3d_z, kpts_v)

    pose_plotter.plot_2d_3d(kpts_2d_x, kpts_2d_y, kpts_3d_x, kpts_3d_y,
                            kpts_3d_z, kpts_v)