Code example #1
def validate(LOGGER, data_loader, resnet, autoencoder, device, epoch):

    # ------------------- Loss -------------------

    Loss2D = HeatmapLoss()

    # ------------------- Evaluation -------------------
    eval_body = evaluate.EvalBody()
    eval_upper = evaluate.EvalUpperBody()
    eval_lower = evaluate.EvalLowerBody()

    # ------------------- validate -------------------
    val_losses = AverageMeter()
    Loss2D = Loss2D.to(device)  # move the loss module to the device once, not per batch
    for it, (img, p2d, p3d, heatmap, action) in enumerate(data_loader):
        img = img.to(device)
        p2d = p2d.to(device)
        p3d = p3d.to(device)
        heatmap = heatmap.to(device)

        heatmap2d_hat = resnet(img)  # torch.Size([16, 15, 48, 48])
        p3d_hat, heatmap2d_recon = autoencoder(heatmap2d_hat)

        loss2d = Loss2D(heatmap, heatmap2d_hat).mean()
        val_losses.update(loss2d.item(), img.size(0))  # track the 2D validation loss

        # Evaluate results using different evaluation metrics
        y_output = p3d_hat.data.cpu().numpy()
        y_target = p3d.data.cpu().numpy()

        eval_body.eval(y_output, y_target, action)
        eval_upper.eval(y_output, y_target, action)
        eval_lower.eval(y_output, y_target, action)

    # ------------------- Save results -------------------

    LOGGER.info('Saving evaluation results...')
    res = {
        'FullBody': eval_body.get_results(),
        'UpperBody': eval_upper.get_results(),
        'LowerBody': eval_lower.get_results()
    }
    LOGGER.info(res)
    utils_io.write_json(
        os.path.join(LOGGER.logfile_dir, f'eval_res_{epoch}' + '.json'), res)
Code example #2
def validate(LOGGER, data_loader, resnet, autoencoder, device, epoch):

    # ------------------- Loss -------------------

    # ------------------- Evaluation -------------------
    eval_body = evaluate.EvalBody()
    eval_upper = evaluate.EvalUpperBody()
    eval_lower = evaluate.EvalLowerBody()

    # ------------------- validate -------------------
    with torch.no_grad():
        for it, (img, p2d, p3d, heatmap, action) in enumerate(data_loader):
            img = img.to(device)
            p3d = p3d.to(device)

            heatmap2d_hat = resnet(img)
            p3d_hat, _ = autoencoder(heatmap2d_hat)

            # Evaluate results using different evaluation metrics
            y_output = p3d_hat.data.cpu().numpy()
            y_target = p3d.data.cpu().numpy()

            eval_body.eval(y_output, y_target, action)
            eval_upper.eval(y_output, y_target, action)
            eval_lower.eval(y_output, y_target, action)

        # ------------------- Save results -------------------

        LOGGER.info('===========Evaluation on Val data==========')
        res = {
            'FullBody': eval_body.get_results(),
            'UpperBody': eval_upper.get_results(),
            'LowerBody': eval_lower.get_results()
        }
        LOGGER.info(pprint.pformat(res))

        # utils_io.write_json(os.path.join(LOGGER.logfile_dir, f'eval_val_{epoch}'+'.json'), res)

    return eval_body.get_results()['All']
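
evaluate.EvalBody and its upper-/lower-body variants are project classes that are not shown here. Judging only from how they are called in these examples (eval(y_output, y_target, action) accumulating batch results, get_results() returning a dict with an 'All' entry), a hypothetical minimal accumulator could look like the sketch below; the real class also reports per-action and per-joint-group breakdowns.

import numpy as np


class EvalBodySketch:
    """Hypothetical MPJPE accumulator matching the call pattern above."""

    def __init__(self):
        self.errors = []

    def eval(self, y_output, y_target, actions=None):
        # y_output, y_target: (batch, joints, 3); mean per-joint position error
        err = np.linalg.norm(y_output - y_target, axis=-1).mean(axis=-1)
        self.errors.extend(err.tolist())

    def get_results(self):
        return {'All': float(np.mean(self.errors))}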
Code example #3
def main():

    args = arguments.parse_args()
    LOGGER = ConsoleLogger('Finetune', 'train')
    logdir = LOGGER.getLogFolder()
    LOGGER.info(args)
    LOGGER.info(config)

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    # ------------------- Data loader -------------------

    data_transform = transforms.Compose([
        trsf.ImageTrsf(),  # normalize
        trsf.Joints3DTrsf(),  # centerize
        trsf.ToTensor()
    ])  # to tensor

    train_data = Mocap(config.dataset.train,
                       SetType.TRAIN,
                       transform=data_transform)
    train_data_loader = DataLoader(train_data,
                                   batch_size=args.batch_size,
                                   shuffle=config.data_loader.shuffle,
                                   num_workers=8)

    # val_data = Mocap(
    #     config.dataset.val,
    #     SetType.VAL,
    #     transform=data_transform)
    # val_data_loader = DataLoader(
    #     val_data,
    #     batch_size=2,
    #     shuffle=config.data_loader.shuffle,
    #     num_workers=8)

    test_data = Mocap(config.dataset.test,
                      SetType.TEST,
                      transform=data_transform)
    test_data_loader = DataLoader(test_data,
                                  batch_size=2,
                                  shuffle=config.data_loader.shuffle,
                                  num_workers=8)

    # ------------------- Model -------------------
    with open('model/model.yaml') as fin:
        model_cfg = edict(yaml.safe_load(fin))
    resnet = pose_resnet.get_pose_net(model_cfg, True)
    Loss2D = HeatmapLoss()  # same as MSELoss()
    autoencoder = encoder_decoder.AutoEncoder(args.batch_norm,
                                              args.denis_activation)
    # LossHeatmapRecon = HeatmapLoss()
    LossHeatmapRecon = HeatmapLossSquare()
    # Loss3D = nn.MSELoss()
    Loss3D = PoseLoss()
    LossLimb = LimbLoss()

    if torch.cuda.is_available():
        device = torch.device(f"cuda:{args.gpu}")
        resnet = resnet.cuda(device)
        Loss2D = Loss2D.cuda(device)
        autoencoder = autoencoder.cuda(device)
        LossHeatmapRecon.cuda(device)
        Loss3D.cuda(device)
        LossLimb.cuda(device)

    # ------------------- optimizer -------------------
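    # When args.freeze_2d_model is set, only the autoencoder parameters are
    # optimised; the resnet still runs forward/backward but its weights stay fixed.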
    if args.freeze_2d_model:
        optimizer = optim.Adam(autoencoder.parameters(), lr=args.learning_rate)
    else:
        optimizer = optim.Adam(itertools.chain(resnet.parameters(),
                                               autoencoder.parameters()),
                               lr=args.learning_rate)
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=args.step_size,
                                          gamma=0.1)
    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer)

    # ------------------- load model -------------------
    if args.load_model:
        if not os.path.isfile(args.load_model):
            raise ValueError(f"No checkpoint found at {args.load_model}")
        checkpoint = torch.load(args.load_model)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        resnet.load_state_dict(checkpoint['resnet_state_dict'])
        autoencoder.load_state_dict(checkpoint['autoencoder_state_dict'])
        scheduler.load_state_dict(checkpoint['scheduler'])

    if args.load_2d_model:
        if not os.path.isfile(args.load_2d_model):
            raise ValueError(f"No checkpoint found at {args.load_2d_model}")
        checkpoint = torch.load(args.load_2d_model, map_location=device)
        resnet.load_state_dict(checkpoint['resnet_state_dict'])

    if args.load_3d_model:
        if not os.path.isfile(args.load_3d_model):
            raise ValueError(f"No checkpoint found at {args.load_3d_model}")
        checkpoint = torch.load(args.load_3d_model, map_location=device)
        autoencoder.load_state_dict(checkpoint['autoencoder_state_dict'])

    # ------------------- tensorboard -------------------
    train_global_steps = 0
    writer_dict = {
        'writer': SummaryWriter(log_dir=logdir),
        'train_global_steps': train_global_steps
    }

    best_perf = float('inf')
    best_model = False
    # ------------------- run the model -------------------
    for epoch in range(args.epochs):
        with torch.autograd.set_detect_anomaly(True):
            LOGGER.info(f'====Training epoch {epoch}====')
            losses = AverageMeter()
            batch_time = AverageMeter()

            # ------------------- Evaluation -------------------
            eval_body = evaluate.EvalBody()
            eval_upper = evaluate.EvalUpperBody()
            eval_lower = evaluate.EvalLowerBody()

            resnet.train()
            autoencoder.train()

            end = time.time()
            for it, (img, p2d, p3d, heatmap,
                     action) in enumerate(train_data_loader, 0):

                img = img.to(device)
                p3d = p3d.to(device)
                heatmap = heatmap.to(device)

                heatmap2d_hat = resnet(img)  # torch.Size([16, 15, 48, 48])
                p3d_hat, heatmap2d_recon = autoencoder(heatmap2d_hat)

                loss2d = Loss2D(heatmap2d_hat, heatmap).mean()
                loss_recon = LossHeatmapRecon(heatmap2d_recon,
                                              heatmap2d_hat).mean()
                loss_3d = Loss3D(p3d_hat, p3d).mean()
                loss_cos, loss_len = LossLimb(p3d_hat, p3d)
                loss_cos = loss_cos.mean()
                loss_len = loss_len.mean()
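                # Weighted multi-task objective: 2D heatmap loss, heatmap
                # reconstruction, 3D pose loss, limb direction (the cosine term
                # is maximised by subtraction) and limb length.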

                loss = (args.lambda_2d * loss2d
                        + args.lambda_recon * loss_recon
                        + args.lambda_3d * loss_3d
                        - args.lambda_cos * loss_cos
                        + args.lambda_len * loss_len)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                batch_time.update(time.time() - end)
                losses.update(loss.item(), img.size(0))

                if it % config.train.PRINT_FREQ == 0:
                    # logging messages
                    msg = 'Epoch: [{0}][{1}/{2}]\t' \
                          'Batch Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
                          'Speed {speed:.1f} samples/s\t' \
                          'Loss {loss.val:.5f} ({loss.avg:.5f})\t'.format(
                        epoch, it, len(train_data_loader), batch_time=batch_time,
                        speed=img.size(0) / batch_time.val,  # averaged within batch
                        loss=losses)
                    LOGGER.info(msg)

                    writer = writer_dict['writer']
                    global_steps = writer_dict['train_global_steps']
                    lr = [
                        group['lr']
                        for group in scheduler.optimizer.param_groups
                    ]
                    writer.add_scalar('learning_rate', lr[0], global_steps)  # add_scalar expects a scalar
                    writer.add_scalar('train_loss', losses.val, global_steps)
                    writer.add_scalar('batch_time', batch_time.val,
                                      global_steps)
                    writer.add_scalar('losses/loss_2d', loss2d, global_steps)
                    writer.add_scalar('losses/loss_recon', loss_recon,
                                      global_steps)
                    writer.add_scalar('losses/loss_3d', loss_3d, global_steps)
                    writer.add_scalar('losses/loss_cos', loss_cos,
                                      global_steps)
                    writer.add_scalar('losses/loss_len', loss_len,
                                      global_steps)
                    image_grid = draw2Dpred_and_gt(img, heatmap2d_hat,
                                                   (368, 368))
                    writer.add_image('predicted_heatmaps', image_grid,
                                     global_steps)
                    image_grid_recon = draw2Dpred_and_gt(
                        img, heatmap2d_recon, (368, 368))
                    writer.add_image('reconstructed_heatmaps',
                                     image_grid_recon, global_steps)
                    writer_dict['train_global_steps'] = global_steps + 1

                    # ------------------- evaluation on training data -------------------

                    # Evaluate results using different evaluation metrics
                    y_output = p3d_hat.data.cpu().numpy()
                    y_target = p3d.data.cpu().numpy()

                    eval_body.eval(y_output, y_target, action)
                    eval_upper.eval(y_output, y_target, action)
                    eval_lower.eval(y_output, y_target, action)

                end = time.time()

            # ------------------- Save results -------------------
            checkpoint_dir = os.path.join(logdir, 'checkpoints')
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            LOGGER.info('=> saving checkpoint to {}'.format(checkpoint_dir))
            states = dict()
            states['resnet_state_dict'] = resnet.state_dict()
            states['autoencoder_state_dict'] = autoencoder.state_dict()
            states['optimizer_state_dict'] = optimizer.state_dict()
            states['scheduler'] = scheduler.state_dict()

            torch.save(states,
                       os.path.join(checkpoint_dir, f'checkpoint_{epoch}.tar'))

            res = {
                'FullBody': eval_body.get_results(),
                'UpperBody': eval_upper.get_results(),
                'LowerBody': eval_lower.get_results()
            }

            LOGGER.info('===========Evaluation on Train data==========')
            LOGGER.info(pprint.pformat(res))

            # utils_io.write_json(config.eval.output_file, res)

            # ------------------- validation -------------------
            resnet.eval()
            autoencoder.eval()
            val_loss = validate(LOGGER, test_data_loader, resnet, autoencoder,
                                device, epoch)
            if val_loss < best_perf:
                best_perf = val_loss
                best_model = True

            if best_model:
                shutil.copyfile(
                    os.path.join(checkpoint_dir, f'checkpoint_{epoch}.tar'),
                    os.path.join(checkpoint_dir, 'model_best.tar'))
                best_model = False

            # scheduler.step(val_loss)
            scheduler.step()
    LOGGER.info('Done.')
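
AverageMeter is another project utility used throughout these examples. A minimal sketch consistent with the update(val, n) / .val / .avg usage above (the project's own class may differ in details):

class AverageMeter:
    """Tracks the latest value and a running average."""

    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count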
Code example #4
def main():
    """Main"""
    args = parse_args()
    print('Starting demo...')
    device = torch.device(f"cuda:{args.gpu}")
    LOGGER.info(args)

    # ------------------- Data loader -------------------

    data_transform = transforms.Compose([
        trsf.ImageTrsf(),  # normalize
        trsf.Joints3DTrsf(),  # centerize
        trsf.ToTensor()])  # to tensor

    data = Mocap(
        # config.dataset.test,
        config.dataset[args.data],
        SetType.TEST,
        transform=data_transform)
    data_loader = DataLoader(
        data,
        batch_size=16,
        shuffle=config.data_loader.shuffle,
        num_workers=8)

    # ------------------- Evaluation -------------------

    eval_body = evaluate.EvalBody()
    eval_upper = evaluate.EvalUpperBody()
    eval_lower = evaluate.EvalLowerBody()

    # ------------------- Model -------------------
    autoencoder = encoder_decoder.AutoEncoder()

    if args.load_model:
        if not os.path.isfile(args.load_model):
            raise ValueError(f"No checkpoint found at {args.load_model}")
        checkpoint = torch.load(args.load_model, map_location=device)
        autoencoder.load_state_dict(checkpoint['autoencoder_state_dict'])

    else:
        raise ValueError("No checkpoint!")
    autoencoder.cuda(device)
    autoencoder.eval()


    # ------------------- Visualization setup -------------------
    fig = plt.figure(figsize=(19.2, 10.8))
    plt.axis('off')
    subplot_idx = 1

    # ------------------- Read dataset frames -------------------
    with torch.no_grad():
        for it, (img, p2d, p3d, heatmap, action) in enumerate(data_loader):

            print('Iteration: {}'.format(it))
            print('Images: {}'.format(img.shape))
            print('p2ds: {}'.format(p2d.shape))
            print('p3ds: {}'.format(p3d.shape))
            print('Actions: {}'.format(action))

            p3d = p3d.to(device)
            heatmap = heatmap.to(device)

            p3d_hat, heatmap_hat = autoencoder(heatmap)

            # Evaluate results using different evaluation metrics
            y_output = p3d_hat.data.cpu().numpy()
            y_target = p3d.data.cpu().numpy()

            eval_body.eval(y_output, y_target, action)
            eval_upper.eval(y_output, y_target, action)
            eval_lower.eval(y_output, y_target, action)

            # ------------------- Visualize 3D pose -------------------
            if subplot_idx <= 32:
                # ax1 = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                ax1 = fig.add_subplot(4, 8, subplot_idx, projection='3d')
                show3Dpose(p3d[0].cpu().numpy(), ax1, True)

                # Plot 3d gt
                # ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
                ax2 = fig.add_subplot(4, 8, subplot_idx+1, projection='3d')
                show3Dpose(p3d_hat[0].detach().cpu().numpy(), ax2, False)

                subplot_idx += 2
            if subplot_idx == 33:
                plt.savefig(os.path.join(LOGGER.logfile_dir, 'vis.png'))

            # ------------------- Visualize 2D heatmap -------------------
            if it < 32:
                img_grid = draw2Dpred_and_gt(img, heatmap, (368,368))  # tensor
                img_grid_hat = draw2Dpred_and_gt(img, heatmap_hat, (368,368), p2d)  # tensor
                img_grid = img_grid.numpy().transpose(1,2,0)
                img_grid_hat = img_grid_hat.numpy().transpose(1,2,0)
                cv2.imwrite(os.path.join(LOGGER.logfile_dir, f'gt_{it}.jpg'), img_grid)
                cv2.imwrite(os.path.join(LOGGER.logfile_dir, f'pred_{it}.jpg'), img_grid_hat)

                # save_batch_heatmaps(img[0:1], heatmap_hat[0:1], os.path.join(LOGGER.logfile_dir,"pred_combine.jpg"))
                # save_batch_heatmaps(img[0:1], heatmap[0:1], os.path.join(LOGGER.logfile_dir, "gt_combine.jpg"))


    # ------------------- Save results -------------------

    LOGGER.info('Saving evaluation results...')
    res = {'FullBody': eval_body.get_results(),
           'UpperBody': eval_upper.get_results(),
           'LowerBody': eval_lower.get_results()}

    LOGGER.info(pprint.pformat(res))

    print('Done.')
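
A note on the cv2.imwrite calls above: if draw2Dpred_and_gt returns a float CHW tensor in [0, 1] with RGB channel order (an assumption; the helper is not shown in these examples), OpenCV expects uint8 HWC data in BGR order, so a conversion along these lines would be needed before writing.

import cv2
import numpy as np


def to_cv2_image(grid_tensor):
    # CHW float tensor in [0, 1] -> HWC uint8 BGR image for cv2.imwrite
    img = grid_tensor.numpy().transpose(1, 2, 0)
    img = (np.clip(img, 0.0, 1.0) * 255).astype(np.uint8)
    return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)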
Code example #5
def main():
    """Main"""

    LOGGER.info('Starting demo...')

    # -----------------------------------------------------------
    # -----------------------------------------------------------
    # --------------------- Training Phase ----------------------
    # -----------------------------------------------------------
    # -----------------------------------------------------------
    LOGGER.info('Training Lifting...')

    # ------------------- Data loader -------------------
    train_data_transform = transforms.Compose(
        [trsf.ImageTrsf(),
         trsf.Joints3DTrsf(),
         trsf.ToTensor()])

    # load the training split
    train_data = Mocap(config_lifting_singleBranch.dataset.train,
                       SetType.TRAIN,
                       transform=train_data_transform)
    train_data_loader = DataLoader(
        train_data,
        batch_size=config_lifting_singleBranch.train_data_loader.batch_size,
        shuffle=config_lifting_singleBranch.train_data_loader.shuffle,
        num_workers=config_lifting_singleBranch.train_data_loader.workers)

    # ------------------- Build Model -------------------
    # backbone = resnet101()
    encoder = HeatmapEncoder()
    decoder = PoseDecoder()
    # reconstructer = HeatmapReconstructer()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:
        LOGGER.info(
            str("Let's use " + str(torch.cuda.device_count()) + " GPUs!"))
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        # backbone = nn.DataParallel(backbone)
        encoder = nn.DataParallel(encoder)
        decoder = nn.DataParallel(decoder)
        # reconstructer = nn.DataParallel(reconstructer)
    # backbone = backbone.cuda()
    encoder = encoder.cuda()
    decoder = decoder.cuda()
    # reconstructer = reconstructer.cuda()

    # Load or Init Model Weights
    # if config_lifting_singleBranch.train_setting.backbone_path:
    #     backbone.load_state_dict(torch.load(config_lifting_singleBranch.train_setting.backbone_path))
    # else:
    #     backbone.apply(init_weights)
    if config_lifting_singleBranch.train_setting.encoder_path:
        encoder.load_state_dict(
            torch.load(config_lifting_singleBranch.train_setting.encoder_path))
        # encoder = torch.load(config_lifting_singleBranch.train_setting.encoder_path)
        LOGGER.info('Encoder Weight Loaded!')
    else:
        encoder.apply(init_weights)
        LOGGER.info('Encoder Weight Initialized!')
    if config_lifting_singleBranch.train_setting.decoder_path:
        decoder.load_state_dict(
            torch.load(config_lifting_singleBranch.train_setting.decoder_path))
        # decoder = torch.load(config_lifting_singleBranch.train_setting.decoder_path)
        LOGGER.info('Decoder Weight Loaded!')
    else:
        decoder.apply(init_weights)
        LOGGER.info('Decoder Weight Initialized!')
    # if config_lifting_singleBranch.train_setting.reconstructer_path:
    #     reconstructer.load_state_dict(torch.load(config_lifting_singleBranch.train_setting.reconstructer_path))
    #     # reconstructer = torch.load(config_lifting_singleBranch.train_setting.reconstructer_path)
    #     LOGGER.info('Reconstructer Weight Loaded!')
    # else:
    #     reconstructer.apply(init_weights)
    #     LOGGER.info('Reconstructer Weight Initialized!')

    # ------------------- Build Loss & Optimizer -------------------
    # Build Loss
    pose_prediction_cosine_similarity_loss_func = PosePredictionCosineSimilarityPerJointLoss()
    pose_prediction_l1_loss_func = PosePredictionDistancePerJointLoss()
    pose_prediction_l2_loss_func = PosePredictionMSELoss()
    # heatmap_reconstruction_loss_func = HeatmapReconstructionMSELoss()

    pose_prediction_cosine_similarity_loss_func = pose_prediction_cosine_similarity_loss_func.cuda()
    pose_prediction_l1_loss_func = pose_prediction_l1_loss_func.cuda()
    pose_prediction_l2_loss_func = pose_prediction_l2_loss_func.cuda()
    # heatmap_reconstruction_loss_func = heatmap_reconstruction_loss_func.cuda()

    # Build Optimizer
    optimizer = optim.Adam(
        [
            # {"params": backbone.parameters()},
            {
                "params": encoder.parameters()
            },
            {
                "params": decoder.parameters()
            },
            # {"params": reconstructer.parameters()}
        ],
        lr=0.001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)

    # Variable for Final Model Selection
    # errorMin = 100
    # errorMinIsUpdatedInThisEpoch = False
    # ------------------- Read dataset frames -------------------
    for ep in range(config_lifting_singleBranch.train_setting.epoch):

        # ------------------- Evaluation -------------------
        eval_body_train = evaluate.EvalBody()
        # eval_upper_train = evaluate.EvalUpperBody()
        # eval_lower_train = evaluate.EvalLowerBody()
        # eval_neck_train = evaluate.EvalNeck()
        # eval_head_train = evaluate.EvalHead()
        # eval_left_arm_train = evaluate.EvalLeftArm()
        # eval_left_elbow_train = evaluate.EvalLeftElbow()
        # eval_left_hand_train = evaluate.EvalLeftHand()
        # eval_right_arm_train = evaluate.EvalRightArm()
        # eval_right_elbow_train = evaluate.EvalRightElbow()
        # eval_right_hand_train = evaluate.EvalRightHand()
        # eval_left_leg_train = evaluate.EvalLeftLeg()
        # eval_left_knee_train = evaluate.EvalLeftKnee()
        # eval_left_foot_train = evaluate.EvalLeftFoot()
        # eval_left_toe_train = evaluate.EvalLeftToe()
        # eval_right_leg_train = evaluate.EvalRightLeg()
        # eval_right_knee_train = evaluate.EvalRightKnee()
        # eval_right_foot_train = evaluate.EvalRightFoot()
        # eval_right_toe_train = evaluate.EvalRightToe()

        # backbone.train()
        encoder.train()
        decoder.train()
        # reconstructer.train()

        # Averagemeter for Epoch
        lossAverageMeter = AverageMeter()
        # fullBodyErrorAverageMeter = AverageMeter()
        # upperBodyErrorAverageMeter = AverageMeter()
        # lowerBodyErrorAverageMeter = AverageMeter()
        # heatmapPredictionErrorAverageMeter = AverageMeter()
        PosePredictionCosineSimilarityPerJointErrorAverageMeter = AverageMeter()
        PosePredictionDistancePerJointErrorAverageMeter = AverageMeter()
        PosePredictionMSEErrorAverageMeter = AverageMeter()
        # heatmapReconstructionErrorAverageMeter = AverageMeter()
        # neckErrorAverageMeter = AverageMeter()
        # headErrorAverageMeter = AverageMeter()
        # leftArmErrorAverageMeter = AverageMeter()
        # leftElbowErrorAverageMeter = AverageMeter()
        # leftHandErrorAverageMeter = AverageMeter()
        # rightArmErrorAverageMeter = AverageMeter()
        # rightElbowErrorAverageMeter = AverageMeter()
        # rightHandErrorAverageMeter = AverageMeter()
        # leftLegErrorAverageMeter = AverageMeter()
        # leftKneeErrorAverageMeter = AverageMeter()
        # leftFootErrorAverageMeter = AverageMeter()
        # leftToeErrorAverageMeter = AverageMeter()
        # rightLegErrorAverageMeter = AverageMeter()
        # rightKneeErrorAverageMeter = AverageMeter()
        # rightFootErrorAverageMeter = AverageMeter()
        # rightToeErrorAverageMeter = AverageMeter()
        lossAverageMeterTrain = AverageMeter()
        # fullBodyErrorAverageMeterTrain = AverageMeter()
        # upperBodyErrorAverageMeterTrain = AverageMeter()
        # lowerBodyErrorAverageMeterTrain = AverageMeter()
        # heatmapPredictionErrorAverageMeterTrain = AverageMeter()
        PosePredictionCosineSimilarityPerJointErrorAverageMeterTrain = AverageMeter()
        PosePredictionDistancePerJointErrorAverageMeterTrain = AverageMeter()
        PosePredictionMSEErrorAverageMeterTrain = AverageMeter()
        # heatmapReconstructionErrorAverageMeterTrain = AverageMeter()
        # neckErrorAverageMeterTrain = AverageMeter()
        # headErrorAverageMeterTrain = AverageMeter()
        # leftArmErrorAverageMeterTrain = AverageMeter()
        # leftElbowErrorAverageMeterTrain = AverageMeter()
        # leftHandErrorAverageMeterTrain = AverageMeter()
        # rightArmErrorAverageMeterTrain = AverageMeter()
        # rightElbowErrorAverageMeterTrain = AverageMeter()
        # rightHandErrorAverageMeterTrain = AverageMeter()
        # leftLegErrorAverageMeterTrain = AverageMeter()
        # leftKneeErrorAverageMeterTrain = AverageMeter()
        # leftFootErrorAverageMeterTrain = AverageMeter()
        # leftToeErrorAverageMeterTrain = AverageMeter()
        # rightLegErrorAverageMeterTrain = AverageMeter()
        # rightKneeErrorAverageMeterTrain = AverageMeter()
        # rightFootErrorAverageMeterTrain = AverageMeter()
        # rightToeErrorAverageMeterTrain = AverageMeter()
        for it, (img, p2d, p3d, action,
                 heatmap) in tqdm(enumerate(train_data_loader),
                                  total=len(train_data_loader)):
            # p2d appears to hold the (x, y) pixel coordinates of each joint;
            # the image origin (0, 0) is the top-left corner.
            # p3d appears to hold each joint's (x, y, z) coordinates with the
            # neck taken as the origin (0, 0, 0).
            # The joint order is defined in config_lifting_singleBranch.py.

            # LOGGER.info('Iteration: {}'.format(it))
            # LOGGER.info('Images: {}'.format(img.shape))  # (Batch, Channel, Height(y), Width(x))
            # LOGGER.info('p2dShapes: {}'.format(p2d.shape))  # (Width, Height)
            # # LOGGER.info('p2ds: {}'.format(p2d))
            # LOGGER.info('p3dShapes: {}'.format(p3d.shape))  # (^x, ^y, ^z)
            # # LOGGER.info('p3ds: {}'.format(p3d))
            # LOGGER.info('Actions: {}'.format(action))
            # LOGGER.info('heatmapShapes: {}'.format(heatmap.shape))

            # -----------------------------------------------------------
            # ------------------- Run your model here -------------------
            # -----------------------------------------------------------
            optimizer.zero_grad()

            # Move Tensors to GPUs
            # img = img.cuda()
            p3d = p3d.cuda()
            heatmap = heatmap.cuda()

            # Forward
            # predicted_heatmap = backbone(img)
            latent = encoder(heatmap)
            predicted_pose = decoder(latent)
            # reconstructed_heatmap = reconstructer(latent)

            # Loss Calculation
            # heatmap_prediction_loss = heatmap_prediction_loss_func(predicted_heatmap, heatmap)
            p3d_for_loss = torch.cat(
                (p3d[:, 4:6, :], p3d[:, 7:10, :], p3d[:, 11:, :]),
                dim=1)  # joints up to index 13 are the upper body
            p3d_for_loss = torch.reshape(p3d_for_loss, (-1, 48))
            pose_prediction_cosine_similarity_loss = pose_prediction_cosine_similarity_loss_func(
                predicted_pose, p3d_for_loss)
            pose_prediction_l1_loss = pose_prediction_l1_loss_func(
                predicted_pose, p3d_for_loss)
            pose_prediction_l2_loss = pose_prediction_l2_loss_func(
                predicted_pose, p3d_for_loss)
            pose_prediction_loss = pose_prediction_l2_loss - 0.01 * pose_prediction_cosine_similarity_loss + 0.5 * pose_prediction_l1_loss
            # heatmap_reconstruction_loss = heatmap_reconstruction_loss_func(reconstructed_heatmap, heatmap)
            # Backpropagating Loss with Weighting Factors
            # backbone_loss = heatmap_prediction_loss
            lifting_loss = 0.1 * pose_prediction_loss  # + 0.001*heatmap_reconstruction_loss
            # loss = backbone_loss + lifting_loss
            loss = lifting_loss
            # print(0.1*(-0.01)*pose_prediction_cosine_similarity_loss)
            # print(0.1*0.5*pose_prediction_l1_loss)
            # print(0.1*pose_prediction_l2_loss)
            # print(0.001*heatmap_reconstruction_loss)

            # Backward & Update
            loss.backward()
            optimizer.step()

            # Evaluate results using different evaluation metrics
            predicted_pose = torch.reshape(predicted_pose, (-1, 16, 3))
            y_output = predicted_pose.data.cpu().numpy()
            p3d_for_loss = torch.cat(
                (p3d[:, 4:6, :], p3d[:, 7:10, :], p3d[:, 11:, :]),
                dim=1)  # joints up to index 13 are the upper body
            p3d_for_loss = torch.reshape(p3d_for_loss, (-1, 16, 3))
            y_target = p3d_for_loss.data.cpu().numpy()

            eval_body_train.eval(y_output, y_target, action)
            # eval_upper_train.eval(y_output, y_target, action)
            # eval_lower_train.eval(y_output, y_target, action)
            # eval_neck_train.eval(y_output, y_target, action)
            # eval_head_train.eval(y_output, y_target, action)
            # eval_left_arm_train.eval(y_output, y_target, action)
            # eval_left_elbow_train.eval(y_output, y_target, action)
            # eval_left_hand_train.eval(y_output, y_target, action)
            # eval_right_arm_train.eval(y_output, y_target, action)
            # eval_right_elbow_train.eval(y_output, y_target, action)
            # eval_right_hand_train.eval(y_output, y_target, action)
            # eval_left_leg_train.eval(y_output, y_target, action)
            # eval_left_knee_train.eval(y_output, y_target, action)
            # eval_left_foot_train.eval(y_output, y_target, action)
            # eval_left_toe_train.eval(y_output, y_target, action)
            # eval_right_leg_train.eval(y_output, y_target, action)
            # eval_right_knee_train.eval(y_output, y_target, action)
            # eval_right_foot_train.eval(y_output, y_target, action)
            # eval_right_toe_train.eval(y_output, y_target, action)

            # heatmap_prediction_loss = heatmap_prediction_loss_func(predicted_heatmap, heatmap)
            # heatmap_reconstruction_loss = heatmap_reconstruction_loss_func(reconstructed_heatmap, heatmap)

            # AverageMeter Update
            # fullBodyErrorAverageMeterTrain.update(eval_body_train.get_results()["All"])
            # upperBodyErrorAverageMeterTrain.update(eval_upper_train.get_results()["All"])
            # lowerBodyErrorAverageMeterTrain.update(eval_lower_train.get_results()["All"])
            # heatmapPredictionErrorAverageMeterTrain.update(heatmap_prediction_loss.data.cpu().numpy())
            PosePredictionCosineSimilarityPerJointErrorAverageMeterTrain.update(
                -0.001 *
                pose_prediction_cosine_similarity_loss.data.cpu().numpy())
            PosePredictionDistancePerJointErrorAverageMeterTrain.update(
                0.05 * pose_prediction_l1_loss.data.cpu().numpy())
            PosePredictionMSEErrorAverageMeterTrain.update(
                0.1 * pose_prediction_l2_loss.data.cpu().numpy())
            # heatmapReconstructionErrorAverageMeterTrain.update(0.001 * heatmap_reconstruction_loss.data.cpu().numpy())
            # neckErrorAverageMeterTrain.update(eval_neck_train.get_results()["All"])
            # headErrorAverageMeterTrain.update(eval_head_train.get_results()["All"])
            # leftArmErrorAverageMeterTrain.update(eval_left_arm_train.get_results()["All"])
            # leftElbowErrorAverageMeterTrain.update(eval_left_elbow_train.get_results()["All"])
            # leftHandErrorAverageMeterTrain.update(eval_left_hand_train.get_results()["All"])
            # rightArmErrorAverageMeterTrain.update(eval_right_arm_train.get_results()["All"])
            # rightElbowErrorAverageMeterTrain.update(eval_right_elbow_train.get_results()["All"])
            # rightHandErrorAverageMeterTrain.update(eval_right_hand_train.get_results()["All"])
            # leftLegErrorAverageMeterTrain.update(eval_left_leg_train.get_results()["All"])
            # leftKneeErrorAverageMeterTrain.update(eval_left_knee_train.get_results()["All"])
            # leftFootErrorAverageMeterTrain.update(eval_left_foot_train.get_results()["All"])
            # leftToeErrorAverageMeterTrain.update(eval_left_toe_train.get_results()["All"])
            # rightLegErrorAverageMeterTrain.update(eval_right_leg_train.get_results()["All"])
            # rightKneeErrorAverageMeterTrain.update(eval_right_knee_train.get_results()["All"])
            # rightFootErrorAverageMeterTrain.update(eval_right_foot_train.get_results()["All"])
            # rightToeErrorAverageMeterTrain.update(eval_right_toe_train.get_results()["All"])

            # AverageMeter Update
            lossAverageMeterTrain.update(loss.data.cpu().numpy())
        LOGGER.info(
            str("Training Loss in Epoch " + str(ep) + " : " +
                str(lossAverageMeterTrain.avg)))
        LOGGER.info(
            str("Training PosePredictionCosineSimilarityPerJointErrorAverageMeter in Epoch "
                + str(ep) + " : " +
                str(PosePredictionCosineSimilarityPerJointErrorAverageMeterTrain
                    .avg)))
        LOGGER.info(
            str("Training PosePredictionDistancePerJointErrorAverageMeter in Epoch "
                + str(ep) + " : " +
                str(PosePredictionDistancePerJointErrorAverageMeterTrain.avg)))
        LOGGER.info(
            str("Training PosePredictionMSEErrorAverageMeter in Epoch " +
                str(ep) + " : " +
                str(PosePredictionMSEErrorAverageMeterTrain.avg)))
        # LOGGER.info(str("Training heatmapReconstructionErrorAverageMeter in Epoch " + str(ep) + " : " + str(heatmapReconstructionErrorAverageMeterTrain.avg)))
        LOGGER.info(
            str("Training fullBodyErrorAverageMeter in Epoch " + str(ep) +
                " : " + str(eval_body_train.get_results()["All"])))
        LOGGER.info(
            str("Training upperBodyErrorAverageMeter in Epoch " + str(ep) +
                " : " + str(eval_body_train.get_results()["UpperBody"])))
        LOGGER.info(
            str("Training lowerBodyErrorAverageMeter in Epoch " + str(ep) +
                " : " + str(eval_body_train.get_results()["LowerBody"])))
        # LOGGER.info(str("Training heatmapPredictionErrorAverageMeter in Epoch " + str(ep) + " : " + str(heatmapPredictionErrorAverageMeterTrain.avg)))

        # if ep+1 == config_lifting_singleBranch.train_setting.epoch:  # Test only in Final Epoch because of Training Time Issue
        if True:
            # -----------------------------------------------------------
            # -----------------------------------------------------------
            # -------------------- Validation Phase ---------------------
            # -----------------------------------------------------------
            # -----------------------------------------------------------
            LOGGER.info('Validation...')

            # ------------------- Data loader -------------------
            test_data_transform = transforms.Compose(
                [trsf.ImageTrsf(),
                 trsf.Joints3DTrsf(),
                 trsf.ToTensor()])

            # load the test split
            test_data = Mocap(config_lifting_singleBranch.dataset.test,
                              SetType.TEST,
                              transform=test_data_transform)
            test_data_loader = DataLoader(
                test_data,
                batch_size=config_lifting_singleBranch.test_data_loader.
                batch_size,
                shuffle=config_lifting_singleBranch.test_data_loader.shuffle,
                num_workers=config_lifting_singleBranch.test_data_loader.
                workers)

            # ------------------- Evaluation -------------------
            eval_body = evaluate.EvalBody()
            # eval_upper = evaluate.EvalUpperBody()
            # eval_lower = evaluate.EvalLowerBody()
            # eval_neck = evaluate.EvalNeck()
            # eval_head = evaluate.EvalHead()
            # eval_left_arm = evaluate.EvalLeftArm()
            # eval_left_elbow = evaluate.EvalLeftElbow()
            # eval_left_hand = evaluate.EvalLeftHand()
            # eval_right_arm = evaluate.EvalRightArm()
            # eval_right_elbow = evaluate.EvalRightElbow()
            # eval_right_hand = evaluate.EvalRightHand()
            # eval_left_leg = evaluate.EvalLeftLeg()
            # eval_left_knee = evaluate.EvalLeftKnee()
            # eval_left_foot = evaluate.EvalLeftFoot()
            # eval_left_toe = evaluate.EvalLeftToe()
            # eval_right_leg = evaluate.EvalRightLeg()
            # eval_right_knee = evaluate.EvalRightKnee()
            # eval_right_foot = evaluate.EvalRightFoot()
            # eval_right_toe = evaluate.EvalRightToe()

            # ------------------- Read dataset frames -------------------
            # backbone.eval()
            encoder.eval()
            decoder.eval()
            # reconstructer.eval()
            for it, (img, p2d, p3d, action,
                     heatmap) in tqdm(enumerate(test_data_loader),
                                      total=len(test_data_loader)):
                # p2d appears to hold the (x, y) pixel coordinates of each joint;
                # the image origin (0, 0) is the top-left corner.
                # p3d appears to hold each joint's (x, y, z) coordinates with the
                # neck taken as the origin (0, 0, 0).
                # The joint order is defined in config_lifting_singleBranch.py.

                # LOGGER.info('Iteration: {}'.format(it))
                # LOGGER.info('Images: {}'.format(img.shape))  # (Batch, Channel, Height(y), Width(x))
                # LOGGER.info('p2dShapes: {}'.format(p2d.shape))  # (Width, Height)
                # # LOGGER.info('p2ds: {}'.format(p2d))
                # LOGGER.info('p3dShapes: {}'.format(p3d.shape))  # (^x, ^y, ^z)
                # # LOGGER.info('p3ds: {}'.format(p3d))
                # LOGGER.info('Actions: {}'.format(action))
                # LOGGER.info('heatmapShapes: {}'.format(heatmap.shape))

                # ------------------- Evaluate -------------------
                # TODO: replace p3d_hat with model predictions
                # p3d_hat = torch.ones_like(p3d)

                # Move Tensors to GPUs
                # img = img.cuda()
                p3d = p3d.cuda()
                heatmap = heatmap.cuda()

                # Forward
                # predicted_heatmap = backbone(img)
                latent = encoder(heatmap)
                predicted_pose = decoder(latent)
                # reconstructed_heatmap = reconstructer(latent)

                # Loss Calculation
                # heatmap_prediction_loss = heatmap_prediction_loss_func(predicted_heatmap, heatmap)
                p3d_for_loss = torch.cat(
                    (p3d[:, 4:6, :], p3d[:, 7:10, :], p3d[:, 11:, :]),
                    dim=1)  # joints up to index 13 are the upper body
                p3d_for_loss = torch.reshape(p3d_for_loss, (-1, 48))
                pose_prediction_cosine_similarity_loss = pose_prediction_cosine_similarity_loss_func(
                    predicted_pose, p3d_for_loss)
                pose_prediction_l1_loss = pose_prediction_l1_loss_func(
                    predicted_pose, p3d_for_loss)
                pose_prediction_l2_loss = pose_prediction_l2_loss_func(
                    predicted_pose, p3d_for_loss)
                pose_prediction_loss = pose_prediction_l2_loss - 0.01 * pose_prediction_cosine_similarity_loss + 0.5 * pose_prediction_l1_loss
                # heatmap_reconstruction_loss = heatmap_reconstruction_loss_func(reconstructed_heatmap, heatmap)
                # Backpropagating Loss with Weighting Factors
                # backbone_loss = heatmap_prediction_loss
                lifting_loss = 0.1 * pose_prediction_loss  # + 0.001*heatmap_reconstruction_loss
                # loss = backbone_loss + lifting_loss
                loss = lifting_loss
                # print(0.1*(-0.01)*pose_prediction_cosine_similarity_loss)
                # print(0.1*0.5*pose_prediction_l1_loss)
                # print(0.1*pose_prediction_l2_loss)
                # print(0.001*heatmap_reconstruction_loss)

                # Evaluate results using different evaluation metrics
                predicted_pose = torch.reshape(predicted_pose, (-1, 16, 3))
                y_output = predicted_pose.data.cpu().numpy()
                p3d_for_loss = torch.cat(
                    (p3d[:, 4:6, :], p3d[:, 7:10, :], p3d[:, 11:, :]),
                    dim=1)  # joints up to index 13 are the upper body
                p3d_for_loss = torch.reshape(p3d_for_loss, (-1, 16, 3))
                y_target = p3d_for_loss.data.cpu().numpy()

                eval_body.eval(y_output, y_target, action)
                # eval_upper.eval(y_output, y_target, action)
                # eval_lower.eval(y_output, y_target, action)
                # eval_neck.eval(y_output, y_target, action)
                # eval_head.eval(y_output, y_target, action)
                # eval_left_arm.eval(y_output, y_target, action)
                # eval_left_elbow.eval(y_output, y_target, action)
                # eval_left_hand.eval(y_output, y_target, action)
                # eval_right_arm.eval(y_output, y_target, action)
                # eval_right_elbow.eval(y_output, y_target, action)
                # eval_right_hand.eval(y_output, y_target, action)
                # eval_left_leg.eval(y_output, y_target, action)
                # eval_left_knee.eval(y_output, y_target, action)
                # eval_left_foot.eval(y_output, y_target, action)
                # eval_left_toe.eval(y_output, y_target, action)
                # eval_right_leg.eval(y_output, y_target, action)
                # eval_right_knee.eval(y_output, y_target, action)
                # eval_right_foot.eval(y_output, y_target, action)
                # eval_right_toe.eval(y_output, y_target, action)

                # heatmap_reconstruction_loss = heatmap_reconstruction_loss_func(reconstructed_heatmap, heatmap)

                # AverageMeter Update
                # fullBodyErrorAverageMeter.update(eval_body.get_results()["All"])
                # upperBodyErrorAverageMeter.update(eval_upper.get_results()["All"])
                # lowerBodyErrorAverageMeter.update(eval_lower.get_results()["All"])
                # heatmapPredictionErrorAverageMeter.update(heatmap_prediction_loss.data.cpu().numpy())
                PosePredictionCosineSimilarityPerJointErrorAverageMeter.update(
                    -0.001 *
                    pose_prediction_cosine_similarity_loss.data.cpu().numpy())
                PosePredictionDistancePerJointErrorAverageMeter.update(
                    0.05 * pose_prediction_l1_loss.data.cpu().numpy())
                PosePredictionMSEErrorAverageMeter.update(
                    0.1 * pose_prediction_l2_loss.data.cpu().numpy())
                # heatmapReconstructionErrorAverageMeter.update(0.001 * heatmap_reconstruction_loss.data.cpu().numpy())
                # neckErrorAverageMeter.update(eval_neck.get_results()["All"])
                # headErrorAverageMeter.update(eval_head.get_results()["All"])
                # leftArmErrorAverageMeter.update(eval_left_arm.get_results()["All"])
                # leftElbowErrorAverageMeter.update(eval_left_elbow.get_results()["All"])
                # leftHandErrorAverageMeter.update(eval_left_hand.get_results()["All"])
                # rightArmErrorAverageMeter.update(eval_right_arm.get_results()["All"])
                # rightElbowErrorAverageMeter.update(eval_right_elbow.get_results()["All"])
                # rightHandErrorAverageMeter.update(eval_right_hand.get_results()["All"])
                # leftLegErrorAverageMeter.update(eval_left_leg.get_results()["All"])
                # leftKneeErrorAverageMeter.update(eval_left_knee.get_results()["All"])
                # leftFootErrorAverageMeter.update(eval_left_foot.get_results()["All"])
                # leftToeErrorAverageMeter.update(eval_left_toe.get_results()["All"])
                # rightLegErrorAverageMeter.update(eval_right_leg.get_results()["All"])
                # rightKneeErrorAverageMeter.update(eval_right_knee.get_results()["All"])
                # rightFootErrorAverageMeter.update(eval_right_foot.get_results()["All"])
                # rightToeErrorAverageMeter.update(eval_right_toe.get_results()["All"])

                # AverageMeter Update
                lossAverageMeter.update(loss.data.cpu().numpy())
            LOGGER.info(
                str("Validation Loss in Epoch " + str(ep) + " : " +
                    str(lossAverageMeter.avg)))
            LOGGER.info(
                str("Validation PosePredictionCosineSimilarityPerJointErrorAverageMeter in Epoch "
                    + str(ep) + " : " +
                    str(PosePredictionCosineSimilarityPerJointErrorAverageMeter
                        .avg)))
            LOGGER.info(
                str("Validation PosePredictionDistancePerJointErrorAverageMeter in Epoch "
                    + str(ep) + " : " +
                    str(PosePredictionDistancePerJointErrorAverageMeter.avg)))
            LOGGER.info(
                str("Validation PosePredictionMSEErrorAverageMeter in Epoch " +
                    str(ep) + " : " +
                    str(PosePredictionMSEErrorAverageMeter.avg)))
            # LOGGER.info(str("Validation heatmapReconstructionErrorAverageMeter in Epoch " + str(ep) + " : " + str(heatmapReconstructionErrorAverageMeter.avg)))
            LOGGER.info(
                str("Validation fullBodyErrorAverageMeter in Epoch " +
                    str(ep) + " : " + str(eval_body.get_results()["All"])))
            LOGGER.info(
                str("Validation upperBodyErrorAverageMeter in Epoch " +
                    str(ep) + " : " +
                    str(eval_body.get_results()["UpperBody"])))
            LOGGER.info(
                str("Validation lowerBodyErrorAverageMeter in Epoch " +
                    str(ep) + " : " +
                    str(eval_body.get_results()["LowerBody"])))
            # LOGGER.info(str("Validation heatmapPredictionErrorAverageMeter in Epoch " + str(ep) + " : " + str(heatmapPredictionErrorAverageMeter.avg)))

            # -----------------------------------------------------------
            # -----------------------------------------------------------
            # ----------------------- Save Phase ------------------------
            # -----------------------------------------------------------
            # -----------------------------------------------------------
            LOGGER.info('Save...')

            # mkdir for this experiment
            if not os.path.exists(
                    os.path.join(
                        os.getcwd(),
                        config_lifting_singleBranch.eval.experiment_folder)):
                os.mkdir(
                    os.path.join(
                        os.getcwd(),
                        config_lifting_singleBranch.eval.experiment_folder))

            # mkdir for this epoch
            if not os.path.exists(
                    os.path.join(
                        os.getcwd(),
                        config_lifting_singleBranch.eval.experiment_folder,
                        str("epoch_" + str(ep)))):
                os.mkdir(
                    os.path.join(
                        os.getcwd(),
                        config_lifting_singleBranch.eval.experiment_folder,
                        str("epoch_" + str(ep))))

            # Variable for Final Model Selection
            # if errorAverageMeter.avg <= errorMin:
            #     errorMin = ErrorAverageMeter.avg
            #     errorMinIsUpdatedInThisEpoch = True

            # ------------------- Save results -------------------
            LOGGER.info('Saving evaluation results...')

            # Training Result Saving
            res_train = {
                'Loss':
                lossAverageMeterTrain.avg,
                # 'HeatmapPrediction': heatmapPredictionErrorAverageMeterTrain.avg,
                'PosePredictionCosineSimilarityPerJoint':
                PosePredictionCosineSimilarityPerJointErrorAverageMeterTrain.
                avg,
                'PosePredictionDistancePerJoint':
                PosePredictionDistancePerJointErrorAverageMeterTrain.avg,
                'PosePredictionMSE':
                PosePredictionMSEErrorAverageMeterTrain.avg,
                # 'HeatmapReconstruction': heatmapReconstructionErrorAverageMeterTrain.avg,
                'FullBody':
                eval_body_train.get_results()["All"],
                'UpperBody':
                eval_body_train.get_results()["UpperBody"],
                'LowerBody':
                eval_body_train.get_results()["LowerBody"],
                'Neck':
                eval_body_train.get_results()["Neck"],
                'Head':
                eval_body_train.get_results()["Head"],
                'LeftArm':
                eval_body_train.get_results()["LeftArm"],
                'LeftElbow':
                eval_body_train.get_results()["LeftElbow"],
                'LeftHand':
                eval_body_train.get_results()["LeftHand"],
                'RightArm':
                eval_body_train.get_results()["RightArm"],
                'RightElbow':
                eval_body_train.get_results()["RightElbow"],
                'RightHand':
                eval_body_train.get_results()["RightHand"],
                'LeftLeg':
                eval_body_train.get_results()["LeftLeg"],
                'LeftKnee':
                eval_body_train.get_results()["LeftKnee"],
                'LeftFoot':
                eval_body_train.get_results()["LeftFoot"],
                'LeftToe':
                eval_body_train.get_results()["LeftToe"],
                'RightLeg':
                eval_body_train.get_results()["RightLeg"],
                'RightKnee':
                eval_body_train.get_results()["RightKnee"],
                'RightFoot':
                eval_body_train.get_results()["RightFoot"],
                'RightToe':
                eval_body_train.get_results()["RightToe"]
            }
            io.write_json(
                os.path.join(
                    os.getcwd(),
                    config_lifting_singleBranch.eval.experiment_folder,
                    str("epoch_" + str(ep)),
                    config_lifting_singleBranch.eval.training_result_file),
                res_train)

            # Evaluation Result Saving
            res = {
                'Loss':
                lossAverageMeter.avg,
                # 'HeatmapPrediction': heatmapPredictionErrorAverageMeter.avg,
                'PosePredictionCosineSimilarityPerJoint':
                PosePredictionCosineSimilarityPerJointErrorAverageMeter.avg,
                'PosePredictionDistancePerJoint':
                PosePredictionDistancePerJointErrorAverageMeter.avg,
                'PosePredictionMSE':
                PosePredictionMSEErrorAverageMeter.avg,
                # 'HeatmapReconstruction': heatmapReconstructionErrorAverageMeter.avg,
                'FullBody':
                eval_body.get_results()["All"],
                'UpperBody':
                eval_body.get_results()["UpperBody"],
                'LowerBody':
                eval_body.get_results()["LowerBody"],
                'Neck':
                eval_body.get_results()["Neck"],
                'Head':
                eval_body.get_results()["Head"],
                'LeftArm':
                eval_body.get_results()["LeftArm"],
                'LeftElbow':
                eval_body.get_results()["LeftElbow"],
                'LeftHand':
                eval_body.get_results()["LeftHand"],
                'RightArm':
                eval_body.get_results()["RightArm"],
                'RightElbow':
                eval_body.get_results()["RightElbow"],
                'RightHand':
                eval_body.get_results()["RightHand"],
                'LeftLeg':
                eval_body.get_results()["LeftLeg"],
                'LeftKnee':
                eval_body.get_results()["LeftKnee"],
                'LeftFoot':
                eval_body.get_results()["LeftFoot"],
                'LeftToe':
                eval_body.get_results()["LeftToe"],
                'RightLeg':
                eval_body.get_results()["RightLeg"],
                'RightKnee':
                eval_body.get_results()["RightKnee"],
                'RightFoot':
                eval_body.get_results()["RightFoot"],
                'RightToe':
                eval_body.get_results()["RightToe"]
            }
            io.write_json(
                os.path.join(
                    os.getcwd(),
                    config_lifting_singleBranch.eval.experiment_folder,
                    str("epoch_" + str(ep)),
                    config_lifting_singleBranch.eval.evaluation_result_file),
                res)

            # Experiment configuration saving
            copyfile(
                "data/config_lifting_singleBranch.yml",
                os.path.join(
                    os.getcwd(),
                    config_lifting_singleBranch.eval.experiment_folder,
                    str("epoch_" + str(ep)), config_lifting_singleBranch.eval.
                    experiment_configuration_file))

            # Model Weights Saving
            # torch.save(backbone, os.path.join(os.getcwd(), config_lifting_singleBranch.eval.experiment_folder, str("epoch_" + ep), config_lifting_singleBranch.eval.backbone_weight_file))
            torch.save(
                encoder.state_dict(),
                os.path.join(
                    os.getcwd(),
                    config_lifting_singleBranch.eval.experiment_folder,
                    str("epoch_" + str(ep)),
                    config_lifting_singleBranch.eval.encoder_weight_file))
            torch.save(
                decoder.state_dict(),
                os.path.join(
                    os.getcwd(),
                    config_lifting_singleBranch.eval.experiment_folder,
                    str("epoch_" + str(ep)),
                    config_lifting_singleBranch.eval.decoder_weight_file))
            # torch.save(reconstructer.state_dict(), os.path.join(os.getcwd(), config_lifting_singleBranch.eval.experiment_folder, str("epoch_" + str(ep)), config_lifting_singleBranch.eval.reconstructer_weight_file))

            # Variable for Final Model Selection
            # errorMinIsUpdatedInThisEpoch = False

        scheduler.step()
    LOGGER.info('Done.')
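
init_weights is referenced above via encoder.apply(init_weights) but is not defined in these examples. A minimal sketch of a compatible initializer (an assumption; the project's actual function may use a different scheme):

import torch.nn as nn


def init_weights(m):
    # Applied recursively to every submodule by nn.Module.apply()
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)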
Code example #6
def main():
    """Main"""

    args = arguments.parse_args()
    LOGGER = ConsoleLogger(args.training_type, 'train')
    logdir = LOGGER.getLogFolder()
    LOGGER.info(args)
    LOGGER.info(config)


    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    # ------------------- Data loader -------------------

    data_transform = transforms.Compose([
        trsf.ImageTrsf(),  # normalize
        trsf.Joints3DTrsf(),  # centerize
        trsf.ToTensor()])  # to tensor

    # training data
    train_data = Mocap(
        config.dataset.train,
        SetType.TRAIN,
        transform=data_transform)
    train_data_loader = DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=config.data_loader.shuffle,
        num_workers=8)

    val_data = Mocap(
        config.dataset.val,
        SetType.VAL,
        transform=data_transform)
    val_data_loader = DataLoader(
        val_data,
        batch_size=2,
        shuffle=config.data_loader.shuffle,
        num_workers=8)

    # ------------------- Model -------------------
    if args.training_type != 'Train3d':
        with open('model/model.yaml') as fin:
            model_cfg = edict(yaml.safe_load(fin))
        resnet = pose_resnet.get_pose_net(model_cfg, True)
        Loss2D = HeatmapLoss()  # same as MSELoss()
        # LossMSE = nn.MSELoss()
    if args.training_type != 'Train2d':
        autoencoder = encoder_decoder.AutoEncoder()

    if torch.cuda.is_available():
        device = torch.device(f"cuda:{args.gpu}")
        if args.training_type != 'Train3d':
            resnet = resnet.cuda(device)
            Loss2D = Loss2D.cuda(device)
        if args.training_type != 'Train2d':
            autoencoder = autoencoder.cuda(device)

    # ------------------- optimizer -------------------
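    # one optimizer per training mode: 2D backbone only ('Train2d'),
    # 3D lifter only ('Train3d'), or both networks jointly ('Finetune')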
    if args.training_type == 'Train2d':
        optimizer = optim.Adam(resnet.parameters(), lr=args.learning_rate)
    elif args.training_type == 'Train3d':
        optimizer = optim.Adam(autoencoder.parameters(), lr=config.train.learning_rate)
    elif args.training_type == 'Finetune':
        optimizer = optim.Adam(
            itertools.chain(resnet.parameters(), autoencoder.parameters()),
            lr=config.train.learning_rate)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.1)
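    # (the StepLR above multiplies the learning rate by gamma=0.1 every args.step_size epochs)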

    # ------------------- load model -------------------
    if args.load_model:
        if not os.path.isfile(args.load_model):
            raise ValueError(f"No checkpoint found at {args.load_model}")
        checkpoint = torch.load(args.load_model)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        if args.training_type != 'Train3d':
            resnet.load_state_dict(checkpoint['resnet_state_dict'])
        if args.training_type != 'Train2d':
            autoencoder.load_state_dict(checkpoint['autoencoder_state_dict'])
        scheduler.load_state_dict(checkpoint['scheduler'])


    # ------------------- tensorboard -------------------
    train_global_steps = 0
    writer_dict = {
        'writer': SummaryWriter(log_dir=logdir),
        'train_global_steps': train_global_steps
    }
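    # writer_dict bundles the SummaryWriter with a running global-step counter
    # for TensorBoard logging (not exercised further in this snippet)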

    # ------------------- Evaluation -------------------
    if args.training_type != 'Train2d':
        eval_body = evaluate.EvalBody()
        eval_upper = evaluate.EvalUpperBody()
        eval_lower = evaluate.EvalLowerBody()


    best_perf = float('inf')
    best_model = False
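    # placeholders for best-model selection on validation error (not updated in this snippet)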
    # ------------------- run the model -------------------
    for epoch in range(args.epochs):
        with torch.autograd.set_detect_anomaly(True):
            LOGGER.info(f'====Training epoch {epoch}====')
            losses = AverageMeter()
            batch_time = AverageMeter()

            if args.training_type != 'Train3d':
                resnet.train()
            if args.training_type != 'Train2d':
                autoencoder.train()

            end = time.time()
            for it, (img, p2d, p3d, heatmap, action) in enumerate(train_data_loader, 0):

                img = img.to(device)
                p2d = p2d.to(device)
                p3d = p3d.to(device)
                heatmap = heatmap.to(device)

                if args.training_type != 'Train3d':
                    heatmap2d_hat = resnet(img)  # torch.Size([16, 15, 48, 48])
                else:
                    heatmap2d_hat = heatmap
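                # for 'Train3d' the ground-truth heatmaps above stand in for the backbone output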
                if args.training_type != 'Train2d':
                    p3d_hat, heatmap2d_recon = autoencoder(heatmap2d_hat)

                if args.training_type != 'Train3d':
                    loss2d = Loss2D(heatmap, heatmap2d_hat).mean()
                    # loss2d = LossMSE(heatmap, heatmap2d_hat)

                # assumption: the original left the 3D/finetuning losses undefined;
                # a plain MSE on the lifted 3D pose is used here as a stand-in and
                # may differ from the intended objective
                if args.training_type == 'Train2d':
                    loss = loss2d
                elif args.training_type == 'Train3d':
                    loss = torch.nn.functional.mse_loss(p3d_hat, p3d)
                else:  # 'Finetune'
                    loss = loss2d + torch.nn.functional.mse_loss(p3d_hat, p3d)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                batch_time.update(time.time() - end)
                losses.update(loss.item(), img.size(0))

                if it % config.train.PRINT_FREQ == 0:
                    # logging messages
                    msg = 'Epoch: [{0}][{1}/{2}]\t' \
                          'Batch Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
                          'Speed {speed:.1f} samples/s\t' \
                          'Loss {loss.val:.5f} ({loss.avg:.5f})\t'.format(
                        epoch, it, len(train_data_loader), batch_time=batch_time,
                        speed=img.size(0) / batch_time.val,  # averaged within batch
                        loss=losses)
                    LOGGER.info(msg)
                end = time.time()
            scheduler.step()

            # ------------------- validation -------------------

            if args.training_type != 'Train3d':
                resnet.eval()
            if args.training_type != 'Train2d':
                autoencoder.eval()

            if args.training_type != 'Train2d':
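                # NOTE: this evaluates only the last training batch; the val_data_loader
                # built above is not used in this snippet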
                # Evaluate results using different evaluation metrics
                y_output = p3d_hat.data.cpu().numpy()
                y_target = p3d.data.cpu().numpy()

                eval_body.eval(y_output, y_target, action)
                eval_upper.eval(y_output, y_target, action)
                eval_lower.eval(y_output, y_target, action)


            # ------------------- Save results -------------------
            checkpoint_dir = os.path.join(logdir, 'checkpoints')
            LOGGER.info('=> saving checkpoint to {}'.format(checkpoint_dir))
            states = dict()
            if args.training_type != 'Train3d':
                states['resnet_state_dict'] = resnet.state_dict()
            if args.training_type != 'Train2d':
                states['autoencoder_state_dict'] = autoencoder.state_dict()
            states['optimizer_state_dict'] = optimizer.state_dict()
            states['scheduler'] = scheduler.state_dict()

            os.makedirs(checkpoint_dir, exist_ok=True)
            torch.save(states, os.path.join(checkpoint_dir, f'checkpoint_{epoch}.tar'))
            if args.training_type != 'Train2d':
                res = {'FullBody': eval_body.get_results(),
                       'UpperBody': eval_upper.get_results(),
                       'LowerBody': eval_lower.get_results()}

                utils_io.write_json(config.eval.output_file, res)

            LOGGER.info('Done.')
Code example #7
def main():
    """Main"""

    LOGGER.info('Starting demo...')

    # ------------------- Data loader -------------------

    data_transform = transforms.Compose(
        [trsf.ImageTrsf(),
         trsf.Joints3DTrsf(),
         trsf.ToTensor()])

    # let's load data from validation set as example
    data = Mocap(config.dataset.val, SetType.VAL, transform=data_transform)
    data_loader = DataLoader(data,
                             batch_size=config.data_loader.batch_size,
                             shuffle=config.data_loader.shuffle)

    # ------------------- Evaluation -------------------

    eval_body = evaluate.EvalBody()
    eval_upper = evaluate.EvalUpperBody()
    eval_lower = evaluate.EvalLowerBody()

    # ------------------- Read dataset frames -------------------

    for it, (img, p2d, p3d, action) in enumerate(data_loader):

        LOGGER.info('Iteration: {}'.format(it))
        LOGGER.info('Images: {}'.format(img.shape))
        LOGGER.info('p2ds: {}'.format(p2d.shape))
        LOGGER.info('p3ds: {}'.format(p3d.shape))
        LOGGER.info('Actions: {}'.format(action))

        # -----------------------------------------------------------
        # ------------------- Run your model here -------------------
        # -----------------------------------------------------------

        # TODO: replace p3d_hat with model predictions
        p3d_hat = torch.ones_like(p3d)

        # Evaluate results using different evaluation metrics
        y_output = p3d_hat.data.cpu().numpy()
        y_target = p3d.data.cpu().numpy()

        eval_body.eval(y_output, y_target, action)
        eval_upper.eval(y_output, y_target, action)
        eval_lower.eval(y_output, y_target, action)

        # TODO: remove break
        break

    # ------------------- Save results -------------------

    LOGGER.info('Saving evaluation results...')
    res = {
        'FullBody': eval_body.get_results(),
        'UpperBody': eval_upper.get_results(),
        'LowerBody': eval_lower.get_results()
    }

    io.write_json(config.eval.output_file, res)

    LOGGER.info('Done.')