Example #1
    def validate(self, epoch):
        """Evaluate on test dataset."""
        val_loss = AverageMeter()
        metrics = {
            'val_dist': AverageMeter(),  # per vertex distance in mm
        }
        self.model.eval()
        with torch.no_grad():
            for i, inputs in enumerate(self.test_loader):
                outputs, loss = self.one_step(inputs)
                val_loss.update(loss.item(), inputs[0].shape[0])

                self.update_metrics(metrics, inputs, outputs)
                self.visualize_batch(inputs, outputs, epoch)

        val_dist_avg = metrics['val_dist'].avg
        self.logger.add_scalar("val/loss", val_loss.avg, epoch)
        self.logger.add_scalar("val/dist", val_dist_avg, epoch)
        print("VALIDATION")
        print("Epoch {}, loss: {:.4f}, dist: {:.4f} mm".format(
            epoch, val_loss.avg, val_dist_avg))

        if val_dist_avg < self.best_error:
            self.best_error = val_dist_avg
            self.best_epoch = epoch
            self.save_ckpt_best()
            with open(os.path.join(self.log_dir, 'best_epoch'), 'w') as f:
                f.write("{:04d}".format(epoch))
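
All of these examples lean on the same AverageMeter helper, whose definition is not shown on this page. Below is a minimal sketch of the conventional implementation (as popularized by the PyTorch ImageNet example), assuming the usual val/sum/count/avg fields and an update(val, n=1) method; the projects' own versions may differ slightly.

class AverageMeter(object):
    """Track the latest value and a running, optionally weighted, average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0    # last value passed to update()
        self.sum = 0.0    # weighted sum of all values seen so far
        self.count = 0    # total weight (e.g. number of samples)
        self.avg = 0.0    # sum / count

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
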
Example #2
def step(phase, epoch, opt, dataloader, model, criterion, optimizer=None):
    # Select the phase (eval mode disables Dropout and uses BatchNorm running statistics)
    if phase == 'train':
        model.train()
    else:
        model.eval()
    # Initialize the running average meters
    Loss, Err, Acc = AverageMeter(), AverageMeter(), AverageMeter()
    Acc_tot = AverageMeter()
    seqlen = set_sequence_length(opt.MinSeqLenIndex, opt.MaxSeqLenIndex, epoch)
    # Show iteration progress with a Bar
    nIters = len(dataloader)
    bar = Bar(f'{opt.expID}', max=nIters)
    # Loop in dataloader
    for i, gt in enumerate(dataloader):
        ## Unpack the ground-truth batch and move the inputs to the GPU
        input, label = gt['input'], gt['label']
        gtpts, center, scale = gt['gtpts'], gt['center'], gt['scale']
        input_var = input[:, 0, ].float().cuda(device=opt.device,
                                               non_blocking=True)
        label_var = label.float().cuda(device=opt.device, non_blocking=True)
        Loss.reset()
        Err.reset()
        Acc.reset()
        ### For 3D data, nOutput may be needed to build targets beyond the heatmap alone
        ## Forward propagation
        output = model(input_var)
        ## Get model outputs and calculate loss
        loss = criterion(output, label_var)
        ## Backward + Optimize only if in training phase
        if phase == 'train':
            ## Zero the parameter gradients
            optimizer.zero_grad()
            loss.mean().backward()
            optimizer.step()
        Loss.update(loss.sum().item())  # detach to a Python float so the graph is not retained
        ## Compute the accuracy
        # acc = Accuracy(opt, output.data.cpu().numpy(), labels_var.data.cpu().numpy())
        ref = get_ref(opt.dataset, scale)
        for j in range(opt.preSeqLen):
            if j <= seqlen:
                pred_hm = get_preds(output[:, j, ].float())
                pred_pts = original_coordinate(pred_hm, center[:, ], scale,
                                               opt.outputRes)
                err, ne = error(pred_pts, gtpts[:, j, ], ref)
                acc, na = accuracy(pred_pts, gtpts[:, j, ], ref)
                # assert ne == na, "ne must be the same as na"
                Err.update(err)
                Acc.update(acc)
                Acc_tot.update(acc)

        bar.suffix = f'{phase}[{epoch}][{i}/{nIters}]|Total:{bar.elapsed_td}' \
            f'|ETA:{bar.eta_td}|Loss:{Loss.val:.6f}|Err:{Err.avg:.6f}|Acc:{Acc.avg:.6f}'
        bar.next()

    bar.finish()
    return Loss.val, Acc_tot.avg
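
The progress display in Example #2 uses the Bar class from the progress package; suffix, next(), finish(), elapsed_td and eta_td are all part of that API. A minimal, self-contained usage sketch (the sleep is just a stand-in for one training iteration):

import time
from progress.bar import Bar

bar = Bar('demo', max=10)       # a bar with 10 steps
for i in range(10):
    time.sleep(0.1)             # stand-in for real per-iteration work
    # the suffix string is re-rendered on every next(); elapsed_td / eta_td are timedeltas
    bar.suffix = f'[{i + 1}/10] Total: {bar.elapsed_td} | ETA: {bar.eta_td}'
    bar.next()
bar.finish()
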
Example #3
def train(model, train_loader, optimizer, criterion, v_threshold, f_threshold):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    model.cuda()
    model.train()

    end = time.time()

    frame_dict = {}

    bar = Bar('Processing', max=len(train_loader))
    for batch_idx, (data, target, idx) in enumerate(train_loader):
        data_time.update(time.time() - end)

        input_var = data.float().cuda()
        target_var = target.long().cuda()

        optimizer.zero_grad()
        output = model(input_var)
        _, predicted = torch.max(output, 1)
        loss = criterion(output, target_var)

        loss.backward()
        optimizer.step()

        losses.update(loss.item())

        for i in range(len(idx)):
            # use a hashable scalar key (idx may be a tensor produced by the default collate)
            key = idx[i].item() if torch.is_tensor(idx[i]) else idx[i]
            if key not in frame_dict:
                frame_dict[key] = 0
            if target[i].item() == predicted[i].item():
                frame_dict[key] += 1

        batch_time.update(time.time() - end)
        end = time.time()

        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} |' \
                     ' Loss: {loss:.4f}'.format(
            batch=batch_idx + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.val,
        )
        bar.next()

    bar.finish()

    acc = cal_acc(frame_dict, v_threshold, f_threshold)

    return losses.avg, acc
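
The frame_dict bookkeeping in Example #3 (one counter of correctly classified frames per clip index, later handed to cal_acc together with the thresholds) can be written a little more compactly with collections.defaultdict. A purely illustrative sketch, with toy tensors standing in for one batch:

import torch
from collections import defaultdict

# toy stand-ins for one batch: clip indices, ground-truth labels, predictions
idx = torch.tensor([3, 3, 7])
target = torch.tensor([1, 0, 2])
predicted = torch.tensor([1, 1, 2])

frame_dict = defaultdict(int)   # clip index -> number of correctly classified frames
for i in range(len(idx)):
    frame_dict[idx[i].item()] += int(target[i].item() == predicted[i].item())

print(dict(frame_dict))         # {3: 1, 7: 1}
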
Example #4
    def train(self, epoch):
        """Train for one epoch."""
        epoch_loss = AverageMeter()
        self.model.train()
        for i, inputs in enumerate(self.train_loader):
            self.optimizer.zero_grad()
            outputs, loss = self.one_step(inputs)
            loss.backward()
            self.optimizer.step()

            self.logger.add_scalar("train/loss", loss.item(), self.iter_nums)
            print("Iter {}, loss: {:.8f}".format(self.iter_nums, loss.item()))
            epoch_loss.update(loss.item(), inputs[0].shape[0])
            self.iter_nums += 1

        self.logger.add_scalar("train_epoch/loss", epoch_loss.avg, epoch)
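
In Example #4, epoch_loss.update(loss.item(), inputs[0].shape[0]) weights each batch by its size, so epoch_loss.avg is a per-sample average even when the last batch is smaller. A tiny numeric check of that behaviour, using the AverageMeter sketched after Example #1:

meter = AverageMeter()
meter.update(1.0, n=32)   # full batch with mean loss 1.0
meter.update(3.0, n=8)    # ragged last batch with mean loss 3.0
print(meter.avg)          # (1.0 * 32 + 3.0 * 8) / 40 = 1.4, not (1.0 + 3.0) / 2
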
Example #5
def evaluate():
    """Evaluate TailorNet (or any model for that matter) on test set."""
    from dataset.static_pose_shape_final import MultiStyleShape
    import torch
    from torch.utils.data import DataLoader
    from utils.eval import AverageMeter
    from models import ops

    gender = 'female'
    garment_class = 'skirt'

    dataset = MultiStyleShape(garment_class=garment_class,
                              gender=gender,
                              split='test')
    dataloader = DataLoader(dataset,
                            batch_size=32,
                            num_workers=0,
                            shuffle=False,
                            drop_last=False)
    print(len(dataset))

    val_dist = AverageMeter()
    from models.tailornet_model import get_best_runner as tn_runner
    runner = tn_runner(garment_class, gender)
    # from trainer.base_trainer import get_best_runner as baseline_runner
    # runner = baseline_runner("/BS/cpatel/work/data/learn_anim/{}_{}_weights/tn_orig_baseline/{}_{}".format(garment_class, gender, garment_class, gender))

    device = torch.device('cuda:0')
    with torch.no_grad():
        for i, inputs in enumerate(dataloader):
            gt_verts, thetas, betas, gammas, _ = inputs

            thetas, betas, gammas = ops.mask_inputs(thetas, betas, gammas,
                                                    garment_class)
            gt_verts = gt_verts.to(device)
            thetas = thetas.to(device)
            betas = betas.to(device)
            gammas = gammas.to(device)
            pred_verts = runner.forward(thetas=thetas,
                                        betas=betas,
                                        gammas=gammas).view(gt_verts.shape)

            dist = ops.verts_dist(gt_verts, pred_verts) * 1000.
            val_dist.update(dist.item(), gt_verts.shape[0])
            print(i, len(dataloader))
    print(val_dist.avg)
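
ops.verts_dist is not defined on this page; multiplying its result by 1000 and reporting millimetres suggests it returns the mean Euclidean distance between corresponding vertices in metres. A minimal sketch under that assumption (not TailorNet's actual implementation):

import torch

def verts_dist_sketch(gt_verts, pred_verts):
    """Mean per-vertex Euclidean distance, assuming (batch, n_verts, 3) tensors in metres."""
    return (gt_verts - pred_verts).norm(dim=-1).mean()

gt = torch.zeros(2, 4, 3)
pred = torch.full((2, 4, 3), 0.001)          # 1 mm offset along each axis
print(verts_dist_sketch(gt, pred) * 1000.)   # ~1.732 mm, i.e. sqrt(3) * 1 mm
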
Example #6
def evaluate_save():
    """Evaluate TailorNet (or any model for that matter) on test set."""
    from dataset.static_pose_shape_final import MultiStyleShape
    import torch
    from torch.utils.data import DataLoader
    from utils.eval import AverageMeter
    from models import ops
    from models.smpl4garment import SMPL4Garment
    import os

    gender = 'female'
    garment_class = 'skirt'
    smpl = SMPL4Garment(gender)
    vis_freq = 512
    log_dir = "/BS/cpatel/work/code_test2/try"

    dataset = MultiStyleShape(garment_class=garment_class,
                              gender=gender,
                              split='test')
    dataloader = DataLoader(dataset,
                            batch_size=32,
                            num_workers=0,
                            shuffle=False,
                            drop_last=False)
    print(len(dataset))

    val_dist = AverageMeter()
    from models.tailornet_model import get_best_runner as tn_runner
    runner = tn_runner(garment_class, gender)
    # from trainer.base_trainer import get_best_runner as baseline_runner
    # runner = baseline_runner("/BS/cpatel/work/data/learn_anim/{}_{}_weights/tn_orig_baseline/{}_{}".format(garment_class, gender, garment_class, gender))

    device = torch.device('cuda:0')
    with torch.no_grad():
        for i, inputs in enumerate(dataloader):
            gt_verts, thetas, betas, gammas, idxs = inputs

            thetas, betas, gammas = ops.mask_inputs(thetas, betas, gammas,
                                                    garment_class)
            gt_verts = gt_verts.to(device)
            thetas = thetas.to(device)
            betas = betas.to(device)
            gammas = gammas.to(device)
            pred_verts = runner.forward(thetas=thetas,
                                        betas=betas,
                                        gammas=gammas).view(gt_verts.shape)

            # accumulate the per-vertex distance so that val_dist.avg is meaningful below
            dist = ops.verts_dist(gt_verts, pred_verts) * 1000.
            val_dist.update(dist.item(), gt_verts.shape[0])

            for lidx, idx in enumerate(idxs):
                if idx % vis_freq != 0:
                    continue
                theta = thetas[lidx].cpu().numpy()
                beta = betas[lidx].cpu().numpy()
                pred_vert = pred_verts[lidx].cpu().numpy()
                gt_vert = gt_verts[lidx].cpu().numpy()

                body_m, pred_m = smpl.run(theta=theta,
                                          garment_d=pred_vert,
                                          beta=beta,
                                          garment_class=garment_class)
                _, gt_m = smpl.run(theta=theta,
                                   garment_d=gt_vert,
                                   beta=beta,
                                   garment_class=garment_class)

                save_dir = log_dir
                pred_m.write_ply(
                    os.path.join(save_dir, "pred_{}.ply".format(idx)))
                gt_m.write_ply(os.path.join(save_dir, "gt_{}.ply".format(idx)))
                body_m.write_ply(
                    os.path.join(save_dir, "body_{}.ply".format(idx)))

    print(val_dist.avg)
Example #7
def main():
    # Parse the options from parameters
    opts = Opts().parse()
    ## For PyTorch 0.4.1, cuda(device)
    opts.device = torch.device(f'cuda:{opts.gpu[0]}')
    print(opts.expID, opts.task, os.path.dirname(os.path.realpath(__file__)))
    # Load a trained model for testing
    if opts.loadModel != 'none':
        model_path = os.path.join(opts.root_dir, opts.loadModel)
        model = torch.load(model_path).cuda(device=opts.device)
        model.eval()
    else:
        print('ERROR: No model is loaded!')
        return
    # Read the input image, pass input to gpu
    if opts.img == 'None':
        val_dataset = PENN_CROP(opts, 'val')
        val_loader = tud.DataLoader(val_dataset,
                                    batch_size=1,
                                    shuffle=False,
                                    num_workers=int(opts.num_workers))
        opts.nJoints = val_dataset.nJoints
        opts.skeleton = val_dataset.skeleton
        for i, gt in enumerate(val_loader):
            # Test Visualizer, Input and get_preds
            if i == 0:
                input, label = gt['input'], gt['label']
                gtpts, center, scale, proj = gt['gtpts'], gt['center'], gt[
                    'scale'], gt['proj']
                input_var = input[:, 0, ].float().cuda(device=opts.device,
                                                       non_blocking=True)
                # output = label
                output = model(input_var)
                # Test Loss, Err and Acc(PCK)
                Loss, Err, Acc = AverageMeter(), AverageMeter(), AverageMeter()
                ref = get_ref(opts.dataset, scale)
                for j in range(opts.preSeqLen):
                    pred = get_preds(output[:, j, ].cpu().float())
                    pred = original_coordinate(pred, center[:, ], scale,
                                               opts.outputRes)
                    err, ne = error(pred, gtpts[:, j, ], ref)
                    acc, na = accuracy(pred, gtpts[:, j, ], ref)
                    # assert ne == na, "ne must be the same as na"
                    Err.update(err)
                    Acc.update(acc)
                    print(j, f"{Err.val:.6f}", Acc.val)
                print('all', f"{Err.avg:.6f}", Acc.avg)
                # Visualizer Object
                ## Initialize
                v = Visualizer(opts.nJoints, opts.skeleton, opts.outputRes)
                # ## Add input image
                # v.add_img(input[0,0,].transpose(2, 0).numpy().astype(np.uint8))
                # ## Get the predicted joints
                # predJoints = get_preds(output[:, 0, ])
                # # ## Add joints and skeleton to the figure
                # v.add_2d_joints_skeleton(predJoints, (0, 0, 255))
                # Transform heatmap to show
                hm_img = output[0, 0, ].cpu().detach().numpy()
                v.add_hm(hm_img)
                ## Show image
                v.show_img(pause=True)
                break
    else:
        print('NOT ready for raw input outside the dataset')
        img = cv2.imread(opts.img)
        input = torch.from_numpy(img.transpose(2, 0, 1)).float() / 256.
        input = input.view(1, input.size(0), input.size(1), input.size(2))
        input_var = torch.autograd.Variable(input).float().cuda(
            device=opts.device)
        output = model(input_var)
        predJoints = get_preds(output[-2].data.cpu().numpy())[0] * 4
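
get_preds appears in Examples #2 and #7 but is not defined here; the usual contract for such a helper in heatmap-based pose estimation is to return the (x, y) argmax location per joint, and the final multiplication by 4 then maps heatmap coordinates back to the input resolution (assuming a 4x downsampled output). A generic sketch of that pattern, not this project's exact implementation:

import torch

def get_preds_sketch(heatmaps):
    """heatmaps: (batch, n_joints, h, w) -> (batch, n_joints, 2) holding (x, y) argmax."""
    b, j, h, w = heatmaps.shape
    flat_idx = heatmaps.view(b, j, -1).argmax(dim=-1)
    xs = (flat_idx % w).float()
    ys = torch.div(flat_idx, w, rounding_mode='floor').float()
    return torch.stack([xs, ys], dim=-1)

hm = torch.zeros(1, 1, 64, 64)
hm[0, 0, 10, 20] = 1.0                       # peak at row 10, column 20
print(get_preds_sketch(hm))                  # tensor([[[20., 10.]]])
print(get_preds_sketch(hm) * 4)              # mapped to a 256x256 input: [[[80., 40.]]]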