def validate(self, epoch):
    """Run one evaluation pass over the test set and checkpoint on improvement.

    Averages the loss and the per-vertex distance (mm) over the test loader,
    logs both scalars, prints a summary, and — whenever the distance metric
    beats the best seen so far — saves a "best" checkpoint and records the
    epoch number in a `best_epoch` file under the log directory.

    Args:
        epoch: current epoch index, used for logging and checkpoint bookkeeping.
    """
    loss_meter = AverageMeter()
    metrics = {
        'val_dist': AverageMeter(),  # per vertex distance in mm
    }

    self.model.eval()
    with torch.no_grad():
        for _, batch in enumerate(self.test_loader):
            outputs, loss = self.one_step(batch)
            loss_meter.update(loss.item(), batch[0].shape[0])
            self.update_metrics(metrics, batch, outputs)
            self.visualize_batch(batch, outputs, epoch)

    dist_avg = metrics['val_dist'].avg
    self.logger.add_scalar("val/loss", loss_meter.avg, epoch)
    self.logger.add_scalar("val/dist", dist_avg, epoch)

    print("VALIDATION")
    print("Epoch {}, loss: {:.4f}, dist: {:.4f} mm".format(
        epoch, loss_meter.avg, dist_avg))

    # Keep track of the best model by validation distance.
    if dist_avg < self.best_error:
        self.best_error = dist_avg
        self.best_epoch = epoch
        self.save_ckpt_best()
        with open(os.path.join(self.log_dir, 'best_epoch'), 'w') as f:
            f.write("{:04d}".format(epoch))
def step(phase, epoch, opt, dataloader, model, criterion, optimizer=None):
    """Run one epoch over `dataloader`, training or evaluating by `phase`.

    Args:
        phase: 'train' runs backprop + optimizer steps; any other value only
            evaluates (model is switched to eval mode).
        epoch: current epoch index; also drives the curriculum sequence length.
        opt: option namespace — uses expID, device, dataset, preSeqLen,
            outputRes, MinSeqLenIndex, MaxSeqLenIndex.
        dataloader: yields dicts with keys 'input', 'label', 'gtpts',
            'center', 'scale'.
        model: network mapping the first input frame to per-frame heatmaps.
        criterion: loss applied to (output, label).
        optimizer: required when phase == 'train'; unused otherwise.

    Returns:
        (Loss.val, Acc_tot.avg): the last recorded batch loss (the per-batch
        meters are reset every iteration, so Loss.val reflects the final
        batch only) and the epoch-wide average accuracy.
    """
    # Choose the phase (eval mode disables Dropout / BatchNorm updates)
    if phase == 'train':
        model.train()
    else:
        model.eval()
    # Per-batch meters (reset inside the loop) plus an epoch-wide accuracy meter
    Loss, Err, Acc = AverageMeter(), AverageMeter(), AverageMeter()
    Acc_tot = AverageMeter()
    # Curriculum: how many sequence frames are scored at this epoch
    seqlen = set_sequence_length(opt.MinSeqLenIndex, opt.MaxSeqLenIndex, epoch)
    # Show iteration progress using Bar
    nIters = len(dataloader)
    bar = Bar(f'{opt.expID}', max=nIters)
    # Loop over the dataloader
    for i, gt in enumerate(dataloader):
        # Unpack the ground-truth batch dict
        input, label = gt['input'], gt['label']
        gtpts, center, scale = gt['gtpts'], gt['center'], gt['scale']
        # Only the first frame of the input sequence is fed to the model
        input_var = input[:, 0, ].float().cuda(device=opt.device, non_blocking=True)
        label_var = label.float().cuda(device=opt.device, non_blocking=True)
        # NOTE(review): per-batch reset means the bar and the returned Loss.val
        # report only the most recent batch — confirm this is intended.
        Loss.reset()
        Err.reset()
        Acc.reset()
        ### if it is 3D, may need the nOutput to get the different target, not just only the heatmap
        ## Forward propagation
        output = model(input_var)
        ## Get model outputs and calculate loss
        loss = criterion(output, label_var)
        ## Backward + optimize only if in training phase
        if phase == 'train':
            ## Zero the parameter gradients
            optimizer.zero_grad()
            loss.mean().backward()
            optimizer.step()
        Loss.update(loss.sum())
        ## Compute accuracy on the frames enabled by the curriculum (j <= seqlen)
        ref = get_ref(opt.dataset, scale)
        for j in range(opt.preSeqLen):
            if j <= seqlen:
                pred_hm = get_preds(output[:, j, ].float())
                # Map heatmap-space peaks back to original image coordinates
                pred_pts = original_coordinate(pred_hm, center[:, ], scale, opt.outputRes)
                err, ne = error(pred_pts, gtpts[:, j, ], ref)
                acc, na = accuracy(pred_pts, gtpts[:, j, ], ref)
                Err.update(err)
                Acc.update(acc)
                Acc_tot.update(acc)
        Bar.suffix = f'{phase}[{epoch}][{i}/{nIters}]|Total:{bar.elapsed_td}' \
                     f'|ETA:{bar.eta_td}|Loss:{Loss.val:.6f}|Err:{Err.avg:.6f}|Acc:{Acc.avg:.6f}'
        bar.next()
    bar.finish()
    return Loss.val, Acc_tot.avg
def train(self, epoch):
    """Train the model for one epoch.

    Runs one optimizer step per batch from ``self.train_loader``, logs the
    per-iteration loss under "train/loss", and logs the epoch-average loss
    under "train_epoch/loss".

    Args:
        epoch: current epoch index, used as the step for epoch-level logging.
    """
    epoch_loss = AverageMeter()
    self.model.train()
    for i, inputs in enumerate(self.train_loader):
        self.optimizer.zero_grad()
        outputs, loss = self.one_step(inputs)
        loss.backward()
        self.optimizer.step()

        self.logger.add_scalar("train/loss", loss.item(), self.iter_nums)
        print("Iter {}, loss: {:.8f}".format(self.iter_nums, loss.item()))
        # BUGFIX: accumulate the Python scalar, not the loss tensor.
        # Passing the tensor kept every batch's autograd graph alive for the
        # whole epoch (unbounded memory growth) and made epoch_loss.avg a
        # tensor; this also matches how validate() feeds its meter.
        epoch_loss.update(loss.item(), inputs[0].shape[0])
        self.iter_nums += 1
    self.logger.add_scalar("train_epoch/loss", epoch_loss.avg, epoch)
def evaluate(garment_class='skirt', gender='female'):
    """Evaluate TailorNet (or any model for that matter) on test set.

    Computes the mean per-vertex Euclidean distance (in millimetres) between
    predicted and ground-truth garment vertices over the test split, printing
    progress per batch and the final average.

    Args:
        garment_class: garment to evaluate; defaults to 'skirt' (the value
            previously hard-coded here), so ``evaluate()`` is unchanged.
        gender: 'female' or 'male'; defaults to 'female' as before.
    """
    from dataset.static_pose_shape_final import MultiStyleShape
    import torch
    from torch.utils.data import DataLoader
    from utils.eval import AverageMeter
    from models import ops

    dataset = MultiStyleShape(garment_class=garment_class, gender=gender,
                              split='test')
    dataloader = DataLoader(dataset, batch_size=32, num_workers=0,
                            shuffle=False, drop_last=False)
    print(len(dataset))

    val_dist = AverageMeter()
    from models.tailornet_model import get_best_runner as tn_runner
    runner = tn_runner(garment_class, gender)
    # To evaluate the non-TailorNet baseline instead, swap the runner:
    #   from trainer.base_trainer import get_best_runner as baseline_runner
    #   runner = baseline_runner(<path to baseline weights>)

    device = torch.device('cuda:0')
    with torch.no_grad():
        for i, inputs in enumerate(dataloader):
            gt_verts, thetas, betas, gammas, _ = inputs
            # Zero out the input dimensions this garment class does not use.
            thetas, betas, gammas = ops.mask_inputs(thetas, betas, gammas,
                                                    garment_class)
            gt_verts = gt_verts.to(device)
            thetas = thetas.to(device)
            betas = betas.to(device)
            gammas = gammas.to(device)
            pred_verts = runner.forward(
                thetas=thetas, betas=betas, gammas=gammas).view(gt_verts.shape)

            # verts_dist returns metres; report millimetres.
            dist = ops.verts_dist(gt_verts, pred_verts) * 1000.
            val_dist.update(dist.item(), gt_verts.shape[0])
            print(i, len(dataloader))
    print(val_dist.avg)
def train(model, train_loader, optimizer, criterion, v_threshold, f_threshold):
    """Train `model` for one epoch and compute frame-level accuracy.

    Performs one optimizer step per batch while tracking data/compute timing
    and the running loss in a progress bar. Per-sample correctness counts are
    gathered in `frame_dict` (keyed by sample id) and converted to an accuracy
    via `cal_acc`.

    Args:
        model: network to train (moved to GPU in-place).
        train_loader: yields (data, target, idx) batches; idx identifies each
            sample/frame for accuracy bookkeeping.
        optimizer, criterion: optimizer and loss function.
        v_threshold, f_threshold: thresholds forwarded to `cal_acc`.

    Returns:
        (losses.avg, acc): average training loss and the computed accuracy.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    model.cuda()
    model.train()

    end = time.time()
    frame_dict = {}
    bar = Bar('Processing', max=len(train_loader))
    for batch_idx, (data, target, idx) in enumerate(train_loader):
        data_time.update(time.time() - end)

        input_var = data.float().cuda()
        target_var = target.long().cuda()

        optimizer.zero_grad()
        output = model(input_var)
        _, predicted = torch.max(output, 1)
        loss = criterion(output, target_var)
        loss.backward()
        optimizer.step()
        losses.update(loss.item())

        for i in range(len(idx)):
            # BUGFIX: tensors hash by identity, so indexing frame_dict with a
            # tensor element never matched an existing key and every sample got
            # a fresh entry. Unwrap to a plain Python value before keying.
            key = idx[i].item() if torch.is_tensor(idx) else idx[i]
            if key not in frame_dict:
                frame_dict[key] = 0
            if target[i].item() == predicted[i].item():
                frame_dict[key] += 1

        batch_time.update(time.time() - end)
        end = time.time()
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} |' \
                     ' Loss: {loss:.4f}'.format(
                         batch=batch_idx + 1,
                         size=len(train_loader),
                         data=data_time.val,
                         bt=batch_time.val,
                         total=bar.elapsed_td,
                         eta=bar.eta_td,
                         loss=loss.item(),  # format the scalar, not the tensor
                     )
        bar.next()
    bar.finish()

    acc = cal_acc(frame_dict, v_threshold, f_threshold)
    return losses.avg, acc