def validate(model, val_loader, device, min_depth, max_depth, cfg):
    model.eval()
    metric = Metrics(max_depth=max_depth)
    score = AverageMeter()    # MAE
    score_1 = AverageMeter()  # RMSE
    with torch.no_grad():
        for _, inputs in tqdm(enumerate(val_loader)):
            rgb = inputs['color'].to(device) * 255.
            sdepth = inputs['depth_gt'].to(device)
            if cfg.colorize:
                depth_color = inputs['depth_color'].to(device) * 255.
                depth_in = torch.cat([sdepth, depth_color], 1)
            else:
                depth_in = sdepth
            mask = (sdepth > 0).float()
            output, _ = model(depth_in, mask, rgb)
            if use_norm_depth:  # module-level flag
                # network predicts normalized depth; map back to metric range
                output = torch.clamp(output, 0, 1.0)
                output = min_depth + output * (max_depth - min_depth)
            else:
                output = torch.clamp(output, min_depth, max_depth)
            output = output[:, 0:1].detach().cpu()
            gt = inputs['depth_sd_gt']
            metric.calculate(output, gt)
            score.update(metric.get_metric('mae'), metric.num)
            score_1.update(metric.get_metric('rmse'), metric.num)
    model.train()
    return score.avg, score_1.avg
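
# The snippets in this section rely on an AverageMeter accumulator that is
# not defined here. Below is a minimal sketch with the usual update(value, n)
# running average; `.average` is aliased to `.avg` because the classifier
# validator further down uses that spelling. This is a hypothetical helper --
# the repos' own implementations may differ.
class AverageMeter:
    """Tracks the latest value and a count-weighted running average."""

    def __init__(self):
        self.val = 0.0    # most recent value passed to update()
        self.sum = 0.0    # weighted sum of all values
        self.count = 0    # total weight seen so far
        self.avg = 0.0    # running average: sum / count

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    @property
    def average(self):
        return self.avg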
def validate(self):
    self.model.eval()
    valid_loss = AverageMeter()
    valid_acc = AccuracyMeter()
    # torch.no_grad() replaces the deprecated Variable(..., volatile=True)
    with torch.no_grad():
        for i, (x, y) in enumerate(self.valid_loader):
            y = y.long()
            if self.use_cuda:
                x = x.cuda()
                y = y.cuda()
            output = self.model(x)
            loss = F.cross_entropy(output, y)
            valid_loss.update(loss.item(), x.size(0))
            y_pred = output.argmax(dim=1)
            correct = int(y_pred.eq(y).sum().item())
            valid_acc.update(correct, x.size(0))
    print('\nValidation: average loss: {:.6f}, accuracy: {:.4f}\n'.format(
        valid_loss.average, valid_acc.accuracy))
    return valid_loss.average, valid_acc.accuracy
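
# `validate` above also assumes an AccuracyMeter. A minimal sketch matching
# the update(correct, total) / .accuracy usage; hypothetical, the actual
# implementation may track more state.
class AccuracyMeter:
    """Accumulates correct-prediction counts into a running accuracy."""

    def __init__(self):
        self.correct = 0
        self.total = 0

    def update(self, correct, total):
        self.correct += correct
        self.total += total

    @property
    def accuracy(self):
        return self.correct / max(self.total, 1)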
seg_loss_1 = seg_criterion_1(seg_logits, masks)
seg_loss_2 = seg_criterion_2(seg_logits, masks)
loss = W1 * clf_loss + W2 * seg_loss_1 + W3 * seg_loss_2
if GRAD_ACCUM > 1:
    # scale the loss so gradients average over the accumulation window
    loss = loss / GRAD_ACCUM
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()
if it % GRAD_ACCUM == 0:
    optimizer.step()
    optimizer.zero_grad()

train_clf_labels.append(classes.cpu().detach().numpy())
train_clf_preds.append(clf_logits.sigmoid().cpu().detach().numpy())
clf_loss_meter.update(clf_loss.cpu().detach().numpy())
seg_loss_1_meter.update(seg_loss_1.cpu().detach().numpy())
seg_loss_2_meter.update(seg_loss_2.cpu().detach().numpy())
seg_metric_meter.update(masks, clf_logits, seg_logits)

dt = (datetime.now() - t0).total_seconds()
message = train_running_message.format(epoch + 1, it, dt,
                                       clf_loss_meter.avg,
                                       seg_loss_1_meter.avg,
                                       seg_loss_2_meter.avg)
print(message, end='', flush=True)

# end of epoch: aggregate classification predictions and compute metrics
train_clf_preds = np.vstack(train_clf_preds)
train_clf_labels = np.vstack(train_clf_labels)
scores = get_auc_scores(train_clf_labels, train_clf_preds)
clf_thresholds, seg_thresholds, dice_score, results = seg_metric_meter.get_scores(
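
# `get_auc_scores` is undefined in the fragment above. A plausible sketch for
# this multi-label setting: per-class ROC-AUC over the stacked (N, C) label
# and prediction arrays. This is an assumption about its behavior, and it
# pulls in scikit-learn, which the original may not use.
import numpy as np
from sklearn.metrics import roc_auc_score

def get_auc_scores(labels, preds):
    """Per-class ROC-AUC for (N, C) binary labels and predicted probabilities."""
    scores = []
    for c in range(labels.shape[1]):
        if len(np.unique(labels[:, c])) < 2:
            # AUC is undefined when a class has only one label value
            scores.append(float('nan'))
        else:
            scores.append(roc_auc_score(labels[:, c], preds[:, c]))
    return scores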
start_epoch += 1  # resume from the epoch after the checkpoint
for epoch in range(start_epoch, args.epochs):
    end = time.time()  # batch-time reference; may already be set in the full script
    for step, (image, label) in enumerate(train_loader):
        image = image.cuda()
        label = label.cuda()
        pred = model(image, label)
        loss = loss_fn(criterion, label, pred, args.n_classes)
        acc = accuracy(pred, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()  # per-iteration schedule (e.g. warmup/cosine)
        losses.update(loss.item(), image.size(0))
        batch_time.update(time.time() - end)
        acc_score.update(acc)
        gap_score.update(GAP(pred, label))
        end = time.time()
        if step % args.log_freq == 0:
            print(f'epoch : {epoch} step : [{step}/{len(train_loader)}]\t'
                  f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  f'loss {losses.val:.4f} ({losses.avg:.4f})\t'
                  f'acc {acc_score.val:.4f} ({acc_score.avg:.4f})\t'
                  f'gap {gap_score.val:.4f} ({gap_score.avg:.4f})')

    # validation
    model.eval()
    val_start = time.time()
    val_time = 0
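
# The training loop above calls GAP(pred, label), presumably Global Average
# Precision (micro-AP) as used in the Google Landmark challenges. A per-batch
# sketch under that assumption: rank predictions by confidence, then average
# the running precision at each correct hit over the number of samples.
import torch

def GAP(logits, labels):
    """Global Average Precision for a batch of (N, C) logits and (N,) labels."""
    conf, pred = logits.softmax(dim=1).max(dim=1)
    order = conf.argsort(descending=True)
    pred, labels = pred[order], labels[order]
    correct = (pred == labels).float()
    # precision at each rank, counted only where the prediction is correct
    ranks = torch.arange(1, len(correct) + 1, device=correct.device)
    prec_at_hit = correct.cumsum(0) / ranks
    return (prec_at_hit * correct).sum().item() / max(len(correct), 1)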
def run(self):
    self.net.eval()
    for loader in [self.test_loader]:
        for batch_idx, inputs in tqdm(enumerate(loader)):
            depth_gt = inputs['depth_gt'].float()
            depth_sd_gt = inputs['depth_sd_gt'].float()

            # reduce scan lines: reproject the depth map to a point cloud,
            # drop scan lines, then rasterize back to a sparse depth map
            depth_map = depth_gt[0][0].numpy()
            pc, coord = project_depth_to_points(depth_map)
            reorg_pc, reorg_coord = restore_scan_line(pc, coord,
                                                      verbose=not self.opt.dump)
            reduced_pc, reduced_coord = reduce_scan_line(reorg_pc, reorg_coord,
                                                         step=4)
            # reduced_pc, reduced_coord = sample_scan_line(reorg_pc, reorg_coord, ratio=0.1)
            print('pc:', pc.shape[0], '->', reduced_pc.shape[0])
            reduced_depth = restore_depth_map(reduced_pc, reduced_coord,
                                              [self.opt.crop_h, self.opt.crop_w])
            reduced_depth = torch.from_numpy(reduced_depth).float()
            reduced_depth = reduced_depth.unsqueeze(0).unsqueeze(0)

            raw = reduced_depth.to(self.device)
            rgb = inputs['color'].float().to(self.device) * 255.0

            # bottom-crop to the network input size; width is already
            # crop_w (1216), so only the height needs cropping
            assert raw.size()[2:] == rgb.size()[2:]
            h, w = raw.size()[2:]
            assert h >= self.crop_h
            assert w == self.crop_w
            h_cropped = h - self.crop_h
            depth_gt = depth_gt[:, :, h_cropped:h, 0:self.crop_w]
            depth_sd_gt = depth_sd_gt[:, :, h_cropped:h, 0:self.crop_w]
            raw = raw[:, :, h_cropped:h, 0:self.crop_w]
            rgb = rgb[:, :, h_cropped:h, 0:self.crop_w]

            mask = (raw > 0).float()
            output, _ = self.net(raw, mask, rgb)
            if use_norm_depth:
                # network predicts normalized depth; map back to metric range
                output = torch.clamp(output, min=0, max=1.0)
                output = restore_depth(output, self.opt.min_depth,
                                       self.opt.max_depth)
            else:
                output = torch.clamp(output, min=self.opt.min_depth,
                                     max=self.opt.max_depth)
            output = output[:, 0:1].detach().cpu()

            metric = Metrics(max_depth=self.opt.max_depth)
            mae = AverageMeter()
            rmse = AverageMeter()
            metric.calculate(output, depth_sd_gt)
            mae.update(metric.get_metric('mae'), metric.num)
            rmse.update(metric.get_metric('rmse'), metric.num)
            # report in millimetres
            print('model: mae {} rmse {}'.format(int(1000 * mae.avg),
                                                 int(1000 * rmse.avg)))

            if not self.opt.dump:
                plot3d(reduced_pc)
                fig = plt.figure(num=batch_idx, figsize=(8, 10))
                plt_img(fig, 4, 1, 1, plt, inputs['color'][0], 'color')
                plt_img(fig, 4, 1, 2, plt, depth_gt[0], 'depth')
                plt_img(fig, 4, 1, 3, plt, raw.cpu()[0], 'depth')
                plt_img(fig, 4, 1, 4, plt, output[0], 'depth')
                plt.tight_layout()
                plt.show()
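
# `restore_depth` maps a normalized [0, 1] prediction back to metric depth.
# A minimal sketch, consistent with the inline denormalization
# `min_depth + output * (max_depth - min_depth)` used in `validate` at the
# top of this section.
def restore_depth(output, min_depth, max_depth):
    """Invert [0, 1] depth normalization back to [min_depth, max_depth]."""
    return min_depth + output * (max_depth - min_depth)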