import os

import numpy as np
from scipy import misc  # deprecated since SciPy 1.2; imageio provides imread/imsave replacements

import eval_util


def plot_diff(input_path, gt_path):
    """Color-map a predicted height map, its ground truth, and their absolute
    error, save them as PNGs next to the input, and print error metrics."""
    result_path = os.path.join(
        os.path.dirname(input_path),
        os.path.basename(input_path).replace('.npy', '.png'))
    # palettes are stored as images; take the first row's RGB values
    fire_palette = misc.imread('image/fire_palette.png')[0][:, 0:3]
    error_palette = misc.imread('image/error_palette.jpg')[0][:, 0:3]

    dv_height = np.load(input_path)
    gt_height = np.load(gt_path)

    print('saving results to %s...' % os.path.dirname(result_path))
    color_map = eval_util.getColorMapFromPalette(dv_height, fire_palette)
    misc.imsave(result_path.replace('.png', '_dv.png'), color_map)
    color_map = eval_util.getColorMapFromPalette(gt_height, fire_palette)
    misc.imsave(result_path.replace('.png', '_gt.png'), color_map)
    # absolute error, color-mapped over a fixed 0-10 value range
    color_map = eval_util.getColorMapFromPalette(
        np.abs(dv_height - gt_height), error_palette, 0, 10)
    misc.imsave(result_path.replace('.png', '_error.png'), color_map)

    dv_height_l1e = eval_util.L1E(dv_height, gt_height)
    dv_height_rmse = eval_util.RSME(dv_height, gt_height)
    dv_height_acc = eval_util.accuracy(dv_height, gt_height)
    dv_height_com = eval_util.completeness(dv_height, gt_height)
    print('L1 Error of estimation = %.4f' % dv_height_l1e)
    print('RMSE of estimation = %.4f' % dv_height_rmse)
    print('Accuracy of estimation = %.4f' % dv_height_acc)
    print('Completeness of estimation = %.4f\n' % dv_height_com)
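# eval_util's implementations are not shown in this excerpt. A minimal sketch
# of what L1E and RSME are assumed to compute: mean absolute error and
# root-mean-square error over the two height maps. The real helpers may also
# mask invalid pixels, which this sketch does not.
def l1e_sketch(pred, gt):
    """Mean absolute (L1) error between prediction and ground truth."""
    return np.mean(np.abs(pred - gt))


def rmse_sketch(pred, gt):
    """Root-mean-square error between prediction and ground truth."""
    return np.sqrt(np.mean((pred - gt) ** 2))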
# Shared imports assumed by the training/validation loops below (they come
# from the surrounding training code, which is not shown in this excerpt):
import torch
import torch.nn as nn
from apex import amp  # NVIDIA apex; only needed for the mixed-precision path


def train_epoch(self, epoch, printer=print):
    top1 = AverageMeter()
    top5 = AverageMeter()
    losses = AverageMeter()
    cur_lr = self.optimizer.param_groups[0]['lr']

    self.model.train()
    prefetcher = data_prefetcher(self.train_loader)
    X, y = prefetcher.next()
    i = 0
    while X is not None:
        i += 1
        N = X.size(0)
        self.steps += 1

        logits, aux_logits = self.model(X)
        loss = self.criterion(logits, y)
        if self.use_aux:
            # auxiliary head: add the weighted auxiliary classifier loss
            loss += self.config.aux_weight * self.criterion(aux_logits, y)

        self.optimizer.zero_grad()
        if self.opt_level == 'O0':
            # pure FP32: ordinary backward pass
            loss.backward()
        else:
            # mixed precision via apex.amp: scale the loss to avoid underflow
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        # note: under apex O1/O2 the apex docs clip amp.master_params(optimizer)
        # rather than model.parameters(); kept as in the original here
        nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_clip)
        self.optimizer.step()

        prec1, prec5 = accuracy(logits, y, topk=(1, 5))
        losses.update(loss.item(), N)
        top1.update(prec1.item(), N)
        top5.update(prec5.item(), N)

        if self.steps % self.log_step == 0 and self.rank == 0:
            self.writer.add_scalar('train/lr', round(cur_lr, 5), self.steps)
            self.writer.add_scalar('train/loss', loss.item(), self.steps)
            self.writer.add_scalar('train/top1', prec1.item(), self.steps)
            self.writer.add_scalar('train/top5', prec5.item(), self.steps)

        if self.gpu == 0 and (i % self.config.print_freq == 0
                              or i == len(self.train_loader) - 1):
            printer(
                f'Train: Epoch: [{epoch}][{i}/{len(self.train_loader) - 1}]\t'
                f'Step {self.steps}\t'
                f'lr {round(cur_lr, 5)}\t'
                f'Loss {losses.val:.4f} ({losses.avg:.4f})\t'
                f'Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})')

        X, y = prefetcher.next()

    if self.gpu == 0:
        printer("Train: [{:3d}/{}] Final Prec@1 {:.4%}".format(
            epoch, self.total_epochs - 1, top1.avg))
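# AverageMeter is used throughout these loops but defined elsewhere. A minimal
# sketch following the common PyTorch-examples pattern (latest value, running
# sum, count, running average); the project's own class may differ.
class AverageMeter:
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # val is a per-batch statistic, n the number of samples it covers
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count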
def train(train_loader, model, optimizer, criterion, epoch):
    top1 = AverageMeter()
    top5 = AverageMeter()
    losses = AverageMeter()

    cur_step = epoch * len(train_loader)
    cur_lr = optimizer.param_groups[0]['lr']
    logger.info("Epoch {} LR {}".format(epoch, cur_lr))
    writer.add_scalar('train/lr', cur_lr, cur_step)

    model.train()

    for step, (X, y) in enumerate(train_loader):
        X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
        N = X.size(0)

        optimizer.zero_grad()
        logits, aux_logits = model(X)
        loss = criterion(logits, y)
        if config.aux_weight > 0.:
            # auxiliary head: add the weighted auxiliary classifier loss
            loss += config.aux_weight * criterion(aux_logits, y)
        loss.backward()
        # gradient clipping
        nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
        optimizer.step()

        prec1, prec5 = accuracy(logits, y, topk=(1, 5))
        losses.update(loss.item(), N)
        top1.update(prec1.item(), N)
        top5.update(prec5.item(), N)

        if step % config.print_freq == 0 or step == len(train_loader) - 1:
            logger.info(
                "Train: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} "
                "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
                    epoch + 1, config.epochs, step, len(train_loader) - 1,
                    losses=losses, top1=top1, top5=top5))

        writer.add_scalar('train/loss', loss.item(), cur_step)
        writer.add_scalar('train/top1', prec1.item(), cur_step)
        writer.add_scalar('train/top5', prec5.item(), cur_step)
        cur_step += 1

    logger.info("Train: [{:3d}/{}] Final Prec@1 {:.4%}".format(
        epoch + 1, config.epochs, top1.avg))
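# accuracy(...) is also defined elsewhere. A minimal sketch of the standard
# top-k helper; it returns fractions in [0, 1], which matches the ':.1%'
# formatting used by the loggers above. The project's version may differ.
def accuracy(output, target, topk=(1,)):
    """Compute precision@k for each k in topk."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()  # shape (maxk, batch)
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(1.0 / batch_size))
    return res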
def val_epoch(self, epoch, printer):
    top1 = AverageMeter()
    top5 = AverageMeter()
    losses = AverageMeter()

    self.model.eval()
    prefetcher = data_prefetcher(self.valid_loader)
    X, y = prefetcher.next()
    i = 0
    with torch.no_grad():
        while X is not None:
            N = X.size(0)
            i += 1

            logits, _ = self.model(X)
            loss = self.criterion(logits, y)

            prec1, prec5 = accuracy(logits, y, topk=(1, 5))
            losses.update(loss.item(), N)
            top1.update(prec1.item(), N)
            top5.update(prec5.item(), N)

            if self.rank == 0 and (i % self.config.print_freq == 0
                                   or i == len(self.valid_loader) - 1):
                printer(
                    f'Valid: Epoch: [{epoch}][{i}/{len(self.valid_loader) - 1}]\t'
                    f'Step {self.steps}\t'
                    f'Loss {losses.avg:.4f}\t'
                    f'Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})')

            X, y = prefetcher.next()

    if self.rank == 0:
        self.writer.add_scalar('val/loss', losses.avg, self.steps)
        self.writer.add_scalar('val/top1', top1.avg, self.steps)
        self.writer.add_scalar('val/top5', top5.avg, self.steps)
        printer("Valid: [{:3d}/{}] Final Prec@1 {:.4%}".format(
            epoch, self.total_epochs - 1, top1.avg))

    return top1.avg
def validate(valid_loader, model, criterion, epoch, cur_step):
    top1 = AverageMeter()
    top5 = AverageMeter()
    losses = AverageMeter()

    model.eval()

    with torch.no_grad():
        for step, (X, y) in enumerate(valid_loader):
            X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
            N = X.size(0)

            logits, _ = model(X)
            loss = criterion(logits, y)

            prec1, prec5 = accuracy(logits, y, topk=(1, 5))
            losses.update(loss.item(), N)
            top1.update(prec1.item(), N)
            top5.update(prec5.item(), N)

            if step % config.print_freq == 0 or step == len(valid_loader) - 1:
                logger.info(
                    "Valid: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} "
                    "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
                        epoch + 1, config.epochs, step, len(valid_loader) - 1,
                        losses=losses, top1=top1, top5=top5))

    writer.add_scalar('val/loss', losses.avg, cur_step)
    writer.add_scalar('val/top1', top1.avg, cur_step)
    writer.add_scalar('val/top5', top5.avg, cur_step)

    logger.info("Valid: [{:3d}/{}] Final Prec@1 {:.4%}".format(
        epoch + 1, config.epochs, top1.avg))

    return top1.avg
def train_epoch(self, epoch, printer=print):
    top1 = AverageMeter()
    top5 = AverageMeter()
    losses = AverageMeter()
    cur_lr = self.lr_scheduler.get_last_lr()[0]

    self.model.print_alphas(self.logger)
    self.model.train()

    # Bi-level search: train and validation batches are consumed in lockstep,
    # so both loaders are assumed to yield the same number of batches.
    prefetcher_trn = data_prefetcher(self.train_loader)
    prefetcher_val = data_prefetcher(self.valid_loader)
    trn_X, trn_y = prefetcher_trn.next()
    val_X, val_y = prefetcher_val.next()
    i = 0
    while trn_X is not None:
        i += 1
        N = trn_X.size(0)
        self.steps += 1

        # architect step: update architecture parameters (alpha) on the
        # validation batch via the DARTS unrolled approximation
        self.alpha_optim.zero_grad()
        self.architect.unrolled_backward(trn_X, trn_y, val_X, val_y, cur_lr,
                                         self.w_optim)
        self.alpha_optim.step()

        # child network step: update network weights (w) on the training batch
        self.w_optim.zero_grad()
        logits = self.model(trn_X)
        loss = self.model.criterion(logits, trn_y)
        loss.backward()
        nn.utils.clip_grad_norm_(self.model.weights(), self.config.w_grad_clip)
        self.w_optim.step()

        prec1, prec5 = accuracy(logits, trn_y, topk=(1, 5))
        losses.update(loss.item(), N)
        top1.update(prec1.item(), N)
        top5.update(prec5.item(), N)

        if self.steps % self.log_step == 0:
            self.writer.add_scalar('train/lr', round(cur_lr, 5), self.steps)
            self.writer.add_scalar('train/loss', loss.item(), self.steps)
            self.writer.add_scalar('train/top1', prec1.item(), self.steps)
            self.writer.add_scalar('train/top5', prec5.item(), self.steps)

        if i % self.config.print_freq == 0 or i == len(self.train_loader) - 1:
            printer(
                f'Train: Epoch: [{epoch}][{i}/{len(self.train_loader) - 1}]\t'
                f'Step {self.steps}\t'
                f'lr {round(cur_lr, 5)}\t'
                f'Loss {losses.val:.4f} ({losses.avg:.4f})\t'
                f'Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})')

        trn_X, trn_y = prefetcher_trn.next()
        val_X, val_y = prefetcher_val.next()

    printer("Train: [{:3d}/{}] Final Prec@1 {:.4%}".format(
        epoch, self.total_epochs - 1, top1.avg))
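# data_prefetcher is used by the prefetching loops above but not defined in
# this excerpt. A minimal sketch in the style of the NVIDIA apex ImageNet
# example: it overlaps host-to-device copies with compute on a side CUDA
# stream and yields (None, None) once the loader is exhausted, which is the
# sentinel the `while X is not None` loops rely on.
class data_prefetcher:
    def __init__(self, loader):
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        self.preload()

    def preload(self):
        try:
            self.next_X, self.next_y = next(self.loader)
        except StopIteration:
            self.next_X, self.next_y = None, None
            return
        with torch.cuda.stream(self.stream):
            # issue the copies asynchronously on the side stream
            self.next_X = self.next_X.cuda(non_blocking=True)
            self.next_y = self.next_y.cuda(non_blocking=True)

    def next(self):
        # make the compute stream wait for the async copies to land
        torch.cuda.current_stream().wait_stream(self.stream)
        X, y = self.next_X, self.next_y
        if X is not None:
            self.preload()
        return X, y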