def eval(args):
    """Distributed single-scale evaluation of HighOrder on the CityScapes val split.

    One process per GPU (launched with ``--local_rank``). Each rank evaluates its
    shard of the val set, confusion matrices are summed across ranks once at the
    end, and mIoU is printed.

    Args:
        args: namespace with ``local_rank`` (int) — this process's GPU index.
    """
    torch.cuda.set_device(args.local_rank)
    # BUG FIX: rank must be this process's rank, not a hard-coded 0 — with
    # rank=0 every worker claims the same rank and init_process_group
    # deadlocks/misbehaves. Matches the ADE20K eval in this file.
    dist.init_process_group(backend='nccl',
                            init_method='tcp://127.0.0.1:{}'.format(config_CS.port),
                            world_size=torch.cuda.device_count(),
                            rank=args.local_rank)

    dataset = CityScapes(mode='val')
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            shuffle=False,
                            sampler=sampler,
                            num_workers=4,
                            pin_memory=True,
                            drop_last=False)

    net = HighOrder(19)  # 19 CityScapes classes
    net.cuda()
    net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
    net = nn.parallel.DistributedDataParallel(net,
                                              device_ids=[args.local_rank],
                                              output_device=args.local_rank)
    net.load_state_dict(torch.load('./Res60000.pth'))
    net.eval()

    num = 0
    hist = 0  # local (per-rank) confusion-matrix accumulator
    with torch.no_grad():
        # Iterate the loader directly instead of iter()/next() guarded by a
        # bare except:, which silently swallowed *any* error, not just
        # StopIteration.
        for image, label, name in dataloader:
            image = image.cuda()
            label = torch.squeeze(label.cuda(), 1)

            output = net(image)
            pred = output.max(dim=1)[1]

            # BUG FIX: accumulate locally and all_reduce ONCE after the loop.
            # The original all_reduce'd the running total every iteration and
            # kept accumulating into the reduced tensor, so earlier images
            # were re-summed across ranks each step (over-counted by a factor
            # of world_size per extra reduce). It also re-wrapped the tensor
            # with torch.tensor(hist) every iteration.
            hist = hist + fast_hist(label, pred)

            num += 1
            if num % 50 == 0:
                print('iter :{}'.format(num))

    dist.all_reduce(hist, dist.ReduceOp.SUM)
    hist = hist.cpu().numpy().astype(np.float32)
    miou = cal_scores(hist)
    print('miou = {}'.format(miou))
def main(ckp_name='final.pth'):
    """Validate the given checkpoint, logging running scores every 10 batches
    and the full overall + per-class scores at the end."""
    sess = Session(dt_split='val')
    sess.load_checkpoints(ckp_name)
    sess.net.eval()

    for idx, [image, label] in enumerate(sess.dataloader):
        sess.inf_batch(image, label)
        if idx % 10 == 0:
            # Periodic progress report with the scores accumulated so far.
            logger.info('num-%d' % idx)
            interim, _ = cal_scores(sess.hist.cpu().numpy())
            for key, value in interim.items():
                logger.info('%s-%f' % (key, value))

    # Final pass over the completed histogram.
    final_scores, per_class_iu = cal_scores(sess.hist.cpu().numpy())
    for key, value in final_scores.items():
        logger.info('%s-%f' % (key, value))
    logger.info('')
    for key, value in per_class_iu.items():
        logger.info('%s-%f' % (key, value))
def eval_epoch(writer, iterations):
    """Validate the latest checkpoint mid-training and push mIoU / pixel
    accuracy to TensorBoard at the given iteration count."""
    sess = Session(dt_split='val')
    sess.load_checkpoints('latest.pth')
    sess.net.eval()

    for idx, [image, label] in enumerate(sess.dataloader):
        sess.inf_batch(image, label)
        if idx % 10 == 0:
            # Periodic progress report with the scores accumulated so far.
            logger.info('num-%d' % idx)
            interim, _ = cal_scores(sess.hist.cpu().numpy())
            for key, value in interim.items():
                logger.info('%s-%f' % (key, value))

    # Final pass over the completed histogram.
    scores, per_class_iu = cal_scores(sess.hist.cpu().numpy())
    for key, value in scores.items():
        logger.info('%s-%f' % (key, value))
    logger.info('')
    for key, value in per_class_iu.items():
        logger.info('%s-%f' % (key, value))

    writer.add_scalar('mIoU', scores['mIoU'], iterations)
    writer.add_scalar('Acc', scores['pAcc'], iterations)
def eval_by_steps(ckp_name):
    """Run a full validation pass for a checkpoint and return its mIoU."""
    sess = EvalSession('val')
    sess.load_checkpoints(ckp_name)
    sess.net.eval()

    # Plain iteration — the original enumerate index was discarded.
    for image, label in sess.dataloader:
        sess.inf_batch(image, label)

    logger.info('')
    logger.info('Total Scores')
    scores, per_class_iu = cal_scores(sess.hist.cpu().numpy())
    for key, value in scores.items():
        logger.info('%s-%f' % (key, value))

    logger.info('')
    logger.info('Class Scores')
    for key, value in per_class_iu.items():
        logger.info('%s-%f' % (key, value))

    return scores['mIoU']
def val(self, prefix='', logging=False, steps=None):
    """Validate the current model, track the best mIoU, and write scalars.

    Runs inference over the whole val loader, scores the accumulated
    confusion matrix, saves a checkpoint whenever mIoU improves, and logs
    every score to the summary writer under ``prefix``.

    Args:
        prefix: string prepended to each scalar tag written to the writer.
        steps: global step for the scalar write; defaults to ``self.step``.
        logging: when True, also emit full total / per-class scores via the
            logger (and — see NOTE below — the net is left in eval mode).
    """
    self.net.eval()
    hist = 0
    # Accumulate the confusion matrix over the whole validation set.
    # NOTE(review): assumes infer_batch returns a tensor (0 + tensor works
    # on the first iteration) — confirm against infer_batch's definition.
    for image, label in self.val_loader:
        hist += self.infer_batch(image, label)
    scores, cls_iu = cal_scores(hist.cpu().numpy())
    mIoU = scores['mIoU']
    if mIoU > self.best_mIoU:
        # New best — remember it and checkpoint under a split-specific name.
        self.best_mIoU = mIoU
        if self.split == 'trainval':
            path = 'best.pth'
        else:
            path = 'best_trainaug.pth'
        self.save_checkpoints(path)
    # Fall back to the session's current training step when none is given.
    steps = self.step if steps is None else steps
    for k, v in scores.items():
        self.writer.add_scalar(prefix + k, v, steps)
    if logging:
        logger.info('')
        logger.info('Total Scores')
        for k, v in scores.items():
            logger.info('%s-%f' % (k, v))
        logger.info('')
        logger.info('Class Scores')
        for k, v in cls_iu.items():
            logger.info('%s-%f' % (k, v))
    else:
        # NOTE(review): train mode is restored ONLY on the non-logging path;
        # with logging=True the net stays in eval mode after this call.
        # Looks intentional (logging used for final eval?) — confirm.
        self.net.train()
def eval(args):
    """Distributed multi-scale (optionally flipped) evaluation of PANet on ADE20K val.

    One process per GPU (launched with ``--local_rank``). For each image the
    softmax outputs over ``config.eval_scales`` (and their horizontal flips
    when ``config.eval_flip``) are averaged before the argmax. Per-rank
    confusion matrices are summed across ranks once at the end and mIoU is
    printed.

    Args:
        args: namespace with ``local_rank`` (int) — this process's GPU index.
    """
    torch.cuda.set_device(args.local_rank)
    dist.init_process_group(backend='nccl',
                            init_method='tcp://127.0.0.1:{}'.format(config.port),
                            world_size=torch.cuda.device_count(),
                            rank=args.local_rank)

    dataset = ADE20K(mode='val')
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            shuffle=False,
                            sampler=sampler,
                            num_workers=4,
                            drop_last=False,
                            pin_memory=True)

    net = PANet(150)  # 150 ADE20K classes
    net.cuda()
    net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
    net = nn.parallel.DistributedDataParallel(net,
                                              device_ids=[args.local_rank],
                                              output_device=args.local_rank)
    net.load_state_dict(
        torch.load('./GPADE20Kres50150000.pth', map_location='cpu'))
    net.eval()

    palette = get_palette(256)  # only used by the disabled save path below

    num = 0
    hist = 0  # local (per-rank) confusion-matrix accumulator
    with torch.no_grad():
        # Iterate the loader directly instead of iter()/next() guarded by a
        # bare except:, which silently swallowed *any* error, not just
        # StopIteration.
        for image, label, name in dataloader:
            image = image.cuda()
            label = torch.squeeze(label.cuda(), 1)

            N, _, H, W = image.size()
            preds = torch.zeros((N, 150, H, W))
            preds = preds.cuda()
            for scale in config.eval_scales:
                new_hw = [int(H * scale), int(W * scale)]
                image_change = F.interpolate(image, new_hw, mode='bilinear',
                                             align_corners=True)
                output, w = net(image_change)
                output = F.interpolate(output, (H, W), mode='bilinear',
                                       align_corners=True)
                preds += F.softmax(output, 1)
                if config.eval_flip:
                    # Horizontal flip TTA: flip input, un-flip the output.
                    output, w = net(torch.flip(image_change, dims=(3,)))
                    output = torch.flip(output, dims=(3,))
                    output = F.interpolate(output, (H, W), mode='bilinear',
                                           align_corners=True)
                    preds += F.softmax(output, 1)

            pred = preds.max(dim=1)[1]
            # BUG FIX: accumulate locally and all_reduce ONCE after the loop.
            # The original all_reduce'd the running total every iteration and
            # kept accumulating into the reduced tensor, so earlier images
            # were re-summed across ranks each step (over-counted by a factor
            # of world_size per extra reduce). It also re-wrapped the tensor
            # with torch.tensor(hist) every iteration.
            hist = hist + fast_hist(label, pred)

            num += 1
            if num % 5 == 0:
                print('iter: {}'.format(num))

            # Disabled: colourised prediction dump (kept for reference).
            # preds_np = np.asarray(np.argmax(preds.cpu(), axis=1), dtype=np.uint8)
            # for i in range(preds_np.shape[0]):
            #     pred_img = convert_label(preds_np[i], inverse=True)
            #     save_img = Image.fromarray(pred_img)
            #     save_img.putpalette(palette)
            #     save_img.save(os.path.join('./CS_results/', name[i] + '.png'))

    dist.all_reduce(hist, dist.ReduceOp.SUM)
    hist = hist.cpu().numpy().astype(np.float32)
    miou = cal_scores(hist)
    print('miou = {}'.format(miou))