def evaluate(model, dataloader, epoch, writer, logger, data_name='val'):
    """Run the restoration model over *dataloader* and save recovered images.

    For ``data_name == 'val'`` the samples carry a ground-truth ``label``, so
    per-image PSNR/SSIM are accumulated and the epoch averages are logged.
    For ``data_name == 'test'`` images are only restored and saved.

    Args:
        model: callable taking a sample dict and returning a restored tensor.
        dataloader: iterable of sample dicts with at least ``'path'`` (and
            ``'label'`` for the val split).
        epoch: current epoch number; used in the output directory name.
        writer: summary writer (currently unused; kept for interface parity).
        logger: logger used to report the epoch metrics.
        data_name: ``'val'`` or ``'test'``.

    Returns:
        The average SSIM formatted to 3 decimals for ``'val'``, otherwise ''.

    Raises:
        Exception: if *data_name* is neither ``'val'`` nor ``'test'``.
    """
    save_root = os.path.join(opt.result_dir, opt.tag, str(epoch), data_name)
    utils.try_make_dir(save_root)

    total_psnr = 0.0
    total_ssim = 0.0
    ct_num = 0

    for i, sample in enumerate(dataloader):
        utils.progress_bar(i, len(dataloader), 'Eva... ')
        path = sample['path']
        with torch.no_grad():
            recovered = model(sample)

        if data_name == 'val':
            label = tensor2im(sample['label'])
            recovered = tensor2im(recovered)
            ct_num += 1
            total_psnr += psnr(recovered, label, data_range=255)
            total_ssim += ski_ssim(recovered, label, data_range=255, multichannel=True)
        elif data_name == 'test':
            # Bug fix: the test branch previously left `recovered` as a tensor,
            # so the save below would fail on Image.fromarray.
            recovered = tensor2im(recovered)
        else:
            raise Exception('Unknown dataset name: %s.' % data_name)

        # Save the restored image (exactly once per sample; the original code
        # saved val images twice).
        save_dst = os.path.join(save_root, utils.get_file_name(path[0]) + '.png')
        Image.fromarray(recovered).save(save_dst)

    if data_name == 'val':
        ave_psnr = total_psnr / float(ct_num)
        ave_ssim = total_ssim / float(ct_num)
        # write_loss(writer, f'val/{data_name}', 'psnr', total_psnr / float(ct_num), epochs)
        logger.info(f'Eva({data_name}) epoch {epoch}, psnr: {ave_psnr}.')
        logger.info(f'Eva({data_name}) epoch {epoch}, ssim: {ave_ssim}.')
        return f'{ave_ssim: .3f}'
    else:
        return ''
def evaluate(model, dataloader, epochs, writer, logger, data_name='val'):
    """Evaluate a classifier on *dataloader* and log the (adjusted) error rate.

    For ``data_name == 'val'`` the overall accuracy and a per-class balanced
    error are computed; ``'test'`` is currently a no-op.

    Args:
        model: callable mapping an input batch tensor to class logits.
        dataloader: iterable of dicts with ``'input'``, ``'label'``, ``'path'``.
        epochs: current epoch number; used in the output directory and logs.
        writer: summary writer (currently unused; kept for interface parity).
        logger: logger used to report metrics.
        data_name: ``'val'`` or ``'test'``.

    Returns:
        Overall accuracy rounded to 3 decimals as a string for ``'val'``
        (or '' if the dataloader was empty), otherwise ''.

    Raises:
        Exception: if *data_name* is neither ``'val'`` nor ``'test'``.
    """
    save_root = os.path.join(opt.result_dir, opt.tag, str(epochs), data_name)
    utils.try_make_dir(save_root)

    correct = 0
    ct_num = 0
    counts = defaultdict(int)    # per-class sample counts
    corrects = defaultdict(int)  # per-class correct-prediction counts

    for i, data in enumerate(dataloader):
        if data_name == 'val':
            inputs, labels, path = data['input'], data['label'], data['path']
            utils.progress_bar(i, len(dataloader), 'Eva... ')
            with torch.no_grad():
                # torch.autograd.Variable is deprecated; plain tensors with
                # .to(device) are equivalent (requires_grad defaults to False).
                img_var = inputs.to(device=opt.device)
                label_var = labels.to(device=opt.device)
                predicted = model(img_var)
                _, predicted = torch.max(predicted, 1)

                ct_num += labels.size(0)
                correct += (predicted == label_var).sum().item()
                for idx, gt in enumerate(labels):
                    gt = gt.item()
                    counts[gt] += 1
                    if predicted[idx].item() == gt:
                        corrects[gt] += 1
        elif data_name == 'test':
            pass
        else:
            raise Exception('Unknown dataset name: %s.' % data_name)

    if data_name == 'val':
        if ct_num == 0:
            # Empty dataloader: nothing to report (previously raised
            # ZeroDivisionError).
            return ''
        # Balanced (per-class mean) accuracy -> error rate.
        acc = 0.
        for k in counts:
            acc += corrects[k] / counts[k]
        err = 1 - acc / len(counts)
        err = err + 0.03  # online error rate runs ~0.03 above offline
        logger.info('Eva(%s) epoch %d ,' % (data_name, epochs) + 'Err: ' + str(err) + '.')
        return str(round(correct / float(ct_num), 3))
    else:
        return ''
from network import get_model
from eval import evaluate
from utils import *
from mscv.summary import create_summary_writer, write_meters_loss, write_image
import misc_utils as utils

# Initialization.
# NOTE(review): wrapping model construction in no_grad() looks unusual —
# presumably to avoid building autograd graphs during setup; confirm intent.
with torch.no_grad():
    # Output directories for checkpoints and logs (opt/torch/os come from
    # the wildcard `utils` import or earlier in the file — not visible here).
    save_root = os.path.join(opt.checkpoint_dir, opt.tag)
    log_root = os.path.join(opt.log_dir, opt.tag)
    utils.try_make_dir(save_root)
    utils.try_make_dir(log_root)

    # Dataloaders; `dl` is assumed to be a dataset module defined elsewhere
    # in this file — TODO confirm.
    train_dataloader = dl.train_dataloader
    val_dataloader = dl.val_dataloader

    # Logging.
    logger = init_log(training=True)

    # Model: look up the class by name, then instantiate with the options.
    Model = get_model(opt.model)
    model = Model(opt)
    # Multi-GPU is not supported yet.
    # if len(opt.gpu_ids):
    #     model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)