def eval(model, dataloader, args, label_info):
    """Evaluate the model on the test set.

    Runs the model under torch.no_grad() in eval mode, accumulates a
    confusion histogram for mIoU and a per-batch pixel-accuracy list.

    Args:
        model: trained segmentation network.
        dataloader: test-set loader yielding (data, label) batches.
        args: namespace with batch_size, num_classes, use_gpu.
        label_info: class/colour metadata (currently unused; kept for the
            commented-out colour-coding path).

    Returns:
        Mean per-pixel precision over all batches (float).
    """
    print('start test!')
    with torch.no_grad():
        model.eval()
        precision_record = []
        tq = tqdm.tqdm(total=len(dataloader) * args.batch_size)
        tq.set_description('test')
        hist = np.zeros((args.num_classes, args.num_classes))
        for i, (data, label) in enumerate(dataloader):
            tq.update(args.batch_size)
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                label = label.cuda()
            predict = model(data).squeeze()
            predict = reverse_one_hot(predict)
            # BUG FIX: move the tensor to CPU before converting — np.array()
            # on a CUDA tensor raises TypeError (sibling variants already do
            # np.array(predict.cpu())).
            predict = np.array(predict.cpu())
            # predict = colour_code_segmentation(np.array(predict), label_info)
            label = label.squeeze()
            label = reverse_one_hot(label)
            label = np.array(label.cpu())
            # label = colour_code_segmentation(np.array(label), label_info)
            precision = compute_global_accuracy(predict, label)
            hist += fast_hist(label.flatten(), predict.flatten(), args.num_classes)
            precision_record.append(precision)
        precision = np.mean(precision_record)
        miou = np.mean(per_class_iu(hist))
        tq.close()
        print('precision for test: %.3f' % precision)
        # BUG FIX: this is the test routine — the message said "validation"
        # (copy-paste from val()); siblings print "mIoU for test".
        print('mIoU for test: %.3f' % miou)
        return precision
def val(args, model, dataloader, csv_path):
    """Validate the model for one pass over `dataloader`.

    Computes per-pixel accuracy (stored per batch, averaged at the end)
    and mIoU from an accumulated confusion histogram.

    Args:
        args: namespace with num_classes, use_gpu.
        model: segmentation network.
        dataloader: validation loader yielding (data, label) batches.
        csv_path: class-info CSV path (unused here; kept for interface
            compatibility — see the commented get_label_info call).

    Returns:
        Mean per-pixel precision (float).
    """
    print('start val!')
    # label_info = get_label_info(csv_path)
    with torch.no_grad():
        model.eval()
        precision_record = []
        hist = np.zeros((args.num_classes, args.num_classes))
        for i, (data, label) in enumerate(dataloader):
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                label = label.cuda()
            # get the predicted class-index map
            predict = model(data).squeeze()
            predict = reverse_one_hot(predict)
            # BUG FIX: .cpu() before np.array() — converting a CUDA tensor
            # directly raises TypeError.
            predict = np.array(predict.cpu())
            # get the label class-index map
            label = label.squeeze()
            label = reverse_one_hot(label)
            label = np.array(label.cpu())
            # compute per pixel accuracy
            precision = compute_global_accuracy(predict, label)
            hist += fast_hist(label.flatten(), predict.flatten(), args.num_classes)
            # there is no need to transform the one-hot array to visual RGB array
            # predict = colour_code_segmentation(np.array(predict), label_info)
            # label = colour_code_segmentation(np.array(label), label_info)
            precision_record.append(precision)
        dice = np.mean(precision_record)
        miou = np.mean(per_class_iu(hist))
        print('precision per pixel for validation: %.3f' % dice)
        print('mIoU for validation: %.3f' % miou)
        return dice
def val(args, model, dataloader):
    """Run one validation pass and report pixel accuracy and mIoU.

    model.eval() switches layers such as Dropout and BatchNorm to their
    inference behaviour, and torch.no_grad() disables gradient tracking —
    the standard pairing for evaluation (the trainer re-enables training
    mode with model.train() afterwards).

    The mIoU is averaged over all classes except the last one — presumably
    an ignore/void class; TODO confirm against the dataset definition.

    Returns:
        (precision, miou) as floats.
    """
    print('start val!')
    # label_info = get_label_info(csv_path)
    with torch.no_grad():
        model.eval()
        per_batch_acc = []
        confusion = np.zeros((args.num_classes, args.num_classes))
        for data, label in dataloader:
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                label = label.cuda().long()
            logits = model(data)
            # argmax over the class channel -> (B, H, W) class-index map
            pred_map = logits.max(dim=1)[1]
            label_np = label.cpu().numpy()
            pred_np = pred_map.cpu().numpy()
            # per-pixel accuracy for this batch
            per_batch_acc.append(compute_global_accuracy(pred_np, label_np))
            # accumulate the confusion histogram used for per-class IoU
            confusion += fast_hist(label_np.flatten(), pred_np.flatten(),
                                   args.num_classes)
        precision = np.mean(per_batch_acc)
        # miou = np.mean(per_class_iu(hist))
        # drop the last class from the mIoU average
        miou = np.mean(per_class_iu(confusion)[:-1])
        # miou_dict, miou = cal_miou(miou_list, csv_path)
        print('precision per pixel for test: %.3f' % precision)
        print('mIoU for validation: %.3f' % miou)
        return precision, miou
def val(args, model, dataloader, csv_path):
    """Validate the model, reporting pixel accuracy, mIoU and per-class IoU.

    Args:
        args: namespace with num_classes, use_gpu, loss.
        model: segmentation network.
        dataloader: validation loader yielding (data, label) batches.
        csv_path: class-info CSV consumed by get_label_info / cal_miou.

    Returns:
        (precision, miou) as floats.
    """
    print('start val!!')
    # NOTE(review): label_info is never used below — kept only because
    # get_label_info reads the CSV and removing the call would change when a
    # missing file is detected.
    label_info = get_label_info(csv_path)
    with torch.no_grad():
        model.eval()
        precision_record = []
        hist = np.zeros((args.num_classes, args.num_classes))
        for i, (data, label) in enumerate(dataloader):
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                label = label.cuda()
            # predicted class-index map
            predict = model(data).squeeze()
            predict = reverse_one_hot(predict)
            predict = np.array(predict.cpu())
            # label class-index map (labels are one-hot only for dice loss)
            label = label.squeeze()
            if args.loss == 'dice':
                label = reverse_one_hot(label)
            label = np.array(label.cpu())
            # per-pixel accuracy for this batch
            precision = compute_global_accuracy(predict, label)
            hist += fast_hist(label.flatten(), predict.flatten(), args.num_classes)
            precision_record.append(precision)
        precision = np.mean(precision_record)
        # last class excluded from mIoU — presumably a void class; confirm
        miou_list = per_class_iu(hist)[:-1]
        # FIX: the original computed np.mean(per_class_iu(hist)) and also took
        # cal_miou's miou, then overwrote both — the dead stores are removed;
        # cal_miou is kept only for its per-class dict.
        miou_dict, _ = cal_miou(miou_list, csv_path)
        miou = np.mean(miou_list)
        print('precision per pixel for test: %.3f' % precision)
        print('mIoU for validation: %.3f' % miou)
        miou_str = ''
        for key in miou_dict:
            miou_str += '{}:{},\n'.format(key, miou_dict[key])
        print('mIoU for each class:')
        print(miou_str)
        return precision, miou
def eval(model, dataloader, args, csv_path):
    """Evaluate on the test set, optionally saving qualitative images.

    Args:
        model: trained segmentation network.
        dataloader: test loader yielding (data, label) batches.
        args: namespace with batch_size, num_classes, use_gpu, loss,
            save_images_path.
        csv_path: class-info CSV consumed by cal_miou.

    Returns:
        Mean per-pixel precision (float).
    """
    print('start test!')
    with torch.no_grad():
        model.eval()
        precision_record = []
        tq = tqdm.tqdm(total=len(dataloader) * args.batch_size)
        tq.set_description('test')
        hist = np.zeros((args.num_classes, args.num_classes))
        for i, (data, label) in enumerate(dataloader):
            tq.update(args.batch_size)
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                label = label.cuda()
            predict = model(data).squeeze()
            predict = reverse_one_hot(predict)
            # BUG FIX: .cpu() before np.array() — converting a CUDA tensor
            # directly raises TypeError.
            predict = np.array(predict.cpu())
            # predict = colour_code_segmentation(np.array(predict), label_info)
            label = label.squeeze()
            if args.loss == 'dice':
                label = reverse_one_hot(label)
            label = np.array(label.cpu())
            # label = colour_code_segmentation(np.array(label), label_info)
            # saving some qualitative results for the first 40 batches
            if args.save_images_path is not None and i < 40:
                # BUG FIX: to_pil_image requires a CPU tensor
                current_image = transforms.functional.to_pil_image(data[0].cpu())
                current_label = Image.fromarray(colorize_label(label))
                current_predi = Image.fromarray(colorize_label(predict))
                current_image.save(args.save_images_path + f"/image{i}.jpg")
                current_label.save(args.save_images_path + f"/label{i}.jpeg")
                current_predi.save(args.save_images_path + f"/prediction{i}.jpeg")
            precision = compute_global_accuracy(predict, label)
            hist += fast_hist(label.flatten(), predict.flatten(), args.num_classes)
            precision_record.append(precision)
        precision = np.mean(precision_record)
        # last class excluded from mIoU — presumably a void class; confirm
        miou_list = per_class_iu(hist)[:-1]
        miou_dict, miou = cal_miou(miou_list, csv_path)
        print('IoU for each class:')
        for key in miou_dict:
            print('{}:{},'.format(key, miou_dict[key]))
        tq.close()
        print('precision for test: %.3f' % precision)
        print('mIoU for validation: %.3f' % miou)
        return precision
def val(args, model, dataloader, loss_func):
    """Validation pass that also tracks the mean loss.

    Evaluates under torch.no_grad() with the model in eval mode, computing
    per-pixel accuracy, mIoU over all classes except the last one, and the
    mean of `loss_func` across batches.

    Returns:
        (precision, miou, loss_mean) as floats.
    """
    # print("start val!")
    # label_info = get_label_info(csv_path)
    progress = tqdm(total=len(dataloader) * args.batch_size)
    progress.set_description("validating:")
    with torch.no_grad():
        model.eval()
        acc_history = []
        loss_history = []
        confusion = np.zeros((args.num_classes, args.num_classes))
        for data, label in dataloader:
            progress.update(args.batch_size)
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                label = label.cuda().long()
            output = model(data)
            loss_history.append(loss_func(output, label).item())
            # argmax over the class channel -> (B, H, W) class-index map
            pred_map = output.max(dim=1)[1]
            label_np = label.cpu().numpy()
            pred_np = pred_map.cpu().numpy()
            # per-pixel accuracy for this batch
            acc_history.append(compute_global_accuracy(pred_np, label_np))
            # accumulate the confusion histogram used for per-class IoU
            confusion += fast_hist(label_np.flatten(), pred_np.flatten(),
                                   args.num_classes)
        progress.close()
        loss_mean = np.mean(loss_history)
        precision = np.mean(acc_history)
        # drop the last class from the mIoU average
        miou = np.mean(per_class_iu(confusion)[:-1])
        return precision, miou, loss_mean
def eval(model, dataloader, args, csv_path):
    """Test-set evaluation with extra metrics.

    Reports overall accuracy (OA), per-class IoU and mIoU, per-batch-averaged
    Cohen's kappa and macro F1, and forward-pass FPS.

    Args:
        model: trained segmentation network.
        dataloader: test loader yielding (data, label) batches.
        args: namespace with batch_size, num_classes, use_gpu, loss.
        csv_path: class-info CSV consumed by cal_miou.

    Returns:
        (precision, cm, total_cks, cr) — cm and cr come from
        compute_cm_cks_cr on the LAST batch only (legacy behaviour).
    """
    print('start test!')
    with torch.no_grad():
        model.eval()
        precision_record = []
        tq = tqdm.tqdm(total=len(dataloader) * args.batch_size)
        tq.set_description('test')
        hist = np.zeros((args.num_classes, args.num_classes))
        total_time = 0
        total_cks, total_f1 = 0.0, 0.0
        length = len(dataloader)
        print('length: %d' % length)
        for i, (data, label) in enumerate(dataloader):
            tq.update(args.batch_size)
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                label = label.cuda()
            # BUG FIX: time.clock() was removed in Python 3.8;
            # perf_counter() is the documented replacement for timing.
            start = time.perf_counter()
            predict = model(data).squeeze()
            end = time.perf_counter()
            # accumulate forward-pass time only (conversion excluded)
            total_time += (end - start)
            # convert prediction to a class-index matrix
            predict = reverse_one_hot(predict)
            # BUG FIX: .cpu() before np.array() — converting a CUDA tensor
            # directly raises TypeError.
            predict = np.array(predict.cpu())
            # convert label to a class-index matrix (one-hot only for dice loss)
            label = label.squeeze()
            if args.loss == 'dice':
                label = reverse_one_hot(label)
            label = np.array(label.cpu())
            # per-batch kappa and macro F1, averaged over batches below
            cks = cohen_kappa_score(label.flatten(), predict.flatten())
            total_cks += cks
            f1 = f1_score(label.flatten(), predict.flatten(), average='macro')
            total_f1 += f1
            # overall (per-pixel) accuracy
            precision = compute_global_accuracy(predict, label)
            hist += fast_hist(label.flatten(), predict.flatten(), args.num_classes)
            precision_record.append(precision)
        precision = np.mean(precision_record)
        miou_list = per_class_iu(hist)
        miou_dict, miou = cal_miou(miou_list, csv_path)
        print('IoU for each class:')
        for key in miou_dict:
            print('{}:{},'.format(key, miou_dict[key]))
        tq.close()
        print('oa for test: %.3f' % precision)
        print('mIoU for test: %.3f' % miou)
        # NOTE(review): marked obsolete upstream — uses predict/label from the
        # LAST batch only, but its cm/cr are still part of the return value.
        cm, cks, cr = compute_cm_cks_cr(predict, label)
        total_cks /= length
        print('kappa for test: %.4f' % total_cks)
        total_f1 /= length
        print('f1 for test: %.4f' % total_f1)
        fps = length / total_time
        print('fps: %.2f' % fps)
        return precision, cm, total_cks, cr
def val(args, model, dataloader, data_name):
    """Validation pass with pooled-kappa and macro-F1 tracking.

    Cohen's kappa is computed over the pixels pooled from every 8 batches;
    macro F1 and per-pixel accuracy are per batch and averaged at the end.
    NOTE(review): batches left over when len(dataloader) is not a multiple
    of 8 never contribute to kappa — legacy behaviour, kept as-is.

    Args:
        args: namespace with num_classes, use_gpu, loss.
        model: segmentation network.
        dataloader: loader yielding (data, label) batches.
        data_name: dataset name used only in the printed messages.

    Returns:
        (precision, miou, cm, total_cks, total_f1) — cm comes from
        compute_cm_cks_cr on the LAST batch only (legacy behaviour).
    """
    print('start val!')
    # label_info = get_label_info(csv_path)
    total_cks, total_f1 = 0.0, 0.0
    # pools seeded with a dummy 0 that is sliced off ([1:]) before scoring
    total_pred = np.array([0])
    total_label = np.array([0])
    length = len(dataloader)
    with torch.no_grad():
        model.eval()
        precision_record = []
        hist = np.zeros((args.num_classes, args.num_classes))
        for i, (data, label) in enumerate(dataloader):
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                label = label.cuda()
            predict = model(data).squeeze()
            predict = reverse_one_hot(predict)
            # BUG FIX: .cpu() before np.array() — converting a CUDA tensor
            # directly raises TypeError.
            predict = np.array(predict.cpu())
            label = label.squeeze()
            if args.loss == 'dice':
                label = reverse_one_hot(label)
            label = np.array(label.cpu())
            # pool pixels; every 8 batches compute kappa and reset the pool
            total_pred = np.append(total_pred, predict.flatten())
            total_label = np.append(total_label, label.flatten())
            if (i + 1) % 8 == 0:
                cks = cohen_kappa_score(total_label[1:], total_pred[1:])
                total_label = np.array([0])
                total_pred = np.array([0])
                total_cks += cks
            f1 = f1_score(label.flatten(), predict.flatten(), average='macro')
            total_f1 += f1
            precision = compute_global_accuracy(predict, label)
            hist += fast_hist(label.flatten(), predict.flatten(), args.num_classes)
            precision_record.append(precision)
        precision = np.mean(precision_record)
        # last class excluded from mIoU — presumably a void class; confirm
        miou_list = per_class_iu(hist)[:-1]
        miou = np.mean(miou_list)
        print('oa for %s: %.3f' % (data_name, precision))
        print('mIoU for %s: %.3f' % (data_name, miou))
        # last-batch confusion matrix / kappa / report (cks, cr partly unused)
        cm, cks, cr = compute_cm_cks_cr(predict, label)
        total_f1 /= length
        # BUG FIX: guard against ZeroDivisionError when the loader has
        # fewer than 8 batches (length // 8 == 0).
        total_cks = total_cks / max(1, length // 8)
        print('kappa for %s: %.4f' % (data_name, total_cks))
        print('f1 for {}:\n'.format(data_name), total_f1)
        return precision, miou, cm, total_cks, total_f1