def evaluate(model, data_loader, mode="train", output_dir="./preds/"):
    """Run the model over data_loader and return the mean IoU score.

    In "val" or "test" mode, each per-image argmax prediction is also
    written to output_dir as a PNG named via prediction_labeller.
    """
    model.eval()  # evaluation mode: freezes dropout / batch-norm statistics
    all_preds, all_gts = [], []
    save_outputs = mode == "val" or mode == "test"  # persist images only outside training
    counter = 0
    with torch.no_grad():  # gradients are not needed during evaluation
        for imgs, gt in data_loader:
            imgs = imgs.cuda()
            logits = model(imgs)
            if save_outputs:
                for single in logits:
                    mask = torch.argmax(single.squeeze(), dim=0).detach().cpu().numpy()
                    skimage.io.imsave(os.path.join(output_dir, prediction_labeller(str(counter)) + ".png"), mask)
                    counter += 1
            batch_pred = torch.max(logits, dim=1)[1]
            all_preds.append(batch_pred.cpu().numpy().squeeze())
            all_gts.append(gt.numpy().squeeze())
    stacked_gts = np.concatenate(all_gts)
    stacked_preds = np.concatenate(all_preds)
    return mean_iou_score(stacked_gts, stacked_preds)
def evaluate(model, data_loader):
    """Evaluate the model, dump the stacked predictions to disk, and return mean IoU."""
    args = parser.arg_parse()
    model.eval()  # switch to evaluation mode
    prediction_batches = []
    ground_truth_batches = []  # ground truth
    print('start evaluate')
    with torch.no_grad():  # gradient bookkeeping is unnecessary during evaluation
        for imgs, gt in data_loader:
            imgs = imgs.cuda()
            logits = model(imgs)
            class_map = torch.max(logits, dim=1)[1]
            prediction_batches.append(class_map.cpu().numpy().squeeze())
            ground_truth_batches.append(gt.numpy().squeeze())
    stacked_gts = np.concatenate(ground_truth_batches)
    stacked_preds = np.concatenate(prediction_batches)
    # Persist raw predictions so they can be re-scored without rerunning the model.
    np.save(args.save_dir + 'preds.npy', stacked_preds)
    return mean_iou_evaluate.mean_iou_score(stacked_preds, stacked_gts)
def evaluate(model, data_loader, save=False):
    """Compute the mean IoU of `model` over `data_loader`.

    The `save` flag is accepted for interface compatibility; persisting the
    predictions is not implemented in this function.
    """
    model.eval()  # evaluation mode: no dropout / batch-norm updates
    collected_preds, collected_gts = [], []
    with torch.no_grad():  # evaluation needs no gradient information
        for imgs, gt in data_loader:
            imgs = imgs.cuda()
            logits = model(imgs)
            labels = torch.max(logits, dim=1)[1]
            collected_preds.append(labels.cpu().numpy().squeeze())
            collected_gts.append(gt.numpy().squeeze())
    stacked_gts = np.concatenate(collected_gts)
    stacked_preds = np.concatenate(collected_preds)
    return mean_iou_score(stacked_gts, stacked_preds)
def evaluate(model, data_loader):
    """Evaluate on a loader yielding (image, ground-truth, filename) triples; returns mean IoU over 9 classes."""
    model.eval()  # put the model in evaluation mode
    pred_list, gt_list = [], []
    with torch.no_grad():  # no gradient bookkeeping needed while evaluating
        for imgs, gt, filename in data_loader:
            # NOTE(review): inputs are intentionally left on CPU here — confirm the model is too.
            logits = model(imgs)
            labels = torch.max(logits, dim=1)[1]
            print(labels.shape)  # debug trace of the per-batch prediction shape
            pred_list.append(labels.cpu().numpy().squeeze())
            # NOTE(review): gt is appended as-is (tensor, not squeezed) — np.concatenate
            # accepts it, but verify the shapes line up with the squeezed predictions.
            gt_list.append(gt)
    stacked_gts = np.concatenate(gt_list)
    stacked_preds = np.concatenate(pred_list)
    return mean_iou_score(stacked_preds, stacked_gts, 9)
def on_epoch_end(self, epoch, logs=None):
    """Callback hook: score this epoch's masks, checkpoint on improvement, log the score.

    Parameters
    ----------
    epoch : int
        Index of the epoch that just finished (unused in the body).
    logs : dict or None
        Metric dict supplied by the framework (unused). Default changed from
        the mutable `{}` to `None` — a shared mutable default is a classic
        Python pitfall, and the value is never read here.
    """
    # Regenerate predictions for the validation images.
    validation(model, self.x)
    # NOTE(review): `pred` is read from testing_folder and `labels` from
    # predict_folder — the names look swapped relative to the folders; confirm.
    pred = read_masks(testing_folder)
    labels = read_masks(predict_folder)
    now_score = mean_iou_score(pred, labels)
    if now_score >= self.prev_high_score:
        print("save model")
        self.model.save(model_name)
        self.prev_high_score = now_score
    # Append the score to the log file; `with` guarantees the handle is closed
    # even if a write fails (the original open/close pair was not exception-safe).
    with open(output_file_name, "a+") as score_log:
        score_log.write(str(now_score))
        score_log.write('\n')
def _eval_one_epoch(self, epoch, iters, best_iou):
    """ Evaluate one epoch

    Returns:
    ========
    loss: float
    mean_iou_score: float
    iters: int
    best_iou: float
    """
    """ Evaluating Process """
    # Switch to evaluation mode and clear any running metric state.
    self.model.eval()
    self.metric.reset()
    batch_loss = 0.0
    val_preds = []  # per-batch predicted label maps (numpy)
    val_segs = []   # per-batch ground-truth label maps (numpy)
    """ evaluate the model """
    with torch.no_grad():  # no gradients needed during validation
        for idx, (imgs, segs) in enumerate(self.val_loader):
            iters += 1  # global iteration counter, used as the tensorboard x-axis
            """ move data to gpu """
            imgs, segs = imgs.cuda(), segs.cuda()
            """ forward path """
            preds = self.model(imgs)
            """ compute loss """
            loss = self.criterion(preds, segs)
            """ argmax softmax and append to list """
            # Softmax then per-pixel argmax; softmax does not change the argmax
            # but mirrors the training-side post-processing.
            preds = F.softmax(preds, dim=1)
            preds = preds.max(dim=1)[1]
            val_preds.append(preds.cpu().numpy())
            val_segs.append(segs.cpu().numpy())
            """ update loss """
            batch_loss += loss.item()
            """ write out information to tensorboard """
            self.writer.add_scalar("loss/val_loss", loss.data.cpu().numpy(), iters)
    # Stack all batches into single arrays for the dataset-level IoU.
    val_preds = np.concatenate(val_preds)
    val_segs = np.concatenate(val_segs)
    val_iou = mean_iou_score(val_preds, val_segs)
    self.writer.add_scalar("mIoU/val_miou", val_iou, epoch)
    """ save best model """
    if val_iou > best_iou:
        print("Best model saved!")
        self.save(os.path.join(self.save_dir, "model_best.pth.tar"))
        best_iou = val_iou
    # NOTE(review): `idx` leaks out of the loop — an empty val_loader would raise
    # NameError here; confirm the loader is always non-empty.
    return batch_loss / (idx + 1), val_iou, iters, best_iou
def validation(model, output_dir): model = model.eval() # turn model into evaluation mode tot_loss = 0 correct = 0 p_acc = 0 count = 0 label_trues, label_preds = [], [] with torch.no_grad(): # free gpu memory use for back-up for data, target in testset_loader: data, target = data.to("cuda"), target.to("cuda", dtype=torch.int64) output = model(data) label_pred = output.data.max(1)[1].data.cpu().numpy() label_true = target.data.cpu().numpy() for pred, labels in zip(label_pred, label_true): labels = labels[0, :, :].astype(int) p_acc += pixel_acc(pred, labels) mean_iou = mean_iou_score(pred, labels) correct += mean_iou predimg = return_mask(pred) misc.imsave(output_dir + "\\" + str(count).zfill(4) + ".png", predimg) # misc.imsave(output_dir+"\\"+str(count).zfill(4)+".png",labelimg) count += 1 tot_loss /= len(testset_loader.dataset) P_ACC = 100. * p_acc / len(testset_loader.dataset) print("++++++++++++++++++++++++++++++++") import mean_iou_evaluate as mie pred = mie.read_masks( r"C:\Users\YH\Desktop\CVDL_HW2\hw2-yohschang\hw2_data\p2_data\pred") labels = mie.read_masks( r"C:\Users\YH\Desktop\CVDL_HW2\hw2-yohschang\hw2_data\p2_data\validation" ) m_iou = mie.mean_iou_score(pred, labels) print(" loss : " + str(tot_loss) + " Accuracy : " + str(round(m_iou * 100, 5))) print("pixel accuracy : " + str(P_ACC)) print("++++++++++++++++++++++++++++++++") return m_iou * 100
def evaluate(model, data_loader, save):
    """Evaluate `model` over `data_loader`; optionally save the predicted
    segmentation maps as zero-padded PNGs in args.pred_dir.

    Parameters
    ----------
    model : torch.nn.Module
        Segmentation network; outputs per-class logits.
    data_loader : iterable of (images, ground_truth) batches
    save : int
        When 1, each prediction is written to args.pred_dir as "NNNN.png".

    Returns
    -------
    float
        Mean IoU between predictions and ground truth.
    """
    model.eval()
    preds = []
    gts = []
    with torch.no_grad():  # gradients are not needed during evaluation
        for idx, (imgs, gt) in enumerate(data_loader):
            imgs = imgs.cuda()
            pred = model(imgs)
            _, pred = torch.max(pred, dim=1)  # per-pixel argmax -> label map
            preds.append(pred.cpu().numpy().squeeze())
            gts.append(gt.numpy().squeeze())
    gts = np.concatenate(gts)
    preds = np.concatenate(preds)
    if save == 1:
        # Create the prediction directory if it doesn't exist.
        os.makedirs(args.pred_dir, exist_ok=True)
        # FIX: iterate over however many predictions exist instead of the
        # hard-coded 500, and zero-pad with a format spec instead of an
        # if/elif chain (identical filenames for i < 1000, correct beyond).
        for i in range(len(preds)):
            seg_map = Image.fromarray(preds[i].astype('uint8'))
            seg_map.save(os.path.join(args.pred_dir, "{:04d}.png".format(i)))
    return mean_iou_score(preds, gts)
from mean_iou_evaluate import read_masks, mean_iou_score

# Locations of the predicted masks and the ground-truth masks.
myans_path = 'improve/'
label_path = 'hw2_data/p2_data/validation/'

# Load both mask sets and report the mean IoU between them.
predictions = read_masks(myans_path)
ground_truth = read_masks(label_path)
score = mean_iou_score(predictions, ground_truth)
print(score)
valid_loss.append(loss.item()) # valid_accs.append(acc) # The average loss and accuracy for entire validation set is the average of the recorded values. valid_loss = sum(valid_loss) / len(valid_loss) # Print the information. print( f"[ Valid | {epoch + 1:03d}/{n_epochs:03d} ] loss = {valid_loss:.5f}") scheduler.step() pred = pred.cpu().numpy() pred = np.argmax(pred, 1) gt = gt.cpu().numpy() mean_iou = mean_iou_score(pred, gt) # record model progress if epoch in [1, 11, 21]: progress_dir = "./progress" if not os.path.exists(progress_dir): os.makedirs(progress_dir) # save 0010_mask, 0097_mask, and 0107_mask ids = [10, 97, 107] imgs = torch.tensor( [valid_set.images[i].cpu().detach().numpy() for i in ids]) with torch.no_grad(): outputs = model(imgs.to(device)) outputs = outputs.cpu().numpy()