def validate(segmenter, val_loader, epoch, num_classes=4):
    """Validate a two-stream segmenter.

    Args:
        segmenter (nn.Module) : segmentation network
        val_loader (DataLoader) : validation data iterator
        epoch (int) : current epoch
        num_classes (int) : number of classes to consider

    Returns:
        Mean IoU (float)
    """
    val_loader.dataset.set_stage('val')
    segmenter.eval()
    cm = np.zeros((num_classes, num_classes), dtype=int)
    with torch.no_grad():
        for i, sample in enumerate(val_loader):
            input = sample['image']
            target = sample['mask']
            input_2 = sample['img_2']
            input_var = torch.autograd.Variable(input).float().cuda()
            input_var_2 = torch.autograd.Variable(input_2).float().cuda()
            # Compute output from the two input streams
            output = segmenter(input_var, input_var_2)
            # Upsample logits to the ground-truth resolution, then take the arg-max class
            output = cv2.resize(
                output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
                target.size()[1:][::-1],
                interpolation=cv2.INTER_CUBIC).argmax(axis=2).astype(np.uint8)
            # Compute IoU
            gt = target[0].data.cpu().numpy().astype(np.uint8)
            gt_idx = gt < num_classes  # ignore every class index >= num_classes
            cm += fast_cm(output[gt_idx], gt[gt_idx], num_classes)
            if i % args.print_every == 0:
                logger.info(' Val epoch: {} [{}/{}]\t'
                            'Mean IoU: {:.3f}'.format(epoch, i, len(val_loader),
                                                      compute_iu(cm).mean()))
    ious = compute_iu(cm)
    logger.info(" IoUs: {}".format(ious))
    miou = np.mean(ious)
    logger.info(' Val epoch: {}\tMean IoU: {:.3f}'.format(epoch, miou))
    return miou
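# fast_cm and compute_iu come from this repo's helper module and are not shown above.
# Below is a minimal sketch of the behaviour the validation loops assume (a bincount-based
# confusion matrix and per-class intersection-over-union). It is NOT the repo's actual
# implementation; signatures match the call sites, but edge-case handling may differ.
import numpy as np


def fast_cm(preds, gt, n_classes):
    """Sketch: confusion matrix over flattened arrays (rows: ground truth, cols: prediction)."""
    preds = preds.astype(np.int64).ravel()
    gt = gt.astype(np.int64).ravel()
    cm = np.bincount(gt * n_classes + preds, minlength=n_classes ** 2)
    return cm.reshape(n_classes, n_classes)


def compute_iu(cm):
    """Sketch: per-class IoU = diag / (row_sum + col_sum - diag), guarding empty classes."""
    tp = np.diag(cm).astype(np.float64)
    denom = cm.sum(axis=1) + cm.sum(axis=0) - tp
    return tp / np.maximum(denom, 1)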
def validate(segmenter, val_loader, epoch, num_classes=-1):
    """Validate segmenter on the hand-segmentation task.

    Args:
        segmenter (nn.Module) : segmentation network
        val_loader (DataLoader) : validation data iterator
        epoch (int) : current epoch
        num_classes (int) : number of classes to consider

    Returns:
        Hand IoU (float)
    """
    val_loader.dataset.set_stage('val')
    segmenter.eval()
    cm = np.zeros((num_classes, num_classes), dtype=int)
    with torch.no_grad():
        for i, sample in enumerate(val_loader):
            input = sample['image']
            target = sample['mask']
            input_var = torch.autograd.Variable(input).float().cuda()
            # Compute output
            output = segmenter(input_var)
            # Upsample logits to the ground-truth resolution, then take the arg-max class
            output = cv2.resize(
                output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
                target.size()[1:][::-1],
                interpolation=cv2.INTER_CUBIC).argmax(axis=2).astype(np.uint8)
            # image[0] is 3 x 720 x 1280; mask is 1 x 720 x 1280
            # Compute IoU
            gt = target[0].data.cpu().numpy().astype(np.uint8)
            gt_idx = gt < num_classes  # ignore every class index >= num_classes
            cm += fast_cm(output[gt_idx], gt[gt_idx], num_classes)
            if i % args.print_every == 0:
                logger.info(' Val epoch: {} [{}/{}]\t'
                            'hand-IoU: {:.3f}\t'
                            'hand-Recall: {:.3f}'.format(
                                epoch, i, len(val_loader),
                                compute_iu(cm)[1], compute_recall(cm)[1]))
    ious = compute_iu(cm)
    recall_hand = compute_recall(cm)[1]
    logger.info(" IoUs: {}".format(ious))
    hiou = ious[1]  # class 1 is the hand class
    logger.info(
        ' Val epoch: {}\t overall hand-IoU: {:.3f}\t overall hand-Recall: {:.3f}\t global_step: {}'
        .format(epoch, hiou, recall_hand, global_step))
    return hiou
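# compute_recall is also provided by the repo's helpers and is not shown. A plausible
# sketch, assuming the row-is-ground-truth convention of the fast_cm sketch above; if the
# real fast_cm stores predictions in rows, sum over axis=0 instead. Index 1 is the hand
# class in the code above.
import numpy as np


def compute_recall(cm):
    """Sketch: per-class recall = true positives / all ground-truth pixels of that class."""
    tp = np.diag(cm).astype(np.float64)
    return tp / np.maximum(cm.sum(axis=1), 1)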
def validate(multiTaskNet, val_loader, epoch, num_classes=-1, num_depths=-1):
    """Validate multiTaskNet.

    Args:
        multiTaskNet (nn.Module) : joint segmentation + depth network
        val_loader (DataLoader) : validation data iterator
        epoch (int) : current epoch
        num_classes (int) : number of segmentation classes to consider
        num_depths (int) : number of discrete depth bins
    Returns:
        Mean IoU averaged over the segmentation and depth tasks (float)
    """
    val_loader.dataset.set_stage('val')
    multiTaskNet.eval()
    cm1 = np.zeros((num_classes, num_classes), dtype=int)
    cm2 = np.zeros((num_depths, num_depths), dtype=int)
    with torch.no_grad():
        for i, sample in enumerate(val_loader):
            input_img = sample['image']
            target1 = sample['mask'].cuda()
            target2 = sample['depth'].cuda()
            input_var = torch.autograd.Variable(input_img).float().cuda()
            # Compute both task outputs
            output1, output2 = multiTaskNet(input_var)
            # Upsample each prediction to the ground-truth resolution, then take the arg-max
            output1 = cv2.resize(
                output1[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
                target1.size()[1:][::-1],
                interpolation=cv2.INTER_CUBIC).argmax(axis=2).astype(np.uint8)
            output2 = cv2.resize(
                output2[0, :num_depths].data.cpu().numpy().transpose(1, 2, 0),
                target1.size()[1:][::-1],
                interpolation=cv2.INTER_CUBIC).argmax(axis=2).astype(np.uint8)
            # Accumulate one confusion matrix per task
            gt1 = target1[0].data.cpu().numpy().astype(np.uint8)
            gt1_idx = gt1 < num_classes  # ignore every class index >= num_classes
            cm1 += fast_cm(output1[gt1_idx], gt1[gt1_idx], num_classes)
            gt2 = target2[0].data.cpu().numpy().astype(np.uint8)
            gt2_idx = gt2 < num_depths
            cm2 += fast_cm(output2[gt2_idx], gt2[gt2_idx], num_depths)
    ious1 = compute_iu(cm1)
    ious2 = compute_iu(cm2)
    logger.info(" IoUs: segm=\n{}\ndepth=\n{}\n".format(ious1, ious2))
    miou = (np.mean(ious1) + np.mean(ious2)) / 2
    logger.info(' Val epoch: {}\tMean IoU: {:.3f}'.format(epoch, miou))
    return miou
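# The depth head above is scored with the same confusion-matrix machinery as the
# segmentation head, which implies sample['depth'] already contains integer bin indices in
# [0, num_depths). The dataset code is not shown, so the following is a hypothetical
# quantisation of a metric depth map into such bins (uniform bins up to max_depth;
# out-of-range pixels get 255 so the `gt2 < num_depths` mask ignores them).
import numpy as np


def quantize_depth(depth_m, num_depths, max_depth=10.0, ignore_index=255):
    """Hypothetical: map metric depth (metres) to bin indices; invalid pixels -> ignore_index."""
    bins = np.floor(depth_m / max_depth * num_depths).astype(np.int64)
    bins = np.where((depth_m <= 0) | (bins >= num_depths), ignore_index, bins)
    return bins.astype(np.uint8)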
def validate(segmenter, val_loader, epoch, num_classes=-1):
    """Validate segmenter

    Args:
        segmenter (nn.Module) : segmentation network
        val_loader (DataLoader) : validation data iterator
        epoch (int) : current epoch
        num_classes (int) : number of classes to consider

    Returns:
        Mean IoU (float)
    """
    val_loader.dataset.set_stage("val")
    segmenter.eval()
    cm = np.zeros((num_classes, num_classes), dtype=int)
    with torch.no_grad():
        for i, sample in enumerate(val_loader):
            input = sample["image"]
            target = sample["mask"]
            input_var = torch.autograd.Variable(input).float().cuda()
            # Compute output
            output = segmenter(input_var)
            # Upsample logits to the ground-truth resolution, then take the arg-max class
            output = (
                cv2.resize(
                    output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
                    target.size()[1:][::-1],
                    interpolation=cv2.INTER_CUBIC,
                )
                .argmax(axis=2)
                .astype(np.uint8)
            )
            # Compute IoU
            gt = target[0].data.cpu().numpy().astype(np.uint8)
            gt_idx = (
                gt < num_classes
            )  # ignore every class index >= num_classes
            cm += fast_cm(output[gt_idx], gt[gt_idx], num_classes)
            if i % args.print_every == 0:
                logger.info(
                    " Val epoch: {} [{}/{}]\t"
                    "Mean IoU: {:.3f}".format(
                        epoch, i, len(val_loader), compute_iu(cm).mean()
                    )
                )
    ious = compute_iu(cm)
    logger.info(" IoUs: {}".format(ious))
    miou = np.mean(ious)
    logger.info(" Val epoch: {}\tMean IoU: {:.3f}".format(epoch, miou))
    return miou
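# Example of how one of these validate functions might be driven from a training script.
# val_dataset is assumed to be this repo's dataset class (exposing set_stage and returning
# dict samples with 'image'/'mask'); run_validation, ckpt_path and the checkpoint layout
# are illustrative, not part of the repo.
import torch
from torch.utils.data import DataLoader


def run_validation(segmenter, val_dataset, epoch, num_classes, best_miou, ckpt_path='best.pth'):
    # Batch size 1 matches the per-sample cv2.resize and confusion-matrix accumulation above.
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=2)
    miou = validate(segmenter, val_loader, epoch, num_classes=num_classes)
    if miou > best_miou:
        best_miou = miou
        torch.save({'epoch': epoch, 'state_dict': segmenter.state_dict(), 'miou': miou},
                   ckpt_path)
    return best_miou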
def validate(segmenter, val_loader, epoch, num_classes=-1):
    """Validate segmenter and plot the first few predictions."""
    val_loader.dataset.set_stage('val')
    segmenter.eval()
    cm = np.zeros((num_classes, num_classes), dtype=int)
    plt.figure(figsize=(16, 12))
    # Set img_dir to a writable directory to save figures for the first 10 samples, e.g.
    # img_dir = '/home/yangjing/code/wash-hand/light-weight-refinenet-master/infer_img/val_first10/40ep-77/'
    img_dir = None
    with torch.no_grad():
        for i, sample in enumerate(val_loader):
            input = sample['image']  # already transformed; to show it raw, comment line 197
            target = sample['mask']
            input_var = torch.autograd.Variable(input).float().cuda()
            # Compute output
            output = segmenter(input_var)
            output = cv2.resize(
                output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
                target.size()[1:][::-1],
                interpolation=cv2.INTER_CUBIC).argmax(axis=2).astype(np.uint8)
            # image[0] is 3 x 720 x 1280; mask is 1 x 720 x 1280
            # Compute IoU
            gt = target[0].data.cpu().numpy().astype(np.uint8)
            gt_idx = gt < num_classes  # ignore every class index >= num_classes
            cm += fast_cm(output[gt_idx], gt[gt_idx], num_classes)
            # Visualise image / ground truth / prediction side by side
            plt.subplot(1, 3, 1)
            plt.title('img-%d' % i)
            # Inverse of 'image': (self.scale * image - self.mean) / self.std
            unnormalized = (input[0].data.cpu().numpy().transpose(1, 2, 0)
                            * args.normalise_params[2]
                            + args.normalise_params[1]) / args.normalise_params[0]
            plt.imshow(unnormalized.astype(np.uint8))
            plt.subplot(1, 3, 2)
            plt.title('gt-%d' % i)
            plt.imshow(gt)
            plt.subplot(1, 3, 3)
            plt.title('pred-%d' % i)
            plt.imshow(output)
            # plt.show()  # in dataset.py, comment line 153 first
            if img_dir is not None and i < 10:
                plt.savefig(os.path.join(img_dir, '%d.png' % i))
            if i % args.print_every == 0:
                logger.info(' Val epoch: {} [{}/{}]\t'
                            'Mean IoU: {:.3f}'.format(epoch, i, len(val_loader),
                                                      compute_iu(cm).mean()))
    ious = compute_iu(cm)
    logger.info(" IoUs: {}".format(ious))
    miou = np.mean(ious)
    logger.info(' Val epoch: {}\tMean IoU: {:.3f}'.format(epoch, miou))
    return miou
def test(segmenter, test_loader, num_classes=-1):
    """Test segmenter.

    Args:
        segmenter (nn.Module) : segmentation network
        test_loader (DataLoader) : test data iterator
        num_classes (int) : number of classes to consider

    Returns:
        Mean IoU (float)
    """
    test_loader.dataset.set_stage('val')  # the 'val' stage suffices for testing
    segmenter.eval()
    cm = np.zeros((num_classes, num_classes), dtype=int)
    logger.info(" Testing begins.")
    with torch.no_grad():
        for i, sample in enumerate(tqdm(test_loader)):
            input = sample['image']
            target = sample['mask']
            input_var = torch.autograd.Variable(input).float().cuda()
            # Compute output
            output = segmenter(input_var)
            output = cv2.resize(
                output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
                target.size()[1:][::-1],
                interpolation=cv2.INTER_CUBIC).argmax(axis=2).astype(np.uint8)
            # Compute IoU
            gt = target[0].data.cpu().numpy().astype(np.uint8)
            gt_idx = gt < num_classes  # ignore every class index >= num_classes
            cm += fast_cm(output[gt_idx], gt[gt_idx], num_classes)
            # Optionally save colour-coded predictions
            if args.save_visuals and args.save_visuals_dir is not None:
                # The test list is re-read on every iteration; it could be hoisted above the loop.
                with open(args.test_list, 'rb') as f:
                    testlist = f.readlines()
                test_list = [(k, v) for k, v in map(
                    lambda x: x.decode('utf-8').strip('\n').split('\t'), testlist)]
                img_path = os.path.join(args.test_dir, test_list[i][0])
                img_name = img_path.split('/')[-1]
                if not os.path.exists(args.save_visuals_dir):
                    os.makedirs(args.save_visuals_dir)
                if args.data_name == 'cityscapes':
                    palette = get_palette_cityscapes()
                    output_im = Image.fromarray(np.array(output))
                    output_im.putpalette(palette)
                    output_im.save(args.save_visuals_dir + '/' + img_name)
                else:
                    # Not implemented for other datasets for now:
                    # either set SAVE_VISUALS=False or implement it for your dataset.
                    raise NotImplementedError
    ious = compute_iu(cm)
    logger.info(" IoUs: {}".format(ious))
    miou = np.mean(ious)
    logger.info(' Mean IoU: {:.4f}'.format(miou))
    return miou
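# get_palette_cityscapes is defined elsewhere in the repo. PIL's putpalette expects a flat
# [R, G, B, R, G, B, ...] list; a sketch using the widely used 19 Cityscapes train-class
# colours is below. Verify the exact values and ordering against the repo's own helper.
def get_palette_cityscapes():
    colours = [
        (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156),
        (190, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),
        (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60),
        (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100),
        (0, 80, 100), (0, 0, 230), (119, 11, 32),
    ]
    palette = [channel for rgb in colours for channel in rgb]
    palette += [0] * (768 - len(palette))  # pad the remaining entries with black
    return palette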
def validate(segmenter, val_loader, epoch, num_classes=-1):
    """Validate segmenter and optionally visualise predictions."""
    val_loader.dataset.set_stage('val')
    segmenter.eval()
    cm = np.zeros((num_classes, num_classes), dtype=int)
    plt.figure(figsize=(16, 12))
    with torch.no_grad():
        fo = open(args.val_list[0])
        conts = fo.readlines()
        for i, sample in enumerate(val_loader):
            # Each list entry looks like: val_images/101/v1_000144.jpg\tval_labels/101/v1_000144.png
            img_name = os.path.basename(conts[i].split('\t')[0])
            sub_id = conts[i].split('\t')[0].split('/')[1]
            input = sample['image']  # already transformed; to show it raw, comment line 197
            target = sample['mask']
            input_var = torch.autograd.Variable(input).float().cuda()
            # Compute output
            output = segmenter(input_var)
            output = cv2.resize(
                output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
                target.size()[1:][::-1],
                interpolation=cv2.INTER_CUBIC).argmax(axis=2).astype(np.uint8)
            # image[0] is 3 x 720 x 1280; mask is 1 x 720 x 1280
            # Compute IoU
            gt = target[0].data.cpu().numpy().astype(np.uint8)
            gt_idx = gt < num_classes  # ignore every class index >= num_classes
            cm += fast_cm(output[gt_idx], gt[gt_idx], num_classes)
            # Visualise image / ground truth / prediction side by side
            plt.subplot(1, 3, 1)
            plt.title('%s-%s' % (sub_id, img_name))
            # Inverse of 'image': (self.scale * image - self.mean) / self.std
            unnormalized = (input[0].data.cpu().numpy().transpose(1, 2, 0)
                            * args.normalise_params[2]
                            + args.normalise_params[1]) / args.normalise_params[0]
            plt.imshow(unnormalized.astype(np.uint8))
            plt.subplot(1, 3, 2)
            plt.title('gt')
            plt.imshow(gt)
            plt.subplot(1, 3, 3)
            plt.title('pred')
            plt.imshow(output)
            # plt.show()  # in dataset.py, comment line 153 first
            # To save the figures, point img_dir at a writable directory and uncomment, e.g.
            # img_dir = '/home/yangjing/code/wash-hand/handSeg_pytorch/visual_output/hw1-HWtest-06/'
            # plt.savefig(os.path.join(img_dir, img_name))
            if i % args.print_every == 0:
                logger.info(' Val epoch: {} [{}/{}]\t'
                            'hand IoU: {:.3f}\t'
                            'hand Recall: {:.3f}'.format(
                                epoch, i, len(val_loader),
                                compute_iu(cm)[1], compute_recall(cm)[1]))
        fo.close()
    ious = compute_iu(cm)
    recall_hand = compute_recall(cm)[1]
    logger.info(" IoUs: {}".format(ious))
    hiou = ious[1]  # class 1 is the hand class
    logger.info(
        ' Val epoch: {}\t hand IoU: {:.3f}\t avg hand Recall: {:.3f}\t resize_scale: {:.1f}'
        .format(epoch, hiou, recall_hand, args.low_scale[0]))
    return hiou
def validate(segmenter, val_loader, epoch, num_classes=-1):
    """Validate an RGB-D segmenter.

    Args:
        segmenter (nn.Module) : segmentation network
        val_loader (DataLoader) : validation data iterator
        epoch (int) : current epoch
        num_classes (int) : number of classes to consider

    Returns:
        Mean IoU (float)
    """
    val_loader.dataset.set_stage('val')
    segmenter.eval()
    cm = np.zeros((num_classes, num_classes), dtype=int)
    os.makedirs('val-tmp', exist_ok=True)  # the debug dumps below are written here
    with torch.no_grad():
        for i, sample in enumerate(val_loader):
            input = sample['image']
            # Debug: dump the input image to file (scaled for visibility)
            cv2.imwrite('val-tmp/' + str(i) + '-input.png',
                        input.data.cpu().numpy().squeeze(0).transpose(1, 2, 0) * 100)
            depth_input = sample['depth_image']
            # Debug: dump the input depth image to file
            cv2.imwrite('val-tmp/' + str(i) + '-depth-image.png',
                        depth_input.data.cpu().numpy().squeeze(0).transpose(1, 2, 0) * 100)
            target = sample['mask']
            # Debug: dump the target label image to file
            cv2.imwrite('val-tmp/' + str(i) + '-target.png',
                        target.data.cpu().numpy().astype(np.uint8).squeeze(0))
            input_var = torch.autograd.Variable(input).float().cuda()
            depth_input_var = torch.autograd.Variable(depth_input).float().cuda()
            # Compute output from the RGB and depth streams
            output = segmenter(input_var, depth_input_var)
            output = cv2.resize(
                output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),
                target.size()[1:][::-1],
                interpolation=cv2.INTER_CUBIC).argmax(axis=2).astype(np.uint8)
            # Debug: dump the predicted label image to file
            cv2.imwrite('val-tmp/' + str(i) + '-output.png', output)
            # Compute IoU
            gt = target[0].data.cpu().numpy().astype(np.uint8)
            gt_idx = gt < num_classes  # ignore every class index >= num_classes
            cm += fast_cm(output[gt_idx], gt[gt_idx], num_classes)
            if i % args.print_every == 0:
                logger.info(' Val epoch: {} [{}/{}]\t'
                            'Mean IoU: {:.3f}'.format(epoch, i, len(val_loader),
                                                      compute_iu(cm).mean()))
    ious = compute_iu(cm)
    logger.info(" IoUs: {}".format(ious))
    miou = np.mean(ious)
    logger.info(' Val epoch: {}\tMean IoU: {:.3f}'.format(epoch, miou))
    return miou