def demo(model, dataset, device, filename):
    """Inpaint every matching (image, mask) pair in *dataset* and save grids.

    For each ground-truth image path, every mask whose basename (minus its
    last two characters) occurs as a substring of the image path is applied;
    the model output is saved as a comparison grid ``<mask_base>_out.jpg``.

    Args:
        model: inpainting network; called as ``model(img, mask)`` and
            expected to return ``(output, _)``.
        dataset: provides ``paths``, ``mask_paths``, ``img_transform`` and
            ``mask_transform``.
        device: torch device the model runs on.
        filename: unused; kept for signature compatibility with callers.
    """
    for path in dataset.paths:
        gt_img = Image.open(path)
        gt_img = dataset.img_transform(gt_img.convert('RGB'))
        for maskpath in dataset.mask_paths:
            base = os.path.splitext(os.path.basename(maskpath))[0]
            # NOTE(review): pairing rule — mask basename minus its last two
            # characters must appear in the image path. TODO confirm against
            # the dataset's file-naming scheme.
            if base[:-2] not in path:
                continue
            mask = Image.open(maskpath)
            mask = dataset.mask_transform(mask.convert('RGB'))
            # Binarize the mask: values above 0.1 become 1.0 (keep pixel).
            mask = (mask > .1).float()
            # assumes the transforms yield 3x256x256 tensors — TODO confirm
            gt_img = torch.reshape(gt_img, (1, 3, 256, 256))
            mask = torch.reshape(mask, (1, 3, 256, 256))
            with torch.no_grad():
                output, _ = model(gt_img.to(device), mask.to(device))
            output = output.to(torch.device('cpu'))
            grid = make_grid(
                torch.cat(
                    (unnormalize(gt_img), mask, unnormalize(gt_img * mask),
                     unnormalize(output)),
                    dim=0))
            save_image(grid, base + '_out.jpg')
def evaluate(model, dataset, device, filename, if_save=False):
    """Evaluate *model* on the first 8 samples of *dataset* and save a grid.

    Writes a comparison grid (input, mask, raw output, composited output,
    ground truth) to *filename*.  When *if_save* is true, additionally
    renders the first sample with matplotlib, saving ``figure_8.png`` and
    ``all_images.png``.

    Args:
        model: inpainting network returning ``(output, _)``.
        dataset: indexable; each item is an ``(image, mask, gt)`` triple.
        device: torch device to run the model on.
        filename: output path for the saved grid image.
        if_save: also produce the matplotlib figures when true.
    """
    image, mask, gt = zip(*[dataset[i] for i in range(8)])
    image = torch.stack(image)
    mask = torch.stack(mask)
    gt = torch.stack(gt)
    with torch.no_grad():
        output, _ = model(image.to(device), mask.to(device))
    output = output.to(torch.device('cpu'))
    # Composite: keep known pixels from the input, fill holes from the model.
    output_comp = mask * image + (1 - mask) * output
    grid = make_grid(
        torch.cat((unnormalize(image), mask, unnormalize(output),
                   unnormalize(output_comp), unnormalize(gt)), dim=0))
    save_image(grid, filename)
    # CHW -> HWC layout for matplotlib imshow (first sample only).
    image = image[0].permute(1, 2, 0)
    gt = gt[0].permute(1, 2, 0)
    mask = mask[0].permute(1, 2, 0)
    output = output[0].permute(1, 2, 0)
    if if_save:
        fig = plt.figure(figsize=(6, 8))
        fig.add_subplot(2, 2, 1)
        plt.imshow(gt)
        plt.title("True image")
        plt.ylabel('y')
        plt.xlabel('x')
        fig.add_subplot(2, 2, 2)
        plt.imshow(mask)
        plt.title("Mask is")
        plt.ylabel('mask_y')
        plt.xlabel('mask_x')
        fig.add_subplot(2, 2, 3)
        title = "Masked Image for PATCH_SIZE = 10"
        plt.imshow(image)
        plt.title(title)
        plt.ylabel('masked_y')
        plt.xlabel('masked_x')
        plt.savefig("figure_8.png")
        fig.add_subplot(2, 2, 4)
        plt.imshow(output)
        plt.title("output image")
        plt.ylabel('output_y')
        plt.xlabel('output_x')
        # BUG FIX: savefig must come before show() — show() blocks and can
        # leave the figure empty, which previously wrote a blank
        # all_images.png.
        plt.savefig("all_images.png")
        plt.show()
def demo(model, dataset, device, filename):
    """Inpaint the first sample of *dataset* and save a comparison grid.

    The grid written to *filename* shows, in order: masked input, mask,
    raw model output, composited output, and ground truth.
    """
    img0, mask0, gt0 = dataset[0]
    # Add a leading batch dimension of 1 for the model.
    image = img0.unsqueeze(0)
    mask = mask0.unsqueeze(0)
    gt = gt0.unsqueeze(0)
    with torch.no_grad():
        raw_out, _ = model(image.to(device), mask.to(device))
    raw_out = raw_out.to(torch.device('cpu'))
    # Keep known pixels from the input; take hole pixels from the model.
    composed = mask * image + (1 - mask) * raw_out
    panels = torch.cat(
        (unnormalize(image), mask, unnormalize(raw_out),
         unnormalize(composed), unnormalize(gt)),
        dim=0)
    save_image(make_grid(panels), filename)
def evaluate(model, dataset, device, filename, gamma=1, exposure=1,
             black=0.0, white=1.0, random=False):
    """Evaluate *model* on 8 samples of *dataset* and save a tone-mapped grid.

    Args:
        model: inpainting network returning ``(output, _)``; takes an
            optional third ``guide`` argument when inputs are 4-channel.
        dataset: indexable; each item is ``(image, mask, gt)``.
        device: torch device to run the model on.
        filename: output path for the saved grid image.
        gamma, exposure: passed to ``gamma_correct`` for display.
        black, white: passed to ``levels`` for display.
        random: sample 8 random indices instead of the first 8.
    """
    def _tone(x):
        # Shared display post-processing: unnormalize, then gamma/exposure
        # correction, then black/white level adjustment (was copy-pasted
        # four times).
        return levels(gamma_correct(unnormalize(x), gamma, exposure),
                      black, white)

    n = len(dataset)
    image, mask, gt = zip(
        *[dataset[randint(0, n - 1) if random else i] for i in range(8)])
    # A 4th input channel, when present, is a guide map fed to the model as
    # a separate argument.
    if image[0].shape[0] == 4:
        use_guide = True
        guide = [im[3:4, :, :] for im in image]
        image = [im[:3, :, :] for im in image]
    else:
        use_guide = False
    image = torch.stack(image)
    mask = torch.stack(mask)
    gt = torch.stack(gt)
    if use_guide:
        guide = torch.stack(guide)
    with torch.no_grad():
        if use_guide:
            output, _ = model(image.to(device), mask.to(device),
                              guide.to(device))
        else:
            output, _ = model(image.to(device), mask.to(device))
    output = output.to(torch.device('cpu'))
    # Composite: known pixels from the input, hole pixels from the model.
    output_comp = mask * image + (1 - mask) * output
    grid = make_grid(
        torch.cat((_tone(image[:, 0:3, :, :]),
                   mask[:, 0:3, :, :],
                   _tone(output[:, 0:3, :, :]),
                   _tone(output_comp[:, 0:3, :, :]),
                   _tone(gt[:, 0:3, :, :])),
                  dim=0))
    save_image(grid, filename)
def evaluate(model, dataset, device, path):
    """Save composited model outputs (and ground truths) for every sample.

    For each dataset item ``(image, mask, gt, name)``, the composited
    result goes to ``path/<name>.png`` and the ground truth to
    ``gt_path/<name>.png``.
    """
    for idx in range(len(dataset)):
        img, msk, truth, name = dataset[idx]
        # Promote each tensor to a batch of one.
        img = img.unsqueeze(0)
        msk = msk.unsqueeze(0)
        truth = truth.unsqueeze(0)
        with torch.no_grad():
            outputs = model(img.to(device), msk.to(device))
        # The model returns a sequence of stages; the last one is the final
        # prediction.
        final = outputs[-1].to(torch.device('cpu'))
        # Known pixels from the input, hole pixels from the prediction.
        composed = msk * img + (1 - msk) * final
        fname = name.split("/")[-1].replace('.jpg', '.png')
        save_image(unnormalize(composed), path + '/' + fname)
        save_image(unnormalize(truth), "gt_" + path + '/' + fname)
def evaluate(model, dataset, device, filename):
    """Evaluate *model* on the first 8 samples of *dataset* and save a grid.

    Each dataset item is a dict with ``'img'``, ``'mask'`` and ``'gt'``
    tensors.  The grid written to *filename* shows: input, mask, raw
    output, composited output, and ground truth.

    Args:
        model: inpainting network returning ``(output, _)``.
        dataset: indexable; items are dicts as described above.
        device: torch device to run the model on.
        filename: output path for the saved grid image.
    """
    # Index each sample once, then batch per key (was a manual
    # triple-append loop).
    samples = [dataset[i] for i in range(8)]
    image = torch.stack([s['img'].float() for s in samples])
    mask = torch.stack([s['mask'].float() for s in samples])
    gt = torch.stack([s['gt'].float() for s in samples])
    with torch.no_grad():
        output, _ = model(image.to(device), mask.to(device))
    output = output.to(torch.device('cpu'))
    # Composite: known pixels from the input, hole pixels from the model.
    output_comp = mask * image + (1 - mask) * output
    grid = make_grid(
        torch.cat((unnormalize(image), mask, unnormalize(output),
                   unnormalize(output_comp), unnormalize(gt)), dim=0))
    save_image(grid, filename)
def evaluate(model, dataset, device, filename):
    """Inpaint the first dataset sample and save it under the Django media dir.

    The model output is saved, then re-opened and resized back to the
    sample's original dimensions before being saved again to the same path.

    Args:
        model: inpainting network returning ``(output, _)``.
        dataset: indexable; each item is ``(image, mask, gt, ori_size)``
            where ``ori_size`` is ``(width, height)`` — TODO confirm order.
        device: torch device to run the model on.
        filename: file name of the result inside the media directory.
    """
    image, mask, gt, ori_size = zip(*[dataset[i] for i in range(1)])
    image = torch.stack(image)
    mask = torch.stack(mask)
    ori_size = ori_size[0]
    with torch.no_grad():
        output, _ = model(image.to(device), mask.to(device))
    output = output.to(torch.device('cpu'))
    # PORTABILITY FIX: build the path with os.path.join instead of the
    # hard-coded Windows separator string BASE_DIR + '\\Django\\media\\'.
    result_path = os.path.join(BASE_DIR, 'Django', 'media', filename)
    save_image(unnormalize(output), result_path)
    # Round-trip through disk: reload the saved image, resize to the
    # original (height, width), and overwrite it in place.
    img_transform = transforms.Compose(
        [transforms.Resize((ori_size[1], ori_size[0])),
         transforms.ToTensor()])
    output = img_transform(Image.open(result_path))
    save_image(output, result_path)
masks = torch.tensor(np.stack(masks)).float().to(device) # image masked imgs = gts * masks # get output with torch.no_grad(): outputs, _ = model(imgs, masks) outputs_comp = masks * imgs + (1 - masks) * outputs # send back to cpu gts = gts.cpu() outputs_comp = outputs_comp.cpu() # unnormalize un_gts = unnormalize(gts) un_outputs_comp = unnormalize(outputs_comp) # get them to range 0, 1 un_gts = torch.clamp(un_gts, 0., 1.) un_outputs_comp = torch.clamp(un_outputs_comp, 0., 1.) # reshape and typing un_gts = [x.transpose(1, 2, 0) for x in list(un_gts.numpy())] un_outputs_comp = [ x.transpose(1, 2, 0) for x in list(un_outputs_comp.numpy()) ] for score_name in metric_funcs: metric_scores[rng][score_name] = metric_scores[rng].get( score_name, []) + score_func(un_gts, un_outputs_comp, score_name)