def test(args):
    model = Unet(1, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, yy in dataloaders:
            y = model(x)
            l1loss = nn.L1Loss()
            loss = l1loss(y, yy)
            print(loss.item())
            img_y = torch.squeeze(y).numpy()
            img_yy = torch.squeeze(yy).numpy()
            # img_y = (img_y + 1) * 127.5
            plt.figure()
            plt.subplot(121)
            plt.imshow(img_y.transpose(), aspect='auto', interpolation='none',
                       cmap=plt.get_cmap('gray'))
            plt.subplot(122)
            plt.imshow(img_yy.transpose(), aspect='auto', interpolation='none',
                       cmap=plt.get_cmap('gray'))
            plt.pause(0.01)
            # plt.waitforbuttonpress()
    plt.show()

def test_1():
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    imgs = []
    root = "data/val"
    n = len(os.listdir(root)) // 2
    for i in range(n):
        img = os.path.join(root, "%03d.png" % i)
        # mask = os.path.join(root, "%03d_mask.png" % i)
        imgs.append(img)
    i = 0
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x)
            img_x = torch.squeeze(_).numpy()
            img_y = torch.squeeze(y).numpy()
            img_input = cv2.imread(imgs[i], cv2.IMREAD_GRAYSCALE)
            im_color = cv2.applyColorMap(img_input, cv2.COLORMAP_JET)
            img_x = img_as_ubyte(img_x)
            img_y = img_as_ubyte(img_y)
            imgStack = stackImages(0.8, [[img_input, img_x, img_y]])
            # Convert to pseudo-color if needed:
            # imgStack = cv2.applyColorMap(imgStack, cv2.COLORMAP_JET)
            cv2.imwrite(f'train_img/{i}.png', imgStack)
            plt.imshow(imgStack)
            i = i + 1
            plt.pause(0.1)
    plt.show()

def test():
    # The U-Net input has three channels and the output one channel, since the
    # liver is the only class besides the background.
    model = Unet(3, 1).to(device)
    weight_pre = r"./results/weights4_18_35.pth"
    model.load_state_dict(torch.load(weight_pre))  # load the trained model
    liver_dataset = LiverDataset(r"D:\project\data_sets\data_sci\val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # turn on interactive mode
    with torch.no_grad():
        i = 0  # index of the current validation image
        miou_total = 0
        num = len(dataloaders)  # total number of validation images
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)
            # Convert the prediction to numpy before computing metrics; to match
            # the training images an extra batch dimension would be needed.
            img_y = torch.squeeze(y).cpu().numpy()
            mask = get_data(i)[1]  # path of the current mask
            miou_total += get_iou(mask, img_y)  # accumulate the mIoU of this prediction
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            plt.imshow(img_y)
            plt.pause(0.01)
            if i < num:
                i += 1  # move to the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / 10))
        res_record("weights4_18_35.pth Miou=%f \n" % (miou_total / 10))

def test():
    # The U-Net input has three channels and the output one channel, since the
    # liver is the only class besides the background.
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))  # load the trained model
    liver_dataset = LiverDataset(
        r"H:\BaiduNetdisk\BaiduDownload\u_net_liver-master\data\val",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # turn on interactive mode
    with torch.no_grad():
        i = 0  # index of the current validation image
        miou_total = 0
        num = len(dataloaders)  # total number of validation images
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)
            # Convert the prediction to numpy before computing metrics; to match
            # the training images an extra batch dimension would be needed.
            img_y = torch.squeeze(y).cpu().numpy()
            mask = get_data(i)[1]  # path of the current mask
            miou_total += get_iou(mask, img_y)  # accumulate the mIoU of this prediction
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            plt.imshow(img_y)
            plt.pause(0.01)
            if i < num:
                i += 1  # move to the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / 20))

def test():
    model = Unet(3, 1)
    model.load_state_dict(torch.load(Model_path, map_location='cpu'))
    # card_dataset = CardDataset("data/val", transform=x_transforms, target_transform=y_transforms)
    # dataloaders = DataLoader(card_dataset, batch_size=1)
    model.eval()
    with torch.no_grad():
        for name in names:
            img = cv2.imread(name, 1)
            x = cv2img_process(img)
            y = model(x)
            img_y = (torch.squeeze(y).numpy() * -0.4 * 40 / 255.0 - 0.3) / 0.7
            img_y = np.where(img_y < 0.3, 0, img_y)
            img_y = np.where(img_y > 0.3, 1, img_y)
            cv2.imshow("x", img)
            cv2.imshow("predict", img_y)
            # print(img.shape)
            # print(img_y.shape)
            # print("max ", img_y.max())
            # print("min ", img_y.min())
            print(img_y[250][250])
            cv2.waitKey(10)

def test(args):
    model = Unet(1, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cuda'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    save_root = './data/predict'

    model.eval()
    plt.ion()
    index = 0
    with torch.no_grad():
        for x, ground in dataloaders:
            x = x.type(torch.FloatTensor)
            y = model(x)
            x = torch.squeeze(x)
            x = x.unsqueeze(0)
            ground = torch.squeeze(ground)
            ground = ground.unsqueeze(0)
            img_ground = transform_invert(ground, y_transforms)
            img_x = transform_invert(x, x_transforms)
            img_y = torch.squeeze(y).numpy()
            # cv2.imshow('img', img_y)

            src_path = os.path.join(save_root, "predict_%d_s.png" % index)
            save_path = os.path.join(save_root, "predict_%d_o.png" % index)
            ground_path = os.path.join(save_root, "predict_%d_g.png" % index)
            img_ground.save(ground_path)
            # img_x.save(src_path)
            cv2.imwrite(save_path, img_y * 255)
            index = index + 1

def test():
    model = Unet(3, 1)
    model.load_state_dict(
        torch.load('weight/weights_{}.pth'.format(str(num_epochs - 1)),
                   map_location='cpu'))
    liver_dataset = LiverDataset("data/img", "data/label",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    i = 0
    with torch.no_grad():
        for x, _ in dataloaders:
            # print(x.shape)
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            print(img_y)
            img = cv2.normalize(img_y, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
            cv2.imwrite('data/pred/{}.png'.format(str(i)), img)
            i = i + 1
            print(i)

def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("val/healthysick_2",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    test_loss = 0
    correct = 0
    import matplotlib.pyplot as plt
    import torchvision.utils as vutils
    plt.ion()
    with torch.no_grad():
        i = 0
        for x, y, target in dataloaders:
            output1, output2 = model(x)
            img_y = torch.squeeze(output2).numpy()
            plt.imshow(img_y)
            plt.show()
            plt.pause(0.01)
            test_loss += F.nll_loss(output1, target, reduction='sum').item()
            print("-----------")
            print(output1)
            pred = output1.argmax(dim=1, keepdim=True)
            print("predict: {}".format(pred.view_as(target)))
            print('target: {}'.format(target))
            correct += pred.eq(target.view_as(pred)).sum().item()
            print("-----------")
            vutils.save_image(x, 'save3/iter%d-data.jpg' % i, padding=0)
            vutils.save_image(y, 'save3/iter%d-mask.jpg' % i, padding=0)
            vutils.save_image(output2, 'save3/iter%d-target.jpg' % i, padding=0)
            i = i + 1
    test_loss /= len(liver_dataset)
    print('Average loss is: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(liver_dataset),
        100. * correct / len(liver_dataset)))

def main(argv):
    with CytomineJob.from_cli(argv) as job:
        model_path = os.path.join(str(Path.home()), "models", "thyroid-unet")
        model_filepath = pick_model(model_path, job.parameters.tile_size,
                                    job.parameters.cytomine_zoom_level)
        device = torch.device(job.parameters.device)
        unet = Unet(job.parameters.init_fmaps, n_classes=1)
        unet.load_state_dict(torch.load(model_filepath, map_location=device))
        unet.to(device)
        unet.eval()

        segmenter = UNetSegmenter(device=job.parameters.device,
                                  unet=unet,
                                  classes=[0, 1],
                                  threshold=job.parameters.threshold)

        working_path = os.path.join(str(Path.home()), "tmp")
        tile_builder = CytomineTileBuilder(working_path)
        builder = SSLWorkflowBuilder()
        builder.set_n_jobs(1)
        builder.set_overlap(job.parameters.tile_overlap)
        builder.set_tile_size(job.parameters.tile_size, job.parameters.tile_size)
        builder.set_tile_builder(tile_builder)
        builder.set_border_tiles(Workflow.BORDER_TILES_EXTEND)
        builder.set_background_class(0)
        builder.set_distance_tolerance(1)
        builder.set_seg_batch_size(job.parameters.batch_size)
        builder.set_segmenter(segmenter)
        workflow = builder.get()

        slide = CytomineSlide(
            img_instance=ImageInstance().fetch(job.parameters.cytomine_id_image),
            zoom_level=job.parameters.cytomine_zoom_level)
        results = workflow.process(slide)

        print("-------------------------")
        print(len(results))
        print("-------------------------")

        collection = AnnotationCollection()
        for obj in results:
            wkt = shift_poly(obj.polygon, slide,
                             zoom_level=job.parameters.cytomine_zoom_level).wkt
            collection.append(
                Annotation(location=wkt,
                           id_image=job.parameters.cytomine_id_image,
                           id_terms=[154005477],
                           id_project=job.project.id))
        collection.save(n_workers=job.parameters.n_jobs)

        return {}

def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x).sigmoid()
            img_y = torch.squeeze(y).numpy()
            plt.imshow(img_y)
            plt.pause(0.01)
        plt.show()

def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:  # the ground-truth mask is not used here
            y = model(x).sigmoid()
            img_y = torch.squeeze(y).numpy()
            plt.imshow(img_y, cmap='gray', interpolation='nearest')
            plt.pause(0.5)
    plt.ioff()
    plt.show()

def test(args):
    model = Unet(3, 1).to(device)  # build the model
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))  # load the weights
    # Load the data. The training dataset class is reused here, so masks are
    # also loaded even though they are not used at inference time.
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x.to(device))
            img_y = torch.squeeze(y.cpu()).numpy() > 0
            plt.imshow(img_y)
            plt.pause(0.01)
        plt.show()

def test(args):
    model = Unet(3, 1)  # .to(device)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x)
            print("y shape:", y.shape)  # .shape is an attribute, not a method
            img_y = torch.squeeze(y).numpy()
            plt.imshow(img_y)
            plt.savefig("./results/output_%d.jpg" % random.randint(0, 100))
            plt.pause(0.01)
        plt.show()

def test_image():
    # model = Unet(3, 3)
    model = Unet(1, 1)
    model.load_state_dict(torch.load('ckp_xin/fd3model.pth', map_location='cpu'))
    model.eval()
    '''
    img = Image.open("data/aug/24.bmp")
    img = x_transforms(img)
    img = torch.unsqueeze(img, 0)
    '''
    # img_x = pydicom.dcmread("data/aug/32.dcm")
    # img_x = WL(img_x, 150, 300)
    # img_x = Image.fromarray(img_x)
    img_x = Image.open("data/aug/76.bmp")
    img_x = img_x.convert('L')
    # img_x.save('data/aug/32dtp.bmp')
    # img_x.show()
    img_x = x_transforms(img_x)
    img_x = torch.unsqueeze(img_x, 0)
    labels = Image.open("data/aug/76_mask.bmp")
    labels = labels.convert('L')
    labels = y_transforms(labels)
    labels = torch.unsqueeze(labels, 0)
    out = model(img_x)
    print(IOU(out.to("cpu"), labels.to("cpu")).item())
    '''
    img_mask = Image.open("data/aug/166_mask.png")
    img_mask = y_transforms(img_mask)
    img_mask = torch.unsqueeze(img_mask, 0)
    out = model(img)
    dice = dice_coeff(out, img_mask)
    print(dice.detach().numpy())
    '''
    trann = transforms.ToPILImage()
    out = torch.squeeze(out)
    out = trann(out)
    out.save("data/aug/76_maskfd3.bmp")

def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt))
    liver_dataset = LiverDataset(
        "/home/ices/work/tzh/predrnn/results/my_data_predrnn/1050/1",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, name in dataloaders:
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            img_y = np.asarray(img_y)
            copy = img_y
            # print(len(copy[copy < 0.5]))
            copy[copy > 0.5] = int(255)
            copy[copy <= 0.5] = int(0)
            copy = copy.astype(np.int16)
            copy = cv.resize(copy, (64, 64))
            cv.imwrite(os.path.join("./data/my_result", name[0] + ".png"), copy)
            print(name[0])
            copy = cv.imread(os.path.join("./data/my_result", name[0] + ".png"), 0)
            # raw_img = cv.imread(os.path.join("../Unet/raw_images", name[0] + ".png"))
            # cv.imwrite(os.path.join("./data/train", name[0] + ".png"), raw_img)
            # kernel = np.ones((7, 7), np.uint8)
            # copy = cv.morphologyEx(copy, cv.MORPH_CLOSE, kernel)
            # copy = remove_small(copy, 100)
            # copy = cv.GaussianBlur(copy, (3, 3), 0)
            # cv.imwrite(os.path.join("./data/result", name + "hihi.png"), file)
            # cv.imwrite(os.path.join("./data/result", name + "_mask.png"), file)
            edges = cv.Canny(copy, 50, 150)
            # print(len(edges[edges != 0]))
            cv.imwrite(os.path.join("./data/my_result", name[0] + ".png"), edges)
            draw_edge(name[0])

def test():
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(args.ckp))
    liver_dataset = LiverDataset("data/test",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    with torch.no_grad():
        for x, _, x_path in tqdm(dataloaders):
            x_path = str(x_path).split("/")
            x = x.to(device)
            y = model(x)
            img_numpy = y[0].cpu().float().numpy()
            img_numpy = np.transpose(img_numpy, (1, 2, 0))
            img_numpy = (img_numpy >= 0.5) * 255
            img_out = img_numpy.astype(np.uint8)
            imgs = transforms.ToPILImage()(img_out)
            imgs.save('result/' + x_path[2][:-3])

def test():
    model = Unet(1, 1)
    model.load_state_dict(torch.load(PATH))
    test_dataset = TestDataset("dataset/test",
                               transform=x_transforms,
                               target_transform=y_transforms)
    dataloaders = DataLoader(test_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for index, x in enumerate(dataloaders):
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            img_y = img_y[:, :, np.newaxis]
            img = labelVisualize(2, COLOR_DICT, img_y) if False else img_y[:, :, 0]
            io.imsave("./dataset/test/" + str(index) + "_predict.png", img)
            plt.pause(0.01)
        plt.show()

def test_video():
    cap = cv2.VideoCapture(2)
    model = Unet(3, 1)
    model.load_state_dict(torch.load(Model_path))
    model.to(device)
    # card_dataset = CardDataset("data/val", transform=x_transforms, target_transform=y_transforms)
    # dataloaders = DataLoader(card_dataset, batch_size=1)
    model.eval()
    with torch.no_grad():
        while True:
            ret, frame = cap.read()
            if not ret:  # cap.read() returns a bool, not None
                print("camera is not ready")
                exit(0)
            frame = frame[0:480, 0:480]
            img = cv2.resize(frame, (512, 512))
            # img = cv2.imread(name, 1)
            x = cv2img_process(img)
            x_cuda = x.cuda()
            y = model(x_cuda)
            y_cpu = y.cpu()
            img_y = (torch.squeeze(y_cpu).numpy() * -0.4 * 40 / 255.0 - 0.3) / 0.7
            img_y = np.where(img_y < 0.3, 0, img_y)
            img_y = np.where(img_y > 0.3, 1, img_y)
            cv2.imshow("x", img)
            cv2.imshow("predict", img_y)
            # print(img.shape)
            # print(img_y.shape)
            # print("max ", img_y.max())
            # print("min ", img_y.min())
            # print(img_y[250][250])
            cv2.waitKey(1)

def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    card_dataset = CardDataset("data/val",
                               transform=x_transforms,
                               target_transform=y_transforms)
    dataloaders = DataLoader(card_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            org = x
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            plt.subplot(1, 2, 1)
            # move the channel axis last (CHW -> HWC) for display
            plt.imshow(org.numpy()[0].transpose(1, 2, 0))
            plt.subplot(1, 2, 2)
            plt.imshow(img_y)
            plt.pause(0.01)
        plt.show()

def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))  # load the model
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)  # batch_size defaults to 1
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        n = 0
        for x, _ in dataloaders:
            y = model(x)
            # Squeeze out the singleton dimensions; the tensor is converted to a
            # PIL image below.
            img_y = torch.squeeze(y).numpy()
            from PIL import Image
            # img_y is a normalized 2-D float array
            img_y *= 255  # scale to 0-255 gray levels
            im = Image.fromarray(img_y)
            im = im.convert('L')  # convert to grayscale; use 'RGB' instead of 'L' for color
            # save with PIL, since only matplotlib.pyplot is imported here
            im.save('%03d_predict.png' % n)
            threshold = 180
            table = []
            for i in range(256):
                if i < threshold:
                    table.append(0)
                else:
                    table.append(1)
            photo = im.point(table, '1')  # binarize with the lookup table
            photo.save('%03d_predict1.png' % n)
            # plt.imshow(img_y)
            n = n + 1
            # plt.pause(5)
        # plt.show()
        print("hello")

def test(args):
    # The U-Net input is three channels and the output is one channel, because
    # there is only one category (fingerprints) besides the background.
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))  # load the trained model
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # turn on interactive mode
    with torch.no_grad():
        i = 0
        miou_total = 0
        num = len(dataloaders)
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)
            # Before computing metrics, the prediction must be converted to numpy
            # format; to correspond to the training images, an additional
            # batch-size dimension would have to be added.
            img_y = torch.squeeze(y).cpu().numpy()
            mask = get_data(i)[1]  # get the current mask path
            # Get the mIoU of the current prediction and add it to the total mIoU.
            miou_total += get_iou(mask, img_y)
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            img_y = img_y * 255
            img_y = Image.fromarray(img_y)
            plt.imshow(img_y.convert('L'))
            plt.pause(2)
            if i < num:
                i += 1  # process the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / 106))

def test():
    model = Unet(3, 1).to(device)
    weight_pre = r"./results/weights18_14_41.pth"
    model.load_state_dict(torch.load(weight_pre))
    x_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    y_transforms = transforms.ToTensor()
    liver_dataset = LiverDataset(r"D:\project\data_sets\liver\val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # turn on interactive mode
    with torch.no_grad():
        i = 0
        miou_total = 0
        num = len(dataloaders)
        for x, _ in dataloaders:  # the loaded mask is not used directly
            x = x.to(device)
            y = model(x)
            # Convert the prediction to numpy before computing metrics; to match
            # the training images an extra batch dimension would be needed.
            img_y = torch.squeeze(y).cpu().numpy()
            mask = get_data(i)[1]  # path of the current mask
            miou_total += get_iou(mask, img_y)  # accumulate the mIoU of this prediction
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            plt.imshow(img_y)
            plt.pause(0.01)
            if i < num:
                i += 1  # move to the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / 20))
        res_record("weights18_14_41.pth Miou=%f \n" % (miou_total / 20))

def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataSet(
        "/home/ming/code/u-net-liver-pytorch/data/val",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    # Disable batch normalization and dropout updates. After training, the model
    # is used on test samples; model.eval() must be called before inference,
    # otherwise the batch-norm layers keep updating their running statistics
    # whenever data passes through, even without training.
    model.eval()
    import matplotlib.pyplot as plt
    # Switch matplotlib to interactive mode, so the script keeps executing even
    # when plt.show() is reached.
    plt.ion()
    # During testing, torch.no_grad() turns off autograd for the whole network,
    # which is much faster and allows larger test batch sizes; it is optional.
    with torch.no_grad():
        for x, _ in dataloaders:
            # The sigmoid maps the output into [0, 1] so it can be drawn as a
            # heat map with imshow.
            y = model(x).sigmoid()
            img_y = torch.squeeze(y).numpy()
            # get_iou("data/val/000_mask.png", img_y)
            # imshow first normalizes the 2-D array to [0, 1] and then assigns
            # each cell a color from the chosen colormap, producing a heat map.
            plt.imshow(img_y)
            plt.pause(0.1)
        plt.show()

def predict_image(image_file, model_path):
    """
    Introduction
    ------------
    Predict an image with the MobileNet-UNet model.
    """
    image = cv2.imread(image_file)
    image = cv2.resize(image, (256, 256))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_tensor = transforms.ToTensor()(image).unsqueeze(0)
    model = Unet(3, 1)
    ckpt = torch.load(model_path, map_location='cpu')
    model.load_state_dict(ckpt)
    model.eval()
    output_mask = model(image_tensor)
    output_mask = output_mask.reshape(256, 256)
    image_mask = output_mask.detach().numpy() * 255
    image_mask = image_mask.astype(np.uint8)
    image_mask = cv2.resize(image_mask, (256, 256))
    plt.subplot(121)
    plt.imshow(image)
    plt.subplot(122)
    plt.imshow(image_mask)
    plt.show()

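# Hypothetical usage of predict_image (the paths below are placeholders, not
# from the original code); the function loads both the image and the checkpoint
# itself, so a single call runs the whole pipeline:
# predict_image("samples/example.jpg", "checkpoints/mobilenet_unet.pth")
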
def test():
    model = Unet(5, 2)
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))
    model.to(device)
    test_root_dir = "test"
    PAVE_dataset = SSFPTestDataset(root=test_root_dir)
    # 828 has to be divisible by batch_size because there are 828 slices per patient.
    batch_size = 1
    dataloaders = DataLoader(PAVE_dataset, batch_size=batch_size)
    model.eval()
    # import matplotlib.pyplot as plt
    # plt.ion()
    test_result_dir = "test_result"
    if not os.path.exists(test_result_dir):
        os.makedirs(test_result_dir)
    # buffer indexed as (patient, slice, class, height, width)
    patients = np.zeros((1, 832, 2, 224, 832))
    with torch.no_grad():
        for x, slice_num, patient_num, leg in tqdm(dataloaders):
            x = x.to(device)
            y = model(x)
            output = y.cpu().numpy()
            if leg[0] == 'left':
                patients[patient_num, slice_num + 2, :, :192, 80:400] = output[0, :, :, :]
            else:
                patients[patient_num, slice_num + 2, :, :192, 480:800] = output[0, :, :, :]
    for patient_num in range(10):
        image_filename = os.path.join("/home/mng/scratch/PAVE_Challenge/test/",
                                      'case{}'.format(patient_num + 1),
                                      'ssfp.nii.gz')
        size_x = nib.load(image_filename).shape[2]
        patient_output_vessels = np.transpose(
            (patients[patient_num, :, 0, :size_x, :] >= 0.5), axes=(2, 0, 1))
        patient_output_arteries = np.transpose(
            (patients[patient_num, :, 1, :size_x, :] >= 0.5), axes=(2, 0, 1))
        patient_output_veins = np.logical_and(patient_output_vessels,
                                              np.logical_not(patient_output_arteries))
        results_file = os.path.join(
            test_result_dir, 'case{}_results_vessels.nii.gz'.format(patient_num + 1))
        save_nii(patient_output_vessels.astype(np.uint8), results_file)
        results_file = os.path.join(
            test_result_dir, 'case{}_results_arteries.nii.gz'.format(patient_num + 1))
        save_nii(patient_output_arteries.astype(np.uint8), results_file)
        results_file = os.path.join(
            test_result_dir, 'case{}_results_veins.nii.gz'.format(patient_num + 1))
        save_nii(patient_output_veins.astype(np.uint8), results_file)

def main(argv): """ IMAGES VALID: * 005-TS_13C08351_2-2014-02-12 12.22.44.ndpi | id : 77150767 * 024-12C07162_2A-2012-08-14-17.21.05.jp2 | id : 77150761 * 019-CP_12C04234_2-2012-08-10-12.49.26.jp2 | id : 77150809 IMAGES TEST: * 004-PF_08C11886_1-2012-08-09-19.05.53.jp2 | id : 77150623 * 011-TS_13C10153_3-2014-02-13 15.22.21.ndpi | id : 77150611 * 018-PF_07C18435_1-2012-08-17-00.55.09.jp2 | id : 77150755 """ with Cytomine.connect_from_cli(argv): parser = ArgumentParser() parser.add_argument("-b", "--batch_size", dest="batch_size", default=4, type=int) parser.add_argument("-j", "--n_jobs", dest="n_jobs", default=1, type=int) parser.add_argument("-e", "--epochs", dest="epochs", default=1, type=int) parser.add_argument("-d", "--device", dest="device", default="cpu") parser.add_argument("-o", "--overlap", dest="overlap", default=0, type=int) parser.add_argument("-t", "--tile_size", dest="tile_size", default=256, type=int) parser.add_argument("-z", "--zoom_level", dest="zoom_level", default=0, type=int) parser.add_argument("--lr", dest="lr", default=0.01, type=float) parser.add_argument("--init_fmaps", dest="init_fmaps", default=16, type=int) parser.add_argument("--data_path", "--dpath", dest="data_path", default=os.path.join(str(Path.home()), "tmp")) parser.add_argument("-w", "--working_path", "--wpath", dest="working_path", default=os.path.join(str(Path.home()), "tmp")) parser.add_argument("-s", "--save_path", dest="save_path", default=os.path.join(str(Path.home()), "tmp")) args, _ = parser.parse_known_args(argv) os.makedirs(args.save_path, exist_ok=True) os.makedirs(args.data_path, exist_ok=True) os.makedirs(args.working_path, exist_ok=True) # fetch annotations (filter val/test sets + other annotations) all_annotations = AnnotationCollection(project=77150529, showWKT=True, showMeta=True, showTerm=True).fetch() val_ids = {77150767, 77150761, 77150809} test_ids = {77150623, 77150611, 77150755} val_test_ids = val_ids.union(test_ids) train_collection = all_annotations.filter(lambda a: ( a.user in {55502856} and len(a.term) > 0 and a.term[0] in {35777351, 35777321, 35777459} and a.image not in val_test_ids)) val_rois = all_annotations.filter( lambda a: (a.user in {142954314} and a.image in val_ids and len( a.term) > 0 and a.term[0] in {154890363})) val_foreground = all_annotations.filter( lambda a: (a.user in {142954314} and a.image in val_ids and len( a.term) > 0 and a.term[0] in {154005477})) train_wsi_ids = list({an.image for an in all_annotations }.difference(val_test_ids)) val_wsi_ids = list(val_ids) download_path = os.path.join(args.data_path, "crops-{}".format(args.tile_size)) images = { _id: ImageInstance().fetch(_id) for _id in (train_wsi_ids + val_wsi_ids) } train_crops = [ AnnotationCrop(images[annot.image], annot, download_path, args.tile_size, zoom_level=args.zoom_level) for annot in train_collection ] val_crops = [ AnnotationCrop(images[annot.image], annot, download_path, args.tile_size, zoom_level=args.zoom_level) for annot in val_rois ] for crop in train_crops + val_crops: crop.download() np.random.seed(42) dataset = RemoteAnnotationTrainDataset( train_crops, seg_trans=segmentation_transform) loader = DataLoader(dataset, shuffle=True, batch_size=args.batch_size, num_workers=args.n_jobs, worker_init_fn=worker_init) # network device = torch.device(args.device) unet = Unet(args.init_fmaps, n_classes=1) unet.train() unet.to(device) optimizer = Adam(unet.parameters(), lr=args.lr) loss_fn = BCEWithLogitsLoss(reduction="mean") results = { "train_losses": [], "val_losses": [], 
"val_metrics": [], "save_path": [] } for e in range(args.epochs): print("########################") print(" Epoch {}".format(e)) print("########################") epoch_losses = list() unet.train() for i, (x, y) in enumerate(loader): x, y = (t.to(device) for t in [x, y]) y_pred = unet.forward(x) loss = loss_fn(y_pred, y) optimizer.zero_grad() loss.backward() optimizer.step() epoch_losses = [loss.detach().cpu().item()] + epoch_losses[:5] print("{} - {:1.5f}".format(i, np.mean(epoch_losses))) results["train_losses"].append(epoch_losses[0]) unet.eval() # validation val_losses = np.zeros(len(val_rois), dtype=np.float) val_roc_auc = np.zeros(len(val_rois), dtype=np.float) val_cm = np.zeros([len(val_rois), 2, 2], dtype=np.int) for i, roi in enumerate(val_crops): foregrounds = find_intersecting_annotations( roi.annotation, val_foreground) with torch.no_grad(): y_pred, y_true = predict_roi( roi, foregrounds, unet, device, in_trans=transforms.ToTensor(), batch_size=args.batch_size, tile_size=args.tile_size, overlap=args.overlap, n_jobs=args.n_jobs, zoom_level=args.zoom_level) val_losses[i] = metrics.log_loss(y_true.flatten(), y_pred.flatten()) val_roc_auc[i] = metrics.roc_auc_score(y_true.flatten(), y_pred.flatten()) val_cm[i] = metrics.confusion_matrix( y_true.flatten().astype(np.uint8), (y_pred.flatten() > 0.5).astype(np.uint8)) print("------------------------------") print("Epoch {}:".format(e)) val_loss = np.mean(val_losses) roc_auc = np.mean(val_roc_auc) print("> val_loss: {:1.5f}".format(val_loss)) print("> roc_auc : {:1.5f}".format(roc_auc)) cm = np.sum(val_cm, axis=0) cnt = np.sum(val_cm) print("CM at 0.5 threshold") print("> {:3.2f}% {:3.2f}%".format(100 * cm[0, 0] / cnt, 100 * cm[0, 1] / cnt)) print("> {:3.2f}% {:3.2f}%".format(100 * cm[1, 0] / cnt, 100 * cm[1, 1] / cnt)) print("------------------------------") filename = "{}_e_{}_val_{:0.4f}_roc_{:0.4f}_z{}_s{}.pth".format( datetime.now().timestamp(), e, val_loss, roc_auc, args.zoom_level, args.tile_size) torch.save(unet.state_dict(), os.path.join(args.save_path, filename)) results["val_losses"].append(val_loss) results["val_metrics"].append(roc_auc) results["save_path"].append(filename) return results
colors = [(128, 128, 128), (128, 0, 0), (192, 192, 128), (128, 64, 128),
          (0, 0, 192), (128, 128, 0), (192, 128, 128), (64, 64, 128),
          (64, 0, 128), (64, 64, 0), (0, 128, 192), (0, 0, 0)]

import os

temp = os.listdir()
model_list = []
for aux in temp:
    if aux.endswith(".pth"):
        model_list.append(aux)

for model_path in model_list:
    # Note: this overrides the loop variable, so the same checkpoint is
    # evaluated on every iteration.
    model_path = './model_stable.pth'
    model.load_state_dict(torch.load(model_path))
    model.eval()

    for batch_idx, (data, target, original) in enumerate(custom_dataloader_eval):
        print(batch_idx, len(custom_dataloader_eval))
        # get the inputs
        data, target = data.to(device).float(), target.to(device)

        # forward pass only (evaluation)
        predicts = model(data)
        input_height = data.shape[-2]
        input_width = data.shape[-1]
        predicts = predicts.view(-1, input_height, input_width)  # .cpu().detach().numpy()

plt.plot(valid_iters, valid_l2_qpi)
plt.show()
plt.plot(valid_iters, valid_l2_dapi)
plt.show()

plt.imshow(from_dapi_example, vmin=-0.5, vmax=0.5)
# plt.savefig(fol + '/result1' + str(itt).zfill(7) + '.png', format='png', dpi=200, bbox_inches='tight')
plt.show()
plt.imshow(from_qpi_example, vmin=-0.5, vmax=0.5)
# plt.savefig(fol + '/result2' + str(itt).zfill(7) + '.png', format='png', dpi=200, bbox_inches='tight')
plt.show()

if itt % 200 == 0:
    valid_l2_qpi_tmp = []
    valid_l2_dapi_tmp = []
    for it, (qpi, dapi, name_qpi, name_dapi) in enumerate(validloader):
        unet_qpi2dapi.eval()
        unet_dapi2qpi.eval()
        qpi = qpi.cuda(0)
        dapi = dapi.cuda(0)

        fake_dapi = unet_qpi2dapi(qpi)
        l2_dapi = l1_loss(fake_dapi, dapi)
        fake_qpi = unet_dapi2qpi(dapi)
        l2_qpi = l1_loss(fake_qpi, qpi)

        valid_l2_qpi_tmp.append(l2_qpi.detach().cpu().numpy())
        valid_l2_dapi_tmp.append(l2_dapi.detach().cpu().numpy())
        if it % 5 == 0:
            print('test' + str(it))

plt.show()

example = np.abs(fake_out_images[0, 2, :, :].data.cpu().numpy() -
                 out_images[0, 2, :, :].data.cpu().numpy())
plt.imshow(example, vmin=0, vmax=0.5)
plt.show()

plt.plot(train_iters, train_loss)
plt.plot(test_iters, test_loss)
plt.ylim([0, 0.001])
plt.show()

if itt % 200 == 0:
    test_loss_tmp = []
    for it, (in_images, out_images, pat) in enumerate(validloader):
        unet.eval()
        in_images = in_images.cuda(0)
        out_images = out_images.cuda(0)

        fake_out_images = unet(in_images)
        l2_l = l2_loss(fake_out_images, out_images)
        test_loss_tmp.append(l2_l.detach().cpu().numpy())
        if it % 20 == 0:
            print('test' + str(it))
            example = np.concatenate(
                (in_images[0, 0, :, :].data.cpu().numpy(),
                 fake_out_images[0, 0, :, :].data.cpu().numpy(),
                 out_images[0, 0, :, :].data.cpu().numpy()),

from pathlib import Path

args = {
    "device": "cuda",  # set to "cuda" if a GPU is available
    "out_dir": Path("predictions")
}

import torch
from unet import Unet

state = torch.load(Path("model.pt"), map_location=args["device"])
model = Unet(9, 3, 4).to(args["device"])
model.load_state_dict(state)
model = model.eval()

from data import GlacierDataset
from torch.utils.data import DataLoader

paths = {}
# for split in ["train", "test"]:
for split in ["train"]:
    paths[split] = {}
    for v in ["x", "y"]:
        paths[split][v] = list(Path("out_process").glob(v + "*"))
        paths[split][v].sort()

ds = {
    "train": GlacierDataset(sorted(paths["train"]["x"]), sorted(paths["train"]["y"])),
    # "test": GlacierDataset(paths["test"]["x"], paths["test"]["y"])
}

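# A minimal inference sketch (an assumption, not part of the original script):
# it iterates over ds["train"] with a DataLoader and writes each prediction to
# args["out_dir"] as a .npy file. GlacierDataset is assumed to yield (x, y)
# pairs and the model to return a (batch, class, H, W) map, so the argmax and
# the file naming below are illustrative only.
import numpy as np

args["out_dir"].mkdir(parents=True, exist_ok=True)
loader = DataLoader(ds["train"], batch_size=1, shuffle=False)
with torch.no_grad():
    for i, (x, _) in enumerate(loader):
        pred = model(x.to(args["device"]))    # raw network output
        mask = pred.argmax(dim=1).squeeze(0)  # per-pixel class index
        np.save(args["out_dir"] / f"pred_{i:04d}.npy", mask.cpu().numpy())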