def test():
    model = Unet(3, 1)
    model.load_state_dict(torch.load(Model_path, map_location='cpu'))
    #card_dataset = CardDataset("data/val", transform=x_transforms,target_transform=y_transforms)
    #dataloaders = DataLoader(card_dataset, batch_size=1)
    model.eval()

    with torch.no_grad():
        for name in names:
            img = cv2.imread(name, 1)
            x = cv2img_process(img)

            y = model(x)
            img_y = (torch.squeeze(y).numpy() * -0.4 * 40 / 255.0 - 0.3) / 0.7
            img_y = np.where(img_y < 0.3, 0, img_y)
            img_y = np.where(img_y > 0.3, 1, img_y)

            cv2.imshow("x", img)
            cv2.imshow("predict", img_y)
            #print(img.shape)
            #print(img_y.shape)
            #print("max ",img_y.max())
            #print("min ",img_y.min())
            print(img_y[250][250])
            cv2.waitKey(10)
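cv2img_process is a project helper that is not shown in these examples (it is used again in test_video further down). A minimal sketch of what it might do, assuming the ToTensor-plus-Normalize([0.5]*3, [0.5]*3) preprocessing seen in the other snippets and a 512x512 input; the resize and the normalization constants are assumptions:

import cv2
import numpy as np
import torch

def cv2img_process(img_bgr, size=512):
    # Hypothetical helper: BGR OpenCV image -> normalized 1x3xHxW float tensor.
    img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)           # OpenCV loads images as BGR
    img = cv2.resize(img, (size, size))
    x = img.astype(np.float32) / 255.0                       # scale to [0, 1]
    x = (x - 0.5) / 0.5                                       # mimic Normalize([0.5]*3, [0.5]*3)
    return torch.from_numpy(x).permute(2, 0, 1).unsqueeze(0)  # HWC -> 1xCxHxW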
Example No. 2
def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt,map_location='cpu'))
    liver_dataset = LiverDataset("val/healthysick_2", transform=x_transforms,target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    test_loss = 0
    correct = 0
    import matplotlib.pyplot as plt
    import torchvision.utils as vutils
    plt.ion()
    with torch.no_grad():
        i = 0
        for x, y, target in dataloaders:
            output1, output2 = model(x)
            img_y=torch.squeeze(output2).numpy()
            plt.imshow(img_y)
            plt.show()
            plt.pause(0.01)
            test_loss += F.nll_loss(output1, target, reduction='sum').item()
            print("-----------")
            print(output1)
            pred = output1.argmax(dim=1, keepdim=True)
            print("pretend: {}".format(pred.view_as(target)))
            print('target:  {}'.format(target))
            correct += pred.eq(target.view_as(pred)).sum().item()
            print("-----------")
            vutils.save_image(x, 'save3/iter%d-data.jpg' % i, padding=0)
            vutils.save_image(y, 'save3/iter%d-mask.jpg' % i, padding=0)
            vutils.save_image(output2, 'save3/iter%d-target.jpg' % i, padding=0)
            i = i+1
    test_loss /= len(liver_dataset)
    print('Average loss is: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(liver_dataset), 100.*correct/len(liver_dataset)))
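A note on the loss computed above: F.nll_loss expects log-probabilities. If output1 holds the raw logits of the classification head (the excerpt does not show the model), a log_softmax would normally be applied first; a hedged sketch of the adjusted loop body, with F being torch.nn.functional as in the original script:

            log_probs = F.log_softmax(output1, dim=1)        # logits -> log-probabilities
            test_loss += F.nll_loss(log_probs, target, reduction='sum').item()
            pred = log_probs.argmax(dim=1, keepdim=True)     # argmax is unchanged by log_softmax

If output1 already contains log-probabilities (e.g. the network ends in log_softmax), the original code is fine as written.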
Example No. 3
def test():
    model = Unet(3, 1)
    model.load_state_dict(
        torch.load('weight/weights_{}.pth'.format(str(num_epochs - 1)),
                   map_location='cpu'))
    liver_dataset = LiverDataset("data/img",
                                 "data/label",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    i = 0
    with torch.no_grad():
        for x, _ in dataloaders:
            #print (x.shape)
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            print(img_y)
            img = cv2.normalize(img_y, None, 0, 255, cv2.NORM_MINMAX,
                                cv2.CV_8U)

            cv2.imwrite('data/pred/{}.png'.format(str(i)), img)

            i = i + 1
            print(i)
Example No. 4
def test():
    model = Unet(3, 1).to(device)  # U-Net takes a 3-channel input and gives a 1-channel output: apart from the background there is only the liver class
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))  # load the trained weights
    liver_dataset = LiverDataset(
        r"H:\BaiduNetdisk\BaiduDownload\u_net_liver-master\data\val",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # enable interactive mode

    with torch.no_grad():
        i = 0  # index of the current validation image
        miou_total = 0
        num = len(dataloaders)  # total number of validation images
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)

            img_y = torch.squeeze(y).cpu().numpy()  # convert the prediction to numpy before scoring;
            # to match the training images an extra batch-size dimension would otherwise be needed
            mask = get_data(i)[1]  # path of the current ground-truth mask
            miou_total += get_iou(mask, img_y)  # IoU of the current prediction, added to the running total
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            plt.imshow(img_y)
            plt.pause(0.01)
            if i < num: i += 1  # move on to the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / 20))
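get_data and get_iou come from the surrounding project and are not included here; the comments imply that get_data(i) returns (image_path, mask_path) and that get_iou compares a mask file with the float prediction map. A minimal sketch of such an IoU computation; the 0.5/127 binarization thresholds are assumptions:

import cv2
import numpy as np

def get_iou(mask_path, img_y, threshold=0.5):
    # Hypothetical IoU between a ground-truth mask file and a predicted probability map.
    mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
    mask = cv2.resize(mask, (img_y.shape[1], img_y.shape[0]))  # match the prediction size
    gt = mask > 127                       # binarize the stored mask
    pred = img_y > threshold              # binarize the prediction
    union = np.logical_or(gt, pred).sum()
    inter = np.logical_and(gt, pred).sum()
    return inter / union if union > 0 else 1.0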
Example No. 5
def test():
    model = Unet(3, 1).to(device)  # U-Net takes a 3-channel input and gives a 1-channel output: apart from the background there is only the liver class
    weight_pre = r"./results/weights4_18_35.pth"
    model.load_state_dict(torch.load(weight_pre))  # load the trained weights
    liver_dataset = LiverDataset(r"D:\project\data_sets\data_sci\val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # enable interactive mode

    with torch.no_grad():
        i = 0  # index of the current validation image
        miou_total = 0
        num = len(dataloaders)  # total number of validation images
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)

            img_y = torch.squeeze(y).cpu().numpy()  # convert the prediction to numpy before scoring;
            # to match the training images an extra batch-size dimension would otherwise be needed
            mask = get_data(i)[1]  # path of the current ground-truth mask
            miou_total += get_iou(mask, img_y)  # IoU of the current prediction, added to the running total
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            plt.imshow(img_y)
            plt.pause(0.01)
            if i < num: i += 1  # move on to the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / 10))
        res_record("weights4_13_40.pth Miou=%f \n" % (miou_total / 10))
Example No. 6
def test(args):
    model = Unet(1, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cuda'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)

    save_root = './data/predict'

    model.eval()
    plt.ion()
    index = 0
    with torch.no_grad():
        for x, ground in dataloaders:
            x = x.type(torch.FloatTensor)
            y = model(x)
            x = torch.squeeze(x)
            x = x.unsqueeze(0)
            ground = torch.squeeze(ground)
            ground = ground.unsqueeze(0)
            img_ground = transform_invert(ground, y_transforms)
            img_x = transform_invert(x, x_transforms)
            img_y = torch.squeeze(y).numpy()
            # cv2.imshow('img', img_y)
            src_path = os.path.join(save_root, "predict_%d_s.png" % index)
            save_path = os.path.join(save_root, "predict_%d_o.png" % index)
            ground_path = os.path.join(save_root, "predict_%d_g.png" % index)
            img_ground.save(ground_path)
            # img_x.save(src_path)
            cv2.imwrite(save_path, img_y * 255)
            index = index + 1
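transform_invert is another project helper: it undoes the torchvision transforms so a tensor can be saved as an image. A minimal sketch under the assumption that the pipeline is ToTensor, optionally preceded by a Normalize inside a Compose; the real helper may differ:

import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image

def transform_invert(img_tensor, transform_pipeline):
    # Hypothetical inverse of ToTensor(+Normalize): tensor -> PIL image.
    if img_tensor.dim() == 4:                                  # drop a leading batch dimension if present
        img_tensor = img_tensor[0]
    for t in getattr(transform_pipeline, 'transforms', []):    # undo Normalize if the Compose has one
        if isinstance(t, transforms.Normalize):
            mean = torch.tensor(t.mean).view(-1, 1, 1)
            std = torch.tensor(t.std).view(-1, 1, 1)
            img_tensor = img_tensor * std + mean
    arr = (img_tensor.clamp(0, 1).numpy() * 255).astype(np.uint8)
    arr = np.transpose(arr, (1, 2, 0)).squeeze()               # CxHxW -> HxWxC; squeeze single-channel masks
    return Image.fromarray(arr)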
Example No. 7
File: main.py  Project: nlroel/unet
def test(args):
    model = Unet(1, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, yy in dataloaders:
            y = model(x)
            l1loss = nn.L1Loss()
            loss = l1loss(y, yy)
            print(loss.item())
            img_y = torch.squeeze(y).numpy()
            img_yy = torch.squeeze(yy).numpy()
            # img_y = (img_y + 1) * 127.5
            plt.figure()
            plt.subplot(121)
            plt.imshow(img_y.transpose(),
                       aspect='auto',
                       interpolation='none',
                       cmap=plt.get_cmap('gray'))
            plt.subplot(122)
            plt.imshow(img_yy.transpose(),
                       aspect='auto',
                       interpolation='none',
                       cmap=plt.get_cmap('gray'))
            plt.pause(0.01)
            # plt.waitforbuttonpress()
        plt.show()
Example No. 8
def test_1():
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    imgs = []
    root = "data/val"
    n = len(os.listdir(root)) // 2
    for i in range(n):
        img = os.path.join(root, "%03d.png" % i)
        # mask = os.path.join(root, "%03d_mask.png" % i)
        imgs.append(img)
    i = 0
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x)
            img_x = torch.squeeze(_).numpy()
            img_y = torch.squeeze(y).numpy()
            img_input = cv2.imread(imgs[i], cv2.IMREAD_GRAYSCALE)
            im_color = cv2.applyColorMap(img_input, cv2.COLORMAP_JET)
            img_x = img_as_ubyte(img_x)
            img_y = img_as_ubyte(img_y)
            imgStack = stackImages(0.8, [[img_input, img_x, img_y]])
            # convert to a pseudo-color map; enable if needed
            # imgStack = cv2.applyColorMap(imgStack, cv2.COLORMAP_JET)
            cv2.imwrite(f'train_img/{i}.png', imgStack)
            plt.imshow(imgStack)
            i = i + 1
            plt.pause(0.1)
        plt.show()
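LiverDataset itself is defined elsewhere in these projects. Given the file layout used in this snippet (%03d.png images paired with %03d_mask.png masks in one folder), a minimal single-root sketch could look like this; the constructor signature varies across the examples (one of them passes separate image and label folders), so treat the details as assumptions:

import os
from PIL import Image
from torch.utils.data import Dataset

class LiverDataset(Dataset):
    # Hypothetical dataset pairing NNN.png images with NNN_mask.png masks.
    def __init__(self, root, transform=None, target_transform=None):
        n = len(os.listdir(root)) // 2               # half of the files are masks
        self.pairs = [(os.path.join(root, "%03d.png" % i),
                       os.path.join(root, "%03d_mask.png" % i)) for i in range(n)]
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, mask_path = self.pairs[index]
        img = Image.open(img_path).convert('RGB')
        mask = Image.open(mask_path).convert('L')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return img, mask

    def __len__(self):
        return len(self.pairs)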
Example No. 9
def train():
    x_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    y_transforms = transforms.ToTensor()

    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(r"./results/weights.pth"))
    batch_size = 1
    num_epochs = 2
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(r'D:\project\data_sets\liver\train',
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    data_loaders = DataLoader(liver_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0)
    print("Start training at ", strftime("%Y-%m-%d %H:%M:%S", localtime()))
    for epoch in range(num_epochs):
        prev_time = datetime.now()
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('-' * 10)
        dt_size = len(data_loaders.dataset)
        epoch_loss = 0
        step = 0
        for x, y in data_loaders:
            step += 1
            inputs = x.to(device)
            labels = y.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            if (step % 10) == 0:
                print("%d/%d, train_loss:%0.3f" %
                      (step, (dt_size - 1) // data_loaders.batch_size + 1,
                       loss.item()))
        # print the results of the current training
        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = 'Time:{:.0f}:{:.0f}:{:.0f}'.format(h, m, s)
        epoch_str = "epoch {} loss:{:.4f} ".format(epoch, epoch_loss / 400)
        print(epoch_str + time_str)
        res_record("Time:" + strftime("%Y-%m-%d %H:%M:%S  ", localtime()))
        res_record(epoch_str + '\n')
    print("End training at ", strftime("%Y-%m-%d %H:%M:%S", localtime()))
    # log the results
    torch.save(
        model.state_dict(),
        './results/weights{}_{}_{}.pth'.format(localtime().tm_mday,
                                               localtime().tm_hour,
                                               localtime().tm_sec))
Example No. 10
def main(argv):
    with CytomineJob.from_cli(argv) as job:
        model_path = os.path.join(str(Path.home()), "models", "thyroid-unet")
        model_filepath = pick_model(model_path, job.parameters.tile_size,
                                    job.parameters.cytomine_zoom_level)
        device = torch.device(job.parameters.device)
        unet = Unet(job.parameters.init_fmaps, n_classes=1)
        unet.load_state_dict(torch.load(model_filepath, map_location=device))
        unet.to(device)
        unet.eval()

        segmenter = UNetSegmenter(device=job.parameters.device,
                                  unet=unet,
                                  classes=[0, 1],
                                  threshold=job.parameters.threshold)

        working_path = os.path.join(str(Path.home()), "tmp")
        tile_builder = CytomineTileBuilder(working_path)
        builder = SSLWorkflowBuilder()
        builder.set_n_jobs(1)
        builder.set_overlap(job.parameters.tile_overlap)
        builder.set_tile_size(job.parameters.tile_size,
                              job.parameters.tile_size)
        builder.set_tile_builder(tile_builder)
        builder.set_border_tiles(Workflow.BORDER_TILES_EXTEND)
        builder.set_background_class(0)
        builder.set_distance_tolerance(1)
        builder.set_seg_batch_size(job.parameters.batch_size)
        builder.set_segmenter(segmenter)
        workflow = builder.get()

        slide = CytomineSlide(img_instance=ImageInstance().fetch(
            job.parameters.cytomine_id_image),
                              zoom_level=job.parameters.cytomine_zoom_level)
        results = workflow.process(slide)

        print("-------------------------")
        print(len(results))
        print("-------------------------")

        collection = AnnotationCollection()
        for obj in results:
            wkt = shift_poly(obj.polygon,
                             slide,
                             zoom_level=job.parameters.cytomine_zoom_level).wkt
            collection.append(
                Annotation(location=wkt,
                           id_image=job.parameters.cytomine_id_image,
                           id_terms=[154005477],
                           id_project=job.project.id))
        collection.save(n_workers=job.parameters.n_jobs)

        return {}
Example No. 11
def train():
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(r"./results/weights4_13_40.pth"))
    batch_size = 5
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(r"D:\project\data_sets\data_sci\train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example No. 12
def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt,map_location='cpu'))
    liver_dataset = LiverDataset("data/val", transform=x_transforms,target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y=model(x).sigmoid()
            img_y=torch.squeeze(y).numpy()
            plt.imshow(img_y)
            plt.pause(0.01)
        plt.show()
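Several of these test functions receive an args object with a ckpt (or ckp) attribute, but the argument parsing is never shown. A hedged sketch of the kind of argparse wiring they assume; the flag names beyond --ckpt are guesses:

import argparse

parser = argparse.ArgumentParser(description='U-Net train/test script')
parser.add_argument('action', type=str, help="'train' or 'test'")
parser.add_argument('--ckpt', type=str, default='weights_19.pth',
                    help='path of the checkpoint to load for testing')
parser.add_argument('--batch_size', type=int, default=1)
args = parser.parse_args()

# e.g.  python main.py test --ckpt weights_19.pth
if args.action == 'test':
    test(args)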
Example No. 13
def infer():
    model = Unet(3, 1)
    model.load_state_dict(torch.load('weights_19.pth', map_location='cpu'))
    liver_dataset = LiverDataset("./../data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()  # freeze batch-norm/dropout behaviour before inference
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            plt.imshow(img_y)
            plt.pause(0.01)
        plt.show()
Example No. 14
def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    plt.ion()
    with torch.no_grad():
        for x, y in dataloaders:
            y = model(x).sigmoid()
            img_y = torch.squeeze(y).numpy()
            plt.imshow(img_y, cmap='gray', interpolation='nearest')
            plt.pause(0.5)
        plt.ioff()
        plt.show()
Example No. 15
def test(args):
    model = Unet(3, 1).to(device)  # build the model
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))  # load the weights
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms
                                 )  # the training dataset class is reused here, so masks are loaded too even though inference does not use them
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x.to(device))
            img_y = torch.squeeze(y.cpu()).numpy() > 0
            plt.imshow(img_y)
            plt.pause(0.01)
        plt.show()
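Thresholding the raw output at 0 here is equivalent to thresholding its sigmoid at 0.5, assuming the network emits logits, so no explicit sigmoid is needed before binarizing. To also save the binary map instead of only displaying it, a small hedged addition for the loop body (it assumes cv2 and numpy as np are imported, plus a running counter i and an output folder that are not in the original):

            mask_u8 = img_y.astype(np.uint8) * 255               # boolean map -> 0/255 image
            cv2.imwrite('data/pred/%03d_mask.png' % i, mask_u8)   # hypothetical output path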
Example No. 16
def test():
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(args.ckp))
    liver_dataset = LiverDataset("data/test",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    with torch.no_grad():
        for x, _, x_path in tqdm(dataloaders):
            x_path = str(x_path).split("/")
            x = x.to(device)
            y = model(x)
            img_numpy = y[0].cpu().float().numpy()
            img_numpy = (np.transpose(img_numpy, (1, 2, 0)))
            img_numpy = (img_numpy >= 0.5) * 255
            img_out = img_numpy.astype(np.uint8)
            imgs = transforms.ToPILImage()(img_out)
            imgs.save('result/' + x_path[2][:-3])
Example No. 17
def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt))
    liver_dataset = LiverDataset(
        "/home/ices/work/tzh/predrnn/results/my_data_predrnn/1050/1",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, name in dataloaders:
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            img_y = np.asarray(img_y)
            copy = img_y
            # print(len(copy[copy<0.5]))
            copy[copy > 0.5] = int(255)
            copy[copy <= 0.5] = int(0)
            copy = copy.astype(np.int16)
            copy = cv.resize(copy, (64, 64))

            cv.imwrite(os.path.join("./data/my_result", name[0] + ".png"),
                       copy)
            print(name[0])
            copy = cv.imread(
                os.path.join("./data/my_result", name[0] + ".png"), 0)

            # raw_img = cv.imread(os.path.join("../Unet/raw_images",name[0]+".png"))
            # cv.imwrite(os.path.join("./data/train",name[0]+".png"),raw_img)
            # kernel = np.ones((7, 7), np.uint8)
            # copy = cv.morphologyEx(copy, cv.MORPH_CLOSE, kernel)
            # copy = remove_small(copy, 100)
            # copy = cv.GaussianBlur(copy,(3,3),0)
            # cv.imwrite(os.path.join("./data/result",name+"hihi.png"),file)
            # cv.imwrite(os.path.join("./data/result",name+"_mask.png"),file)

            edges = cv.Canny(copy, 50, 150)
            # print(len(edges[edges!=0]))
            cv.imwrite(os.path.join("./data/my_result", name[0] + ".png"),
                       edges)
            draw_edge(name[0])
Example No. 18
def test_image():
    #model = Unet(3, 3)
    model = Unet(1, 1)
    model.load_state_dict(
        torch.load('ckp_xin/fd3model.pth', map_location='cpu'))
    model.eval()
    '''
    img = Image.open("data/aug/24.bmp")
    img = x_transforms(img)
    img = torch.unsqueeze(img,0)
    '''
    #img_x = pydicom.dcmread("data/aug/32.dcm")
    #img_x = WL(img_x,150,300)
    #img_x = Image.fromarray(img_x)
    img_x = Image.open("data/aug/76.bmp")
    img_x = img_x.convert('L')
    #img_x.save('data/aug/32dtp.bmp')
    #img_x.show()

    img_x = x_transforms(img_x)
    img_x = torch.unsqueeze(img_x, 0)

    labels = Image.open("data/aug/76_mask.bmp")
    labels = labels.convert('L')
    labels = y_transforms(labels)
    labels = torch.unsqueeze(labels, 0)

    out = model(img_x)
    print(IOU(out.to("cpu"), labels.to("cpu")).item())
    '''
    img_mask = Image.open("data/aug/166_mask.png")
    img_mask = y_transforms(img_mask)
    img_mask = torch.unsqueeze(img_mask,0)

    out = model(img)
    dice = dice_coeff(out,img_mask)
    print(dice.detach().numpy())
    '''

    trann = transforms.ToPILImage()
    out = torch.squeeze(out)
    out = trann(out)
    out.save("data/aug/76_maskfd3.bmp")
Example No. 19
def test(args):
    model = Unet(3, 1)  #.to(device)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x)
            print("y的shape", y.shape())
            img_y = torch.squeeze(y).numpy()
            plt.imshow(img_y)
            plt.savefig("./results/output_%d.jpg" % random.randint(0, 100))
            plt.pause(0.01)
        plt.show()
Example No. 20
def test():
    model = Unet(1, 1)
    model.load_state_dict(torch.load(PATH))
    test_dataset = TestDataset("dataset/test",
                               transform=x_transforms,
                               target_transform=y_transforms)
    dataloaders = DataLoader(test_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for index, x in enumerate(dataloaders):
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            img_y = img_y[:, :, np.newaxis]
            img = labelVisualize(2, COLOR_DICT,
                                 img_y) if False else img_y[:, :, 0]
            io.imsave("./dataset/test/" + str(index) + "_predict.png", img)
            plt.pause(0.01)
        plt.show()
Example No. 21
def test_video():
    cap = cv2.VideoCapture(2)

    model = Unet(3, 1)
    model.load_state_dict(torch.load(Model_path))

    model.to(device)
    #card_dataset = CardDataset("data/val", transform=x_transforms,target_transform=y_transforms)
    #dataloaders = DataLoader(card_dataset, batch_size=1)
    model.eval()

    with torch.no_grad():
        while True:
            ret, frame = cap.read()
            if not ret:
                print("camera is not ready")
                exit(0)

            frame = frame[0:480, 0:480]
            img = cv2.resize(frame, (512, 512))
            #img = cv2.imread(name,1)

            x = cv2img_process(img)

            x_cuda = x.cuda()

            y = model(x_cuda)
            y_cpu = y.cpu()
            img_y = (torch.squeeze(y_cpu).numpy() * -0.4 * 40 / 255.0 -
                     0.3) / 0.7
            img_y = np.where(img_y < 0.3, 0, img_y)
            img_y = np.where(img_y > 0.3, 1, img_y)

            cv2.imshow("x", img)
            cv2.imshow("predict", img_y)
            #print(img.shape)
            #print(img_y.shape)
            #print("max ",img_y.max())
            #print("min ",img_y.min())
            #print(img_y[250][250])
            cv2.waitKey(1)
Example No. 22
def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    card_dataset = CardDataset("data/val",
                               transform=x_transforms,
                               target_transform=y_transforms)
    dataloaders = DataLoader(card_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            org = x
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            plt.subplot(1, 2, 1)
            plt.imshow(org.numpy()[0].transpose(1, 2, 0))  # CHW -> HWC for display
            plt.subplot(1, 2, 2)
            plt.imshow(img_y)
            plt.pause(0.01)
        plt.show()
Example No. 23
def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))  # load the model
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)  # batch_size defaults to 1
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        n = 0
        for x, _ in dataloaders:
            y = model(x)
            img_y = torch.squeeze(y).numpy()  # squeeze the tensor dimensions; converted to a PIL image below
            from PIL import Image
            # img_y is a normalized 2-D float matrix

            img_y *= 255  # scale to 0-255 gray levels
            im = Image.fromarray(img_y)
            im = im.convert('L')  # needed to get a grayscale image; use 'RGB' instead of 'L' for color
            matplotlib.image.imsave('%03d_predict.png' % n, im)

            threshold = 180

            table = []
            for i in range(256):
                if i < threshold:
                    table.append(0)
                else:
                    table.append(1)

            photo = im.point(table, '1')
            matplotlib.image.imsave('%03d_predict1.png' % n, photo)
            #plt.imshow(img_y)
            n = n + 1
        #     plt.pause(5)
        # plt.show()
        print("hello")
Example No. 24
def test(args):
    model = Unet(3, 1)  # the U-Net input has three channels and the output one channel,
    # because apart from the background there is only one class (fingerprint)
    model.load_state_dict(torch.load(
        args.ckpt, map_location='cpu'))  #Load the trained model
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # turn on interactive mode

    with torch.no_grad():
        i = 0
        miou_total = 0
        num = len(dataloaders)
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)

            img_y = torch.squeeze(y).cpu().numpy()  # convert the prediction to numpy before scoring;
            # to match the training images an extra batch-size dimension would otherwise be needed
            mask = get_data(i)[1]  # get the current mask path
            miou_total += get_iou(mask, img_y)  # IoU of the current prediction, added to the running total
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            img_y = img_y * 255
            img_y = Image.fromarray(img_y)
            plt.imshow(img_y.convert('L'))
            plt.pause(2)
            if i < num: i += 1  # Processing the next set of validation sets
        plt.show()
        print('Miou=%f' % (miou_total / 106))
Example No. 25
def test():
    model = Unet(3, 1).to(device)
    weight_pre = r"./results/weights18_14_41.pth"
    model.load_state_dict(torch.load(weight_pre))
    x_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    y_transforms = transforms.ToTensor()
    liver_dataset = LiverDataset(r"D:\project\data_sets\liver\val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # enable interactive mode

    with torch.no_grad():
        i = 0
        miou_total = 0
        num = len(dataloaders)
        for x, y in dataloaders:
            x = x.to(device)
            y = model(x)
            img_y = torch.squeeze(y).cpu().numpy()  # convert the prediction to numpy before scoring;
            # to match the training images an extra batch-size dimension would otherwise be needed
            mask = get_data(i)[1]  # path of the current ground-truth mask
            miou_total += get_iou(mask, img_y)  # IoU of the current prediction, added to the running total
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            plt.imshow(img_y)
            plt.pause(0.01)
            if i < num: i += 1  # move on to the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / 20))
        res_record("weights18_14_41.pth Miou=%f \n" % (miou_total / 20))
Example No. 26
def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataSet(
        "/home/ming/code/u-net-liver-pytorch/data/val",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)

    # model.eval() disables the training behaviour of BatchNormalization and Dropout.
    # After training, the model is used on test samples; model.eval() has to be called
    # before model(test), otherwise the model keeps updating its batch-norm running
    # statistics whenever it receives input, even though no training is happening.
    model.eval()

    import matplotlib.pyplot as plt
    # Switch matplotlib into interactive mode:
    # even when plt.show() is reached in the script, execution continues.
    plt.ion()

    # Using torch.no_grad() at test time turns off autograd for the whole network,
    # which speeds things up a lot and allows a larger batch_size for testing.
    # It is optional, of course.
    with torch.no_grad():
        for x, _ in dataloaders:
            # sigmoid squashes the values into (0, 1); it is applied so imshow can draw a heat map below
            y = model(x).sigmoid()
            img_y = torch.squeeze(y).numpy()

            # get_iou("data/val/000_mask.png",img_y)

            # imshow first normalizes the 2-D array to values between 0 and 1, then assigns
            # each cell a color from the chosen colormap, which produces the heat map.
            plt.imshow(img_y)
            plt.pause(0.1)
        plt.show()
Example No. 27
def predict_image(image_file, model_path):
    """
    Introduction
    ------------
        Predict an image with MobileNet-UNet.
    """
    image = cv2.imread(image_file)
    image = cv2.resize(image, (256, 256))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_tensor = transforms.ToTensor()(image).unsqueeze(0)
    model = Unet(3, 1)
    ckpt = torch.load(model_path, map_location='cpu')
    model.load_state_dict(ckpt)
    model.eval()
    output_mask = model(image_tensor)
    output_mask = output_mask.reshape(256, 256)
    image_mask = output_mask.detach().numpy() * 255
    image_mask = image_mask.astype(np.uint8)
    image_mask = cv2.resize(image_mask, (256, 256))
    plt.subplot(121)
    plt.imshow(image)
    plt.subplot(122)
    plt.imshow(image_mask)
    plt.show()
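Unlike the earlier snippets that call .sigmoid() on the network output, predict_image multiplies the raw output by 255 directly; if the model's last layer emits logits rather than probabilities, the uint8 cast will wrap around. A hedged one-line adjustment, assuming output_mask holds raw logits and numpy is imported as np:

    image_mask = (torch.sigmoid(output_mask).detach().numpy() * 255).astype(np.uint8)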
Example No. 28
train_loader = DataLoader(train_data, batch_size=batch_size)
eval_data = KITTIGroundDataset(data_dir, np.arange(10, 11))
eval_loader = DataLoader(eval_data, batch_size=batch_size)
test_data = KITTIGroundDataset(data_dir, np.arange(9, 10))
test_loader = DataLoader(test_data, batch_size=batch_size)

name = 'ground_estimation_net_v2'
net = Unet(32, 1, 5, 32, concat=False)
use_multi_GPU = True
if use_multi_GPU:
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        net = nn.DataParallel(net)
reuse_weights = True
if reuse_weights:
    net.load_state_dict(torch.load('./models/model_{}.pth'.format(name)))
    try:
        best_val_loss = np.load('./models/best_val_loss_{}.npy'.format(name))
    except FileNotFoundError:
        best_val_loss = np.finfo(np.float64).max
    print("Model reloaded. Previous lowest validation loss =",
          str(best_val_loss))
else:
    best_val_loss = np.finfo(np.float64).max

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net.to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=5e-4, weight_decay=1e-4)

best_weights = net.state_dict()
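The training loop itself is cut off in this excerpt. A minimal epoch skeleton consistent with the objects set up above (net, criterion, optimizer, train_loader, eval_loader, best_val_loss, name); the epoch count is an assumption, and torch/np are the imports already used in the excerpt:

num_epochs = 50  # assumption
for epoch in range(num_epochs):
    net.train()
    for x, y in train_loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        loss = criterion(net(x), y)
        loss.backward()
        optimizer.step()

    # validation pass; keep the weights with the lowest validation loss
    net.eval()
    val_loss = 0.0
    with torch.no_grad():
        for x, y in eval_loader:
            x, y = x.to(device), y.to(device)
            val_loss += criterion(net(x), y).item()
    val_loss /= max(len(eval_loader), 1)
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_weights = net.state_dict()
        torch.save(best_weights, './models/model_{}.pth'.format(name))
        np.save('./models/best_val_loss_{}.npy'.format(name), best_val_loss)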
Example No. 29
def test():
    model = Unet(5, 2)
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))
    model.to(device)

    test_root_dir = "test"
    PAVE_dataset = SSFPTestDataset(root=test_root_dir)
    # 828 (the number of slices per patient) has to be divisible by batch_size
    batch_size = 1

    dataloaders = DataLoader(PAVE_dataset, batch_size=batch_size)
    model.eval()
    #import matplotlib.pyplot as plt
    #plt.ion()

    test_result_dir = "test_result"
    if not os.path.exists(test_result_dir):
        os.makedirs(test_result_dir)

    patients = np.zeros((1, 832, 2, 224, 832))

    with torch.no_grad():
        for x, slice_num, patient_num, leg in tqdm(dataloaders):
            x = x.to(device)
            y = model(x)
            output = y.cpu().numpy()

            if leg[0] == 'left':
                patients[patient_num, slice_num + 2, :, :192,
                         80:400] = output[0, :, :, :]
            else:
                patients[patient_num, slice_num + 2, :, :192,
                         480:800] = output[0, :, :, :]

    for patient_num in range(10):
        image_filename = os.path.join("/home/mng/scratch/PAVE_Challenge/test/",
                                      'case{}'.format(patient_num + 1),
                                      'ssfp.nii.gz')

        size_x = nib.load(image_filename).shape[2]

        patient_output_vessels = np.transpose(
            (patients[patient_num, :, 0, :size_x, :] >= 0.5), axes=(2, 0, 1))
        patient_output_arteries = np.transpose(
            (patients[patient_num, :, 1, :size_x, :] >= 0.5), axes=(2, 0, 1))
        patient_output_veins = np.logical_and(
            patient_output_vessels, np.logical_not(patient_output_arteries))

        results_file = os.path.join(
            test_result_dir,
            'case{}_results_vessels.nii.gz'.format(patient_num + 1))
        save_nii(patient_output_vessels.astype(np.uint8), results_file)

        results_file = os.path.join(
            test_result_dir,
            'case{}_results_arteries.nii.gz'.format(patient_num + 1))
        save_nii(patient_output_arteries.astype(np.uint8), results_file)

        results_file = os.path.join(
            test_result_dir,
            'case{}_results_veins.nii.gz'.format(patient_num + 1))
        save_nii(patient_output_veins.astype(np.uint8), results_file)
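save_nii is not defined in this excerpt. A minimal sketch with nibabel, using an identity affine as a fallback; the real helper presumably reuses the affine from the corresponding ssfp.nii.gz header:

import nibabel as nib
import numpy as np

def save_nii(volume, filename, affine=None):
    # Hypothetical helper: write a numpy volume to a NIfTI file.
    if affine is None:
        affine = np.eye(4)
    nib.save(nib.Nifti1Image(volume, affine), filename)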
Example No. 30
    ########################################################################
    # optimizer
    # if args.optim == 'adam':
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    ########################################################################
    # Train the network
    seed_everything(seed=args.seed)
    if args.load_mod:
        history = {
            'Train_loss': [],
            'Train_dice': [],
            'Valid_loss': [],
            'Valid_dice': []
        }
        net.load_state_dict(torch.load(MODEL_FILE))
    else:
        net_swa, history = train_net(net, optimizer, device, args, LOG_FILE,
                                     MODEL_FILE)
        torch.save(net_swa, MODEL_SWA_FILE)
        # save the final result
        print('Finished Training')
        history_df = pd.DataFrame(history)
        history_df.to_csv(HISTORY_FILE)
        # torch.save(net.state_dict(),MODEL_FILE)
        # show the curve
        fig, axs = plt.subplots(1, 2, figsize=(16, 4))
        axs[0].plot(history['Train_loss'], label='Train Loss')
        axs[0].plot(history['Valid_loss'], label='Valid Loss')
        axs[0].legend()
        axs[0].grid()