Beispiel #1
0
def test(args):
    """Evaluate a trained classifier checkpoint on the healthy/sick dataset.

    Loads weights from ``args.ckpt`` onto CPU, runs the model sample by
    sample (batch size 1) and prints the average NLL loss and accuracy.
    """
    model = model_class(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("data/healthysick_2",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    test_loss = 0
    correct = 0
    # Removed unused locals from the original: `ak`, `right_arr`, and the
    # `torchvision.utils` import, none of which were referenced.
    import matplotlib.pyplot as plt
    plt.ion()  # interactive plotting mode, kept from the original script
    with torch.no_grad():
        # Dataset yields (image, mask, label); the mask is unused here.
        for x, y, target in dataloaders:
            output1 = model(x)
            # Sum per-sample losses; averaged over the dataset below.
            test_loss += F.nll_loss(output1, target, reduction='sum').item()
            pred = output1.argmax(dim=1, keepdim=True)
            print("pretend: {}".format(pred.view_as(target)))
            print('target:  {}'.format(target))
            correct += pred.eq(target.view_as(pred)).sum().item()
            print("-----------")
    test_loss /= len(liver_dataset)
    print('Average loss is: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(liver_dataset),
        100. * correct / len(liver_dataset)))
Beispiel #2
0
def test(args):
    """Run a two-headed Unet over the validation split.

    Prints the classification head's loss and accuracy, displays each
    segmentation output, and writes the input / mask / segmentation of
    every sample under ``save3/``.
    """
    model = Unet(3, 1)
    state = torch.load(args.ckpt, map_location='cpu')
    model.load_state_dict(state)
    liver_dataset = LiverDataset("val/healthysick_2",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    test_loss, correct = 0, 0
    import matplotlib.pyplot as plt
    import torchvision.utils as vutils
    plt.ion()
    with torch.no_grad():
        for i, (x, y, target) in enumerate(dataloaders):
            output1, output2 = model(x)
            img_y = torch.squeeze(output2).numpy()
            plt.imshow(img_y)
            plt.show()
            plt.pause(0.01)
            test_loss += F.nll_loss(output1, target, reduction='sum').item()
            print("-----------")
            print(output1)
            pred = output1.argmax(dim=1, keepdim=True)
            print("pretend: {}".format(pred.view_as(target)))
            print('target:  {}'.format(target))
            correct += pred.eq(target.view_as(pred)).sum().item()
            print("-----------")
            vutils.save_image(x, 'save3/iter%d-data.jpg' % i, padding=0)
            vutils.save_image(y, 'save3/iter%d-mask.jpg' % i, padding=0)
            vutils.save_image(output2, 'save3/iter%d-target.jpg' % i, padding=0)
    test_loss /= len(liver_dataset)
    print('Average loss is: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(liver_dataset), 100.*correct/len(liver_dataset)))
Beispiel #3
0
def test(args):
    """Visualize Unet predictions beside their ground truth for data/val.

    Prints the per-sample L1 loss and shows prediction (left) next to
    target (right) for every validation image.
    """
    model = Unet(1, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    dataset = LiverDataset("data/val",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    criterion = nn.L1Loss()
    with torch.no_grad():
        for image, target in loader:
            pred = model(image)
            print(criterion(pred, target).item())
            pred_img = torch.squeeze(pred).numpy()
            target_img = torch.squeeze(target).numpy()
            plt.figure()
            # Prediction on the left, ground truth on the right.
            for position, im in ((121, pred_img), (122, target_img)):
                plt.subplot(position)
                plt.imshow(im.transpose(),
                           aspect='auto',
                           interpolation='none',
                           cmap=plt.get_cmap('gray'))
            plt.pause(0.01)
        plt.show()
def test():
    """Write 8-bit Unet predictions for data/img as PNGs under data/pred."""
    model = Unet(3, 1)
    weight_file = 'weight/weights_{}.pth'.format(str(num_epochs - 1))
    model.load_state_dict(torch.load(weight_file, map_location='cpu'))
    dataset = LiverDataset("data/img",
                           "data/label",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for i, (image, _) in enumerate(loader):
            pred = model(image)
            pred_np = torch.squeeze(pred).numpy()
            print(pred_np)
            # Rescale the raw prediction into [0, 255] before writing.
            img = cv2.normalize(pred_np, None, 0, 255, cv2.NORM_MINMAX,
                                cv2.CV_8U)
            cv2.imwrite('data/pred/{}.png'.format(str(i)), img)
            print(i + 1)
Beispiel #5
0
def test():
    """Evaluate the segmentation Unet on the validation set and report mIoU.

    Loads fixed weights, shows each input next to its prediction, and
    accumulates per-image IoU via ``get_iou`` before printing the mean.
    """
    model = Unet(3, 1).to(device)  # 3-channel input, 1-channel output: a single liver class besides background
    weight_pre = r"./results/weights4_18_35.pth"
    model.load_state_dict(torch.load(weight_pre))  # load the trained weights
    liver_dataset = LiverDataset(r"D:\project\data_sets\data_sci\val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # enable interactive plotting

    with torch.no_grad():
        i = 0  # index of the current validation image
        miou_total = 0
        num = len(dataloaders)  # total number of validation images (batch size is 1)
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)

            img_y = torch.squeeze(y).cpu().numpy(
            )  # prediction to numpy before computing the metric
            mask = get_data(i)[1]  # path to the current ground-truth mask
            miou_total += get_iou(mask, img_y)  # accumulate this image's IoU
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))  # original input image on the left
            plt.subplot(122)
            plt.imshow(img_y)  # model prediction on the right
            plt.pause(0.01)
            if i < num: i += 1  # advance to the next validation image
        plt.show()
        # NOTE(review): the divisor 10 looks hard-coded to one dataset's size —
        # confirm it equals len(dataloaders); otherwise the reported mIoU is
        # scaled incorrectly.
        print('Miou=%f' % (miou_total / 10))
        res_record("weights4_13_40.pth Miou=%f \n" % (miou_total / 10))
Beispiel #6
0
def test():
    """Evaluate the segmentation Unet on the validation set and report mIoU.

    Loads weights from ``args.ckp``, shows each input beside its prediction,
    and accumulates per-image IoU via ``get_iou`` before printing the mean.
    """
    model = Unet(3, 1).to(device)  #3-channel input, 1-channel output: a single liver class besides background
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))  #load the trained weights
    liver_dataset = LiverDataset(
        r"H:\BaiduNetdisk\BaiduDownload\u_net_liver-master\data\val",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  #enable interactive plotting

    with torch.no_grad():
        i = 0  #index of the current validation image
        miou_total = 0
        num = len(dataloaders)  #total number of validation images (batch size is 1)
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)

            img_y = torch.squeeze(y).cpu().numpy(
            )  #prediction to numpy before computing the metric
            mask = get_data(i)[1]  #path to the current ground-truth mask
            miou_total += get_iou(mask, img_y)  #accumulate this image's IoU
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))  #original input on the left
            plt.subplot(122)
            plt.imshow(img_y)  #prediction on the right
            plt.pause(0.01)
            if i < num: i += 1  #advance to the next validation image
        plt.show()
        # NOTE(review): the divisor 20 looks hard-coded to one dataset's size —
        # confirm it equals len(dataloaders); otherwise the mIoU is mis-scaled.
        print('Miou=%f' % (miou_total / 20))
Beispiel #7
0
def test(args):
    """Run the Unet over data/val, saving ground-truth and prediction PNGs.

    Writes ``predict_<i>_g.png`` (ground truth) and ``predict_<i>_o.png``
    (thresholdless prediction scaled by 255) under ./data/predict.
    """
    model = Unet(1, 1)
    # NOTE(review): map_location='cuda' requires a CUDA-enabled torch build;
    # sibling snippets use 'cpu' — confirm this is intentional.
    model.load_state_dict(torch.load(args.ckpt, map_location='cuda'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)

    save_root = './data/predict'

    model.eval()
    plt.ion()
    index = 0
    with torch.no_grad():
        for x, ground in dataloaders:
            x = x.type(torch.FloatTensor)
            y = model(x)
            # Squeeze then re-add a leading dim so transform_invert receives
            # a single-batch tensor — presumably its expected layout; verify.
            x = torch.squeeze(x)
            x = x.unsqueeze(0)
            ground = torch.squeeze(ground)
            ground = ground.unsqueeze(0)
            img_ground = transform_invert(ground, y_transforms)
            # NOTE(review): img_x is computed but never saved (the save call
            # below is commented out) — dead unless transform_invert has
            # side effects; confirm before removing.
            img_x = transform_invert(x, x_transforms)
            img_y = torch.squeeze(y).numpy()
            # cv2.imshow('img', img_y)
            # NOTE(review): src_path is unused while img_x.save is disabled.
            src_path = os.path.join(save_root, "predict_%d_s.png" % index)
            save_path = os.path.join(save_root, "predict_%d_o.png" % index)
            ground_path = os.path.join(save_root, "predict_%d_g.png" % index)
            img_ground.save(ground_path)
            # img_x.save(src_path)
            cv2.imwrite(save_path, img_y * 255)
            index = index + 1
Beispiel #8
0
def test_1():
    """Stack input / mask / prediction side by side and save under train_img/."""
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))
    dataset = LiverDataset("data/val",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    root = "data/val"
    # Half the directory entries are masks; the other half are inputs
    # named "000.png", "001.png", ... in index order.
    count = len(os.listdir(root)) // 2
    imgs = [os.path.join(root, "%03d.png" % k) for k in range(count)]
    with torch.no_grad():
        for i, (image, mask) in enumerate(loader):
            pred = model(image)
            mask_np = torch.squeeze(mask).numpy()
            pred_np = torch.squeeze(pred).numpy()
            img_input = cv2.imread(imgs[i], cv2.IMREAD_GRAYSCALE)
            im_color = cv2.applyColorMap(img_input, cv2.COLORMAP_JET)
            mask_u8 = img_as_ubyte(mask_np)
            pred_u8 = img_as_ubyte(pred_np)
            imgStack = stackImages(0.8, [[img_input, mask_u8, pred_u8]])
            # Pseudo-color conversion, enable if desired:
            # imgStack = cv2.applyColorMap(imgStack, cv2.COLORMAP_JET)
            cv2.imwrite(f'train_img/{i}.png', imgStack)
            plt.imshow(imgStack)
            plt.pause(0.1)
        plt.show()
Beispiel #9
0
def train(args):
    """Train a 3-to-1-channel Unet on data/train with BCE-with-logits loss."""
    net = Unet(3, 1).to(device)
    loss_fn = nn.BCEWithLogitsLoss()
    opt = optim.Adam(net.parameters())
    dataset = LiverDataset("data/train",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=True,
                        num_workers=4)
    train_model(net, loss_fn, opt, loader)
Beispiel #10
0
def train():
    """Fine-tune the Unet from ./results/weights.pth on the liver train set.

    Runs ``num_epochs`` epochs of BCE training, logging per-step and
    per-epoch loss, and saves the final weights under ./results/ with a
    day_hour_second suffix.
    """
    x_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    y_transforms = transforms.ToTensor()

    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(r"./results/weights.pth"))
    batch_size = 1
    num_epochs = 2
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(r'D:\project\data_sets\liver\train',
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    data_loaders = DataLoader(liver_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0)
    print("Start training at ", strftime("%Y-%m-%d %H:%M:%S", localtime()))
    for epoch in range(num_epochs):
        prev_time = datetime.now()
        print('Epoch{}/{}'.format(epoch, num_epochs))
        print('-' * 10)
        dt_size = len(data_loaders.dataset)
        epoch_loss = 0
        step = 0
        for x, y in data_loaders:
            step += 1
            inputs = x.to(device)
            labels = y.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            if (step % 10) == 0:
                print("%d/%d, train_loss:%0.3f" %
                      (step, (dt_size - 1) // data_loaders.batch_size + 1,
                       loss.item()))
        # Report elapsed time and the mean loss for this epoch.
        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = 'Time:{:.0f}:{:.0f}:{:.0f}'.format(h, m, s)
        # Bug fix: the divisor was hard-coded to 400 (one particular dataset
        # size); divide by the actual step count so the reported average is
        # correct for any dataset (max() guards an empty loader).
        epoch_str = "epoch {} loss:{:.4f} ".format(epoch,
                                                   epoch_loss / max(step, 1))
        print(epoch_str + time_str)
        res_record("Time:" + strftime("%Y-%m-%d %H:%M:%S  ", localtime()))
        res_record(epoch_str + '\n')
    print("End training at ", strftime("%Y-%m-%d %H:%M:%S", localtime()))
    # Fetch the timestamp once: three separate localtime() calls could
    # straddle a second boundary and produce an inconsistent filename.
    now = localtime()
    torch.save(
        model.state_dict(),
        './results/weights{}_{}_{}.pth'.format(now.tm_mday, now.tm_hour,
                                               now.tm_sec))
Beispiel #11
0
def train(args):
    """Train a single-channel Unet with BCE-with-logits loss and lr 0.01."""
    net = Unet(1, 1).to(device)
    loss_fn = nn.BCEWithLogitsLoss()
    opt = optim.Adam(net.parameters(), lr=0.01)
    dataset = LiverDataset("/gs/home/majg/liupeng/code",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=True,
                        num_workers=10)
    train_model(net, loss_fn, opt, loader)
Beispiel #12
0
def train():
    """Train a 3-to-1-channel Unet on the unetdata/test folder with BCE loss."""
    net = Unet(3, 1).to(device)
    loss_fn = torch.nn.BCELoss()
    opt = optim.Adam(net.parameters())
    dataset = LiverDataset("/home/xm/Program/ALL-Data/unetdata/test",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=8,
                        shuffle=True,
                        num_workers=4)
    train_model(net, loss_fn, opt, loader)
Beispiel #13
0
def train():
    """Resume BCE training of the Unet from weights4_13_40.pth."""
    net = Unet(3, 1).to(device)
    net.load_state_dict(torch.load(r"./results/weights4_13_40.pth"))
    loss_fn = torch.nn.BCELoss()
    opt = optim.Adam(net.parameters())
    dataset = LiverDataset(r"D:\project\data_sets\data_sci\train",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=5,
                        shuffle=True,
                        num_workers=0)
    train_model(net, loss_fn, opt, loader)
Beispiel #14
0
def train():
    """Train a 3-to-1-channel Unet on the u_net_liver train split with BCE loss."""
    net = Unet(3, 1).to(device)
    loss_fn = torch.nn.BCELoss()
    opt = optim.Adam(net.parameters())
    dataset = LiverDataset(
        r"H:\BaiduNetdisk\BaiduDownload\u_net_liver-master\data\train",
        transform=x_transforms,
        target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=True,
                        num_workers=0)
    train_model(net, loss_fn, opt, loader)
Beispiel #15
0
def train():
    """Train the classifier on healthy/sick data with cross-entropy loss."""
    net = model_class(3, 1).to(device)
    batch_size = 8
    print(batch_size)
    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(net.parameters(), lr=0.0001, weight_decay=0.0001)
    dataset = LiverDataset("data/healthysick_2",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=4)
    train_model(net, loss_fn, opt, loader)
Beispiel #16
0
def train():
    """Train a single-channel Unet on the membrane dataset with BCE loss."""
    net = Unet(1, 1).to(device)
    loss_fn = torch.nn.BCELoss()
    opt = optim.Adam(net.parameters())
    dataset = LiverDataset("./data/membrane/train/image",
                           "./data/membrane/train/label",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=BATCH_SIZE,
                        shuffle=True,
                        num_workers=0)
    train_model(net, loss_fn, opt, loader)
Beispiel #17
0
def train():
    """Train a 3-to-1-channel Unet on data/train, one sample per batch."""
    net = Unet(3, 1).to(device)
    loss_fn = nn.BCEWithLogitsLoss()
    opt = optim.Adam(net.parameters())
    dataset = LiverDataset("data/train",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=1,
                        shuffle=True,
                        num_workers=4)
    train_model(net, loss_fn, opt, loader)
Beispiel #18
0
def test(args):
    """Compute the mean Dice coefficient of the Unet over the training split.

    Thresholds each predicted mask at 0.3, compares it with the ground
    truth, prints the per-batch Dice score and finally the mean over all
    batches.
    """
    model = Unet(1, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset(
        "/home/cvlab04/Desktop/Code/Medical/u_net_liver/data/train/",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=6)
    model.eval()
    count = 0
    count_sum = 0.
    with torch.no_grad():
        for x, labels in dataloaders:
            count += 1
            print("batch:", count)
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            img_y = (img_y > 0.3).astype(np.uint8)  # binarize the prediction
            img_y = img_y.flatten()
            count_predict = np.count_nonzero(img_y > 0)
            true = torch.squeeze(labels).numpy()
            true = true.flatten()
            count_true = np.count_nonzero(true > 0)
            # Intersection via the vectorized elementwise product
            # (the original also carried a dead commented-out O(n^2) loop
            # and a dead `ans = 0` assignment — both removed).
            ans = np.count_nonzero(img_y * true > 0)
            # Dice = 2|A∩B| / (|A| + |B|); the epsilon avoids division by
            # zero when both masks are empty.
            dice_loss = (2 * ans + 0.0001) / (count_predict + count_true +
                                              0.0001)
            print("dice_loss:", dice_loss)

            count_sum += dice_loss
        # Guard against an empty loader so we don't divide by zero.
        if count:
            print("Final_Dice_Loss:", count_sum / count)
Beispiel #19
0
def train():
    """Train a 3-to-1-channel Unet on data/train with plain BCE loss."""
    net = Unet(3, 1).to(device)
    loss_fn = torch.nn.BCELoss()
    opt = optim.Adam(net.parameters())
    dataset = LiverDataset("data/train",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=True,
                        num_workers=4)
    train_model(net, loss_fn, opt, loader)
def train():
    """Train a 3-to-1-channel Unet on the image/mask folder pair with BCE loss."""
    net = Unet(3, 1).to(device)
    loss_fn = torch.nn.BCELoss()
    opt = optim.Adam(net.parameters())
    dataset = LiverDataset("data/image",
                           "data/mask",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=1,
                        shuffle=True,
                        num_workers=4)
    train_model(net, loss_fn, opt, loader)
Beispiel #21
0
def test(args):
    """Show sigmoid-activated Unet predictions for data/val one at a time."""
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    dataset = LiverDataset("data/val",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for sample, _ in loader:
            prob = torch.sigmoid(model(sample))
            plt.imshow(prob.squeeze().numpy())
            plt.pause(0.01)
        plt.show()
Beispiel #22
0
def infer():
    """Visualize Unet predictions for the validation set interactively.

    Loads weights_19.pth onto CPU and shows each squeezed prediction with
    a short pause so the figure refreshes.
    """
    model = Unet(3, 1)
    model.load_state_dict(torch.load('weights_19.pth', map_location='cpu'))
    # Bug fix: switch to inference mode so BatchNorm/Dropout layers use
    # their evaluation behavior — the original never called eval(), leaving
    # the network in training mode during inference.
    model.eval()
    liver_dataset = LiverDataset("./../data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            plt.imshow(img_y)
            plt.pause(0.01)
        plt.show()
Beispiel #23
0
def train(args):
    """Train a 3-to-1-channel Unet on ./data/train with BCE-with-logits loss."""
    net = Unet(3, 1).to(device)
    loss_fn = nn.BCEWithLogitsLoss()
    opt = optim.Adam(net.parameters())
    dataset = LiverDataset("./data/train",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=True,
                        num_workers=4)
    train_model(net, loss_fn, opt, loader)
Beispiel #24
0
def test(args):
    """Show grayscale sigmoid predictions for data/val, then block on show()."""
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    dataset = LiverDataset("data/val",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset, batch_size=1)
    model.eval()
    plt.ion()
    with torch.no_grad():
        for image, _ in loader:
            prob = torch.sigmoid(model(image))
            plt.imshow(prob.squeeze().numpy(),
                       cmap='gray',
                       interpolation='nearest')
            plt.pause(0.5)
        plt.ioff()
        plt.show()
Beispiel #25
0
def test():
    """Show predictions of a single-channel Unet on the membrane test data.

    Checkpoint loading is disabled in this snippet, so the network runs
    with whatever weights it was constructed with.
    """
    model = Unet(1, 1)
    dataset = LiverDataset("./data/membrane/test/image",
                           "./data/membrane/test/predict",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for image, _ in loader:
            out = model(image)
            plt.imshow(out.squeeze().numpy())
            plt.pause(0.01)
Beispiel #26
0
def test(args):
    """Display binarized (> 0) Unet predictions for data/val."""
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    # The dataset also yields masks (reusing the training loader); the
    # mask half of each pair is ignored below.
    dataset = LiverDataset("data/val",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for image, _ in loader:
            out = model(image.to(device))
            binary = out.cpu().squeeze().numpy() > 0
            plt.imshow(binary)
            plt.pause(0.01)
        plt.show()
Beispiel #27
0
def train(args):
    """Train a Unet (3-channel input, 1-channel mask output) on data/train."""
    net = Unet(3, 1).to(device)
    loss_fn = nn.BCEWithLogitsLoss()
    opt = optim.Adam(net.parameters())
    # Each dataset item is an (image, mask) pair.
    dataset = LiverDataset("data/train",
                           transform=x_transforms,
                           target_transform=y_transforms)
    # shuffle reshuffles every epoch; num_workers loads batches in
    # parallel worker processes.
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=True,
                        num_workers=4)
    train_model(net, loss_fn, opt, loader)
Beispiel #28
0
def test():
    """Run the Unet over data/test and save thresholded masks under result/."""
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(args.ckp))
    liver_dataset = LiverDataset("data/test",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    with torch.no_grad():
        # This loader yields (image, mask, source_path) triples, unlike the
        # two-tuple loaders elsewhere in this file — confirm against the
        # LiverDataset used here.
        for x, _, x_path in tqdm(dataloaders):
            # NOTE(review): str() stringifies the whole batch container
            # before splitting on '/'; x_path[2] below assumes a fixed path
            # depth and POSIX separators — fragile, verify on target data.
            x_path = str(x_path).split("/")
            x = x.to(device)
            y = model(x)
            img_numpy = y[0].cpu().float().numpy()
            img_numpy = (np.transpose(img_numpy, (1, 2, 0)))  # CHW -> HWC
            # Threshold at 0.5 and scale to a 0/255 binary mask.
            img_numpy = (img_numpy >= 0.5) * 255
            img_out = img_numpy.astype(np.uint8)
            imgs = transforms.ToPILImage()(img_out)
            # NOTE(review): [:-3] drops the last three characters of the
            # name (e.g. "png" but not the trailing dot) — confirm the
            # intended output filename.
            imgs.save('result/' + x_path[2][:-3])
Beispiel #29
0
def train(args):
    """Train a 3-to-1-channel Unet on data/train with BCE-with-logits loss.

    The DataLoader reshuffles each epoch and uses one worker process to
    feed batches of ``args.batch_size`` samples.
    """
    net = Unet(3, 1).to(device)
    loss_fn = nn.BCEWithLogitsLoss()
    opt = optim.Adam(net.parameters())
    dataset = LiverDataset("data/train",
                           transform=x_transforms,
                           target_transform=y_transforms)
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=True,
                        num_workers=1)
    train_model(net, loss_fn, opt, loader)
Beispiel #30
0
def test(args):
    """Plot Unet predictions for data/train and save them as JPEGs.

    Each prediction is shown interactively and written to
    ./results/output_<k>.jpg where k is a random suffix in [0, 100]
    (note: collisions can overwrite earlier outputs).
    """
    model = Unet(3, 1)  # .to(device)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x)
            # Bug fix: Tensor.shape is an attribute, not a callable —
            # the original `y.shape()` raised TypeError at runtime.
            print("y的shape", y.shape)
            img_y = torch.squeeze(y).numpy()
            plt.imshow(img_y)
            plt.savefig("./results/output_%d.jpg" % random.randint(0, 100))
            plt.pause(0.01)
        plt.show()