Example #1
    def __init__(self,
                 image_size,
                 learning_rate=2e-5,
                 batch_size=1,
                 ngf=64,
                 ):
        """
           Args:
             input_size:list [N, H, W, C]
             batch_size: integer, batch size
             learning_rate: float, initial learning rate for Adam
             ngf: number of base gen filters in conv layer
        """
        self.learning_rate = learning_rate
        self.input_shape = [int(batch_size / 4), image_size[0], image_size[1], image_size[2]]
        self.code_shape = [int(batch_size / 4), int(image_size[0] / 8), int(image_size[1] / 8), 4]
        self.ones_code = tf.ones(self.code_shape, name="ones_code")
        self.tenaor_name = {}

        self.G_X = Unet('G_X', ngf=ngf)
        self.D_X = Discriminator('D_X', ngf=ngf)
        self.G_Y = Unet('G_Y', ngf=ngf)
        self.D_Y = Discriminator('D_Y', ngf=ngf)
        self.G_Z = Unet('G_Z', ngf=ngf)
        self.D_Z = Discriminator('D_Z', ngf=ngf)
        self.G_W = Unet('G_W', ngf=ngf)
        self.D_W = Discriminator('D_W', ngf=ngf)
Example #2
def test():
    model = Unet(3, 1).to(device)  # the U-Net takes a 3-channel input and produces a 1-channel output, since the liver is the only class apart from the background
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))  # load the trained model
    liver_dataset = LiverDataset(
        r"H:\BaiduNetdisk\BaiduDownload\u_net_liver-master\data\val",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # enable interactive mode

    with torch.no_grad():
        i = 0  # index of the current image in the validation set
        miou_total = 0
        num = len(dataloaders)  # total number of validation images
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)

            img_y = torch.squeeze(y).cpu().numpy()  # convert the prediction to a NumPy array before passing it to the loss; to match the training images an extra batch-size dimension has to be added
            mask = get_data(i)[1]  # path of the current mask
            miou_total += get_iou(mask, img_y)  # compute the IoU of the current prediction and add it to the running total
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            plt.imshow(img_y)
            plt.pause(0.01)
            if i < num: i += 1  # move on to the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / 20))
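
Example #2 (and Example #8 below) calls get_data(i) for the image/mask paths and get_iou(mask, img_y) for the metric, but neither helper is shown in the listing. A minimal sketch of what such a binary-IoU helper might look like, assuming the mask is stored as a single-channel image on disk and the prediction is a sigmoid output in [0, 1]; the function name, the 0.5 threshold, and the loading convention are assumptions, not taken from the original project:

import numpy as np
from PIL import Image


def get_iou(mask_path, pred, threshold=0.5):
    # Hypothetical helper: IoU between a binarized prediction and the
    # ground-truth mask loaded from mask_path (assumed same spatial size).
    mask = np.array(Image.open(mask_path).convert('L')) > 0
    pred = pred > threshold
    intersection = np.logical_and(mask, pred).sum()
    union = np.logical_or(mask, pred).sum()
    return intersection / union if union > 0 else 1.0
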
Example #3
def test():
    model = Unet(3, 1)
    model.load_state_dict(
        torch.load('weight/weights_{}.pth'.format(str(num_epochs - 1)),
                   map_location='cpu'))
    liver_dataset = LiverDataset("data/img",
                                 "data/label",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    i = 0
    with torch.no_grad():
        for x, _ in dataloaders:
            #print (x.shape)
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            print(img_y)
            img = cv2.normalize(img_y, None, 0, 255, cv2.NORM_MINMAX,
                                cv2.CV_8U)

            cv2.imwrite('data/pred/{}.png'.format(str(i)), img)

            i = i + 1
            print(i)
Example #4
def test(args):
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckpt,map_location='cpu'))
    liver_dataset = LiverDataset("val/healthysick_2", transform=x_transforms,target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    test_loss = 0
    correct = 0
    import matplotlib.pyplot as plt
    import torchvision.utils as vutils
    plt.ion()
    with torch.no_grad():
        i = 0
        for x, y, target in dataloaders:
            output1, output2 = model(x)
            img_y=torch.squeeze(output2).numpy()
            plt.imshow(img_y)
            plt.show()
            plt.pause(0.01)
            test_loss += F.nll_loss(output1, target, reduction='sum').item()
            print("-----------")
            print(output1)
            pred = output1.argmax(dim=1, keepdim=True)
            print("pretend: {}".format(pred.view_as(target)))
            print('target:  {}'.format(target))
            correct += pred.eq(target.view_as(pred)).sum().item()
            print("-----------")
            vutils.save_image(x, 'save3/iter%d-data.jpg' % i, padding=0)
            vutils.save_image(y, 'save3/iter%d-mask.jpg' % i, padding=0)
            vutils.save_image(output2, 'save3/iter%d-target.jpg' % i, padding=0)
            i = i+1
    test_loss /= len(liver_dataset)
    print('Average loss is: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(liver_dataset), 100.*correct/len(liver_dataset)))
Example #5
def get_value():

    modelpath = A.get()
    picinit = B.get()
    outpath = F.get()
    #pic = list(pic)
    pic = picinit.split(' ')
    sign1 = sign.get()
    num_class1 = num_class.get()
    if not modelpath or not pic:
        change.set('Please select a file')

    if modelpath and pic:
        change.set('Running...')
        root.update()
        #allpic = glob.glob(os.path.join(pic,'*.tif'))
        lastpic = pic[-1]
        model = Unet((256, 256, 3), num_class1)
        #model = myunet((256,256,3),num_class1)
        model.load_weights(modelpath)
        d, n = os.path.split(lastpic)
        lastpic_save = os.path.join(outpath + '/' + n)  # last file
        # delete_path  = os.path.join(d,'result')  # directory for saved results
        # if os.path.exists(delete_path):
        #     pp = os.listdir(delete_path)
        #     for x in pp:
        #         delete = os.path.join(delete_path,x)
        #         os.remove(delete)
        #     os.removedirs(delete_path)
        W = P(num_class1)
        W.main_p(model, pic, outpath, changes=sign1)
        if os.path.exists(lastpic_save):
            change.set('Recognition complete!')
            os.startfile(outpath)
Example #6
File: main.py Project: nlroel/unet
def test(args):
    model = Unet(1, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, yy in dataloaders:
            y = model(x)
            l1loss = nn.L1Loss()
            loss = l1loss(y, yy)
            print(loss.item())
            img_y = torch.squeeze(y).numpy()
            img_yy = torch.squeeze(yy).numpy()
            # img_y = (img_y + 1) * 127.5
            plt.figure()
            plt.subplot(121)
            plt.imshow(img_y.transpose(),
                       aspect='auto',
                       interpolation='none',
                       cmap=plt.get_cmap('gray'))
            plt.subplot(122)
            plt.imshow(img_yy.transpose(),
                       aspect='auto',
                       interpolation='none',
                       cmap=plt.get_cmap('gray'))
            plt.pause(0.01)
            # plt.waitforbuttonpress()
        plt.show()
Example #7
    def __init__(self,
                 image_size,
                 learning_rate=2e-5,
                 batch_size=1,
                 ngf=64,
                 units=4096
                 ):
        """
        Args:
          image_size: list [H, W, C]
          batch_size: integer, batch size
          learning_rate: float, initial learning rate for Adam
          ngf: number of gen filters in first conv layer
        """
        self.learning_rate = learning_rate
        self.input_shape = [int(batch_size / 4), image_size[0], image_size[1], image_size[2]]
        self.ones = tf.ones(self.input_shape, name="ones")
        self.tenaor_name = {}

        self.EC_S = VEncoder('EC_S', ngf=ngf, units=units, keep_prob=0.85)
        self.DC_S = VDecoder('DC_S', ngf=ngf, output_channl=2, units=units)

        self.G_M = Unet('G_M', ngf=ngf / 2, keep_prob=0.9, output_channl=2)

        self.D_S = Discriminator('D_S', ngf=ngf, keep_prob=0.85)
        self.FD_Z = FeatureDiscriminator('FD_Z', ngf=ngf)
Example #8
def test():
    model = Unet(3, 1).to(device)  # the U-Net takes a 3-channel input and produces a 1-channel output, since the liver is the only class apart from the background
    weight_pre = r"./results/weights4_18_35.pth"
    model.load_state_dict(torch.load(weight_pre))  # load the trained model
    liver_dataset = LiverDataset(r"D:\project\data_sets\data_sci\val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # enable interactive mode

    with torch.no_grad():
        i = 0  # index of the current image in the validation set
        miou_total = 0
        num = len(dataloaders)  # total number of validation images
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)

            img_y = torch.squeeze(y).cpu().numpy()  # convert the prediction to a NumPy array before passing it to the loss; to match the training images an extra batch-size dimension has to be added
            mask = get_data(i)[1]  # path of the current mask
            miou_total += get_iou(mask, img_y)  # compute the IoU of the current prediction and add it to the running total
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            plt.imshow(img_y)
            plt.pause(0.01)
            if i < num: i += 1  # move on to the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / 10))
        res_record("weights4_13_40.pth Miou=%f \n" % (miou_total / 10))
Example #9
def test(args):
    model = Unet(1, 1)
    model.load_state_dict(torch.load(args.ckpt, map_location='cuda'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)

    save_root = './data/predict'

    model.eval()
    plt.ion()
    index = 0
    with torch.no_grad():
        for x, ground in dataloaders:
            x = x.type(torch.FloatTensor)
            y = model(x)
            x = torch.squeeze(x)
            x = x.unsqueeze(0)
            ground = torch.squeeze(ground)
            ground = ground.unsqueeze(0)
            img_ground = transform_invert(ground, y_transforms)
            img_x = transform_invert(x, x_transforms)
            img_y = torch.squeeze(y).numpy()
            # cv2.imshow('img', img_y)
            src_path = os.path.join(save_root, "predict_%d_s.png" % index)
            save_path = os.path.join(save_root, "predict_%d_o.png" % index)
            ground_path = os.path.join(save_root, "predict_%d_g.png" % index)
            img_ground.save(ground_path)
            # img_x.save(src_path)
            cv2.imwrite(save_path, img_y * 255)
            index = index + 1
Example #10
def test():
    model = Unet(3, 1)
    model.load_state_dict(torch.load(Model_path, map_location='cpu'))
    #card_dataset = CardDataset("data/val", transform=x_transforms,target_transform=y_transforms)
    #dataloaders = DataLoader(card_dataset, batch_size=1)
    model.eval()

    with torch.no_grad():
        for name in names:
            img = cv2.imread(name, 1)
            x = cv2img_process(img)

            y = model(x)
            img_y = (torch.squeeze(y).numpy() * -0.4 * 40 / 255.0 - 0.3) / 0.7
            img_y = np.where(img_y < 0.3, 0, img_y)
            img_y = np.where(img_y > 0.3, 1, img_y)

            cv2.imshow("x", img)
            cv2.imshow("predict", img_y)
            #print(img.shape)
            #print(img_y.shape)
            #print("max ",img_y.max())
            #print("min ",img_y.min())
            print(img_y[250][250])
            cv2.waitKey(10)
Example #11
def dnp(run_name,
        noisy_file,
        samples_dir,
        LR=0.001,
        num_iter=5000,
        save_every=50):

    # Initiate model
    nlayers = 6
    model = Unet(nlayers=nlayers, nefilters=60).cuda()
    samples_dir = os.path.join(samples_dir, run_name)
    utils.makedirs(samples_dir)
    # load data
    target, sr = utils.load_wav_to_torch(noisy_file)
    target = target[:(len(target) // 2**nlayers) * 2**nlayers]
    target = target / utils.MAX_WAV_VALUE
    input = torch.rand_like(target)
    input = (input - 0.5) * 2
    target, input = target.cuda(), input.cuda()
    criterion = torch.nn.MSELoss()

    # Initialize accumulator
    nfft = 512
    residual = 10**(-30 / 10)  # -30 dB lower gain
    low_cut = 10
    high_cut = 90
    center = False
    bandpass = int(round(3 / 512 * nfft))
    accumulator = utils.Accumulator(target, low_cut, high_cut, nfft, center,
                                    residual, sr, bandpass)

    # Run the algorithm
    optimize(model, criterion, input, target, samples_dir, LR, num_iter, sr,
             save_every, accumulator)
Example #12
    def __init__(
        self,
        image_size,
        learning_rate=2e-5,
        batch_size=1,
        classes_size=2,
        ngf=64,
    ):
        """
        Args:
          image_size: list [H, W, C]
          batch_size: integer, batch size
          learning_rate: float, initial learning rate for Adam
          ngf: number of gen filters in first conv layer
        """
        self.learning_rate = learning_rate
        self.input_shape = [
            int(batch_size / 4), image_size[0], image_size[1], image_size[2]
        ]
        self.tenaor_name = {}
        self.classes_size = classes_size

        self.G_X = Unet('G_X',
                        ngf=ngf,
                        output_channl=image_size[2],
                        keep_prob=0.97)
        self.D_X = Discriminator('D_X', ngf=ngf, keep_prob=0.9)
        self.G_L_X = Detector('G_L_X',
                              ngf,
                              classes_size=classes_size,
                              keep_prob=0.99,
                              input_channl=image_size[2])
Example #13
def main():
    args = build_parser().parse_args()
    image_size = [args.img_height, args.img_width]
    # config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 1.0
    # sess = tf.Session(config=config)
    sess = tf.Session()
    unet = Unet(input_shape=image_size,
                sess=sess,
                filter_num=args.filter_num,
                batch_norm=args.batch_norm)
    unet.build_net()
    if args.checkpoint_path:
        unet.load_weights(args.checkpoint_path)

    images, masks = read_data(args.train_dir,
                              args.train_mask_dir,
                              n_images=args.n_images,
                              image_size=image_size)
    val_images, val_masks = read_data(args.val_dir,
                                      args.val_mask_dir,
                                      n_images=args.n_images // 4,
                                      image_size=image_size)
    unet.train(images=images,
               masks=masks,
               val_images=val_images,
               val_masks=val_masks,
               epochs=args.epochs,
               batch_size=args.batch_size,
               learning_rate=args.learning_rate,
               dice_loss=args.dice_loss,
               always_save=args.always_save)
Example #14
def test_1():
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))
    liver_dataset = LiverDataset("data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    imgs = []
    root = "data/val"
    n = len(os.listdir(root)) // 2
    for i in range(n):
        img = os.path.join(root, "%03d.png" % i)
        # mask = os.path.join(root, "%03d_mask.png" % i)
        imgs.append(img)
    i = 0
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x)
            img_x = torch.squeeze(_).numpy()
            img_y = torch.squeeze(y).numpy()
            img_input = cv2.imread(imgs[i], cv2.IMREAD_GRAYSCALE)
            im_color = cv2.applyColorMap(img_input, cv2.COLORMAP_JET)
            img_x = img_as_ubyte(img_x)
            img_y = img_as_ubyte(img_y)
            imgStack = stackImages(0.8, [[img_input, img_x, img_y]])
            # convert to pseudo-color; enable if needed
            # imgStack = cv2.applyColorMap(imgStack, cv2.COLORMAP_JET)
            cv2.imwrite(f'train_img/{i}.png', imgStack)
            plt.imshow(imgStack)
            i = i + 1
            plt.pause(0.1)
        plt.show()
Example #15
def test_unet():
    model = Unet(
        layers_n_channels=[4, 8],
        layers_n_non_lins=1,
    )
    shape = [1, 32, 32, 1]
    res = model(tf.zeros(shape))
    assert res.shape.as_list() == shape
Example #16
def train(args):
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/train",transform=x_transforms,target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
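
Example #16 pairs the model with nn.BCEWithLogitsLoss, while several of the other training examples (e.g. #17 and #25) use nn.BCELoss. The former expects raw logits and applies the sigmoid internally; the latter expects outputs that have already been passed through a sigmoid. A small self-contained check of that equivalence (the tensor shapes and values here are arbitrary, chosen only for illustration):

import torch
import torch.nn as nn

logits = torch.randn(2, 1, 4, 4)    # raw network output
targets = torch.rand(2, 1, 4, 4)    # target probabilities in [0, 1]

loss_with_logits = nn.BCEWithLogitsLoss()(logits, targets)
loss_plain = nn.BCELoss()(torch.sigmoid(logits), targets)
# Both losses default to mean reduction, so the two values agree up to
# floating-point error when the sigmoid is applied explicitly.
assert torch.allclose(loss_with_logits, loss_plain, atol=1e-6)
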
Example #17
def train():
    x_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    y_transforms = transforms.ToTensor()

    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(r"./results/weights.pth"))
    batch_size = 1
    num_epochs = 2
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(r'D:\project\data_sets\liver\train',
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    data_loaders = DataLoader(liver_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0)
    print("Start training at ", strftime("%Y-%m-%d %H:%M:%S", localtime()))
    for epoch in range(num_epochs):
        prev_time = datetime.now()
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('-' * 10)
        dt_size = len(data_loaders.dataset)
        epoch_loss = 0
        step = 0
        for x, y in data_loaders:
            step += 1
            inputs = x.to(device)
            labels = y.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            if (step % 10) == 0:
                print("%d/%d, train_loss:%0.3f" %
                      (step, (dt_size - 1) // data_loaders.batch_size + 1,
                       loss.item()))
        # print the results of the current training
        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = 'Time:{:.0f}:{:.0f}:{:.0f}'.format(h, m, s)
        epoch_str = "epoch {} loss:{:.4f} ".format(epoch, epoch_loss / 400)
        print(epoch_str + time_str)
        res_record("Time:" + strftime("%Y-%m-%d %H:%M:%S  ", localtime()))
        res_record(epoch_str + '\n')
    print("End training at ", strftime("%Y-%m-%d %H:%M:%S", localtime()))
    # log the results
    torch.save(
        model.state_dict(),
        './results/weights{}_{}_{}.pth'.format(localtime().tm_mday,
                                               localtime().tm_hour,
                                               localtime().tm_sec))
Example #18
    def train(self):
        model = Unet(in_ch=2, out_ch=2).to(device)
        # batch_size = 1
        # criterion = nn.BCEWithLogitsLoss()
        criterion = nn.BCELoss()
        # criterion = nn.CrossEntropyLoss()
        # optimizer = optim.Adam(model.parameters(), lr=0.01)
        optimizer = optim.SGD(model.parameters(), lr=0.005, momentum=0.9)
        # data_set = Train_Data(data_root='./train', mask_root='./Train_GT')
        self.train_model(model, criterion, optimizer)
Example #19
    def __init__(
        self,
        input_channels=1,
        num_classes=1,
        num_filters=[32, 64, 128, 192],
        latent_dim=6,
        no_convs_fcomb=4,
        beta=10.0,
    ):
        super(ProbabilisticUnet, self).__init__()
        self.input_channels = input_channels
        self.num_classes = num_classes
        self.num_filters = num_filters
        self.latent_dim = latent_dim
        self.no_convs_per_block = 3
        self.no_convs_fcomb = no_convs_fcomb
        self.initializers = {"w": "he_normal", "b": "normal"}
        self.beta = beta
        self.z_prior_sample = 0

        self.unet = Unet(
            self.input_channels,
            self.num_classes,
            self.num_filters,
            self.initializers,
            apply_last_layer=False,
            padding=True,
        ).to(device)
        self.prior = AxisAlignedConvGaussian(
            self.input_channels,
            self.num_filters,
            self.no_convs_per_block,
            self.latent_dim,
            self.initializers,
        ).to(device)
        self.posterior = AxisAlignedConvGaussian(
            self.input_channels,
            self.num_filters,
            self.no_convs_per_block,
            self.latent_dim,
            self.initializers,
            posterior=True,
        ).to(device)
        self.fcomb = Fcomb(
            self.num_filters,
            self.latent_dim,
            self.input_channels,
            self.num_classes,
            self.no_convs_fcomb,
            {
                "w": "orthogonal",
                "b": "normal"
            },
            use_tile=True,
        ).to(device)
Example #20
def main():
    config = configparser.RawConfigParser()
    config.read('config.txt')

    experiment_name = config.get('train', 'name')
    if not os.path.exists('./logs/' + experiment_name):
        os.system('mkdir ./logs/' + experiment_name)
    epochs_num = int(config.get('train', 'epochs_num'))
    batch_size = int(config.get('train', 'batch_size'))

    # Load datasets.
    datasets = config.get('train', 'datasets')
    datasets_path = config.get(datasets, 'h5py_save_path')
    height = int(config.get(datasets, 'height'))
    width = int(config.get(datasets, 'width'))
    pad_height = int(config.get(datasets, 'pad_height'))
    pad_width = int(config.get(datasets, 'pad_width'))

    x_train, y_train, masks = Generator(datasets_path, 'train', height, width,
                                        pad_height, pad_width)()
    visualize(group_images(x_train, 4),
              './logs/' + experiment_name + '/train_images.png').show()
    visualize(group_images(y_train, 4),
              './logs/' + experiment_name + '/train_labels.png').show()
    visualize(group_images(masks, 4),
              './logs/' + experiment_name + '/train_masks.png').show()
    y_train = to_categorical(y_train)

    # Build model and save.
    unet = Unet((pad_height, pad_width, 1), 5)
    unet.summary()
    unet_json = unet.to_json()
    open('./logs/' + experiment_name + '/architecture.json',
         'w').write(unet_json)
    plot_model(unet, to_file='./logs/' + experiment_name + '/model.png')

    # Training.
    checkpointer = ModelCheckpoint(filepath='./logs/' + experiment_name +
                                   '/weights.h5',
                                   verbose=1,
                                   monitor='val_loss',
                                   mode='auto',
                                   save_best_only=True)

    unet.fit(
        x_train,
        y_train,
        epochs=epochs_num,
        batch_size=batch_size,
        verbose=1,
        shuffle=True,
        validation_split=0.1,
        #class_weight=(0.5, 1.3),
        callbacks=[checkpointer])
Example #21
def train():
    model = Unet(5, 2).to(device)
    model.train()
    batch_size = args.batch_size
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    PAVE_dataset = SSFPDataset("train", transform=1, target_transform=1)
    dataloaders = DataLoader(PAVE_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example #22
def main(argv):
    with CytomineJob.from_cli(argv) as job:
        model_path = os.path.join(str(Path.home()), "models", "thyroid-unet")
        model_filepath = pick_model(model_path, job.parameters.tile_size,
                                    job.parameters.cytomine_zoom_level)
        device = torch.device(job.parameters.device)
        unet = Unet(job.parameters.init_fmaps, n_classes=1)
        unet.load_state_dict(torch.load(model_filepath, map_location=device))
        unet.to(device)
        unet.eval()

        segmenter = UNetSegmenter(device=job.parameters.device,
                                  unet=unet,
                                  classes=[0, 1],
                                  threshold=job.parameters.threshold)

        working_path = os.path.join(str(Path.home()), "tmp")
        tile_builder = CytomineTileBuilder(working_path)
        builder = SSLWorkflowBuilder()
        builder.set_n_jobs(1)
        builder.set_overlap(job.parameters.tile_overlap)
        builder.set_tile_size(job.parameters.tile_size,
                              job.parameters.tile_size)
        builder.set_tile_builder(tile_builder)
        builder.set_border_tiles(Workflow.BORDER_TILES_EXTEND)
        builder.set_background_class(0)
        builder.set_distance_tolerance(1)
        builder.set_seg_batch_size(job.parameters.batch_size)
        builder.set_segmenter(segmenter)
        workflow = builder.get()

        slide = CytomineSlide(img_instance=ImageInstance().fetch(
            job.parameters.cytomine_id_image),
                              zoom_level=job.parameters.cytomine_zoom_level)
        results = workflow.process(slide)

        print("-------------------------")
        print(len(results))
        print("-------------------------")

        collection = AnnotationCollection()
        for obj in results:
            wkt = shift_poly(obj.polygon,
                             slide,
                             zoom_level=job.parameters.cytomine_zoom_level).wkt
            collection.append(
                Annotation(location=wkt,
                           id_image=job.parameters.cytomine_id_image,
                           id_terms=[154005477],
                           id_project=job.project.id))
        collection.save(n_workers=job.parameters.n_jobs)

        return {}
Example #23
File: main.py Project: nlroel/unet
def train(args):
    model = Unet(1, 1).to(device)
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    liver_dataset = LiverDataset("/gs/home/majg/liupeng/code",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=10)
    train_model(model, criterion, optimizer, dataloaders)
Example #24
def load_model():
    '''
    Get the U-Net model.
    :param para: 1 selects the 208 model; 2 selects the 512 model
    :return: the pre-trained U-Net model
    '''
    from unet import UnetSegment as Unet
    unet = Unet(1, 2)
    print('loading model :', args.unet208model)
    checkpoint = torch.load(args.unet208model)
    unet.load_state_dict(checkpoint['state_dict'])
    #unet.cuda()
    return unet
Example #25
def train():
    model = Unet(3, 1).to(device)
    batch_size = 8
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("/home/xm/Program/ALL-Data/unetdata/test",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #26
def main():
    args = build_parser().parse_args()
    assert args.checkpoint_path

    result_dir = args.result_dir
    checkpoint_path = args.checkpoint_path
    test_dir = args.test_dir
    n_imgs = args.n_images

    image_size = [args.img_height, args.img_width]
    sess = tf.Session()
    unet = Unet(input_shape=image_size,
                sess=sess,
                filter_num=args.filter_num,
                batch_norm=args.batch_norm)
    unet.build_net(is_train=False)
    unet.load_weights(checkpoint_path)
    img_names = os.listdir(test_dir)
    img_names.sort()
    mask_names = None
    total_dice = None
    if args.mask_dir:
        mask_names = os.listdir(args.mask_dir)
        mask_names.sort()
        total_dice = 0

    if n_imgs <= 0:
        n_imgs = len(img_names)

    for i in range(n_imgs):
        print('%s %d/%d' % (img_names[i], i, n_imgs))
        img_mat = read_car_img(os.path.join(test_dir, img_names[i]),
                               image_size=image_size)
        img_mat = np.expand_dims(img_mat, axis=0)
        if mask_names:
            mask_mat = read_mask_img(os.path.join(args.mask_dir,
                                                  mask_names[i]),
                                     image_size=image_size)
            mask_mat = np.expand_dims(mask_mat, axis=0)
            res, dice = unet.predict_test(img_mat, mask_mat)
            dice = np.mean(dice)
            print('Dice coefficient:%.6f' % dice)
            total_dice += dice
        else:
            res = unet.predict(img_mat)

        if args.result_dir:
            res = res.reshape(image_size)
            misc.imsave(os.path.join(result_dir, img_names[i]), res)
    if total_dice:
        print('Average Dice coefficient:%.6f' % (total_dice / n_imgs))
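
Example #26 reads the Dice coefficient back from unet.predict_test, whose implementation is not part of this listing. For reference, a stand-alone NumPy sketch of the soft Dice coefficient such scores usually correspond to; the function name and the smoothing term are assumptions rather than the project's actual code:

import numpy as np


def dice_coefficient(pred, mask, smooth=1e-6):
    # Soft Dice between a predicted mask and the ground truth, both with
    # values in [0, 1]; arrays are flattened before the overlap is computed.
    pred = pred.reshape(-1)
    mask = mask.reshape(-1)
    intersection = (pred * mask).sum()
    return (2.0 * intersection + smooth) / (pred.sum() + mask.sum() + smooth)
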
Example #27
def train():
    model = Unet(1, 1).to(device)
    batch_size = 1
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    train_dataset = TrainDataset("dataset/train/image",
                                 "dataset/train/label",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(train_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #28
def train():
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(r"./results/weights4_13_40.pth"))
    batch_size = 5
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(r"D:\project\data_sets\data_sci\train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example #29
def train():
    #logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    model = Unet(3, 1).to(device)
    batch_size = 1
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #30
def train():
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(
        r"H:\BaiduNetdisk\BaiduDownload\u_net_liver-master\data\train",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
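
Most of the train() functions above hand the actual optimization over to a train_model helper that is not reproduced in this listing. A minimal sketch of a loop compatible with those calls, assuming the same global device used elsewhere in the examples and a fixed epoch count; both of these, like the print format, are assumptions rather than the original helper:

def train_model(model, criterion, optimizer, dataloaders, num_epochs=20):
    model.train()
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for inputs, labels in dataloaders:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)            # forward pass
            loss = criterion(outputs, labels)
            loss.backward()                    # backpropagate
            optimizer.step()
            epoch_loss += loss.item()
        print("epoch %d loss: %.4f" % (epoch, epoch_loss / len(dataloaders)))
    return model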