Example #1
def get_value():

    modelpath = A.get()
    picinit = B.get()
    outpath = F.get()
    #pic = list(pic)
    pic = picinit.split(' ')  # note: paths therefore must not contain spaces
    sign1 = sign.get()
    num_class1 = num_class.get()
    if not modelpath or not picinit:
        change.set('请选择文件')  # "Please select the files"

    if modelpath and picinit:
        change.set('执行中...')  # "Running..."
        root.update()
        #allpic = glob.glob(os.path.join(pic,'*.tif'))
        lastpic = pic[-1]
        model = Unet((256, 256, 3), num_class1)
        #model = myunet((256,256,3),num_class1)
        model.load_weights(modelpath)
        d, n = os.path.split(lastpic)
        lastpic_save = os.path.join(outpath, n)  # path of the last output file
        # delete_path = os.path.join(d,'result')  # directory of saved results
        # if os.path.exists(delete_path):
        #     pp = os.listdir(delete_path)
        #     for x in pp:
        #         delete = os.path.join(delete_path,x)
        #         os.remove(delete)
        #     os.removedirs(delete_path)
        W = P(num_class1)
        W.main_p(model, pic, outpath, changes=sign1)
        if os.path.exists(lastpic_save):
            change.set('识别完成!')  # "Recognition finished!"
            os.startfile(outpath)  # Windows-only: open the output folder
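This callback assumes a Tk GUI wired up elsewhere. A minimal sketch of that setup; the variable names (A, B, F, sign, num_class, change, root) come from the snippet above, while the layout and button labels are assumptions:

import tkinter as tk
from tkinter import filedialog

root = tk.Tk()
A = tk.StringVar()       # path to the model weights
B = tk.StringVar()       # space-separated list of input images
F = tk.StringVar()       # output directory
sign = tk.IntVar()       # post-processing flag forwarded as `changes`
num_class = tk.IntVar()  # number of segmentation classes
change = tk.StringVar()  # status message shown in the UI

tk.Button(root, text='Model...',
          command=lambda: A.set(filedialog.askopenfilename())).pack()
tk.Button(root, text='Output...',
          command=lambda: F.set(filedialog.askdirectory())).pack()
tk.Button(root, text='Run', command=get_value).pack()
tk.Label(root, textvariable=change).pack()
root.mainloop()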
Example #2
    def __init__(self,
                 image_size,
                 learning_rate=2e-5,
                 batch_size=1,
                 ngf=64,
                 ):
        """
           Args:
             input_size:list [N, H, W, C]
             batch_size: integer, batch size
             learning_rate: float, initial learning rate for Adam
             ngf: number of base gen filters in conv layer
        """
        self.learning_rate = learning_rate
        self.input_shape = [int(batch_size / 4), image_size[0], image_size[1], image_size[2]]
        self.code_shape = [int(batch_size / 4), int(image_size[0] / 8), int(image_size[1] / 8), 4]
        self.ones_code = tf.ones(self.code_shape, name="ones_code")
        self.tenaor_name = {}

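        # Four generator/discriminator pairs, presumably one per image
        # domain (X, Y, Z, W) in a multi-domain translation setup.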
        self.G_X = Unet('G_X', ngf=ngf)
        self.D_X = Discriminator('D_X', ngf=ngf)
        self.G_Y = Unet('G_Y', ngf=ngf)
        self.D_Y = Discriminator('D_Y', ngf=ngf)
        self.G_Z = Unet('G_Z', ngf=ngf)
        self.D_Z = Discriminator('D_Z', ngf=ngf)
        self.G_W = Unet('G_W', ngf=ngf)
        self.D_W = Discriminator('D_W', ngf=ngf)
Example #3
def train(args):
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/train",transform=x_transforms,target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
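Most of the PyTorch snippets in this listing call the same helpers (`device`, `x_transforms`, `y_transforms`, `train_model`) without showing them. A minimal sketch of what they are typically bound to in these liver-segmentation repos; the epoch count, normalization constants, and checkpoint name are assumptions:

import torch
from torchvision import transforms

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

x_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])
y_transforms = transforms.ToTensor()

def train_model(model, criterion, optimizer, dataloader, num_epochs=20):
    # Plain supervised loop: forward pass, loss, backward pass, update.
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for x, y in dataloader:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            loss = criterion(model(x), y)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        print('epoch %d loss: %.4f' % (epoch, epoch_loss / len(dataloader)))
    torch.save(model.state_dict(), 'weights_%d.pth' % (num_epochs - 1))
    return model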
Example #4
    def train(self):
        model = Unet(in_ch=2, out_ch=2).to(device)
        # batch_size = 1
        # criterion = nn.BCEWithLogitsLoss()
        criterion = nn.BCELoss()
        # criterion = nn.CrossEntropyLoss()
        # optimizer = optim.Adam(model.parameters(), lr=0.01)
        optimizer = optim.SGD(model.parameters(), lr=0.005, momentum=0.9)
        # data_set = Train_Data(data_root='./train', mask_root='./Train_GT')
        self.train_model(model, criterion, optimizer)
Example #5
    def __init__(
        self,
        input_channels=1,
        num_classes=1,
        num_filters=[32, 64, 128, 192],
        latent_dim=6,
        no_convs_fcomb=4,
        beta=10.0,
    ):
        super(ProbabilisticUnet, self).__init__()
        self.input_channels = input_channels
        self.num_classes = num_classes
        self.num_filters = num_filters
        self.latent_dim = latent_dim
        self.no_convs_per_block = 3
        self.no_convs_fcomb = no_convs_fcomb
        self.initializers = {"w": "he_normal", "b": "normal"}
        self.beta = beta
        self.z_prior_sample = 0

        self.unet = Unet(
            self.input_channels,
            self.num_classes,
            self.num_filters,
            self.initializers,
            apply_last_layer=False,
            padding=True,
        ).to(device)
        self.prior = AxisAlignedConvGaussian(
            self.input_channels,
            self.num_filters,
            self.no_convs_per_block,
            self.latent_dim,
            self.initializers,
        ).to(device)
        self.posterior = AxisAlignedConvGaussian(
            self.input_channels,
            self.num_filters,
            self.no_convs_per_block,
            self.latent_dim,
            self.initializers,
            posterior=True,
        ).to(device)
        self.fcomb = Fcomb(
            self.num_filters,
            self.latent_dim,
            self.input_channels,
            self.num_classes,
            self.no_convs_fcomb,
            {
                "w": "orthogonal",
                "b": "normal"
            },
            use_tile=True,
        ).to(device)
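The constructor above only wires the pieces together. A sketch of how they typically interact, following the public Probabilistic U-Net reference code; the exact signatures are assumptions:

    def forward(self, patch, segm, training=True):
        # Encode the input (and, during training, the ground truth) into
        # axis-aligned Gaussians over the latent space, and cache U-Net
        # features without the final 1x1 layer (apply_last_layer=False).
        if training:
            self.posterior_latent_space = self.posterior.forward(patch, segm)
        self.prior_latent_space = self.prior.forward(patch)
        self.unet_features = self.unet.forward(patch, False)

    def sample(self):
        # Draw z from the prior; fcomb tiles z across the feature map
        # (use_tile=True) and fuses it into one segmentation hypothesis.
        self.z_prior_sample = self.prior_latent_space.sample()
        return self.fcomb.forward(self.unet_features, self.z_prior_sample)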
Example #6
def train():
    model = Unet(5, 2).to(device)
    model.train()
    batch_size = args.batch_size
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    PAVE_dataset = SSFPDataset("train", transform=1, target_transform=1)
    dataloaders = DataLoader(PAVE_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example #7
def train():
    model = Unet(3, 1).to(device)
    batch_size = 8
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("/home/xm/Program/ALL-Data/unetdata/test",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #8
File: main.py Project: nlroel/unet
def train(args):
    model = Unet(1, 1).to(device)
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    liver_dataset = LiverDataset("/gs/home/majg/liupeng/code",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=10)
    train_model(model, criterion, optimizer, dataloaders)
Example #9
def train():
    model = Unet(3, 3).to(device)
    batch_size = 3
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-5)
    liver_dataset = LiverDataset("data/train_xin/liver_bmp",
                                 "data/train_xin/mask_bw",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example #10
def train():
    model = Unet(1, 1).to(device)
    batch_size = 1
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    train_dataset = TrainDataset("dataset/train/image",
                                 "dataset/train/label",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(train_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #11
def train():
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(
        r"H:\BaiduNetdisk\BaiduDownload\u_net_liver-master\data\train",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example #12
def train():
    #logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    model = Unet(3, 1).to(device)
    batch_size = 1
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #13
def main():
    args = build_parser().parse_args()
    image_size = [args.img_height, args.img_width]
    # config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 1.0
    # sess = tf.Session(config=config)
    sess = tf.Session()
    unet = Unet(input_shape=image_size,
                sess=sess,
                filter_num=args.filter_num,
                batch_norm=args.batch_norm)
    unet.build_net()
    if args.checkpoint_path:
        unet.load_weights(args.checkpoint_path)

    images, masks = read_data(args.train_dir,
                              args.train_mask_dir,
                              n_images=args.n_images,
                              image_size=image_size)
    val_images, val_masks = read_data(args.val_dir,
                                      args.val_mask_dir,
                                      n_images=args.n_images // 4,
                                      image_size=image_size)
    unet.train(images=images,
               masks=masks,
               val_images=val_images,
               val_masks=val_masks,
               epochs=args.epochs,
               batch_size=args.batch_size,
               learning_rate=args.learning_rate,
               dice_loss=args.dice_loss,
               always_save=args.always_save)
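`build_parser` is not shown, but every flag it must define can be read off the attributes `main()` uses. A sketch with assumed defaults:

import argparse

def build_parser():
    p = argparse.ArgumentParser(description='Train a TensorFlow U-Net')
    p.add_argument('--img_height', type=int, default=256)
    p.add_argument('--img_width', type=int, default=256)
    p.add_argument('--filter_num', type=int, default=64)
    p.add_argument('--batch_norm', action='store_true')
    p.add_argument('--checkpoint_path', default='')
    p.add_argument('--train_dir', required=True)
    p.add_argument('--train_mask_dir', required=True)
    p.add_argument('--val_dir', required=True)
    p.add_argument('--val_mask_dir', required=True)
    p.add_argument('--n_images', type=int, default=1000)
    p.add_argument('--epochs', type=int, default=50)
    p.add_argument('--batch_size', type=int, default=8)
    p.add_argument('--learning_rate', type=float, default=1e-4)
    p.add_argument('--dice_loss', action='store_true')
    p.add_argument('--always_save', action='store_true')
    return p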
Example #14
def train():
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(r"./results/weights4_13_40.pth"))
    batch_size = 5
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(r"D:\project\data_sets\data_sci\train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example #15
def train():
    model = Unet(3, 1).to(device)
    #summary(model,(3,512,512))
    batch_size = 1
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/image",
                                 "data/mask",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #16
def train():
    model = Unet(3, 1).to(device)
    #model.load_state_dict(torch.load('./checkpoints/weights_39.pth'))

    batch_size = args.batch_size
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #17
    def __init__(self,
                 image_size,
                 learning_rate=2e-5,
                 batch_size=1,
                 ngf=64,
                 units=4096
                 ):
        """
        Args:
          input_size:list [H, W, C]
          batch_size: integer, batch size
          learning_rate: float, initial learning rate for Adam
          ngf: number of gen filters in first conv layer
        """
        self.learning_rate = learning_rate
        self.input_shape = [int(batch_size / 4), image_size[0], image_size[1], image_size[2]]
        self.ones = tf.ones(self.input_shape, name="ones")
        self.tenaor_name = {}

        self.EC_S = VEncoder('EC_S', ngf=ngf, units=units, keep_prob=0.85)
        self.DC_S = VDecoder('DC_S', ngf=ngf, output_channl=2, units=units)

        self.G_M = Unet('G_M', ngf=ngf / 2, keep_prob=0.9, output_channl=2)

        self.D_S = Discriminator('D_S', ngf=ngf, keep_prob=0.85)
        self.FD_Z = FeatureDiscriminator('FD_Z', ngf=ngf)
Example #18
    def __init__(
        self,
        image_size,
        learning_rate=2e-5,
        batch_size=1,
        classes_size=2,
        ngf=64,
    ):
        """
        Args:
          input_size:list [H, W, C]
          batch_size: integer, batch size
          learning_rate: float, initial learning rate for Adam
          ngf: number of gen filters in first conv layer
        """
        self.learning_rate = learning_rate
        self.input_shape = [
            int(batch_size / 4), image_size[0], image_size[1], image_size[2]
        ]
        self.tenaor_name = {}
        self.classes_size = classes_size

        self.G_X = Unet('G_X',
                        ngf=ngf,
                        output_channl=image_size[2],
                        keep_prob=0.97)
        self.D_X = Discriminator('D_X', ngf=ngf, keep_prob=0.9)
        self.G_L_X = Detector('G_L_X',
                              ngf,
                              classes_size=classes_size,
                              keep_prob=0.99,
                              input_channl=image_size[2])
Example #19
def infer():
    model = Unet(3, 1)
    model.load_state_dict(torch.load('weights_19.pth', map_location='cpu'))
    model.eval()  # switch off dropout/batch-norm updates for inference
    liver_dataset = LiverDataset("./../data/val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        for x, _ in dataloaders:
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            plt.imshow(img_y)
            plt.pause(0.01)
        plt.show()
Example #20
def dnp(run_name,
        noisy_file,
        samples_dir,
        LR=0.001,
        num_iter=5000,
        save_every=50):

    # Initiate model
    nlayers = 6
    model = Unet(nlayers=nlayers, nefilters=60).cuda()
    samples_dir = os.path.join(samples_dir, run_name)
    utils.makedirs(samples_dir)
    # load data
    target, sr = utils.load_wav_to_torch(noisy_file)
    target = target[:(len(target) // 2**nlayers) * 2**nlayers]
    target = target / utils.MAX_WAV_VALUE
    input = torch.rand_like(target)
    input = (input - 0.5) * 2
    target, input = target.cuda(), input.cuda()
    criterion = torch.nn.MSELoss()

    # Initialize accumulator
    nfft = 512
    residual = 10**(-30 / 10)  # -30 dB residual noise floor
    low_cut = 10
    high_cut = 90
    center = False
    bandpass = int(round(3 / 512 * nfft))
    accumulator = utils.Accumulator(target, low_cut, high_cut, nfft, center,
                                    residual, sr, bandpass)

    # Run the algorithm
    optimize(model, criterion, input, target, samples_dir, LR, num_iter, sr,
             save_every, accumulator)
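Neither `optimize` nor the `utils` helpers are shown. A hedged sketch of the fitting loop the call implies, in the deep-prior style: the network is fit to the noisy signal, and every `save_every` iterations the current estimate is handed to the accumulator (the `accumulator.update` interface and the [1, 1, T] input layout are assumptions):

def optimize(model, criterion, input, target, samples_dir, LR, num_iter, sr,
             save_every, accumulator):
    # Deep-prior fitting: the network maps a fixed random signal toward the
    # noisy target; intermediate outputs capture structure before noise.
    optimizer = torch.optim.Adam(model.parameters(), lr=LR)
    for it in range(num_iter):
        optimizer.zero_grad()
        out = model(input[None, None, :]).squeeze()  # assumed [B, C, T] layout
        loss = criterion(out, target)
        loss.backward()
        optimizer.step()
        if it % save_every == 0:
            # Assumed interface: spectral gating of the current estimate and
            # saving the running result under samples_dir.
            accumulator.update(out.detach(), it, samples_dir)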
Example #21
    def __init__(self,
                 input_channels=1,
                 num_classes=1,
                 num_filters=[32, 64, 128, 192],
                 latent_dim=6,
                 no_convs_fcomb=4,
                 beta=10.0):
        super(ProbabilisticUnet, self).__init__()
        self.input_channels = input_channels  # number of input image channels
        self.num_classes = num_classes  # number of segmentation classes
        self.num_filters = num_filters  # number of filters per level
        self.latent_dim = latent_dim  # dimensionality of the latent space
        self.no_convs_per_block = 3
        self.no_convs_fcomb = no_convs_fcomb
        self.initializers = {'w': 'he_normal', 'b': 'normal'}  # weight/bias initializers
        self.beta = beta
        self.z_prior_sample = 0

        self.unet = Unet(self.input_channels,
                         self.num_classes,
                         self.num_filters,
                         self.initializers,
                         apply_last_layer=False,
                         padding=True).to(device)
        self.prior = AxisAlignedConvGaussian(
            self.input_channels,
            self.num_filters,
            self.no_convs_per_block,
            self.latent_dim,
            self.initializers,
        ).to(device)
        self.posterior = AxisAlignedConvGaussian(self.input_channels,
                                                 self.num_filters,
                                                 self.no_convs_per_block,
                                                 self.latent_dim,
                                                 self.initializers,
                                                 posterior=True).to(device)
        self.fcomb = Fcomb(self.num_filters,
                           self.latent_dim,
                           self.input_channels,
                           self.num_classes,
                           self.no_convs_fcomb, {
                               'w': 'orthogonal',
                               'b': 'normal'
                           },
                           use_tile=True).to(device)
Example #22
def test_unet():
    model = Unet(
        layers_n_channels=[4, 8],
        layers_n_non_lins=1,
    )
    shape = [1, 32, 32, 1]
    res = model(tf.zeros(shape))
    assert res.shape.as_list() == shape
Example #23
def train(args):
    model = Unet(3, 1).to(device)
    #begin add
    # checkpoint = torch.load("./weights_19.pth",map_location=device)
    # model.load_state_dict(checkpoint['model_state_dict'])
    #end add
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("./data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #24
def train(args):
    model = Unet(3, 1).to(device)  # 3 input channels, 1 output channel
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()  # loss function
    optimizer = optim.Adam(model.parameters())  # optimizer over the model's parameters
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    # Load the dataset: each item is an (image, mask) pair, and len() gives
    # the total number of images.
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    # DataLoader packs the dataset's output into tensors of batch_size samples.
    # batch_size=4, epoch=10: 100 minibatches in total.
    # shuffle: reshuffle the data at every epoch.
    # num_workers: load data with multiple worker processes to speed it up.
    train_model(model, criterion, optimizer, dataloaders)  # train
Example #25
def train(args):
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=1)
    # shuffle = True,  # randomize the order
    # num_workers = 2  # multiple processes
    # DataLoader packs the output of a custom (or built-in PyTorch) data-reading
    # interface into tensors of batch_size samples.
    # batch_size: how many samples per minibatch to load; 4 here, with a
    # 400-image dataset, so there are 100 minibatches.
    # shuffle: reshuffle the data every epoch (epoch=10 here); usually used on training data.
    # num_workers: load data with multiple worker processes to speed up loading.
    train_model(model, criterion, optimizer, dataloaders)
Example #26
def main(argv):
    with CytomineJob.from_cli(argv) as job:
        model_path = os.path.join(str(Path.home()), "models", "thyroid-unet")
        model_filepath = pick_model(model_path, job.parameters.tile_size,
                                    job.parameters.cytomine_zoom_level)
        device = torch.device(job.parameters.device)
        unet = Unet(job.parameters.init_fmaps, n_classes=1)
        unet.load_state_dict(torch.load(model_filepath, map_location=device))
        unet.to(device)
        unet.eval()

        segmenter = UNetSegmenter(device=job.parameters.device,
                                  unet=unet,
                                  classes=[0, 1],
                                  threshold=job.parameters.threshold)

        working_path = os.path.join(str(Path.home()), "tmp")
        tile_builder = CytomineTileBuilder(working_path)
        builder = SSLWorkflowBuilder()
        builder.set_n_jobs(1)
        builder.set_overlap(job.parameters.tile_overlap)
        builder.set_tile_size(job.parameters.tile_size,
                              job.parameters.tile_size)
        builder.set_tile_builder(tile_builder)
        builder.set_border_tiles(Workflow.BORDER_TILES_EXTEND)
        builder.set_background_class(0)
        builder.set_distance_tolerance(1)
        builder.set_seg_batch_size(job.parameters.batch_size)
        builder.set_segmenter(segmenter)
        workflow = builder.get()

        image = ImageInstance().fetch(job.parameters.cytomine_id_image)
        slide = CytomineSlide(img_instance=image,
                              zoom_level=job.parameters.cytomine_zoom_level)
        results = workflow.process(slide)

        print("-------------------------")
        print(len(results))
        print("-------------------------")

        collection = AnnotationCollection()
        for obj in results:
            wkt = shift_poly(obj.polygon,
                             slide,
                             zoom_level=job.parameters.cytomine_zoom_level).wkt
            collection.append(
                Annotation(location=wkt,
                           id_image=job.parameters.cytomine_id_image,
                           id_terms=[154005477],
                           id_project=job.project.id))
        collection.save(n_workers=job.parameters.n_jobs)

        return {}
Example #27
def train(args):
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size

    LR = 0.005
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=0.0005)
    criterion = nn.BCEWithLogitsLoss()
    lr_list = []

    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #28
def main():
    args = build_parser().parse_args()
    assert args.checkpoint_path

    result_dir = args.result_dir
    checkpoint_path = args.checkpoint_path
    test_dir = args.test_dir
    n_imgs = args.n_images

    image_size = [args.img_height, args.img_width]
    sess = tf.Session()
    unet = Unet(input_shape=image_size,
                sess=sess,
                filter_num=args.filter_num,
                batch_norm=args.batch_norm)
    unet.build_net(is_train=False)
    unet.load_weights(checkpoint_path)
    img_names = os.listdir(test_dir)
    img_names.sort()
    mask_names = None
    total_dice = None
    if args.mask_dir:
        mask_names = os.listdir(args.mask_dir)
        mask_names.sort()
        total_dice = 0

    if n_imgs <= 0:
        n_imgs = len(img_names)

    for i in range(n_imgs):
        print('%s %d/%d' % (img_names[i], i, n_imgs))
        img_mat = read_car_img(os.path.join(test_dir, img_names[i]),
                               image_size=image_size)
        img_mat = np.expand_dims(img_mat, axis=0)
        if mask_names:
            mask_mat = read_mask_img(os.path.join(args.mask_dir,
                                                  mask_names[i]),
                                     image_size=image_size)
            mask_mat = np.expand_dims(mask_mat, axis=0)
            res, dice = unet.predict_test(img_mat, mask_mat)
            dice = np.mean(dice)
            print('Dice coefficient:%.6f' % dice)
            total_dice += dice
        else:
            res = unet.predict(img_mat)

        if args.result_dir:
            res = res.reshape(image_size)
            misc.imsave(os.path.join(result_dir, img_names[i]), res)
    if total_dice is not None:
        print('Average Dice coefficient:%.6f' % (total_dice / n_imgs))
Example #29
def test():
    model = Unet(3, 1).to(device)  # 3 channels in, 1 out: apart from background, liver is the only class
    weight_pre = r"./results/weights4_18_35.pth"
    model.load_state_dict(torch.load(weight_pre))  # load the trained weights
    liver_dataset = LiverDataset(r"D:\project\data_sets\data_sci\val",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # enable interactive plotting

    with torch.no_grad():
        i = 0  # index of the current validation image
        miou_total = 0
        num = len(dataloaders)  # total number of validation images
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)

            # Convert the prediction to numpy before computing metrics; to match
            # the training images it would need an extra batch dimension.
            img_y = torch.squeeze(y).cpu().numpy()
            mask = get_data(i)[1]  # path of the current ground-truth mask
            miou_total += get_iou(mask, img_y)  # add this prediction's IoU to the running total
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            plt.imshow(img_y)
            plt.pause(0.01)
            if i < num: i += 1  # move on to the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / num))
        res_record("%s Miou=%f \n" % (weight_pre, miou_total / num))
Example #30
def test():
    model = Unet(3, 1).to(device)  # 3 channels in, 1 out: apart from background, liver is the only class
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))  # load the trained weights
    liver_dataset = LiverDataset(
        r"H:\BaiduNetdisk\BaiduDownload\u_net_liver-master\data\val",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()  # enable interactive plotting

    with torch.no_grad():
        i = 0  # index of the current validation image
        miou_total = 0
        num = len(dataloaders)  # total number of validation images
        for x, _ in dataloaders:
            x = x.to(device)
            y = model(x)

            # Convert the prediction to numpy before computing metrics; to match
            # the training images it would need an extra batch dimension.
            img_y = torch.squeeze(y).cpu().numpy()
            mask = get_data(i)[1]  # path of the current ground-truth mask
            miou_total += get_iou(mask, img_y)  # add this prediction's IoU to the running total
            plt.subplot(121)
            plt.imshow(Image.open(get_data(i)[0]))
            plt.subplot(122)
            plt.imshow(img_y)
            plt.pause(0.01)
            if i < num: i += 1  # move on to the next validation image
        plt.show()
        print('Miou=%f' % (miou_total / num))