Example #1
def main(argv):
    tf_flags = tf.app.flags.FLAGS
    # gpu config.
    # config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.5
    # config.gpu_options.allow_growth = True
    # config = config
    if tf_flags.phase == "train":
        with tf.Session() as sess:
            # when using a queue to load data, do not create the session with a with-block (see the sketch after this example)
            train_model = unet.UNet(sess, tf_flags)
            train_model.train(tf_flags.batch_size, tf_flags.training_steps,
                              tf_flags.summary_steps,
                              tf_flags.checkpoint_steps, tf_flags.save_steps)
    else:
        with tf.Session() as sess:
            # test on an image pair.
            test_model = unet.UNet(sess, tf_flags)
            test_model.load(tf_flags.checkpoint)
            image, output_masks = test_model.test()
            # return numpy ndarray.

            # save two images.
            filename_A = "input.png"
            filename_B = "output_masks.png"

            cv2.imwrite(filename_A, np.uint8(image[0].clip(0., 1.) * 255.))
            cv2.imwrite(filename_B,
                        np.uint8(output_masks[0].clip(0., 1.) * 255.))

            # Utilize cv2.imwrite() to save images.
            print("Saved files: {}, {}".format(filename_A, filename_B))
Example #2
def main(argv):
    # tf.app.flags.FLAGS holds values passed on the command line or the defaults defined via tf.app.flags
    tf_flags = tf.flags.FLAGS

    # gpu config.
    # tf.ConfigProto() is passed when creating a session to configure its behavior
    config = tf.ConfigProto()

    # TensorFlow offers two ways to control GPU memory usage. The first caps the fraction of GPU memory used:
    # config.gpu_options.per_process_gpu_memory_fraction = 0.5  # use 50% of GPU memory
    # The second lets TensorFlow grow its GPU memory allocation on demand, requesting only what it needs:
    config.gpu_options.allow_growth = True

    if tf_flags.phase == "train":
        # create the session with the config defined above
        with tf.Session(config=config) as sess:
            # when using a queue to load data, do not create the session with a with-block
            # build the UNet model
            train_model = unet.UNet(sess, tf_flags)
            # train the UNet; arguments: batch size, number of training steps, ...
            train_model.train(tf_flags.batch_size, tf_flags.training_steps,
                              tf_flags.summary_steps, tf_flags.checkpoint_steps, tf_flags.save_steps)

    else:
        with tf.Session(config=config) as sess:
            # test on an image pair.
            test_model = unet.UNet(sess, tf_flags)

            # test phase: initialize the model parameters from the checkpoint file
            test_model.load(tf_flags.checkpoint)

            test_model.test(os.path.join(tf_flags.testing_set, "test"))

            print("Saved test files successfully !")
Example #3
def main(argv):
    tf_flags = tf.app.flags.FLAGS
    # flag name, default value, description
    tf.app.flags.DEFINE_string("output_dir", "model_output",
                               "checkpoint and summary directory.")
    tf.app.flags.DEFINE_string("phase", "train", "model phase: train/test.")
    tf.app.flags.DEFINE_string("training_set", "./datasets",
                               "dataset path for training.")
    tf.app.flags.DEFINE_string("testing_set", "./datasets/test",
                               "dataset path for testing one image pair.")

    tf.app.flags.DEFINE_integer("batch_size", 64, "batch size for training.")
    tf.app.flags.DEFINE_integer("training_steps", 100000,
                                "total training steps.")
    tf.app.flags.DEFINE_integer("summary_steps", 100, "summary period.")
    tf.app.flags.DEFINE_integer("checkpoint_steps", 1000, "checkpoint period.")
    tf.app.flags.DEFINE_integer("save_steps", 500, "checkpoint period.")
    tf.app.flags.DEFINE_string("checkpoint", None,
                               "checkpoint name for restoring.")

    # gpu config
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    config = tf.ConfigProto()

    config.gpu_options.allow_growth = True

    config.gpu_options.per_process_gpu_memory_fraction = 0.5

    if tf_flags.phase == "train":
        with tf.Session(config=config) as sess:
            # with tf.Session() as sess:
            train_model = unet.UNet(sess, tf_flags)

            train_model.train(tf_flags.batch_size, tf_flags.training_steps,
                              tf_flags.summary_steps,
                              tf_flags.checkpoint_steps, tf_flags.save_steps)
    else:
        with tf.Session(config=config) as sess:
            # with tf.Session() as sess:
            test_model = unet.UNet(sess, tf_flags)
            test_model.load(tf_flags.checkpoint)
            image, output_masks = test_model.test()

            filename_A = "input.png"
            filename_B = "output_masks.png"

            cv2.imwrite(filename_A, np.uint8(image[0].clip(0., 1.) * 255.))
            cv2.imwrite(filename_B,
                        np.uint8(output_masks[0].clip(0., 1.) * 255.))

            print("Saved files : {}, {}".format(filename_A, filename_B))
Example #4
def main():
    global best_prec1

    batch_size = args.batch_size
    split = args.split
    num_workers = args.num_workers

    if args.model == 'resnet34unet':
        model = unet.UNet(3, 1, activation='sigmoid', weight=None)
        preprocess = get_preprocessing_fn('resnet34')
    else:
        raise ValueError('{} is not a supported model'.format(args.model))

    valid_aug = augmentation.get_augmentations('valid', 1.0, args.image_size)

    valid_dataset = SIIM.SIIM_ACR(mode='valid',
                                  split=split,
                                  preprocess=preprocess,
                                  augmentation=valid_aug)

    val_dataloader = DataLoader(valid_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=num_workers,
                                pin_memory=True)

    if args.use_flip:
        valid_dataset_tta = SIIM.SIIM_ACR(mode='valid',
                                          split=split,
                                          preprocess=preprocess,
                                          augmentation=valid_aug,
                                          is_tta=True)

        global val_dataloader_tta
        val_dataloader_tta = DataLoader(valid_dataset_tta,
                                        batch_size=batch_size,
                                        shuffle=False,
                                        num_workers=num_workers,
                                        pin_memory=True)

    model = nn.DataParallel(model, device_ids=args.gpus).cuda()

    global metric
    metric = metrics.FscoreMetric(threshold=None)

    for i in range(args.snapshot):

        if os.path.exists(os.path.join('ckpt', args.model, str(args.split))):
            print('=> loading checkpoint {}'.format(
                os.path.join('ckpt', args.model, str(args.split))))
            ckpt = torch.load(
                os.path.join('ckpt', args.model, str(args.split),
                             str(i) + '_size%d' % args.image_size + '.pth'))

            model.load_state_dict(ckpt)
        else:
            raise ValueError('=> no checkpoint found at {}'.format(
                os.path.join('ckpt', args.model)))

        prec1 = validate(val_dataloader, model, i)
Example #5
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)
    # gpu config.
    config = tf.ConfigProto()
    #config.gpu_options.per_process_gpu_memory_fraction = 0.5
    # config.gpu_options.allow_growth = True

    if FLAGS.phase == 'train':
        with tf.Session(config=config) as sess:
            # when using a queue to load data, do not create the session with a with-block
            train_model = unet.UNet(sess, FLAGS, is_training=True)
            train_model.train(
                batch_size=FLAGS.train_batch_size,
                training_number_of_steps=FLAGS.training_number_of_steps,
                summary_steps=FLAGS.summary_steps,
                checkpoint_steps=FLAGS.checkpoint_steps,
                save_steps=FLAGS.save_steps,
                dataset_dir=FLAGS.dataset_dir,
                dataset=FLAGS.dataset,
                tf_initial_checkpoint_dir=FLAGS.tf_initial_checkpoint_dir)
    else:
        with tf.Session(config=config) as sess:
            # test on an image pair.
            test_model = unet.UNet(sess, FLAGS)
            test_model.load(FLAGS.checkpoint)
            image, output_masks = test_model.test()
            # return numpy ndarray.

            # save two images.
            print('\n\n\n\n???\n\n\n\n')
            filename_A = 'input.png'
            filename_B = 'output_masks.png'

            cv2.imwrite(filename_A, np.uint8(image[0].clip(0., 1.) * 255.))
            cv2.imwrite(filename_B,
                        np.uint8(output_masks[0].clip(0., 1.) * 255.))

            # Utilize cv2.imwrite() to save images.
            print('Saved files: {}, {}'.format(filename_A, filename_B))
Example #6
def main():
    global best_prec1

    batch_size = args.batch_size
    split = args.split
    num_workers = args.num_workers
    scheduler_step = args.epochs

    print('using split %d' % split)

    if args.model == 'resnet34unet':
        model = unet.UNet(3, 1, activation=None, dr=args.dr)
        preprocess = get_preprocessing_fn('resnet34')
    else:
        raise ValueError('{} is not a supported model'.format(args.model))

    if not os.path.exists(os.path.join('ckpt', args.model, str(args.split))):
        os.makedirs(os.path.join('ckpt', args.model, str(args.split)))

    if not os.path.exists(os.path.join('logdir', args.model, str(args.split))):
        os.makedirs(os.path.join('logdir', args.model, str(args.split)))

    train_aug = augmentation.get_augmentations('train', 1.0, args.image_size)
    valid_aug = augmentation.get_augmentations('valid', 1.0, args.image_size)

    train_dataset = SIIM.SIIM_ACR(
        mode='train',
        split=split,
        preprocess=preprocess,
        augmentation=train_aug,
    )

    valid_dataset = SIIM.SIIM_ACR(
        mode='valid',
        split=split,
        preprocess=preprocess,
        augmentation=valid_aug,
    )

    print('train valid dataset init successfully')

    if args.use_total:
        train_dataset = SIIM.SIIM_ACR(mode='train',
                                      split=split,
                                      preprocess=preprocess,
                                      augmentation=train_aug,
                                      use_total=True)

    # weights = train_dataset.weight + 1
    # train_sampler = WeightedRandomSampler(weights, num_samples=len(train_dataset))

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=num_workers,
                                  pin_memory=True)

    val_dataloader = DataLoader(valid_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=num_workers,
                                pin_memory=True)

    model = nn.DataParallel(model, device_ids=args.gpus).cuda()

    global metric
    metric = metrics.FscoreMetric(threshold=0.5)

    cudnn.benchmark = True

    criterion_1 = loss.BCEDiceLoss().cuda()
    # criterion_1 = nn.BCEWithLogitsLoss()
    # criterion_1 = loss.WeightedBCELoss().cuda()
    # criterion_1 = loss.DiceLoss().cuda()
    criterion_2 = lovasz_losses.lovasz_hinge

    if args.optim == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.learning_rate,
                                    momentum=0.9,
                                    weight_decay=1e-4)
    elif args.optim == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), args.learning_rate)

    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=args.epochs, eta_min=args.min_lr)

    if args.pretrained_model:
        ckpt = torch.load(args.pretrained_model)
        model.load_state_dict(ckpt)
        print('use pretrained model on', args.pretrained_model)

    i = 0
    if args.use_total and args.resume:
        if os.path.exists(os.path.join('ckpt', args.model)):
            print('=> loading checkpoint {}'.format(
                os.path.join('ckpt', args.model)))
            ckpt = torch.load(
                os.path.join('ckpt', args.model,
                             str(i) + '_size%d' % args.image_size + '.pth'))

            model.load_state_dict(ckpt)
        else:
            print('=> no checkpoint found at {}'.format(
                os.path.join('ckpt', args.model,
                             str(i) + '_size%d' % args.image_size + '.pth')))

    elif args.resume:
        if os.path.exists(os.path.join('ckpt', args.model, str(args.split))):
            print('=> loading checkpoint {}'.format(
                os.path.join('ckpt', args.model, str(args.split))))
            ckpt = torch.load(
                os.path.join('ckpt', args.model, str(args.split),
                             str(i) + '_size%d' % args.image_size + '.pth'))

            model.load_state_dict(ckpt)
        else:
            print('=> no checkpoint found at {}'.format(
                os.path.join('ckpt', args.model,
                             str(i) + '_size%d' % args.image_size + '.pth')))

    for i in range(args.snapshot):

        for epoch in range(args.epochs):

            if args.lovasz is False:
                train(train_dataloader, model, criterion_1, optimizer, epoch,
                      i)
            else:
                train(train_dataloader, model, criterion_2, optimizer, epoch,
                      i)

            lr_scheduler.step()
            if (epoch + 1) % args.val_freq == 0 or epoch == args.epochs - 1:

                if args.lovasz is False:
                    prec1 = validate(val_dataloader, model, criterion_1, epoch,
                                     i)
                else:
                    prec1 = validate(val_dataloader, model, criterion_2, epoch,
                                     i)

                if prec1 > best_prec1:
                    best_prec1 = prec1
                    best_param = model.state_dict()

                    if args.use_total:
                        torch.save(
                            best_param,
                            os.path.join(
                                'ckpt', args.model,
                                str(i) + '_size%d' % args.image_size + '.pth'))
                    else:
                        torch.save(
                            best_param,
                            os.path.join(
                                'ckpt', args.model, str(args.split),
                                str(i) + '_size%d' % args.image_size + '.pth'))

        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.learning_rate,
                                    momentum=0.9,
                                    weight_decay=1e-4)
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, scheduler_step, args.min_lr)
        best_prec1 = 0
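
Example #6 trains several snapshots in a row, resetting the optimizer, the cosine annealing schedule, and best_prec1 after each one. A common way to use the saved snapshots at inference time is to average their sigmoid outputs. The helper below is a sketch and not part of the original code; it assumes the checkpoint naming used above and a model/images pair already on the GPU:

import os
import torch

def snapshot_ensemble(model, images, ckpt_dir, image_size, n_snapshots):
    # Hypothetical helper: average the sigmoid outputs of all saved snapshot checkpoints.
    probs = 0.
    for i in range(n_snapshots):
        ckpt = torch.load(os.path.join(ckpt_dir, '%d_size%d.pth' % (i, image_size)))
        model.load_state_dict(ckpt)
        model.eval()
        with torch.no_grad():
            probs = probs + torch.sigmoid(model(images))
    return probs / n_snapshots
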
Example #7
def main():
    global best_prec1

    batch_size = args.batch_size
    lr = args.learning_rate
    epochs = args.epochs
    val_freq = args.val_freq
    split = args.split
    num_workers = args.num_workers

    if not os.path.exists(os.path.join('logdir', args.model, str(args.split))):
        os.makedirs(os.path.join('logdir', args.model, str(args.split)))

    if args.model == 'resnet34unet':
        model = unet.UNet(3, 1, activation='sigmoid', weight=None)
        preprocess = get_preprocessing_fn('resnet34')
    else:
        raise ValueError('{} is not a supported model'.format(args.model))

    valid_aug = augmentation.get_augmentations('valid', 1.0, args.image_size)

    valid_dataset = SIIM_test.SIIM_ACR(mode='test',
                                       preprocess=preprocess,
                                       augmentation=valid_aug)

    val_dataloader = DataLoader(valid_dataset,
                                batch_size=1,
                                shuffle=False,
                                num_workers=num_workers,
                                pin_memory=True)

    model = nn.DataParallel(model, device_ids=args.gpus).cuda()

    global metric
    metric = metrics.FscoreMetric(threshold=None)

    # cudnn.benchmark = True

    for i in range(args.snapshot):

        if args.pretrained_model:
            ckpt = torch.load(args.pretrained_model)
            model.load_state_dict(ckpt)
            print('use pretrained model on', args.pretrained_model)
        elif args.use_total:
            print('=> loading checkpoint {}'.format(
                os.path.join('ckpt', args.model)))
            ckpt = torch.load(
                os.path.join('ckpt', args.model,
                             str(i) + '_size%d' % args.image_size + '.pth'))

            model.load_state_dict(ckpt)
        elif os.path.exists(os.path.join('ckpt', args.model, str(args.split))):
            print('=> loading checkpoint {}'.format(
                os.path.join('ckpt', args.model, str(args.split))))
            ckpt = torch.load(
                os.path.join('ckpt', args.model, str(args.split),
                             str(i) + '_size%d' % args.image_size + '.pth'))

            model.load_state_dict(ckpt)

        else:
            print('=> no checkpoint found at {}'.format(
                os.path.join('ckpt', args.model)))

        prec1 = test(val_dataloader, model, i)
Example #8
import numpy as np
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import f1_score

# Assumed imports (not shown in the original snippet): the local unet module and
# the data helpers trainGenerator, testGenerator and saveResult used below.
import unet
from data import trainGenerator, testGenerator, saveResult

X_train = np.load('../data/DRIVE/584_584/X_train.npy')
Y_train = np.load('../data/DRIVE/584_584/Y_train.npy')
Y_test = np.load('../data/DRIVE/Y_test.npy')

data_gen_args = dict(rotation_range=0.2,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     shear_range=0.05,
                     zoom_range=0.05,
                     horizontal_flip=True,
                     fill_mode='nearest')
myGene = trainGenerator(1, '../data/DRIVE/training', 'images', '1st_manual', data_gen_args, save_to_dir=None)

model = unet.UNet(input_shape=(584, 565, 3))
model.summary()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss', verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=300, epochs=1, callbacks=[model_checkpoint])

testGene = testGenerator("../data/DRIVE/test/")
results = model.predict_generator(testGene, 20, verbose=1)
print(results[0])
np.save('results.npy', results)

saveResult("../data/DRIVE/test/", results)

Y_test = np.array(Y_test, dtype="int").flatten()
results = np.array(results > 0, dtype="int").flatten()
F1 = f1_score(Y_test, results, labels=None, average='binary', sample_weight=None)
print(">> F1-Score = {:.2f}%".format(np.mean(F1 * 100)))
Example #9
import numpy as np
import torch
import scipy.io
from skimage.transform import resize
from scipy.spatial import distance

# Assumed import (not shown in the original snippet): the local unet module
# providing the UNet class instantiated below.
import unet

cuda_device = 'cuda:2'
device = torch.device(cuda_device if torch.cuda.is_available() else "cpu")
torch.cuda.set_device(device)

num_class = 37
H = 600
W = 480
model_folder = '600_480_pow_5_binary_unet_setup_1'
mat_name = 'mat500'
setup = model_folder[-1]

model = unet.UNet(1, num_class, [64, 128, 256, 512]).to(device)

# load the saved model
model.load_state_dict(
    torch.load('../../saved_models/Laplace/setup' + setup + '/' +
               model_folder + '/loss_4.5879849302392e-05_E_759.pth',
               map_location=cuda_device)['model'])
model.eval()


###################
def angle(v1, v2):
    v1 = np.array(v1)
    v2 = np.array(v2)
    v1 = v1 - [H / 2, W / 2]
    v2 = v2 - [H / 2, W / 2]