示例#1
0
def convert_save(path, name, ckpth, image):
    """Run the saved generator on *image* and write the result to disk.

    Args:
        path: Directory in which to save the output image.
        name: File name for the saved image.
        ckpth: Directory containing the 'Generator.pth' state dict.
        image: Input tensor of shape (1, C, H, W) — presumably normalized
            to [-1, 1] with mean/std 0.5; TODO confirm against the loader.
    """
    model = Unet()
    model.load_state_dict(torch.load(os.path.join(ckpth, 'Generator.pth')))
    # Fix: switch to eval mode and disable autograd for inference —
    # otherwise dropout/batch-norm layers behave as in training and
    # gradients are tracked needlessly.
    model.eval()
    with torch.no_grad():
        output_image = model(image).squeeze(0).numpy()
    # CHW -> HWC for PIL.
    output_image = np.moveaxis(output_image, 0, 2)
    # Undo Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)): [-1, 1] -> [0, 1].
    output_image = output_image * np.array((0.5, 0.5, 0.5)) + \
        np.array((0.5, 0.5, 0.5))
    output_image = np.clip(output_image, 0, 1)
    output_image = Image.fromarray(np.uint8(output_image * 255))
    output_image.save(os.path.join(path, name))
示例#2
0
def test_unet():
    """Smoke test: the Unet generator should accept random noise-shaped input."""
    dataset = mnist_data()

    resnet_backbone = ResNet()
    generator = Unet()

    noise_shape = (50, 28, 28, 1)
    noise = np.random.normal(size=noise_shape)
    predictions = generator.predict(noise)
    return True
示例#3
0
    def _build_training(self):
        """Build the training graph: forward pass, loss, optimizer and
        summary/checkpoint machinery.

        Sets self.output, self.loss, self.opt, self.summary, self.writer,
        self.saver and self.summary_proto.
        """
        # Forward pass through the U-Net (fresh variables, no reuse).
        self.output = Unet(name="UNet", in_data=self.input_data, reuse=False)

        # Loss. Earlier alternatives kept for reference:
        # self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        #     labels=self.input_masks, logits=self.output))
        # self.loss = tf.reduce_mean(tf.squared_difference(self.input_masks,
        #     self.output))
        # Use Tensorflow and Keras at the same time.
        # NOTE(review): keras binary_crossentropy expects probabilities —
        # assumes self.output is already sigmoid-activated; confirm in Unet().
        self.loss = tf.reduce_mean(
            tf.keras.losses.binary_crossentropy(self.input_masks, self.output))

        # Adam optimizer on the configured learning rate.
        self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(
            self.loss, name="opt")

        # Scalar summary for TensorBoard.
        tf.summary.scalar('loss', self.loss)

        self.summary = tf.summary.merge_all()
        # Writer stores summaries (and the graph) under summary_dir.
        self.writer = tf.summary.FileWriter(self.summary_dir,
                                            graph=self.sess.graph)
        # Keep at most the 10 most recent checkpoints.
        self.saver = tf.train.Saver(max_to_keep=10, name=self.saver_name)
        self.summary_proto = tf.Summary()
示例#4
0
def test(args):
    """Evaluate a trained Unet on the Sony test set and save result images.

    For each batch, writes three JPEGs into <result_dir>/eval: the scaled
    low-light input, the network output and the ground truth.
    """
    # Fix: scipy.misc.toimage was removed in SciPy 1.2 — save through PIL,
    # clipping to [0, 255] exactly as cmin/cmax did.
    from PIL import Image

    def _save_jpg(arr, fname):
        # Clip and convert to uint8 before handing off to PIL.
        Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8)).save(fname)

    # device
    device = torch.device("cuda:%d" %
                          args.gpu if torch.cuda.is_available() else "cpu")
    torch.backends.cudnn.benchmark = True

    # data
    testset = SonyTestDataset(args.input_dir, args.gt_dir)
    test_loader = DataLoader(testset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers)

    # model
    model = Unet()
    model.load_state_dict(torch.load(args.model))
    model.to(device)
    model.eval()

    # Output directory — create once, outside the loop.
    out_dir = os.path.join(args.result_dir, 'eval')
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    # testing — no_grad: inference only, skip autograd bookkeeping.
    with torch.no_grad():
        for i, databatch in tqdm(enumerate(test_loader),
                                 total=len(test_loader)):
            input_full, scale_full, gt_full, test_id, ratio = databatch
            scale_full = torch.squeeze(scale_full)
            gt_full = torch.squeeze(gt_full)

            # processing
            inputs = input_full.to(device)
            outputs = model(inputs)
            outputs = torch.squeeze(outputs.cpu())
            outputs = outputs.permute(1, 2, 0)  # CHW -> HWC

            # scaling and clipping
            outputs = outputs.numpy()
            scale_full, gt_full = scale_full.numpy(), gt_full.numpy()
            # scale the low-light image to the same mean of the ground truth
            scale_full = scale_full * np.mean(gt_full) / np.mean(scale_full)
            outputs = np.clip(outputs, 0, 1)

            # saving
            _save_jpg(scale_full * 255,
                      os.path.join(out_dir, '%05d_00_train_%d_scale.jpg' %
                                   (test_id[0], ratio[0])))
            _save_jpg(outputs * 255,
                      os.path.join(out_dir, '%05d_00_train_%d_out.jpg' %
                                   (test_id[0], ratio[0])))
            _save_jpg(gt_full * 255,
                      os.path.join(out_dir, '%05d_00_train_%d_gt.jpg' %
                                   (test_id[0], ratio[0])))
示例#5
0
    def _build_test(self):
        """Build the inference graph and create the checkpoint saver.

        The saver is constructed after the network so the network's
        variables exist and are captured by it.

        Returns:
            The network's output tensor.
        """
        net_output = Unet(name="UNet", in_data=self.input_data, reuse=False)

        # Keep only the 10 newest checkpoints.
        self.saver = tf.train.Saver(max_to_keep=10, name=self.saver_name)

        return net_output
示例#6
0
def main():
    """Train a Unet on Cityscapes, checkpointing after every epoch."""
    # Resume from the saved checkpoint, or start a fresh 19-class model.
    if opt.is_continue:
        model = torch.load('unet.pth').to(device)
    else:
        model = Unet(19).to(device)

    dataset = Cityscapes(opt.root, resize=opt.resize, crop=opt.crop)
    dataloader = DataLoader(dataset,
                            shuffle=True,
                            batch_size=opt.batch_size,
                            num_workers=1)

    criterion = BCELoss().to(device)
    optimizer = Adam(model.parameters(), lr=0.001)

    t_now = time.time()

    for epoch in range(opt.n_epochs):
        print('epoch {}'.format(epoch))
        for step, (inputs, labels) in enumerate(dataloader):
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()
            loss = criterion(model(inputs), labels)
            loss.backward()
            optimizer.step()

            # Periodic progress report with elapsed time per 100 steps.
            if step % 100 == 0:
                print(loss)
                print('time:', time.time() - t_now)
                t_now = time.time()

        print(loss)

        # Save the whole model (not just a state dict) so is_continue can
        # torch.load() it directly.
        torch.save(model, 'unet.pth')
示例#7
0
def test(args):
    """Run a trained Unet over every .DNG raw file in args.imgdir and save
    each network output next to its input as '<name>_out.jpg'.
    """
    # Fix: scipy.misc.toimage was removed in SciPy 1.2 — save through PIL.
    from PIL import Image

    # device
    device = torch.device("cuda:%d" %
                          args.gpu if torch.cuda.is_available() else "cpu")
    torch.backends.cudnn.benchmark = True

    # images path
    fns = glob.glob(path.join(args.imgdir, '*.DNG'))

    # model
    model = Unet()
    model.load_state_dict(torch.load(args.model))
    model.to(device)
    model.eval()

    # Fixed amplification ratio applied to the packed raw data.
    ratio = 200

    for fn in fns:
        print(fn)
        raw = rawpy.imread(fn)

        # 'inp' rather than 'input' — avoid shadowing the builtin.
        inp = np.expand_dims(pack_raw(raw), axis=0) * ratio
        inp = crop_center(inp, 1024, 1024)
        inp = torch.from_numpy(inp)
        inp = torch.squeeze(inp)
        inp = inp.permute(2, 0, 1)          # HWC -> CHW
        inp = torch.unsqueeze(inp, dim=0)   # add batch dimension
        inp = inp.to(device)

        # Inference only — no autograd bookkeeping.
        with torch.no_grad():
            outputs = model(inp)
        outputs = torch.squeeze(outputs.cpu())
        outputs = outputs.permute(1, 2, 0).numpy()  # CHW -> HWC
        outputs = np.clip(outputs, 0, 1)

        # Note: the original also computed an unused 'scale_full' tensor;
        # that dead code is removed here.
        out_img = np.clip(outputs * 255, 0, 255).astype(np.uint8)
        Image.fromarray(out_img).save(
            path.join(args.imgdir, path.basename(fn) + '_out.jpg'))
    def _build_training(self):
        """Build the training graph.

        Defines self.output, self.loss, self.acc, self.opt, self.summary,
        self.writer, self.saver and self.summary_proto.
        """

        self.output = Unet(name="UNet", in_data=self.input_data, reuse=False)

        # Loss: softmax cross-entropy between the target masks and the
        # network logits; lower is better.
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=self.input_masks,
                                                    logits=self.output))

        # Earlier alternatives kept for reference:
        # self.loss = tf.reduce_mean(tf.squared_difference(self.input_masks,
        #     self.output))
        # Use Tensorflow and Keras at the same time.
        # self.loss = tf.reduce_mean(tf.keras.losses.binary_crossentropy(
        #     self.input_masks, self.output))

        # Accuracy (added 2019-11-11): mean of element-wise equality
        # between masks and output.
        # NOTE(review): tf.equal on float network outputs almost never
        # holds exactly — this only measures something meaningful if
        # self.output is discretized; confirm the intent.
        correct_prediction = tf.equal(self.input_masks, self.output)
        self.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Optimizer: Adam with bias-corrected adaptive learning rates,
        # minimizing the loss at the configured rate.
        self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(
            self.loss, name="opt")

        # Scalar summaries for TensorBoard.
        tf.summary.scalar("acc", self.acc)  # added 2019-11-11
        tf.summary.scalar("loss", self.loss)

        # Merge all summary ops so one run covers every summary.
        self.summary = tf.summary.merge_all()

        # Writer that stores summaries (and the graph, so TensorBoard can
        # show tensor shape information) under summary_dir.
        self.writer = tf.summary.FileWriter(self.summary_dir,
                                            graph=self.sess.graph)

        # Keep at most the 10 newest checkpoints; tf.train.Saver persists
        # all variables by default.
        self.saver = tf.train.Saver(max_to_keep=10, name=self.saver_name)
        self.summary_proto = tf.Summary()
示例#9
0
def train(lr, patch_size, n_classes, epochs, gpuid, model_name, batch_size):
    """Train the selected segmentation model on HDF5-backed patch tables.

    Trains with best-val checkpointing and early stopping, reloads the best
    weights, and saves the model as '<model_name> segmentation.hdf5'.
    """
    shape = (patch_size, patch_size, 3)

    # Select the architecture by name; unknown names fall back to the
    # proposed CNN.
    if model_name == "Unet":
        model = Unet.Unet(input_size=shape, classes=n_classes)
    elif model_name == "Linknet":
        model = Linknet.Linknet(n_classes).base(shape)
    elif model_name == "ResUnet":
        model = ResUnet.ResUnet().base(shape, n_classes)
    else:
        model = proposedCNN.proposedCnn().base(shape, n_classes)

    x_train = HDF5Matrix("table_train.pytable", 'img')
    y_train = HDF5Matrix("table_train.pytable", 'mask')
    x_val = HDF5Matrix("table_val.pytable", 'img')
    y_val = HDF5Matrix("table_val.pytable", 'mask')

    os.environ['CUDA_VISIBLE_DEVICES'] = gpuid

    path = "checkpoint.hdf5"
    optimizer = Adam(lr=lr)
    callbacks = [
        ModelCheckpoint(filepath=path,
                        monitor='val_loss',
                        verbose=1,
                        save_best_only=True),
        EarlyStopping(monitor='val_loss', patience=10, verbose=1),
    ]

    # Multi-class patches use categorical CE, single-class use binary CE.
    loss_name = ('categorical_crossentropy' if n_classes > 1
                 else 'binary_crossentropy')
    model.compile(optimizer=optimizer, loss=loss_name, metrics=['acc'])
    model.fit(x=x_train,
              y=y_train,
              batch_size=batch_size,
              epochs=epochs,
              callbacks=callbacks,
              validation_data=(x_val, y_val),
              shuffle="batch")

    # Restore the best checkpoint before the final save.
    model.load_weights(path)
    model.save(model_name + " segmentation.hdf5")
    def _build_test(self):
        """Build the inference graph plus an accuracy metric and the
        summary/checkpoint machinery; return the network output tensor."""
        # network.
        output = Unet(name="UNet", in_data=self.input_data, reuse=False)
        # NOTE(review): the argmax is taken over self.input_data rather
        # than the target masks — comparing input against output looks
        # suspicious; confirm the intended accuracy definition.
        test_prediction = tf.equal(tf.argmax(self.input_data, 1),
                                   tf.argmax(output, 1))
        self.acc = tf.reduce_mean(tf.cast(test_prediction, tf.float32))

        # Scalar summary for TensorBoard.
        tf.summary.scalar("acc", self.acc)

        self.summary = tf.summary.merge_all()

        self.writer = tf.summary.FileWriter(self.summary_dir,
                                            graph=self.sess.graph)

        # Keep at most the 10 newest checkpoints; tf.train.Saver persists
        # all variables by default.
        self.saver = tf.train.Saver(max_to_keep=10, name=self.saver_name)
        self.summary_proto = tf.Summary()
        # define saver, after the network!
        return output
示例#11
0
    def _build_training(self):
        """Assemble the loss, optimizer, summaries and checkpointing used
        during training."""
        self.output = Unet(name="UNet", in_data=self.input_data, reuse=False)

        # Sigmoid cross-entropy between target masks and network logits.
        sigmoid_ce = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.input_masks, logits=self.output)
        self.loss = tf.reduce_mean(sigmoid_ce)

        # Adam optimizer on the configured learning rate.
        self.opt = tf.train.AdamOptimizer(
            learning_rate=self.lr).minimize(self.loss, name="opt")

        # TensorBoard bookkeeping: scalar loss summary, merged ops, writer.
        tf.summary.scalar('loss', self.loss)
        self.summary = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(self.summary_dir,
                                            graph=self.sess.graph)

        # Keep only the 10 newest checkpoints.
        self.saver = tf.train.Saver(max_to_keep=10, name=self.saver_name)
        self.summary_proto = tf.Summary()
示例#12
0
def getModelsandData(path, batch_size, device):
    """Construct the pix2pix discriminator/generator pair and the map
    dataloader, and resolve the torch device.

    Returns:
        Tuple (netD, netG, dataloader, device).
    """
    netD = PatchGAN()
    netG = Unet()
    for net in (netD, netG):
        apply_weights(net)

    device = torch.device(device)

    transform = transforms.Compose([
        transforms.Resize((286, 572)),
        RandomCropMap(256),
        transforms.ToTensor(),
        # Map [0, 1] pixel values to [-1, 1] on all three channels.
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])

    dataloader = torch.utils.data.DataLoader(
        MapDataset(path, transform=transform),
        batch_size=batch_size,
        shuffle=True,
    )

    return netD, netG, dataloader, device
示例#13
0
    def test_unet_model(self):
        """End-to-end Unet smoke test: load a raster and its shapefile
        labels, standardize, train for one epoch, predict and plot."""
        # Load the test image as a geotiff raster.
        image = Raster \
            .read \
            .format("geotiff") \
            .load(TEST_IMAGE_PATH)

        # Load the label shapefile aligned to the image's pixel grid and
        # extent.
        label: Raster = Raster \
            .read \
            .format("shp") \
            .options(
            pixel=image.pixel,
            extent=image.extent
        ) \
            .load(self.shape_path)

        # Standardize band values before feeding the network.
        standarize1 = ImageStand(raster=image)
        standarized = standarize1.standarize_image(StandardScaler())
        raster_data = RasterData(standarized, label)
        # Cut the raster into 64x64 training patches.
        unet_images = raster_data.prepare_unet_data(image_size=[64, 64])

        # Long patience values: checkpoint/early-stop effectively disabled
        # for this one-epoch smoke run.
        callbacks = [
            EarlyStopping(patience=100, verbose=1),
            ReduceLROnPlateau(factor=0.1, patience=100, min_lr=0, verbose=1),
            ModelCheckpoint('model_more_class_pixels.h5',
                            verbose=1,
                            save_best_only=True,
                            save_weights_only=False)
        ]
        config = UnetConfig(
            input_size=[64, 64, 3],
            metrics=["accuracy"],
            optimizer=SGD(lr=0.001),
            callbacks=callbacks,
            loss="binary_crossentropy",
        )
        # Build, train a single epoch, then predict on the first test patch
        # and show prediction next to the input.
        unet = Unet(config=config)
        unet.compile()
        unet.fit(unet_images, epochs=1)
        predicted = unet.predict(x=unet_images.x_test[0], threshold=0.4)
        SubPlots().extend(predicted, unet_images.x_test[0]).plot(nrows=1)
示例#14
0
def run_model():
    """Build the configured model (MIMO or UNET), optionally load saved
    weights, and optionally run a training session on the CD dataset.

    Reads module-level configuration (MODEL, LOAD_MODEL,
    MODEL_TRAINING_SESSION, SPATCH, ...); exits with status 1 when MODEL
    is not a recognized choice.
    """
    dataset = CD_Dataset(path=DATASET_PATH,
                         download=True,
                         fit=False,
                         num_classes=OUTPUT_CHANNELS[0])
    # Fixed normalization statistics applied to the features.
    dataset.mean_features = np.array([0.5, 0.5, 0.5])
    dataset.std_features = np.array([0.5, 0.5, 0.5])

    input_patch_size = [SPATCH, SPATCH]
    model_input_size = input_patch_size + INPUT_CHANNELS

    # Bug fix: the original assigned to MODEL_PATH_NAME while also reading
    # it inside the function, which makes the name local and raises
    # UnboundLocalError on first use. Use a distinct local name instead.
    if MODEL == MIMO:
        model_path_name = 'MIMO_' + MODEL_PATH_NAME
        model = MimoNet(model_input_size,
                        classes=OUTPUT_CHANNELS[0],
                        regularized=True)
    elif MODEL == UNET:
        model_path_name = 'UNET_' + MODEL_PATH_NAME
        model = Unet(model_input_size,
                     classes=OUTPUT_CHANNELS[0],
                     regularized=True)
    else:
        print('CHOOSE MODEL: 0:MIMO, 1:UNET')
        # Bug fix: exit with a non-zero status — an unknown MODEL is an
        # error, not a successful run.
        sys.exit(1)

    if LOAD_MODEL:
        print("loading model " + model_path_name + " from disk.")
        model.load_model(model_path_name)

    if MODEL_TRAINING_SESSION:
        print("trainig model")
        train(model,
              dataset,
              epochs=EPOCHS,
              n_batch=N_PATCH_BATCH,
              use_weights=True,
              name=model_path_name)
        print("saving model " + model_path_name + " to disk.")
示例#15
0
def main():
    """Parse CLI args, build datasets/model/optimizer, and launch training."""
    args = parser.parse_args()
    step = 0
    exp_name = f'{args.name}_{hp.max_lr}_{hp.cycle_length}'

    # Augmented joint transforms for training; pad-only for validation.
    train_tf = segtrans.JointCompose([
        segtrans.Resize(400),
        segtrans.RandomRotate(0, 90),
        segtrans.RandomCrop(256, 256),
        segtrans.ToTensor(),
        segtrans.Normalize(mean=hp.mean, std=hp.std),
    ])
    val_tf = segtrans.JointCompose([
        segtrans.PadToFactor(),
        segtrans.ToTensor(),
        segtrans.Normalize(mean=hp.mean, std=hp.std),
    ])

    train_dataset = DSBDataset(f'{args.data}/train', transforms=train_tf)
    val_dataset = DSBDataset(f'{args.data}/val', transforms=val_tf)

    model = Unet()

    # Optionally resume: restores weights, global step and run name.
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint)
        model.load_state_dict(checkpoint['state'])
        step = checkpoint['step']
        exp_name = checkpoint['exp_name']

    optimizer = Adam(model.parameters(), lr=hp.max_lr)

    # Either sweep learning rates (LR finder) or use SGDR warm restarts.
    if args.find_lr:
        scheduler = LRFinderScheduler(optimizer)
    else:
        scheduler = SGDRScheduler(optimizer,
                                  min_lr=hp.min_lr,
                                  max_lr=hp.max_lr,
                                  cycle_length=hp.cycle_length,
                                  current_step=step)

    model.cuda(device=args.device)
    train(model, optimizer, scheduler, train_dataset, val_dataset,
          n_epochs=args.epochs, batch_size=args.batch_size,
          exp_name=exp_name, device=args.device, step=step)
示例#16
0
            file_name = '../submission.csv'
            with open(file_name, 'a+') as f:

                for i in range(mask_preds.shape[0]):
                    s = str(i + 1 + start) + ',' + rles[i]
                    f.write(s + '\n')

        start += mask_preds.shape[0]


# Start the submission file with its CSV header row.
file_name = '../submission.csv'
with open(file_name, 'a+') as f:
    # NOTE(review): mode 'a+' appends, so re-running this script adds a
    # duplicate header — 'w' is presumably intended; confirm.
    f.write('img,pixels\n')

# Load saved model
model = Unet(1, add_residual=True)
model.load_state_dict(torch.load('./saved_model'))  # Load trained model

if use_cuda and torch.cuda.is_available():
    model.cuda()

# Inference-time transforms: resize to 512x512 and convert to tensor.
transforms_valid = A.Compose([
    A.Resize(height=512, width=512, p=1.0),

    # A.Normalize(mean=(0),std=(255),p=1.0),
    ToTensorV2(p=1.0),
])

sub = pd.read_csv('../data-samples/sample_submission.csv')
sub_data = Ultrasound_Dataset(
    sub, transform=transforms_valid)  # Same Transform as in validation
示例#17
0
def main(args):
    """Evaluate a trained meta-learned U-Net on the Pascal-5i validation
    split, plot per-label accuracy and IoU, and save results.

    Reads a JSON config (individual entries overridable via CLI flags),
    loads model weights from config['model_path'], runs the meta-learner's
    evaluate loop, and writes test_results.json next to the checkpoint.
    """

    # NOTE(review): hard-coded user-specific data path — should be a
    # parameter or config entry.
    data_path = '/home/birgit/MA/Code/torchmeta/gitlab/data'
    with open(args.config, 'r') as f:
        config = json.load(f)

    # CLI overrides for selected config entries.
    if args.folder is not None:
        config['folder'] = args.folder
    if args.num_steps > 0:
        config['num_steps'] = args.num_steps
    if args.num_batches > 0:
        config['num_batches'] = args.num_batches

    device = torch.device(
        'cuda' if args.use_cuda and torch.cuda.is_available() else 'cpu')

    loss_function = DiceLoss()

    dataset = 'pascal5i'
    fold = config['fold']

    # Inner-loop adaptation steps for MAML evaluation.
    steps = config['num_adaption_steps']

    padding = 1

    # feature_scale controls the U-Net channel widths; default is 4.
    if 'feature_scale' in config.keys():
        model = Unet(feature_scale=config['feature_scale'], padding=padding)
    else:
        model = Unet(feature_scale=4, padding=padding)

    # get datasets and load into meta learning format
    meta_train_dataset, meta_val_dataset, meta_test_dataset = get_datasets(
        dataset,
        data_path,
        config['num_ways'],
        config['num_shots'],
        config['num_shots_test'],
        fold=fold,
        download=False,
        augment=False)

    meta_val_dataloader = BatchMetaDataLoader(meta_val_dataset,
                                              batch_size=config['batch_size'],
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              pin_memory=True)

    print('num shots = ', config['num_shots'])
    print(f'Using device: {device}')

    # Load the trained weights onto the chosen device.
    with open(config['model_path'], 'rb') as f:
        model.load_state_dict(torch.load(f, map_location=device))

    metalearner = ModelAgnosticMetaLearning(model,
                                            first_order=config['first_order'],
                                            num_adaptation_steps=steps,
                                            step_size=config['step_size'],
                                            loss_function=loss_function,
                                            device=device)

    results = metalearner.evaluate(meta_val_dataloader,
                                   max_batches=config['num_batches'],
                                   verbose=args.verbose,
                                   desc='Test',
                                   is_test=True)

    if dataset == 'pascal5i':
        # The 20 Pascal VOC object classes.
        labels = [
            'aeroplane', 'bike', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
            'chair', 'cow', 'dining table', 'dog', 'horse', 'motorbike',
            'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
        ]
        accuracies = [
            value for _, value in results['mean_acc_per_label'].items()
        ]
        ious = [value for _, value in results['mean_iou_per_label'].items()]

        # NOTE(review): val_ious/val_accs filter out zero entries but are
        # never used afterwards — dead code or a missing report?
        val_ious = [x for x in ious if x > 0.0]
        val_accs = [x for x in accuracies if x > 0.0]

        y_pos = np.arange(len(labels))

        # Side-by-side horizontal bar charts: accuracy and IoU per label.
        fig, (ax1, ax2) = plt.subplots(1, 2)

        ax1.barh(y_pos, accuracies, align='center', alpha=0.5)
        ax1.set_yticks(y_pos)
        ax1.set_yticklabels(labels)
        ax1.set_xlabel('acc')
        ax1.set_xlim(0, 1)
        ax1.set_title('Accuracies per label')

        ax2.barh(y_pos, ious, align='center', alpha=0.5)
        ax2.set_yticks(y_pos)
        ax2.set_yticklabels(labels)
        ax2.set_xlabel('iou')
        ax2.set_xlim(0, 1)
        ax2.set_title('IoU scores per label')
        plt.grid(True)

        plt.show()

    # Save results
    dirname = os.path.dirname(config['model_path'])
    with open(os.path.join(dirname, 'test_results.json'), 'w') as f:
        json.dump(results, f)
示例#18
0
# Training hyperparameters.
lr = 0.0005
weight_decay = 5e-5
lr_schedule = 0.985


def adjust_lr(optimizer, current_lr, schedule):
    """Decay the learning rate by *schedule* and apply it to every
    parameter group of *optimizer*.

    Returns:
        The new learning rate (current_lr * schedule).
    """
    new_lr = current_lr * schedule
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return new_lr


if __name__ == "__main__":
    args, unparsed = config.get_args()
    model = Unet(args)
    model = model.cuda()
    model.train()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 weight_decay=weight_decay)
    loss = MulticlassDiceLoss()

    train = get_file_list(brats_preprocessed_folder, train_ids_path)
    val = get_file_list(brats_preprocessed_folder, valid_ids_path)

    shapes = [brats_dataloader.load_patient(i)[0].shape[1:] for i in train]
    max_shape = np.max(shapes, 0)
    max_shape = list(np.max((max_shape, patch_size), 0))

    dataloader_train = brats_dataloader(train,
示例#19
0
from skimage.transform import resize
from medpy.io import load
import numpy as np

#import cv2
import nibabel as nib
from PIL import Image

from utils import dice_coef_loss, dice_coef, one_hot_encode, standardize

#checkpoint = ModelCheckpoint('new/weights.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')
#earlystopping = EarlyStopping(monitor = 'val_loss', verbose = 1,min_delta = 0.01, patience = 3, mode = 'min')
#callbacks_list = [checkpoint, earlystopping]

# Build the 4-channel 240x240 U-Net. Positional args are presumably
# (input tensor, base filters, dropout, batchnorm flag) — confirm against
# the Unet definition.
input_img = Input((240, 240, 4))
model = Unet(input_img, 16, 0.1, True)
learning_rate = 0.001
epochs = 5000
# Spread a linear LR decay over the full planned run.
decay_rate = learning_rate / epochs
model.compile(optimizer=Adam(lr=learning_rate, decay=decay_rate),
              loss=dice_coef_loss,
              metrics=[dice_coef])
model.summary()

# data preprocessing starts here
path = '../BRATS2017/Brats17TrainingData/HGG'
all_images = os.listdir(path)
#print(len(all_images))
all_images.sort()
# One BraTS volume: 240x240x155 voxels across 4 modalities.
data = np.zeros((240, 240, 155, 4))
x_to = []
def main(args):
    """Meta-train a U-Net with MAML: run the train/eval loop, checkpoint
    the best model, save a results dict, and plot loss/accuracy/IoU curves.

    Note:
        This function reads several names not defined in this view
        (augment, download_data, loss_function, bce_dice_focal) —
        presumably module-level globals; confirm they exist.
    """

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    device = torch.device(
        'cuda' if args.use_cuda and torch.cuda.is_available() else 'cpu')

    # Create output folder
    if (args.output_folder is not None):
        if not os.path.exists(args.output_folder):
            os.makedirs(args.output_folder)
            logging.debug('Creating folder `{0}`'.format(args.output_folder))

        # One timestamped subfolder per run.
        output_folder = os.path.join(args.output_folder,
                                     time.strftime('%Y-%m-%d_%H%M%S'))
        os.makedirs(output_folder)
        logging.debug('Creating folder `{0}`'.format(output_folder))

        args.datafolder = os.path.abspath(args.datafolder)
        args.model_path = os.path.abspath(
            os.path.join(output_folder, 'model.th'))

        # Save the configuration in a config.json file
        with open(os.path.join(output_folder, 'config.json'), 'w') as f:
            json.dump(vars(args), f, indent=2)
        logging.info('Saving configuration file in `{0}`'.format(
            os.path.abspath(os.path.join(output_folder, 'config.json'))))
    # NOTE(review): if args.output_folder is None, `output_folder` is never
    # bound but is used below when saving train_results.json — that path
    # raises NameError; confirm runs always pass an output folder.

    # Get datasets and load into meta learning format
    meta_train_dataset, meta_val_dataset, _ = get_datasets(
        args.dataset,
        args.datafolder,
        args.num_ways,
        args.num_shots,
        args.num_shots_test,
        augment=augment,
        fold=args.fold,
        download=download_data)

    meta_train_dataloader = BatchMetaDataLoader(meta_train_dataset,
                                                batch_size=args.batch_size,
                                                shuffle=True,
                                                num_workers=args.num_workers,
                                                pin_memory=True)

    meta_val_dataloader = BatchMetaDataLoader(meta_val_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              pin_memory=True)

    # Define model
    model = Unet(device=device, feature_scale=args.feature_scale)
    model = model.to(device)
    print(f'Using device: {device}')

    # Define optimizer (outer-loop / meta optimizer)
    meta_optimizer = torch.optim.Adam(model.parameters(),
                                      lr=args.meta_lr)  #, weight_decay=1e-5)
    #meta_optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate, momentum = 0.99)

    # Define meta learner
    metalearner = ModelAgnosticMetaLearning(
        model,
        meta_optimizer,
        first_order=args.first_order,
        num_adaptation_steps=args.num_adaption_steps,
        step_size=args.step_size,
        learn_step_size=False,
        loss_function=loss_function,
        device=device)

    # Best metric seen so far (accuracy if reported, else outer loss).
    best_value = None

    # Training loop
    epoch_desc = 'Epoch {{0: <{0}d}}'.format(1 +
                                             int(math.log10(args.num_epochs)))
    train_losses = []
    val_losses = []
    train_ious = []
    train_accuracies = []
    val_accuracies = []
    val_ious = []

    start_time = time.time()

    for epoch in range(args.num_epochs):
        print('start epoch ', epoch + 1)
        print('start train---------------------------------------------------')
        train_loss, train_accuracy, train_iou = metalearner.train(
            meta_train_dataloader,
            max_batches=args.num_batches,
            verbose=args.verbose,
            desc='Training',
            leave=False)
        print(f'\n train accuracy: {train_accuracy}, train loss: {train_loss}')
        print('end train---------------------------------------------------')
        train_losses.append(train_loss)
        train_accuracies.append(train_accuracy)
        train_ious.append(train_iou)

        # Evaluate in given intervals
        if epoch % args.val_step_size == 0:
            print(
                'start evaluate-------------------------------------------------'
            )
            results = metalearner.evaluate(meta_val_dataloader,
                                           max_batches=args.num_batches,
                                           verbose=args.verbose,
                                           desc=epoch_desc.format(epoch + 1),
                                           is_test=False)
            val_acc = results['accuracy']
            val_loss = results['mean_outer_loss']
            val_losses.append(val_loss)
            val_accuracies.append(val_acc)
            val_ious.append(results['iou'])
            print(
                f'\n validation accuracy: {val_acc}, validation loss: {val_loss}'
            )
            print(
                'end evaluate-------------------------------------------------'
            )

            # Save best model (higher accuracy, or lower outer loss when no
            # accuracy is reported).
            # NOTE(review): if 'accuracies_after' is present but not better,
            # save_model is left over from the previous epoch (possibly
            # unbound on the first one) — confirm intended.
            if 'accuracies_after' in results:
                if (best_value is None) or (best_value <
                                            results['accuracies_after']):
                    best_value = results['accuracies_after']
                    save_model = True
            elif (best_value is None) or (best_value >
                                          results['mean_outer_loss']):
                best_value = results['mean_outer_loss']
                save_model = True
            else:
                save_model = False

            if save_model and (args.output_folder is not None):
                with open(args.model_path, 'wb') as f:
                    torch.save(model.state_dict(), f)

        print('end epoch ', epoch + 1)

    elapsed_time = time.time() - start_time
    print('Finished after ',
          time.strftime('%H:%M:%S', time.gmtime(elapsed_time)))

    # Collect per-epoch curves plus total wall time into one results dict.
    r = {}
    r['train_losses'] = train_losses
    r['train_accuracies'] = train_accuracies
    r['train_ious'] = train_ious
    r['val_losses'] = val_losses
    r['val_accuracies'] = val_accuracies
    r['val_ious'] = val_ious
    r['time'] = time.strftime('%H:%M:%S', time.gmtime(elapsed_time))
    with open(os.path.join(output_folder, 'train_results.json'), 'w') as g:
        json.dump(r, g)
        logging.info('Saving results dict in `{0}`'.format(
            os.path.abspath(os.path.join(output_folder,
                                         'train_results.json'))))

    # Plot results
    plot_errors(args.num_epochs,
                train_losses,
                val_losses,
                val_step_size=args.val_step_size,
                output_folder=output_folder,
                save=True,
                bce_dice_focal=bce_dice_focal)
    plot_accuracy(args.num_epochs,
                  train_accuracies,
                  val_accuracies,
                  val_step_size=args.val_step_size,
                  output_folder=output_folder,
                  save=True)
    plot_iou(args.num_epochs,
             train_ious,
             val_ious,
             val_step_size=args.val_step_size,
             output_folder=output_folder,
             save=True)

    # Close HDF5-backed datasets if the implementation supports it.
    if hasattr(meta_train_dataset, 'close'):
        meta_train_dataset.close()
        meta_val_dataset.close()
示例#21
0
def run_hyperparam(modelStr, numEpochs):
    """Grid-search hyperparameters (currently only the learning rate).

    For every candidate setting a fresh Unet graph is built and trained for a
    reduced number of epochs, then evaluated once on the validation set.  The
    setting with the *lowest* validation loss is written to the hyperparameter
    log file.

    Args:
        modelStr: model identifier forwarded to run_one_epoch.
        numEpochs: requested epoch count; replaced by NUM_EPOCHS_HYPERPARAM
            when it equals the default NUM_EPOCHS, so searches stay cheap.
    """
    # Hyperparameter search
    import itertools

    #---------------------------------------------
    param_key = ['lr']
    lr = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]
    #---------------------------------------------

    #----set variables to emulate run_model()-----
    runMode = 'train'
    ckptDir = 'hyperparam_tmpckpt'
    if numEpochs == NUM_EPOCHS:
        numEpochs = NUM_EPOCHS_HYPERPARAM
    #---------------------------------------------

    # Cartesian product over all parameter lists (only lr for now).
    ps = [lr]
    params = list(itertools.product(*ps))

    logDir, logName = createLog('_hyperparam')

    count = 0
    # Track the best (lowest) validation loss seen so far.  BUGFIX: the
    # previous code initialised this to 0 and kept the *largest* loss,
    # which selected the worst hyperparameters; a loss must be minimised.
    best_val = float('inf')
    best_param = None
    for param in params:
        # build a new computational graph
        tf.reset_default_graph()

        input_sz = [IMG_DIM, IMG_DIM, 1]
        output_sz = [IMG_DIM, IMG_DIM, 3]

        curModel = Unet(input_sz, output_sz, verbose=False)
        curModel.create_model()
        curModel.metrics()

        count += 1
        printSeparator('Running #%d of %d runs...' % (count, len(params)))
        print(getParamStr(param_key, param))

        with tf.Session(config=GPU_CONFIG) as sess:
            # train the network
            dataset_filenames = getDataFileNames(
                TRAIN_DATA, excludeFnames=['.filepart', 'test'])
            for i in range(numEpochs):
                random.shuffle(dataset_filenames)
                train_loss = run_one_epoch(sess,
                                           curModel,
                                           dataset_filenames,
                                           modelStr,
                                           is_training=True)
                print('#%d training loss: %f' % (i, train_loss))

            # run the trained network on the validation set
            dataset_filenames = getDataFileNames(VALIDATION_DATA)
            val_loss = run_one_epoch(sess,
                                     curModel,
                                     dataset_filenames,
                                     modelStr,
                                     is_training=False)

            logToFile(
                logName,
                'train loss: %f, val loss: %f\n' % (train_loss, val_loss))
            # Keep the setting that achieves the lowest validation loss.
            if val_loss < best_val:
                best_val = val_loss
                best_param = param

    logToFile(logName, 'Best validation loss: %f' % best_val)
    logToFile(logName, getParamStr(param_key, best_param))
示例#22
0
def run_model(modelStr, runMode, ckptDir, dataDir, sampleDir, overrideCkpt,
              numEpochs):
    """Build the requested model and run it in train/test/val/sample mode.

    Args:
        modelStr: 'unet' or 'zhangnet'; selects the architecture.
            NOTE(review): any other value leaves `curModel` undefined and
            crashes at create_model() -- consider validating the argument.
        runMode: one of 'train', 'test', 'val', 'sample'.
        ckptDir: directory to restore checkpoints from / save them to.
        dataDir: dataset directory; when empty a default is chosen from
            runMode (TRAIN_DATA / TEST_DATA / VALIDATION_DATA).
        sampleDir: directory where sampled output images are written.
        overrideCkpt: when True (training only), ignore existing checkpoints.
        numEpochs: number of training epochs (forced to 1 when not training).
    """
    print('Running model...')

    # choose the correct dataset
    if dataDir == '':
        if runMode == 'train':
            dataDir = TRAIN_DATA
        elif runMode == 'test':
            dataDir = TEST_DATA
        elif runMode == 'val':
            dataDir = VALIDATION_DATA

    if not os.path.exists(dataDir):
        print(
            'Please specify a valid data directory, "%s" is not a valid directory. Exiting...'
            % dataDir)
        return
    else:
        print('Using dataset %s' % dataDir)

    print("Using checkpoint directory: {0}".format(ckptDir))

    # Sampling uses batch size 1; evaluation runs exactly one epoch and
    # never overrides an existing checkpoint.
    is_training = (runMode == 'train')
    batch_size = 1 if runMode == 'sample' else BATCH_SIZE
    numEpochs = numEpochs if is_training else 1
    overrideCkpt = overrideCkpt if is_training else False

    printSeparator('Initializing %s/reading constants.py' % modelStr)
    # Grayscale line-art input; Unet predicts a 3-channel image, ZhangNet a
    # flattened (IMG_DIM/4)^2 x 512 output.
    input_sz = [IMG_DIM, IMG_DIM, 1]
    if modelStr == 'unet':
        output_sz = [IMG_DIM, IMG_DIM, 3]
        curModel = Unet(input_sz, output_sz)
    if modelStr == 'zhangnet':
        output_sz = [(IMG_DIM / 4)**2, 512]
        curModel = ZhangNet(input_sz, output_sz)

    printSeparator('Building ' + modelStr)
    curModel.create_model()
    curModel.metrics()

    print("Running {0} model for {1} epochs.".format(modelStr, numEpochs))

    print("Reading in {0}-set filenames.".format(runMode))

    global_step = tf.Variable(
        0, trainable=False,
        name='global_step')  #tf.contrib.framework.get_or_create_global_step()
    saver = tf.train.Saver(max_to_keep=numEpochs)
    step = 0
    counter = 0

    # Sampling writes no logs; every other mode gets a fresh log file.
    if runMode == 'sample':
        logDir, logName = None, None
    else:
        logDir, logName = createLog(runMode)

    # get the data file names and check if the @dataDir is a hdf5 file
    if is_training:
        dataset_filenames = getDataFileNames(
            dataDir, excludeFnames=['.filepart', 'test'])
    else:
        dataset_filenames = getDataFileNames(dataDir,
                                             excludeFnames=['.filepart'])
    # Raw-image directories are wrapped in a single-element list so the
    # epoch loop below treats the whole directory as one "data file".
    if ('.jpg' in dataset_filenames[0]) or ('.png' in dataset_filenames[0]):
        print('The input data is detected to be raw images')
        NUM_SAMPLES = len(dataset_filenames)
        dataset_filenames = [dataset_filenames]

    printSeparator('Starting TF session')
    with tf.Session(config=GPU_CONFIG) as sess:
        print("Inititialized TF Session!")

        # load checkpoint if necessary
        i_stopped, found_ckpt = get_checkpoint(overrideCkpt, ckptDir, sess,
                                               saver)

        # save weights
        # printVars()
        # show_weights(getVar(sess, 'combine_3/kernel:0'))
        # exit(0)

        if runMode != 'sample':
            file_writer = tf.summary.FileWriter(logDir,
                                                graph=sess.graph,
                                                max_queue=10,
                                                flush_secs=30)

        # Training from scratch initialises variables; evaluation without a
        # checkpoint cannot proceed.
        if (not found_ckpt):
            if is_training:
                init_op = tf.global_variables_initializer(
                )  # tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
                init_op.run()
            else:
                # Exit if no checkpoint to test]
                print('Valid checkpoint not found under %s, exiting...' %
                      ckptDir)
                return

        # When evaluating, run exactly one epoch starting at the restored
        # checkpoint index.
        if not is_training:
            numEpochs = i_stopped + 1

        # run the network
        for epochCounter in range(i_stopped, numEpochs):
            batch_loss = []
            printSeparator("Running epoch %d" % epochCounter)
            random.shuffle(dataset_filenames)

            for j, data_file in enumerate(dataset_filenames):
                mini_loss = []
                # Each HDF5 file is loaded in DATA_LOAD_PARTITION slices so
                # it never has to fit in memory all at once.
                for iter_val in range(DATA_LOAD_PARTITION):
                    # Get data
                    print('Reading data in %s, iter_val: %d...' %
                          (data_file, iter_val))
                    # try:
                    # NOTE(review): `!= None` should be `is not None` per
                    # PEP 8 -- behaviour is the same for this constant.
                    if runMode == 'sample' and PAPER_IMG_NAMES != None:
                        input_batches, output_batches, imgNames = h52numpy(
                            data_file,
                            batch_sz=batch_size,
                            iter_val=iter_val,
                            mod_output=(modelStr == 'zhangnet'),
                            fileNames=PAPER_IMG_NAMES)
                        print(input_batches.shape)
                    else:
                        input_batches, output_batches, imgNames = h52numpy(
                            data_file,
                            batch_sz=batch_size,
                            iter_val=iter_val,
                            mod_output=(modelStr == 'zhangnet'))
                    # except:
                    #     logToFile(logName, "File reading failed...")
                    #     continue
                    print('Done reading, running the network (%d of %d)' %
                          (j + 1, len(dataset_filenames)))

                    bar = progressbar.ProgressBar(
                        maxval=int(len(input_batches) / batch_size))
                    bar.start()
                    count = 0
                    # Iterate over the loaded slice in batch_size chunks.
                    for dataIndx in range(0, len(imgNames), batch_size):
                        in_batch = input_batches[dataIndx:dataIndx +
                                                 batch_size]
                        if output_batches is None:
                            out_batch = None
                        else:
                            out_batch = output_batches[dataIndx:dataIndx +
                                                       batch_size]
                        imgName = imgNames[dataIndx:dataIndx + batch_size]

                        # look at the images in the dataset (for debug usage)
                        #for kk in range(batch_size):
                        #    numpy2jpg('tmp'+str(kk+dataIndx)+'.jpg', in_batch[kk,:,:,0], overlay=None, meanVal=LINE_MEAN, verbose=False)
                        #    numpy2jpg('KAK'+str(kk+dataIndx)+'.jpg', out_batch[kk,:,:], overlay=None, meanVal=1, verbose=False)
                        #if dataIndx>batch_size*2:
                        #    exit(0)

                        # Sampling writes images and hard-exits after
                        # NUM_SAMPLES; otherwise run a train/eval step.
                        if runMode == 'sample':
                            curModel.sample(
                                sess,
                                in_batch,
                                out_batch,
                                imgName=[os.path.join(sampleDir, imgName[0])])
                            if (NUM_SAMPLES - 1) == step:
                                exit(0)
                        else:
                            summary_loss, loss = curModel.run(
                                sess,
                                in_batch,
                                out_batch,
                                is_training,
                                imgName=os.path.join(sampleDir, imgName[0]))

                            file_writer.add_summary(summary_loss, step)
                            batch_loss.append(loss)
                            mini_loss.append(loss)

                        # Processed another batch
                        step += 1
                        count += 1
                        bar.update(count)
                    bar.finish()

                    # Drop references so the next slice can be loaded.
                    input_batches = None
                    output_batches = None

                logToFile(
                    logName, "Epoch %d Dataset #%d loss: %f" %
                    (epochCounter, j, np.mean(mini_loss)))

                counter += 1
                # run the sample images through the net to record the results to the Tensorflow (also locally stored)
                if is_training:
                    img_summary = curModel.sample(sess,
                                                  out2board=True,
                                                  imgName=logDir + '/imgs')
                    file_writer.add_summary(img_summary, counter)

                    # Periodic checkpointing every SAVE_CKPT_COUNTER files.
                    if counter % SAVE_CKPT_COUNTER == 0:
                        save_checkpoint(
                            ckptDir, sess, saver,
                            i_stopped + int(counter / SAVE_CKPT_COUNTER))

            test_loss = np.mean(batch_loss)
            logToFile(logName, "Epoch %d loss: %f" % (epochCounter, test_loss))

            if is_training:
                # Checkpoint model - every epoch
                #save_checkpoint(ckptDir, sess, saver, epochCounter)
                pass
            elif runMode != 'sample':
                if runMode == 'val':
                    # Update the file for choosing best hyperparameters
                    curFile = open(curModel.config.val_filename, 'a')
                    curFile.write("Validation set loss: {0}".format(test_loss))
                    curFile.write('\n')
                    curFile.close()
示例#23
0
}
# y_ints = [y]
# class_weights = class_weight.compute_class_weight('balanced', 8, y_ints)
# class_weights = dict(enumerate(class_weights))

# Expose both GPUs to TensorFlow.
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"

# TF session with on-demand GPU memory growth, registered as the Keras
# backend session.
tfconfig = tf.ConfigProto()
tfconfig.gpu_options.allow_growth = True
tfconfig.allow_soft_placement = True
sess = tf.Session(config=tfconfig)
sess.run(tf.compat.v1.global_variables_initializer())
keras.backend.set_session(sess)

# Build the Unet on 512x512 RGB inputs and compile with a linearly decaying
# Adam learning rate.
# NOTE(review): decay_rate is computed for 500 epochs but fit() below runs
# only 20 -- confirm which schedule was intended.
input_img = Input((512, 512, 3))
model = Unet(input_img, 16, 0.1, True)
learning_rate = 0.001
epochs = 500
decay_rate = learning_rate / epochs
model.compile(optimizer=Adam(lr=learning_rate, decay=decay_rate), loss='mse')
model.summary()

# Train as an autoencoder: the target equals the input (y=x).
history = model.fit(x=x, y=x, batch_size=32, epochs=20)

# Standalone max-pooling model (presumably for downsampling elsewhere --
# it is not used below).
maxpool_model = keras.Sequential()
maxpool_model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))

# Encoder sub-model: everything up to layer 'activation_10', used to
# extract bottleneck features from x.
flair_encoder = Model(model.input, model.get_layer('activation_10').output)
flair_encoder.summary()

bottleneck = flair_encoder.predict(x)
示例#24
0
    def _build_test(self):
        """Build the UNet inference graph and a saver for restoring weights.

        Returns the network's output tensor computed from `self.input_data`.
        """
        # Construct the generator graph first so its variables exist when
        # the Saver below collects them.
        net_output = Unet(name="UNet", in_data=self.input_data, reuse=False)

        # Saver used to restore trained weights at test time.
        self.Saver = tf.train.Saver(max_to_keep=10, name=self.saver_name)

        return net_output
示例#25
0
	# DataLoaders for the ultrasound dataset: shuffle only the training set.
	train_loader = DataLoader(train_data , batch_size = 4,shuffle=True)

	valid_data = Ultrasound_Dataset(valid_df,transform=transforms_valid)
	valid_loader = DataLoader(valid_data , batch_size = 4,shuffle=False)

	# Checking GPU Avalaibility

	use_cuda = True
	if use_cuda and torch.cuda.is_available():
		print('yes')
	print(torch.cuda.is_available())


	# Model Initialization

	# Single-channel Unet variant (semi-inception blocks, version 'b',
	# with residual connections).
	model = Unet(1,net_type='semi_inception',version='b',add_residual=True)
	
	if use_cuda and torch.cuda.is_available():
		model.cuda()
	
	# Composite loss with weights (0.5, 1) -- see CustomLoss for semantics.
	criterion = CustomLoss(0.5,1)

	# Adam at a small fixed LR; plateau scheduler reduces LR after 3 stale
	# validation epochs.
	optimizer = optim.Adam(model.parameters(),5e-6)
	scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience = 3)
	training_loss,valid_loss,model,saved_model=train_(model,optimizer,scheduler,criterion,train_loader,valid_loader,epochs=5)
	plot_learning_curve(training_loss,valid_loss)

	# save model for further use

	torch.save(model.state_dict(),'../Mymodel')
示例#26
0
# In[6]:


# Preview the first ISBI training image (stored flat; reshaped to 512x512).
plt.imshow(isbi.train[0].reshape(512, 512), cmap='gray');


# In[7]:


# Preview the matching segmentation target.
plt.imshow(isbi.targets[0].reshape(512, 512) , cmap='gray');


# In[8]:


# Instantiate the Unet and move it to the GPU.
unet = Unet()
unet.cuda();


# In[9]:


trainer = Trainer(unet)


# In[10]:


# Binary segmentation loss on raw logits (sigmoid applied inside the loss).
criterion = nn.BCEWithLogitsLoss()

示例#27
0
def train(args):
    """Train a Unet on the Sony dataset with an L1 reconstruction loss.

    Periodically writes side-by-side (ground truth | prediction) JPEGs into
    `args.result_dir/<epoch>/` and checkpoints the model state dict into
    `args.checkpoint_dir`.

    Args:
        args: namespace providing input_dir, gt_dir, ps, batch_size, gpu,
            lr, wd, num_epoch, log_interval, save_freq, result_dir,
            model_save_freq and checkpoint_dir.
    """
    # device
    device = torch.device("cuda:%d" %
                          args.gpu if torch.cuda.is_available() else "cpu")

    # data
    trainset = SonyDataset(args.input_dir, args.gt_dir, args.ps)
    train_loader = DataLoader(trainset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=12,
                              pin_memory=True)
    logging.info("data loading okay")

    # model
    model = Unet().to(device)

    # loss function: pixel-wise L1 between prediction and ground truth
    criterion = nn.L1Loss()

    # optimizer
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.wd)

    # lr scheduler: decay the learning rate by 10x every 2000 epochs
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2000, gamma=0.1)

    # training
    running_loss = 0.0
    for epoch in range(args.num_epoch):
        for i, databatch in enumerate(train_loader):
            # get the inputs
            input_patch, gt_patch, train_id, ratio = databatch
            input_patch, gt_patch = input_patch.to(device), gt_patch.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(input_patch)
            loss = criterion(outputs, gt_patch)
            loss.backward()
            optimizer.step()

            # print statistics (mean loss over the last log_interval batches)
            running_loss += loss.item()
            if i % args.log_interval == (args.log_interval - 1):
                print('[%d, %5d] loss: %.3f %s' %
                      (epoch, i, running_loss / args.log_interval,
                       datetime.now()))
                running_loss = 0.0

            # On save epochs, dump a (gt | prediction) comparison image for
            # every batch.  NOTE: this runs once per batch, not once per
            # epoch, matching the original behaviour.
            if epoch % args.save_freq == 0:
                epoch_dir = os.path.join(args.result_dir, '%04d' % epoch)
                if not os.path.isdir(epoch_dir):
                    os.makedirs(epoch_dir)

                gt_patch = gt_patch.cpu().detach().numpy()
                outputs = outputs.cpu().detach().numpy()
                train_id = train_id.numpy()
                ratio = ratio.numpy()

                # Concatenate gt and prediction side by side (channel-first,
                # so axis=2 is image width).
                temp = np.concatenate(
                    (gt_patch[0, :, :, :], outputs[0, :, :, :]), axis=2)
                # BUGFIX: the path was built by string concatenation
                # ('result_dir' + '%04d/...'), which drops the separator and
                # wrote outside the directory created above; join properly.
                scipy.misc.toimage(
                    temp * 255, high=255, low=0, cmin=0,
                    cmax=255).save(
                        os.path.join(epoch_dir, '%05d_00_train_%d.jpg' %
                                     (train_id[0], ratio[0])))

        # Step the LR schedule AFTER the epoch's optimizer updates
        # (calling it first skews the schedule; PyTorch >= 1.1 warns).
        scheduler.step()

        # at the end of epoch: periodic checkpoint of the state dict.
        # BUGFIX: the path was 'checkpoint_dir' + './model_%d.pl', producing
        # names like 'ckpt./model_0.pl'; join properly instead.
        if epoch % args.model_save_freq == 0:
            torch.save(model.state_dict(),
                       os.path.join(args.checkpoint_dir,
                                    'model_%d.pl' % epoch))
# Preallocate volumes for 285 BraTS training subjects across the four MRI
# modalities (t1, t1ce, flair, t2).  Full volumes are 240x240x155; the
# *_small_ arrays hold 144x144 crops, and *_small additionally keeps only
# 100 slices.  NOTE(review): ~8 large float64 arrays of this size need tens
# of GB of RAM -- confirm this is intended.
X_train_t1 = np.zeros((285, 240, 240, 155))
X_train_t1ce = np.zeros((285, 240, 240, 155))
X_train_flair = np.zeros((285, 240, 240, 155))
X_train_t2 = np.zeros((285, 240, 240, 155))
t1_small_ = np.zeros((285, 144, 144, 155))
t1ce_small_ = np.zeros((285, 144, 144, 155))
flair_small_ = np.zeros((285, 144, 144, 155))
t2_small_ = np.zeros((285, 144, 144, 155))
t1_small = np.zeros((285, 144, 144, 100))
t1ce_small = np.zeros((285, 144, 144, 100))
flair_small = np.zeros((285, 144, 144, 100))
t2_small = np.zeros((285, 144, 144, 100))

# Build the Unet on 144x144 inputs with 100 channels (presumably the 100
# axial slices treated as channels -- TODO confirm) and compile with a
# linearly decaying Adam learning rate spread over `epochs` epochs.
input_img = Input((144, 144, 100))
model = Unet(input_img, 16, 0.1, True)
learning_rate = 0.001
epochs = 500
decay_rate = learning_rate / epochs
model.compile(optimizer=Adam(lr=learning_rate, decay=decay_rate), loss='mse')
model.summary()


# data preprocessing starts here
# List all HGG subject folders in sorted order; `data` holds one subject's
# four modalities stacked on the last axis.
path = 'BRATS2017/Brats17TrainingData/HGG'
all_images = os.listdir(path)
# print(len(all_images))
all_images.sort()
data = np.zeros((240, 240, 155, 4))

for i in range(len(all_images)):