def get_model(self) -> Model:
        """Get selected model.

        Returns
        -------
        Model
            Keras model object, compiled.

        """
        # Build the model
        optimizer = RMSprop(self.learning_rate)
        if self.model_type == "unet":
            model = models.unet(self.input_shape, self.classes, optimizer,
                                self.loss)
        elif self.model_type == "unet_large":
            model = models.unet_large(self.input_shape, self.classes,
                                      optimizer, self.loss)
        elif self.model_type == "fcdensenet":
            model = models.fcdensenet(self.input_shape, self.classes,
                                      optimizer, self.loss)
        elif self.model_type == "fcn_small":
            model = models.fcn_small(self.input_shape, self.classes, optimizer,
                                     self.loss)
        model.summary()
        return model
    def get_model(self) -> Model:
        """Get selected model.

        Returns
        -------
        Model
            Keras model object, compiled.

        """
        # Build the model
        optimizer = RMSprop(self.learning_rate)
        if self.model_type == "unet":
            model = models.unet(self.input_shape, self.classes, optimizer,
                                self.loss)
        elif self.model_type == "unet_large":
            model = models.unet_large(self.input_shape, self.classes,
                                      optimizer, self.loss)
        elif self.model_type == "fcdensenet":
            model = models.fcdensenet(self.input_shape, self.classes,
                                      optimizer, self.loss)
        elif self.model_type == "fcn_small":
            model = models.fcn_small(self.input_shape, self.classes, optimizer,
                                     self.loss)
        if self.preload_weights:
            logger.info(
                "=====================================================")
            logger.info(f"Using weights from {self.preload_weights}")
            logger.info(
                "=====================================================")
            model.load_weights(self.preload_weights)
        model.run_eagerly = True
        model.summary()
        return model
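
The if/elif chain above maps a model_type string onto one of four constructors that share the same signature; a minimal table-driven sketch of that dispatch, assuming the same models module (hypothetical helper, not part of the project):

# Hypothetical table-driven variant of the if/elif dispatch above; assumes the
# same models module and the (input_shape, classes, optimizer, loss) signature.
MODEL_BUILDERS = {
    "unet": models.unet,
    "unet_large": models.unet_large,
    "fcdensenet": models.fcdensenet,
    "fcn_small": models.fcn_small,
}

def build_model(model_type, input_shape, classes, optimizer, loss):
    try:
        builder = MODEL_BUILDERS[model_type]
    except KeyError:
        raise ValueError("Unknown model_type: {!r}".format(model_type))
    return builder(input_shape, classes, optimizer, loss)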
Example #3
def train_unet(input_shape, final_channels):
    _model = unet(input_shape,
                  final_channels,
                  use_pooling=False,
                  skip_layers='inception',
                  final_activation='sigmoid')
    _model.load_weights('degan/generator.h5')

    lr = 2e-3
    _model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                   loss='mse',
                   metrics='mae')

    tensorboard = TensorBoard(profile_batch='10, 20')

    model_path = 'unet/unet.h5'

    model_checkpoints = ModelCheckpoint(model_path,
                                        monitor='mae',
                                        save_best_only=True,
                                        verbose=1)

    lr_sch = ReduceLROnPlateau(monitor='mae', factor=0.5, verbose=1)

    callbacks = [lr_sch, tensorboard, model_checkpoints]
    return _model, callbacks
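
A hedged usage sketch of train_unet, assuming a prepared tf.data dataset of (noisy, clean) image pairs named train_ds (not defined in the original):

# Hypothetical usage; INPUT_SHAPE and train_ds are assumed to exist.
_model, callbacks = train_unet(INPUT_SHAPE, final_channels=1)
_model.fit(train_ds, epochs=100, callbacks=callbacks)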
def iterate_over_image_and_evaluate_patchwise_cnn(image_stack,
                                                  model_path,
                                                  out_filename,
                                                  out_meta,
                                                  n_classes,
                                                  tile_size=24):
    model = unet((None, None, 98), n_classes=3, initial_exp=5)
    model.load_weights(model_path)

    print(image_stack.shape)
    image_stack = np.swapaxes(image_stack, 0, 2)
    image_stack = np.expand_dims(image_stack, 0)
    print(image_stack.shape)

    predictions = np.zeros(
        (image_stack.shape[1], image_stack.shape[2], n_classes))
    for i in range(0, image_stack.shape[1] - tile_size, tile_size):
        for j in range(0, image_stack.shape[2] - tile_size, tile_size):
            image_tile = image_stack[:, i:i + tile_size, j:j + tile_size, :]
            if np.all(image_tile == 0):
                continue
            predictions[i:i + tile_size, j:j + tile_size, :] = np.squeeze(
                model.predict(image_tile))
        stdout.write("{:.3f}\r".format(i / image_stack.shape[1]))

    predictions = np.swapaxes(predictions, 0, 2)
    out_meta.update({'count': n_classes, 'dtype': np.float64})
    with rasopen(out_filename, "w", **out_meta) as dst:
        dst.write(predictions)
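
Note that the strided loops above leave a strip of up to tile_size pixels along the right and bottom edges unpredicted (the exclusive range stop skips the last tile even when the dimension is an exact multiple of tile_size); one way to cover it is to clamp the final tile start, as in this sketch (hypothetical helper, not in the original code, assumes each dimension is at least tile_size):

# Hypothetical helper: tile start positions that also cover the last partial
# stride, so the right/bottom border is predicted as well.
def tile_starts(dim, tile_size):
    starts = list(range(0, dim - tile_size + 1, tile_size))
    if not starts or starts[-1] != dim - tile_size:
        starts.append(dim - tile_size)
    return starts

# e.g. replace the two range(...) loops with:
# for i in tile_starts(image_stack.shape[1], tile_size):
#     for j in tile_starts(image_stack.shape[2], tile_size):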
Example #5
def main(args):
    optimizer = Adam(args.learning_rate)
    mdl = models.unet()
    class_weight = 1. * (constants.class_radius[args.label]**
                         2) / (args.crop_size**2) / 2
    mdl.compile(optimizer, lambda x, y: weighted_logloss(x, y, class_weight))
    # mdl.compile(optimizer, jaccard)

    train_generator = RandomRastorGenerator(args.train_dir,
                                            label=args.label,
                                            label_file=args.label_file,
                                            batch_size=args.batch_size,
                                            crop_size=args.crop_size,
                                            transformer=transformer)
    val_generator = RandomRastorGenerator(args.val_dir,
                                          label=args.label,
                                          label_file=args.label_file,
                                          batch_size=args.batch_size,
                                          crop_size=args.crop_size,
                                          transformer=transformer)
    # print 'train_gen size: {}, val_gen size: {}'.format(len(train_generator), len(val_generator))

    sess = K.get_session()
    writer = tf.summary.FileWriter(os.path.join(args.model_dir, 'logs'),
                                   sess.graph)

    best_loss = None
    step = 0
    stop_count = 0
    for epoch in range(args.num_epochs):
        # train
        batch_x, batch_y = train_generator.next()
        loss = mdl.train_on_batch(batch_x, batch_y)
        if step % 25 == 0:
            print('Step {}: Loss = {}'.format(step, loss))

        # write train/val loss summary
        if step % STEPS_PER_VAL == 0:
            train_loss, train_summary = get_summary('train_loss', mdl,
                                                    train_generator)
            val_loss, val_summary = get_summary('val_loss', mdl, val_generator)
            writer.add_summary(train_summary, epoch)
            writer.add_summary(val_summary, epoch)
            print('Train_loss: {}, Val_loss: {}'.format(train_loss, val_loss))

            if (best_loss is None) or (val_loss < best_loss):
                print('New best validation loss. Saving...')
                best_loss = val_loss
                stop_count = 0
                mdl.save(os.path.join(args.model_dir, 'weights.h5'))
            else:
                stop_count += 1

        if args.early_stop > 0 and stop_count >= args.early_stop:
            print('Validation loss did not improve after {} steps. Stopping...'
                  .format(args.early_stop))
            break
        step += 1
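
get_summary is not shown in this example; a hedged sketch of what it might do, assuming it evaluates one batch from the generator and wraps the scalar loss in a TF1-style Summary proto for the FileWriter above (the project's actual helper may differ):

# Hypothetical get_summary; evaluates a single batch and returns the loss plus
# a tf.Summary proto compatible with tf.summary.FileWriter (TF1 API).
def get_summary(tag, mdl, generator):
    batch_x, batch_y = generator.next()
    loss = mdl.evaluate(batch_x, batch_y, verbose=0)
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag,
                                                 simple_value=float(loss))])
    return loss, summary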
Example #6
    def get_model(self) -> Model:
        """Get selected model.

        Returns
        -------
        Model
            Keras model object, compiled.

        """
        # Build the model
        optimizer = RMSprop(self.learning_rate)
        if self.model_type == "unet":
            model = models.unet(
                self.input_shape,
                self.classes,
                self.loss,
            )
        elif self.model_type == "unet_large":
            model = models.unet_large(
                self.input_shape,
                self.classes,
                self.loss,
            )
        elif self.model_type == "fcdensenet":
            model = models.fcdensenet(
                self.input_shape,
                self.classes,
                self.loss,
            )
        elif self.model_type == "fcn_small":
            model = models.fcn_small(self.input_shape, self.classes, self.loss)
        if self.preload_weights:
            logger.info(
                "=====================================================")
            logger.info(f"Using weights from {self.preload_weights}")
            logger.info(
                "=====================================================")
            model.load_weights(self.preload_weights)
            if config.FINE_TUNE_AT > 0:
                for layer in model.layers[:config.FINE_TUNE_AT]:
                    layer.trainable = False
                logger.info(
                    "=====================================================")
                logger.info(f"Fine tuning from %d layer" % config.FINE_TUNE_AT)
                logger.info(
                    "=====================================================")

        model = models.compile_model(model, self.loss, optimizer)

        model.run_eagerly = True
        model.summary()
        return model
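
models.compile_model is project-specific; a minimal sketch of what such a helper could look like, assuming it simply wraps model.compile (the real implementation may add custom metrics or losses):

# Hypothetical stand-in for models.compile_model; the project's version may differ.
def compile_model(model, loss, optimizer, metrics=("accuracy",)):
    model.compile(optimizer=optimizer, loss=loss, metrics=list(metrics))
    return model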
def model_load(model_path):

    # We need to create an SSDLoss object in order to pass that to the model
    #  loader.
    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=0.8)

    K.clear_session()  # Clear previous models from memory.

    model = load_model(os.path.join(model_path, 'ssd300_all.h5'),
                       custom_objects={'AnchorBoxes': AnchorBoxes,
                                       'L2Normalization': L2Normalization,
                                       'compute_loss': ssd_loss.compute_loss})

    model2 = unet()
    model2.load_weights(os.path.join(model_path, "unet_8.hdf5"))
    return model, model2
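
A hedged usage sketch for model_load, assuming a directory that contains both ssd300_all.h5 and unet_8.hdf5 (the directory name is illustrative):

# Hypothetical call; 'saved_models' is an assumed directory.
ssd_model, seg_model = model_load('saved_models')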
Example #8
def train_wgan(input_shape, final_channels):
    generator = unet(input_shape,
                     final_channels,
                     use_pooling=False,
                     skip_layers='inception',
                     final_activation='tanh')
    disc_model = discriminator(INPUT_SHAPE, final_activation='linear')

    # generator.load_weights('degan/generator.h5')
    # discriminator.load_weights('degan/discriminator.h5')

    _model = DEGAN(generator, disc_model)

    gen_decay_rate = 5e-4
    disc_decay_rate = 1e-4
    _model.compile(tf.keras.optimizers.Adam(learning_rate=gen_decay_rate),
                   tf.keras.optimizers.RMSprop(learning_rate=disc_decay_rate),
                   wasserstein_gen_loss_fn, wasserstein_disc_loss_fn)

    gen_lr = CustomReduceLROnPlateau(_model.gen_optimizer,
                                     'gen_lr',
                                     monitor='generator_loss',
                                     patience=200,
                                     factor=RATE_DECAY,
                                     verbose=1)

    disc_lr = CustomReduceLROnPlateau(_model.disc_optimizer,
                                      'disc_lr',
                                      monitor='discriminator_loss',
                                      patience=200,
                                      factor=RATE_DECAY,
                                      verbose=1)

    model_path = 'degan/model_name.h5'

    model_checkpoints = ModelCheckpoint(model_path,
                                        monitor='generator_mae',
                                        save_best_only=True,
                                        verbose=1)

    tensorboard = TensorBoard(profile_batch='10, 20')

    callbacks = [gen_lr, disc_lr, tensorboard, model_checkpoints]
    return _model, callbacks
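
As with train_unet earlier, a hedged usage sketch, assuming a prepared dataset train_ds of (degraded, clean) image pairs and that the DEGAN wrapper implements its own train_step:

# Hypothetical usage; train_ds is an assumed tf.data.Dataset of image pairs.
_model, callbacks = train_wgan(INPUT_SHAPE, final_channels=1)
_model.fit(train_ds, epochs=1000, callbacks=callbacks)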
Example #9
File: test.py  Project: ycpan1597/MRI-AI
def test(args):

    data_loader = get_loader(args.dataset)
    data_path = get_data_path()
    loader = data_loader(data_path, is_transform=True)
    n_classes = loader.n_classes

    # Setup Model
    model = unet(n_classes=n_classes, in_channels=1)
    state = convert_state_dict(torch.load(args.model_path)['model_state'])
    model.load_state_dict(state)
    model.eval()
    model.cuda(0)

    # Shannon and Preston's way of processing test files
    test_dataset = MRI(args.test_root,
                       img_size=(args.img_rows, args.img_cols),
                       mode='test')
    testLoader = DataLoader(test_dataset, batch_size=1)

    for (img, gt) in testLoader:
        img = img.numpy()
        img = img.astype(np.float64)
        # img -= loader.mean
        img -= 128
        img = img.astype(float) / 255.0
        img = np.expand_dims(img, axis=2)
        # NHWC -> NCHW (channels-last to channels-first)
        # N = number of images in the batch, H = height of the image, W = width of the image, C = number of channels of the image
        # https://stackoverflow.com/questions/37689423/convert-between-nhwc-and-nchw-in-tensorflow
        img = img.transpose(2, 0, 1)
        img = np.expand_dims(img, 0)
        img = torch.from_numpy(img).float()

        images = Variable(img.cuda(0), volatile=True)

    print("files are read!")
def train(df,
          gs,
          batch_size=128,
          epochs=1000,
          validation_size=1000,
          test_size=1000):
    train_img_ids = get_training_img_ids(df)

    (x_train_list, y_train_list) = get_train_data(train_img_ids, df, gs)

    model = unet((INPUT_SIZE, INPUT_SIZE, x_train_list[0].shape[2]),
                 len(CLASSES))
    model.compile(optimizer=Adam(),
                  loss=binary_crossentropy,
                  metrics=[jaccard_index])

    (x_val_patches, y_val_patches) = get_patches(x_train_list, y_train_list,
                                                 validation_size)

    for epoch in range(epochs):
        (x_train_patches,
         y_train_patches) = get_patches(x_train_list, y_train_list, batch_size)
        train_scores = model.train_on_batch(x_train_patches, y_train_patches)
        val_scores = eval_model(model, x_val_patches, y_val_patches,
                                batch_size)

        print('[Epoch {}] Loss: {} - Accuracy: {} - Validation Accuracy: {}'.
              format(epoch, train_scores[0], train_scores[1], val_scores[1]))

    (x_test_patches, y_test_patches) = get_patches(x_train_list, y_train_list,
                                                   test_size)
    test_scores = eval_model(model, x_test_patches, y_test_patches, batch_size)

    print('Test Accuracy: {}'.format(test_scores[1]))

    save_model(model)
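
eval_model is not shown; a plausible sketch, assuming it simply wraps model.evaluate so that the train and validation scores come back as [loss, jaccard_index] lists:

# Hypothetical eval_model; the project's helper may differ.
def eval_model(model, x, y, batch_size):
    return model.evaluate(x, y, batch_size=batch_size, verbose=0)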
gpus = tf.config.experimental.list_physical_devices('GPU')

# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

# path to zipped & working directories
path_zip = r'E:\Data\denoising-dirty-documents'

wd = os.path.join(path_zip, 'working')
test_img = sorted(glob(os.path.join(wd, 'test', '*.png')))

INPUT_SHAPE = (256, 256, 1)

model = unet(INPUT_SHAPE, 1,
             use_pooling=False,
             skip_layers='inception',
             final_activation='sigmoid')
model.load_weights('unet/unet.h5')


def crop_img(img, crop_shape):
    img_shape = img.shape
    imgs = []
    points = []
    for y in range(0, img_shape[0], crop_shape[0]):
        for x in range(0, img_shape[1], crop_shape[1]):
            if y + crop_shape[0] >= img_shape[0]:
                y = img_shape[0] - crop_shape[0] - 1
            if x + crop_shape[1] >= img_shape[1]:
                x = img_shape[1] - crop_shape[1] - 1
            _crop_img = img[y: y + crop_shape[1], x: x + crop_shape[0]]
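
The crop_img helper above is cut off mid-body; a self-contained sketch of the tiling idea it appears to implement, returning each crop together with its top-left coordinate so predictions can be stitched back (hypothetical reconstruction, not the original code):

def crop_img_sketch(img, crop_shape):
    # Hypothetical reconstruction: slide a crop_shape window over the image,
    # clamp at the borders, and record each crop with its (y, x) origin.
    imgs, points = [], []
    for y in range(0, img.shape[0], crop_shape[0]):
        for x in range(0, img.shape[1], crop_shape[1]):
            y0 = min(y, img.shape[0] - crop_shape[0])
            x0 = min(x, img.shape[1] - crop_shape[1])
            imgs.append(img[y0:y0 + crop_shape[0], x0:x0 + crop_shape[1]])
            points.append((y0, x0))
    return imgs, points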
Example #12
    valid_test_ds, (args.valid_set, args.test_set))
valid_dl = DataLoader(valid_ds, batch_size=10, shuffle=True)
test_dl = DataLoader(test_ds, batch_size=10, shuffle=False)

# Define the RGB Seg Network network
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

if args.ML_network == 'DENSE-UNet':
    from models import UNet
    model = UNet()
elif args.ML_network == 'ResNet-UNet':
    from models import ResNetUNet
    model = ResNetUNet(3)
elif args.ML_network == 'U-Net':
    from models import unet
    model = unet()

model = model.to(device)


# Set A function for training
def train_model(model, optimizer, num_epochs=25):
    best_model_wts = copy.deepcopy(model.state_dict())
    best_loss = 1e10

    train_loss, valid_loss = [], []

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
Example #13
from config import model_name, n_classes
from models import unet, fcn_8


def sorted_fns(dir):
    return sorted(os.listdir(dir), key=lambda x: int(x.split('.')[0]))


if len(os.listdir('images')) != len(os.listdir('annotated')):
    generate_missing_json()

image_paths = [os.path.join('images', x) for x in sorted_fns('images')]
annot_paths = [os.path.join('annotated', x) for x in sorted_fns('annotated')]

if 'unet' in model_name:
    model = unet(pretrained=False, base=4)
elif 'fcn_8' in model_name:
    model = fcn_8(pretrained=False, base=4)

tg = DataGenerator(image_paths=image_paths,
                   annot_paths=annot_paths,
                   batch_size=3,
                   augment=True)

checkpoint = ModelCheckpoint(os.path.join('models', model_name + '.model'),
                             monitor='dice',
                             verbose=1,
                             mode='max',
                             save_best_only=True,
                             save_weights_only=False,
                             period=10)
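
A hedged sketch of how training might proceed from here, assuming DataGenerator is a keras.utils.Sequence and that the model was compiled with the 'dice' metric the checkpoint monitors:

# Hypothetical training call; the epoch count is illustrative.
model.fit_generator(tg, epochs=100, callbacks=[checkpoint])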
def main():
    prog_name = sys.argv[0]
    args = do_args(sys.argv[1:], prog_name)

    verbose = args.verbose
    output = args.output
    name = args.name

    data_dir = args.data_dir
    training_states = args.training_states
    validation_states = args.validation_states
    superres_states = args.superres_states

    num_epochs = args.epochs
    model_type = args.model_type
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    time_budget = args.time_budget
    loss = args.loss
    do_color_aug = args.color
    do_superres = loss == "superres"

    log_dir = os.path.join(output, name)

    assert os.path.exists(log_dir), "Output directory doesn't exist"

    f = open(os.path.join(log_dir, "args.txt"), "w")
    for k, v in args.__dict__.items():
        f.write("%s,%s\n" % (str(k), str(v)))
    f.close()

    print("Starting %s at %s" % (prog_name, str(datetime.datetime.now())))
    start_time = float(time.time())

    #------------------------------
    # Step 1, load data
    #------------------------------

    training_patches = []
    for state in training_states:
        print("Adding training patches from %s" % (state))
        fn = os.path.join(data_dir, "%s_extended-train_patches.csv" % (state))
        df = pd.read_csv(fn)
        for fn in df["patch_fn"].values:
            training_patches.append((os.path.join(data_dir, fn), state))

    validation_patches = []
    for state in validation_states:
        print("Adding validation patches from %s" % (state))
        fn = os.path.join(data_dir, "%s_extended-val_patches.csv" % (state))
        df = pd.read_csv(fn)
        for fn in df["patch_fn"].values:
            validation_patches.append((os.path.join(data_dir, fn), state))

    print("Loaded %d training patches and %d validation patches" %
          (len(training_patches), len(validation_patches)))

    if do_superres:
        print("Using %d states in superres loss:" % (len(superres_states)))
        print(superres_states)

    #------------------------------
    # Step 2, run experiment
    #------------------------------
    #training_steps_per_epoch = len(training_patches) // batch_size
    #validation_steps_per_epoch = len(validation_patches) // batch_size

    training_steps_per_epoch = 300
    validation_steps_per_epoch = 39

    print("Number of training/validation steps per epoch: %d/%d" %
          (training_steps_per_epoch, validation_steps_per_epoch))

    # Build the model
    optimizer = RMSprop(learning_rate)
    if model_type == "unet":
        model = models.unet((240, 240, 4), 5, optimizer, loss)
    elif model_type == "unet_large":
        model = models.unet_large((240, 240, 4), 5, optimizer, loss)
    elif model_type == "fcdensenet":
        model = models.fcdensenet((240, 240, 4), 5, optimizer, loss)
    elif model_type == "fcn_small":
        model = models.fcn_small((240, 240, 4), 5, optimizer, loss)
    model.summary()

    validation_callback = utils.LandcoverResults(log_dir=log_dir,
                                                 time_budget=time_budget,
                                                 verbose=verbose)
    learning_rate_callback = LearningRateScheduler(utils.schedule_stepped,
                                                   verbose=verbose)

    model_checkpoint_callback = ModelCheckpoint(os.path.join(
        log_dir, "model_{epoch:02d}.h5"),
                                                verbose=verbose,
                                                save_best_only=False,
                                                save_weights_only=False,
                                                period=20)

    training_generator = datagen.DataGenerator(
        training_patches,
        batch_size,
        training_steps_per_epoch,
        240,
        240,
        4,
        do_color_aug=do_color_aug,
        do_superres=do_superres,
        superres_only_states=superres_states)
    validation_generator = datagen.DataGenerator(validation_patches,
                                                 batch_size,
                                                 validation_steps_per_epoch,
                                                 240,
                                                 240,
                                                 4,
                                                 do_color_aug=do_color_aug,
                                                 do_superres=do_superres,
                                                 superres_only_states=[])

    model.fit_generator(
        training_generator,
        steps_per_epoch=training_steps_per_epoch,
        #epochs=10**6,
        epochs=num_epochs,
        verbose=verbose,
        validation_data=validation_generator,
        validation_steps=validation_steps_per_epoch,
        max_queue_size=256,
        workers=4,
        use_multiprocessing=True,
        callbacks=[
            validation_callback,
            #learning_rate_callback,
            model_checkpoint_callback
        ],
        initial_epoch=0)

    #------------------------------
    # Step 3, save models
    #------------------------------
    model.save(os.path.join(log_dir, "final_model.h5"))

    model_json = model.to_json()
    with open(os.path.join(log_dir, "final_model.json"), "w") as json_file:
        json_file.write(model_json)
    model.save_weights(os.path.join(log_dir, "final_model_weights.h5"))

    print("Finished in %0.4f seconds" % (time.time() - start_time))
    del model, training_generator, validation_generator
Example #15
with tf.Graph().as_default(), tf.Session() as sess:

    # placeholders for training data

    phone_ = tf.placeholder(tf.float32, [None, PATCH_SIZE])
    phone_image = tf.reshape(phone_, [-1, PATCH_HEIGHT, PATCH_WIDTH, 3])

    dslr_ = tf.placeholder(tf.float32, [None, PATCH_SIZE])
    dslr_image = tf.reshape(dslr_, [-1, PATCH_HEIGHT, PATCH_WIDTH, 3])

    adv_ = tf.placeholder(tf.float32, [None, 1])

    # get processed enhanced image

    enhanced = models.unet(phone_image)

    # transform both dslr and enhanced images to grayscale

    enhanced_gray = tf.reshape(tf.image.rgb_to_grayscale(enhanced),
                               [-1, PATCH_WIDTH * PATCH_HEIGHT])
    dslr_gray = tf.reshape(tf.image.rgb_to_grayscale(dslr_image),
                           [-1, PATCH_WIDTH * PATCH_HEIGHT])

    # push randomly the enhanced or dslr image to an adversarial CNN-discriminator

    adversarial_ = tf.multiply(enhanced_gray, 1 - adv_) + tf.multiply(
        dslr_gray, adv_)
    adversarial_image = tf.reshape(adversarial_,
                                   [-1, PATCH_HEIGHT, PATCH_WIDTH, 1])
Example #16
import models
from data import gen, test_data
from keras.callbacks import ModelCheckpoint, EarlyStopping

shape = (128, 128)
batch_size = 1

# Load model
print('\n')
print('-' * 30)
print('Loading model...')
print('-' * 30)
model = models.unet(shape, models.res_block_basic, models.Activation('relu'),
                    0, False)
#model = models.get_unet(shape)
#model = models.test_net(shape)
callbacks = [
    EarlyStopping(monitor='val_loss', patience=3, verbose=0),
    ModelCheckpoint('./weights/weights.hdf5',
                    monitor='val_loss',
                    save_best_only=True)
]

# Training
print('\n')
print('-' * 30)
print('Begin training...')
print('-' * 30)
dgen = gen(shape, batch_size)
model.fit_generator(generator=dgen,
                    samples_per_epoch=batch_size * 50,
Example #17
        # defining set of callbacks
        callbacks_list = [checkpoint, csv_logger, earlystopping]

        results = train_model.fit_generator(train_gen,
                                            epochs=epochs,
                                            steps_per_epoch=100,
                                            validation_data=val_gen,
                                            validation_steps=20,
                                            callbacks=callbacks_list)
        return results


if __name__ == '__main__':

    dataset_object = CelebADataset()

    train_gen, val_gen = dataset_object.data_gen(Constant.TRAIN_FRAMES_DIR,
                                                 Constant.TRAIN_MASKS_DIR,
                                                 Constant.VAL_FRAMES_DIR,
                                                 Constant.VAL_MASKS_DIR)

    # define model
    weights_path = 'best_model.h5'
    model = models.unet(pretrained_weights=weights_path)
    results = Runner().train(model,
                             weights_path=weights_path,
                             epochs=15,
                             batch_size=Constant.BATCH_SIZE)
    print(results)
Example #18
def main():
    args = parse_args()

    timestr = time.strftime("%Y%m%d%H%M%S")

    log_dir_parent = args.log_directory
    log_dir = log_dir_parent + "/{}".format(timestr)
    checkpoints_dir = log_dir + "/checkpoints"
    os.mkdir(log_dir)
    os.mkdir(checkpoints_dir)

    optimizer = Adam()

    train = np.load(args.data_directory + "/train.npy")
    valid = np.load(args.data_directory + "/valid.npy")

    callbacks = [
        Logger(
            filename=log_dir + "/{}.log".format(timestr),
            optimizer=optimizer,
            sigma=args.sigma,
            epochs=args.epochs,
            batch_size=args.batch_size,
            dataset_dir=args.data_directory,
            checkpoints_dir=checkpoints_dir,
            noise2noise=args.noise2noise,
            dtype=train.dtype),
        CSVLogger(log_dir + "/{}.csv".format(timestr)),
        TerminateOnNaN(),
        ModelCheckpoint(
            checkpoints_dir + '/checkpoint.' + timestr + \
            '.{epoch:03d}-{val_loss:.3f}-{val_psnr:.5f}.h5',
            monitor='val_psnr',
            mode='max',
            save_best_only=True)
    ]

    # Build PSNR function
    if train.dtype == np.uint8:
        psnr = get_psnr(8)
    elif train.dtype == np.uint16:
        psnr = get_psnr(16)
    elif np.issubdtype(train.dtype, np.floating):
        psnr = get_psnr(1)
    else:
        raise TypeError

    model = unet(shape=(None, None, 1))
    model.compile(optimizer=optimizer, loss='mse', metrics=[psnr])

    # Same noise for all training procedure
    # train_input = NoiseGenerator.add_gaussian_noise(train, args.sigma)
    # if args.noise2noise:
    #     train_target = NoiseGenerator.add_gaussian_noise(train, args.sigma)
    # else:
    #     train_target = train

    # valid_input = NoiseGenerator.add_gaussian_noise(valid, args.sigma)
    # valid_target = valid  # Validation images should be clean

    # hist = model.fit(train_input, train_target,
    #                  epochs=args.epochs,
    #                  batch_size=args.batch_size,
    #                  verbose=args.verbose,
    #                  callbacks=callbacks,
    #                  validation_data=(valid_input, valid_target))

    # New noise for each new batch
    train_generator = NoiseGenerator(data=train,
                                     batch_size=args.batch_size,
                                     sigma=args.sigma,
                                     noise2noise=args.noise2noise)
    valid_generator = NoiseGenerator(data=valid,
                                     batch_size=args.batch_size,
                                     sigma=args.sigma,
                                     noise2noise=False)
    
    model.fit_generator(generator=train_generator,
                        epochs=args.epochs,
                        verbose=args.verbose,
                        callbacks=callbacks,
                        validation_data=valid_generator)
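
get_psnr above builds a PSNR metric matched to the data's dynamic range; a minimal sketch of such a factory, assuming tf.image.psnr and a peak value of 2**bits - 1 (so get_psnr(1) gives max_val 1.0 for float images in [0, 1]); the project's actual helper may differ:

import tensorflow as tf

def get_psnr(bits):
    # Hypothetical PSNR metric factory; max_val = 2**bits - 1.
    max_val = float(2 ** bits - 1)

    def psnr(y_true, y_pred):
        return tf.image.psnr(y_true, y_pred, max_val=max_val)

    psnr.__name__ = 'psnr'
    return psnr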
Example #19
def main(epochs=100, batch_size=32):
    train_df = pd.read_csv(TRAIN_DF_PATH)

    imgs = []
    masks = []
    coverage_classes = []

    print('Loading training data')
    for img_id in tqdm(train_df.id):
        img_path = os.path.join(TRAIN_IMGS_DIR, '{}.png'.format(img_id))
        img = np.array(load_img(img_path, color_mode='grayscale')) / 255
        imgs.append(img)

        mask_path = os.path.join(TRAIN_MASKS_DIR, '{}.png'.format(img_id))
        mask = np.array(load_img(mask_path, color_mode='grayscale')) / 255
        masks.append(mask)

        coverage = np.sum(mask) / (IMG_SIZE * IMG_SIZE)
        coverage_class = coverage_to_class(coverage)
        coverage_classes.append(coverage_class)
    print('Training data loaded')

    x_train, x_val, y_train, y_val = train_test_split(
        np.array(list(map(upsample, imgs))).reshape(-1, IMG_MODEL_SIZE,
                                                    IMG_MODEL_SIZE, 1),
        np.array(list(map(upsample, masks))).reshape(-1, IMG_MODEL_SIZE,
                                                     IMG_MODEL_SIZE, 1),
        test_size=0.2,
        stratify=coverage_classes,
    )

    x_train, y_train = data_augmentation(x_train, y_train)

    x_train = np.repeat(x_train, 3, axis=3)
    x_val = np.repeat(x_val, 3, axis=3)

    model = unet((IMG_MODEL_SIZE, IMG_MODEL_SIZE, 3))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=[iou_metric])

    result_dir = os.path.join(RESULTS_DIR, str(time.time()))
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    model_output_path = os.path.join(result_dir, 'unet_resnet50.model')
    raw_output_path = os.path.join(result_dir, 'history.pickle')
    plot_output_path = os.path.join(result_dir, 'plot.png')

    model_checkpoint = ModelCheckpoint(model_output_path,
                                       monitor='val_iou_metric',
                                       mode='max',
                                       save_best_only=True,
                                       verbose=1)

    history = model.fit(x_train,
                        y_train,
                        validation_data=[x_val, y_val],
                        epochs=epochs,
                        batch_size=batch_size,
                        callbacks=[model_checkpoint],
                        shuffle=True,
                        verbose=1)

    save_history_raw(history, raw_output_path)
    save_history_plot(history, plot_output_path)
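
upsample and coverage_to_class come from the project; a hedged sketch of what upsample might do, assuming a plain resize from IMG_SIZE to IMG_MODEL_SIZE with skimage (the real helper may differ):

from skimage.transform import resize

def upsample(img):
    # Hypothetical upsample: resize a (IMG_SIZE, IMG_SIZE) array to the
    # model's input resolution, keeping the original value range.
    return resize(img, (IMG_MODEL_SIZE, IMG_MODEL_SIZE),
                  mode='constant', preserve_range=True)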
Example #20
experiment_name = 'experiment name'  #set experiment name
save_folder = os.path.join('results', d, experiment_name)
if not os.path.exists(save_folder):
    os.makedirs(save_folder)

# define callbacks
checkpointer = ModelCheckpoint(os.path.join(save_folder, 'best_model.h5'),
                               monitor='val_loss',
                               verbose=1,
                               save_best_only=True)

csv_logger = CSVLogger(os.path.join(save_folder, 'log.csv'),
                       append=True,
                       separator=';')

early_stopper = EarlyStopping(monitor='val_loss', patience=10)

callbacks_list = [checkpointer, csv_logger, early_stopper]

#build model and compile
model = unet(input_shape=(ih, iw, 3))
model.compile(loss=loss, optimizer=opt)

history = model.fit(train_gen,
                    epochs=n_epochs,
                    steps_per_epoch=(tstop // batch_size),
                    validation_data=val_gen,
                    validation_steps=((vstop - vstart) // batch_size),
                    callbacks=callbacks_list,
                    verbose=1)
Example #21
#Try more or less layers
#Try w and w/o Batchnormalization()

#-Fit-----------------------------------------------------------------------
epochs = 100
batch_size = 1
callbacks = [
    EarlyStopping(patience=16, verbose=1),
    ReduceLROnPlateau(factor=0.1, patience=5, min_delta=0.001,
                      min_lr=0.0000001, verbose=1),
    ModelCheckpoint('modelName.h5', verbose=1, save_best_only=True,
                    save_weights_only=False),
    CSVLogger('modelName.log'),
]  # DON'T OVERWRITE
# note: "module 'h5py' has no attribute 'Group'" is raised here if the target folder does not exist

#-----------------------------------------------------------------------------

model = unet(pretrained_weights = pretrained_weights, input_size = input_size, weights = weights,\
         activation=activation, dropout=dropout, loss=loss, optimizer=optimizer, dilation_rate=dilation_rate)

# for an array dataset of shape (n_img, size, size), a channel dimension must be added before training
images_all = images_original[..., np.newaxis]
test_images_all = test_images_original[..., np.newaxis]
# normalize (not very effective if, e.g., one image's maximum is 35000 and another's is 15000: the background, and everything else, ends up scaled differently)
# normalization_value = 255
# images_all = images_all/images_all.max()*normalization_value
# test_images_all = test_images_all/test_images_original.max()*normalization_value

results = model.fit(images_all, masks, validation_split=split, epochs=epochs, batch_size=batch_size,\
                      callbacks=callbacks, shuffle=True)
print('Model correctly trained and saved')

plt.figure(figsize=(8, 8))
plt.grid(False)
Example #22
im_size = (512, 512, 1)
n_classes = 3
dataset_name = os.path.join('.', 'data', 'liver_dataset.npz')

# preprocess data if necessary
if not os.path.isfile(dataset_name):
    data_preprocessing.preprocess_data_folder(os.path.join('.', 'data'))

# load data
dataset = np.load(dataset_name)
x, y, xv, yv = dataset['x'], dataset['y'], dataset['xv'], dataset['yv']
m, s = dataset['m'], dataset['s']

model = unet(im_size=im_size,
             output_size=n_classes,
             n_blocks=6,
             n_convs=1,
             n_filters=8)

# training parameters
bs = 4
epochs = 6

# train
model.m = m
model.s = s
history = model.fit(x,
                    y,
                    batch_size=bs,
                    epochs=epochs,
                    validation_data=(xv, yv),
Example #23
                                epoch_size=cfg.epoch_size // cfg.batch_size,
                                rotation=cfg.aug_rotation,
                                scaling=cfg.aug_scaling)
test_generator = DataGenerator(cfg.test_datasets,
                               batch_size=cfg.batch_size,
                               subframe_size=cfg.subframe_size,
                               normalize_subframes=cfg.normalize_subframes,
                               epoch_size=cfg.epoch_size // cfg.batch_size,
                               rotation=cfg.aug_rotation,
                               scaling=cfg.aug_scaling)

# create model
# model = models.unet(train_generator.shape_X[1:], train_generator.shape_y[-1], filters=cfg.filters)
model = models.unet((None, None, train_generator.shape_X[-1]),
                    train_generator.shape_y[-1],
                    filters=cfg.filters,
                    kernel_initializer='glorot_normal',
                    batch_normalization=cfg.batch_normalization,
                    high_pass_sigma=cfg.high_pass_sigma)


# get predictions for single batch
def save_prediction_imgs(generator, model_in, folder):
    X, y = generator[0]
    y_pred = model_in.predict(X)
    for i in range(X.shape[0]):
        file = os.path.join(folder, 'prediction%i.png' % i)
        utils.save_prediction_img(file,
                                  X[i],
                                  y[i],
                                  y_pred[i],
                                  X_contrast=(0, 100))
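
A hedged usage example of the helper above, assuming an existing output directory (the name is illustrative):

# Hypothetical call writing one PNG per sample of the first test batch.
save_prediction_imgs(test_generator, model, 'predictions')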
Example #24
res_sizes = utils.get_resolutions()

# get the specified image resolution
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_SIZE = utils.get_specified_res(
    res_sizes, phone, resolution)

# disable gpu if specified
config = tf.ConfigProto(
    device_count={'GPU': 0}) if use_gpu == "false" else None

# create placeholders for input images
x_ = tf.placeholder(tf.float32, [None, IMAGE_SIZE])
x_image = tf.reshape(x_, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 3])

# generate enhanced image
enhanced = unet(x_image)

with tf.Session(config=config) as sess:

    test_dir = dped_dir
    test_photos = [
        f for f in os.listdir(test_dir) if os.path.isfile(test_dir + f)
    ]

    if test_subset == "small":
        # use five first images only
        test_photos = test_photos[0:5]

    if phone.endswith("_orig"):

        # load pre-trained model
Example #25
File: main.py  Project: bbastardes/CM2003
    img_train, img_val, mask_train, mask_val = train_test_split(
        images, masks, test_size=0.2, random_state=42)

    # hyperparameters
    batch_size = 8
    seed = 1
    lr = 0.0001
    n_epochs = 100
    size = (240, 240, 1)

    # call model, compile and fit
    Unet_Model = unet(Base=16,
                      size=(240, 240, 1),
                      batch_norm=True,
                      dropout=True,
                      drop_r=0.5,
                      spatial_drop=False,
                      spatial_drop_r=0.1,
                      multi_class=False,
                      classes=1)
    Unet_Model.compile(optimizer=Adam(lr=lr),
                       loss=dice_coef_loss,
                       metrics=[
                           dice_coef,
                           tf.keras.metrics.Precision(),
                           tf.keras.metrics.Recall()
                       ])
    History = Unet_Model.fit(img_train,
                             mask_train,
                             batch_size=batch_size,
                             epochs=n_epochs,
Example #26
for i in range(20000):
    np.random.shuffle(training_patches)
    np.random.shuffle(validation_patches)

training_steps_per_epoch = 300
validation_steps_per_epoch = 25

print("Number of training/validation steps per epoch: %d/%d" %
      (training_steps_per_epoch, validation_steps_per_epoch))

# In[ ]:

import models
optimizer = RMSprop(learning_rate)
model = models.unet((256, 256, 4), 8, optimizer, loss)
#model.summary()

# In[ ]:

import datagen
training_generator = datagen.DataGenerator(training_patches, batch_size,
                                           training_steps_per_epoch, 256, 256,
                                           4)
validation_generator = datagen.DataGenerator(validation_patches, batch_size,
                                             validation_steps_per_epoch, 256,
                                             256, 4)

# In[ ]:

model_checkpoint_callback = ModelCheckpoint(os.path.join(
x_val = read.load_data_npy("/cmach-data/segthor/Val/")
y_val = read.load_label_npy("/cmach-data/segthor/Val/")
# x_test = read.load_test_data("/cmach-data/segthor/test/", 41, 60)
print(len(x_train), len(x_val))

# preprocess input
# x_train = preprocess_input(x_train)
# x_val = preprocess_input(x_val)

# define model
# model = Unet(BACKBONE, input_shape=(None, None, 1), classes=5, encoder_weights=None)
# model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])

# model = load_model('unet_membrane.hdf5')

model = models.unet()
model.compile('Adam', loss=bce_jaccard_loss, metrics=[iou_score])

model_checkpoint = ModelCheckpoint('ordjcd50.hdf5',
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True)

# fit model
model.fit(x=x_train,
          y=y_train,
          batch_size=6,
          epochs=50,
          validation_data=(x_val, y_val),
          callbacks=[model_checkpoint])
Example #28
import matplotlib.pyplot as plt
import matplotlib.image as im

import numpy as np

# Deets and parameters
nc_in = 1
nc_out = 1
ngf = 64

loadSize = 286
imageSize = 128
batchSize = 16
lrG = 2e-4

model = unet(imageSize, nc_in, nc_out, ngf)
model.summary()

# use (model.trainable = False) to freeze weights

# test base unet without discriminator version first

model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['accuracy'])
#model.fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None)  # starts training

img = im.imread('./datasets/facades/train/1.jpg')
img = np.float32(img)
img = np.array(img) / 255 * 2 - 1
plt.ion()
Example #29
    mask_subdirectory="label",
    batch_size=validation_batchsize,
    seed=seed
)
print("\nTest dataset statistics:")
test_generator = ImageDataGenerator(preprocessing_function=image_preprocessing).flow_from_directory(
    test_dir,
    target_size=(height, width),
    color_mode='rgb',
    classes=["."],
    class_mode=None,
    batch_size=test_batchsize,
    shuffle=False
)

model = unet(**unet_args)
if use_custom_losses is True:
    loss = image_categorical_crossentropy if background_as_class is True else image_binary_crossentropy
else:
    loss = "categorical_crossentropy" if background_as_class is True else "binary_crossentropy"
model.compile(optimizer=optimizer, loss=loss, metrics=["acc"])
model.summary()

callbacks = [
    callbacks.ModelCheckpoint(os.path.join(weights_dir, 'best_loss.hdf5'), monitor="loss",
                              verbose=1, save_best_only=True, save_weights_only=False),
    callbacks.ModelCheckpoint(os.path.join(weights_dir, 'best_acc.hdf5'), monitor="acc",
                              verbose=1, save_best_only=True, save_weights_only=False),
    callbacks.ModelCheckpoint(os.path.join(weights_dir, 'best_val_loss.hdf5'), monitor="val_loss",
                              verbose=1, save_best_only=True, save_weights_only=False),
    callbacks.ModelCheckpoint(os.path.join(weights_dir, 'best_val_acc.hdf5'), monitor="val_acc",