Example #1
def main():
    # source images are 1936 x 1216; the model is trained at the input size below
    input_size = (320, 480, 3)
    classes = 20
    train_dataset_x = '../seg_train_images/seg_train_images'
    train_dataset_y = '../seg_train_annotations/seg_train_annotations'
    test_size = 0.2
    batch_size = 8

    datasets_paths = get_data_paths(train_dataset_x, train_dataset_y)
    train_data, test_data = train_test_split(datasets_paths, test_size=test_size)
    net = Unet(input_size, classes)
    #net = SegNet(input_size, classes)
    net.summary()
    train_gen = DataGenerator(train_data, input_size, classes, batch_size)
    val_gen = DataGenerator(test_data, input_size, classes, batch_size)

    callbacks = [
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1),
        EarlyStopping(monitor='val_loss', min_delta=0, patience=9, verbose=1),
        ModelCheckpoint('checkpoint/ep{epoch:03d}-loss{loss:.5f}-val_loss{val_loss:.5f}.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    ]

    net.compile(optimizer=Adam(1e-3), loss=categorical_crossentropy)
    history = net.fit_generator(
        train_gen, 
        steps_per_epoch=train_gen.num_batches_per_epoch,
        validation_data=val_gen,
        validation_steps=val_gen.num_batches_per_epoch,
        initial_epoch=0,
        epochs=50,
        callbacks=callbacks
    )
    net.save_weights('checkpoint/first_stage.h5')

    # second stage: re-split the data, rebuild the generators and fine-tune with a lower learning rate
    train_data, test_data = train_test_split(datasets_paths, test_size=test_size)
    train_gen = DataGenerator(train_data, input_size, classes, batch_size)
    val_gen = DataGenerator(test_data, input_size, classes, batch_size)

    net.compile(optimizer=Adam(1e-4), loss=categorical_crossentropy)
    history = net.fit_generator(
        train_gen, 
        steps_per_epoch=train_gen.num_batches_per_epoch,
        validation_data=val_gen,
        validation_steps=val_gen.num_batches_per_epoch,
        initial_epoch=50,
        epochs=100,
        callbacks=callbacks
    )
    net.save_weights('checkpoint/final_stage.h5')
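
The snippet above omits its imports. Assuming the standalone Keras API and scikit-learn (Unet, SegNet, DataGenerator and get_data_paths are project-local helpers and are not covered here), the framework-level imports would likely be:

# assumed framework imports for the example above (project-local helpers not included)
from sklearn.model_selection import train_test_split
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
from keras.losses import categorical_crossentropy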
Example #2
File: main.py  Project: daniellozuz/chaos
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"

Environment(training=[2, 5, 6, 10, 14, 16, 19],
            testing=[1, 8, 18],
            training_every=10,
            testing_every=10).configure()

train_gene = TrainGenerator(batch_size=9)

model = Unet().create()
checkpoint = ModelCheckpoint('weights/{epoch:03d}.liver_weights.hdf5',
                             monitor='loss',
                             verbose=1,
                             period=1)
plateau = ReduceLROnPlateau(factor=0.2,
                            min_delta=1e-8,
                            mode='min',
                            monitor='loss',
                            patience=10,
                            cooldown=20,
                            verbose=1)

model.fit_generator(train_gene,
                    epochs=4,
                    callbacks=[checkpoint, plateau],
                    validation_data=ValidationGenerator())

Predictor().save()
Measurer().show_metrics()
Visualizer().show_animation()
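
TrainGenerator, ValidationGenerator, Environment, Predictor, Measurer and Visualizer are classes from the daniellozuz/chaos project and are not shown here. As a rough, hypothetical illustration of the pattern only, a batch generator that fit_generator can consume is typically a keras.utils.Sequence subclass along these lines (names, shapes and in-memory data are assumptions, not the project's actual code):

import numpy as np
from keras.utils import Sequence

class SliceGenerator(Sequence):
    # Hypothetical stand-in for TrainGenerator/ValidationGenerator: serves
    # (images, masks) batches from in-memory arrays of shape (N, H, W, 1).
    def __init__(self, images, masks, batch_size=9):
        self.images = images
        self.masks = masks
        self.batch_size = batch_size

    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(len(self.images) / self.batch_size))

    def __getitem__(self, idx):
        sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.images[sl], self.masks[sl]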
Example #3
File: train.py  Project: W0207/Test
                                                      class_mode=None,
                                                      batch_size=batch_size,
                                                      seed=1)

    train_generator = zip(image_generator, mask_generator)
    for (img, mask) in train_generator:
        img = img / 255.
        mask = mask / 255.
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
        yield (img, mask)
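
The top of this snippet is cut off: the image_generator and mask_generator being zipped above are presumably created with Keras ImageDataGenerator.flow_from_directory, sharing the same seed so images and masks stay aligned. A hedged reconstruction of that setup (directory layout, target size and color mode are assumptions):

# hedged reconstruction of the truncated generator setup above
from keras.preprocessing.image import ImageDataGenerator

batch_size = 2  # assumption, matching the DataGenerator batch size used below
image_datagen = ImageDataGenerator()
mask_datagen = ImageDataGenerator()
image_generator = image_datagen.flow_from_directory("dataset/train",
                                                    classes=["image"],
                                                    class_mode=None,
                                                    color_mode="grayscale",
                                                    target_size=(512, 512),
                                                    batch_size=batch_size,
                                                    seed=1)
mask_generator = mask_datagen.flow_from_directory("dataset/train",
                                                  classes=["label"],
                                                  class_mode=None,
                                                  color_mode="grayscale",
                                                  target_size=(512, 512),
                                                  batch_size=batch_size,
                                                  seed=1)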


model = Unet(1, image_size=512)
trainset = DataGenerator("dataset/train", batch_size=2)
model.fit_generator(trainset, steps_per_epoch=100, epochs=1)
model.save_weights("model.h5")

testSet = DataGenerator("dataset/test", batch_size=1)
alpha = 0.3
model.load_weights("model.h5")
if not os.path.exists("./results"): os.mkdir("./results")

for idx, (img, mask) in enumerate(testSet):
    orig_img = img[0]
    # run the trained model to predict a mask for this image
    pred_mask = model.predict(img)[0]

    pred_mask[pred_mask > 0.5] = 1
    pred_mask[pred_mask <= 0.5] = 0
    # if the prediction shown here is all black, adjust the learning rate and check that the image bit depth is 8
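    # --- hypothetical continuation (the example is cut off here; this is NOT the
    # original author's code). One plausible use of the alpha and ./results set up
    # above: blend the predicted mask over the input image and save the overlay.
    import cv2                                             # would normally live at the top of the file
    overlay = (pred_mask * 255).astype("uint8")            # binary mask -> 8-bit image
    base = (orig_img * 255).astype("uint8")                # assumes inputs were scaled to [0, 1]
    blended = cv2.addWeighted(base, 1 - alpha, overlay, alpha, 0)
    cv2.imwrite("./results/%03d.png" % idx, blended)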
Example #4
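This snippet is also truncated at the top: train_dir, validation_dir, data_gen_args and valid_data_gen_args are defined earlier in the file and not shown. In this style of pipeline, data_gen_args is presumably a dict of keras.preprocessing.image.ImageDataGenerator augmentation keyword arguments consumed by load_data.trainGenerator; a hypothetical example (values are assumptions, not the file's actual settings):

# hypothetical augmentation settings, not the original file's values
data_gen_args = dict(rotation_range=10,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     zoom_range=0.05,
                     horizontal_flip=True,
                     fill_mode='nearest')
valid_data_gen_args = dict()  # validation data is typically left unaugmented
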
    input_size = (512, 512, 1)
    batch = 8
    model = Unet(input_size=input_size)
    model.summary()
    target_size = input_size[:2]  # first two entries: (height, width)
    train_generator = load_data.trainGenerator(batch,
                                               train_dir,
                                               data_gen_args,
                                               image_folder='image',
                                               mask_folder='label',
                                               target_size=target_size)
    valid_generator = load_data.trainGenerator(1,
                                               validation_dir,
                                               valid_data_gen_args,
                                               image_folder='image',
                                               mask_folder='label',
                                               target_size=target_size)
    history = LossHistory()
    model.fit_generator(
        train_generator,
        steps_per_epoch=len(glob.glob('trainset_2d/train/image/*.png')) // batch,
        validation_data=valid_generator,
        validation_steps=1,
        epochs=30,
        verbose=1,
        callbacks=[history],
    )

    #history.loss_plot('epoch')
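
LossHistory is a custom callback that is not defined in this snippet. A minimal sketch, assuming it simply records losses during training and exposes the loss_plot method referenced in the commented-out line above (not the project's actual implementation):

import matplotlib.pyplot as plt
from keras.callbacks import Callback

class LossHistory(Callback):
    # records training/validation loss so it can be plotted after training
    def on_train_begin(self, logs=None):
        self.losses = {'batch': [], 'epoch': []}
        self.val_losses = {'epoch': []}

    def on_batch_end(self, batch, logs=None):
        self.losses['batch'].append((logs or {}).get('loss'))

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.losses['epoch'].append(logs.get('loss'))
        self.val_losses['epoch'].append(logs.get('val_loss'))

    def loss_plot(self, loss_type='epoch'):
        # plot the recorded loss curve(s)
        iters = range(len(self.losses[loss_type]))
        plt.plot(iters, self.losses[loss_type], label='train loss')
        if loss_type == 'epoch':
            plt.plot(iters, self.val_losses['epoch'], label='val loss')
        plt.xlabel(loss_type)
        plt.ylabel('loss')
        plt.legend()
        plt.show()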