Example #1
import os

from sklearn.model_selection import train_test_split
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam

# Unet, SegNet, DataGenerator and get_data_paths are this project's own modules.


def main():
    # source images are 1936 x 1216; the network input is resized to 320 x 480
    input_size = (320, 480, 3)
    classes = 20
    train_dataset_x = '../seg_train_images/seg_train_images'
    train_dataset_y = '../seg_train_annotations/seg_train_annotations'
    test_size = 0.2
    batch_size = 8

    datasets_paths = get_data_paths(train_dataset_x, train_dataset_y)
    train_data, test_data = train_test_split(datasets_paths, test_size=test_size)
    net = Unet(input_size, classes)
    #net = SegNet(input_size, classes)
    net.summary()
    train_gen = DataGenerator(train_data, input_size, classes, batch_size)
    val_gen = DataGenerator(test_data, input_size, classes, batch_size)

    # ModelCheckpoint needs the target directory to exist
    os.makedirs('checkpoint', exist_ok=True)
    callbacks = [
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1),
        EarlyStopping(monitor='val_loss', min_delta=0, patience=9, verbose=1),
        ModelCheckpoint('checkpoint/ep{epoch:03d}-loss{loss:.5f}-val_loss{val_loss:.5f}.h5',
                        monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    ]

    # first stage: train at lr = 1e-3
    net.compile(optimizer=Adam(1e-3), loss=categorical_crossentropy)
    history = net.fit_generator(
        train_gen, 
        steps_per_epoch=train_gen.num_batches_per_epoch,
        validation_data=val_gen,
        validation_steps=val_gen.num_batches_per_epoch,
        initial_epoch=0,
        epochs=50,
        callbacks=callbacks
    )
    net.save_weights('checkpoint/first_stage.h5')

    # second stage: re-split the data and fine-tune at a lower learning rate
    train_data, test_data = train_test_split(datasets_paths, test_size=test_size)
    train_gen = DataGenerator(train_data, input_size, classes, batch_size)
    val_gen = DataGenerator(test_data, input_size, classes, batch_size)

    net.compile(optimizer=Adam(1e-4), loss=categorical_crossentropy)
    history = net.fit_generator(
        train_gen, 
        steps_per_epoch=train_gen.num_batches_per_epoch,
        validation_data=val_gen,
        validation_steps=val_gen.num_batches_per_epoch,
        initial_epoch=50,
        epochs=100,
        callbacks=callbacks
    )
    net.save_weights('checkpoint/final_stage.h5')


if __name__ == '__main__':
    main()
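
Example #1 leans on four project-local helpers the snippet never defines: Unet, SegNet, get_data_paths and DataGenerator. The training loop only needs DataGenerator to behave like a keras.utils.Sequence exposing a num_batches_per_epoch attribute, so a minimal sketch of that interface (the loading logic below is a placeholder, not the project's actual code) could look like:

import math
import numpy as np
from keras.utils import Sequence

class DataGenerator(Sequence):
    """Sketch of the interface main() relies on; real I/O and augmentation omitted."""
    def __init__(self, data_paths, input_size, classes, batch_size):
        self.data_paths = data_paths      # e.g. (image_path, annotation_path) pairs
        self.input_size = input_size      # (height, width, channels)
        self.classes = classes
        self.batch_size = batch_size
        self.num_batches_per_epoch = math.ceil(len(data_paths) / batch_size)

    def __len__(self):
        return self.num_batches_per_epoch

    def __getitem__(self, idx):
        batch = self.data_paths[idx * self.batch_size:(idx + 1) * self.batch_size]
        h, w, c = self.input_size
        # placeholders: a real implementation reads, resizes and one-hot encodes here
        x = np.zeros((len(batch), h, w, c), dtype=np.float32)
        y = np.zeros((len(batch), h, w, self.classes), dtype=np.float32)
        return x, y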
Example #2
                                                      batch_size=batch_size,
                                                      seed=1)

    # pair image batches with mask batches; both generators were created with
    # the same seed, so their augmented batches stay aligned
    train_generator = zip(image_generator, mask_generator)
    for (img, mask) in train_generator:
        img = img / 255.
        mask = mask / 255.
        # binarize the mask: values above 0.5 become foreground
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
        yield (img, mask)
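
The generator above is cut off at the top: the calls that build image_generator and mask_generator are missing. The usual pattern behind zipping two streams with the same seed is a pair of ImageDataGenerator.flow_from_directory calls; a sketch of that setup, with the subfolder names and augmentation settings as assumptions:

from keras.preprocessing.image import ImageDataGenerator

def make_train_generators(train_path, batch_size):
    # identical configs and an identical seed keep the two streams aligned,
    # so batch i of images matches batch i of masks even after augmentation
    image_datagen = ImageDataGenerator(horizontal_flip=True)
    mask_datagen = ImageDataGenerator(horizontal_flip=True)
    image_generator = image_datagen.flow_from_directory(train_path,
                                                        classes=["image"],  # assumed subfolder
                                                        class_mode=None,
                                                        color_mode="grayscale",
                                                        target_size=(512, 512),
                                                        batch_size=batch_size,
                                                        seed=1)
    mask_generator = mask_datagen.flow_from_directory(train_path,
                                                      classes=["mask"],     # assumed subfolder
                                                      class_mode=None,
                                                      color_mode="grayscale",
                                                      target_size=(512, 512),
                                                      batch_size=batch_size,
                                                      seed=1)
    return image_generator, mask_generator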


import os
import cv2

# Unet and DataGenerator come from this project's own modules; this assumes
# Unet() returns an already-compiled model, otherwise call model.compile first
model = Unet(1, image_size=512)
trainset = DataGenerator("dataset/train", batch_size=2)
model.fit_generator(trainset, steps_per_epoch=100, epochs=1)
model.save_weights("model.h5")

testSet = DataGenerator("dataset/test", batch_size=1)
alpha = 0.3
model.load_weights("model.h5")
if not os.path.exists("./results"):
    os.mkdir("./results")

for idx, (img, mask) in enumerate(testSet):
    oring_img = img[0]
    # run the model and take the prediction for the single image in the batch
    pred_mask = model.predict(img)[0]

    # binarize the prediction with the same 0.5 threshold used during training
    pred_mask[pred_mask > 0.5] = 1
    pred_mask[pred_mask <= 0.5] = 0
    # if the prediction shown here is all black, tune the learning rate and
    # check that the input images have a bit depth of 8
    # cv2.imshow('pred_mask', pred_mask)
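    # The original snippet ends mid-loop. Given the `alpha` defined earlier and
    # the ./results directory created above, a plausible ending blends the mask
    # over the input and writes both to disk. This continuation is an assumption,
    # and it presumes the test generator scales images to [0, 1] like training.
    blended = oring_img * (1 - alpha) + pred_mask * alpha
    cv2.imwrite("./results/%d_pred.png" % idx, (pred_mask * 255).astype("uint8"))
    cv2.imwrite("./results/%d_blend.png" % idx, (blended * 255).astype("uint8"))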