Example #1
def main():
    # source images are 1936 x 1216
    input_size = (320, 480, 3)
    classes = 20
    train_dataset_x = '../seg_train_images/seg_train_images'
    train_dataset_y = '../seg_train_annotations/seg_train_annotations'
    test_size = 0.2
    batch_size = 8

    datasets_paths = get_data_paths(train_dataset_x, train_dataset_y)
    train_data, test_data = train_test_split(datasets_paths, test_size=test_size)
    net = Unet(input_size, classes)
    #net = SegNet(input_size, classes)
    net.summary()
    train_gen = DataGenerator(train_data, input_size, classes, batch_size)
    val_gen = DataGenerator(test_data, input_size, classes, batch_size)

    callbacks = [
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1),
        EarlyStopping(monitor='val_loss', min_delta=0, patience=9, verbose=1),
        ModelCheckpoint('checkpoint/ep{epoch:03d}-loss{loss:.5f}-val_loss{val_loss:.5f}.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    ]

    net.compile(optimizer=Adam(1e-3), loss=categorical_crossentropy)
    history = net.fit_generator(
        train_gen, 
        steps_per_epoch=train_gen.num_batches_per_epoch,
        validation_data=val_gen,
        validation_steps=val_gen.num_batches_per_epoch,
        initial_epoch=0,
        epochs=50,
        callbacks=callbacks
    )
    net.save_weights('checkpoint/first_stage.h5')

    # Second stage: re-split the data, rebuild the generators, and fine-tune
    # with a lower learning rate.
    train_data, test_data = train_test_split(datasets_paths, test_size=test_size)
    train_gen = DataGenerator(train_data, input_size, classes, batch_size)
    val_gen = DataGenerator(test_data, input_size, classes, batch_size)

    net.compile(optimizer=Adam(1e-4), loss=categorical_crossentropy)
    history = net.fit_generator(
        train_gen, 
        steps_per_epoch=train_gen.num_batches_per_epoch,
        validation_data=val_gen,
        validation_steps=val_gen.num_batches_per_epoch,
        initial_epoch=50,
        epochs=100,
        callbacks=callbacks
    )
    net.save_weights('checkpoint/final_stage.h5')
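
get_data_paths, Unet, SegNet and DataGenerator come from project modules that are not shown in this snippet. Below is a minimal sketch of what the DataGenerator could look like, assuming it is a keras.utils.Sequence, that each entry of the path list is an (image_path, mask_path) pair, and that input_size is (height, width, channels); the class name is taken from the snippet, but all preprocessing details are illustrative only.

import math
import numpy as np
import cv2
from tensorflow.keras.utils import Sequence, to_categorical

class DataGenerator(Sequence):
    def __init__(self, data_paths, input_size, classes, batch_size):
        self.data_paths = data_paths
        self.input_size = input_size
        self.classes = classes
        self.batch_size = batch_size
        self.num_batches_per_epoch = math.ceil(len(data_paths) / batch_size)

    def __len__(self):
        return self.num_batches_per_epoch

    def __getitem__(self, idx):
        batch = self.data_paths[idx * self.batch_size:(idx + 1) * self.batch_size]
        h, w, _ = self.input_size
        images, masks = [], []
        for img_path, mask_path in batch:
            # Resize the image and the per-pixel class-index mask to the network input size.
            img = cv2.resize(cv2.imread(img_path), (w, h)).astype(np.float32) / 255.0
            mask = cv2.resize(cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE), (w, h),
                              interpolation=cv2.INTER_NEAREST)
            images.append(img)
            # One-hot encode the mask so it matches the categorical_crossentropy loss.
            masks.append(to_categorical(mask, num_classes=self.classes))
        return np.array(images), np.array(masks, dtype=np.float32)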
Example #2
        type=float,
        default=0.0001,
        help="learning rate",
    )
    parser.add_argument(
        "--logs",
        type=str,
        default="./logs",
        help="Log folder",
    )
    args = parser.parse_args()

    model = Unet()
    model.build(input_shape=(None, 256, 256, 3))
    model.compile(optimizer=optimizers.Adam(lr=args.lr),
                  loss=dice_coef_loss,
                  metrics=['accuracy', dice_coef])

    # Loading Dataset
    X_train, Y_train, X_test, Y_test = get_db(args.data_dir)
    # Set tf.keras.callbacks.ModelCheckpoint callback to automatically save the model
    checkpoint_path = "weight/ep{epoch:03d}-val_dice_coef{val_dice_coef:.3f}-val_acc{val_accuracy:.3f}.ckpt"
    modelCheckpoint = callbacks.ModelCheckpoint(
        filepath=checkpoint_path,  # Path to save the model
        verbose=1,  # Whether to output information
        save_weights_only=True,
        period=1,  # Save the model every few rounds
    )
    earlyStopping = callbacks.EarlyStopping(
        monitor='val_loss',  # Monitored data
        min_delta=0.001,
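
The fragment is cut off here, and the dice_coef and dice_coef_loss used in model.compile above are not shown. A common Keras-backend implementation, included as an illustrative assumption rather than the project's actual code:

from tensorflow.keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Flatten both tensors and measure the overlap between prediction and ground truth.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Minimizing this loss maximizes the Dice overlap.
    return 1.0 - dice_coef(y_true, y_pred)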
Example #3
                                                      dtype=np.float32)
print(str(xtrain.shape), 'images and', str(ytrain.shape), 'masks')

unet = Unet(DESIRED_SIZE,
            DESIRED_SIZE,
            nclasses=NUM_CLASSES,
            filters=UNET_FILTERS)
print(unet.output_shape)

unet.summary()
# Setting BCE to False to use focal loss
BCE = False
print(xtrain.shape, train_images.shape)
if BCE:
    unet.compile(optimizer='adam',
                 loss="sparse_categorical_crossentropy",
                 metrics=['accuracy'])
    history = unet.fit(train_images,
                       train_labels,
                       batch_size=BATCH_SIZE,
                       epochs=EPOCHS,
                       validation_data=(valid_images, valid_labels))
else:
    unet.compile(optimizer='adam',
                 loss=SparseCategoricalFocalLoss(gamma=2),
                 metrics=['accuracy'])
    history = unet.fit(xtrain,
                       ytrain,
                       batch_size=BATCH_SIZE,
                       epochs=EPOCHS,
                       validation_data=(xval, yval))
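
SparseCategoricalFocalLoss is not imported in this fragment; it most likely comes from the focal-loss package (pip install focal-loss), which is the usual source of that class name:

# Assumed import for the focal loss used above; gamma=2 down-weights easy pixels.
from focal_loss import SparseCategoricalFocalLoss

loss_fn = SparseCategoricalFocalLoss(gamma=2)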
    '--generate_images',
    help='saves representative image of mask in addition to the mask prediction',
    action="store_true")
args = parser.parse_args()

assert os.path.isfile(args.weights), "Could not find weights file!"
assert os.path.isdir(args.images), "Could not find images directory!"

unet = Unet(DESIRED_SIZE,
            DESIRED_SIZE,
            nclasses=NUM_CLASSES,
            filters=UNET_FILTERS)
unet.load_weights(args.weights)
unet.compile(optimizer='adam',
             loss=SparseCategoricalFocalLoss(gamma=2),
             metrics=['accuracy'])

image_paths = glob.glob(args.images + '/*.jpg')
images = [
    x[0] for x in [
        get_resized_image_and_mask_label(cv2.imread(p), NEGATIVE)
        for p in image_paths
    ]
]

mpreds = unet.predict(np.array(images, dtype=np.uint8))
preds = maskpred(mpreds)

cmap = copy.copy(plt.cm.get_cmap("gist_rainbow", 20))
cmap.set_under(color="black")
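
maskpred is not defined in this fragment; from the way its output is used, it presumably collapses the per-pixel class probabilities returned by predict() into a class-index mask. A minimal sketch of that assumption:

import numpy as np

def maskpred(predictions):
    # predictions: (batch, height, width, num_classes) softmax output.
    return np.argmax(predictions, axis=-1).astype(np.uint8)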
    os.mkdir(logdir)
output_model_file = os.path.join(logdir, 'slate_models2.h5')

callbacks = [
    tf.keras.callbacks.TensorBoard(logdir),
    tf.keras.callbacks.ModelCheckpoint(output_model_file,
                                       monitor='val_acc',
                                       save_best_only=True),
    # tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',patience =10,min_delta = 0.1)
]


def focal(y_true, y_pred):
    # Simplified focal loss: (1 - p)^2 is the gamma=2 modulating factor and
    # 0.25 the alpha balancing weight.
    loss = -tf.reduce_mean(
        y_true * tf.square(1 - y_pred) *
        tf.math.log(tf.clip_by_value(y_pred, 1e-10, 1.0))) * 0.25
    return loss


# def meanIOU(y_true, y_pred):
#
#
#
# #'categorical_crossentropy'
model.compile(loss=focal, optimizer=optimizers.Adam(lr=1e-4), metrics=['acc'])
model.fit(train_db,
          epochs=100,
          validation_data=test_db,
          callbacks=callbacks,
          steps_per_epoch=3)
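
The commented-out meanIOU stub above was never filled in. One way to add a mean-IoU metric, sketched under the assumption that the model outputs per-pixel softmax probabilities and the labels are one-hot encoded (the class count below is a placeholder):

import tensorflow as tf

class MeanIoUFromProbs(tf.keras.metrics.MeanIoU):
    # tf.keras.metrics.MeanIoU expects integer class ids, so convert the one-hot
    # labels and the softmax probabilities with argmax before updating the state.
    def update_state(self, y_true, y_pred, sample_weight=None):
        return super().update_state(tf.argmax(y_true, axis=-1),
                                    tf.argmax(y_pred, axis=-1),
                                    sample_weight)

# Illustrative usage alongside the existing compile call:
# model.compile(loss=focal, optimizer=optimizers.Adam(lr=1e-4),
#               metrics=['acc', MeanIoUFromProbs(num_classes=NUM_CLASSES)])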