Example #1
# Requires SparseCategoricalFocalLoss from the focal-loss package
# (pip install focal-loss). The snippet is truncated at the top; the
# U-Net builder name below is assumed for illustration.
from focal_loss import SparseCategoricalFocalLoss

unet = build_unet(DESIRED_SIZE,
                  nclasses=NUM_CLASSES,
                  filters=UNET_FILTERS)
print(unet.output_shape)

unet.summary()
# BCE selects the loss: True trains with sparse categorical cross-entropy,
# False (used here) trains with sparse categorical focal loss.
BCE = False
print(xtrain.shape, train_images.shape)  # sanity-check both training arrays
if BCE:
    unet.compile(optimizer='adam',
                 loss="sparse_categorical_crossentropy",
                 metrics=['accuracy'])
    history = unet.fit(train_images,
                       train_labels,
                       batch_size=BATCH_SIZE,
                       epochs=EPOCHS,
                       validation_data=(valid_images, valid_labels))
else:
    unet.compile(optimizer='adam',
                 loss=SparseCategoricalFocalLoss(gamma=2),
                 metrics=['accuracy'])
    history = unet.fit(xtrain,
                       ytrain,
                       batch_size=BATCH_SIZE,
                       epochs=EPOCHS,
                       validation_data=(xval, yval))

unet.save(f"unet-SAMPLESIZE{SAMPLE_SIZE}-EPOCHS{EPOCHS}.h5")

loss = history.history["loss"]
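# Illustrative follow-up (not in the original snippet): plot the training and
# validation loss curves collected above, assuming matplotlib is installed.
import matplotlib.pyplot as plt

val_loss = history.history["val_loss"]
plt.plot(loss, label="train loss")
plt.plot(val_loss, label="val loss")
plt.xlabel("epoch")
plt.legend()
plt.savefig(f"unet-loss-EPOCHS{EPOCHS}.png")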
Example #2
    # Truncated at the top in the original; a minimal assumed ModelCheckpoint
    # (referenced in model.fit below):
    modelCheckpoint = callbacks.ModelCheckpoint(
        'best_model.h5',  # filepath assumed for illustration
        monitor='val_loss', save_best_only=True)
    earlyStopping = callbacks.EarlyStopping(
        monitor='val_loss',  # quantity to monitor
        min_delta=0.001,
        patience=4,  # stop after this many epochs with improvement < min_delta
    )
    # If the monitored value shows no improvement for `patience` consecutive
    # epochs, reduce the learning rate.
    reduceLROnPlateau = callbacks.ReduceLROnPlateau(
        factor=0.2,  # new_lr = lr * factor
        patience=10,  # epochs with no improvement before reducing the LR
        min_lr=1e-10)  # lower bound on the learning rate
    tensorboard = callbacks.TensorBoard(
        log_dir=args.logs,
        write_graph=True,  # write the model graph for visualization in TensorBoard
        update_freq='epoch'  # write losses and metrics after each epoch
    )
    model.fit(
        x=X_train,
        y=Y_train,
        epochs=args.epochs,
        validation_data=(X_test, Y_test),
        validation_freq=1,  # run validation after every epoch
        callbacks=[
            modelCheckpoint, reduceLROnPlateau, tensorboard, earlyStopping
        ],
        batch_size=args.batch_size,
        workers=args.workers,
    )
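# Supporting sketch (assumed, not in the original): the code above reads its
# hyperparameters from an `args` namespace; a matching argparse setup might be:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--logs', default='./logs')
args = parser.parse_args()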
Example #3
# Truncated at the top in the original; the directory-existence guard is assumed.
if not os.path.exists(logdir):
    os.mkdir(logdir)
output_model_file = os.path.join(logdir, 'slate_models2.h5')

callbacks = [
    tf.keras.callbacks.TensorBoard(logdir),
    tf.keras.callbacks.ModelCheckpoint(output_model_file,
                                       monitor='val_acc',
                                       save_best_only=True),
    # tf.keras.callbacks.EarlyStopping(monitor='val_acc', patience=10, min_delta=0.1)
]


def focal(y_true, y_pred):
    # Focal loss with gamma=2 and alpha=0.25: the (1 - y_pred)**2 factor
    # down-weights easy, well-classified pixels; clipping avoids log(0).
    loss = -tf.reduce_mean(
        y_true * tf.multiply(1 - y_pred, 1 - y_pred) *
        tf.math.log(tf.clip_by_value(y_pred, 1e-10, 1.0))) * 0.25
    return loss
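# Illustrative check (not in the original): the modulating factor
# (1 - y_pred)**2 shrinks the loss for confident, correct predictions.
_demo_true = tf.constant([[0.0, 1.0]])
_demo_pred = tf.constant([[0.1, 0.9]])
print(float(focal(_demo_true, _demo_pred)))  # near zero for a confident hit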


# def meanIOU(y_true, y_pred):  # mean-IoU metric left unimplemented
#     ...
# Alternative loss: 'categorical_crossentropy'
model.compile(loss=focal,
              optimizer=optimizers.Adam(learning_rate=1e-4),
              metrics=['acc'])
model.fit(train_db,
          epochs=100,
          validation_data=test_db,
          callbacks=callbacks,
          steps_per_epoch=3)  # only 3 batches are drawn per epoch
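# Follow-up sketch (assumed, not in the original): reload the best checkpoint
# written by ModelCheckpoint; the custom focal loss must be passed explicitly.
best_model = tf.keras.models.load_model(output_model_file,
                                        custom_objects={'focal': focal})
print(best_model.evaluate(test_db))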