# loss=tf.keras.losses.BinaryCrossentropy(from_logits=False), # Computes the cross-entropy loss between true labels and predicted labels.
    # Focal loss instead of class weights: https://www.dlology.com/blog/multi-class-classification-with-focal-loss-for-imbalanced-datasets/
    model.compile(
        loss=SigmoidFocalCrossEntropy(),  # https://www.tensorflow.org/addons/api_docs/python/tfa/losses/SigmoidFocalCrossEntropy
        optimizer=keras.optimizers.Adam(config.learning_rate),
        metrics=[
            tf.metrics.BinaryAccuracy(name='accuracy'),
            tf.keras.metrics.Precision(name='precision'),  # precision of the predictions with respect to the labels
            tf.keras.metrics.Recall(name='recall'),  # recall of the predictions with respect to the labels
            F1Score(num_classes=10, name="f1_score")  # https://www.tensorflow.org/addons/api_docs/python/tfa/metrics/F1Score
        ]
        #               sample_weight_mode="temporal" # This argument is not supported when x is a dataset or a dataset iterator, instead pass sample weights as the third element of x.
    )

    history = model.fit(
        gen.training_dataset,
        validation_data=gen.validation_dataset,
        epochs=epochs,
        callbacks=callbacks_list,
        shuffle=True  # note: Keras ignores `shuffle` when x is a tf.data.Dataset; shuffle inside the dataset pipeline instead
    )
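    # For comparison, the class-weight approach that the comment above says
    # focal loss replaces would look roughly like this (a sketch; `y_train` as
    # a 1-D array of 0/1 labels is assumed and does not appear in the source):
    import numpy as np

    def balanced_class_weights(y_train):
        # Weight each class inversely to its frequency, so the minority class
        # contributes proportionally more to the loss.
        counts = np.bincount(y_train.astype(int))
        return {i: counts.sum() / (len(counts) * c) for i, c in enumerate(counts)}

    # model.fit(gen.training_dataset, epochs=epochs,
    #           class_weight=balanced_class_weights(y_train))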
Example #2
	return metrics


#------------------------------------------------#

#------------------------------------------------#
#                     Init                       #
#------------------------------------------------#

s_optimizer = Adam(0.0002)                               # optimizer for the model
train_loss = Mean(name='train_loss')                     # running mean of the training loss
test_loss = Mean(name='test_loss')                       # running mean of the test loss
train_accuracy = BinaryAccuracy(name='train_accuracy')
test_accuracy = BinaryAccuracy(name='test_accuracy')
hl = HammingLoss(mode='multilabel', threshold=0.5)       # fraction of incorrectly predicted labels
f1micro = F1Score(5, average='micro', name='f1_micro', threshold=0.5)  # micro-averaged F1 over the 5 labels
ema = ExponentialMovingAverage(0.99)                     # exponential moving average of the weights (decay 0.99)
es = early(30, 80)                                       # project-defined early-stopping helper
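# The stateful objects above are typically consumed inside a custom training
# loop; a minimal sketch, assuming a Keras `model` and a `loss_fn` such as
# tf.keras.losses.BinaryCrossentropy (neither is shown in the source):
import tensorflow as tf

@tf.function
def train_step(model, loss_fn, x, y):
    with tf.GradientTape() as tape:
        y_pred = model(x, training=True)
        loss = loss_fn(y, y_pred)
    grads = tape.gradient(loss, model.trainable_variables)
    s_optimizer.apply_gradients(zip(grads, model.trainable_variables))
    # update the running metrics declared in the Init block
    train_loss(loss)
    train_accuracy(y, y_pred)
    hl.update_state(y, y_pred)
    f1micro.update_state(y, y_pred)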

#------------------------------------------------#
#------------------------------------------------#
#                     Load                       #
#------------------------------------------------#

# UKDALE
uk_dale = DataSet(os.path.join(PATH_TO_DATASET_DIR, 'dale/ukdale.h5'))
appliances = ['kettle', 'microwave', 'dish washer', 'washing machine', 'fridge']
dh1 = house(uk_dale, appliances, 1, '2016-06-01', '2016-08-31')
dh2 = house(uk_dale, appliances, 2, '2013-06-01', '2013-08-05')

## REDD
# (the REDD loading code is truncated in the source)

model = Sequential([  # opening reconstructed; the truncated portion above it
                      # must also supply an input shape for summary() to work
    Conv2D(16, (3, 3), padding='same'),
    Activation('relu'),
    Dropout(0.3),
    MaxPooling2D(pool_size=(2,2)),
    Flatten(),
    Dense(128),
    Activation('relu'),
    Dense(8),
    Activation('softmax')
])

model.summary()
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy', Precision(), Recall(), F1Score(num_classes=8)]
)

history = model.fit(X_train, Y_train, batch_size=128, epochs=2, steps_per_epoch=20, shuffle=True)

print('Training Finished..')
print('Testing ..')

# --------- Test set  ---------

score = model.evaluate(X_test, Y_test)

print('===Testing Metrics===')
print('Test loss: ', score[0])
print('Test accuracy: ', score[1])
print('Test precision: ', score[2])
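# The remaining entries of `score` follow the metric order passed to
# compile(): index 3 is recall and index 4 is the per-class F1 vector.
print('Test recall: ', score[3])
print('Test F1 (per class): ', score[4])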
Example #4
    return es


# In[12]:

# Constant
EPOCH = 50
BATCH_SIZE = 2048
VERBOSE = 0

# In[13]:

METRICS = [
    SparseCategoricalAccuracy(name='accuracy'),
    CohenKappa(name='kappa', num_classes=5, sparse_labels=True),
    # note: unlike the two sparse-label metrics above, tfa.metrics.F1Score
    # expects one-hot (or probability) targets, so labels may need converting
    F1Score(name='f1_micro', num_classes=5, average="micro", threshold=0.5),
]


def create_mlp():
    MLP = Sequential([
        Dense(
            10,
            activation='relu',
            input_dim=X_train.shape[1],
        ),
        Dropout(0.5),
        Dense(5, activation='softmax')
    ])
    MLP.compile(optimizer='adam',
                # the last layer applies softmax, so the loss must consume
                # probabilities rather than logits
                loss=SparseCategoricalCrossentropy(from_logits=False),
                metrics=METRICS)
    return MLP
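# Hypothetical usage of create_mlp() with the constants defined above;
# X_train and y_train are assumed to exist (only X_train appears in the
# source snippet):
mlp = create_mlp()
history = mlp.fit(X_train, y_train,
                  epochs=EPOCH,
                  batch_size=BATCH_SIZE,
                  verbose=VERBOSE)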
Example #5
def test_keras_model(self):
    f1 = F1Score(5)
    utils._get_model(f1, 5)
Example #6
def test_config(self):
    f1 = F1Score(3)
    config = f1.get_config()
    self.assertFalse("beta" in config)
    #   mode='min', min_lr=0.000001)

    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_precision', mode='max', patience=20, verbose=1)

    callbacks_list = [save_checkpoint_wandb, early_stop]

    model = define_model(10, (100,100,18))

    # loss=tf.keras.losses.BinaryCrossentropy(from_logits=False), # Computes the cross-entropy loss between true labels and predicted labels.
    # Focal loss instead of class weights: https://www.dlology.com/blog/multi-class-classification-with-focal-loss-for-imbalanced-datasets/
    model.compile(loss=SigmoidFocalCrossEntropy(),
                  # https://www.tensorflow.org/addons/api_docs/python/tfa/losses/SigmoidFocalCrossEntropy
                  optimizer=keras.optimizers.Adam(0.001),
                  metrics=[tf.metrics.BinaryAccuracy(name='accuracy'),
                           tf.keras.metrics.Precision(name='precision'),
                           # Computes the precision of the predictions with respect to the labels.
                           tf.keras.metrics.Recall(name='recall'),
                           # Computes the recall of the predictions with respect to the labels.
                           F1Score(num_classes=10, name="f1_score")
                           # https://www.tensorflow.org/addons/api_docs/python/tfa/metrics/F1Score
                           ]
                  )

    # model = Simple_CNN(10, input_shape=(100, 100, 18))
    epochs = 2
    history = model.fit(gen.training_dataset, validation_data=gen.validation_dataset,
                        epochs=epochs,
                        callbacks=callbacks_list,
                        # shuffle=True  # whether to shuffle the training data before each epoch
                        )

model = Sequential([  # opening reconstructed; the source snippet is truncated before this point
    Dense(128),
    Activation('sigmoid'),
    Dropout(0.2),
    Dense(8),
    Activation('softmax')
])

model.summary()

model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy',
             Precision(),
             Recall(),
             F1Score(num_classes=8)])

history = model.fit(X_train, Y_train, batch_size=64, epochs=4)

y_pred = model.predict(X_test)
y_pred = np.argmax(y_pred, axis=-1)

conf_mat = confusion_matrix(np.argmax(Y_test, axis=-1), y_pred)
f, ax = plt.subplots(figsize=(5, 5))
# Normalize the confusion matrix.
conf_mat = np.around(conf_mat.astype('float') /
                     conf_mat.sum(axis=1)[:, np.newaxis],
                     decimals=2)
plt.title("Confusion matrix")
sns.heatmap(conf_mat,
            annot=True,
            ax=ax)  # closing reconstructed; the source snippet is truncated here
plt.show()
Example #9
def test_config_f1():
    f1 = F1Score(3)
    config = f1.get_config()
    assert "beta" not in config
model = Sequential([
    ResNet50(weights=None,
             input_shape=(50, 50, 3),
             include_top=True,
             classes=2),
])

model.summary()

model.compile(
    loss='categorical_crossentropy',
    optimizer=Adam(learning_rate=0.0001),
    metrics=['accuracy',
             Precision(),
             Recall(),
             F1Score(num_classes=2)])
# TODO: add momentum to the optimizer
history = model.fit(train_generator,
                    epochs=6,
                    steps_per_epoch=200,
                    validation_data=test_generator,
                    validation_steps=100)

print('Training Finished..')
print('Testing ..')

# --------- Test set  ---------

# score = model.evaluate(test_generator)
# print('===Testing Metrics===')
# print('Test loss: ', score[0])
Example #11
# Make a list of the test folders to be used when predicting the model. This will be fed into the prediction
# flow to generate the stitched image based off the predictions of the patches fed into the network
_, test_fol, _ = next(os.walk(test_dataset))

# Load in the relevant datasets
X_train, Y_train, X_test, Y_test, X_val, Y_val = DatasetLoad(
    train_dataset, test_dataset, val_dataset)

################################ RESIDUAL UNET ################################

optimizer = Adam()

# Metrics to be used when evaluating the network
precision = tf.keras.metrics.Precision()
recall = tf.keras.metrics.Recall()
f1 = F1Score(num_classes=2, name='f1', average='micro', threshold=0.4)

# Instantiate the network
model = model_resunet.ResUNet((IMG_SIZE, IMG_SIZE, 3))
model.compile(optimizer=optimizer,
              loss='binary_crossentropy',
              metrics=['accuracy', precision, recall, f1])
model.summary()

# Callbacks to be used in the network. Checkpoint can be adjusted to save only the best (lowest loss) model if desired.
checkpoint_path = os.path.join(dname, 'models',
                               'resunet.{epoch:02d}-{f1:.2f}.hdf5')
checkpoint = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                verbose=1,
                                                save_best_only=False)
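
# Hypothetical continuation: pass the checkpoint callback to Model.fit().
# The epoch count is illustrative and not from the source; X_train, Y_train,
# X_val, and Y_val come from the DatasetLoad call above.
history = model.fit(X_train, Y_train,
                    validation_data=(X_val, Y_val),
                    epochs=50,  # illustrative value, not from the source
                    callbacks=[checkpoint])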