Example #1
    def test_sample_weighted(self):
        bce_obj = losses.BinaryCrossentropy()
        y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
        y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
        sample_weight = K.constant([1.2, 3.4], shape=(2, 1))
        loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)

        # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
        # y` = clip(output, EPSILON, 1. - EPSILON)
        # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

        # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
        #      = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
        #         -log(Y_MAX + EPSILON), -log(1)]
        #      = [0, 15.33, 0, 0]
        # Reduced loss = 15.33 * 1.2 / 4

        assert np.isclose(K.eval(loss), 4.6, atol=1e-3)

        # Test with logits.
        y_true = K.constant([[1, 0, 1], [0, 1, 1]])
        logits = K.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
        weights = K.constant([4, 3])
        bce_obj = losses.BinaryCrossentropy(from_logits=True)
        loss = bce_obj(y_true, logits, sample_weight=weights)

        # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
        #            (where x = logits and z = y_true)
        # Loss = [(0 + 0 + 0)/3, 200 / 3]
        # Weighted loss = [0 * 4, 66.666 * 3]
        # Reduced loss = (0 + 66.666 * 3) / 2

        assert np.isclose(K.eval(loss), 100, atol=1e-3)
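The weighted-reduction arithmetic in the first comment block can be checked by hand: with the default SUM_OVER_BATCH_SIZE reduction, the weighted element losses are summed and divided by the total element count. A minimal numpy sketch (the element losses and weights are taken directly from the comments above, not recomputed):

import numpy as np

# Element losses [0, 15.33, 0, 0] and per-row weights [1.2, 3.4], as stated
# in the comment block above.
element_losses = np.array([[0.0, 15.33], [0.0, 0.0]])
weights = np.array([[1.2], [3.4]])
reduced = np.sum(element_losses * weights) / element_losses.size
print(reduced)  # ~4.599, matching the asserted 4.6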
Example #2
    def test_all_correct_unweighted(self):
        y_true = K.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
        bce_obj = losses.BinaryCrossentropy()
        loss = bce_obj(y_true, y_true)
        assert np.isclose(K.eval(loss), 0.0, atol=1e-3)

        # Test with logits.
        logits = K.constant([[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0],
                             [-100.0, -100.0, 100.0]])
        bce_obj = losses.BinaryCrossentropy(from_logits=True)
        loss = bce_obj(y_true, logits)
        assert np.isclose(K.eval(loss), 0.0, atol=1e-3)
Example #3
def main(path, input_size, n_classes=2, batch_size=16, epochs_count=30):

    train_gen = data_generator(path, batch_size)
    model = Segnet(input_size, n_classes)
    model.build_model('adam',
                      loss=losses.BinaryCrossentropy(),
                      metrics=["accuracy"])
    model.evaluate_generator(train_gen, 11, "./models/modelsweights.10.hdf5")
Example #4
def main(path, input_size, n_classes=2, batch_size=16, epochs_count=30):

    train_gen = data_generator(path, batch_size)
    model = Segnet(input_size, n_classes)
    model.build_model('adam', loss=losses.BinaryCrossentropy(), metrics=["accuracy"])
    model.train_generator(train_gen, steps_per_epoch=24,
                          epochs=epochs_count, save_path="./models")
    print("Training Done....")
Example #5
    def test_no_reduction(self):
        y_true = K.constant([[1, 0, 1], [0, 1, 1]])
        logits = K.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
        bce_obj = losses.BinaryCrossentropy(
            from_logits=True, reduction=losses_utils.Reduction.NONE)
        loss = bce_obj(y_true, logits)

        # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
        #            (where x = logits and z = y_true)
        # Loss = [(0 + 0 + 0)/3, (200)/3]

        assert np.allclose(K.eval(loss), (0., 66.6666), atol=1e-3)
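For reference, the numerically stable from-logits formula quoted in the comments can be evaluated directly with numpy; a minimal sketch using the same logits and labels as the test above:

import numpy as np

# max(x, 0) - x*z + log(1 + exp(-|x|)), then a mean over the class axis.
x = np.array([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
z = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
stable = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
print(stable.mean(axis=-1))  # [0.0, 66.6666], the unreduced per-sample losses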
Example #6
def main(path, input_size, n_classes=2, batch_size=30, epochs_count=50):

    train_gen = data_generator(path, batch_size)
    model = Segnet(input_size, n_classes)
    model.build_model()
    model.compile('sgd',
                  loss=losses.BinaryCrossentropy(),
                  metrics=["accuracy"])
    model.train_generator(train_gen,
                          steps_per_epoch=300 // batch_size,
                          epochs=epochs_count)
    print("Training Done....")
Example #7
    def test_unweighted(self):
        y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
        y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
        bce_obj = losses.BinaryCrossentropy()
        loss = bce_obj(y_true, y_pred)

        # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
        # y` = clip(output, EPSILON, 1. - EPSILON)
        # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

        # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
        #      = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
        #         -log(Y_MAX + EPSILON), -log(1)]
        #      = [0, 15.33, 0, 0]
        # Reduced loss = 15.33 / 4

        assert np.isclose(K.eval(loss), 3.833, atol=1e-3)

        # Test with logits.
        y_true = K.constant([[1., 0., 1.], [0., 1., 1.]])
        logits = K.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
        bce_obj = losses.BinaryCrossentropy(from_logits=True)
        loss = bce_obj(y_true, logits)

        # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
        #            (where x = logits and z = y_true)
        #      = [((100 - 100 * 1 + log(1 + exp(-100))) +
        #          (0 + 100 * 0 + log(1 + exp(-100))) +
        #          (100 - 100 * 1 + log(1 + exp(-100))),
        #         ((100 - 100 * 0 + log(1 + exp(-100))) +
        #          (100 - 100 * 1 + log(1 + exp(-100))) +
        #          (0 + 100 * 1 + log(1 + exp(-100))))]
        #      = [(0 + 0 + 0) / 3, 200 / 3]
        # Reduced loss = (0 + 66.666) / 2

        assert np.isclose(K.eval(loss), 33.333, atol=1e-3)
Example #8
    def build(self):
        self.model = Sequential()
        self.model.add(layers.Embedding(self.features + 1, 16))
        self.model.add(
            layers.Bidirectional(layers.LSTM(64, return_sequences=True)))
        self.model.add(layers.Bidirectional(layers.LSTM(32)))
        self.model.add(layers.Dropout(0.2))
        self.model.add(layers.Dense(units=64, activation='relu'))
        self.model.add(layers.Dense(1))

        self.model.summary()

        self.model.compile(loss=losses.BinaryCrossentropy(from_logits=True),
                           optimizer='adam',
                           metrics=[tf.metrics.BinaryAccuracy(threshold=0.0)])
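Note that this model ends in Dense(1) with no activation, which is why from_logits=True is passed and BinaryAccuracy uses threshold=0.0 (a raw logit of 0 corresponds to probability 0.5). A minimal sketch of converting such logits to probabilities at inference time (the sample values are illustrative, not from the original):

import numpy as np
import tensorflow as tf

# Hypothetical post-processing: the network emits raw logits, so apply a
# sigmoid manually to obtain probabilities.
logits = np.array([-2.0, 0.0, 3.0])
print(tf.sigmoid(logits).numpy())  # [0.119, 0.5, 0.953]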
Example #9
    def test_label_smoothing(self):
        logits = K.constant([[100.0, -100.0, -100.0]])
        y_true = K.constant([[1, 0, 1]])
        label_smoothing = 0.1
        # Loss: max(x, 0) - x * z + log(1 + exp(-abs(x)))
        #            (where x = logits and z = y_true)
        # Label smoothing: z' = z * (1 - L) + 0.5L
        #                  1  = 1 - 0.5L
        #                  0  = 0.5L
        # Applying the above two fns to the given input:
        # (100 - 100 * (1 - 0.5 L)  + 0 +
        #  0   + 100 * (0.5 L)      + 0 +
        #  0   + 100 * (1 - 0.5 L)  + 0) * (1/3)
        #  = (100 + 50L) * 1/3
        bce_obj = losses.BinaryCrossentropy(from_logits=True,
                                            label_smoothing=label_smoothing)
        loss = bce_obj(y_true, logits)
        expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
        assert np.isclose(K.eval(loss), expected_value, atol=1e-3)
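The smoothing arithmetic in those comments checks out numerically; a minimal numpy sketch, assuming the same inputs as the test:

import numpy as np

# z' = z * (1 - L) + 0.5 * L, then the stable from-logits formula by hand.
L = 0.1
x = np.array([100.0, -100.0, -100.0])
z = np.array([1.0, 0.0, 1.0])
z_smooth = z * (1 - L) + 0.5 * L  # [0.95, 0.05, 0.95]
loss = (np.maximum(x, 0) - x * z_smooth + np.log1p(np.exp(-np.abs(x)))).mean()
print(loss, (100.0 + 50.0 * L) / 3.0)  # both ~35.0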
Example #10
    def test_config(self):
        bce_obj = losses.BinaryCrossentropy(
            reduction=losses_utils.Reduction.SUM, name='bce_1')
        assert bce_obj.name == 'bce_1'
        assert bce_obj.reduction == losses_utils.Reduction.SUM
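For contrast with the SUM_OVER_BATCH_SIZE default used in the other tests, Reduction.SUM totals the per-sample losses instead of averaging them. A minimal sketch, reusing the imports and values from the tests above:

bce_sum = losses.BinaryCrossentropy(
    from_logits=True, reduction=losses_utils.Reduction.SUM)
y_true = K.constant([[1., 0., 1.], [0., 1., 1.]])
logits = K.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
# Per-sample losses are [0, 66.666]; SUM returns 66.666 rather than the
# (0 + 66.666) / 2 = 33.333 mean.
print(K.eval(bce_sum(y_true, logits)))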
Example #11
    x_train, y_train, x_test, y_test, x_valid, y_valid = get_data(
        path_train_jpg,
        path_train_txt,
        path_valid_jpg,
        path_valid_txt)

    input_shape = (220, 220, 3)

    if os.path.exists('model.h5'):
        model = tf.keras.models.load_model('model.h5', custom_objects={'custom_loss': custom_loss, 'mean_iou': mean_iou})
    else:
        model = create_model(input_shape)

        model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001),
                      loss={
                          'class': losses.BinaryCrossentropy(),
                          'out': custom_loss
                      },
                      metrics={
                          'class': 'accuracy',
                          'out': mean_iou
                      })

        model.fit(x_train, y_train, validation_data=(x_valid, y_valid), batch_size=32, epochs=10)
        model.save('model.h5')

    loss, class_loss, out_loss, test_acc, iou = model.evaluate(x_test, y_test, verbose=2)
    print('\nmIoU {:.2f}%, classification accuracy {:.2f}%, {} train, {} valid.'.format(iou*100, test_acc*100, len(x_train), len(x_valid)))

    predictions = model.predict(x_test)
Example #12
model.add(
    layers.Conv2D(image_size, (3, 3),
                  activation='relu',
                  input_shape=(image_size, image_size, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(image_size * 2, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(image_size * 2, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(image_size * 2, activation='relu'))
model.add(layers.Dense(1))

# Compiling and evaluating the model
model.compile(optimizer='adam',
              loss=losses.BinaryCrossentropy(),
              metrics=['accuracy'])

# Sequential 75/25 split; the original wrapped each slice in an extra list,
# which would have broken the batch dimension passed to model.fit.
train_images = imageList[:round(0.75 * (len(imageList) - 1))]
train_labels = labelsList[:round(0.75 * (len(labelsList) - 1))]

test_images = imageList[round(0.75 * (len(imageList) - 1)):]
test_labels = labelsList[round(0.75 * (len(labelsList) - 1)):]
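A common alternative to the manual slicing above is scikit-learn's train_test_split (an assumption, not part of the original script); shuffle=False mirrors the sequential split:

from sklearn.model_selection import train_test_split

# Hypothetical equivalent of the 75/25 split above.
train_images, test_images, train_labels, test_labels = train_test_split(
    imageList, labelsList, test_size=0.25, shuffle=False)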

history = model.fit(train_images,
                    train_labels,
                    epochs=10,
                    validation_data=(test_images, test_labels))

print("Optimization is done!")
showLossGraph(history.history['loss'])