def test_save_load(self):
    """Persist the trained model, reload it, and verify the metrics agree."""
    # Every custom metric name must map to a fresh metric object so that
    # load_model can deserialize the compiled model.
    sparse = self.sparse
    custom_objects = {
        name: factory(sparse=sparse)
        for name, factory in (
            ("true_positive", keras_metrics.true_positive),
            ("true_negative", keras_metrics.true_negative),
            ("false_positive", keras_metrics.false_positive),
            ("false_negative", keras_metrics.false_negative),
            ("precision", keras_metrics.precision),
            ("recall", keras_metrics.recall),
            ("f1_score", keras_metrics.f1_score),
        )
    }
    # Custom backend activations used by the model's layers.
    custom_objects["sin"] = keras.backend.sin
    custom_objects["abs"] = keras.backend.abs

    inputs, labels = self.samples(100)
    self.model.fit(inputs, labels, epochs=10)

    with tempfile.NamedTemporaryFile() as tmp:
        self.model.save(tmp.name, overwrite=True)
        reloaded = keras.models.load_model(tmp.name,
                                           custom_objects=custom_objects)

        # Drop the leading loss value; compare only the metric outputs.
        expected = self.model.evaluate(inputs, labels)[1:]
        received = reloaded.evaluate(inputs, labels)[1:]

        self.assertEqual(expected, received)
# ----- 示例 #2 (Example 2) -----
    def test_metrics(self):
        """Train a tiny binary classifier and check precision/recall against
        values recomputed from the raw confusion-matrix counts.

        BUG FIX: the output layer previously used a single-unit ``softmax``;
        softmax over one logit is identically 1.0, so every prediction was
        positive and the metrics were degenerate.  A single-unit binary
        classifier must use ``sigmoid``.
        """
        tp = keras_metrics.true_positive()
        fp = keras_metrics.false_positive()
        fn = keras_metrics.false_negative()

        precision = keras_metrics.precision()
        recall = keras_metrics.recall()

        model = keras.models.Sequential()
        model.add(keras.layers.Dense(1, activation="sigmoid", input_dim=2))
        # was activation="softmax" — constant 1.0 on a single unit (see above)
        model.add(keras.layers.Dense(1, activation="sigmoid"))

        model.compile(optimizer="sgd",
                      loss="binary_crossentropy",
                      metrics=[tp, fp, fn, precision, recall])

        samples = 1000
        x = numpy.random.random((samples, 2))
        y = numpy.random.randint(2, size=(samples, 1))

        model.fit(x, y, epochs=1, batch_size=10)
        # evaluate() returns [loss, tp, fp, fn, precision, recall];
        # drop the loss.
        metrics = model.evaluate(x, y, batch_size=10)[1:]

        tp_val = metrics[0]
        fp_val = metrics[1]
        fn_val = metrics[2]

        # Rebind to the evaluated scalars (the metric objects are no longer
        # needed at this point).
        precision = metrics[3]
        recall = metrics[4]

        expected_precision = tp_val / (tp_val + fp_val)
        expected_recall = tp_val / (tp_val + fn_val)

        self.assertAlmostEqual(expected_precision, precision, delta=0.05)
        self.assertAlmostEqual(expected_recall, recall, delta=0.05)
# ----- 示例 #3 (Example 3) -----
def build_model(max_len, label_count, dropout_ratio=0.1, filter_length=50,
                gpus=2):
    """Build a text-classification CNN on top of ELMo embeddings.

    Args:
        max_len: Length of the string input sequence.
        label_count: Number of output labels (sigmoid units).
        dropout_ratio: Dropout rate applied to the ELMo embeddings.
        filter_length: Number of Conv1D filters.
        gpus: Number of GPUs for the data-parallel replica (previously
            hard-coded to 2; default preserves the old behavior).

    Returns:
        The single-device ``Model``.  Its weights are shared with the
        compiled multi-GPU replica.

    NOTE(review): only the multi-GPU wrapper is compiled here; the returned
    ``text_model`` is not.  Callers must either train through the parallel
    model or compile ``text_model`` themselves — confirm this is intended.
    """
    text_input = Input(shape=(max_len,), dtype=tf.string)

    # ElmoEmbedding maps each input string to a (max_len, 1024) tensor.
    elmo_embedding = Lambda(ElmoEmbedding, output_shape=(max_len, 1024))(text_input)

    dropout = Dropout(dropout_ratio)(elmo_embedding)

    conv1d = Conv1D(filter_length, 3, padding='valid', activation='relu',
                    strides=1)(dropout)

    global_1d = GlobalMaxPool1D()(conv1d)

    dense = Dense(label_count, activation='sigmoid')(global_1d)

    text_model = Model(
        inputs=text_input,
        outputs=dense
    )

    # Data-parallel replica; compiling it attaches loss/metrics to the
    # shared weights.
    parallel_model = multi_gpu_model(text_model, gpus=gpus)

    parallel_model.compile(
        optimizer='adam',
        loss=abs_KL_div,  # alternative: 'binary_crossentropy'
        metrics=[
            'categorical_accuracy',
            precision_m,
            recall_m,
            f1_m,
            'mae',
            abs_KL_div,
            true_positive(),
            false_positive(),
            true_negative(),
            false_negative(),
            'accuracy',
        ]
    )
    text_model.summary()

    return text_model
    def setUp(self):
        """Build and compile a minimal sin->abs model carrying every custom
        keras_metrics metric, for use by the test methods."""
        sparse = self.sparse

        # One instance of each custom metric, in the order the tests
        # index them.
        metric_list = [
            keras_metrics.true_positive(sparse=sparse),
            keras_metrics.true_negative(sparse=sparse),
            keras_metrics.false_positive(sparse=sparse),
            keras_metrics.false_negative(sparse=sparse),
            keras_metrics.precision(sparse=sparse),
            keras_metrics.recall(sparse=sparse),
            keras_metrics.f1_score(sparse=sparse),
        ]

        self.model = keras.models.Sequential()
        self.model.add(keras.layers.Activation(keras.backend.sin))
        self.model.add(keras.layers.Activation(keras.backend.abs))

        # Sparse integer labels need the sparse categorical loss;
        # otherwise plain binary crossentropy.
        loss = ("sparse_categorical_crossentropy" if sparse
                else "binary_crossentropy")

        self.model.compile(optimizer="sgd",
                           loss=loss,
                           metrics=metric_list)
# ----- 示例 #5 (Example 5) -----
    ])
    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=metrics)
    model.fit(train_images, train_labels, epochs=epochs, batch_size=batch_size, callbacks=[KafkaCallback(session_name)])
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2, callbacks=[KafkaCallback(session_name)])

    print('\nTest accuracy:', test_acc)


if __name__ == "__main__":
    # NOTE(review): this first METRICS list is immediately overwritten by
    # the assignment below and is therefore dead code; kept for reference.
    # BUG FIX: it previously listed km.false_negative() twice and omitted
    # km.false_positive() entirely.
    METRICS = [
        'accuracy',
        km.precision(),
        km.recall(),
        km.true_positive(),
        km.false_positive(),
        km.true_negative(),
        km.false_negative(),
    ]
    # Built-in Keras metric objects actually used downstream.
    METRICS = [
        keras.metrics.TruePositives(name='tp'),
        keras.metrics.FalsePositives(name='fp'),
        keras.metrics.TrueNegatives(name='tn'),
        keras.metrics.FalseNegatives(name='fn'),
        keras.metrics.BinaryAccuracy(name='accuracy'),
        keras.metrics.Precision(name='precision'),
        keras.metrics.Recall(name='recall'),
        keras.metrics.AUC(name='auc'),
    ]
# Classifier head: dense + dropout feeding a single sigmoid unit.
l_dense = Dense(1024, activation='relu')(l_flat)
l_dense = Dropout(0.5)(l_dense)
preds = Dense(1, activation='sigmoid')(l_dense)

model = Model(sequence_input, preds)

# CNN model summary and compilation
print("Model summary:")
print(model.summary())
model.compile(loss='binary_crossentropy',
              optimizer="adam",
              metrics=[
                  'accuracy',
                  keras_metrics.precision(),
                  keras_metrics.recall(),
                  keras_metrics.true_positive()
              ])

# Early-stopping callback.
# BUG FIX: monitoring a *loss* requires mode="min" (or "auto"); with
# mode="max", EarlyStopping treats a rising val_loss as improvement and
# stops training at exactly the wrong time.
es = EarlyStopping(monitor="val_loss",
                   min_delta=0.01,
                   patience=1,
                   verbose=1,
                   mode="min")

#Training and testing CNN model
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=num_epochs,
          verbose=2,
# ----- 示例 #7 (Example 7) -----
def unet(pretrained_weights=None, input_size=(256, 256, 1)):
    """Build the classic U-Net for binary image segmentation.

    Args:
        pretrained_weights: Optional path to a weights file to load into
            the model after construction.
        input_size: Input tensor shape as (height, width, channels).

    Returns:
        A compiled Keras ``Model`` with a 1-channel sigmoid output and the
        keras_metrics confusion-matrix / precision / recall / F1 metrics
        attached.
    """

    def conv_block(filters, tensor):
        # Two stacked 3x3 same-padded ReLU convolutions, as used on every
        # level of both the contracting and expanding paths.
        x = Conv2D(filters, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(tensor)
        return Conv2D(filters, 3, activation='relu', padding='same',
                      kernel_initializer='he_normal')(x)

    def up_block(filters, tensor):
        # 2x nearest-neighbor upsampling followed by a 2x2 convolution
        # (the U-Net "up-convolution").
        return Conv2D(filters, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(
                          UpSampling2D(size=(2, 2))(tensor))

    inputs = Input(input_size)

    # Contracting path.
    conv1 = conv_block(64, inputs)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv_block(128, pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = conv_block(256, pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = conv_block(512, pool3)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck.
    conv5 = conv_block(1024, pool4)
    drop5 = Dropout(0.5)(conv5)

    # Expanding path with skip connections (channel-axis concatenation).
    up6 = up_block(512, drop5)
    conv6 = conv_block(512, concatenate([drop4, up6], axis=3))
    up7 = up_block(256, conv6)
    conv7 = conv_block(256, concatenate([conv3, up7], axis=3))
    up8 = up_block(128, conv7)
    conv8 = conv_block(128, concatenate([conv2, up8], axis=3))
    up9 = up_block(64, conv8)
    conv9 = conv_block(64, concatenate([conv1, up9], axis=3))

    conv9 = Conv2D(2, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(conv9)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    # BUG FIX: the Keras 2 functional API takes `inputs=`/`outputs=`; the
    # old singular `input=`/`output=` keywords raise a TypeError on
    # current Keras versions.
    model = Model(inputs=inputs, outputs=conv10)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=[
                      keras_metrics.true_positive(),
                      keras_metrics.true_negative(),
                      keras_metrics.false_positive(),
                      keras_metrics.false_negative(), 'accuracy',
                      keras_metrics.f1_score(),
                      keras_metrics.precision(),
                      keras_metrics.recall()
                  ])

    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
# ----- 示例 #8 (Example 8) -----
    ])
    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=metrics)
    model.fit(train_images, train_labels, epochs=epochs, batch_size=batch_size, callbacks=[KafkaCallback(session_name)])
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2, callbacks=[KafkaCallback(session_name)])

    print('\nTest accuracy:', test_acc)


if __name__ == "__main__":
    METRICS = [
        'accuracy',
        km.precision(label=0),
        km.recall(label=0),
        km.true_positive(label=0),
        km.true_negative(label=0),
        km.false_positive(label=0),
        km.false_negative(label=0),
    ]
    LOSS = [
        (1, tf.keras.losses.BinaryCrossentropy(from_logits=True), 'Binary crossentropy'),
        (2, tf.keras.losses.CategoricalCrossentropy(from_logits=True), 'Categorical crossentropy'),
        (3, tf.keras.losses.CategoricalHinge(), 'Categorical hinge'),
        (4, tf.keras.losses.CosineSimilarity(), 'Cosine similarity'),
        (5, tf.keras.losses.Hinge(), 'Hinge'),
        (6, tf.keras.losses.Huber(), 'Huber'),
        (7, tf.keras.losses.SquaredHinge(), 'Squared hinge'),
        (8, tf.keras.losses.LogCosh(), 'Hyperbolic Cosine'),
        (9, tf.keras.losses.MeanAbsoluteError(), 'Mean absolute error'),
        (10, tf.keras.losses.MeanAbsolutePercentageError(), 'Mean absolute percentage error'),
# ----- 示例 #9 (Example 9) -----
    def test_metrics(self):
        """End-to-end check of all custom metrics plus a save/load round-trip.

        Trains a sin->abs model, recomputes precision/recall/F1 from the raw
        confusion-matrix counts, asserts the metrics match, then saves the
        model and reloads it with the custom objects registered.

        BUG FIX: ``custom_objects["false_positive"]`` was bound to
        ``keras_metrics.false_negative()`` — the wrong factory registered
        under the "false_positive" name, so the reloaded model's
        false-positive metric would actually count false negatives.
        """
        tp = keras_metrics.true_positive()
        tn = keras_metrics.true_negative()
        fp = keras_metrics.false_positive()
        fn = keras_metrics.false_negative()

        precision = keras_metrics.precision()
        recall = keras_metrics.recall()
        f1 = keras_metrics.f1_score()

        model = keras.models.Sequential()
        model.add(keras.layers.Activation(keras.backend.sin))
        model.add(keras.layers.Activation(keras.backend.abs))

        model.compile(optimizer="sgd",
                      loss="binary_crossentropy",
                      metrics=[tp, tn, fp, fn, precision, recall, f1])

        samples = 10000
        batch_size = 100
        lim = numpy.pi / 2

        x = numpy.random.uniform(0, lim, (samples, 1))
        y = numpy.random.randint(2, size=(samples, 1))

        model.fit(x, y, epochs=10, batch_size=batch_size)
        # evaluate() returns [loss, tp, tn, fp, fn, precision, recall, f1];
        # drop the loss.
        metrics = model.evaluate(x, y, batch_size=batch_size)[1:]

        metrics = list(map(float, metrics))

        tp_val = metrics[0]
        tn_val = metrics[1]
        fp_val = metrics[2]
        fn_val = metrics[3]

        # Rebind to the evaluated scalars (the metric objects are no longer
        # needed at this point).
        precision = metrics[4]
        recall = metrics[5]
        f1 = metrics[6]

        expected_precision = tp_val / (tp_val + fp_val)
        expected_recall = tp_val / (tp_val + fn_val)

        f1_divident = (expected_precision * expected_recall)
        f1_divisor = (expected_precision + expected_recall)
        expected_f1 = (2 * f1_divident / f1_divisor)

        # Confusion-matrix counts are non-negative and partition the samples.
        self.assertGreaterEqual(tp_val, 0.0)
        self.assertGreaterEqual(fp_val, 0.0)
        self.assertGreaterEqual(fn_val, 0.0)
        self.assertGreaterEqual(tn_val, 0.0)

        self.assertEqual(sum(metrics[0:4]), samples)

        places = 4
        self.assertAlmostEqual(expected_precision, precision, places=places)
        self.assertAlmostEqual(expected_recall, recall, places=places)
        self.assertAlmostEqual(expected_f1, f1, places=places)

        model.save('test.hdf5', overwrite=True)

        del model

        custom_objects = {
            "true_positive": keras_metrics.true_positive(),
            "true_negative": keras_metrics.true_negative(),
            # was keras_metrics.false_negative() — see docstring BUG FIX
            "false_positive": keras_metrics.false_positive(),
            "false_negative": keras_metrics.false_negative(),
            "precision": keras_metrics.precision(),
            "recall": keras_metrics.recall(),
            "f1_score": keras_metrics.f1_score(),
            "sin": keras.backend.sin,
            "abs": keras.backend.abs,
        }

        model = load_model('test.hdf5', custom_objects=custom_objects)