# Example #1
# 0
def evaluation(x, y):
    """Score predict(x) against labels y; print and return (accuracy, micro F1, macro F1)."""
    labels = tf.cast(y, dtype=tf.float32)
    preds = predict(x)

    # A fresh metric instance per call, so no state leaks between evaluations.
    accuracy_metric = tf.keras.metrics.BinaryAccuracy(name='predict_accuracy')
    accuracy = accuracy_metric(labels, preds)
    micro = micro_f1(labels, preds)
    macro = macro_f1(labels, preds)
    print("val accuracy {:.4f}, micro f1 {:.4f} macro f1 {:.4f}".format(
        accuracy.numpy(), micro.numpy(), macro.numpy()))
    return accuracy, micro, macro
def test(model, x_test, y_test):
    """Evaluate *model* on the test split: print micro/macro F1 and a per-class report.

    Args:
        model: trained Keras model exposing ``.predict()``.
        x_test: test inputs accepted by ``model.predict``.
        y_test: binary (multi-label) ground-truth matrix.
    """
    print('Start Testing......')
    y_pred = model.predict(x_test)

    y_pred = tf.constant(y_pred, tf.float32)
    y_test = tf.constant(y_test, tf.float32)

    print(micro_f1(y_test, y_pred))
    print(macro_f1(y_test, y_pred))

    # BUG FIX: classification_report requires discrete labels; feeding the raw
    # float probabilities raises ValueError ("continuous-multioutput ... not
    # supported").  Binarize at 0.5, matching the thresholding used elsewhere
    # in this file.
    y_true_bin = np.where(y_test.numpy() > 0.5, 1, 0)
    y_pred_bin = np.where(y_pred.numpy() > 0.5, 1, 0)
    print(classification_report(y_true_bin, y_pred_bin))
# Example #3
# 0
    def train_step(x, y):
        """Run one optimization step; return (micro F1, macro F1, predictions)."""
        enc_padding_mask = create_padding_mask(x)
        with tf.GradientTape() as tape:
            predictions = model(x, training=True, enc_padding_mask=enc_padding_mask)
            batch_loss = loss_object(y, predictions)
        gradients = tape.gradient(batch_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        # Fold this batch into the running epoch metrics.
        train_loss(batch_loss)
        train_accuracy(y, predictions)

        micro = micro_f1(y, predictions)
        macro = macro_f1(y, predictions)
        return micro, macro, predictions
def evaluate(test_dataset):
    """Run the transformer over *test_dataset*.

    Returns (micro F1, macro F1, sample-averaged F1, binarized targets,
    binarized predictions).
    """
    all_preds = []
    all_targets = []
    for _, (inp, tar) in tqdm(enumerate(test_dataset)):
        mask = create_padding_mask(inp)
        batch_pred = transformer(inp, False, enc_padding_mask=mask)
        all_preds.append(batch_pred)
        all_targets.append(tar)

    preds = tf.concat(all_preds, axis=0)
    targets = tf.concat(all_targets, axis=0)
    micro = micro_f1(targets, preds)
    macro = macro_f1(targets, preds)

    # Binarize at 0.5 for sklearn's sample-averaged F1.
    preds = np.where(preds > 0.5, 1, 0)
    targets = np.where(targets > 0.5, 1, 0)

    sample_f1 = f1_score(targets, preds, average='samples')
    return micro, macro, sample_f1, targets, preds
def train_step(inp, tar):
    """Single transformer training step; return (micro F1, macro F1)."""
    enc_padding_mask = create_padding_mask(inp)

    with tf.GradientTape() as tape:
        outputs = transformer(inp,
                              training=True,
                              enc_padding_mask=enc_padding_mask)
        batch_loss = loss_function(tar, outputs)
    grads = tape.gradient(batch_loss, transformer.trainable_variables)
    optimizer.apply_gradients(zip(grads, transformer.trainable_variables))

    # Update the streaming epoch metrics.
    train_loss(batch_loss)
    train_accuracy(tar, outputs)

    return micro_f1(tar, outputs), macro_f1(tar, outputs)
def predict(inp, tar, enc_padding_mask):
    """Inference-mode forward pass; return (micro F1, macro F1) against *tar*."""
    outputs = transformer(inp, False, enc_padding_mask=enc_padding_mask)
    return micro_f1(tar, outputs), macro_f1(tar, outputs)