callbacks=[TensorBoard(str(output_dir / "sparse_categorical_crossentropy"))],
)
loss, accuracy = classifier.evaluate(
    test_dataset.map(lambda x, y: (tf.image.convert_image_dtype(x, tf.float32), y)), steps=test_steps
)
results += [{"experiment": "classifier", "loss": loss, "top_score_classification_accuracy": accuracy}]
embeddings = encoder.predict(
    test_dataset.map(lambda x, y: (tf.image.convert_image_dtype(x, tf.float32), y)), steps=test_steps
)
np.savetxt(str(output_dir / "classifier_embeddings.tsv"), embeddings, delimiter="\t")
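# Sanity-check sketch: round-trip the TSV dump above (np.loadtxt mirrors np.savetxt);
# the projector-style file holds one embedding per row, tab-separated.
embeddings_check = np.loadtxt(str(output_dir / "classifier_embeddings.tsv"), delimiter="\t")
assert embeddings_check.shape == embeddings.shape  # (num_test_samples, embedding_dim)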

#%% Train
experiments = [
    {
        "name": "l2_triplet_loss",
        "kernel": Lambda(lambda x: tf.reduce_sum(tf.square(x[0] - x[1]), axis=1)),
        "loss": TripletLoss(1),
        "metrics": [classification_accuracy(ascending=True)],
    },
    {
        "name": "l1_triplet_loss",
        "kernel": Lambda(lambda x: tf.reduce_sum(tf.abs(x[0] - x[1]), axis=1)),
        "loss": TripletLoss(1),
        "metrics": [classification_accuracy(ascending=True)],
    },
    {
        "name": "cosine_similarity_triplet_loss",
        "kernel": Lambda(
            lambda x: 1 - tf.reduce_sum(tf.nn.l2_normalize(x[0], axis=1) * tf.nn.l2_normalize(x[1], axis=1), axis=1)
        ),
        "loss": TripletLoss(0.1),
        "metrics": [classification_accuracy(ascending=True)],
    },
        "name": "binary_crossentropy",
        "loss": ClippedBinaryCrossentropy(upper=0.75)
    },
    {
        "name": "class_consistency",
        "loss": ClassConsistencyLoss()
    },
]
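
# Illustrative sketch of what a pairwise kernel computes: GramMatrix is assumed to apply
# the kernel to every pair of embeddings in the batch, yielding a [batch_size, batch_size]
# matrix. Toy check of the l2 kernel with a batch of two embeddings:
toy = tf.constant([[0.0, 0.0], [3.0, 4.0]])
pair_a = tf.repeat(toy, repeats=2, axis=0)  # each embedding repeated batch_size times
pair_b = tf.tile(toy, multiples=[2, 1])  # the whole batch tiled batch_size times
squared_l2 = tf.reshape(tf.reduce_sum(tf.square(pair_a - pair_b), axis=1), [2, 2])
# squared_l2 == [[0., 25.], [25., 0.]]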
for experiment in experiments:
    pprint(experiment)
    encoder.load_weights(str(output_dir / "initial_encoder.h5"))
    # use the experiment's own kernel and metrics when given, falling back to the
    # learnt similarity head for the losses that do not define one
    model = Sequential([encoder, GramMatrix(kernel=experiment.get("kernel", "LearntNorms"))])
    model.compile(
        optimizer="adam",
        loss=experiment["loss"],
        metrics=experiment.get("metrics", [classification_accuracy(ascending=False)]),
    )
    model.fit(
        train_dataset.repeat(),
        epochs=10,
        steps_per_epoch=train_steps,
        validation_data=val_dataset.repeat(),
        validation_steps=val_steps,
        callbacks=[
            TensorBoard(str(output_dir / experiment["name"])),
            EarlyStopping(patience=10)
        ],
    )
    results += [
        {
            "name": experiment["name"],
            **dict(zip(model.metrics_names, model.evaluate(test_dataset.repeat(), steps=test_steps))),
        }
    ]

#%% Train with tensorflow_addons triplet loss
encoder.compile(
    optimizer=tf.keras.optimizers.Adam(0.001),
    loss=tfa.losses.TripletSemiHardLoss(),
)
encoder.fit(train_dataset.map(lambda x, y: (preprocessing(x), y)),
            epochs=5,
            callbacks=[TensorBoard("tfa_loss")])
encoder.evaluate(test_dataset.map(lambda x, y: (preprocessing(x), y)))
results = encoder.predict(test_dataset.map(lambda x, y: (preprocessing(x), y)))
np.savetxt("tfa_embeddings.tsv", results, delimiter="\t")

#%% Train with keras_fsl triplet loss
encoder.load_weights("initial_encoder.h5")
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss=triplet_loss(),
              metrics=[classification_accuracy(ascending=True)])
model.fit(
    train_dataset.map(lambda x, y: (preprocessing(x), get_dummies(y)[0])),
    epochs=5,
    callbacks=[TensorBoard("keras_fsl_loss")])
model.evaluate(
    test_dataset.map(lambda x, y: (preprocessing(x), get_dummies(y)[0])))
results = encoder.predict(
    test_dataset.map(lambda x, y: (preprocessing(x), get_dummies(y)[0])))
np.savetxt("keras_fsl_embeddings.tsv", results, delimiter="\t")

#%% Try with l1 norm
support_layer.kernel = Lambda(
    lambda inputs: tf.math.reduce_sum(tf.abs(inputs[0] - inputs[1]), axis=1))
encoder.load_weights("initial_encoder.h5")
# the same encoder weights are reused; only the pairwise kernel changes
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss=triplet_loss(),
              metrics=[classification_accuracy(ascending=True)])

#%% Train
experiments = [
    {"name": "binary_crossentropy", "loss": BinaryCrossentropy(upper=0.75)},
    {"name": "class_consistency", "loss": ClassConsistencyLoss()},
]
for experiment in experiments:
    pprint(experiment)
    encoder.load_weights(str(output_dir / "initial_encoder.h5"))
    model = Sequential([encoder, GramMatrix(kernel="LearntNorms")])
    model.compile(
        optimizer="adam", loss=experiment["loss"], metrics=[classification_accuracy(ascending=False)],
    )
    model.fit(
        train_dataset.repeat(),
        epochs=10,
        steps_per_epoch=train_steps,
        validation_data=val_dataset.repeat(),
        validation_steps=val_steps,
        callbacks=[TensorBoard(str(output_dir / experiment["name"])), EarlyStopping(patience=10)],
    )
    results += [
        {
            "name": experiment["name"],
            **dict(zip(model.metrics_names, model.evaluate(test_dataset.repeat(), steps=test_steps))),
        }
    ]
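
#%% Persist the benchmark table (sketch; "results.csv" is a hypothetical filename)
import pandas as pd

pd.DataFrame(results).to_csv(str(output_dir / "results.csv"), index=False)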
Example #5
    x_train, y_train = X.iloc[train], Y.iloc[train]
    x_test, y_test = X.iloc[test], Y.iloc[test]
    x_support, x_query, y_support, y_query = train_test_split(x_train,
                                                              y_train,
                                                              test_size=0.15,
                                                              stratify=y_train)
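    # The extra split presumably mirrors episodic few-shot evaluation: the support set
    # provides the labelled references the similarity model compares against, while the
    # held-out query set only validates the learnt kernel.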

    # create the model
    input_shape = x_support.shape[1]  # shape[0] is the batch dimension

    # %% Training
    encoder = Dense(73, input_shape=(input_shape,))
    support_layer = GramMatrix("DenseSigmoid")
    model = Sequential([encoder, support_layer])
    model.compile(optimizer="Adam",
                  loss=BinaryCrossentropy(),
                  metrics=[classification_accuracy(), min_eigenvalue])
    model.fit(x=x_support,
              y=y_support,
              validation_data=([x_query], [y_query]),
              epochs=5)

    # model = XGBClassifier(class_weights=[1,5])
    # model.fit(x_train, y_train)

    y_pred = model.predict_proba(x_test)[:, 1]
    print_results(y_test, y_pred)

    # y_pred = test_time_augmentation(model, x_test, n)
    # print_results(y_test, y_pred)
