Example #1
def run_kelpie(train_samples):
    print("Wrapping the original model in a Kelpie explainable model...")
    # use the original model to initialize the Kelpie explainable model
    kelpie_model = KelpieComplEx(model=original_model, dataset=kelpie_dataset, init_size=1e-3) # type: KelpieComplEx
    kelpie_model.to('cuda')

    ###########  BUILD THE OPTIMIZER AND RUN POST-TRAINING
    print("Running post-training on the Kelpie model...")
    optimizer = KelpieMultiClassNLLptimizer(model=kelpie_model,
                                            optimizer_name=args.optimizer,
                                            batch_size=args.batch_size,
                                            learning_rate=args.learning_rate,
                                            decay1=args.decay1,
                                            decay2=args.decay2,
                                            regularizer_name="N3",
                                            regularizer_weight=args.reg)

    optimizer.train(train_samples=train_samples, max_epochs=args.max_epochs)

    ###########  EXTRACT RESULTS

    print("\nExtracting results...")
    kelpie_entity_id = kelpie_dataset.kelpie_entity_id
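    # build the kelpie counterpart of the original fact: depending on the
    # chosen perspective, either the head or the tail of the original sample
    # is replaced with the kelpie entity id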
    kelpie_sample_tuple = (kelpie_entity_id, relation_id, tail_id) if args.perspective == "head" else (head_id, relation_id, kelpie_entity_id)
    kelpie_sample = numpy.array(kelpie_sample_tuple)

    ### Evaluation on original entity

    # Kelpie model on original fact
    scores, ranks, predictions = kelpie_model.predict_sample(sample=original_sample, original_mode=True)
    original_direct_score, original_inverse_score = scores[0], scores[1]
    original_head_rank, original_tail_rank = ranks[0], ranks[1]
    print("\nKelpie model on the original test fact: <%s, %s, %s>" % original_triple)
    print("\tDirect fact score: %f; Inverse fact score: %f" % (original_direct_score, original_inverse_score))
    print("\tHead Rank: %f" % original_head_rank)
    print("\tTail Rank: %f" % original_tail_rank)

    # Kelpie model on all facts containing the original entity
    print("\nKelpie model on all test facts containing the original entity:")
    mrr, h1 = KelpieEvaluator(kelpie_model).eval(samples=original_test_samples, original_mode=True)
    print("\tMRR: %f\n\tH@1: %f" % (mrr, h1))


    ### Evaluation on kelpie entity

    # results on kelpie fact
    scores, ranks, _ = kelpie_model.predict_sample(sample=kelpie_sample, original_mode=False)
    kelpie_direct_score, kelpie_inverse_score = scores[0], scores[1]
    kelpie_head_rank, kelpie_tail_rank = ranks[0], ranks[1]
    print("\nKelpie model on the Kelpie test fact: <%s, %s, %s>" % kelpie_sample_tuple)
    print("\tDirect fact score: %f; Inverse fact score: %f" % (kelpie_direct_score, kelpie_inverse_score))
    print("\tHead Rank: %f" % kelpie_head_rank)
    print("\tTail Rank: %f" % kelpie_tail_rank)

    # results on all facts containing the kelpie entity
    print("\nKelpie model on all test facts containing the Kelpie entity:")
    mrr, h1 = KelpieEvaluator(kelpie_model).eval(samples=kelpie_test_samples, original_mode=False)
    print("\tMRR: %f\n\tH@1: %f" % (mrr, h1))

    return kelpie_direct_score, kelpie_inverse_score, kelpie_head_rank, kelpie_tail_rank
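
A rough usage sketch, not part of the original listing: run_kelpie reads several module-level objects (original_model, kelpie_dataset, args, and the head/relation/tail ids), so a hypothetical call site, assuming the surrounding script has already prepared those, could look like this:

# hypothetical call site; the post-training samples are assumed to be the ones
# attached to the kelpie entity, as in the other examples on this page
direct_score, inverse_score, head_rank, tail_rank = run_kelpie(
    train_samples=kelpie_dataset.kelpie_train_samples)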
Example #2
# Original model on all facts containing the original entity
print("\nOriginal model on all test facts containing the original entity:")
mrr, h1 = Evaluator(original_model).eval(samples=original_entity_test_samples)
print("\tMRR: %f\n\tH@1: %f" % (mrr, h1))

# Kelpie model on the original fact
scores, ranks, _ = kelpie_model.predict_sample(sample=original_sample, original_mode=True)
print("\nKelpie model on original test fact: <%s, %s, %s>" % original_sample_tuple)
print("\tDirect fact score: %f; Inverse fact score: %f" % (scores[0], scores[1]))
print("\tHead Rank: %f" % ranks[0])
print("\tTail Rank: %f" % ranks[1])

# Kelpie model on all facts containing the original entity
print("\nKelpie model on all test facts containing the original entity:")
mrr, h1 = KelpieEvaluator(kelpie_model).eval(samples=original_entity_test_samples, original_mode=True)
print("\tMRR: %f\n\tH@1: %f" % (mrr, h1))


### Evaluation on kelpie entity

# results on kelpie fact
scores, ranks, _ = kelpie_model.predict_sample(sample=kelpie_sample, original_mode=False)
print("\nKelpie model on original test fact: <%s, %s, %s>" % kelpie_sample_tuple)
print("\tDirect fact score: %f; Inverse fact score: %f" % (scores[0], scores[1]))
print("\tHead Rank: %f" % ranks[0])
print("\tTail Rank: %f" % ranks[1])

# results on all facts containing the kelpie entity
print("\nKelpie model on all test facts containing the Kelpie entity:")
mrr, h1 = KelpieEvaluator(kelpie_model).eval(samples=kelpie_test_samples, original_mode=False)
print("\tMRR: %f\n\tH@1: %f" % (mrr, h1))
Example #3
        kelpie_dataset.original_test_samples)
    print(
        "Original model overall results on the original entity:\tMR: %f; H@1: %f;"
        % (mr, h1))

    # kelpie model results on kelpie fact
    scores, ranks, _ = kelpie_model.predict_sample(sample=kelpie_sample,
                                                   original_mode=False)
    print("\nKelpie model on kelpie test fact: <%s, %s, %s>" % kelpie_triple)
    print("\tDirect fact score: %f; Inverse fact score: %f" %
          (scores[0], scores[1]))
    print("\tHead Rank: %f" % ranks[0])
    print("\tTail Rank: %f" % ranks[1])

    # kelpie model overall results on the kelpie entity
    mr, h1 = KelpieEvaluator(kelpie_model).eval(
        samples=kelpie_dataset.kelpie_test_samples, original_mode=False)
    print(
        "Kelpie model overall results on the kelpie entity:\tMR: %f; H@1: %f;"
        % (mr, h1))

    print("\nComputing embedding distances...")
    with torch.no_grad():
        all_embeddings = kelpie_model.entity_embeddings.cpu().numpy()
        kelpie_embedding = all_embeddings[-1]
        original_embedding = all_embeddings[original_entity_id]

        original_distances = []
        kelpie_distances = []
        for i in range(kelpie_dataset.num_entities):
            if i != original_entity_id and i != kelpie_entity_id:
                original_distances.append(
Example #4
                                            decay1=args.decay1,
                                            decay2=args.decay2,
                                            regularizer_name="N3",
                                            regularizer_weight=args.reg)

    optimizer.train(train_samples=kelpie_dataset.kelpie_train_samples,
                    max_epochs=args.max_epochs)

    ###########  EXTRACT RESULTS

    kelpie_entity_id = kelpie_dataset.kelpie_entity_id

    ### Evaluation on original entity
    # Kelpie model on all facts containing the original entity
    print("Original entity performances (both head and tail predictions):")
    original_mrr, original_h1 = KelpieEvaluator(kelpie_model).eval(
        samples=kelpie_dataset.original_test_samples, original_mode=True)
    print("\tMRR: %f\n\tH@1: %f" % (original_mrr, original_h1))

    ### Evaluation on kelpie entity
    # results on all facts containing the kelpie entity
    print("Kelpie entity performances (both head and tail predictions):")
    kelpie_mrr, kelpie_h1 = KelpieEvaluator(kelpie_model).eval(
        samples=kelpie_dataset.kelpie_test_samples, original_mode=False)
    print("\tMRR: %f\n\tH@1: %f" % (kelpie_mrr, kelpie_h1))

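    # record one tab-separated row per explained entity: entity name, MRR and
    # H@1 on the original entity, MRR and H@1 on the kelpie entity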
    outlines.append("\t".join([
        kelpie_dataset.entity_id_2_name[original_entity_id],
        str(original_mrr),
        str(original_h1),
        str(kelpie_mrr),
        str(kelpie_h1)
    ]))