def _aligned_prediction_lists(original_predictions, original_target_index, original_target,
                              kelpie_predictions, kelpie_target_index, kelpie_target):
    """Make one pair of prediction lists comparable for Rank-Biased Overlap.

    Verifies that each prediction list actually holds its expected target
    entity at the position implied by its rank, replaces both (different)
    target ids with the same placeholder (-1) so RBO treats them as the
    same item, and truncates each list just past its own target position.

    NOTE(review): each list is cut at its *own* target rank, although the
    original inline comment spoke of cutting both at "the max rank ...
    between original and kelpie model" — confirm which is intended.

    Both input lists are mutated in place (target id -> -1), matching the
    original implementation.

    :return: (truncated original list, truncated kelpie list)
    """
    assert original_predictions[original_target_index] == original_target
    assert kelpie_predictions[kelpie_target_index] == kelpie_target

    # Mask both targets with the same sentinel value.
    original_predictions[original_target_index] = -1
    kelpie_predictions[kelpie_target_index] = -1

    return (original_predictions[:original_target_index + 1],
            kelpie_predictions[:kelpie_target_index + 1])


def rbo(original_model: ComplEx,
        kelpie_model: KelpieComplEx,
        original_samples: numpy.ndarray,
        kelpie_samples: numpy.ndarray):
    """Compare original-model and Kelpie-model predictions on paired samples.

    Runs both models on their respective sample sets — which must be
    parallel: kelpie_samples[i] is original_samples[i] with the entity id
    swapped — and computes:

    * the average Rank-Biased Overlap between the two models'
      head-prediction lists and tail-prediction lists, and
    * MRR and Hits@1 for each model over all head and tail ranks.

    :param original_model: trained ComplEx model queried on original_samples
    :param kelpie_model: Kelpie post-trained model queried on kelpie_samples
    :param original_samples: samples featuring the original entity
    :param kelpie_samples: the same samples featuring the kelpie entity
    :return: 5-tuple (average RBO, original MRR, kelpie MRR,
             original Hits@1, kelpie Hits@1)
    """
    _, original_ranks, original_predictions = original_model.predict_samples(
        original_samples)
    _, kelpie_ranks, kelpie_predictions = kelpie_model.predict_samples(
        samples=kelpie_samples, original_mode=False)

    # Each ranks entry is a (head_rank, tail_rank) pair: flatten so that
    # mrr/hits_k see one rank per individual prediction.
    all_original_ranks = [rank for pair in original_ranks for rank in pair]
    all_kelpie_ranks = [rank for pair in kelpie_ranks for rank in pair]

    original_mrr = mrr(all_original_ranks)
    kelpie_mrr = mrr(all_kelpie_ranks)
    original_h1 = hits_k(all_original_ranks, 1)
    kelpie_h1 = hits_k(all_kelpie_ranks, 1)

    rbos = []
    for i, (original_sample, kelpie_sample) in enumerate(
            zip(original_samples, kelpie_samples)):
        original_target_head, _, original_target_tail = original_sample
        kelpie_target_head, _, kelpie_target_tail = kelpie_sample

        # Ranks are 1-based: rank r means the target entity sits at
        # index r-1 of the corresponding prediction list.
        original_head_index = int(original_ranks[i][0]) - 1
        original_tail_index = int(original_ranks[i][1]) - 1
        kelpie_head_index = int(kelpie_ranks[i][0]) - 1
        kelpie_tail_index = int(kelpie_ranks[i][1]) - 1

        # predictions[i][0] are head predictions, [i][1] tail predictions.
        head_lists = _aligned_prediction_lists(
            original_predictions[i][0], original_head_index, original_target_head,
            kelpie_predictions[i][0], kelpie_head_index, kelpie_target_head)
        tail_lists = _aligned_prediction_lists(
            original_predictions[i][1], original_tail_index, original_target_tail,
            kelpie_predictions[i][1], kelpie_tail_index, kelpie_target_tail)

        rbos.append(ranking_similarity.rank_biased_overlap(*head_lists))
        rbos.append(ranking_similarity.rank_biased_overlap(*tail_lists))

    avg_rbo = float(sum(rbos)) / float(len(rbos))
    return avg_rbo, original_mrr, kelpie_mrr, original_h1, kelpie_h1
# Rank every fact of the original entity with the original model and
# every corresponding fact of the kelpie entity with the Kelpie model,
# then pair the resulting ranks fold by fold.
original_train_samples = kelpie_dataset.original_train_samples
original_valid_samples = kelpie_dataset.original_valid_samples
original_test_samples = kelpie_dataset.original_test_samples
kelpie_train_samples = kelpie_dataset.kelpie_train_samples
kelpie_valid_samples = kelpie_dataset.kelpie_valid_samples
kelpie_test_samples = kelpie_dataset.kelpie_test_samples

# Stack all three folds so a single predict_samples call ranks every fact;
# the stacked array starts with the training samples, so index i of the
# rank arrays lines up with *_train_samples[i] below.
original_direct_samples = numpy.vstack(
    (original_train_samples, original_valid_samples, original_test_samples))
kelpie_direct_samples = numpy.vstack(
    (kelpie_train_samples, kelpie_valid_samples, kelpie_test_samples))

_, original_ranks, _ = original_model.predict_samples(
    samples=original_direct_samples)
_, kelpie_ranks, _ = kelpie_model.predict_samples(
    samples=kelpie_direct_samples, original_mode=False)

train_ranks = []
valid_ranks = []  # NOTE(review): never filled in this visible excerpt
test_ranks = []   # NOTE(review): never filled in this visible excerpt

for i in range(len(original_train_samples)):
    (head_id, relation_id, tail_id) = original_train_samples[i]
    # NOTE(review): cur_original_triple / cur_kelpie_triple are built but
    # unused in this visible excerpt — possibly used further down.
    cur_original_triple = (head_id, relation_id, tail_id)
    (head_id, relation_id, tail_id) = kelpie_train_samples[i]
    cur_kelpie_triple = (head_id, relation_id, tail_id)

    original_head_rank, original_tail_rank = original_ranks[i]
    kelpie_head_rank, kelpie_tail_rank = kelpie_ranks[i]

    # Pair the two models' head ranks for this training fact.
    # NOTE(review): tail ranks are unpacked but never appended here —
    # confirm the loop body continues past this excerpt.
    train_ranks.append((original_head_rank, kelpie_head_rank))
# Report the Kelpie model's scores and ranks on the single test fact of
# interest, its aggregate metrics on all test facts of the kelpie entity,
# and start the per-fact RBO comparison against the original model.
scores, ranks, _ = kelpie_model.predict_sample(sample=kelpie_sample,
                                               original_mode=False)
print("\nKelpie model on original test fact: <%s, %s, %s>" % kelpie_sample_tuple)
print("\tDirect fact score: %f; Inverse fact score: %f" % (scores[0], scores[1]))
print("\tHead Rank: %f" % ranks[0])
print("\tTail Rank: %f" % ranks[1])

# results on all facts containing the kelpie entity
print("\nKelpie model on all test facts containing the Kelpie entity:")
# NOTE(review): these names shadow the module-level mrr helper (and h1),
# if this runs in the same scope — consider renaming.
mrr, h1 = KelpieEvaluator(kelpie_model).eval(samples=kelpie_test_samples,
                                             original_mode=False)
print("\tMRR: %f\n\tH@1: %f" % (mrr, h1))

print("\n\nComputing RBO between original model predictions and Kelpie model predictions...")
rbos = []
_, original_ranks, original_predictions = original_model.predict_samples(original_entity_test_samples)
_, kelpie_ranks, kelpie_predictions = kelpie_model.predict_samples(samples=kelpie_test_samples,
                                                                   original_mode=False)

for i in range(len(original_entity_test_samples)):
    original_sample = original_entity_test_samples[i]
    kelpie_sample = kelpie_test_samples[i]

    original_target_head, _, original_target_tail = original_sample
    kelpie_target_head, _, kelpie_target_tail = kelpie_sample

    # Ranks are 1-based; convert to 0-based indices into the prediction lists.
    original_target_head_index, original_target_tail_index = int(original_ranks[i][0]-1), int(original_ranks[i][1]-1)
    kelpie_target_head_index, kelpie_target_tail_index = int(kelpie_ranks[i][0]-1), int(kelpie_ranks[i][1]-1)

    # get head and tail predictions
    # NOTE(review): the loop body continues beyond this visible excerpt.
    original_head_predictions = original_predictions[i][0]