def test_batch_v1():
    embedding_size = 100

    triples = [('a', 'p', f'b{i}') for i in range(128)]

    entity_lst = sorted({s for (s, _, _) in triples} | {o for (_, _, o) in triples})
    predicate_lst = sorted({p for (_, p, _) in triples})

    nb_entities, nb_predicates = len(entity_lst), len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    kernel = GaussianKernel()

    entity_embeddings = nn.Embedding(nb_entities, embedding_size, sparse=True)
    predicate_embeddings = nn.Embedding(nb_predicates, embedding_size, sparse=True)

    for scoring_type in ['concat']:  # ['min', 'concat']:
        for _fact_size in range(len(triples)):
            with torch.no_grad():
                model = BatchNeuralKB(kernel=kernel, scoring_type=scoring_type)

                xp_emb = encode_relation(facts=triples,
                                         relation_embeddings=predicate_embeddings,
                                         relation_to_idx=predicate_to_index)
                xs_emb, xo_emb = encode_arguments(facts=triples,
                                                  entity_embeddings=entity_embeddings,
                                                  entity_to_idx=entity_to_index)

                batch_size = len(triples)
                fact_size = len(triples)

                # [B, F, E]
                rel_emb = xp_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
                arg1_emb = xs_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
                arg2_emb = xo_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)

                # Only the first _fact_size facts are visible to each batch element.
                nb_facts = torch.tensor([_fact_size for _ in range(batch_size)], dtype=torch.long)

                facts = [rel_emb, arg1_emb, arg2_emb]

                inf = model.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts)
                inf_np = inf.cpu().numpy()

                # The i-th query is the i-th fact: it should score ~1 iff it is among the
                # first _fact_size (visible) facts, and ~0 otherwise.
                exp = [1] * _fact_size + [0] * (batch_size - _fact_size)

                np.testing.assert_allclose(inf_np, exp, rtol=1e-2, atol=1e-2)
                print(inf_np)
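# The assertions above rely on the kernel scoring a query embedding against a stored fact
# embedding with a value close to 1 when the two match and close to 0 when they do not.
# A minimal RBF-style sketch of such a kernel is shown below; it is an illustration only,
# and the GaussianKernel class used in these tests may be parameterised differently
# (e.g. via a learnable slope / bandwidth).
class SketchGaussianKernel(nn.Module):
    def __init__(self, slope: float = 1.0):
        super().__init__()
        self.slope = slope

    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # k(x, y) = exp(-slope * ||x - y||^2): identical vectors score exactly 1.
        return torch.exp(- self.slope * torch.sum((x - y) ** 2, dim=-1))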
def test_adv_v5():
    embedding_size = 20

    torch.manual_seed(0)
    rs = np.random.RandomState(0)

    triples = [
        ('a', 'p', 'b'),
        ('a', 'p', 'd'),
        ('c', 'p', 'd'),
        ('e', 'q', 'f'),
        ('f', 'p', 'c'),
        ('x', 'r', 'y')
    ]

    entity_lst = sorted({e for (e, _, _) in triples} | {e for (_, _, e) in triples})
    predicate_lst = sorted({p for (_, p, _) in triples})

    nb_entities = len(entity_lst)
    nb_predicates = len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    with torch.no_grad():
        kernel = GaussianKernel()

        entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
        predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)

        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)

        batch_size = 6
        fact_size = rel_emb.shape[0]
        entity_size = entity_embeddings.weight.shape[0]

        rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        nb_facts = torch.tensor([fact_size for _ in range(batch_size)], dtype=torch.long)

        emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
        _nb_entities = torch.tensor([entity_size for _ in range(batch_size)], dtype=torch.long)

        facts = [rel_emb, arg1_emb, arg2_emb]

        model = BatchNeuralKB(kernel=kernel)

        indices = torch.from_numpy(np.array([predicate_to_index['p']]))
        reformulator = SymbolicReformulator(predicate_embeddings, indices)
        unary = BatchUnary(model, hops_lst=[(reformulator, True)])

        xs_np = rs.randint(nb_entities, size=batch_size)
        xp_np = rs.randint(nb_predicates, size=batch_size)
        xo_np = rs.randint(nb_entities, size=batch_size)

        xs_np[0] = entity_to_index['b']
        xp_np[0] = predicate_to_index['r']
        xo_np[0] = entity_to_index['a']

        xs_np[1] = entity_to_index['b']
        xp_np[1] = predicate_to_index['r']
        xo_np[1] = entity_to_index['b']

        xs_np[2] = entity_to_index['b']
        xp_np[2] = predicate_to_index['r']
        xo_np[2] = entity_to_index['c']

        xs_np[3] = entity_to_index['b']
        xp_np[3] = predicate_to_index['r']
        xo_np[3] = entity_to_index['d']

        xs_np[4] = entity_to_index['b']
        xp_np[4] = predicate_to_index['r']
        xo_np[4] = entity_to_index['e']

        xs_np[5] = entity_to_index['b']
        xp_np[5] = predicate_to_index['r']
        xo_np[5] = entity_to_index['f']

        xs = torch.from_numpy(xs_np)
        xp = torch.from_numpy(xp_np)
        xo = torch.from_numpy(xo_np)

        xs_emb = entity_embeddings(xs)
        xp_emb = predicate_embeddings(xp)
        xo_emb = entity_embeddings(xo)

        inf = unary.score(xp_emb, xs_emb, xo_emb,
                          facts=facts, nb_facts=nb_facts,
                          entity_embeddings=emb, nb_entities=_nb_entities)
        inf_np = inf.cpu().numpy()

        print(inf_np)
        np.testing.assert_allclose(inf_np, [1] * batch_size, rtol=1e-2, atol=1e-2)
def main(argv):
    argparser = argparse.ArgumentParser('CLUTRR', formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    train_path = test_path = "data/clutrr-emnlp/data_test/64.csv"

    argparser.add_argument('--train', action='store', type=str, default=train_path)
    argparser.add_argument('--test', nargs='+', type=str, default=[test_path])

    # model params
    argparser.add_argument('--embedding-size', '-k', action='store', type=int, default=20)
    argparser.add_argument('--k-max', '-m', action='store', type=int, default=10)
    argparser.add_argument('--max-depth', '-d', action='store', type=int, default=2)
    argparser.add_argument('--test-max-depth', action='store', type=int, default=None)

    argparser.add_argument('--hops', nargs='+', type=str, default=['2', '2', '1R'])
    argparser.add_argument('--encoder', nargs='+', type=str, default=None)

    # training params
    argparser.add_argument('--epochs', '-e', action='store', type=int, default=100)
    argparser.add_argument('--learning-rate', '-l', action='store', type=float, default=0.1)
    argparser.add_argument('--batch-size', '-b', action='store', type=int, default=8)
    argparser.add_argument('--test-batch-size', '--tb', action='store', type=int, default=None)

    argparser.add_argument('--optimizer', '-o', action='store', type=str, default='adagrad',
                           choices=['adagrad', 'adam', 'sgd'])

    argparser.add_argument('--seed', action='store', type=int, default=0)

    argparser.add_argument('--evaluate-every', '-V', action='store', type=int, default=1)
    argparser.add_argument('--evaluate-every-batches', action='store', type=int, default=None)

    argparser.add_argument('--N2', action='store', type=float, default=None)
    argparser.add_argument('--N3', action='store', type=float, default=None)
    argparser.add_argument('--entropy', '-E', action='store', type=float, default=None)

    argparser.add_argument('--scoring-type', '-s', action='store', type=str, default='concat',
                           choices=['concat', 'min'])
    argparser.add_argument('--tnorm', '-t', action='store', type=str, default='min',
                           choices=['min', 'prod', 'mean'])
    argparser.add_argument('--reformulator', '-r', action='store', type=str, default='linear',
                           choices=['static', 'linear', 'attentive', 'memory', 'ntp'])
    argparser.add_argument('--nb-rules', '-R', action='store', type=int, default=4)

    argparser.add_argument('--gradient-accumulation-steps', action='store', type=int, default=1)

    argparser.add_argument('--GNTP-R', action='store', type=int, default=None)

    argparser.add_argument('--slope', '-S', action='store', type=float, default=None)
    argparser.add_argument('--init-size', '-i', action='store', type=float, default=1.0)

    argparser.add_argument('--init', action='store', type=str, default='uniform')
    argparser.add_argument('--ref-init', action='store', type=str, default='random')

    argparser.add_argument('--fix-relations', '--FR', action='store_true', default=False)
    argparser.add_argument('--start-simple', action='store', type=int, default=None)

    argparser.add_argument('--debug', '-D', action='store_true', default=False)

    argparser.add_argument('--load', action='store', type=str, default=None)
    argparser.add_argument('--save', action='store', type=str, default=None)

    argparser.add_argument('--predicate', action='store_true', default=False)

    args = argparser.parse_args(argv)

    train_path = args.train
    test_paths = args.test

    embedding_size = args.embedding_size
    k_max = args.k_max
    max_depth = args.max_depth
    test_max_depth = args.test_max_depth

    hops_str = args.hops
    encoder_str = args.encoder

    nb_epochs = args.epochs
    learning_rate = args.learning_rate
    batch_size = args.batch_size
    test_batch_size = batch_size if args.test_batch_size is None else args.test_batch_size

    optimizer_name = args.optimizer
    seed = args.seed

    evaluate_every = args.evaluate_every
    evaluate_every_batches = args.evaluate_every_batches

    N2_weight = args.N2
    N3_weight = args.N3
    entropy_weight = args.entropy

    scoring_type = args.scoring_type
    tnorm_name = args.tnorm
    reformulator_name = args.reformulator
    nb_rules = args.nb_rules

    nb_gradient_accumulation_steps = args.gradient_accumulation_steps

    gntp_R = args.GNTP_R

    slope = args.slope
    init_size = args.init_size

    init_type = args.init
    ref_init_type = args.ref_init

    is_fixed_relations = args.fix_relations
    start_simple = args.start_simple

    is_debug = args.debug

    load_path = args.load
    save_path = args.save

    is_predicate = args.predicate

    np.random.seed(seed)
    random_state = np.random.RandomState(seed)
    torch.manual_seed(seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f'Device: {device}')

    if torch.cuda.is_available():
        torch.set_default_tensor_type(torch.cuda.FloatTensor)

    data = Data(train_path=train_path, test_paths=test_paths)
    entity_lst, relation_lst = data.entity_lst, data.relation_lst
    predicate_lst = data.predicate_lst

    relation_to_predicate = data.relation_to_predicate

    test_relation_lst = ["aunt", "brother", "daughter", "daughter-in-law", "father", "father-in-law",
                         "granddaughter", "grandfather", "grandmother", "grandson", "mother",
                         "mother-in-law", "nephew", "niece", "sister", "son", "son-in-law", "uncle"]
    test_predicate_lst = sorted({relation_to_predicate[r] for r in test_relation_lst})

    nb_entities = len(entity_lst)
    nb_relations = len(relation_lst)
    nb_predicates = len(predicate_lst)

    entity_to_idx = {e: i for i, e in enumerate(entity_lst)}
    relation_to_idx = {r: i for i, r in enumerate(relation_lst)}
    predicate_to_idx = {p: i for i, p in enumerate(predicate_lst)}

    kernel = GaussianKernel(slope=slope)

    entity_embeddings = nn.Embedding(nb_entities, embedding_size, sparse=True).to(device)
    nn.init.uniform_(entity_embeddings.weight, -1.0, 1.0)
    entity_embeddings.requires_grad = False

    relation_embeddings = nn.Embedding(nb_relations if not is_predicate else nb_predicates,
                                       embedding_size, sparse=True).to(device)

    if is_fixed_relations is True:
        relation_embeddings.requires_grad = False

    if init_type in {'uniform'}:
        nn.init.uniform_(relation_embeddings.weight, -1.0, 1.0)

    relation_embeddings.weight.data *= init_size

    model = BatchNeuralKB(kernel=kernel, scoring_type=scoring_type).to(device)
    memory: Dict[int, MemoryReformulator.Memory] = {}

    def make_hop(s: str) -> Tuple[BaseReformulator, bool]:
        nonlocal memory
        if s.isdigit():
            nb_hops, is_reversed = int(s), False
        else:
            nb_hops, is_reversed = int(s[:-1]), True
        res = None
        if reformulator_name in {'static'}:
            res = StaticReformulator(nb_hops, embedding_size, init_name=ref_init_type)
        elif reformulator_name in {'linear'}:
            res = LinearReformulator(nb_hops, embedding_size, init_name=ref_init_type)
        elif reformulator_name in {'attentive'}:
            res = AttentiveReformulator(nb_hops, relation_embeddings, init_name=ref_init_type)
        elif reformulator_name in {'memory'}:
            if nb_hops not in memory:
                memory[nb_hops] = MemoryReformulator.Memory(nb_hops, nb_rules, embedding_size,
                                                            init_name=ref_init_type)
            res = MemoryReformulator(memory[nb_hops])
        elif reformulator_name in {'ntp'}:
            res = NTPReformulator(nb_hops=nb_hops, embedding_size=embedding_size,
                                  kernel=kernel, init_name=ref_init_type)
        assert res is not None
        return res.to(device), is_reversed

    hops_lst = [make_hop(s) for s in hops_str]

    encoder_model = model
    if encoder_str is not None:
        encoder_lst = [make_hop(s) for s in encoder_str]
        encoder_model = BatchHoppy(model=model, k=k_max, depth=1, tnorm_name=tnorm_name,
                                   hops_lst=encoder_lst, R=gntp_R).to(device)

    hoppy = BatchHoppy(model=encoder_model, k=k_max, depth=max_depth, tnorm_name=tnorm_name,
                       hops_lst=hops_lst, R=gntp_R).to(device)

    def scoring_function(instances_batch: List[Instance],
                         relation_lst: List[str],
                         is_train: bool = False,
                         _depth: Optional[int] = None) -> Tuple[Tensor, List[Tensor]]:
        rel_emb_lst: List[Tensor] = []
        arg1_emb_lst: List[Tensor] = []
        arg2_emb_lst: List[Tensor] = []

        story_rel_lst: List[Tensor] = []
        story_arg1_lst: List[Tensor] = []
        story_arg2_lst: List[Tensor] = []

        embeddings_lst: List[Tensor] = []
        label_lst: List[int] = []

        for i, instance in enumerate(instances_batch):
            if is_predicate is True:
                def _convert_fact(fact: Fact) -> Fact:
                    _s, _r, _o = fact
                    return _s, relation_to_predicate[_r], _o

                new_story = [_convert_fact(f) for f in instance.story]
                new_target = _convert_fact(instance.target)
                instance = Instance(new_story, new_target, instance.nb_nodes)

            story, target = instance.story, instance.target
            s, r, o = target

            story_rel = encode_relation(story, relation_embeddings.weight,
                                        predicate_to_idx if is_predicate else relation_to_idx, device)
            story_arg1, story_arg2 = encode_arguments(story, entity_embeddings.weight, entity_to_idx, device)

            embeddings = encode_entities(story, entity_embeddings.weight, entity_to_idx, device)

            target_lst: List[Tuple[str, str, str]] = [(s, x, o) for x in relation_lst]

            assert len(target_lst) == len(test_predicate_lst if is_predicate else test_relation_lst)

            # true_predicate = rel_to_predicate[r]
            # label_lst += [int(true_predicate == rel_to_predicate[r]) for r in relation_lst]

            label_lst += [int(tr == r) for tr in relation_lst]

            rel_emb = encode_relation(target_lst, relation_embeddings.weight,
                                      predicate_to_idx if is_predicate else relation_to_idx, device)
            arg1_emb, arg2_emb = encode_arguments(target_lst, entity_embeddings.weight, entity_to_idx, device)

            batch_size = rel_emb.shape[0]
            fact_size = story_rel.shape[0]
            entity_size = embeddings.shape[0]

            # [B, E]
            rel_emb_lst += [rel_emb]
            arg1_emb_lst += [arg1_emb]
            arg2_emb_lst += [arg2_emb]

            # [B, F, E]
            story_rel_lst += [story_rel.view(1, fact_size, -1).repeat(batch_size, 1, 1)]
            story_arg1_lst += [story_arg1.view(1, fact_size, -1).repeat(batch_size, 1, 1)]
            story_arg2_lst += [story_arg2.view(1, fact_size, -1).repeat(batch_size, 1, 1)]

            # [B, N, E]
            embeddings_lst += [embeddings.view(1, entity_size, -1).repeat(batch_size, 1, 1)]

        def cat_pad(t_lst: List[Tensor]) -> Tuple[Tensor, Tensor]:
            lengths: List[int] = [t.shape[1] for t in t_lst]
            max_len: int = max(lengths)

            def my_pad(_t: Tensor, pad: List[int]) -> Tensor:
                return torch.transpose(F.pad(torch.transpose(_t, 1, 2), pad=pad), 1, 2)

            res_t: Tensor = torch.cat([my_pad(t, pad=[0, max_len - lengths[i]]) for i, t in enumerate(t_lst)], dim=0)
            res_l: Tensor = torch.tensor([t.shape[1] for t in t_lst for _ in range(t.shape[0])],
                                         dtype=torch.long, device=device)
            return res_t, res_l

        rel_emb = torch.cat(rel_emb_lst, dim=0)
        arg1_emb = torch.cat(arg1_emb_lst, dim=0)
        arg2_emb = torch.cat(arg2_emb_lst, dim=0)

        story_rel, nb_facts = cat_pad(story_rel_lst)
        story_arg1, _ = cat_pad(story_arg1_lst)
        story_arg2, _ = cat_pad(story_arg2_lst)

        facts = [story_rel, story_arg1, story_arg2]

        _embeddings, nb_embeddings = cat_pad(embeddings_lst)

        max_depth_ = hoppy.depth
        if not is_train and test_max_depth is not None:
            hoppy.depth = test_max_depth

        if _depth is not None:
            hoppy.depth = _depth

        scores = hoppy.score(rel_emb, arg1_emb, arg2_emb, facts, nb_facts, _embeddings, nb_embeddings)

        if not is_train and test_max_depth is not None:
            hoppy.depth = max_depth_

        if _depth is not None:
            hoppy.depth = max_depth_

        return scores, [rel_emb, arg1_emb, arg2_emb]

    def evaluate(instances: List[Instance], path: str, sample_size: Optional[int] = None) -> float:
        res = 0.0
        if len(instances) > 0:
            res = accuracy(scoring_function=scoring_function,
                           instances=instances,
                           sample_size=sample_size,
                           relation_lst=test_predicate_lst if is_predicate else test_relation_lst,
                           batch_size=test_batch_size,
                           relation_to_predicate=relation_to_predicate if is_predicate else None,
                           is_debug=is_debug)
            logger.info(f'Test Accuracy on {path}: {res:.6f}')
        return res

    loss_function = nn.BCELoss()

    N2_reg = N2() if N2_weight is not None else None
    N3_reg = N3() if N3_weight is not None else None

    entropy_reg = Entropy(use_logits=False) if entropy_weight is not None else None

    params_lst = [p for p in hoppy.parameters() if not torch.equal(p, entity_embeddings.weight)]

    if is_fixed_relations is False:
        params_lst += relation_embeddings.parameters()

    params = nn.ParameterList(params_lst).to(device)

    if load_path is not None:
        model.load_state_dict(torch.load(load_path))

    for tensor in params_lst:
        logger.info(f'\t{tensor.size()}\t{tensor.device}')

    optimizer_factory = {
        'adagrad': lambda arg: optim.Adagrad(arg, lr=learning_rate),
        'adam': lambda arg: optim.Adam(arg, lr=learning_rate),
        'sgd': lambda arg: optim.SGD(arg, lr=learning_rate)
    }

    assert optimizer_name in optimizer_factory
    optimizer = optimizer_factory[optimizer_name](params)

    global_step = 0

    for epoch_no in range(1, nb_epochs + 1):
        training_set, is_simple = data.train, False
        if start_simple is not None and epoch_no <= start_simple:
            training_set = [ins for ins in training_set if len(ins.story) == 2]
            is_simple = True
            logger.info(f'{len(data.train)} → {len(training_set)}')

        batcher = Batcher(batch_size=batch_size, nb_examples=len(training_set), nb_epochs=1,
                          random_state=random_state)
        nb_batches = len(batcher.batches)

        epoch_loss_values = []
        for batch_no, (batch_start, batch_end) in enumerate(batcher.batches, start=1):
            global_step += 1

            indices_batch = batcher.get_batch(batch_start, batch_end)
            instances_batch = [training_set[i] for i in indices_batch]

            if is_predicate is True:
                label_lst: List[int] = [int(relation_to_predicate[ins.target[1]] == tp)
                                        for ins in instances_batch
                                        for tp in test_predicate_lst]
            else:
                label_lst: List[int] = [int(ins.target[1] == tr)
                                        for ins in instances_batch
                                        for tr in test_relation_lst]

            labels = torch.tensor(label_lst, dtype=torch.float32, device=device)

            scores, query_emb_lst = scoring_function(instances_batch,
                                                     test_predicate_lst if is_predicate else test_relation_lst,
                                                     is_train=True,
                                                     _depth=1 if is_simple else None)

            loss = loss_function(scores, labels)

            factors = [hoppy.factor(e) for e in query_emb_lst]

            loss += N2_weight * N2_reg(factors) if N2_weight is not None else 0.0
            loss += N3_weight * N3_reg(factors) if N3_weight is not None else 0.0

            if entropy_weight is not None:
                for hop, _ in hops_lst:
                    attn_logits = hop.projection(query_emb_lst[0])
                    attention = torch.softmax(attn_logits, dim=1)
                    loss += entropy_weight * entropy_reg([attention])

            loss_value = loss.item()
            epoch_loss_values += [loss_value]

            if nb_gradient_accumulation_steps > 1:
                loss = loss / nb_gradient_accumulation_steps

            loss.backward()

            if nb_gradient_accumulation_steps == 1 or global_step % nb_gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()

            logger.info(f'Epoch {epoch_no}/{nb_epochs}\tBatch {batch_no}/{nb_batches}\tLoss {loss_value:.4f}')

            if evaluate_every_batches is not None:
                if global_step % evaluate_every_batches == 0:
                    for test_path in test_paths:
                        evaluate(instances=data.test[test_path], path=test_path)

        if epoch_no % evaluate_every == 0:
            for test_path in test_paths:
                evaluate(instances=data.test[test_path], path=test_path)

            if is_debug is True:
                with torch.no_grad():
                    show_rules(model=hoppy, kernel=kernel,
                               relation_embeddings=relation_embeddings,
                               relation_to_idx=predicate_to_idx if is_predicate else relation_to_idx,
                               device=device)

        loss_mean, loss_std = np.mean(epoch_loss_values), np.std(epoch_loss_values)

        slope = kernel.slope.item() if isinstance(kernel.slope, Tensor) else kernel.slope
        logger.info(f'Epoch {epoch_no}/{nb_epochs}\tLoss {loss_mean:.4f} ± {loss_std:.4f}\tSlope {slope:.4f}')

    import time
    start = time.time()

    for test_path in test_paths:
        evaluate(instances=data.test[test_path], path=test_path)

    end = time.time()
    logger.info(f'Evaluation took {end - start} seconds.')

    if save_path is not None:
        torch.save(model.state_dict(), save_path)

    logger.info("Training finished")
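# main(argv) above is only a function definition. A minimal entry point is sketched below,
# assuming the script is meant to be invoked from the command line; the exact logging setup
# is an assumption, not taken from this file.
if __name__ == '__main__':
    import sys
    import logging

    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    main(sys.argv[1:])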
def test_adv_v1():
    embedding_size = 20

    torch.manual_seed(0)
    rs = np.random.RandomState(0)

    triples = [
        ('a', 'p', 'b'), ('b', 'q', 'c'), ('c', 'p', 'd'), ('d', 'q', 'e'),
        ('e', 'p', 'f'), ('f', 'q', 'g'), ('g', 'p', 'h'), ('h', 'q', 'i'),
        ('i', 'p', 'l'), ('l', 'q', 'm'), ('m', 'p', 'n'), ('n', 'q', 'o'),
        ('o', 'p', 'p'), ('p', 'q', 'q'), ('q', 'p', 'r'), ('r', 'q', 's'),
        ('s', 'p', 't'), ('t', 'q', 'u'), ('u', 'p', 'v'), ('v', 'q', 'w'),
        ('x', 'r', 'y'), ('x', 's', 'y')
    ]

    entity_lst = sorted({e for (e, _, _) in triples} | {e for (_, _, e) in triples})
    predicate_lst = sorted({p for (_, p, _) in triples})

    nb_entities = len(entity_lst)
    nb_predicates = len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    with torch.no_grad():
        kernel = GaussianKernel()

        entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
        predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)

        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)

        batch_size = 12
        fact_size = rel_emb.shape[0]
        entity_size = entity_embeddings.weight.shape[0]

        rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        nb_facts = torch.tensor([fact_size for _ in range(batch_size)], dtype=torch.long)

        emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
        _nb_entities = torch.tensor([entity_size for _ in range(batch_size)], dtype=torch.long)

        facts = [rel_emb, arg1_emb, arg2_emb]

        model = BatchNeuralKB(kernel=kernel)

        indices = torch.from_numpy(np.array([predicate_to_index['p'], predicate_to_index['q']]))
        reformulator = SymbolicReformulator(predicate_embeddings, indices)

        hoppy0 = BatchHoppy(model, hops_lst=[(reformulator, False)], depth=0)
        hoppy1 = BatchHoppy(model, hops_lst=[(reformulator, False)], depth=1)
        hoppy2 = BatchHoppy(model, hops_lst=[(reformulator, False)], depth=2)
        hoppy3 = BatchHoppy(model, hops_lst=[(reformulator, False)], depth=3)

        xs_np = rs.randint(nb_entities, size=batch_size)
        xp_np = rs.randint(nb_predicates, size=batch_size)
        xo_np = rs.randint(nb_entities, size=batch_size)

        xs_np[0] = entity_to_index['a']
        xp_np[0] = predicate_to_index['r']
        xo_np[0] = entity_to_index['c']

        xs_np[1] = entity_to_index['a']
        xp_np[1] = predicate_to_index['r']
        xo_np[1] = entity_to_index['e']

        xs_np[2] = entity_to_index['a']
        xp_np[2] = predicate_to_index['r']
        xo_np[2] = entity_to_index['g']

        xs_np[3] = entity_to_index['a']
        xp_np[3] = predicate_to_index['r']
        xo_np[3] = entity_to_index['i']

        xs_np[4] = entity_to_index['a']
        xp_np[4] = predicate_to_index['r']
        xo_np[4] = entity_to_index['m']

        xs_np[5] = entity_to_index['a']
        xp_np[5] = predicate_to_index['r']
        xo_np[5] = entity_to_index['o']

        xs_np[6] = entity_to_index['a']
        xp_np[6] = predicate_to_index['r']
        xo_np[6] = entity_to_index['q']

        xs_np[7] = entity_to_index['a']
        xp_np[7] = predicate_to_index['r']
        xo_np[7] = entity_to_index['s']

        xs_np[8] = entity_to_index['a']
        xp_np[8] = predicate_to_index['r']
        xo_np[8] = entity_to_index['u']

        xs = torch.from_numpy(xs_np)
        xp = torch.from_numpy(xp_np)
        xo = torch.from_numpy(xo_np)

        xs_emb = entity_embeddings(xs)
        xp_emb = predicate_embeddings(xp)
        xo_emb = entity_embeddings(xo)

        inf0 = hoppy0.score(xp_emb, xs_emb, xo_emb,
                            facts=facts, nb_facts=nb_facts,
                            entity_embeddings=emb, nb_entities=_nb_entities)
        inf1 = hoppy1.score(xp_emb, xs_emb, xo_emb,
                            facts=facts, nb_facts=nb_facts,
                            entity_embeddings=emb, nb_entities=_nb_entities)
        inf2 = hoppy2.score(xp_emb, xs_emb, xo_emb,
                            facts=facts, nb_facts=nb_facts,
                            entity_embeddings=emb, nb_entities=_nb_entities)
        inf3 = hoppy3.score(xp_emb, xs_emb, xo_emb,
                            facts=facts, nb_facts=nb_facts,
                            entity_embeddings=emb, nb_entities=_nb_entities)

        inf0_np = inf0.cpu().numpy()
        inf1_np = inf1.cpu().numpy()
        inf2_np = inf2.cpu().numpy()
        inf3_np = inf3.cpu().numpy()

        np.testing.assert_allclose(inf0_np, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf1_np, [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf2_np, [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf3_np, [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)

        print(inf3_np)
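# In the depth-d checks above, BatchHoppy is expected to prove r(a, X) by recursively
# reformulating each goal into the chain (p, q), so the reachable path length roughly doubles
# with every extra level of depth (hence the [1, 0, ...], [1, 1, 0, ...], ... patterns).
# A minimal sketch of what such a symbolic reformulator could look like is given below;
# the exact behaviour and interface of the real SymbolicReformulator are assumptions here.
class SketchSymbolicReformulator(nn.Module):
    def __init__(self, predicate_embeddings: nn.Embedding, indices: torch.Tensor):
        super().__init__()
        self.predicate_embeddings = predicate_embeddings
        self.indices = indices

    def forward(self, rel_emb: torch.Tensor):
        batch_size = rel_emb.shape[0]
        # One [B, E] sub-goal embedding per stored predicate index, independent of the incoming goal.
        return [self.predicate_embeddings(idx.view(1)).repeat(batch_size, 1) for idx in self.indices]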
def test_clutrr_v1():
    embedding_size = 50

    triples, hops = [], []

    for i in range(16):
        triples += [(f'a{i}', 'p', f'b{i}'), (f'b{i}', 'q', f'c{i}')]
        hops += [(f'a{i}', 'r', f'c{i}')]

    entity_lst = sorted({s for (s, _, _) in triples + hops} | {o for (_, _, o) in triples + hops})
    predicate_lst = sorted({p for (_, p, _) in triples + hops})

    nb_entities, nb_predicates = len(entity_lst), len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    kernel = GaussianKernel()

    entity_embeddings = nn.Embedding(nb_entities, embedding_size, sparse=True)
    predicate_embeddings = nn.Embedding(nb_predicates, embedding_size, sparse=True)

    for scoring_type in ['concat']:  # ['min', 'concat']:
        model = BatchNeuralKB(kernel=kernel, scoring_type=scoring_type)

        for s in entity_lst:
            for p in predicate_lst:
                for o in entity_lst:
                    xs_np = np.array([entity_to_index[s]])
                    xp_np = np.array([predicate_to_index[p]])
                    xo_np = np.array([entity_to_index[o]])

                    with torch.no_grad():
                        xs = torch.from_numpy(xs_np)
                        xp = torch.from_numpy(xp_np)
                        xo = torch.from_numpy(xo_np)

                        xs_emb = entity_embeddings(xs)
                        xp_emb = predicate_embeddings(xp)
                        xo_emb = entity_embeddings(xo)

                        rel_emb = encode_relation(facts=triples,
                                                  relation_embeddings=predicate_embeddings,
                                                  relation_to_idx=predicate_to_index)
                        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                                              entity_embeddings=entity_embeddings,
                                                              entity_to_idx=entity_to_index)

                        batch_size = xp.shape[0]
                        fact_size = rel_emb.shape[0]

                        rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
                        arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
                        arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
                        nb_facts = torch.tensor([fact_size for _ in range(batch_size)], dtype=torch.long)

                        facts = [rel_emb, arg1_emb, arg2_emb]

                        inf = model.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts)
                        inf_np = inf.cpu().numpy()

                        # Facts in the KB should score close to 1, everything else close to 0.
                        assert inf_np[0] > 0.95 if (s, p, o) in triples else inf_np[0] < 0.01
def test_clutrr_v4():
    embedding_size = 50

    rs = np.random.RandomState(0)

    for _ in range(4):
        with torch.no_grad():
            triples = [
                ('a', 'p', 'b'),
                ('c', 'q', 'd'),
                ('e', 'q', 'f'),
                ('g', 'q', 'h'),
                ('i', 'q', 'l'),
                ('m', 'q', 'n'),
                ('o', 'q', 'p'),
                ('q', 'q', 'r'),
                ('s', 'q', 't'),
                ('u', 'q', 'v')
            ]

            entity_lst = sorted({s for (s, _, _) in triples} | {o for (_, _, o) in triples})
            predicate_lst = sorted({p for (_, p, _) in triples})

            nb_entities, nb_predicates = len(entity_lst), len(predicate_lst)

            entity_to_index = {e: i for i, e in enumerate(entity_lst)}
            predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

            kernel = GaussianKernel()

            entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
            predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)

            rel_emb = encode_relation(facts=triples,
                                      relation_embeddings=predicate_embeddings,
                                      relation_to_idx=predicate_to_index)
            arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                                  entity_embeddings=entity_embeddings,
                                                  entity_to_idx=entity_to_index)

            batch_size = 16
            fact_size = rel_emb.shape[0]
            entity_size = entity_embeddings.weight.shape[0]

            rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
            arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
            arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
            nb_facts = torch.tensor([fact_size for _ in range(batch_size)], dtype=torch.long)

            emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
            _nb_entities = torch.tensor([entity_size for _ in range(batch_size)], dtype=torch.long)

            facts = [rel_emb, arg1_emb, arg2_emb]

            model = BatchNeuralKB(kernel=kernel)

            xs_np = rs.randint(nb_entities, size=batch_size)
            xp_np = rs.randint(nb_predicates, size=batch_size)
            xo_np = rs.randint(nb_entities, size=batch_size)

            xs_np[0] = 0
            xp_np[0] = 0
            xo_np[0] = 1

            xs_np[1] = 2
            xp_np[1] = 1
            xo_np[1] = 3

            xs = torch.from_numpy(xs_np)
            xp = torch.from_numpy(xp_np)
            xo = torch.from_numpy(xo_np)

            xs_emb = entity_embeddings(xs)
            xp_emb = predicate_embeddings(xp)
            xo_emb = entity_embeddings(xo)

            print('xp_emb', xp_emb.shape)

            scores_sp, scores_po = model.forward(xp_emb, xs_emb, xo_emb,
                                                 facts=facts, nb_facts=nb_facts,
                                                 entity_embeddings=emb, nb_entities=_nb_entities)

            inf = model.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts)

            assert inf[0] > 0.9
            assert inf[1] > 0.9

            inf = inf.cpu().numpy()
            scores_sp = scores_sp.cpu().numpy()
            scores_po = scores_po.cpu().numpy()

            print('AAA', inf)
            print('BBB', scores_sp)
def test_clutrr_v3():
    embedding_size = 20
    batch_size = 8

    torch.manual_seed(0)

    triples, hops = [], []

    for i in range(32):
        triples += [(f'a{i}', 'p', f'b{i}'), (f'b{i}', 'q', f'c{i}')]
        hops += [(f'a{i}', 'r', f'c{i}')]

    entity_lst = sorted({s for (s, _, _) in triples + hops} | {o for (_, _, o) in triples + hops})
    predicate_lst = sorted({p for (_, p, _) in triples + hops})

    nb_entities, nb_predicates = len(entity_lst), len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    kernel = GaussianKernel(slope=None)

    entity_embeddings = nn.Embedding(nb_entities, embedding_size, sparse=True)
    predicate_embeddings = nn.Embedding(nb_predicates, embedding_size, sparse=True)

    # _hops = LinearReformulator(2, embedding_size)
    _hops = AttentiveReformulator(2, predicate_embeddings)

    model = BatchNeuralKB(kernel=kernel, scoring_type='concat')
    hoppy = BatchHoppy(model, hops_lst=[(_hops, False)], depth=1)

    params = [p for p in hoppy.parameters()
              if not torch.equal(p, entity_embeddings.weight)
              and not torch.equal(p, predicate_embeddings.weight)]

    for tensor in params:
        print(f'\t{tensor.size()}\t{tensor.device}')

    loss_function = nn.BCELoss()
    optimizer = optim.Adagrad(params, lr=0.1)

    hops_data = []
    for i in range(64):
        hops_data += hops

    batches = make_batches(len(hops_data), batch_size)

    rs = np.random.RandomState()

    c, d = 0.0, 0.0

    p_emb = predicate_embeddings(torch.from_numpy(np.array([predicate_to_index['p']])))
    q_emb = predicate_embeddings(torch.from_numpy(np.array([predicate_to_index['q']])))

    for batch_start, batch_end in batches:
        hops_batch = hops_data[batch_start:batch_end]

        s_lst = [s for (s, _, _) in hops_batch]
        p_lst = [p for (_, p, _) in hops_batch]
        o_lst = [o for (_, _, o) in hops_batch]

        nb_positives = len(s_lst)
        nb_negatives = nb_positives * 3

        s_n_lst = rs.permutation(nb_entities)[:nb_negatives].tolist()
        nb_negatives = len(s_n_lst)
        o_n_lst = rs.permutation(nb_entities)[:nb_negatives].tolist()
        p_n_lst = list(islice(cycle(p_lst), nb_negatives))

        xs_np = np.array([entity_to_index[s] for s in s_lst] + s_n_lst)
        xp_np = np.array([predicate_to_index[p] for p in p_lst + p_n_lst])
        xo_np = np.array([entity_to_index[o] for o in o_lst] + o_n_lst)

        xs_emb = entity_embeddings(torch.from_numpy(xs_np))
        xp_emb = predicate_embeddings(torch.from_numpy(xp_np))
        xo_emb = entity_embeddings(torch.from_numpy(xo_np))

        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)

        batch_size = xp_emb.shape[0]
        fact_size = rel_emb.shape[0]
        entity_size = entity_embeddings.weight.shape[0]

        rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        nb_facts = torch.tensor([fact_size for _ in range(batch_size)], dtype=torch.long)

        emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
        _nb_entities = torch.tensor([entity_size for _ in range(batch_size)], dtype=torch.long)

        facts = [rel_emb, arg1_emb, arg2_emb]

        scores = hoppy.score(xp_emb, xs_emb, xo_emb,
                             facts=facts, nb_facts=nb_facts,
                             entity_embeddings=emb, nb_entities=_nb_entities)

        labels_np = np.zeros(xs_np.shape[0])
        labels_np[:nb_positives] = 1
        labels = torch.from_numpy(labels_np).float()

        # for s, p, o, l in zip(xs_np, xp_np, xo_np, labels):
        #     print(s, p, o, l)

        loss = loss_function(scores, labels)

        hop_1_emb = hoppy.hops_lst[0][0].hops_lst[0](xp_emb)
        hop_2_emb = hoppy.hops_lst[0][0].hops_lst[1](xp_emb)

        c = kernel.pairwise(p_emb, hop_1_emb).mean().cpu().detach().numpy()
        d = kernel.pairwise(q_emb, hop_2_emb).mean().cpu().detach().numpy()

        print(c, d)

        loss.backward()

        optimizer.step()
        optimizer.zero_grad()

    assert c > 0.95 and d > 0.95
def test_clutrr_v2():
    embedding_size = 20

    triples, hops = [], []
    xxx = []

    for i in range(16):
        triples += [(f'a{i}', 'p', f'b{i}'), (f'b{i}', 'q', f'c{i}')]
        hops += [(f'a{i}', 'r', f'c{i}')]
        xxx += [(f'a{i}', 'p', f'c{i}'), (f'a{i}', 'q', f'c{i}'), (f'a{i}', 'r', f'c{i}')]

    entity_lst = sorted({s for (s, _, _) in triples + hops} | {o for (_, _, o) in triples + hops})
    predicate_lst = sorted({p for (_, p, _) in triples + hops})

    nb_entities, nb_predicates = len(entity_lst), len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    kernel = GaussianKernel()

    entity_embeddings = nn.Embedding(nb_entities, embedding_size, sparse=True)
    predicate_embeddings = nn.Embedding(nb_predicates, embedding_size, sparse=True)

    for scoring_type in ['concat']:  # ['min', 'concat']:
        model = BatchNeuralKB(kernel=kernel, scoring_type=scoring_type)

        indices = torch.from_numpy(np.array([predicate_to_index['p'], predicate_to_index['q']]))
        _hops = SymbolicReformulator(predicate_embeddings, indices)
        hoppy = BatchHoppy(model, hops_lst=[(_hops, False)], depth=1)

        for s in entity_lst:
            for p in predicate_lst:
                for o in entity_lst:
                    xs_np = np.array([entity_to_index[s]])
                    xp_np = np.array([predicate_to_index[p]])
                    xo_np = np.array([entity_to_index[o]])

                    with torch.no_grad():
                        xs = torch.from_numpy(xs_np)
                        xp = torch.from_numpy(xp_np)
                        xo = torch.from_numpy(xo_np)

                        xs_emb = entity_embeddings(xs)
                        xp_emb = predicate_embeddings(xp)
                        xo_emb = entity_embeddings(xo)

                        rel_emb = encode_relation(facts=triples,
                                                  relation_embeddings=predicate_embeddings,
                                                  relation_to_idx=predicate_to_index)
                        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                                              entity_embeddings=entity_embeddings,
                                                              entity_to_idx=entity_to_index)

                        batch_size = xp.shape[0]
                        fact_size = rel_emb.shape[0]
                        entity_size = entity_embeddings.weight.shape[0]

                        rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
                        arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
                        arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
                        nb_facts = torch.tensor([fact_size for _ in range(batch_size)], dtype=torch.long)

                        emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
                        _nb_entities = torch.tensor([entity_size for _ in range(batch_size)], dtype=torch.long)

                        facts = [rel_emb, arg1_emb, arg2_emb]

                        inf = hoppy.score(xp_emb, xs_emb, xo_emb,
                                          facts=facts, nb_facts=nb_facts,
                                          entity_embeddings=emb, nb_entities=_nb_entities)
                        inf_np = inf.cpu().numpy()

                        print(s, p, o, inf_np)
                        assert inf_np[0] > 0.9 if (s, p, o) in (triples + xxx) else inf_np[0] < 0.1
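# The functions above are plain pytest-style tests (assertions via assert / np.testing,
# no fixtures). A small, optional runner is sketched below, assuming pytest is available;
# it is not part of the original file.
if __name__ == '__main__':
    import pytest
    pytest.main([__file__])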