Example #1
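A make_hop helper, written as a nested closure (hence the nonlocal memory), that turns a hop specification string into a (reformulator, is_reversed) pair: a plain integer such as '2' yields a forward hop, while a trailing non-digit suffix (e.g. '2R') marks the hop as reversed. The remaining configuration (reformulator_type, embedding_size, ref_init_type, the init bounds, nb_rules, kernel) is closed over from the enclosing scope. All snippets in this section assume the usual imports (numpy as np, torch, torch.nn as nn, typing.Tuple) plus the repository's model and reformulator classes.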
def make_hop(s: str) -> Tuple[BaseReformulator, bool]:
    nonlocal memory
    # '2' -> two forward hops; a trailing non-digit suffix ('2R') strips
    # the last character and flags the hop as reversed
    if s.isdigit():
        nb_hops, is_reversed = int(s), False
    else:
        nb_hops, is_reversed = int(s[:-1]), True
    res = None
    if reformulator_type in {'static'}:
        res = StaticReformulator(nb_hops, embedding_size, init_name=ref_init_type,
                                 lower_bound=lower_bound, upper_bound=upper_bound)
    elif reformulator_type in {'linear'}:
        res = LinearReformulator(nb_hops, embedding_size, init_name=ref_init_type,
                                 lower_bound=lower_bound, upper_bound=upper_bound)
    elif reformulator_type in {'attentive'}:
        res = AttentiveReformulator(nb_hops, predicate_embeddings, init_name=ref_init_type,
                                    lower_bound=lower_bound, upper_bound=upper_bound)
    elif reformulator_type in {'memory'}:
        # the rule memory is shared, so it is created lazily once and reused
        if memory is None:
            memory = MemoryReformulator.Memory(nb_hops, nb_rules, embedding_size, init_name=ref_init_type,
                                               lower_bound=lower_bound, upper_bound=upper_bound)
        res = MemoryReformulator(memory)
    elif reformulator_type in {'ntp'}:
        res = NTPReformulator(nb_hops=nb_hops, embedding_size=embedding_size,
                              kernel=kernel, init_name=ref_init_type,
                              lower_bound=lower_bound, upper_bound=upper_bound)
    assert res is not None
    return res, is_reversed
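A minimal sketch of how such a helper might be driven; the hops_str name and the '2,2,1R' value are illustrative assumptions, not taken from the source. Each comma-separated entry becomes one (reformulator, is_reversed) pair, matching the hops_lst structure that Hoppy consumes in Example #4.

hops_str = '2,2,1R'  # hypothetical spec: two 2-hop rules, one reversed 1-hop rule
hops_lst = [make_hop(s) for s in hops_str.split(',') if s]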
Example #2
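A unit test for SimpleHoppy on top of a ComplEx base model (the factor 2 in embedding_size * 2 accounts for the real and imaginary parts of the complex embeddings). For random batches of triples it checks that per-triple scoring agrees with 1-vs-all scoring.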
def test_hoppy_v1():
    nb_entities = 10
    nb_predicates = 5
    embedding_size = 10

    rs = np.random.RandomState(0)

    for _ in range(16):
        for nb_hops in range(6):
            for use_attention in [True, False]:
                with torch.no_grad():
                    entity_embeddings = nn.Embedding(nb_entities,
                                                     embedding_size * 2,
                                                     sparse=True)
                    predicate_embeddings = nn.Embedding(nb_predicates,
                                                        embedding_size * 2,
                                                        sparse=True)

                    base = ComplEx(entity_embeddings)

                    if use_attention:
                        reformulator = AttentiveReformulator(
                            nb_hops, predicate_embeddings)
                    else:
                        reformulator = LinearReformulator(
                            nb_hops, embedding_size * 2)

                    model = SimpleHoppy(base,
                                        entity_embeddings,
                                        hops=reformulator)

                    xs = torch.LongTensor(rs.randint(nb_entities, size=32))
                    xp = torch.LongTensor(rs.randint(nb_predicates, size=32))
                    xo = torch.LongTensor(rs.randint(nb_entities, size=32))

                    xs_emb = entity_embeddings(xs)
                    xp_emb = predicate_embeddings(xp)
                    xo_emb = entity_embeddings(xo)

                    scores = model.forward(xp_emb, xs_emb, xo_emb)
                    inf = model.score(xp_emb, xs_emb, xo_emb)

                    scores_sp, scores_po = scores

                    inf = inf.cpu().numpy()
                    scores_sp = scores_sp.cpu().numpy()
                    scores_po = scores_po.cpu().numpy()

                    for i in range(xs.shape[0]):
                        np.testing.assert_allclose(inf[i],
                                                   scores_sp[i, xo[i]],
                                                   rtol=1e-5,
                                                   atol=1e-5)
                        np.testing.assert_allclose(inf[i],
                                                   scores_po[i, xs[i]],
                                                   rtol=1e-5,
                                                   atol=1e-5)
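The two assert_allclose calls spell out the contract: for every triple i, the scalar score inf[i] must match scores_sp[i, xo[i]] (the row of scores over all candidate objects, indexed by the true object) and scores_po[i, xs[i]] (the row over all candidate subjects, indexed by the true subject).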
Example #3
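A leaner variant of the helper from Example #1: the reformulators are sized by rank instead of embedding_size, the initialisation arguments are dropped, and there is no 'ntp' branch.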
def make_hop(s: str) -> Tuple[BaseReformulator, bool]:
    nonlocal memory
    if s.isdigit():
        nb_hops, is_reversed = int(s), False
    else:
        nb_hops, is_reversed = int(s[:-1]), True
    res = None
    if reformulator_type in {'static'}:
        res = StaticReformulator(nb_hops, rank)
    elif reformulator_type in {'linear'}:
        res = LinearReformulator(nb_hops, rank)
    elif reformulator_type in {'attentive'}:
        res = AttentiveReformulator(nb_hops, predicate_embeddings)
    elif reformulator_type in {'memory'}:
        memory = MemoryReformulator.Memory(
            nb_hops, nb_rules, rank) if memory is None else memory
        res = MemoryReformulator(memory)
    assert res is not None
    return res, is_reversed
Example #4
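The same consistency test for the multi-hop Hoppy model: each entry of nb_hops_lst contributes one (reformulator, False) pair to hops_lst, and the check is repeated for recursion depths 0 through 2.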
def test_multirhoppy_v1():
    nb_entities = 10
    nb_predicates = 5
    embedding_size = 10

    init_size = 1.0

    rs = np.random.RandomState(0)

    for _ in range(8):
        for nb_hops_lst in [[1], [2], [3], [1, 2], [2, 2], [3, 2], [1, 2, 2],
                            [2, 2, 2], [3, 2, 2]]:
            for depth in range(3):
                for use_attention in [True, False]:
                    with torch.no_grad():
                        entity_embeddings = nn.Embedding(nb_entities,
                                                         embedding_size * 2,
                                                         sparse=True)
                        predicate_embeddings = nn.Embedding(nb_predicates,
                                                            embedding_size * 2,
                                                            sparse=True)

                        entity_embeddings.weight.data *= init_size
                        predicate_embeddings.weight.data *= init_size

                        base = ComplEx(entity_embeddings)

                        hops_lst = []
                        for i in nb_hops_lst:
                            if use_attention:
                                reformulator = AttentiveReformulator(
                                    i, predicate_embeddings)
                            else:
                                reformulator = LinearReformulator(
                                    i, embedding_size * 2)
                            hops_lst += [(reformulator, False)]

                        model = Hoppy(model=base,
                                      entity_embeddings=entity_embeddings,
                                      hops_lst=hops_lst,
                                      depth=depth)

                        xs = torch.LongTensor(rs.randint(nb_entities, size=32))
                        xp = torch.LongTensor(
                            rs.randint(nb_predicates, size=32))
                        xo = torch.LongTensor(rs.randint(nb_entities, size=32))

                        xs_emb = entity_embeddings(xs)
                        xp_emb = predicate_embeddings(xp)
                        xo_emb = entity_embeddings(xo)

                        scores = model.forward(xp_emb, xs_emb, xo_emb)
                        inf = model.score(xp_emb, xs_emb, xo_emb)

                        scores_sp, scores_po = scores

                        inf = inf.cpu().numpy()
                        scores_sp = scores_sp.cpu().numpy()
                        scores_po = scores_po.cpu().numpy()

                        for i in range(xs.shape[0]):
                            np.testing.assert_allclose(inf[i],
                                                       scores_sp[i, xo[i]],
                                                       rtol=1e-5,
                                                       atol=1e-5)
                            np.testing.assert_allclose(inf[i],
                                                       scores_po[i, xs[i]],
                                                       rtol=1e-5,
                                                       atol=1e-5)
Example #5
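A consistency test for the Multi wrapper, which pools the scores of several SimpleHoppy models (one per hop count from 0 to nb_hops - 1) under each of the pooling strategies 'max', 'min', 'sum' and 'mixture'.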
def test_multi():
    nb_entities = 10
    nb_predicates = 5
    embedding_size = 10

    init_size = 1.0

    rs = np.random.RandomState(0)

    for _ in range(16):
        for nb_hops in range(1, 6):
            for use_attention in [True, False]:
                for pt in {'max', 'min', 'sum', 'mixture'}:
                    with torch.no_grad():
                        entity_embeddings = nn.Embedding(nb_entities,
                                                         embedding_size * 2,
                                                         sparse=True)
                        predicate_embeddings = nn.Embedding(nb_predicates,
                                                            embedding_size * 2,
                                                            sparse=True)

                        entity_embeddings.weight.data *= init_size
                        predicate_embeddings.weight.data *= init_size

                        base = ComplEx(entity_embeddings)

                        models = []
                        for i in range(nb_hops):
                            if use_attention:
                                reformulator = AttentiveReformulator(
                                    i, predicate_embeddings)
                            else:
                                reformulator = LinearReformulator(
                                    i, embedding_size * 2)

                            h_model = SimpleHoppy(base,
                                                  entity_embeddings,
                                                  hops=reformulator)
                            models += [h_model]

                        model = Multi(models=models,
                                      pooling_type=pt,
                                      embedding_size=embedding_size * 2)

                        xs = torch.LongTensor(rs.randint(nb_entities, size=32))
                        xp = torch.LongTensor(
                            rs.randint(nb_predicates, size=32))
                        xo = torch.LongTensor(rs.randint(nb_entities, size=32))

                        xs_emb = entity_embeddings(xs)
                        xp_emb = predicate_embeddings(xp)
                        xo_emb = entity_embeddings(xo)

                        scores = model.forward(xp_emb, xs_emb, xo_emb)
                        inf = model.score(xp_emb, xs_emb, xo_emb)

                        scores_sp, scores_po = scores

                        inf = inf.cpu().numpy()
                        scores_sp = scores_sp.cpu().numpy()
                        scores_po = scores_po.cpu().numpy()

                        for i in range(xs.shape[0]):
                            np.testing.assert_allclose(inf[i],
                                                       scores_sp[i, xo[i]],
                                                       rtol=1e-5,
                                                       atol=1e-5)
                            np.testing.assert_allclose(inf[i],
                                                       scores_po[i, xs[i]],
                                                       rtol=1e-5,
                                                       atol=1e-5)
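Whatever the pooling strategy, the pooled model must satisfy the same score/forward contract as the individual models, so the assertion block is unchanged. As a rough sketch of what the simpler pooling modes could look like (an assumption about the implementation, not taken from the source; the 'mixture' mode presumably learns per-model weights, which would explain why Multi is given embedding_size):

import torch

def pool(scores_lst, pooling_type):
    # Hypothetical pooling over per-model score tensors of identical shape,
    # e.g. [batch_size] or [batch_size, nb_entities].
    stacked = torch.stack(scores_lst, dim=0)
    if pooling_type == 'max':
        return stacked.max(dim=0).values
    if pooling_type == 'min':
        return stacked.min(dim=0).values
    if pooling_type == 'sum':
        return stacked.sum(dim=0)
    raise ValueError('unknown pooling type: ' + pooling_type)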