Example No. 1
    def __init__(self, edges, n, emb=16, bases=None, **kwargs):

        super().__init__()

        self.emb = emb

        # vertical stack to compute the normalization
        vindices, vsize = util.adj(edges, n, vertical=True)
        ih, iw = vindices.size()

        vals = torch.ones((ih, ), dtype=torch.float)
        vals = vals / util.sum_sparse(vindices, vals, vsize)

        # horizontal stack for the actual message passing
        indices, size = util.adj(edges, n, vertical=False)

        _, rn = size
        r = rn//n

        graph = torch.sparse.FloatTensor(indices=indices.t(), values=vals, size=size) # will this get cuda'd properly?
        self.register_buffer('graph', graph)

        if bases is None:
            self.weights = nn.Parameter(torch.FloatTensor(r, n, emb))
            nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))

            self.bases = None
        else:
            self.comps = nn.Parameter(torch.FloatTensor(r, bases))
            nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))

            self.bases = nn.Parameter(torch.FloatTensor(bases, n, emb))
            nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))
Example No. 2
def load_dir(path, no_sat=False):
    # load every DIMACS .cnf file in `path` as a DataSample
    data = []
    for filename in os.listdir(path):
        name, ext = os.path.splitext(filename)
        if ext != '.cnf':
            continue
        f = CNF.from_file(os.path.join(path, filename))
        # filenames starting with 'uu' mark unsatisfiable instances (label 0),
        # all others get label 1; with no_sat=True the label is set to -1
        sat = int(not name.startswith('uu')) if not no_sat else -1
        data.append(DataSample(filename, f, adj(f), sat))
    return data
Example No. 3
def load_dir(path):
    # load every DIMACS .cnf file in `path`; instances whose name starts with
    # 'uu' are skipped entirely, and no SAT label is stored
    data = []
    for filename in os.listdir(path):
        name, ext = os.path.splitext(filename)
        if ext != '.cnf':
            continue
        f = CNF.from_file(os.path.join(path, filename))
        if name.startswith('uu'):
            continue
        data.append(DataSample(filename, f, adj(f), None))
    return data
Example No. 4
    def __init__(self, edges, n, emb=16, bases=None, unify='sum', **kwargs):

        super().__init__()

        indices, size = util.adj(edges, n)

        rn, n = size
        r = rn//n

        ih, iw = indices.size()
        vals = torch.ones((ih, ), dtype=torch.float)

        vals = vals / util.sum_sparse(indices, vals, size)

        graph = torch.sparse.FloatTensor(indices=indices.t(), values=vals, size=size) # will this get cuda'd properly?
        self.register_buffer('graph', graph)


        if bases is None:
            self.weights = nn.Parameter(torch.FloatTensor(r, emb, emb))
            nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))

            self.bases = None
        else:
            self.comps = nn.Parameter(torch.FloatTensor(r, bases))
            self.bases = nn.Parameter(torch.FloatTensor(bases, emb, emb))
            nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))
            nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))

        if unify == 'sum':
            self.unify = SumUnify()
        elif unify == 'attention':
            self.unify = AttentionUnify(r, emb)
        elif unify == 'mlp':
            self.unify = MLPUnify(r, emb)
        else:
            raise Exception(f'unify {unify} not recognized')
Example No. 5
File: dot.py Project: shi27feng/sat
import sys

from graphviz import Graph

from cnf import CNF
from util import adj

f = CNF.from_file(sys.argv[1])
a_pos, a_neg = adj(f)    # variable-clause adjacency for positive / negative literals
a_pos = a_pos.todense()
a_neg = a_neg.todense()

n, m = a_pos.shape       # n variables, m clauses

g = Graph('G', filename=sys.argv[2], format='pdf', engine='neato')

for x in range(1, n + 1):
    g.node(f'x{x}', label='x', color='blue', fillcolor='blue', shape='point')

for c in range(1, m + 1):
    g.node(f'c{c}', label='y', color='red', fillcolor='red', shape='point')

pw = sys.argv[3]  # edge pen width, passed straight to graphviz

for x in range(n):
    for c in range(m):
        var = f'x{x + 1}'
        cl = f'c{c + 1}'
        if a_pos[x, c] == 1:
            # g.edge(var, cl, color='#ff0000', penwidth='0.001')
            g.edge(var, cl, color='#ff0000', penwidth=pw)
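
The snippet above only draws the positive-polarity edges and never writes the output file, so it appears to be truncated. A minimal sketch of how the loop and the script might be completed, assuming negative literals are drawn in a second colour and graphviz's render() produces the PDF (the colour choice is an assumption, not taken from the original project):

for x in range(n):
    for c in range(m):
        var = f'x{x + 1}'
        cl = f'c{c + 1}'
        if a_pos[x, c] == 1:
            g.edge(var, cl, color='#ff0000', penwidth=pw)
        if a_neg[x, c] == 1:
            # assumed: negative-polarity occurrences in a second colour
            g.edge(var, cl, color='#0000ff', penwidth=pw)

g.render()  # writes sys.argv[2] and the rendered sys.argv[2].pdf via the neato engine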
Example No. 6
    def __init__(self,
                 edges,
                 n,
                 numcls,
                 emb=128,
                 h=16,
                 bases=None,
                 separate_emb=False,
                 indep=False,
                 normalize=False,
                 sample=False):

        super().__init__()

        self.emb = emb
        self.h = h
        self.bases = bases
        self.numcls = numcls
        self.separate_emb = separate_emb
        self.normalize = normalize
        self.sample = sample

        # horizontally and vertically stacked versions of the adjacency graph
        hor_ind, hor_size = util.adj(edges, n, vertical=False)
        ver_ind, ver_size = util.adj(edges, n, vertical=True)

        rn, _ = ver_size
        r = rn // n

        self.r, self.rn, self.n = r, rn, n

        t = len(edges[0][0])

        vals = torch.ones(ver_ind.size(0), dtype=torch.float)
        vals = vals / util.sum_sparse(ver_ind, vals, ver_size)
        # -- the values are the same for the horizontal and the vertically stacked adjacency matrices
        #    so we can just normalize them by the vertically stacked one and reuse for the horizontal

        # hor_graph = torch.sparse.FloatTensor(indices=hor_ind.t(), values=vals, size=hor_size)
        self.register_buffer('hor_indices', hor_ind)

        #ver_graph = torch.sparse.FloatTensor(indices=ver_ind.t(), values=vals, size=ver_size)
        self.register_buffer('ver_indices', ver_ind)
        self.register_buffer('values', vals)

        if separate_emb:
            self.embeddings = nn.Parameter(torch.FloatTensor(
                r, n, emb))  # separate embedding per (relation, node) pair
            nn.init.xavier_uniform_(self.embeddings,
                                    gain=nn.init.calculate_gain('relu'))
        else:
            self.embeddings = nn.Parameter(torch.FloatTensor(
                n, emb))  # single embedding per node
            nn.init.xavier_uniform_(self.embeddings,
                                    gain=nn.init.calculate_gain('relu'))

        # layer 1 weights
        if bases is None:
            self.weights1 = nn.Parameter(torch.FloatTensor(r, emb, h))
            nn.init.xavier_uniform_(self.weights1,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases1 = None
        else:
            self.comps1 = nn.Parameter(torch.FloatTensor(r, bases))
            nn.init.xavier_uniform_(self.comps1,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases1 = nn.Parameter(torch.FloatTensor(bases, emb, h))
            nn.init.xavier_uniform_(self.bases1,
                                    gain=nn.init.calculate_gain('relu'))

        # layer 2 weights
        if bases is None:

            self.weights2 = nn.Parameter(torch.FloatTensor(r, h, numcls))
            nn.init.xavier_uniform_(self.weights2,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases2 = None
        else:
            self.comps2 = nn.Parameter(torch.FloatTensor(r, bases))
            nn.init.xavier_uniform_(self.comps2,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases2 = nn.Parameter(torch.FloatTensor(bases, h, numcls))
            nn.init.xavier_uniform_(self.bases2,
                                    gain=nn.init.calculate_gain('relu'))

        self.bias1 = nn.Parameter(torch.FloatTensor(h).zero_())
        self.bias2 = nn.Parameter(torch.FloatTensor(numcls).zero_())

        # convert the edges dict to a matrix of triples
        s, o, p = [], [], []
        for pred, (sub, obj) in edges.items():
            s.extend(sub)
            o.extend(obj)
            p.extend([pred] * len(sub))

        # graph as triples
        self.register_buffer('indices',
                             torch.tensor([s, p, o], dtype=torch.long).t())

        # for computing the attention weights
        self.indep = indep
        if indep:
            self.weights = nn.Parameter(torch.randn(self.indices.size(0)))
        else:
            self.sscore = nn.Linear(emb, h)
            self.pscore = nn.Parameter(torch.FloatTensor(r, h))
            nn.init.xavier_uniform_(self.pscore,
                                    gain=nn.init.calculate_gain('relu'))
            self.oscore = nn.Linear(emb, h)
Example No. 7
    def __init__(self,
                 edges,
                 n,
                 numcls,
                 emb=128,
                 h=16,
                 bases=None,
                 separate_emb=False):

        super().__init__()

        self.emb = emb
        self.h = h
        self.bases = bases
        self.numcls = numcls
        self.separate_emb = separate_emb

        # horizontally and vertically stacked versions of the adjacency graph
        hor_ind, hor_size = util.adj(edges, n, vertical=False)
        ver_ind, ver_size = util.adj(edges, n, vertical=True)

        rn, _ = ver_size
        r = rn // n

        t = len(edges[0][0])

        vals = torch.ones(ver_ind.size(0), dtype=torch.float)
        vals = vals / util.sum_sparse(ver_ind, vals, ver_size)
        # -- the values are the same for the horizontal and the vertically stacked adjacency matrices
        #    so we can just normalize them by the vertically stacked one and reuse for the horizontal

        hor_graph = torch.sparse.FloatTensor(indices=hor_ind.t(),
                                             values=vals,
                                             size=hor_size)
        self.register_buffer('hor_graph', hor_graph)

        ver_graph = torch.sparse.FloatTensor(indices=ver_ind.t(),
                                             values=vals,
                                             size=ver_size)
        self.register_buffer('ver_graph', ver_graph)

        if separate_emb:
            self.embeddings = nn.Parameter(torch.FloatTensor(
                r, n, emb))  # separate embedding per (relation, node) pair
            nn.init.xavier_uniform_(self.embeddings,
                                    gain=nn.init.calculate_gain('relu'))
        else:
            self.embeddings = nn.Parameter(torch.FloatTensor(
                n, emb))  # single embedding per node
            nn.init.xavier_uniform_(self.embeddings,
                                    gain=nn.init.calculate_gain('relu'))

        # layer 1 weights
        if bases is None:
            self.weights1 = nn.Parameter(torch.FloatTensor(r, emb, h))
            nn.init.xavier_uniform_(self.weights1,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases1 = None
        else:
            self.comps1 = nn.Parameter(torch.FloatTensor(r, bases))
            nn.init.xavier_uniform_(self.comps1,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases1 = nn.Parameter(torch.FloatTensor(bases, emb, h))
            nn.init.xavier_uniform_(self.bases1,
                                    gain=nn.init.calculate_gain('relu'))

        # layer 2 weights
        if bases is None:

            self.weights2 = nn.Parameter(torch.FloatTensor(r, h, numcls))
            nn.init.xavier_uniform_(self.weights2,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases2 = None
        else:
            self.comps2 = nn.Parameter(torch.FloatTensor(r, bases))
            nn.init.xavier_uniform_(self.comps2,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases2 = nn.Parameter(torch.FloatTensor(bases, h, numcls))
            nn.init.xavier_uniform_(self.bases2,
                                    gain=nn.init.calculate_gain('relu'))

        self.bias1 = nn.Parameter(torch.FloatTensor(h).zero_())
        self.bias2 = nn.Parameter(torch.FloatTensor(numcls).zero_())
Example No. 8
    def __init__(self,
                 edges,
                 n,
                 numcls,
                 emb=16,
                 bases=None,
                 softmax=False,
                 triples=None,
                 num_rels=None):

        super().__init__()

        self.emb = emb
        self.bases = bases
        self.numcls = numcls
        self.softmax = softmax

        assert (edges is None or
                triples is None), 'Pass graph as edges or triples, not both.'
        assert (edges is not None or triples is not None), 'No graph passed.'

        if edges is not None:

            # horizontally and vertically stacked versions of the adjacency graph
            hor_ind, hor_size = util.adj(edges, n, vertical=False)
            ver_ind, ver_size = util.adj(edges, n, vertical=True)
        else:
            hor_ind, hor_size = util.adj_triples(triples,
                                                 n,
                                                 num_rels=num_rels,
                                                 vertical=False)
            ver_ind, ver_size = util.adj_triples(triples,
                                                 n,
                                                 num_rels=num_rels,
                                                 vertical=True)

        _, rn = hor_size
        r = rn // n

        vals = torch.ones(ver_ind.size(0), dtype=torch.float)
        vals = vals / util.sum_sparse(ver_ind, vals, ver_size)
        # -- the values are the same for the horizontal and the vertically stacked adjacency matrices
        #    so we can just normalize them by the vertically stacked one and reuse for the horizontal

        hor_graph = torch.sparse.FloatTensor(indices=hor_ind.t(),
                                             values=vals,
                                             size=hor_size)
        self.register_buffer('hor_graph', hor_graph)

        ver_graph = torch.sparse.FloatTensor(indices=ver_ind.t(),
                                             values=vals,
                                             size=ver_size)
        self.register_buffer('ver_graph', ver_graph)

        # layer 1 weights
        if bases is None:
            self.weights1 = nn.Parameter(torch.FloatTensor(r, n, emb))
            nn.init.xavier_uniform_(self.weights1,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases1 = None
        else:
            self.comps1 = nn.Parameter(torch.FloatTensor(r, bases))
            nn.init.xavier_uniform_(self.comps1,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases1 = nn.Parameter(torch.FloatTensor(bases, n, emb))
            nn.init.xavier_uniform_(self.bases1,
                                    gain=nn.init.calculate_gain('relu'))

        # layer 2 weights
        if bases is None:

            self.weights2 = nn.Parameter(torch.FloatTensor(r, emb, numcls))
            nn.init.xavier_uniform_(self.weights2,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases2 = None
        else:
            self.comps2 = nn.Parameter(torch.FloatTensor(r, bases))
            nn.init.xavier_uniform_(self.comps2,
                                    gain=nn.init.calculate_gain('relu'))

            self.bases2 = nn.Parameter(torch.FloatTensor(bases, emb, numcls))
            nn.init.xavier_uniform_(self.bases2,
                                    gain=nn.init.calculate_gain('relu'))

        self.bias1 = nn.Parameter(torch.FloatTensor(emb).zero_())
        self.bias2 = nn.Parameter(torch.FloatTensor(numcls).zero_())
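
The constructor above only sets up the stacked adjacency matrices and the (optionally basis-decomposed) weights. For reference, a minimal sketch of the matching two-layer forward pass, assuming it follows the standard horizontal/vertical-stacking R-GCN recipe; the forward method itself is not part of the snippet above, and F stands for torch.nn.functional:

    def forward(self):
        n, rn = self.hor_graph.size()
        r = rn // n
        e = self.emb

        # layer 1: with a basis decomposition, mix the bases into per-relation weights first
        if self.bases1 is not None:
            weights = torch.einsum('rb, bne -> rne', self.comps1, self.bases1)
        else:
            weights = self.weights1

        # featureless first layer: the horizontal stack sums, for every node,
        # the weight rows of its neighbours over all relations
        h = torch.mm(self.hor_graph, weights.view(r * n, e))
        h = F.relu(h + self.bias1)

        # layer 2: message passing with the vertical stack, then a per-relation
        # linear map to class scores, summed over relations
        h = torch.mm(self.ver_graph, h).view(r, n, e)

        if self.bases2 is not None:
            weights = torch.einsum('rb, bec -> rec', self.comps2, self.bases2)
        else:
            weights = self.weights2

        out = torch.einsum('rne, rec -> nc', h, weights) + self.bias2
        return F.softmax(out, dim=1) if self.softmax else out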