Example #1
import torch

from torch_geometric.nn import CGConv


def test_cg_conv():
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, 16))
    pseudo = torch.rand((edge_index.size(1), 3))

    conv = CGConv(16, 3)
    assert conv.__repr__() == 'CGConv(16, 16, dim=3)'
    assert conv(x, edge_index, pseudo).size() == (num_nodes, 16)
Example #2
    def __init__(self, in_ch_sl, in_ch_ad, nconv):
        super(comp2Net, self).__init__()
        self.conv1_sl = CGConv(in_ch_sl)
        self.conv2_sl = CGConv(in_ch_sl)
        self.conv1_ad = CGConv(in_ch_ad)
        self.conv2_ad = CGConv(in_ch_ad)
        self.lin_sl = torch.nn.Linear(in_ch_sl, in_ch_ad)
        self.lin = torch.nn.Linear(in_ch_ad, 1)
        self.nconv = nconv
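
The forward pass is not included in this example. The following is a speculative sketch (not the author's code), assuming the two CGConv stacks process separate "sl" and "ad" graphs whose pooled features are merged before the scalar readout; all argument names are hypothetical:

def forward(self, x_sl, edge_index_sl, x_ad, edge_index_ad):
    # Sketch only: self.nconv presumably controls how many rounds are applied.
    x_sl = self.conv2_sl(self.conv1_sl(x_sl, edge_index_sl), edge_index_sl)
    x_ad = self.conv2_ad(self.conv1_ad(x_ad, edge_index_ad), edge_index_ad)
    x = self.lin_sl(x_sl).sum(dim=0) + x_ad.sum(dim=0)  # pool and merge branches
    return self.lin(x)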
Example #3
    def __init__(self, num_input, num_edge_features, num_output):
        super(EncoderV2, self).__init__()

        self.conv1 = CGConv(num_input, num_edge_features)
        self.conv2 = CGConv(num_input, num_edge_features)
        self.conv3 = CGConv(num_input, num_edge_features)

        self.lin1 = torch.nn.Linear(27, 126)
        self.lin2 = torch.nn.Linear(126, 64)
        self.lin3 = torch.nn.Linear(64, num_output)
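
Since CGConv never changes the node feature width, the three convolutions above can be stacked without intermediate projections. A minimal, self-contained illustration (shapes chosen arbitrarily):

import torch
from torch_geometric.nn import CGConv

conv = CGConv(8, dim=4)
x = torch.randn(5, 8)
edge_index = torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
edge_attr = torch.rand(edge_index.size(1), 4)
for _ in range(3):  # mirrors the conv1 -> conv2 -> conv3 stack
    x = conv(x, edge_index, edge_attr)
assert x.size() == (5, 8)  # the width is preserved through every layer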
Example #4
import torch

from torch_geometric.nn import CGConv


def test_cg_conv():
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, 16))
    pseudo = torch.rand((edge_index.size(1), 3))

    conv = CGConv(16, 3)
    assert conv.__repr__() == 'CGConv(16, 16, dim=3)'
    out = conv(x, edge_index, pseudo)
    assert out.size() == (num_nodes, 16)

    jit = torch.jit.script(conv.jittable())
    assert jit(x, edge_index, pseudo).tolist() == out.tolist()
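
conv.jittable() returns a copy of the module rewritten to be TorchScript-compatible, so torch.jit.script can compile it; the test then checks that the scripted module reproduces the eager output exactly. (Newer PyTorch Geometric releases can script MessagePassing modules directly, so jittable() may not be needed there, but it matches the API exercised in this test.)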
Example #5
    def makeConv(self, nin, nout, conv_args):
        # FeaStConv
        if conv_args['name'] == 'FeaSt':
            return FeaStConv(nin, nout, conv_args['num_heads'])

        # SplineConv
        if conv_args['name'] == 'Spline':
            return SplineConv(
                nin, nout,
                self.edge_dim,
                conv_args['kernel_size'],
                is_open_spline=conv_args['open_spline'],
                degree=conv_args['degree'],
            )

        # GMMConv
        if conv_args['name'] == 'GMM':
            return GMMConv(
                nin, nout,
                self.edge_dim,
                conv_args['kernel_size'],
            )

        # NNConv: the edge network maps edge features to an (nin x nout) weight matrix
        if conv_args['name'] == 'NN':
            h = nn.Sequential(
                nn.Linear(self.edge_dim, nin * nout),
                nn.ReLU(),
            )
            return NNConv(nin, nout, h)

        # PPFConv: the local MLP sees node features plus 4 point-pair features
        if conv_args['name'] == 'PPF':
            cin = nin + 4
            hl = nn.Sequential(
                nn.Linear(cin, conv_args['nhidden']),
                nn.ReLU(),
            )
            hg = nn.Linear(conv_args['nhidden'], nout)
            return PPFConv(hl, hg)

        # CGConv: the output width always equals nin, so nout is unused here
        if conv_args['name'] == 'CG':
            return CGConv(nin, self.edge_dim)
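
Note that the 'CG' branch, unlike the others, cannot change the feature width: CGConv always returns features of the same dimensionality as its input, so callers selecting 'CG' must arrange for nin == nout themselves.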
Example #6
    def __init__(self, node_atom: int, n_h1: int, dim_out):
        super(Net, self).__init__()
        # Node features: 1-dim (atom type); edge features: 1-dim (bond count).
        # The graph convolutions do not increase the feature dimension.
        self.conv1 = CGConv(1, dim=1)
        # self.conv1 = ChebConv(1, 1, K=5)
        self.conv2 = GCNConv(1, 3, add_self_loops=False)
        # self.conv2 = GCNConv(dim_output1, dim_out)
        # Earlier the output was 50 nodes x 1 dim.
        # Plain linear layers:
        self.layer3 = nn.Sequential(nn.Linear(3 * node_atom, n_h1), nn.BatchNorm1d(n_h1))
        self.layer4 = nn.Sequential(nn.Linear(n_h1, dim_out))
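
No forward method is shown for this class. The following is a speculative sketch (not the author's code) of how the layers might compose, assuming a fixed number of atoms per graph so the per-node outputs can be flattened; num_graphs is a hypothetical argument:

def forward(self, x, edge_index, edge_attr, num_graphs):
    x = self.conv1(x, edge_index, edge_attr)  # (num_nodes, 1)
    x = self.conv2(x, edge_index)             # (num_nodes, 3)
    x = x.view(num_graphs, -1)                # (num_graphs, 3 * node_atom)
    return self.layer4(self.layer3(x))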
Example #7
import torch
from torch_sparse import SparseTensor

from torch_geometric.nn import CGConv


def test_cg_conv_with_edge_features():
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    value = torch.rand(row.size(0), 3)
    adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))

    conv = CGConv(8, dim=3)
    assert conv.__repr__() == 'CGConv(8, dim=3)'
    out = conv(x1, edge_index, value)
    assert out.size() == (4, 8)
    assert conv(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()
    assert conv(x1, adj.t()).tolist() == out.tolist()

    t = '(Tensor, Tensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index, value).tolist() == out.tolist()
    assert jit(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()

    t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, adj.t()).tolist() == out.tolist()

    adj = adj.sparse_resize((4, 2))
    conv = CGConv((8, 16), dim=3)
    assert conv.__repr__() == 'CGConv((8, 16), dim=3)'
    out = conv((x1, x2), edge_index, value)
    assert out.size() == (2, 16)
    assert conv((x1, x2), edge_index, value, (4, 2)).tolist() == out.tolist()
    assert conv((x1, x2), adj.t()).tolist() == out.tolist()

    t = '(PairTensor, Tensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), edge_index, value).tolist() == out.tolist()
    assert jit((x1, x2), edge_index, value, (4, 2)).tolist() == out.tolist()

    t = '(PairTensor, SparseTensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), adj.t()).tolist() == out.tolist()
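
With a channel pair such as (8, 16), CGConv operates on a bipartite graph: source nodes carry 8 features, target nodes carry 16, and the output width follows the target side, which is why out.size() is (2, 16) in the second half of the test.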
Example #8
    def __init__(self, n_features: int, n_edge: int, conv_dim: int = 64):
        super(CGNet_squashed, self).__init__()

        self.fc1 = torch.nn.Linear(9, conv_dim)

        # OBS: root_weight needs to be False; we already account for it by
        # including a self-loop to the root vertex in edge_index.
        self.conv_l1 = CGConv(conv_dim, n_edge, aggr="mean", flow="target_to_source")

        self.fc2 = torch.nn.Linear(conv_dim, conv_dim)

        self.fc3 = torch.nn.Linear(conv_dim, 64)
        self.fc4 = torch.nn.Linear(64, 2)
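
The preprocessing that the comment refers to is not shown. A minimal sketch of adding self-loops with the standard utility (an assumption about the pipeline; the project's actual code may differ):

import torch
from torch_geometric.utils import add_self_loops

edge_index = torch.tensor([[0, 1], [1, 2]])
edge_index, _ = add_self_loops(edge_index, num_nodes=3)
# Each node now has an edge to itself, so aggregation includes its own
# features; add_self_loops can also extend edge_attr via its fill_value argument.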
Example #9
    def __init__(self, n_features: int, n_edge: int, conv_dim: int = 64, depth: int = 2):
        super(CGNet_flatten, self).__init__()

        self.depth = depth

        self.fc1 = torch.nn.Linear(9, conv_dim)

        # OBS: root_weight needs to be False; we already account for it by
        # including a self-loop to the root vertex in edge_index.
        self.conv_l1 = CGConv(conv_dim, 3, aggr="mean", flow="target_to_source")

        self.fc_shared = torch.nn.Linear(conv_dim, conv_dim)

        self.fc2_class = torch.nn.Linear(conv_dim, 64)
        self.fc3_class = torch.nn.Linear(64, 2)

        self.fc2_delta = torch.nn.Linear(conv_dim, 64)
        self.fc3_delta = torch.nn.Linear(64, 2)
Example #10
import torch
from torch_sparse import SparseTensor

from torch_geometric.nn import CGConv


def test_cg_conv():
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = CGConv(8)
    assert conv.__repr__() == 'CGConv(8, dim=0)'
    out = conv(x1, edge_index)
    assert out.size() == (4, 8)
    assert conv(x1, adj.t()).tolist() == out.tolist()

    t = '(Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index).tolist() == out.tolist()

    t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, adj.t()).tolist() == out.tolist()

    adj = adj.sparse_resize((4, 2))
    conv = CGConv((8, 16))
    assert conv.__repr__() == 'CGConv((8, 16), dim=0)'
    out = conv((x1, x2), edge_index)
    assert out.size() == (2, 16)
    assert conv((x1, x2), adj.t()).tolist() == out.tolist()

    t = '(PairTensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), edge_index).tolist() == out.tolist()

    t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), adj.t()).tolist() == out.tolist()

    # Test with batch_norm=True:
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))
    conv = CGConv(8, batch_norm=True)
    assert conv.__repr__() == 'CGConv(8, dim=0)'
    out = conv(x1, edge_index)
    assert out.size() == (4, 8)
    assert conv(x1, adj.t()).tolist() == out.tolist()

    t = '(Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index).tolist() == out.tolist()
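
With batch_norm=True, CGConv applies a BatchNorm1d to the aggregated output features. The repr string does not reflect this flag, which is why the test asserts the same 'CGConv(8, dim=0)' string as in the default configuration.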
Example #11
    def __init__(self, in_ch, nconv):
        super(simpleNet, self).__init__()
        self.conv1 = CGConv(in_ch)
        self.conv2 = CGConv(in_ch)
        self.lin = torch.nn.Linear(in_ch, 1)
        self.nconv = nconv
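
The forward pass is again omitted; a speculative sketch (not the author's code) in which nconv sets how many rounds of the two convolutions run before the readout:

def forward(self, x, edge_index):
    # Sketch only: repeat the conv pair self.nconv times, then read out.
    for _ in range(self.nconv):
        x = self.conv2(self.conv1(x, edge_index), edge_index)
    return self.lin(x)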
Example #12
    def __init__(self, in_channels, dim, out_size):
        super(SimpleGNN, self).__init__()
        self.conv1 = CGConv(in_channels, dim, aggr='add', bias=True)
        self.conv2 = CGConv(in_channels, dim, aggr='add', bias=True)
        self.conv3 = CGConv(in_channels, dim, aggr='add', bias=True)
        self.lin = nn.Linear(in_channels, out_size)
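
Here aggr='add' and bias=True simply spell out CGConv's defaults. As in the other examples, every layer keeps in_channels features, so a single Linear suffices to map the node embeddings to out_size.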