def __init__(self):
    """Discriminator: two edge-conditioned (NNConv) layers, each batch-normalised."""
    super(Discriminator, self).__init__()
    # Edge MLP for conv1: 2-dim edge attrs -> 1225 = 35 * 35 weight entries.
    first_edge_mlp = Sequential(Linear(2, 1225), ReLU())
    self.conv1 = NNConv(35, 35, first_edge_mlp, aggr='mean', root_weight=True, bias=True)
    self.conv11 = BatchNorm(35, eps=1e-03, momentum=0.1, affine=True, track_running_stats=True)
    # Edge MLP for conv2: 2-dim edge attrs -> 35 = 35 * 1 weight entries.
    second_edge_mlp = Sequential(Linear(2, 35), ReLU())
    self.conv2 = NNConv(35, 1, second_edge_mlp, aggr='mean', root_weight=True, bias=True)
    self.conv22 = BatchNorm(1, eps=1e-03, momentum=0.1, affine=True, track_running_stats=True)
def __init__(self):
    """Discriminator over brain-graph inputs sized by the global `nbr_of_regions`."""
    super(Discriminator1, self).__init__()
    # Edge MLP for conv1: 2-dim edge attrs -> nbr_of_regions**2 weight entries.
    first_edge_mlp = Sequential(Linear(2, (nbr_of_regions * nbr_of_regions)), ReLU())
    self.conv1 = NNConv(nbr_of_regions, nbr_of_regions, first_edge_mlp,
                        aggr='mean', root_weight=True, bias=True)
    self.conv11 = BatchNorm(nbr_of_regions, eps=1e-03, momentum=0.1,
                            affine=True, track_running_stats=True)
    # Edge MLP for conv2: 2-dim edge attrs -> nbr_of_regions * 1 weight entries.
    second_edge_mlp = Sequential(Linear(2, nbr_of_regions), ReLU())
    self.conv2 = NNConv(nbr_of_regions, 1, second_edge_mlp,
                        aggr='mean', root_weight=True, bias=True)
    self.conv22 = BatchNorm(1, eps=1e-03, momentum=0.1,
                            affine=True, track_running_stats=True)
def __init__(self, dim=64, edge_dim=12, node_in=8, edge_in=19, edge_in3=8, num_layers=1):
    """Two-stage NNConv/GRU message passer with Set2Set readout heads.

    Stage 1 operates at width `dim`; `lin_covert` widens features to 2*dim
    for stage 2. A second edge-feature set (`edge_in3`) conditions stage 2
    and the output head.
    """
    super(Net_int_2Edges_Set2Set3, self).__init__()
    self.num_layers = num_layers
    self.dim = dim
    # Input projections for node features and first-stage edge attributes.
    self.lin_node = torch.nn.Linear(node_in, dim)
    self.lin_edge_attr = torch.nn.Linear(edge_in, edge_dim)
    # Edge networks producing the per-edge weight matrices for each NNConv.
    stage1_edge_nn = Linear(edge_dim, dim * dim, bias=False)
    stage2_edge_nn = Linear(edge_in3, dim * dim * 2 * 2, bias=False)
    self.conv1 = NNConv(dim, dim, stage1_edge_nn, aggr='mean', root_weight=False)
    self.gru1 = GRU(dim, dim)
    # Widening block between the two message-passing stages (dim -> 2*dim).
    self.lin_covert = Sequential(BatchNorm1d(dim), Linear(dim, dim * 2),
                                 RReLU(), Dropout(), Linear(dim * 2, dim * 2), RReLU())
    self.conv2 = NNConv(dim * 2, dim * 2, stage2_edge_nn, aggr='mean', root_weight=False)
    self.gru2 = GRU(dim * 2, dim * 2)
    # Edge-conditioned output head parameters.
    self.lin_weight = Linear(edge_in3, dim * 2 * 2, bias=False)
    self.lin_bias = Linear(edge_in3, 1, bias=False)
    self.norm = BatchNorm1d(dim * 2 * 2)
    self.norm_x = BatchNorm1d(node_in)
    # Set2Set readouts (their output dimension is twice the input dimension).
    self.head = Set2Set(dim * 2, processing_steps=3, num_layers=num_layers)
    self.pool = Set2Set(dim * 2, processing_steps=3)
    self.h_lin = Linear(edge_in3, num_layers * dim * 2 * 2)
    self.q_star_lin = Linear(dim * 2 * 2, dim * 2 * 2)
def __init__(self, edge_net_list, dropout=False):
    """ECC network: two NNConv layers with caller-supplied edge networks.

    edge_net_list[0] / edge_net_list[1] must map edge attributes to
    in_channels * out_channels values for the respective convolution.
    """
    super(ECCNet, self).__init__()
    self.dropout = dropout
    self.conv1 = NNConv(num_node_feature, num_ecc_hidden_size, edge_net_list[0])
    self.conv2 = NNConv(num_ecc_hidden_size, num_ecc_out_size, edge_net_list[1])
def test_nn_conv():
    """NNConv: repr, dense/sparse equivalence, bipartite inputs, TorchScript."""
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    value = torch.rand(row.size(0), 3)
    # Same graph as a SparseTensor adjacency, edge values carried as attrs.
    adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))

    # Edge network must emit 8 * 32 values per edge for NNConv(8, 32).
    nn = Seq(Lin(3, 32), ReLU(), Lin(32, 8 * 32))
    conv = NNConv(8, 32, nn=nn)
    assert conv.__repr__() == (
        'NNConv(8, 32, aggr=add, nn=Sequential(\n'
        ' (0): Linear(in_features=3, out_features=32, bias=True)\n'
        ' (1): ReLU()\n'
        ' (2): Linear(in_features=32, out_features=256, bias=True)\n'
        '))')
    out = conv(x1, edge_index, value)
    assert out.size() == (4, 32)
    # Explicit size and sparse adjacency must reproduce the edge_index result.
    assert conv(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()
    assert conv(x1, adj.t()).tolist() == out.tolist()

    if is_full_test():
        # TorchScript, dense signature.
        t = '(Tensor, Tensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x1, edge_index, value).tolist() == out.tolist()
        assert jit(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()
        # TorchScript, sparse signature.
        t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x1, adj.t()).tolist() == out.tolist()

    # Bipartite message passing: (8, 16)-dim sources/targets over a (4, 2) graph.
    adj = adj.sparse_resize((4, 2))
    conv = NNConv((8, 16), 32, nn=nn)
    assert conv.__repr__() == (
        'NNConv((8, 16), 32, aggr=add, nn=Sequential(\n'
        ' (0): Linear(in_features=3, out_features=32, bias=True)\n'
        ' (1): ReLU()\n'
        ' (2): Linear(in_features=32, out_features=256, bias=True)\n'
        '))')
    out1 = conv((x1, x2), edge_index, value)
    out2 = conv((x1, None), edge_index, value, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert conv((x1, x2), edge_index, value, (4, 2)).tolist() == out1.tolist()
    assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
    assert conv((x1, None), adj.t()).tolist() == out2.tolist()

    if is_full_test():
        t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit((x1, x2), edge_index, value).tolist() == out1.tolist()
        assert jit((x1, x2), edge_index, value, size=(4, 2)).tolist() == \
            out1.tolist()
        assert jit((x1, None), edge_index, value, size=(4, 2)).tolist() == out2.tolist()
        t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
        assert jit((x1, None), adj.t()).tolist() == out2.tolist()
def __init__(self, args):
    """GraphCNN-GAN generator: dense seed layer + stacked NNConv/BatchNorm blocks."""
    super(GraphCNNGANG, self).__init__()
    self.args = args
    # Project the latent vector to num_hits nodes of the first layer width.
    self.dense = nn.Linear(self.args.latent_dim,
                           self.args.num_hits * self.args.graphcnng_layers[0])
    self.layers = nn.ModuleList()
    self.edge_weights = nn.ModuleList()
    self.bn_layers = nn.ModuleList()
    widths = self.args.graphcnng_layers
    # One NNConv block per consecutive width pair; each edge network emits
    # in_width * out_width values, reshaped into the per-edge weight matrix.
    for in_width, out_width in zip(widths[:-1], widths[1:]):
        edge_fc = nn.Linear(in_width, in_width * out_width)
        self.edge_weights.append(edge_fc)
        self.layers.append(NNConv(in_width, out_width, edge_fc,
                                  aggr='mean', root_weight=True, bias=True))
        self.bn_layers.append(torch_geometric.nn.BatchNorm(out_width))
    # Final block maps the last hidden width to the per-node output size.
    final_edge_fc = nn.Linear(widths[-1], widths[-1] * self.args.node_feat_size)
    self.edge_weights.append(final_edge_fc)
    self.layers.append(NNConv(widths[-1], self.args.node_feat_size, final_edge_fc,
                              aggr='mean', root_weight=True, bias=True))
    self.bn_layers.append(torch_geometric.nn.BatchNorm(self.args.node_feat_size))
    logging.info("dense: ")
    logging.info(self.dense)
    logging.info("edge_weights: ")
    logging.info(self.edge_weights)
    logging.info("layers: ")
    logging.info(self.layers)
    logging.info("bn layers: ")
    logging.info(self.bn_layers)
def __init__(self, dataset, embedding_layer, hidden_dim=cmd_args.hidden_dim):
    """Four-stage conv/pool encoder alternating GraphConvE and NNConv layers."""
    super().__init__()
    self.embedding_layer = embedding_layer
    # Edge networks for the two NNConv stages: hidden_dim-dim edge attrs ->
    # hidden_dim**2 values, reshaped into the per-edge weight matrix.
    self.nn2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
                             nn.Linear(hidden_dim, hidden_dim**2))
    self.nn4 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
                             nn.Linear(hidden_dim, hidden_dim**2))
    # Conv/pool stack; ratio=1.0 presumably retains all nodes — confirm intent.
    self.conv1 = GraphConvE(hidden_dim, hidden_dim)
    self.pool1 = TopKPooling(hidden_dim, ratio=1.0)
    self.conv2 = NNConv(hidden_dim, hidden_dim, self.nn2)
    self.pool2 = TopKPooling(hidden_dim, ratio=1.0)
    self.conv3 = GraphConvE(hidden_dim, hidden_dim)
    self.pool3 = TopKPooling(hidden_dim, ratio=1.0)
    self.conv4 = NNConv(hidden_dim, hidden_dim, self.nn4)
    self.pool4 = TopKPooling(hidden_dim, ratio=1.0)
    # Linear heads (two parallel banks of hidden_dim -> hidden_dim maps).
    self.lin1 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.lin2 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.lin3 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.l1 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.l2 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.l3 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.l4 = torch.nn.Linear(hidden_dim, hidden_dim)
def __init__(self, D, C, G=0, E=1, Q=96, task='graph', aggr='add', pooltype='max', conclayers=True):
    """Edge-conditioned (NNConv) graph net with optional Set2Set pooling.

    D/E/G: node/edge/global feature dims; C: output classes; Q: latent dim.
    """
    super(NNNet, self).__init__()
    self.D = D
    self.E = E
    self.G = G
    self.C = C
    self.Q = Q
    self.task = task
    self.pooltype = pooltype
    self.conclayers = conclayers
    # Each edge network maps [-1, E] edge features to [-1, D * D] weights.
    self.conv1 = NNConv(in_channels=D, out_channels=D, nn=MLP([E, D * D]), aggr=aggr)
    self.conv2 = NNConv(in_channels=D, out_channels=D, nn=MLP([E, D * D]), aggr=aggr)
    # Fusion layer: concatenated conv outputs (D + D) or a single D input.
    self.lin1 = MLP([D + D, Q]) if self.conclayers else MLP([D, Q])
    if pooltype == 's2s':
        # Set2Set always outputs 2x its input dim; project back down to Q.
        self.S2Spool = Set2Set(in_channels=Q, processing_steps=3, num_layers=1)
        self.S2Slin = Linear(2 * Q, Q)
    # Final head input: latent plus optional global features.
    self.Z = Q + self.G if self.G > 0 else Q
    self.mlp1 = MLP([self.Z, self.Z, self.C])
def __init__(self, atom_vertex_dim, atom_edge_dim, orbital_vertex_dim=NotImplemented, orbital_edge_dim=NotImplemented, output_dim=NotImplemented, mp_step=6, s2s_step=6):
    """Dual-graph (atom + orbital) message-passing network with cross-talk.

    *_dim arguments are 2-tuples: index 0 is the raw input dimension and
    index 1 the hidden dimension — TODO confirm against callers.
    mp_step / s2s_step: message-passing and Set2Set iteration counts.
    """
    super(MultiNet, self).__init__()
    self.atom_vertex_dim = atom_vertex_dim
    self.atom_edge_dim = atom_edge_dim
    self.orbital_vertex_dim = orbital_vertex_dim
    self.orbital_edge_dim = orbital_edge_dim
    self.output_dim = output_dim
    self.mp_step = mp_step
    self.s2s_step = s2s_step
    # atom net: edge net emits atom_vertex_dim[1]**2 values per edge, as
    # required by NNConv(atom_vertex_dim[1], atom_vertex_dim[1], ...).
    atom_edge_gc = nn.Sequential(nn.Linear(atom_edge_dim[1], atom_vertex_dim[1] ** 2), nn.Dropout(0.2))
    self.atom_vertex_conv = NNConv(atom_vertex_dim[1], atom_vertex_dim[1], atom_edge_gc, aggr="mean", root_weight=True)
    self.atom_vertex_gru = nn.GRU(atom_vertex_dim[1], atom_vertex_dim[1])
    self.atom_s2s = Set2Set(atom_vertex_dim[1], processing_steps=s2s_step)
    # Input embedders for atom vertices (lin0) and atom edges (lin1).
    self.atom_lin0 = nn.Sequential(nn.Linear(atom_vertex_dim[0], 2 * atom_vertex_dim[0]), nn.CELU(), nn.Linear(2 * atom_vertex_dim[0], atom_vertex_dim[1]), nn.CELU())
    self.atom_lin1 = nn.Sequential(nn.Linear(atom_edge_dim[0], 2 * atom_edge_dim[0]), nn.CELU(), nn.Linear(2 * atom_edge_dim[0], atom_edge_dim[1]), nn.CELU())
    # Widens the 2x Set2Set readout to 4x the hidden dim.
    self.atom_lin2 = nn.Sequential(nn.Linear(2 * atom_vertex_dim[1], 4 * atom_vertex_dim[1]), nn.CELU())
    # orbital net: mirrors the atom net at orbital dimensions.
    orbital_edge_gc = nn.Sequential(nn.Linear(orbital_edge_dim[1], orbital_vertex_dim[1] ** 2), nn.Dropout(0.2))
    self.orbital_vertex_conv = NNConv(orbital_vertex_dim[1], orbital_vertex_dim[1], orbital_edge_gc, aggr="mean", root_weight=True)
    self.orbital_vertex_gru = nn.GRU(orbital_vertex_dim[1], orbital_vertex_dim[1])
    self.orbital_s2s = Set2Set(orbital_vertex_dim[1], processing_steps=s2s_step)
    self.orbital_lin0 = nn.Sequential(nn.Linear(orbital_vertex_dim[0], 2 * orbital_vertex_dim[0]), nn.CELU(), nn.Linear(2 * orbital_vertex_dim[0], orbital_vertex_dim[1]), nn.CELU())
    self.orbital_lin1 = nn.Sequential(nn.Linear(orbital_edge_dim[0], 2 * orbital_edge_dim[0]), nn.CELU(), nn.Linear(2 * orbital_edge_dim[0], orbital_edge_dim[1]), nn.CELU())
    self.orbital_lin2 = nn.Sequential(nn.Linear(2 * orbital_vertex_dim[1], 4 * orbital_vertex_dim[1]), nn.CELU())
    # cross net: fuses both readouts and exchanges information between graphs.
    self.cross_lin0 = nn.Sequential(
        nn.Linear(4 * atom_vertex_dim[1] + 4 * orbital_vertex_dim[1], 4 * output_dim),
        nn.CELU(),
        nn.Linear(4 * output_dim, output_dim)
    )
    # orbital -> atom direction (o2a); note the halved atom hidden width.
    self.cross_o2a_lin = nn.Sequential(nn.Linear(orbital_vertex_dim[1], 2 * orbital_vertex_dim[1]), nn.CELU(), nn.Linear(2 * orbital_vertex_dim[1], int(atom_vertex_dim[1] / 2)), nn.CELU())
    self.cross_o2a_s2s = Set2Set(int(atom_vertex_dim[1] / 2), processing_steps=s2s_step)
    self.cross_o2a_gru = nn.GRU(atom_vertex_dim[1], atom_vertex_dim[1])
    # atom -> orbital direction (a2o).
    self.cross_a2o_lin = nn.Sequential(nn.Linear(atom_vertex_dim[1], 2 * atom_vertex_dim[1]), nn.CELU(), nn.Linear(2 * atom_vertex_dim[1], orbital_vertex_dim[1]), nn.CELU())
    self.cross_a2o_gru = nn.GRU(orbital_vertex_dim[1], orbital_vertex_dim[1])
def __init__(self):
    """Two NNConv layers over scalar edge attributes, plus a classifier head."""
    super(Net, self).__init__()
    # Edge nets map the 1-dim edge attr to in_channels * out_channels weights.
    edge_net_a = nn.Sequential(nn.Linear(1, 10), nn.ReLU(),
                               nn.Linear(10, d.num_features * 16))
    self.conv1 = NNConv(d.num_features, 16, edge_net_a, aggr='mean')
    edge_net_b = nn.Sequential(nn.Linear(1, 10), nn.ReLU(),
                               nn.Linear(10, 32 * 16))
    self.conv2 = NNConv(16, 32, edge_net_b, aggr='mean')
    self.fc1 = torch.nn.Linear(32, 64)
    self.fc2 = torch.nn.Linear(64, d.num_classes)
def __init__(self, input_feature_dim, num_types, dim):
    """DDI graph encoder: two NNConv layers conditioned on edge-type features."""
    super(DDIEncoder, self).__init__()
    # Edge networks: num_types-dim edge attrs -> in * out conv weights.
    input_edge_net = nn.Sequential(nn.Linear(num_types, dim), nn.BatchNorm1d(dim),
                                   nn.ReLU(), nn.Linear(dim, input_feature_dim * dim))
    hidden_edge_net = nn.Sequential(nn.Linear(num_types, dim), nn.BatchNorm1d(dim),
                                    nn.ReLU(), nn.Linear(dim, dim * dim))
    self.conv1 = NNConv(input_feature_dim, dim, input_edge_net, aggr='mean')
    self.batch_norm = nn.BatchNorm1d(dim)
    self.conv2 = NNConv(dim, dim, hidden_edge_net, aggr='mean')
def __init__(self):
    """NNConv network over 2-dim edge attributes with default (add) aggregation."""
    super(Net, self).__init__()
    # NOTE(review): this edge net emits 32 values, so NNConv(d.num_features, 32)
    # only matches when d.num_features == 1 — confirm against the dataset.
    edge_net_a = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 32))
    self.conv1 = NNConv(d.num_features, 32, edge_net_a)
    # 2048 = 32 * 64, matching NNConv(32, 64).
    edge_net_b = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 2048))
    self.conv2 = NNConv(32, 64, edge_net_b)
    self.fc1 = torch.nn.Linear(64, 128)
    self.fc2 = torch.nn.Linear(128, d.num_classes)
def __init__(self):
    """NNConv classifier sized from the module-level `train_dataset`."""
    super(Net, self).__init__()
    # NOTE(review): this edge net emits 32 values, so NNConv(num_features, 32)
    # only matches when train_dataset.num_features == 1 — confirm.
    edge_net_a = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 32))
    self.conv1 = NNConv(train_dataset.num_features, 32, edge_net_a, aggr='mean')
    # 2048 = 32 * 64, matching NNConv(32, 64).
    edge_net_b = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 2048))
    self.conv2 = NNConv(32, 64, edge_net_b, aggr='mean')
    self.fc1 = torch.nn.Linear(64, 128)
    self.fc2 = torch.nn.Linear(128, train_dataset.num_classes)
def __init__(self):
    """NNConv classifier with a max-based scatter aggregation layer."""
    super(Net, self).__init__()
    # NOTE(review): this edge net emits 32 values, so NNConv(d.num_features, 32)
    # only matches when d.num_features == 1 — confirm against the dataset.
    edge_net_a = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 32))
    self.conv1 = NNConv(d.num_features, 32, edge_net_a, aggr='mean')
    # 2048 = 32 * 64, matching NNConv(32, 64).
    edge_net_b = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 2048))
    self.conv2 = NNConv(32, 64, edge_net_b, aggr='mean')
    self.fc1 = torch.nn.Linear(64, 128)
    self.fc2 = torch.nn.Linear(128, d.num_classes)
    self.aggregator = ScatterAggregationLayer(function='max')
def test_nn_conv():
    """NNConv smoke test: repr string and output shape on a tiny star graph."""
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))
    # One 3-dim pseudo-coordinate vector per edge.
    pseudo = torch.rand((edge_index.size(1), 3))
    # Edge network must emit in_channels * out_channels values per edge.
    edge_nn = Seq(Lin(3, 32), ReLU(), Lin(32, in_channels * out_channels))
    conv = NNConv(in_channels, out_channels, edge_nn)
    assert repr(conv) == 'NNConv(16, 32)'
    assert conv(x, edge_index, pseudo).size() == (num_nodes, out_channels)
def __init__(self):
    """Four-layer NNConv regressor with a scalar output head.

    Each convolution gets its own edge network sized for that layer.
    """
    super(Net, self).__init__()

    def make_edge_network(in_channels, out_channels):
        # NNConv reshapes the edge network's per-edge output into an
        # (in_channels, out_channels) weight matrix, so the final Linear
        # must emit exactly in_channels * out_channels values.
        return nn.Sequential(
            nn.Linear(edge_input_dim, edge_hidden_dim),
            nn.ReLU(),
            nn.Linear(edge_hidden_dim, in_channels * out_channels))

    # BUG FIX: the original shared ONE edge_network (output size
    # node_input_dim * node_input_dim) across all four convs. NNConv(16, 32)
    # needs 512 outputs while NNConv(32, 256) needs 8192, so a single shared
    # network cannot satisfy every layer and the forward pass would fail at
    # runtime. Each conv now receives a correctly sized edge network.
    self.conv1 = NNConv(dataset.num_node_features, 16,
                        make_edge_network(dataset.num_node_features, 16))
    self.conv2 = NNConv(16, 32, make_edge_network(16, 32))
    self.conv3 = NNConv(32, 256, make_edge_network(32, 256))
    self.conv4 = NNConv(256, 32, make_edge_network(256, 32))
    self.lin2 = torch.nn.Linear(32, 1)
def __init__(self):
    """NNConv classifier.

    Layer update rule: x'_i = Theta * x_i + sum_{j in N(i)} x_j * h_Theta(e_ij),
    where h_Theta is the per-layer edge network below.
    """
    super(Net, self).__init__()
    # NOTE(review): this edge net emits 32 values, so NNConv(d.num_features, 32)
    # only matches when d.num_features == 1 — confirm against the dataset.
    edge_net_a = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 32))
    self.conv1 = NNConv(d.num_features, 32, edge_net_a, aggr='mean')
    # 2048 = 32 * 64, matching NNConv(32, 64).
    edge_net_b = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 2048))
    self.conv2 = NNConv(32, 64, edge_net_b, aggr='mean')
    self.fc1 = torch.nn.Linear(64, 128)
    self.fc2 = torch.nn.Linear(128, d.num_classes)
def __init__(self):
    """Two NNConv layers (widths 48 and 96) over scalar edge attributes."""
    super(Net, self).__init__()
    # Edge nets map the 1-dim edge attr to in_channels * out_channels weights.
    edge_net_a = nn.Sequential(nn.Linear(1, 30), nn.ReLU(),
                               nn.Linear(30, dataset.num_features * 48))
    self.conv1 = NNConv(dataset.num_features, 48, edge_net_a, aggr='mean')
    edge_net_b = nn.Sequential(nn.Linear(1, 30), nn.ReLU(),
                               nn.Linear(30, 48 * 96))
    self.conv2 = NNConv(48, 96, edge_net_b, aggr='mean')
    self.fc1 = torch.nn.Linear(96, 128)
    self.fc2 = torch.nn.Linear(128, dataset.num_classes)
def __init__(self, MODEL_PARAMS):
    """DGN: three NNConv layers whose sizes come from the MODEL_PARAMS dict."""
    super(DGN, self).__init__()
    self.model_params = MODEL_PARAMS
    params = self.model_params
    # One edge network per conv; "LinearK" entries size the K-th edge net.
    edge_net_1 = Sequential(Linear(params["Linear1"]["in"], params["Linear1"]["out"]), ReLU())
    self.conv1 = NNConv(params["conv1"]["in"], params["conv1"]["out"], edge_net_1, aggr='mean')
    edge_net_2 = Sequential(Linear(params["Linear2"]["in"], params["Linear2"]["out"]), ReLU())
    self.conv2 = NNConv(params["conv2"]["in"], params["conv2"]["out"], edge_net_2, aggr='mean')
    edge_net_3 = Sequential(Linear(params["Linear3"]["in"], params["Linear3"]["out"]), ReLU())
    self.conv3 = NNConv(params["conv3"]["in"], params["conv3"]["out"], edge_net_3, aggr='mean')
class NMP(torch.nn.Module):
    """Neural message-passing graph classifier (2 classes).

    `nn` is a factory: nn(edge_dim, n_weights) must return an edge network
    whose output NNConv reshapes into the per-edge weight matrix.
    """

    def __init__(self, num_layers, num_input_features, hidden, nn):
        super(NMP, self).__init__()
        self.conv1 = NNConv(num_input_features, hidden,
                            nn(1, num_input_features * hidden), aggr="add")
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(
                NNConv(hidden, hidden, nn(1, hidden * hidden), aggr="add"))
        # 3 * hidden: forward() concatenates add/mean/max global readouts.
        self.lin1 = Linear(3 * hidden, hidden)
        self.lin2 = Linear(hidden, 2)  # output layer, 2 classes

    def reset_parameters(self):
        """Re-initialise every learnable submodule."""
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()
        # BUG FIX: removed `self.att.reset_parameters()` — no `att` submodule
        # is ever created in __init__, so that call raised AttributeError.

    def forward(self, data):
        """Return per-graph log-softmax class scores."""
        x, edge_index, edge_attr, batch = (data.x, data.edge_index,
                                           data.edge_attr, data.batch)
        # Message passing with ReLU activations.
        x = F.relu(self.conv1(x, edge_index, edge_attr))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index, edge_attr))
        # Concatenate three global readouts per graph.
        x = torch.cat([
            global_add_pool(x, batch),
            global_mean_pool(x, batch),
            global_max_pool(x, batch)
        ], dim=1)
        # Classifier head with dropout.
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        # Printable representation: just the class name.
        return self.__class__.__name__
def __init__(self):
    """Relational network: NNConv node layers plus RelationConv edge layers."""
    super(Net, self).__init__()
    # BUG FIX: NNConv(dim, dim, nn) reshapes the edge network's per-edge
    # output into a (dim, dim) weight matrix, so the network must emit
    # dim * dim values; the original Linear emitted only `dim` and fails at
    # runtime for any dim > 1.
    nn = Sequential(Linear(rel_data.num_features, dim * dim))
    # Learned input projection applied as a raw parameter matrix.
    self.fc1 = torch.nn.Parameter(torch.FloatTensor(data.num_features, dim))
    self.NNConv1 = NNConv(dim, dim, nn, root_weight=False)
    if args.num_node_layer == 2:
        # Second node layer shares the same edge network.
        self.NNConv2 = NNConv(dim, dim, nn, root_weight=False)
    self.fc2 = Linear(dim, dataset.num_classes)
    self.RConv1 = RelationConv(train_eps=False)
    if args.num_edge_layer == 2:
        self.RConv2 = RelationConv(train_eps=False)
def __init__(self):
    """Two NNConv layers over 2-dim edge attributes, plus a classifier head."""
    super().__init__()
    # Edge nets map 2-dim edge attrs to in_channels * out_channels weights.
    edge_net_a = nn.Sequential(nn.Linear(2, 25), nn.ReLU(),
                               nn.Linear(25, d.num_features * 32))
    self.conv1 = NNConv(d.num_features, 32, edge_net_a, aggr='mean')
    edge_net_b = nn.Sequential(nn.Linear(2, 25), nn.ReLU(),
                               nn.Linear(25, 32 * 64))
    self.conv2 = NNConv(32, 64, edge_net_b, aggr='mean')
    self.fc1 = torch.nn.Linear(64, 128)
    self.fc2 = torch.nn.Linear(128, d.num_classes)
def __init__(self, dataset):
    """MGN-NET: three NNConv layers configured from config.PARAMS."""
    super(MGN_NET, self).__init__()
    params = config.PARAMS
    # One edge network per conv; "LinearK" entries size the K-th edge net.
    edge_net_1 = Sequential(Linear(params["Linear1"]["in"], params["Linear1"]["out"]), ReLU())
    self.conv1 = NNConv(params["conv1"]["in"], params["conv1"]["out"], edge_net_1, aggr='mean')
    edge_net_2 = Sequential(Linear(params["Linear2"]["in"], params["Linear2"]["out"]), ReLU())
    self.conv2 = NNConv(params["conv2"]["in"], params["conv2"]["out"], edge_net_2, aggr='mean')
    edge_net_3 = Sequential(Linear(params["Linear3"]["in"], params["Linear3"]["out"]), ReLU())
    self.conv3 = NNConv(params["conv3"]["in"], params["conv3"]["out"], edge_net_3, aggr='mean')
def __init__(self):
    """Bidirectional NNConv encoder over embedded token graphs."""
    super(MPNNEncoder, self).__init__()
    self.embedding = nn.Embedding(config.vocab_size, config.emb_dim)
    # Shared edge network: emb_dim edge attrs -> emb_dim * hidden_dim weights.
    # NOTE(review): this same network is reused by the deeper NNConv layers
    # below, which are (hidden_dim -> hidden_dim) and need hidden_dim**2
    # outputs — that only matches when emb_dim == hidden_dim; confirm.
    self.nn = nn.Sequential(
        nn.Linear(config.emb_dim, config.emb_dim * config.hidden_dim),
        nn.ReLU())
    self.nnconv1 = NNConv(in_channels=config.emb_dim, out_channels=config.hidden_dim, nn=self.nn, aggr="add", flow="target_to_source")
    self.nnconv2 = NNConv(in_channels=config.emb_dim, out_channels=config.hidden_dim, nn=self.nn, aggr="add", flow="source_to_target")
    # self.gcn1 = NNConv(config.emb_dim, config.hidden_dim, flow='source_to_target')
    # Stack 1, seeded with the target_to_source conv.
    # NOTE(review): the appended layers use flow="source_to_target", unlike
    # this stack's first layer — verify the direction is intentional.
    self.nnconv_list1 = [self.nnconv1]
    for i in range(1, config.n_gcn_layers):
        self.nnconv_list1.append(
            NNConv(in_channels=config.hidden_dim, out_channels=config.hidden_dim, nn=self.nn, aggr="add", flow="source_to_target"))
    # NOTE(review): nn.Sequential cannot forward (x, edge_index, edge_attr)
    # through NNConv layers — presumably used only to register the modules.
    self.nnconv_seq1 = nn.Sequential(*self.nnconv_list1)
    # self.gcn2 = GCNConv(config.emb_dim, config.hidden_dim, flow='target_to_source')
    # Stack 2, seeded with the source_to_target conv.
    self.nnconv_list2 = [self.nnconv2]
    for i in range(1, config.n_gcn_layers):
        self.nnconv_list2.append(
            NNConv(in_channels=config.hidden_dim, out_channels=config.hidden_dim, nn=self.nn, aggr="add", flow="source_to_target"))
    self.nnconv_seq2 = nn.Sequential(*self.nnconv_list2)
    # Fusion head over the concatenated (2 * hidden_dim) representation.
    self.fc = nn.Sequential(
        nn.Linear(config.hidden_dim * 2, config.hidden_dim * 2),
        # nn.Dropout(0.2),
        nn.ReLU())
    self.w1 = nn.Linear(config.hidden_dim * 2, config.hidden_dim)
    self.w2 = nn.Linear(config.hidden_dim * 2, config.hidden_dim)
    self.w3 = nn.Linear(config.hidden_dim * 2, config.hidden_dim)
    self.w4 = nn.Linear(config.hidden_dim * 2, config.hidden_dim)
def __init__(self, num_layers, num_input_features, hidden, nn):
    """NMP constructor; `nn` is a factory (edge_dim, n_weights) -> edge network."""
    super(NMP, self).__init__()
    self.conv1 = NNConv(num_input_features, hidden,
                        nn(1, num_input_features * hidden), aggr="add")
    self.convs = torch.nn.ModuleList()
    for _ in range(num_layers - 1):
        self.convs.append(
            NNConv(hidden, hidden, nn(1, hidden * hidden), aggr="add"))
    # 3 * hidden input — presumably three pooled readouts are concatenated
    # in forward(); confirm against the forward implementation.
    self.lin1 = Linear(3 * hidden, hidden)
    self.lin2 = Linear(hidden, 2)  # output layer, 2 classes
def __init__(self, num_classes):
    """Three NNConv layers (1 -> 64 -> 64 -> 128) plus a three-layer MLP head."""
    super().__init__()
    # Each edge net maps 3-dim edge attrs to in_channels * out_channels weights.
    edge_net_1 = Seq(Lin(3, 25), ReLU(), Lin(25, 1 * 64))
    self.conv1 = NNConv(1, 64, edge_net_1, aggr='mean')
    edge_net_2 = Seq(Lin(3, 25), ReLU(), Lin(25, 64 * 64))
    self.conv2 = NNConv(64, 64, edge_net_2, aggr='mean')
    edge_net_3 = Seq(Lin(3, 25), ReLU(), Lin(25, 64 * 128))
    self.conv3 = NNConv(64, 128, edge_net_3, aggr='mean')
    self.lin1 = torch.nn.Linear(128, 256)
    self.lin2 = torch.nn.Linear(256, 256)
    self.lin3 = torch.nn.Linear(256, num_classes)
def test_nn_conv():
    """NNConv: dense/sparse equivalence, bipartite inputs, TorchScript variants."""
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    value = torch.rand(row.size(0), 3)
    # Same graph as a SparseTensor adjacency, edge values carried as attrs.
    adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))

    # Edge network must emit 8 * 32 values per edge for NNConv(8, 32).
    nn = Seq(Lin(3, 32), ReLU(), Lin(32, 8 * 32))
    conv = NNConv(8, 32, nn=nn)
    assert conv.__repr__() == 'NNConv(8, 32)'
    out = conv(x1, edge_index, value)
    assert out.size() == (4, 32)
    # Explicit size and sparse adjacency must reproduce the edge_index result.
    assert conv(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()
    assert conv(x1, adj.t()).tolist() == out.tolist()

    # TorchScript, dense signature.
    t = '(Tensor, Tensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index, value).tolist() == out.tolist()
    assert jit(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()
    # TorchScript, sparse signature.
    t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, adj.t()).tolist() == out.tolist()

    # Bipartite message passing: (8, 16)-dim sources/targets over (4, 2).
    adj = adj.sparse_resize((4, 2))
    conv = NNConv((8, 16), 32, nn=nn)
    assert conv.__repr__() == 'NNConv((8, 16), 32)'
    out1 = conv((x1, x2), edge_index, value)
    out2 = conv((x1, None), edge_index, value, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert conv((x1, x2), edge_index, value, (4, 2)).tolist() == out1.tolist()
    assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
    assert conv((x1, None), adj.t()).tolist() == out2.tolist()

    t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), edge_index, value).tolist() == out1.tolist()
    assert jit((x1, x2), edge_index, value, size=(4, 2)).tolist() == out1.tolist()
    assert jit((x1, None), edge_index, value, size=(4, 2)).tolist() == out2.tolist()
    t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
    assert jit((x1, None), adj.t()).tolist() == out2.tolist()
def __init__(self, node_input_dim=15, edge_input_dim=5, output_dim=1, node_hidden_dim=64, edge_hidden_dim=128, num_step_message_passing=6, num_step_set2set=6):
    """MPNN: NNConv + GRU message passing with a Set2Set readout head."""
    super(MPNN, self).__init__()
    self.num_step_message_passing = num_step_message_passing
    self.lin0 = nn.Linear(node_input_dim, node_hidden_dim)
    # Edge network: edge features -> node_hidden_dim**2 per-edge conv weights.
    edge_net = nn.Sequential(
        nn.Linear(edge_input_dim, edge_hidden_dim),
        nn.ReLU(),
        nn.Linear(edge_hidden_dim, node_hidden_dim * node_hidden_dim))
    self.conv = NNConv(node_hidden_dim, node_hidden_dim, edge_net,
                       aggr='mean', root_weight=False)
    self.gru = nn.GRU(node_hidden_dim, node_hidden_dim)
    # Set2Set outputs 2 * node_hidden_dim; lin1 projects it back down.
    self.set2set = Set2Set(node_hidden_dim, processing_steps=num_step_set2set)
    self.lin1 = nn.Linear(2 * node_hidden_dim, node_hidden_dim)
    self.lin2 = nn.Linear(node_hidden_dim, output_dim)
def __init__(self, args):
    """GCN + SAGPooling drug encoder feeding an NNConv-based DDI layer."""
    super(NetModular, self).__init__()
    self.args = args
    self.num_features = args.num_features
    # self.ddi_num_features = args.ddi_num_features
    self.num_edge_features = args.num_edge_features
    self.nhid = args.nhid
    self.ddi_nhid = args.ddi_nhid
    self.pooling_ratio = args.pooling_ratio
    self.dropout_ratio = args.dropout_ratio
    # Three GCN + SAGPooling stages over the molecular graph.
    self.conv1 = GCNConv(self.num_features, self.nhid).to(args.device)
    self.pool1 = SAGPooling(self.nhid, ratio=self.pooling_ratio).to(args.device)
    self.conv2 = GCNConv(self.nhid, self.nhid).to(args.device)
    self.pool2 = SAGPooling(self.nhid, ratio=self.pooling_ratio).to(args.device)
    self.conv3 = GCNConv(self.nhid, self.nhid).to(args.device)
    self.pool3 = SAGPooling(self.nhid, ratio=self.pooling_ratio).to(args.device)
    # Edge network for the DDI conv: edge feats -> (6 * nhid) * ddi_nhid
    # weights, matching NNConv(6 * nhid, ddi_nhid).
    self.nn = torch.nn.Linear(self.num_edge_features, 6 * self.nhid * self.ddi_nhid)
    self.conv4 = NNConv(6 * self.nhid, self.ddi_nhid, self.nn).to(args.device)
    # Attention-free alternative to conv4.
    self.conv_noattn = GCNConv(6 * self.nhid, self.ddi_nhid).to(args.device)
    self.lin1 = torch.nn.Linear(self.ddi_nhid, self.ddi_nhid)
    self.lin2 = torch.nn.Linear(self.ddi_nhid, self.ddi_nhid)
    self.lin3 = torch.nn.Linear(self.num_edge_features, self.ddi_nhid)
def __init__(self, args, device):
    """Set2Set MPNN with GRU state updates and multiple output heads."""
    super(Net, self).__init__()
    self.args = args
    self.device = device
    node_dim = self.args.node_dim
    edge_dim = self.args.edge_dim
    hidden_dim = self.args.hidden_dim
    processing_steps = self.args.processing_steps
    self.depth = self.args.depth
    self.lin0 = torch.nn.Linear(node_dim, hidden_dim)
    # Edge network: edge_dim attrs -> hidden_dim**2 per-edge conv weights.
    edge_mlp = Sequential(Linear(edge_dim, hidden_dim * 2), ReLU(),
                          Linear(hidden_dim * 2, hidden_dim * hidden_dim))
    self.conv = NNConv(hidden_dim, hidden_dim, edge_mlp, aggr='mean')
    self.gru = GRU(hidden_dim, hidden_dim)
    # Set2Set doubles the feature dim; lin1 projects back to hidden_dim.
    self.set2set = Set2Set(hidden_dim, processing_steps=processing_steps)
    self.lin1 = torch.nn.Linear(2 * hidden_dim, hidden_dim)
    self.lin2 = torch.nn.Linear(hidden_dim, 1)
    # Two auxiliary 2-way heads.
    self.lin3 = torch.nn.Linear(hidden_dim, 36)
    self.lin4 = torch.nn.Linear(36, 2)
    self.lin5 = torch.nn.Linear(hidden_dim, 36)
    self.lin6 = torch.nn.Linear(36, 2)
    self.apply(init_weights)