def __init__(self, node_attr_dim: int, edge_attr_dim: int, state_dim: int = 8,
             num_heads: int = 8, num_conv: int = 2, out_dim: int = 1,
             dropout: float = 0.2, attention_pooling: bool = True):
    super(EdgeGATEncoder, self).__init__()
    self.__edge_gat = EdgeGAT(node_attr_dim=node_attr_dim,
                              edge_attr_dim=edge_attr_dim,
                              state_dim=state_dim,
                              num_heads=num_heads,
                              num_conv=num_conv,
                              out_dim=state_dim,
                              dropout=dropout)

    # The pooling layer performs the following shape change
    # (state_dim, not node_attr_dim, matches the Linear layers below):
    # from [num_nodes, state_dim * edge_attr_dim]
    # to   [num_graphs, 2 * state_dim * edge_attr_dim]
    if attention_pooling:
        self.__pooling = pyg_nn.GlobalAttention(
            nn.Linear(state_dim * edge_attr_dim, 1),
            nn.Linear(state_dim * edge_attr_dim,
                      2 * state_dim * edge_attr_dim))
    else:
        self.__pooling = pyg_nn.Set2Set(state_dim * edge_attr_dim,
                                        processing_steps=3)

    self.__out_linear = nn.Sequential(
        nn.Linear(2 * state_dim * edge_attr_dim, state_dim),
        nn.ReLU(),
        nn.Linear(state_dim, out_dim))

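# A minimal forward sketch for EdgeGATEncoder (an assumption, not part of
# the original), assuming EdgeGAT consumes a PyG Batch and returns node
# states of shape [num_nodes, state_dim * edge_attr_dim], the width the
# pooling layers above are built for.
def forward(self, data):
    out = self.__edge_gat(data)            # [num_nodes, state_dim * edge_attr_dim]
    out = self.__pooling(out, data.batch)  # [num_graphs, 2 * state_dim * edge_attr_dim]
    return self.__out_linear(out)          # [num_graphs, out_dim]
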
def __init__(self, node_attr_dim: int, edge_attr_dim: int, state_dim: int = 64,
             num_conv: int = 3, out_dim: int = 1,
             attention_pooling: bool = False):
    super(MPNN, self).__init__()
    self.__in_linear = nn.Sequential(nn.Linear(node_attr_dim, state_dim),
                                     nn.ReLU())
    self.__num_conv = num_conv
    self.__nn_conv_linear = nn.Sequential(
        nn.Linear(edge_attr_dim, state_dim),
        nn.ReLU(),
        nn.Linear(state_dim, state_dim * state_dim))
    self.__nn_conv = pyg_nn.NNConv(state_dim, state_dim,
                                   self.__nn_conv_linear,
                                   aggr='mean', root_weight=False)
    self.__gru = nn.GRU(state_dim, state_dim)

    if attention_pooling:
        self.__pooling = pyg_nn.GlobalAttention(
            nn.Linear(state_dim, 1),
            nn.Linear(state_dim, 2 * state_dim))
    else:
        # Setting num_layers > 1 makes Set2Set significantly slower.
        self.__pooling = pyg_nn.Set2Set(state_dim, processing_steps=3)

    self.__out_linear = nn.Sequential(
        nn.Linear(2 * state_dim, 2 * state_dim),
        nn.ReLU(),
        nn.Linear(2 * state_dim, out_dim))

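# A minimal forward sketch for this MPNN (an assumption, not part of the
# original): the standard NNConv + GRU message-passing loop of Gilmer et
# al. (2017). It must live inside the same class (the name-mangled private
# attributes require that) and assumes `import torch.nn.functional as F`
# plus a PyG Batch `data` with x, edge_index, edge_attr and batch.
def forward(self, data):
    out = self.__in_linear(data.x)         # [num_nodes, state_dim]
    h = out.unsqueeze(0)                   # GRU hidden state
    for _ in range(self.__num_conv):
        m = F.relu(self.__nn_conv(out, data.edge_index, data.edge_attr))
        out, h = self.__gru(m.unsqueeze(0), h)
        out = out.squeeze(0)
    out = self.__pooling(out, data.batch)  # [num_graphs, 2 * state_dim]
    return self.__out_linear(out)          # [num_graphs, out_dim]
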
def __init__(self, action_dim, hidden_dim, node_dim):
    super().__init__()
    self.gat = GAT(hidden_dim=hidden_dim, node_dim=node_dim)
    self.set2set = gnn.Set2Set(hidden_dim, processing_steps=6)
    self.mlp = nn.Sequential(nn.Linear(2 * hidden_dim, hidden_dim),
                             nn.ReLU(),
                             nn.Linear(hidden_dim, hidden_dim),
                             nn.ReLU(),
                             nn.Linear(hidden_dim, 1))

def __init__(self, action_dim, hidden_dim, edge_dim, node_dim):
    super().__init__()
    self.mpnn = MPNN(hidden_dim=hidden_dim, edge_dim=edge_dim,
                     node_dim=node_dim)
    self.set2set = gnn.Set2Set(hidden_dim, processing_steps=6)
    self.fc = nn.Linear(2 * hidden_dim, hidden_dim)
    self.mlp = nn.Sequential(nn.Linear(5 * hidden_dim, hidden_dim),
                             nn.ReLU(),
                             nn.Linear(hidden_dim, action_dim))
    self.hidden_dim = hidden_dim

def __init__(self, hparams, node_dim, edge_dim):
    super(GCN, self).__init__()
    self.node_dim = node_dim
    self.edge_dim = edge_dim
    self.hparams = hparams
    self.output_dim = 1

    # Linear atom embedding
    self.linatoms = torch.nn.Linear(self.node_dim, hparams['conv_base_size'])

    # Graph convolution stack
    emb_dim = hparams['emb_dim']
    conv_dims = net_pattern(hparams['conv_n_layers'],
                            hparams['conv_base_size'],
                            hparams['conv_ratio']) + [emb_dim]
    conv_layers = []
    for index in range(hparams['conv_n_layers']):
        conv_layers.append(
            gnn.GCNConv(conv_dims[index], conv_dims[index + 1], cached=False))
    self.graph_conv = nn.ModuleList(conv_layers)
    if self.hparams['conv_batchnorm']:
        self.bn = nn.ModuleList(
            [nn.BatchNorm1d(dim) for dim in conv_dims[1:]])

    # Graph embedding
    if hparams['emb_set2set']:
        self.graph_emb = gnn.Set2Set(emb_dim, processing_steps=3)
        emb_dim = emb_dim * 2
    else:
        self.graph_emb = nn.Sequential(nn.Linear(emb_dim, emb_dim),
                                       str2act(hparams['emb_act']))

    # Build MLP head
    self.using_mlp = hparams['mlp_layers'] > 0
    if self.using_mlp:
        self.mlp, last_dim = make_mlp(emb_dim, hparams['mlp_layers'],
                                      hparams['mlp_dim_ratio'],
                                      hparams['mlp_act'],
                                      hparams['mlp_batchnorm'],
                                      hparams['mlp_dropout'])
    else:
        last_dim = emb_dim

    # Prediction
    self.pred = nn.Linear(last_dim, self.output_dim)
    # Placeholder for the gradients
    self.gradients = None

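# The `self.gradients = None` placeholder suggests gradient-based
# attribution (Grad-CAM style). A minimal sketch of how it is typically
# filled in, assuming the hook is registered on the final convolution
# activations inside forward(); `activations_hook` is a hypothetical name.
def activations_hook(self, grad):
    # Store the gradient flowing back through the graph embedding.
    self.gradients = grad

# Inside forward(), after computing the last conv activations `x`:
#     if x.requires_grad:
#         x.register_hook(self.activations_hook)
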
def run_check1():
    batch_size = 10
    num_node = 30
    node_dim = 3
    edge_dim = 2
    num_edge = 24

    # edge_index and edge are kept from the original setup but are unused
    # by the Set2Set check below.
    edge_index = np.random.choice(num_node, (num_edge, 2))
    node = np.random.uniform(-1, 1, (num_node, node_dim))
    edge = np.random.uniform(-1, 1, (num_edge, edge_dim))
    node_batch_index = np.random.choice(batch_size, num_node)
    node_batch_index = np.sort(node_batch_index)

    # ---
    edge_index = torch.from_numpy(edge_index).long()
    node_batch_index = torch.from_numpy(node_batch_index).long()
    node = torch.from_numpy(node).float()
    edge = torch.from_numpy(edge).float()

    set2set_ref = gnn.Set2Set(node_dim, processing_steps=1)
    set2set = Set2Set(node_dim, processing_step=1)
    # Copy the reference LSTM weights so both modules compute the same
    # output (copy_ avoids aliasing the reference Parameters).
    set2set.lstm.bias_ih_l0.data.copy_(set2set_ref.lstm.bias_ih_l0.data)
    set2set.lstm.bias_hh_l0.data.copy_(set2set_ref.lstm.bias_hh_l0.data)
    set2set.lstm.weight_ih_l0.data.copy_(set2set_ref.lstm.weight_ih_l0.data)
    set2set.lstm.weight_hh_l0.data.copy_(set2set_ref.lstm.weight_hh_l0.data)

    # ---
    print('------------------------------')
    print('')
    print(set2set_ref)
    print('')
    print('node (x)')
    print(node.shape)
    print('')

    y = set2set_ref(node, node_batch_index)
    y1 = set2set(node, node_batch_index)

    print('y')
    print(y.shape)
    print(y)
    print('')
    print('y1')
    print(y1.shape)
    print(y1)
    print('')

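# A natural final check (an addition, not in the original): with identical
# LSTM weights, the custom Set2Set should match the PyG reference
# numerically, e.g. by ending run_check1 with:
#     assert torch.allclose(y, y1, atol=1e-6)
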
def __init__(self, node_feature_size, edge_feature_size, node_hidden_size,
             edge_hidden_size, dropout_ratio=0.5, steps=6):
    super(MoleculeMPNN, self).__init__()
    self.node_feature_size = node_feature_size
    self.edge_feature_size = edge_feature_size
    self.node_hidden_size = node_hidden_size
    # The original accidentally re-assigned self.edge_feature_size to
    # edge_hidden_size here, clobbering the value stored above; the uses
    # below are updated to the attribute that was evidently intended.
    self.edge_hidden_size = edge_hidden_size
    self.dropout_ratio = dropout_ratio

    self.embedder = nn.Sequential(
        LinearBlock(node_feature_size, 64, self.dropout_ratio, True,
                    nn.ReLU()),
        LinearBlock(64, self.node_hidden_size, self.dropout_ratio, False),
    )
    self.steps = steps
    self.edge_net = nn.Sequential(
        LinearBlock(edge_feature_size, 32, self.dropout_ratio, True,
                    nn.ReLU()),
        LinearBlock(32, 64, self.dropout_ratio, True, nn.ReLU()),
        LinearBlock(64, self.edge_hidden_size, self.dropout_ratio, True,
                    nn.ReLU()),
        LinearBlock(self.edge_hidden_size,
                    self.node_hidden_size * self.node_hidden_size,
                    self.dropout_ratio, True))
    self.mpnn = gnn.NNConv(self.node_hidden_size, self.node_hidden_size,
                           self.edge_net, aggr="mean", root_weight=True)
    self.gru = nn.GRUCell(self.node_hidden_size, self.node_hidden_size)
    self.set2set = gnn.Set2Set(self.node_hidden_size, self.steps)
    self.fc = nn.Sequential(
        LinearBlock(self.node_hidden_size * 4 + 8, 1024,
                    self.dropout_ratio, True, nn.ReLU()),
        LinearBlock(1024, 8))

def __init__(self, hparams, node_dim=None, edge_dim=None):
    super(MPNN, self).__init__()
    self.node_dim = node_dim
    self.edge_dim = edge_dim
    self.hparams = hparams
    self.output_dim = 1

    # Linear atom embedding
    atom_dim = hparams['atom_dim']
    self.linatoms = torch.nn.Linear(self.node_dim, atom_dim)

    # MPNN part
    conv_dim = atom_dim * 2
    nnet = nn.Sequential(*[
        nn.Linear(self.edge_dim, conv_dim),
        str2act(hparams['conv_act']),
        nn.Linear(conv_dim, atom_dim * atom_dim)
    ])
    self.conv = gnn.NNConv(atom_dim, atom_dim, nnet,
                           aggr=hparams['conv_aggr'], root_weight=False)
    self.gru = nn.GRU(atom_dim, atom_dim)

    # Graph embedding
    self.set2set = gnn.Set2Set(atom_dim,
                               processing_steps=hparams['emb_steps'])

    # Build MLP head
    self.using_mlp = hparams['mlp_layers'] > 0
    if self.using_mlp:
        self.mlp, last_dim = make_mlp(atom_dim * 2, hparams['mlp_layers'],
                                      hparams['mlp_dim_ratio'],
                                      hparams['mlp_act'],
                                      hparams['mlp_batchnorm'],
                                      hparams['mlp_dropout'])
    else:
        last_dim = atom_dim * 2

    # Prediction
    self.pred = nn.Linear(last_dim, self.output_dim)

def __init__(self, r_dim, num_encoder_layers=6, num_decoder_layers=6,
             readout_steps=5, dropout=0.):
    # TODO: batch support
    # TODO: edge attrs
    self.dropout = dropout
    self.num_encoder_layers = num_encoder_layers
    self.num_decoder_layers = num_decoder_layers
    encoder = GraphEncoder(r_dim, num_layers=self.num_encoder_layers,
                           dropout=self.dropout)
    decoder = GraphDecoder(r_dim, num_layers=self.num_decoder_layers,
                           dropout=self.dropout)
    super(VGAE, self).__init__(encoder, decoder, r_dim)
    self.set2set = gnn.Set2Set(r_dim, processing_steps=readout_steps)

def __init__(self, node_dim=13, edge_dim=5, num_target=8):
    super(Net, self).__init__()
    self.num_message_passing = 6
    node_hidden_dim = 128
    edge_hidden_dim = 128

    self.preprocess = nn.Sequential(
        LinearBn(node_dim, 64),
        nn.ReLU(),
        LinearBn(64, node_hidden_dim),
    )
    edge_net = nn.Sequential(
        LinearBn(edge_dim, 32),
        nn.ReLU(),  # alternatives tried here: Swish, LeakyReLU
        LinearBn(32, 64),
        nn.ReLU(),
        LinearBn(64, edge_hidden_dim),
        nn.ReLU(),
        LinearBn(edge_hidden_dim, node_hidden_dim * node_hidden_dim),
    )
    self.conv = gnn.NNConv(node_hidden_dim, node_hidden_dim, edge_net,
                           aggr='mean', root_weight=True)
    self.gru = nn.GRU(node_hidden_dim, node_hidden_dim)
    self.set2set = gnn.Set2Set(node_hidden_dim, processing_steps=6)

    # Predict the coupling constant.
    self.predict = nn.Sequential(
        LinearBn(4 * node_hidden_dim, 512),
        nn.ReLU(),
        nn.Linear(512, num_target),
    )

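# Why 4 * node_hidden_dim going into `predict`? A plausible wiring (an
# assumption; this snippet alone does not confirm it): per coupling pair,
# the Set2Set graph readout (2 * hidden) is concatenated with the two atom
# states (hidden each). A sketch with hypothetical index names:
def predict_pairs(net, node, node_graph_index, pair_atom_index,
                  pair_graph_index):
    # node: [num_nodes, h]; pair_atom_index: [num_pairs, 2]
    pool = net.set2set(node, node_graph_index)               # [num_graphs, 2h]
    feat = torch.cat([pool[pair_graph_index],
                      node[pair_atom_index[:, 0]],
                      node[pair_atom_index[:, 1]]], dim=-1)  # [num_pairs, 4h]
    return net.predict(feat)                                 # [num_pairs, num_target]
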
def __init__(self, node_dim=13, edge_dim=5, num_target=8,
             node_hidden_dim=128, edge_hidden_dim=128,
             num_message_passing=6, prep_hid_size=64):
    # Note: prep_hid_size is accepted but currently unused.
    super(ChampsNet, self).__init__()
    self.num_message_passing = num_message_passing

    self.preprocess = nn.Sequential(
        LinearBn(node_dim, node_hidden_dim, act=nn.ReLU()))
    edge_net = nn.Sequential(
        LinearBn(edge_dim, edge_hidden_dim, act=nn.ReLU()),
        LinearBn(edge_hidden_dim, node_hidden_dim * node_hidden_dim))
    self.conv = gnn.NNConv(node_hidden_dim, node_hidden_dim, edge_net,
                           aggr='mean', root_weight=True)
    self.gru = nn.GRU(node_hidden_dim, node_hidden_dim)
    self.set2set = gnn.Set2Set(node_hidden_dim,
                               processing_steps=num_message_passing)

    # Predict the coupling constant.
    self.predict = nn.Sequential(
        LinearBn(4 * node_hidden_dim, num_target, act=nn.ReLU()))

def build_pool(self, in_channels, processing_steps=4, num_layers=1):
    return geom_nn.Set2Set(in_channels, processing_steps, num_layers)

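# Usage sketch (hypothetical values, assuming torch and geom_nn are already
# imported): Set2Set concatenates its LSTM query with the attention-weighted
# sum at every step, so the readout width is 2 * in_channels.
pool = geom_nn.Set2Set(64, 4, 1)
x = torch.randn(30, 64)
batch = torch.zeros(30, dtype=torch.long)
out = pool(x, batch)  # [1, 128]
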
import torch
import torch_geometric.nn as gnn

set2set = gnn.Set2Set(128, processing_steps=2)
# Set2Set expects a batch assignment vector mapping each row of x to a
# graph; here all 26 nodes belong to one graph.
batch = torch.zeros(26, dtype=torch.long)
for i in range(1000):
    x = torch.randn(26, 128)
    out = set2set(x, batch)  # [1, 256]

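# A batched variant (a sketch): two graphs of 10 and 16 nodes pooled in a
# single call; Set2Set returns one readout row per graph.
x = torch.randn(26, 128)
batch = torch.cat([torch.zeros(10, dtype=torch.long),
                   torch.ones(16, dtype=torch.long)])
out = set2set(x, batch)  # [2, 256]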