def __init__(self, mlp_layers, N, f_dict):
    """Build the two-stage recurrent graph-embedding model.

    Args:
        mlp_layers: hidden-layer spec forwarded to gn.mlp_fn.
        N: number of message-passing rounds (stored, used elsewhere).
        f_dict: feature-size dict consumed by the base class
            (presumably defines self.fe/fx/fu/fout — confirm in base).
    """
    super(RecurrentGraphEmbedding, self).__init__(f_dict)
    self.N = N
    make_mlp = gn.mlp_fn(mlp_layers)
    self.component = 'MPGNN'
    # First GNN: edge/node/global models conditioned on one global vector.
    self.gnn1 = gn.GNN(
        gn.EdgeModel(self.fe, self.fx, self.fu, make_mlp, self.fe),
        gn.NodeModel(self.fe, self.fx, self.fu, make_mlp, self.fx),
        gn.GlobalModel_NodeOnly(self.fx, self.fu, make_mlp, self.fx))
    # Second GNN: conditioned on twice the global size — presumably a
    # concatenation of two global states; verify against the forward pass.
    double_fu = 2 * self.fu
    self.gnn2 = gn.GNN(
        gn.EdgeModel(self.fe, self.fx, double_fu, make_mlp, self.fe),
        gn.NodeModel(self.fe, self.fx, double_fu, make_mlp, self.fx),
        gn.GlobalModel_NodeOnly(self.fx, double_fu, make_mlp, self.fx))
    # Readout head mapping the global embedding to the output size.
    self.mlp = make_mlp(self.fu, self.fout)
def __init__(self, mlp_layers, N, f_dict):
    """Build the v2 alternating-double model with a hidden-size projection.

    Args:
        mlp_layers: hidden-layer spec forwarded to gn.mlp_fn.
        N: number of message-passing rounds (stored, used elsewhere).
        f_dict: feature-size dict consumed by the base class
            (presumably defines self.fx/self.h/self.fout — confirm in base).
    """
    super(AlternatingDoublev2, self).__init__(f_dict)
    make_mlp = gn.mlp_fn(mlp_layers)
    self.N = N
    self.component = 'MPGNN'
    # Project raw node features into the hidden size used everywhere below.
    self.proj = torch.nn.Linear(self.fx, self.h)
    # Both GNNs work entirely at hidden size h, conditioned on 2*h globals
    # (presumably two concatenated global states — verify in forward).
    h, double_h = self.h, 2 * self.h
    self.gnn1 = gn.GNN(
        gn.EdgeModel(h, h, double_h, make_mlp, h),
        gn.NodeModel(h, h, double_h, make_mlp, h),
        gn.GlobalModel_NodeOnly(h, double_h, make_mlp, h))
    self.gnn2 = gn.GNN(
        gn.EdgeModel(h, h, double_h, make_mlp, h),
        gn.NodeModel(h, h, double_h, make_mlp, h),
        gn.GlobalModel_NodeOnly(h, double_h, make_mlp, h))
    # Readout over the pair of globals (2*h) to the output size.
    self.mlp = make_mlp(double_h, self.fout)
def __init__(self, mlp_layers, N, f_dict):
    """Build the simplified alternating model.

    Unlike the heavier variants there is no encoder network: a single
    shared GNN layer processes each input at every step. Computations are
    conditioned on the global embedding produced on the other graph at the
    previous step; only the node model is conditioned (nodes are fewer
    than edges — a design choice open to discussion). Nodes are aggregated
    into the global with attention. Feature sizes are left unchanged so
    that passes can be chained.

    Args:
        mlp_layers: hidden-layer spec forwarded to gn.mlp_fn.
        N: number of message-passing rounds (stored, used elsewhere).
        f_dict: feature-size dict consumed by the base class
            (presumably defines self.fe/fx/fu/fout — confirm in base).
    """
    super(AlternatingSimple, self).__init__(f_dict)
    make_mlp = gn.mlp_fn(mlp_layers)
    self.N = N
    self.component = 'MPGNN'
    # Single shared GNN; models take 2*fu of global conditioning
    # (presumably both graphs' globals concatenated — verify in forward).
    double_fu = 2 * self.fu
    self.gnn = gn.GNN(
        gn.EdgeModel(self.fe, self.fx, double_fu, make_mlp, self.fe),
        gn.NodeModel(self.fe, self.fx, double_fu, make_mlp, self.fx),
        gn.GlobalModel_NodeOnly(self.fx, double_fu, make_mlp, self.fu))
    # Readout over both globals (2*fu) to the output size.
    self.mlp = make_mlp(double_fu, self.fout)
def __init__(self, mlp_layers, N, f_dict):
    """Build the v2 recurrent graph-embedding model (hidden-size variant).

    Args:
        mlp_layers: hidden-layer spec forwarded to gn.mlp_fn.
        N: number of message-passing rounds (stored, used elsewhere).
        f_dict: feature-size dict consumed by the base class
            (presumably defines self.fx/self.h/self.fout — confirm in base).
    """
    super(RecurrentGraphEmbeddingv2, self).__init__(f_dict)
    self.N = N
    self.component = 'MPGNN'
    make_mlp = gn.mlp_fn(mlp_layers)
    # Project raw node features into the hidden size used everywhere below.
    self.proj = torch.nn.Linear(self.fx, self.h)
    h, double_h = self.h, 2 * self.h
    # First GNN: conditioned on a single global of size h.
    self.gnn1 = gn.GNN(
        gn.EdgeModel(h, h, h, make_mlp, h),
        gn.NodeModel(h, h, h, make_mlp, h),
        gn.GlobalModel_NodeOnly(h, h, make_mlp, h))
    # Second GNN: conditioned on 2*h of global input (presumably two
    # concatenated global states — verify in forward).
    self.gnn2 = gn.GNN(
        gn.EdgeModel(h, h, double_h, make_mlp, h),
        gn.NodeModel(h, h, double_h, make_mlp, h),
        gn.GlobalModel_NodeOnly(h, double_h, make_mlp, h))
    # Readout from the hidden-size global to the output size.
    self.mlp = make_mlp(h, self.fout)
def __init__(self, mlp_layers, N, f_dict):
    """Build the alternating model with two separate GNNs.

    Args:
        mlp_layers: hidden-layer spec forwarded to gn.mlp_fn.
        N: number of message-passing rounds (stored, used elsewhere).
        f_dict: feature-size dict consumed by the base class
            (presumably defines self.fe/fx/fu/fout — confirm in base).
    """
    super(AlternatingDouble, self).__init__(f_dict)
    make_mlp = gn.mlp_fn(mlp_layers)
    self.N = N
    self.component = 'MPGNN'
    # Two structurally identical GNNs with independent weights; each takes
    # 2*fu of global conditioning (presumably both graphs' globals
    # concatenated — verify in forward).
    double_fu = 2 * self.fu
    self.gnn1 = gn.GNN(
        gn.EdgeModel(self.fe, self.fx, double_fu, make_mlp, self.fe),
        gn.NodeModel(self.fe, self.fx, double_fu, make_mlp, self.fx),
        gn.GlobalModel_NodeOnly(self.fx, double_fu, make_mlp, self.fu))
    self.gnn2 = gn.GNN(
        gn.EdgeModel(self.fe, self.fx, double_fu, make_mlp, self.fe),
        gn.NodeModel(self.fe, self.fx, double_fu, make_mlp, self.fx),
        gn.GlobalModel_NodeOnly(self.fx, double_fu, make_mlp, self.fu))
    # Readout over both globals (2*fu) to the output size.
    self.mlp = make_mlp(double_fu, self.fout)
def __init__(self, mlp_layers, N, f_dict):
    """Build the node-and-edge-aggregating GNN model.

    Uses the full gn.GlobalModel (aggregating both edges and nodes),
    unlike the sibling models that use GlobalModel_NodeOnly.

    Args:
        mlp_layers: hidden-layer spec forwarded to gn.mlp_fn.
        N: number of message-passing rounds (stored, used elsewhere).
        f_dict: feature-size dict consumed by the base class
            (presumably defines self.fe/fx/fu/fout — confirm in base).
    """
    super(GNN_NEAgg, self).__init__(f_dict)
    self.N = N
    # Consistency fix: name the MLP builder `model_fn` like every sibling
    # __init__; the old local name `mlp_fn` shadowed the gn.mlp_fn concept.
    model_fn = gn.mlp_fn(mlp_layers)
    # NOTE(review): unlike the sibling models, this one never sets
    # self.component — confirm whether 'MPGNN' should be set here too.
    self.gnn = gn.GNN(
        gn.EdgeModel(self.fe, self.fx, self.fu, model_fn, self.fe),
        gn.NodeModel(self.fe, self.fx, self.fu, model_fn, self.fx),
        gn.GlobalModel(self.fe, self.fx, self.fu, model_fn, self.fx))
    # Readout from the global embedding to the output size.
    self.mlp = model_fn(self.fu, self.fout)