def __init__(self, A, As, all_nodes_neighbors, N, d, layers, steps, delay, weight, GPU=False):
    """Build the Jaccard-variant MihGNN embedding model.

    Args:
        A: dense adjacency matrix (array-like, N x N).
        As: auxiliary adjacency structure, forwarded to the aggregation module.
        all_nodes_neighbors: per-node neighbor collections, forwarded to the
            aggregation module.
        N: number of nodes; rows of the random initial embedding matrix.
        d: embedding dimension.
        layers: number of convolution layers in the aggregation module.
        steps: preprocessing parameter passed to Matrix_pre_handle.
        delay: preprocessing parameter passed to Matrix_pre_handle.
        weight: per-class weights for the cross-entropy loss (the classifier
            head emits 2 logits, so a length-2 sequence is expected).
        GPU: accepted for API compatibility with sibling classes; unused here.
    """
    super(MihGNNEmbedding12WithJaccard, self).__init__()
    self.original_A = torch.tensor(A, dtype=torch.float)
    # NOTE(review): A @ A counts length-2 walks between node pairs, so the
    # name "one_hop_matrix" looks misleading; kept as-is because external
    # code may read this attribute.
    self.one_hop_matrix = torch.matmul(self.original_A, self.original_A)
    # Preprocess the adjacency matrix once, then keep the float tensor.
    self.A = torch.tensor(Matrix_pre_handle(A, steps, delay), dtype=torch.float)
    self.d = d
    self.e = torch.tensor(math.e, dtype=torch.float)
    self.layers = layers
    self.As = As
    self.weight = torch.tensor(weight, dtype=torch.float)
    self.all_nodes_neighbors = all_nodes_neighbors
    # Random normal initial node embeddings (N nodes, d dimensions).
    embedding_state = numpy.random.randn(N, d)
    embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
    self.aggregationModule = MihGNNAggregationModule2(
        A=self.A,
        As=As,
        all_nodes_neighbors=all_nodes_neighbors,
        convolution_layers=layers,
        d=d,
        embedding_states=embedding_state)
    # Edge feature transform on concatenated node-pair embeddings (2d -> 2d).
    self.edge_generator_forward = nn.Sequential(
        nn.Linear(d * 2, d * 2, bias=True),
        nn.BatchNorm1d(num_features=d * 2),
        nn.ReLU(),
        nn.Linear(d * 2, d * 2, bias=True),
        nn.BatchNorm1d(num_features=d * 2),
        nn.ReLU()
    )
    # Binary classifier head over the pair representation.
    self.liner = LineNetwork(input_features=d * 2, output_features=2, hidden_features=d)
    self.soft_max = nn.Softmax(dim=-1)
    self.cross_entropy = nn.CrossEntropyLoss(weight=self.weight, reduction='sum')
 def __init__(self,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              layers,
              steps,
              delay,
              GPU=False):
     super(MihGNNEmbedding13, self).__init__()
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     self.all_nodes_neighbors = all_nodes_neighbors
     embedding_state = numpy.random.randn(N, d)
     embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_state)
     self.lossFunction = MihOutputModule1(d=self.d, e=self.e, layers=layers)
    def __init__(self,
                 A,
                 As,
                 all_nodes_neighbors,
                 N,
                 d,
                 layers,
                 steps,
                 delay,
                 GPU=False):
        """Set up the MihGNN model variant with a trainable class weight.

        Unlike the fixed-weight siblings, the per-class weight vector is a
        learnable ``Parameter`` initialized to [1, 1].
        """
        super(MihGNNEmbedding12WithTrainWeight, self).__init__()
        # Preprocess adjacency once; keep it as a float tensor.
        self.A = torch.tensor(Matrix_pre_handle(A, steps, delay), dtype=torch.float)
        self.d = d
        self.e = torch.tensor(math.e, dtype=torch.float)
        self.layers = layers
        self.As = As
        # Learnable class weights, starting uniform.
        self.weight = Parameter(torch.tensor([1, 1], dtype=torch.float),
                                requires_grad=True)
        self.all_nodes_neighbors = all_nodes_neighbors
        # Random normal initialization for the node embedding table.
        initial_embeddings = torch.tensor(
            data=numpy.random.randn(N, d), dtype=torch.float)
        self.aggregationModule = MihGNNAggregationModule2(
            A=self.A,
            As=As,
            all_nodes_neighbors=all_nodes_neighbors,
            convolution_layers=layers,
            d=d,
            embedding_states=initial_embeddings)
        self.liner = LineNetwork(input_features=d * 2,
                                 output_features=2,
                                 hidden_features=d)
        # NOTE(review): nn.NLLLoss expects log-probabilities; pairing it with
        # nn.Softmax (not LogSoftmax) looks suspicious — verify in forward().
        self.soft_max = nn.Softmax(dim=-1)
        self.loss = nn.NLLLoss()
 def __init__(self,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              layers,
              steps,
              delay,
              weight,
              GPU=False):
     super(MihGNNEmbedding12, self).__init__()
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     self.weight = torch.tensor(weight, dtype=torch.float)
     self.all_nodes_neighbors = all_nodes_neighbors
     embedding_state = numpy.random.randn(N, d)
     embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_state)
     self.liner = LineNetwork(input_features=d * 2,
                              output_features=2,
                              hidden_features=d)
     self.soft_max = nn.Softmax(dim=-1)
     self.cross_entropy = nn.CrossEntropyLoss(weight=self.weight,
                                              reduction='sum')
 def __init__(self,
              G,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              walk_length,
              p,
              q,
              iter,
              window_size,
              workers,
              layers,
              steps,
              delay,
              GPU=False):
     super(MihGNNEmbedding12AferRandomWalk, self).__init__()
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     self.all_nodes_neighbors = all_nodes_neighbors
     print("Getting word embedding!")
     node2Vec = Node2vec(G=G,
                         A=A,
                         walk_length=walk_length,
                         p=p,
                         q=q,
                         embed_size=d,
                         iter=iter,
                         window_size=window_size,
                         workers=workers)
     embedding_state = node2Vec.word_embeddings
     # embedding_state = numpy.random.randn(N, d)
     # embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_state)
     self.liner = LineNetwork(input_features=d * 2,
                              output_features=2,
                              hidden_features=d)
     self.soft_max = nn.Softmax(dim=-1)
     # self.cross_entropy = nn.CrossEntropyLoss(weight = self.weight, reduction = 'sum')
     self.cross_entropy = nn.CrossEntropyLoss()