Example #1
 def __init__(self, A, As, all_nodes_neighbors, N, d, layers, steps, delay, weight, GPU=False):
     super(MihGNNEmbedding12WithJaccard, self).__init__()
     self.original_A = torch.tensor(A, dtype=torch.float)
     # A @ A counts length-2 paths between nodes (despite the "one_hop" name)
     self.one_hop_matrix = torch.matmul(self.original_A, self.original_A)
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     # per-class loss weights, e.g. to counter class imbalance
     self.weight = torch.tensor(weight, dtype=torch.float)
     self.all_nodes_neighbors = all_nodes_neighbors
     embedding_state = numpy.random.randn(N, d)
     embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(A=self.A, As=As, all_nodes_neighbors=all_nodes_neighbors,
                                                       convolution_layers=layers, d=d,
                                                       embedding_states=embedding_state)
     self.edge_generator_forward = nn.Sequential(
         nn.Linear(d * 2, d * 2, bias=True),
         nn.BatchNorm1d(num_features=d * 2),
         nn.ReLU(),
         nn.Linear(d * 2, d * 2, bias=True),
         nn.BatchNorm1d(num_features=d * 2),
         nn.ReLU()
     )
     # self.edge_generator_backward = nn.Sequential(
     #     nn.Linear(d * 2, d * 2, bias=True),
     #     nn.BatchNorm1d(num_features=d * 2),
     #     nn.ReLU()
     # )
     self.liner = LineNetwork(input_features=d * 2, output_features=2, hidden_features=d)
     self.soft_max = nn.Softmax(dim=-1)
     self.cross_entropy = nn.CrossEntropyLoss(weight=self.weight, reduction='sum')
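
The distinctive piece above is the class-weighted, sum-reduced cross-entropy. A minimal self-contained sketch of that loss configuration (the weight values, batch size, and labels below are hypothetical):

import torch
import torch.nn as nn

# hypothetical per-class weights, e.g. up-weighting a rare positive class
weight = torch.tensor([1.0, 5.0])
loss_fn = nn.CrossEntropyLoss(weight=weight, reduction='sum')

logits = torch.randn(4, 2)           # 4 samples, 2 classes, raw scores
labels = torch.tensor([0, 1, 1, 0])  # ground-truth class indices

# reduction='sum' adds the weighted per-sample losses instead of
# averaging them, so the value scales with batch size
print(loss_fn(logits, labels).item())
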
Example #2
 def __init__(self,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              layers,
              steps,
              delay,
              GPU=False):
     super(MihGNNEmbedding13, self).__init__()
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     self.all_nodes_neighbors = all_nodes_neighbors
     embedding_state = numpy.random.randn(N, d)
     embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_state)
     self.lossFunction = MihOutputModule1(d=self.d, e=self.e, layers=layers)
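
Every constructor here seeds the node states by drawing from numpy.random.randn and converting to a float tensor. A minimal equivalent using torch alone (N and d are hypothetical sizes) avoids the numpy round-trip:

import torch

N, d = 100, 32  # hypothetical node count and embedding dimension
# torch.randn draws from the same standard normal as numpy.random.randn
embedding_state = torch.randn(N, d, dtype=torch.float)
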
Example #3
 def __init__(self,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              layers,
              steps,
              delay,
              GPU=False):
     super(MihGNNEmbedding12WithTrainWeight, self).__init__()
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     # loss weights wrapped in a Parameter so they are learned during training
     self.weight = torch.tensor([1, 1], dtype=torch.float)
     self.weight = Parameter(self.weight, requires_grad=True)
     self.all_nodes_neighbors = all_nodes_neighbors
     embedding_state = numpy.random.randn(N, d)
     embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_state)
     self.liner = LineNetwork(input_features=d * 2,
                              output_features=2,
                              hidden_features=d)
     self.soft_max = nn.Softmax(dim=-1)
     # nn.NLLLoss expects log-probabilities, so the forward pass must take the
     # log of soft_max's output; nn.LogSoftmax is the usual pairing
     self.loss = nn.NLLLoss()
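
What sets Example #3 apart is wrapping the loss weights in a Parameter so the optimizer updates them alongside the model. A minimal sketch of that pattern, under the assumption that the forward pass feeds log-probabilities to nn.NLLLoss (the class and sizes below are hypothetical, not the repository's code):

import torch
import torch.nn as nn
from torch.nn import Parameter

class TrainableWeightHead(nn.Module):
    def __init__(self, d):
        super().__init__()
        # learnable per-class weights, updated together with fc
        self.weight = Parameter(torch.ones(2), requires_grad=True)
        self.fc = nn.Linear(d, 2)
        self.log_softmax = nn.LogSoftmax(dim=-1)
        self.loss = nn.NLLLoss()

    def forward(self, x, labels):
        # nn.NLLLoss expects log-probabilities, hence LogSoftmax
        log_probs = self.log_softmax(self.fc(x) * self.weight)
        return self.loss(log_probs, labels)

head = TrainableWeightHead(d=8)
print(head(torch.randn(4, 8), torch.tensor([0, 1, 0, 1])))
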
Example #4
 def __init__(self,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              layers,
              steps,
              delay,
              weight,
              GPU=False):
     super(MihGNNEmbedding12, self).__init__()
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     self.weight = torch.tensor(weight, dtype=torch.float)
     self.all_nodes_neighbors = all_nodes_neighbors
     embedding_state = numpy.random.randn(N, d)
     embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_state)
     self.liner = LineNetwork(input_features=d * 2,
                              output_features=2,
                              hidden_features=d)
     self.soft_max = nn.Softmax(dim=-1)
     self.cross_entropy = nn.CrossEntropyLoss(weight=self.weight,
                                              reduction='sum')
Example #5
 def __init__(self,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              layers,
              steps,
              delay,
              GPU=False):
     super(MihGNNEmbedding6, self).__init__()
     self.A = torch.tensor(Matrix_pre_handle(A, steps=steps, delay=delay),
                           dtype=torch.float)
     embedding_states = numpy.random.randn(N, d)
     embedding_states = torch.tensor(data=embedding_states,
                                     dtype=torch.float)
     self.aggregationModule = GNNAggregationModule(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_states)
     self.liner = LineNetwork(input_features=d * 2,
                              output_features=2,
                              hidden_features=d)
     self.soft_max = nn.Softmax(dim=1)
     self.cross_entropy = nn.CrossEntropyLoss()
     # attribute assignment above already registered these submodules;
     # add_module registers them a second time under explicit names
     self.add_module(name="AggregationModule",
                     module=self.aggregationModule)
     self.add_module(name="FullConnectionLiner", module=self.liner)
Example #6
 def __init__(self, A, As, all_nodes_neighbors, N, d, layers, steps, delay, GPU=False):
     super(MihGNNEmbedding3, self).__init__()
     self.A = torch.tensor(Matrix_pre_handle(A, steps=steps, delay=delay), dtype=torch.float)
     embedding_state = numpy.random.randn(N, d)
     embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = GNNAggregationModule(A=self.A, As=As, all_nodes_neighbors=all_nodes_neighbors,
                                                   convolution_layers=layers, d=d,
                                                   embedding_states=embedding_state)
     self.lossFunction = MihOutputModule2()
     self.relu = nn.ReLU()
Example #7
 def __init__(self,
              G,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              walk_length,
              p,
              q,
              iter,
              window_size,
              workers,
              layers,
              steps,
              delay,
              GPU=False):
     super(MihGNNEmbedding12AferRandomWalk, self).__init__()
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     self.all_nodes_neighbors = all_nodes_neighbors
     print("Getting word embedding!")
     node2Vec = Node2vec(G=G,
                         A=A,
                         walk_length=walk_length,
                         p=p,
                         q=q,
                         embed_size=d,
                         iter=iter,
                         window_size=window_size,
                         workers=workers)
     embedding_state = node2Vec.word_embeddings
     # note: unlike the other constructors, word_embeddings is passed through
     # without an explicit torch.tensor(..., dtype=torch.float) conversion;
     # the commented-out lines below show the random-init alternative
     # embedding_state = numpy.random.randn(N, d)
     # embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_state)
     self.liner = LineNetwork(input_features=d * 2,
                              output_features=2,
                              hidden_features=d)
     self.soft_max = nn.Softmax(dim=-1)
     # self.cross_entropy = nn.CrossEntropyLoss(weight = self.weight, reduction = 'sum')
     self.cross_entropy = nn.CrossEntropyLoss()
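
Example #7 swaps the random initialization for node2vec walk embeddings. However the pretrained vectors are produced, converting them to a float tensor up front keeps the downstream modules uniform; a minimal sketch (pretrained_embeddings stands in for node2Vec.word_embeddings, assumed to be an N-by-d numpy array):

import numpy
import torch

N, d = 100, 32  # hypothetical sizes
pretrained_embeddings = numpy.random.randn(N, d)  # stand-in for node2vec output

# convert once so downstream modules always receive a float tensor
embedding_state = torch.tensor(pretrained_embeddings, dtype=torch.float)
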
Example #8
 def __init__(self, A, N, embeding_size, layers, steps, delay, width, height, kernel_size, out_channel, GPU=False):
     super(CovE, self).__init__()
     self.A = torch.tensor(Matrix_pre_handle(A, steps=steps, delay=delay), dtype=torch.float)
     self.width = width
     self.height = height
     self.transformLiners = nn.Sequential()
     self.embeddings = torch.randn(size=[N, embeding_size], dtype=torch.float)
     for layer in range(layers):
         self.transformLiners.add_module(name="transformLiners{0}".format(layer),
                                         module=nn.Linear(in_features=embeding_size, out_features=embeding_size))

     self.cov2d = nn.Conv2d(in_channels=1, out_channels=out_channel, kernel_size=kernel_size, stride=1)
     self.pool = nn.AvgPool2d(kernel_size=kernel_size)
     self.relu = nn.ReLU()
     # stride-1 convolution shrinks each dimension to (dim - kernel_size + 1);
     # average pooling then floor-divides it by kernel_size
     output_width = int((width - kernel_size + 1) / kernel_size)
     output_height = int((height - kernel_size + 1) / kernel_size)
     self.output_features = output_height * output_width * out_channel
     self.ouputLiners = LineNetwork(input_features=int(self.output_features * 2), hidden_features=embeding_size, output_features=2)
     self.softMax = nn.Softmax(dim=-1)
     self.crossEntropy = nn.CrossEntropyLoss()
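
The output_features arithmetic in Example #8 is easy to get wrong; the sketch below checks it against an actual forward pass (the sizes are hypothetical):

import torch
import torch.nn as nn

width, height, kernel_size, out_channel = 28, 28, 3, 8

conv = nn.Conv2d(in_channels=1, out_channels=out_channel,
                 kernel_size=kernel_size, stride=1)
pool = nn.AvgPool2d(kernel_size=kernel_size)

y = pool(conv(torch.randn(1, 1, height, width)))

# stride-1 conv: dim - kernel_size + 1; pooling floor-divides by kernel_size
output_width = (width - kernel_size + 1) // kernel_size
output_height = (height - kernel_size + 1) // kernel_size
assert y.shape == (1, out_channel, output_height, output_width)
print(y.shape)  # torch.Size([1, 8, 8, 8])
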