Code example #1
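All of the constructors below are __init__ methods of PyTorch nn.Module subclasses from the same project, shown in isolation. Read on their own they assume at least the following imports; project-specific helpers such as Matrix_pre_handle, LineNetwork, and the various aggregation modules are defined elsewhere in the repository and are not shown here.

import math

import numpy
import torch
from torch import nn
from torch.nn import Parameter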
 def __init__(self, A, As, all_nodes_neighbors, N, d, layers, steps, delay, weight, GPU=False):
     super(MihGNNEmbedding12WithJaccard, self).__init__()
     self.original_A = torch.tensor(A, dtype=torch.float)
     # Note: A @ A counts 2-hop paths, despite the attribute's name.
     self.one_hop_matrix = torch.matmul(self.original_A, self.original_A)
     # Matrix_pre_handle is a project helper; its definition is not shown here.
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     self.weight = torch.tensor(weight, dtype=torch.float)
     self.all_nodes_neighbors = all_nodes_neighbors
     embedding_state = numpy.random.randn(N, d)
     embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(
         A=self.A, As=As, all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers, d=d, embedding_states=embedding_state)
     self.edge_generator_forward = nn.Sequential(
         nn.Linear(d * 2, d * 2, bias=True),
         nn.BatchNorm1d(num_features = d * 2),
         nn.ReLU(),
         nn.Linear(d * 2, d * 2, bias=True),
         nn.BatchNorm1d(num_features=d * 2),
         nn.ReLU()
     )
     # self.edge_generator_backward = nn.Sequential(
     #     nn.Linear(d * 2, d * 2, bias=True),
     #     nn.BatchNorm1d(num_features=d * 2),
     #     nn.ReLU()
     # )
     self.liner = LineNetwork(input_features=d * 2, output_features=2, hidden_features=d)
     self.soft_max = nn.Softmax(dim=-1)
     self.cross_entropy = nn.CrossEntropyLoss(weight=self.weight, reduction='sum')
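Every example here routes node or node-pair embeddings through LineNetwork(input_features=..., hidden_features=..., output_features=...). Its definition is not included in these snippets; judging from the call sites it is a small two-layer MLP. A minimal hypothetical sketch, not the repository's actual code:

import torch
from torch import nn

class LineNetwork(nn.Module):
    # Hypothetical reconstruction from the call sites in these examples.
    def __init__(self, input_features, hidden_features, output_features):
        super(LineNetwork, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(input_features, hidden_features),
            nn.ReLU(),
            nn.Linear(hidden_features, output_features),
        )

    def forward(self, x):
        return self.net(x)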
Code example #2
    def __init__(self, P, A, embedding_size, layers, K, GPU=False):
        super(MihPolysemousNetwork, self).__init__()
        N = A.shape[0]
        self.A = torch.tensor(A, dtype=torch.float)
        self.embedding_size = embedding_size
        self.K = K
        self.layers = layers
        self.GPU = GPU
        self.prior_distribution_matrix = P
        self.embedding_state = torch.randn(size=[K, N, embedding_size],
                                           dtype=torch.float)
        self.GCN_layer = PolysemousGcnLayer(layers=self.layers,
                                            K=self.K,
                                            embedding_size=self.embedding_size)
        # Redundant: assigning the module to an attribute above already
        # registered it; see the demonstration below.
        self.add_module(name="GCN_layer", module=self.GCN_layer)
        self.relu = nn.ReLU()

        self.weight1 = torch.randn(size=[K, embedding_size], dtype=torch.float)
        self.weight1 = Parameter(self.weight1, requires_grad=True)
        self.weight2 = torch.randn(size=[K, embedding_size], dtype=torch.float)
        self.weight2 = Parameter(self.weight2, requires_grad=True)
        self.outputLinerNetwork = LineNetwork(
            input_features=self.embedding_size * 2,
            hidden_features=self.embedding_size,
            output_features=2)
        self.cross_entropy = nn.CrossEntropyLoss()
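The explicit add_module call above is harmless but unnecessary: PyTorch registers any nn.Module or nn.Parameter assigned to an attribute of a module. A small self-contained check:

import torch
from torch import nn
from torch.nn import Parameter

class Demo(nn.Module):
    def __init__(self, K, d):
        super(Demo, self).__init__()
        # Plain attribute assignment is enough to register the parameter.
        self.weight1 = Parameter(torch.randn(K, d))

print(dict(Demo(3, 4).named_parameters()).keys())  # dict_keys(['weight1'])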
Code example #3
    def __init__(self, A, As, all_nodes_neighbors, N, d, layers, steps, delay, GPU=False):
        super(MihGNNEmbedding9, self).__init__()

        # self.A = Matrix_pre_handle(A, steps, delay)
        # self.A = torch.tensor(self.A, dtype = torch.float)

        # I = numpy.eye(N)
        # self.A = torch.tensor(A + 5 * I, dtype=torch.float)

        self.A = torch.tensor(A, dtype=torch.float)
        # pre_handle is defined elsewhere in the class; a plausible
        # reconstruction is sketched after this example.
        self.A = self.pre_handle(self.A)

        self.layers = layers
        embedding_state = numpy.random.randn(N, d)
        embedding_state = torch.tensor(data = embedding_state, dtype=torch.float)

        self.tanh = nn.Tanh()
        self.embedding_state = embedding_state
        self.aggregationLayers = nn.Sequential()
        layers_embeddings = []  # unused; only the commented line below referenced it
        # Note: the loop propagates the embeddings through A once, at
        # construction time; they stay fixed unless forward() recomputes them.
        for layer in range(layers):
            aggregation_layer = nn.Linear(in_features=d, out_features=d)
            self.aggregationLayers.add_module(name="aggregation_layer_{0}".format(layer + 1),
                                              module=aggregation_layer)
            self.embedding_state = aggregation_layer(self.embedding_state)
            self.embedding_state = torch.matmul(self.A, self.embedding_state)
            self.embedding_state = self.tanh(self.embedding_state)
            # layers_embeddings.append(self.embedding_state * delay[layer])

        self.liner = LineNetwork(input_features=d * 2, output_features=2, hidden_features=d)
        self.softMax = nn.Softmax(dim=-1)
        self.cross_entropy = nn.CrossEntropyLoss()
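self.pre_handle is not shown in this snippet. A plausible stand-in, assuming it performs the standard GCN renormalization D^{-1/2}(A + I)D^{-1/2} of Kipf & Welling; the repository's actual helper may differ:

import torch

def gcn_normalize(A):
    # Hypothetical stand-in for self.pre_handle.
    A_hat = A + torch.eye(A.shape[0])          # add self-loops
    deg = A_hat.sum(dim=1)                     # node degrees
    d_inv_sqrt = deg.pow(-0.5)
    d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.0  # guard isolated nodes
    D = torch.diag(d_inv_sqrt)
    return D @ A_hat @ D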
Code example #4
    def __init__(self,
                 A,
                 As,
                 all_nodes_neighbors,
                 N,
                 d,
                 layers,
                 steps,
                 delay,
                 GPU=False):
        super(MihGNNEmbedding12WithTrainWeight, self).__init__()
        self.A = Matrix_pre_handle(A, steps, delay)
        self.A = torch.tensor(self.A, dtype=torch.float)
        self.d = d
        self.e = torch.tensor(math.e, dtype=torch.float)
        self.layers = layers
        self.As = As
        # The class weights are trainable parameters here (hence the class
        # name), presumably applied to the two classes in forward().
        self.weight = torch.tensor([1, 1], dtype=torch.float)
        self.weight = Parameter(self.weight, requires_grad=True)
        self.all_nodes_neighbors = all_nodes_neighbors
        embedding_state = numpy.random.randn(N, d)
        embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
        self.aggregationModule = MihGNNAggregationModule2(
            A=self.A,
            As=As,
            all_nodes_neighbors=all_nodes_neighbors,
            convolution_layers=layers,
            d=d,
            embedding_states=embedding_state)
        self.liner = LineNetwork(input_features=d * 2,
                                 output_features=2,
                                 hidden_features=d)
        self.soft_max = nn.Softmax(dim=-1)

        # Caution: nn.NLLLoss expects log-probabilities, so the softmax output
        # must be log()'d (or replaced with nn.LogSoftmax) before this loss;
        # see the check below.
        self.loss = nn.NLLLoss()
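The caution above can be verified directly: LogSoftmax followed by NLLLoss is mathematically identical to CrossEntropyLoss on raw logits, whereas plain Softmax into NLLLoss is not.

import torch
from torch import nn

logits = torch.randn(4, 2)
targets = torch.tensor([0, 1, 1, 0])

# NLLLoss expects log-probabilities.
nll = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(logits), targets)
ce = nn.CrossEntropyLoss()(logits, targets)
assert torch.allclose(nll, ce)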
Code example #5
 def __init__(self,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              layers,
              steps,
              delay,
              weight,
              GPU=False):
     super(MihGNNEmbedding12, self).__init__()
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     self.weight = torch.tensor(weight, dtype=torch.float)
     self.all_nodes_neighbors = all_nodes_neighbors
     embedding_state = numpy.random.randn(N, d)
     embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_state)
     self.liner = LineNetwork(input_features=d * 2,
                              output_features=2,
                              hidden_features=d)
     self.soft_max = nn.Softmax(dim=-1)
     self.cross_entropy = nn.CrossEntropyLoss(weight=self.weight,
                                              reduction='sum')
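The weight and reduction='sum' arguments rescale per-class losses, the usual remedy when one edge class dominates the training pairs. A toy illustration with assumed weights [1.0, 5.0]:

import torch
from torch import nn

# Errors on class 1 count five times as much as errors on class 0.
criterion = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 5.0]),
                                reduction='sum')
logits = torch.randn(8, 2)
labels = torch.randint(0, 2, (8,))
print(criterion(logits, labels))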
Code example #6
 def __init__(self,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              layers,
              steps,
              delay,
              GPU=False):
     super(MihGNNEmbedding6, self).__init__()
     self.A = torch.tensor(Matrix_pre_handle(A, steps=steps, delay=delay),
                           dtype=torch.float)
     embedding_states = numpy.random.randn(N, d)
     embedding_states = torch.tensor(data=embedding_states,
                                     dtype=torch.float)
     self.aggregationModule = GNNAggregationModule(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_states)
     self.liner = LineNetwork(input_features=d * 2,
                              output_features=2,
                              hidden_features=d)
     self.soft_max = nn.Softmax(dim=1)
     self.cross_entropy = nn.CrossEntropyLoss()
      # These add_module calls are redundant: the attribute assignments above
      # already registered both submodules.
      self.add_module(name="AggregationModule",
                      module=self.aggregationModule)
      self.add_module(name="FullConnectionLiner", module=self.liner)
Code example #7
 def __init__(self, N, embedding_size, layers):
     super(MihModule, self).__init__()
     embeddings = numpy.random.random([N, embedding_size])
     embeddings = torch.tensor(data=embeddings, dtype=torch.float)
     # Caution: all_neighbors is not a parameter of this constructor; it must
     # already exist in the enclosing scope or this raises NameError.
     self.graphSage = GraphSage(num_layers=layers, input_size=embedding_size, out_size=embedding_size,
                                raw_features=embeddings, adj_lists=all_neighbors)
     self.liner = LineNetwork(input_features=embedding_size * 2, output_features=2, hidden_features=embedding_size)
     self.soft_max = nn.Softmax(dim=-1)
     self.cross_entropy = nn.CrossEntropyLoss()
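Like most of these constructors, this one keeps both a Softmax and a CrossEntropyLoss. nn.CrossEntropyLoss applies log-softmax internally and expects raw logits, so the Softmax is presumably only for reading out probabilities at inference; feeding softmaxed outputs into the loss would apply softmax twice:

import torch
from torch import nn

logits = torch.randn(4, 2)
targets = torch.tensor([0, 1, 0, 1])
ce = nn.CrossEntropyLoss()

good = ce(logits, targets)                     # correct: raw logits
bad = ce(nn.Softmax(dim=-1)(logits), targets)  # double-softmax pitfall
print(good.item(), bad.item())                 # generally differ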
Code example #8
 def __init__(self, A, N, d, layers, steps, delay, GPU=False):
     super(MihGNNEmbedding7, self).__init__()
     # Add self-loops so each node keeps its own features during aggregation.
     I = numpy.eye(N)
     A = torch.tensor(A + I, dtype=torch.float)
     embedding_state = numpy.random.randn(N, d)
     embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = GCNAggregationModule(A=A, convolution_layers=layers, d=d, embedding_states=embedding_state)
     self.liner = LineNetwork(input_features=d * 2, output_features=2, hidden_features=d)
     self.cross_entropy = nn.CrossEntropyLoss()
     # Redundant: the attribute assignments above already registered these modules.
     self.add_module(name="AggregationModule", module=self.aggregationModule)
     self.add_module(name="FullConnectionLiner", module=self.liner)
Code example #9
    def __init__(self, N, d, layers, GPU=False):
        super(MihGNNEmbedding12WithNoAggregation, self).__init__()
        self.d = d
        self.e = torch.tensor(math.e, dtype=torch.float)
        self.layers = layers
        embedding_state = numpy.random.randn(N, d)
        # A plain tensor attribute: these embeddings receive no gradients and
        # stay fixed (see the note after this example).
        self.embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)

        self.liner = LineNetwork(input_features=d * 2, output_features=2, hidden_features=d)
        # self.soft_max = nn.Softmax(dim = -1)
        # self.cross_entropy = nn.CrossEntropyLoss(weight = self.weight, reduction = 'sum')
        self.cross_entropy = nn.CrossEntropyLoss()
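Because this variant skips aggregation entirely, the random embedding_state is the model's only node representation, and as a plain tensor it never trains. If trainable free embeddings were the intent (an assumption, not something the snippet confirms), the idiomatic form would be nn.Parameter:

import torch
from torch import nn

class FreeEmbeddings(nn.Module):
    def __init__(self, N, d):
        super(FreeEmbeddings, self).__init__()
        # nn.Parameter makes the table visible to the optimizer.
        self.embedding_state = nn.Parameter(torch.randn(N, d))

m = FreeEmbeddings(10, 4)
print(sum(p.numel() for p in m.parameters()))  # 40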
Code example #10
 def __init__(self,
              G,
              A,
              As,
              all_nodes_neighbors,
              N,
              d,
              walk_length,
              p,
              q,
              iter,  # shadows the built-in iter()
              window_size,
              workers,
              layers,
              steps,
              delay,
              GPU=False):
     super(MihGNNEmbedding12AferRandomWalk, self).__init__()
     self.A = Matrix_pre_handle(A, steps, delay)
     self.A = torch.tensor(self.A, dtype=torch.float)
     self.d = d
     self.e = torch.tensor(math.e, dtype=torch.float)
     self.layers = layers
     self.As = As
     self.all_nodes_neighbors = all_nodes_neighbors
     print("Getting word embedding!")
     node2Vec = Node2vec(G=G,
                         A=A,
                         walk_length=walk_length,
                         p=p,
                         q=q,
                         embed_size=d,
                         iter=iter,
                         window_size=window_size,
                         workers=workers)
      embedding_state = node2Vec.word_embeddings
      # Unlike the commented-out random initializer below, word_embeddings is
      # used as-is; if it is a numpy array it likely needs the same
      # torch.tensor(..., dtype=torch.float) conversion.
      # embedding_state = numpy.random.randn(N, d)
      # embedding_state = torch.tensor(data=embedding_state, dtype=torch.float)
     self.aggregationModule = MihGNNAggregationModule2(
         A=self.A,
         As=As,
         all_nodes_neighbors=all_nodes_neighbors,
         convolution_layers=layers,
         d=d,
         embedding_states=embedding_state)
     self.liner = LineNetwork(input_features=d * 2,
                              output_features=2,
                              hidden_features=d)
     self.soft_max = nn.Softmax(dim=-1)
     # self.cross_entropy = nn.CrossEntropyLoss(weight = self.weight, reduction = 'sum')
     self.cross_entropy = nn.CrossEntropyLoss()
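How word_embeddings is laid out depends on the Node2vec wrapper; gensim-style implementations key vectors by node id, so bridging to the (N, d) float tensor the aggregation module expects typically looks like the following (a hypothetical sketch with a stand-in dictionary):

import numpy
import torch

N, d = 10, 4
word_embeddings = {str(i): numpy.random.randn(d) for i in range(N)}  # stand-in

emb = numpy.stack([word_embeddings[str(i)] for i in range(N)])
emb = torch.tensor(emb, dtype=torch.float)
print(emb.shape)  # torch.Size([10, 4])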
Code example #11
    def __init__(self, A, N, embeding_size, layers, steps, delay, width, height, kernel_size, out_channel, GPU=False):
        super(CovE, self).__init__()
        self.A = torch.tensor(Matrix_pre_handle(A, steps=steps, delay=delay), dtype=torch.float)
        self.width = width
        self.height = height
        self.transformLiners = nn.Sequential()
        self.embeddings = torch.randn(size=[N, embeding_size], dtype=torch.float)
        for layer in range(layers):
            self.transformLiners.add_module(name="transformLiners{0}".format(layer),
                                            module=nn.Linear(in_features=embeding_size, out_features=embeding_size))

        self.cov2d = nn.Conv2d(in_channels=1, out_channels=out_channel, kernel_size=kernel_size, stride=1)
        self.pool = nn.AvgPool2d(kernel_size=kernel_size)
        self.relu = nn.ReLU()
        # Spatial size after the stride-1 convolution is (W - k + 1); the
        # average pool (whose stride defaults to its kernel size) divides by k.
        output_width = int((width - kernel_size + 1) / kernel_size)
        output_height = int((height - kernel_size + 1) / kernel_size)
        self.output_features = output_height * output_width * out_channel
        self.ouputLiners = LineNetwork(input_features=int(self.output_features * 2), hidden_features=embeding_size, output_features=2)
        self.softMax = nn.Softmax(dim=-1)
        self.crossEntropy = nn.CrossEntropyLoss()
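The size arithmetic can be sanity-checked against PyTorch directly. Assuming, say, width = height = 8, kernel_size = 2, and out_channel = 4: the stride-1 convolution yields 7x7 and the average pool yields 3x3, matching int((8 - 2 + 1) / 2) = 3 per spatial dimension.

import torch
from torch import nn

width = height = 8
kernel_size, out_channel = 2, 4
conv = nn.Conv2d(1, out_channel, kernel_size, stride=1)
pool = nn.AvgPool2d(kernel_size)

y = pool(conv(torch.randn(1, 1, height, width)))
assert y.shape == (1, out_channel, 3, 3)
print(out_channel * 3 * 3)  # 36 == self.output_features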
Code example #12
File: GraphSAGE.py  Project: mouseM/learningMouse
    def __init__(self, N, embedding_size, layers):
        super(MihModule, self).__init__()
        embeddings = numpy.random.random([N, embedding_size])
        embeddings = torch.tensor(data=embeddings, dtype=torch.float)
        # As in example #7, all_neighbors must come from the enclosing scope;
        # it is not a parameter of this constructor.
        self.graphSage = GraphSage(num_layers=layers, input_size=embedding_size, out_size=embedding_size,
                                   raw_features=embeddings, adj_lists=all_neighbors)
        self.liner = LineNetwork(input_features=embedding_size, output_features=2, hidden_features=embedding_size)
        self.soft_max = nn.Softmax(dim=-1)
        self.cross_entropy = nn.CrossEntropyLoss()

        self.edge_generator_forward = nn.Sequential(
            nn.Linear(embedding_size * 2, embedding_size * 2, bias=True),
            nn.BatchNorm1d(num_features=embedding_size * 2),
            nn.ReLU()
        )
        self.edge_generator_backward = nn.Sequential(
            nn.Linear(embedding_size * 2, embedding_size * 2, bias=True),
            nn.BatchNorm1d(num_features=embedding_size * 2),
            nn.ReLU()
        )