Example #1
    def __init__(self, in_feats, h_feats, num_classes, pooling):
        super(GCN_GraphConv, self).__init__()
        assert isinstance(h_feats, list), "h_feats must be a list"
        assert len(h_feats) != 0, "h_feats is empty. unable to add hidden layers"
        self.list_of_layers = nn.ModuleList()  # ModuleList so the hidden layers are registered as submodules
        dim = [in_feats] + h_feats

        # Convolution (Hidden) Layers
        for i in range(1, len(dim)):
            self.list_of_layers.append(GraphConv(dim[i - 1], dim[i]))

        # Final Layer
        # Followed example at: https://docs.dgl.ai/tutorials/blitz/5_graph_classification.html#sphx-glr-tutorials-blitz-5-graph-classification-py
        self.final = GraphConv(dim[-1], num_classes)

        # Pooling layer
        if pooling == "AvgPooling":
            self.pooling_layer = dgl.nn.AvgPooling()
        elif pooling == "MaxPooling":
            self.pooling_layer = dgl.nn.MaxPooling()
        elif pooling == "SumPooling":
            self.pooling_layer = dgl.nn.SumPooling()
        else:
            raise NotImplementedError
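The constructor above only registers the modules; the project's forward pass is not shown. A minimal sketch of how these pieces are typically chained for graph-level classification, assuming torch.nn.functional is imported as F (an illustration, not code from the source project):

    def forward(self, g, in_feat):
        # Hypothetical forward: hidden GraphConv layers with ReLU,
        # a final GraphConv for per-node class scores, then the
        # configured pooling readout for a graph-level prediction.
        h = in_feat
        for layer in self.list_of_layers:
            h = F.relu(layer(g, h))
        h = self.final(g, h)
        return self.pooling_layer(g, h)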
Example #2
    def __init__(self, in_feats, hidden_size, num_classes):
        # inherit from backend NN model
        super(GCN, self).__init__()

        # add 2 conv layers
        self.conv1 = GraphConv(in_feats, hidden_size)
        self.conv2 = GraphConv(hidden_size, num_classes)
Example #3
File: model.py Project: yifeim/dgl
    def __init__(self,
                 in_dim,
                 hid_dim,
                 out_dim,
                 num_layers=1,
                 mode='cat',
                 dropout=0.):
        super(JKNet, self).__init__()

        self.mode = mode
        self.dropout = nn.Dropout(dropout)
        self.layers = nn.ModuleList()
        self.layers.append(GraphConv(in_dim, hid_dim, activation=F.relu))
        for _ in range(num_layers):
            self.layers.append(GraphConv(hid_dim, hid_dim, activation=F.relu))

        if self.mode == 'lstm':
            self.jump = JumpingKnowledge(mode, hid_dim, num_layers)
        else:
            self.jump = JumpingKnowledge(mode)

        if self.mode == 'cat':
            hid_dim = hid_dim * (num_layers + 1)

        self.output = nn.Linear(hid_dim, out_dim)
        self.reset_params()
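Only __init__ is shown, so here is a hedged sketch of how the per-layer outputs could feed the JumpingKnowledge module before the linear output head (an assumption, not code from the yifeim/dgl project):

    def forward(self, g, feats):
        # Hypothetical forward: keep every layer's output so that
        # JumpingKnowledge can combine them ('cat' concatenates,
        # 'max'/'lstm' aggregate them).
        layer_outputs = []
        for layer in self.layers:
            feats = self.dropout(layer(g, feats))
            layer_outputs.append(feats)
        h = self.jump(layer_outputs)
        return self.output(h)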
Example #4
    def __init__(self, num_feats):
        super(EEGGraphConvNet, self).__init__()

        self.conv1 = GraphConv(num_feats, 16)
        self.conv2 = GraphConv(16, 32)
        self.conv3 = GraphConv(32, 64)
        self.conv4 = GraphConv(64, 50)
        self.conv4_bn = BatchNorm1d(50,
                                    eps=1e-05,
                                    momentum=0.1,
                                    affine=True,
                                    track_running_stats=True)

        self.fc_block1 = nn.Linear(50, 30)
        self.fc_block2 = nn.Linear(30, 10)
        self.fc_block3 = nn.Linear(10, 2)

        # Xavier initializations
        self.fc_block1.apply(
            lambda x: nn.init.xavier_normal_(x.weight, gain=1))
        self.fc_block2.apply(
            lambda x: nn.init.xavier_normal_(x.weight, gain=1))
        self.fc_block3.apply(
            lambda x: nn.init.xavier_normal_(x.weight, gain=1))

        self.sumpool = SumPooling()
Example #5
    def __init__(
        self,
        in_features: int,
        hidden_features: List[int],
        out_features: int,
        activation: Callable[[], nn.Module] = nn.ReLU,
    ):
        super().__init__()

        layers = [
            GraphConv(in_feats=in_features, out_feats=hidden_features[0])
        ]
        dropouts = [nn.Dropout(0.5) for _ in range(len(hidden_features))]
        batch_norms = [
            nn.BatchNorm1d(hidden_feature)
            for hidden_feature in hidden_features
        ]
        activations = [activation() for _ in range(len(hidden_features))]

        for i in range(1, len(hidden_features)):
            layers.append(GraphConv(hidden_features[i - 1],
                                    hidden_features[i]))

        layers.append(GraphConv(hidden_features[-1], out_features))
        self.layers = nn.ModuleList(layers)
        self.dropouts = nn.ModuleList(dropouts)
        self.batch_norms = nn.ModuleList(batch_norms)
        self.activations = nn.ModuleList(activations)
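The constructor builds one more GraphConv than it builds batch norms, dropouts, and activations, which suggests the last convolution acts as the output head. A minimal forward sketch under that assumption (not taken from the original project):

    def forward(self, g, h):
        # Hypothetical forward: BatchNorm, activation, and dropout after
        # each hidden GraphConv; the last GraphConv maps to out_features.
        for conv, bn, act, drop in zip(self.layers[:-1], self.batch_norms,
                                       self.activations, self.dropouts):
            h = drop(act(bn(conv(g, h))))
        return self.layers[-1](g, h)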
Example #6
File: arxiv.py Project: jcformanek/gnn
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, gnn_type):
        super(GNN, self).__init__()

        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.gnn_type = gnn_type

        self.convs = nn.ModuleList()

        if gnn_type == "gcn":
            self.convs.append(GraphConv(input_dim, hidden_dim))
        elif gnn_type == "sage":
            self.convs.append(SAGEConv(input_dim, hidden_dim, "gcn"))
        elif gnn_type == "gat":
            self.convs.append(GATConv(input_dim, hidden_dim, num_heads=3))
        else:
            raise ValueError("Invalid gnn_type")

        for i in range(num_layers - 2):
            if gnn_type == "gcn":
                self.convs.append(GraphConv(hidden_dim, hidden_dim))
            elif gnn_type == "sage":
                self.convs.append(SAGEConv(hidden_dim, hidden_dim, "gcn"))
            elif gnn_type == "gat":
                self.convs.append(GATConv(hidden_dim, hidden_dim, num_heads=3))

        if gnn_type == "gcn":
            self.convs.append(GraphConv(hidden_dim, output_dim))
        elif gnn_type == "sage":
            self.convs.append(SAGEConv(hidden_dim, output_dim, "gcn"))
        elif gnn_type == "gat":
            self.convs.append(GATConv(hidden_dim, output_dim, num_heads=3))
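Note that GATConv returns per-head outputs of shape (N, num_heads, hidden_dim), so any forward pass over self.convs has to flatten or average the head dimension. A hedged sketch, assuming torch.nn.functional is imported as F (not code from the jcformanek/gnn project):

    def forward(self, g, h):
        # Hypothetical forward: average GAT heads, ReLU between layers.
        for i, conv in enumerate(self.convs):
            h = conv(g, h)
            if self.gnn_type == "gat":
                h = h.mean(dim=1)  # collapse the head dimension
            if i != len(self.convs) - 1:
                h = F.relu(h)
        return h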
Example #7
 def __init__(self,
              in_feats,
              hidden_dim,
              num_classes):
     super(GCN, self).__init__()
     self.conv1 = GraphConv(in_feats, hidden_dim)
     self.conv2 = GraphConv(hidden_dim, hidden_dim)
     self.classify = nn.Linear(hidden_dim, num_classes)
Example #8
    def __init__(self, G, hid_dims, num_layers, multihot=True):
        super().__init__(G, hid_dims, num_layers, multihot)

        self.input_layer = GraphConv(self.in_dims, hid_dims)
        self.hidden_layers = nn.ModuleList([
            GraphConv(hid_dims, hid_dims) for _ in range(num_layers)
        ])  # ModuleList so the hidden layers are registered as submodules
        self.output_layer = GraphConv(hid_dims, self.out_dims)
Example #9
 def __init__(self, in_feats, hid_feats, out_feats, rel_names):
     super().__init__()
     self.conv1 = HeteroGraphConv({
         rel: GraphConv(in_feats, hid_feats) for rel in rel_names
     }, aggregate='sum')
     self.conv2 = HeteroGraphConv({
         rel: GraphConv(hid_feats, out_feats) for rel in rel_names
     }, aggregate='sum')
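With HeteroGraphConv, both inputs and outputs are dictionaries keyed by node type. A minimal forward sketch in the style of the DGL heterogeneous-graph tutorial, assuming torch.nn.functional is imported as F:

    def forward(self, g, inputs):
        # inputs: dict {node_type: feature tensor}
        h = self.conv1(g, inputs)
        h = {ntype: F.relu(v) for ntype, v in h.items()}
        return self.conv2(g, h)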
Example #10
    def __init__(self,
                 graph_x_size,
                 graph_emb_size,
                 output_size=1,
                 model_predictor="agent"):
        super(MapGraphModel, self).__init__()
        self.graph_emb_size = graph_emb_size

        self.agent_layer = nn.Sequential(
            nn.Linear((64 + graph_emb_size) * 2, 32), nn.ReLU(True),
            nn.Linear(32, 1)
        )  # layer to process agent representations and predict novelty
        self.model_predictor = model_predictor
        # GCN layers for graph
        self.GCN1 = GraphConv(graph_x_size,
                              graph_x_size,
                              norm='both',
                              weight=True,
                              bias=False)
        self.GCN2 = GraphConv(graph_x_size,
                              graph_x_size,
                              norm='both',
                              weight=True,
                              bias=False)
        self.GCN3 = GraphConv(graph_x_size,
                              self.graph_emb_size,
                              norm='both',
                              weight=True,
                              bias=False)

        #CNN layers for Map
        self.Conv = torch.nn.Sequential(*[
            nn.Conv2d(graph_emb_size, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            nn.Tanh(),
        ])
        #Last activation function
        self.last_activation = torch.nn.functional.sigmoid if output_size == 1 else torch.nn.functional.softmax

        #CNN to process the maps of two adjacent timesteps and predict novelty.
        self.DiffConv = torch.nn.Sequential(*[
            nn.Conv2d(64 * 2, 32, kernel_size=3, stride=1, padding=0),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Conv2d(32, 16, kernel_size=4, stride=2, padding=1),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Conv2d(16, 8, kernel_size=4, stride=2, padding=1),
            nn.ReLU(True),
            nn.Conv2d(8, output_size, kernel_size=4, stride=2, padding=1)
        ])

        self.device = "cuda"
Example #11
    def __init__(self,
                 in_dim,
                 hidden_dim,
                 layer_num,
                 sample_size,
                 device,
                 GNN_name="GIN"):
        super(GNNStructEncoder, self).__init__()
        self.n_distribution = 7  # How many gaussian distribution should exist
        self.out_dim = hidden_dim
        if GNN_name == "GIN":
            self.linear1 = MLP(layer_num, hidden_dim, hidden_dim, hidden_dim)
            self.graphconv1 = GINConv(apply_func=self.linear1,
                                      aggregator_type='sum')
            self.linear2 = MLP(layer_num, hidden_dim, hidden_dim, hidden_dim)
            self.graphconv2 = GINConv(apply_func=self.linear2,
                                      aggregator_type='sum')
        elif GNN_name == "GCN":
            self.graphconv1 = GraphConv(hidden_dim, hidden_dim)
            self.graphconv2 = GraphConv(hidden_dim, hidden_dim)
        else:
            self.graphconv = GATConv(hidden_dim, hidden_dim, num_heads=10)
        # self.neighbor_num_list = neighbor_num_list
        self.linear_classifier = MLP(1, hidden_dim, hidden_dim,
                                     self.n_distribution)
        self.neighbor_generator = MLP_generator(hidden_dim, hidden_dim,
                                                sample_size).to(device)
        # Gaussian Means, and std
        self.gaussian_mean = nn.Parameter(
            torch.FloatTensor(sample_size, self.n_distribution,
                              hidden_dim).uniform_(-0.5 / hidden_dim, 0.5 /
                                                   hidden_dim)).to(device)
        self.gaussian_log_sigma = nn.Parameter(
            torch.FloatTensor(sample_size, self.n_distribution,
                              hidden_dim).uniform_(-0.5 / hidden_dim, 0.5 /
                                                   hidden_dim)).to(device)
        self.m = torch.distributions.Normal(
            torch.zeros(sample_size, self.n_distribution, hidden_dim),
            torch.ones(sample_size, self.n_distribution, hidden_dim))

        # Before MLP Gaussian Means, and std
        self.mlp_gaussian_mean = nn.Parameter(
            torch.FloatTensor(hidden_dim).uniform_(-0.5 / hidden_dim, 0.5 /
                                                   hidden_dim)).to(device)
        self.mlp_gaussian_log_sigma = nn.Parameter(
            torch.FloatTensor(hidden_dim).uniform_(-0.5 / hidden_dim, 0.5 /
                                                   hidden_dim)).to(device)
        self.mlp_m = torch.distributions.Normal(torch.zeros(hidden_dim),
                                                torch.ones(hidden_dim))

        # Decoders
        self.degree_decoder = FNN(hidden_dim, hidden_dim, 1, 4)
        # self.degree_loss_func = FocalLoss(int(max_degree_num) + 1)
        self.degree_loss_func = nn.MSELoss()
        self.pool = mp.Pool(1)
        self.in_dim = in_dim
        self.sample_size = sample_size
Example #12
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = GraphConv(num_features,
                            128,
                            norm='both',
                            weight=True,
                            bias=True)
     self.conv2 = GraphConv(128,
                            num_classes,
                            norm='both',
                            weight=True,
                            bias=True)
Example #13
    def __init__(self, in_dim, out_dim, act_fn, num_layers=2):
        super(GCN, self).__init__()

        assert num_layers >= 2
        self.num_layers = num_layers
        self.convs = nn.ModuleList()

        self.convs.append(GraphConv(in_dim, out_dim * 2))
        for _ in range(self.num_layers - 2):
            self.convs.append(GraphConv(out_dim * 2, out_dim * 2))

        self.convs.append(GraphConv(out_dim * 2, out_dim))
        self.act_fn = act_fn
Example #14
 def __init__(self, in_feats, h_feats1, h_feats2, num_outputs, dropout=0):
     super(GCN, self).__init__()
     # self.conv1 = GraphConv(in_feats, h_feats1, weight=True, bias=True) # GCNLayer
     # self.conv2 = GraphConv(h_feats1, h_feats2, weight=True, bias=True) # GCNLayer
     # self.conv3 = GraphConv(h_feats2, num_outputs, weight=True, bias=True) # GCNLayer
     self.msg_net = Sequential(MessageLayer(), MessageLayer(),
                               MessageLayer())
     self.conv1 = GraphConv(in_feats, h_feats1, weight=True,
                            bias=True)  # GCNLayer
     self.conv3 = GraphConv(h_feats1, num_outputs, weight=True,
                            bias=True)  # GCNLayer
     if dropout:
         self.dropout = nn.Dropout(p=dropout)
Example #15
    def __init__(self,
                 in_dim,
                 out_dim,
                 ntypes,
                 etypes,
                 activation=None,
                 dropout=0.0):
        """R-GCN层(用于异构图)

        :param in_dim: 输入特征维数
        :param out_dim: 输出特征维数
        :param ntypes: List[str] 顶点类型列表
        :param etypes: List[str] 边类型列表
        :param activation: callable, optional 激活函数,默认为None
        :param dropout: float, optional Dropout概率,默认为0
        """
        super().__init__()
        self.activation = activation
        self.dropout = nn.Dropout(dropout)

        self.conv = HeteroGraphConv(
            {
                etype: GraphConv(in_dim, out_dim, norm='right', bias=False)
                for etype in etypes
            }, 'sum')
        self.loop_weight = nn.ModuleDict({
            ntype: nn.Linear(in_dim, out_dim, bias=False)
            for ntype in ntypes
        })
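Since the relation-wise convolutions drop their bias and the self-loop is modelled separately by loop_weight, a plausible forward pass adds the per-type self-loop term to the aggregated neighbor messages. A hedged sketch (an assumption, not the project's actual forward):

    def forward(self, g, inputs):
        # inputs: dict {node_type: feature tensor}
        h = self.conv(g, inputs)  # neighbor aggregation per node type
        out = {}
        for ntype, feat in h.items():
            feat = feat + self.loop_weight[ntype](inputs[ntype])  # self-loop
            if self.activation is not None:
                feat = self.activation(feat)
            out[ntype] = self.dropout(feat)
        return out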
Example #16
File: ex2.py Project: jjgarau/MIT6883ex2
    def __init__(self, n_hid_in, n_hid_out, dropout=0.3):
        super().__init__()
        """
        Define a branch of the graph convolution with
        1. GraphConv from n_hid_in to n_hid_in
        2. ReLU
        3. Dropout
        4. GraphConv from n_hid_in to n_hid_out

        Note: you should call dgl.nn.GraphConv with allow_zero_in_degree=True
        """
        ### Your code here ###
        self.gc1 = GraphConv(n_hid_in, n_hid_in, allow_zero_in_degree=True)
        self.act = nn.ReLU()
        self.drop = nn.Dropout(p=dropout)
        self.gc2 = GraphConv(n_hid_in, n_hid_out, allow_zero_in_degree=True)
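A forward pass that follows the order spelled out in the docstring simply chains the four modules (a sketch; the exercise solution may structure it differently):

    def forward(self, g, h):
        # GraphConv -> ReLU -> Dropout -> GraphConv, as described above.
        h = self.drop(self.act(self.gc1(g, h)))
        return self.gc2(g, h)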
Example #17
    def __init__(self, in_feat, hid_feat, out_feat, n_hidden_layers):
        super().__init__()
        self.h_embedding = nn.Embedding(in_feat, hid_feat)
        self.mlp = MLPPredictor(hid_feat, out_feat)

        self.layers = nn.ModuleList([
            GraphConv(hid_feat, hid_feat, allow_zero_in_degree=True)
            for _ in range(n_hidden_layers)
        ])
Example #18
    def __init__(self, num_feats):
        super(EEGGraphConvNet, self).__init__()

        self.conv1 = GraphConv(num_feats, 32)
        self.conv2 = GraphConv(32, 20)
        self.conv2_bn = nn.BatchNorm1d(20,
                                       eps=1e-05,
                                       momentum=0.1,
                                       affine=True,
                                       track_running_stats=True)
        self.fc_block1 = nn.Linear(20, 10)
        self.fc_block2 = nn.Linear(10, 2)

        # Xavier initializations
        self.fc_block1.apply(
            lambda x: nn.init.xavier_normal_(x.weight, gain=1))
        self.fc_block2.apply(
            lambda x: nn.init.xavier_normal_(x.weight, gain=1))
Example #19
 def __init__(self, num_nodes, num_rels):
     super(testModel, self).__init__()
     self.e_emb = nn.Embedding(num_nodes, emb_size)
     self.e_gc = GraphConv(emb_size,
                           gc_size,
                           norm='both',
                           weight=True,
                           bias=True)
     self.r_emb = nn.Embedding(num_rels, emb_size)
Example #20
    def __init__(self,
                 in_dim,
                 out_dim,
                 rel_names,
                 num_bases=None,
                 weight=True,
                 self_loop=True,
                 activation=None,
                 dropout=0.0):
        """R-GCN层(用于异构图)

        :param in_dim: 输入特征维数
        :param out_dim: 输出特征维数
        :param rel_names: List[str] 关系名称
        :param num_bases: int, optional 基的个数,默认使用关系个数
        :param weight: bool, optional 是否进行线性变换,默认为True
        :param self_loop: 是否包括自环消息,默认为True
        :param activation: callable, optional 激活函数,默认为None
        :param dropout: float, optional Dropout概率,默认为0
        """
        super().__init__()
        self.rel_names = rel_names
        self.self_loop = self_loop
        self.activation = activation
        self.dropout = nn.Dropout(dropout)

        self.conv = HeteroGraphConv({
            rel: GraphConv(in_dim,
                           out_dim,
                           norm='right',
                           weight=False,
                           bias=False)
            for rel in rel_names
        })

        self.use_weight = weight
        if not num_bases:
            num_bases = len(rel_names)
        self.use_basis = weight and 0 < num_bases < len(rel_names)
        if self.use_weight:
            if self.use_basis:
                self.basis = WeightBasis((in_dim, out_dim), num_bases,
                                         len(rel_names))
            else:
                self.weight = nn.Parameter(
                    torch.Tensor(len(rel_names), in_dim, out_dim))
                nn.init.xavier_uniform_(self.weight,
                                        nn.init.calculate_gain('relu'))

        if self.self_loop:
            self.loop_weight = nn.Parameter(torch.Tensor(in_dim, out_dim))
            nn.init.xavier_uniform_(self.loop_weight,
                                    nn.init.calculate_gain('relu'))
Example #21
    def __init__(self, num_metapaths, hidden_dim, attn_drop):
        """元路径视图编码器

        :param num_metapaths: int 元路径数量M
        :param hidden_dim: int 隐含特征维数
        :param attn_drop: float 注意力dropout
        """
        super().__init__()
        self.gcns = nn.ModuleList([
            GraphConv(hidden_dim, hidden_dim, norm='right', activation=nn.PReLU())
            for _ in range(num_metapaths)
        ])
        self.attn = Attention(hidden_dim, attn_drop)
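Each GraphConv here encodes one metapath-based view, and Attention (a project-specific module) fuses them at the semantic level. A hedged sketch of the forward pass under that reading; the argument name mgs and the Attention call signature are assumptions:

    def forward(self, mgs, h):
        # mgs: list of M metapath-based graphs over the same node set.
        views = [gcn(mg, h) for gcn, mg in zip(self.gcns, mgs)]
        return self.attn(views)  # attention over the M metapath views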
Example #22
    def __init__(self, in_feats, h_feats, num_classes, pooling):
        super(GCN_GraphConv, self).__init__()
        assert isinstance(h_feats, list), "h_feats must be a list"
        assert len(h_feats) != 0, "h_feats is empty. unable to add hidden layers"
        self.list_of_layers = nn.ModuleList()  # ModuleList so the hidden layers are registered as submodules
        dim = [in_feats] + h_feats

        # Convolution (Hidden) Layers
        for i in range(1, len(dim)):
            self.list_of_layers.append(GraphConv(dim[i - 1], dim[i]))

        # Final Layer
        self.final = GraphConv(dim[-1], num_classes)

        # Pooling layer
        if pooling == "AvgPooling":
            self.pooling_layer = dgl.nn.AvgPooling()
        elif pooling == "MaxPooling":
            self.pooling_layer = dgl.nn.MaxPooling()
        elif pooling == "SumPooling":
            self.pooling_layer = dgl.nn.SumPooling()
        else:
            raise NotImplementedError
Example #23
File: rate_main.py Project: daizigege/AGNN
    def __init__(self, dataset, args):
        super(myModel, self).__init__()
        self._act = args.model_activation
        self.user_embed = nn.Embedding(dataset.num_user, args.embed_units)
        self.item_embed = nn.Embedding(dataset.num_item, args.embed_units)
        self.num_user = dataset.num_user
        self.num_item = dataset.num_item
        data_dict = {}
        for rating in rating_vals:
            data_dict[(str(rating) + 'ed').replace('.', '_')] = GraphConv(
                dataset.gender_len, dataset.gender_len)

        self.genre_conv = dgl.nn.HeteroGraphConv(
            data_dict  # , allow_zero_in_degree=True
        )

        self.encoder = MyLayer(
            args.embed_units + dataset.gender_len,
            args.embed_units + dataset.gender_len,
            args.gcn_agg_units, args.gcn_out_units,
            rating_vals, args.gcn_dropout, args.gcn_agg_accum,
            self._act, self._act)
        self.pred = MLPPredictor(args.gcn_out_units, args.gcn_dropout)
Example #24
    def __init__(self, in_feat, h_feat, out_feat):
        super().__init__()

        self.gcl1 = GraphConv(in_feat, h_feat)
        self.relu = nn.ReLU()
        self.gcl2 = GraphConv(h_feat, out_feat)
Example #25
 def __init__(self):
     super(Net, self).__init__()
     self.layer1 = GraphConv(602, 128)
     self.layer2 = GraphConv(128, 128)
     self.layer3 = GraphConv(128, 128)
     self.fc = nn.Linear(128, 41)
Example #26
 def __init__(self):
     super(Net, self).__init__()
     self.layer1 = GraphConv(8710, 256)
     self.layer2 = GraphConv(256, 256)
     self.layer3 = GraphConv(256, 256)
     self.fc = nn.Linear(256, 70)
Example #27
 def __init__(self, in_dim, hidden_dim, n_classes):
     super(Classifier, self).__init__()
     self.conv1 = GraphConv(in_dim, hidden_dim)
     self.conv2 = GraphConv(hidden_dim, hidden_dim)
     self.classify = nn.Linear(hidden_dim, n_classes)
Example #28
 def __init__(self, in_feats, h_feats, num_classes):
     super(GCN, self).__init__()
     self.conv1 = GraphConv(in_feats, h_feats)
     self.conv2 = GraphConv(h_feats, num_classes)
Example #29
 def __init__(self, in_feats, hidden_size, num_classes):
     super().__init__()
     self.conv1 = GraphConv(in_feats, hidden_size)
     self.conv2 = GraphConv(hidden_size, num_classes)
Example #30
 def __init__(self, in_feats, h_feats, num_classes):
     super(GCN, self).__init__()
     self.conv1 = GraphConv(
         in_feats, h_feats)  # Layer 1: input features, hidden features;
     self.conv2 = GraphConv(
         h_feats, num_classes)  # Layer 2: hidden features, number of classes;
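Examples #28-#30 follow the standard two-layer GCN pattern from the DGL node-classification tutorial; the forward pass typically inserts a ReLU between the two convolutions (a sketch, assuming torch.nn.functional is imported as F):

    def forward(self, g, in_feat):
        h = F.relu(self.conv1(g, in_feat))  # Layer 1 + nonlinearity
        return self.conv2(g, h)             # Layer 2: per-node class scores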