Example #1
 def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
              dropout, k):
     super(GCNWithAttention, self).__init__()
     self.k = k
     self.hidden = hidden_channels
     self.num_layer = num_layers
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.convs = torch.nn.ModuleList()
     self.convs.append(GCNConv(in_channels, hidden_channels))
     self.attention = torch.nn.ModuleList()
     self.dimension_reduce = torch.nn.ModuleList()
     self.attention.append(LowRankAttention(self.k, in_channels, dropout))
     self.dimension_reduce.append(nn.Sequential(
         nn.Linear(2 * (self.k + hidden_channels), hidden_channels), nn.ReLU()))
     self.dimension_reduce[0] = nn.Sequential(
         nn.Linear(2 * self.k + hidden_channels + in_channels, hidden_channels), nn.ReLU())
     self.bn = nn.ModuleList(
         [nn.BatchNorm1d(hidden_channels) for _ in range(num_layers - 1)])
     for _ in range(num_layers - 1):
         self.convs.append(GCNConv(hidden_channels, hidden_channels))
         self.attention.append(
             LowRankAttention(self.k, hidden_channels, dropout))
         self.dimension_reduce.append(nn.Sequential(
             nn.Linear(2 * (self.k + hidden_channels), hidden_channels)))
     self.dimension_reduce[-1] = nn.Sequential(
         nn.Linear(2 * (self.k + hidden_channels), out_channels))
     self.dropout = dropout
Example #2
 def __init__(self,
              board_size,
              feature_dim,
              gcn_layers,
              device='cpu',
              optimizer=torch.optim.Adam,
              **kwargs):
     super(GCNNet, self).__init__()
     self._input_size = feature_dim
     self._board_size = board_size
     self._hidden_size = gcn_layers
     self._device = device if device == 'cpu' else 'cuda'
     self._edge_idx = torch.tensor(
         build_edge_idx(board_size),
         dtype=torch.int64,
         # device=self._device
     )
     self._gcn_layers = ModuleList(
         [GCNConv(in_channels=feature_dim, out_channels=gcn_layers[0])]
     )
     for layer_idx, size in enumerate(gcn_layers[:-1]):
         self._gcn_layers.append(
             GCNConv(in_channels=size, out_channels=gcn_layers[layer_idx + 1])
         )
     self._policy_fc = torch.nn.Linear(in_features=gcn_layers[-1], out_features=1)
     self._value_fc = torch.nn.Linear(in_features=gcn_layers[-1], out_features=1)
     self._weight_init()
     self._optimizer = optimizer(
         self.parameters(),
         lr=kwargs.get('lr', .03),
         weight_decay=kwargs.get('weight_decay', .001)
     )
     self.to(self._device)
Example #3
File: GCN.py  Project: ZhangKai2017/DIG
    def __init__(self, input_dim, output_dim, model_args):
        super(GCNNet, self).__init__()
        self.latent_dim = model_args.latent_dim
        self.mlp_hidden = model_args.mlp_hidden
        self.emb_normlize = model_args.emb_normlize
        self.device = model_args.device
        self.num_gnn_layers = len(self.latent_dim)
        self.num_mlp_layers = len(self.mlp_hidden) + 1
        self.dense_dim = self.latent_dim[-1]
        self.readout_layers = get_readout_layers(model_args.readout)

        self.gnn_layers = nn.ModuleList()
        self.gnn_layers.append(GCNConv(input_dim, self.latent_dim[0], normalize=model_args.adj_normlize))
        for i in range(1, self.num_gnn_layers):
            self.gnn_layers.append(GCNConv(self.latent_dim[i - 1], self.latent_dim[i], normalize=model_args.adj_normlize))
        self.gnn_non_linear = nn.ReLU()

        self.mlps = nn.ModuleList()
        if self.num_mlp_layers > 1:
            self.mlps.append(nn.Linear(self.dense_dim * len(self.readout_layers),
                                       model_args.mlp_hidden[0]))
            for i in range(1, self.num_mlp_layers-1):
                self.mlps.append(nn.Linear(self.mlp_hidden[i - 1], self.mlp_hidden[i]))
            self.mlps.append(nn.Linear(self.mlp_hidden[-1], output_dim))
        else:
            self.mlps.append(nn.Linear(self.dense_dim * len(self.readout_layers),
                                       output_dim))
        self.dropout = nn.Dropout(model_args.dropout)
        self.Softmax = nn.Softmax(dim=-1)
        self.mlp_non_linear = nn.ELU()
Example #4
    def __init__(self, num_features, hidden_size, num_classes=2, dropout=0):
        super(GCN, self).__init__()

        self.conv1 = GCNConv(num_features, hidden_size)
        self.conv2 = GCNConv(hidden_size, num_classes)

        self.dropout = dropout
        self.activation = F.relu
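The example above defines only the layers; a forward pass is not part of the snippet. A minimal sketch, assuming the attributes set in this __init__ (conv1, conv2, dropout, activation) and F = torch.nn.functional:

    def forward(self, x, edge_index):
        # first GCN layer, activation, then dropout only during training
        x = self.activation(self.conv1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        # second layer maps to class logits
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=-1)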
Example #5
File: pyg_ssp.py  Project: zhjhr181/cogdl
class CLS(torch.nn.Module):
    def __init__(self, d_in, d_out):
        super(CLS, self).__init__()
        self.conv = GCNConv(d_in, d_out)

    def reset_parameters(self):
        self.conv.reset_parameters()

    def forward(self, x, edge_index, mask=None):
        x = self.conv(x, edge_index)
        x = F.log_softmax(x, dim=1)
        return x
Example #6
    def __init__(self, in_channels: int, hidden_channels: int, num_layers: int,
                 out_channels: Optional[int] = None, dropout: float = 0.0,
                 act: Optional[Callable] = ReLU(inplace=True),
                 norm: Optional[torch.nn.Module] = None, jk: str = 'last',
                 **kwargs):
        super().__init__(in_channels, hidden_channels, num_layers,
                         out_channels, dropout, act, norm, jk)

        self.convs.append(GCNConv(in_channels, hidden_channels, **kwargs))
        for _ in range(1, num_layers):
            self.convs.append(
                GCNConv(hidden_channels, hidden_channels, **kwargs))
Example #7
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 dropout):
        super(GCN, self).__init__()

        self.convs = torch.nn.ModuleList()
        self.convs.append(GCNConv(in_channels, hidden_channels, cached=True))
        for _ in range(num_layers - 2):
            self.convs.append(
                GCNConv(hidden_channels, hidden_channels, cached=True))
        self.convs.append(GCNConv(hidden_channels, out_channels, cached=True))

        self.dropout = dropout
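No forward method is shown for this constructor. A common sketch for such a stack of cached GCNConv layers (an assumption here, not code from the example):

    def forward(self, x, edge_index):
        # all but the last layer: convolution, ReLU, dropout
        for conv in self.convs[:-1]:
            x = F.relu(conv(x, edge_index))
            x = F.dropout(x, p=self.dropout, training=self.training)
        # final layer produces per-class scores
        x = self.convs[-1](x, edge_index)
        return x.log_softmax(dim=-1)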
Example #8
File: pyg_ssp.py  Project: zhjhr181/cogdl
class CRD(torch.nn.Module):
    def __init__(self, d_in, d_out, p):
        super(CRD, self).__init__()
        self.conv = GCNConv(d_in, d_out) 
        self.p = p

    def reset_parameters(self):
        self.conv.reset_parameters()

    def forward(self, x, edge_index, mask=None):
        x = F.relu(self.conv(x, edge_index))
        x = F.dropout(x, p=self.p, training=self.training)
        return x
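Examples #5 and #8 come from the same file; a minimal sketch of how the CRD and CLS blocks are typically chained into a full model (the wrapper class and the hidden size below are illustrative, not taken from the source):

class Net(torch.nn.Module):
    def __init__(self, num_features, num_classes, hidden=64, dropout=0.5):
        super(Net, self).__init__()
        self.crd = CRD(num_features, hidden, dropout)  # GCNConv + ReLU + dropout
        self.cls = CLS(hidden, num_classes)            # GCNConv + log-softmax

    def forward(self, x, edge_index):
        x = self.crd(x, edge_index)
        return self.cls(x, edge_index)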
Example #9
    def __init__(self, config_file, coarse_mesh, fine_marker_dict, process_sim=lambda x, y: x,
                 freeze_mesh=False, num_convs=6, num_end_convs=3, hidden_channels=512,
                 out_channels=3, device='cuda'):
        super().__init__()
        meshes_temp_dir = 'temp_meshes'
        os.makedirs(meshes_temp_dir, exist_ok=True)
        self.mesh_file = meshes_temp_dir + '/' + str(os.getpid()) + '_mesh.su2'

        if not coarse_mesh:
            raise ValueError('Need to provide a coarse mesh for CFD-GCN.')
        nodes, edges, self.elems, self.marker_dict = get_mesh_graph(coarse_mesh)
        self.nodes = torch.from_numpy(nodes).to(device)
        if not freeze_mesh:
            self.nodes = nn.Parameter(self.nodes)
        self.elems, new_edges = quad2tri(sum(self.elems, []))
        self.elems = [self.elems]
        self.edges = torch.from_numpy(edges).to(device)
        print(self.edges.dtype, new_edges.dtype)
        self.edges = torch.cat([self.edges, new_edges.to(self.edges.device)], dim=1)
        self.marker_inds = torch.tensor(sum(self.marker_dict.values(), [])).unique()
        assert is_cw(self.nodes, self.elems[0]).nonzero().shape[0] == 0, 'Mesh has flipped elems'

        self.process_sim = process_sim
        self.su2 = SU2Module(config_file, mesh_file=self.mesh_file)
        logging.info(f'Mesh filename: {self.mesh_file.format(batch_index="*")}')

        self.fine_marker_dict = torch.tensor(fine_marker_dict['airfoil']).unique()
        self.sdf = None

        improved = False
        self.num_convs = num_end_convs
        self.convs = []
        if self.num_convs > 0:
            self.convs = nn.ModuleList()
            in_channels = out_channels + hidden_channels
            for i in range(self.num_convs - 1):
                self.convs.append(GCNConv(in_channels, hidden_channels, improved=improved))
                in_channels = hidden_channels
            self.convs.append(GCNConv(in_channels, out_channels, improved=improved))

        self.num_pre_convs = num_convs - num_end_convs
        self.pre_convs = []
        if self.num_pre_convs > 0:
            in_channels = 5 + 1  # one extra channel for sdf
            self.pre_convs = nn.ModuleList()
            for i in range(self.num_pre_convs - 1):
                self.pre_convs.append(GCNConv(in_channels, hidden_channels, improved=improved))
                in_channels = hidden_channels
            self.pre_convs.append(GCNConv(in_channels, hidden_channels, improved=improved))

        self.sim_info = {}  # store output of coarse simulation for logging / debugging
Example #10
 def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
              dropout):
     super(LGCN, self).__init__()
     self.convs = torch.nn.ModuleList()
     self.convs.append(
         GCNConv(in_channels, hidden_channels, normalize=False))
     self.dropout = dropout
Example #11
    def forward(self, x, edge_index, edge_weight=None):
        edge_index, norm = GCNConv.norm(edge_index, x.size(0), edge_weight,
                                        dtype=x.dtype)

        for k in range(self.K):
            x = self.propagate(edge_index, x=x, norm=norm)
        return x
Example #12
    def forward(self, x, edge_index, edge_weight=None):
        """"""
        if self.cached and self.cached_result is not None:
            if edge_index.size(1) != self.cached_num_edges:
                raise RuntimeError(
                    'Cached {} number of edges, but found {}. Please '
                    'disable the caching behavior of this layer by removing '
                    'the `cached=True` argument in its constructor.'.format(
                        self.cached_num_edges, edge_index.size(1)))

        if not self.cached:
            x = self.lin(x)

        if not self.cached or self.cached_result is None:
            self.cached_num_edges = edge_index.size(1)
            edge_index, norm = GCNConv.norm(edge_index,
                                            x.size(0),
                                            edge_weight,
                                            dtype=x.dtype)

            for k in range(self.K):
                x = self.propagate(edge_index, x=x, norm=norm)
            self.cached_result = x

        if self.cached:
            x = self.lin(self.cached_result)

        return x
Example #13
    def forward(self, x, edge_index, edge_weight=None):
        """"""
        if self.cached and self.cached_result is not None:
            if edge_index.size(1) != self.cached_num_edges:
                raise RuntimeError(
                    'Cached {} number of edges, but found {}'.format(
                        self.cached_num_edges, edge_index.size(1)))

        if not self.cached:
            x = self.lin(x)

        if not self.cached or self.cached_result is None:
            self.cached_num_edges = edge_index.size(1)
            edge_index, norm = GCNConv.norm(edge_index,
                                            x.size(0),
                                            edge_weight,
                                            dtype=x.dtype)

            for k in range(self.K):
                x = self.propagate(edge_index, x=x, norm=norm)
            self.cached_result = x

        if self.cached:
            x = self.lin(self.cached_result)

        return x
Example #14
    def forward(self, x, edge_index, edge_weight=None):
        """"""
        # x: [num_nodes, num_layers, channels]
        # edge_index: [2, num_edges]
        # edge_weight: [num_edges]

        if x.dim() != 3:
            raise ValueError('Feature shape must be [num_nodes, num_layers, '
                             'channels].')
        num_nodes, num_layers, channels = x.size()

        if self.cached and self.cached_result is not None:
            if edge_index.size(1) != self.cached_num_edges:
                raise RuntimeError(
                    'Cached {} number of edges, but found {}. Please '
                    'disable the caching behavior of this layer by removing '
                    'the `cached=True` argument in its constructor.'.format(
                        self.cached_num_edges, edge_index.size(1)))

        if not self.cached or self.cached_result is None:
            self.cached_num_edges = edge_index.size(1)
            edge_index, norm = GCNConv.norm(edge_index,
                                            x.size(self.node_dim),
                                            edge_weight,
                                            dtype=x.dtype)
            self.cached_result = edge_index, norm

        edge_index, norm = self.cached_result

        return self.propagate(edge_index, x=x, norm=norm)
Example #15
    def __init__(
        self,
        in_dim: int,
        num_layers: int,
        vertex_embed_dim: int,
        act,
        jk=True,
    ):
        super(GCNNet, self).__init__()
        self.act = act()

        gcn_layers_list = []
        batch_norms_list = []
        for i in range(num_layers):
            gcn_layers_list.append(
                GCNConv(vertex_embed_dim if i > 0 else in_dim,
                        vertex_embed_dim))
            batch_norms_list.append(nn.BatchNorm1d(vertex_embed_dim))

        self.gcn_layers = nn.ModuleList(gcn_layers_list)
        self.batch_norms = nn.ModuleList(batch_norms_list)

        self.jk = jk
        if self.jk:
            self.out_dim = in_dim + vertex_embed_dim * num_layers
        else:
            self.out_dim = vertex_embed_dim
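The matching forward pass is not included; with jumping knowledge enabled it would concatenate the input with every layer's output, which is what makes out_dim equal in_dim + vertex_embed_dim * num_layers. A rough sketch under that assumption:

    def forward(self, x, edge_index):
        xs = [x]
        for conv, bn in zip(self.gcn_layers, self.batch_norms):
            x = self.act(bn(conv(x, edge_index)))
            xs.append(x)
        # concatenate input and all layer outputs when jumping knowledge is on
        return torch.cat(xs, dim=-1) if self.jk else x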
Example #16
    def __init__(self, in_feats, hidden_sizes: list, drop_ratio=0.5, gnn_type=None):
        super(Mnist_graph_pred_GNN, self).__init__()

        self.in_feats = in_feats
        self.hidden_sizes = hidden_sizes
        # ModuleList (rather than a plain list) registers the convolutions as sub-modules
        self.conv_list = nn.ModuleList()
        self.conv_list.append(GCNConv(in_feats, hidden_sizes[0]).cuda())
        for i in range(1, len(hidden_sizes)):
            self.conv_list.append(GCNConv(hidden_sizes[i-1], hidden_sizes[i]).cuda())

        # Maybe use fc is a little tricky
        # self.fc1 = nn.Linear(784, 128)
        # self.fc2 = nn.Linear(128, 10)

        self.lin1 = nn.Linear(784, 128)
        self.classifier = self.get_classifier()
Example #17
    def __init__(self,
                 in_feats,
                 hidden_sizes: list,
                 drop_ratio=0.5,
                 gnn_type=None):
        super(Mnist_node_pred_GNN, self).__init__()

        self.in_feats = in_feats
        self.hidden_sizes = hidden_sizes
        # ModuleList (rather than a plain list) registers the convolutions as sub-modules
        self.conv_list = nn.ModuleList()
        self.conv_list.append(GCNConv(in_feats, hidden_sizes[0]).cuda())
        for i in range(1, len(hidden_sizes)):
            self.conv_list.append(
                GCNConv(hidden_sizes[i - 1], hidden_sizes[i]).cuda())
        # self.lin1 = nn.Linear(784, 300)
        # self.lin2 = nn.Linear(300, 100)
        self.classifier = self.get_classifier()
Example #18
    def forward(self, x, edge_index, edge_weight=None):
        edge_index, norm = GCNConv.norm(edge_index, x.size(0), edge_weight,
                                        dtype=x.dtype)

        xs = [x]
        for k in range(self.K):
            xs.append(self.propagate(edge_index, x=xs[-1], norm=norm))
        return torch.cat(xs, dim=1)
Example #19
    def __init__(self, hidden_size, dropout=0.5, negative_slope=0.2, heads=8, item_fusing=False):
        super(GroupGraph, self).__init__()
        self.hidden_size = hidden_size
        self.item_fusing = item_fusing

        self.W_1 = nn.Linear(8 * self.hidden_size, self.hidden_size)
        self.W_2 = nn.Linear(8 * self.hidden_size, self.hidden_size)
        self.q = nn.Linear(self.hidden_size, 1)
        self.W_3 = nn.Linear(16 * self.hidden_size, self.hidden_size)

        # self.gat = GATConv(in_channels=hidden_size, out_channels=hidden_size, dropout=dropout, negative_slope=negative_slope, heads=heads, concat=True)
        # self.gat2 = GATConv(in_channels=hidden_size*heads, out_channels=hidden_size*heads, dropout=dropout, negative_slope=negative_slope, heads=heads, concat=False)
        # self.gat3 = GATConv(in_channels=hidden_size*heads, out_channels=hidden_size, dropout=dropout, negative_slope=negative_slope, heads=heads, concat=True)
        # self.gat_out = GATConv(in_channels=hidden_size*heads, out_channels=hidden_size, dropout=dropout, negative_slope=negative_slope, heads=heads, concat=False)
        # self.gated = InOutGGNN(self.hidden_size, num_layers=2)
        self.gcn = GCNConv(in_channels=hidden_size, out_channels=hidden_size)
        self.gcn2 = GCNConv(in_channels=hidden_size, out_channels=hidden_size)

        self.sgcn = SGConv(in_channels=hidden_size, out_channels=hidden_size, K=2)
Example #20
    def forward(self, x, edge_index, edge_weight=None):
        """"""
        if not self.cached or self.cached_result is None:
            edge_index, norm = GCNConv.norm(
                edge_index, x.size(0), edge_weight, dtype=x.dtype)

            for k in range(self.K):
                x = self.propagate(edge_index, x=x, norm=norm)
            self.cached_result = x

        return self.lin(self.cached_result)
Example #21
 def __init__(self, input_dimension: int, dimensions: _typing.Sequence[int],
              _act: _typing.Optional[str],
              _dropout: _typing.Optional[float]):
     super(_GCN, self).__init__()
     self._act: _typing.Optional[str] = _act
     self._dropout: _typing.Optional[float] = _dropout
     self.__convolution_layers: torch.nn.ModuleList = torch.nn.ModuleList()
     for layer, output_dimension in enumerate(dimensions):
         self.__convolution_layers.append(
             GCNConv(
                 input_dimension if layer == 0 else dimensions[layer - 1],
                 output_dimension))
Example #22
    def forward(self, x, edge_index, edge_weight=None):
        """"""
        edge_index, norm = GCNConv.norm(
            edge_index, x.size(0), edge_weight, dtype=x.dtype)

        hidden = x
        for k in range(self.K):
            x = self.propagate('add', edge_index, x=x, norm=norm)
            x = x * (1 - self.alpha)
            x = x + self.alpha * hidden

        return x
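The loop above is a personalized-PageRank style smoothing as used in APPNP: at each step x is replaced by (1 - alpha) * A_hat * x + alpha * x_0, where x_0 (stored in hidden) is the input feature matrix and A_hat is the normalized adjacency produced by GCNConv.norm.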
Example #23
 def __init__(self, in_channels, hidden_channels, out_channels):
     super(GCNEncoder, self).__init__()
     self.gcn_shared = GCNConv(in_channels,
                               hidden_channels,
                               normalize=False)
     self.gcn_mu = GCNConv(hidden_channels, out_channels, normalize=False)
     self.gcn_logvar = GCNConv(hidden_channels,
                               out_channels,
                               normalize=False)
Example #24
    def __init__(self, num_features, num_classes, hidden_size, num_layers,
                 dropout):
        super(GCN, self).__init__()

        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        shapes = [num_features
                  ] + [hidden_size] * (num_layers - 1) + [num_classes]
        self.convs = nn.ModuleList([
            GCNConv(shapes[layer], shapes[layer + 1], cached=False)
            for layer in range(num_layers)
        ])
Example #25
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers=6, improved=False,
                 cached=False, bias=True, fine_marker_dict=None):
        super().__init__()
        self.fine_marker_dict = torch.tensor(fine_marker_dict['airfoil']).unique()
        self.sdf = None
        in_channels += 1  # account for sdf

        channels = [in_channels]
        channels += [hidden_channels] * (num_layers - 1)
        channels.append(out_channels)

        convs = []
        for i in range(num_layers):
            convs.append(GCNConv(channels[i], channels[i+1], improved=improved,
                                 cached=cached, bias=bias))
        self.convs = nn.ModuleList(convs)
Example #26
    def forward(self, x, edge_index, edge_weight=None):
        edge_index, norm = GCNConv.norm(edge_index, x.size(0), edge_weight, dtype=x.dtype)

        preds = []
        preds.append(x)
        for k in range(self.K):
            x = self.propagate(edge_index, x=x, norm=norm)
            preds.append(x)
           
        pps = torch.stack(preds, dim=1)
        retain_score = self.proj(pps)
        retain_score = retain_score.squeeze()
        retain_score = torch.sigmoid(retain_score)
        retain_score = retain_score.unsqueeze(1)
        out = torch.matmul(retain_score, pps).squeeze()
        return out
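This forward keeps all K + 1 propagation results, stacks them into a [num_nodes, K + 1, channels] tensor, and combines them with a learned per-hop sigmoid "retain score", so each node can weight shallow and deep neighborhood information differently (an adaptive-depth scheme similar in spirit to DAGNN).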
Example #27
    def forward(self, x, edge_index, edge_weight=None):
        """"""
        # X: [num_nodes, num_layers, channels]
        # Edge Index: [2, num_edges]
        # Edge Weight: [num_edges]

        if x.dim() != 3:
            raise ValueError('Feature shape must be [num_nodes, num_layers, '
                             'channels].')
        num_nodes, num_layers, channels = x.size()

        if not self.cached or self.cached_result is None:
            edge_index, norm = GCNConv.norm(
                edge_index, x.size(0), edge_weight, dtype=x.dtype)
            self.cached_result = edge_index, norm
        edge_index, norm = self.cached_result

        return self.propagate(edge_index, x=x, norm=norm)
Example #28
    def __init__(self, num_features, num_classes, hidden_size, num_layers,
                 dropout):
        super(DrGCN, self).__init__()

        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        shapes = [num_features
                  ] + [hidden_size] * (num_layers - 1) + [num_classes]
        self.convs = nn.ModuleList([
            GCNConv(shapes[layer], shapes[layer + 1], cached=True)
            for layer in range(num_layers)
        ])
        self.ses = nn.ModuleList([
            SELayer(shapes[layer], se_channels=int(np.sqrt(shapes[layer])))
            for layer in range(num_layers)
        ])
Example #29
class GCNEncoder(nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels):
        super(GCNEncoder, self).__init__()
        self.gcn_shared = GCNConv(in_channels, hidden_channels, cached=True)
        self.gcn_mu = GCNConv(hidden_channels, out_channels, cached=True)
        self.gcn_logvar = GCNConv(hidden_channels, out_channels, cached=True)

    def reset_parameters(self):
        self.gcn_shared.reset_parameters()
        self.gcn_mu.reset_parameters()
        self.gcn_logvar.reset_parameters()

    def forward(self, x, edge_index):
        x = F.relu(self.gcn_shared(x, edge_index))
        mu = self.gcn_mu(x, edge_index)
        logvar = self.gcn_logvar(x, edge_index)
        return mu, logvar
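An encoder of this form is usually wrapped in torch_geometric.nn.VGAE, which handles the reparameterization trick and the reconstruction/KL losses. A minimal usage sketch (the data object, the train_pos_edge_index split and the layer sizes are assumptions, not part of the example):

from torch_geometric.nn import VGAE

model = VGAE(GCNEncoder(in_channels=dataset.num_features,
                        hidden_channels=32, out_channels=16))
z = model.encode(data.x, data.train_pos_edge_index)
loss = model.recon_loss(z, data.train_pos_edge_index) + model.kl_loss()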
Example #30
 def __init__(
     self,
     input_channels: int,
     output_channels: int,
     add_self_loops: bool = True,
     normalize: bool = True,
     activation_name: _typing.Optional[str] = ...,
     dropout_probability: _typing.Optional[float] = ...,
 ):
     super().__init__()
     self._convolution: GCNConv = GCNConv(
         input_channels,
         output_channels,
         add_self_loops=bool(add_self_loops),
         normalize=bool(normalize),
     )
     if (
         activation_name is not Ellipsis
         and activation_name is not None
         and type(activation_name) == str
     ):
         self._activation_name: _typing.Optional[str] = activation_name
     else:
         self._activation_name: _typing.Optional[str] = None
     if (
         dropout_probability is not Ellipsis
         and dropout_probability is not None
         and type(dropout_probability) == float
     ):
         if dropout_probability < 0:
             dropout_probability = 0
         if dropout_probability > 1:
             dropout_probability = 1
         self._dropout: _typing.Optional[torch.nn.Dropout] = torch.nn.Dropout(
             dropout_probability
         )
     else:
         self._dropout: _typing.Optional[torch.nn.Dropout] = None
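The snippet stores an activation name and an optional dropout module but does not show how they are applied. A plausible forward sketch (the getattr-based lookup is an illustration, not the project's code):

 def forward(self, x, edge_index, edge_weight=None):
     x = self._convolution(x, edge_index, edge_weight)
     if self._activation_name is not None:
         # resolve the activation by name, e.g. 'relu' -> torch.nn.functional.relu
         x = getattr(torch.nn.functional, self._activation_name)(x)
     if self._dropout is not None:
         x = self._dropout(x)
     return x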