Example #1
    def __init__(self):
        super(Model, self).__init__()

        #lift features of the nodes
        self.lifting_layer = nn.Embedding(hyperparams["num_features"],
                                          hyperparams["hsz"])

        #latent representations of the nodes
        self.sageConv1 = SAGEConv(in_feats=hyperparams["hsz"],
                                  out_feats=hyperparams["hsz"],
                                  aggregator_type='lstm')

        self.sageConv2 = SAGEConv(in_feats=hyperparams["hsz"],
                                  out_feats=hyperparams["hsz"],
                                  aggregator_type='lstm')

        self.sageConv3 = SAGEConv(in_feats=hyperparams["hsz"],
                                  out_feats=hyperparams["hsz"],
                                  aggregator_type='lstm')

        self.GAT_conv1 = GATConv(in_feats=hyperparams["hsz"],
                                 out_feats=hyperparams["hsz"],
                                 num_heads=hyperparams["num_heads"])

        #readout layer (also the task-specific layer during pretraining)
        self.output_layer = nn.Linear(hyperparams["hsz"], 3)
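Only the constructor appears in this example; below is a minimal sketch of a matching forward pass, assuming a DGL graph g plus integer node IDs for the embedding lookup, ReLU between layers, and averaging over the GAT heads (all of these, and the use of torch.nn.functional as F, are assumptions, not part of the original snippet):

    def forward(self, g, node_ids):
        # hypothetical forward pass: lift -> 3x SAGEConv -> GATConv -> readout
        h = self.lifting_layer(node_ids)          # (N, hsz) lifted node features
        h = F.relu(self.sageConv1(g, h))
        h = F.relu(self.sageConv2(g, h))
        h = F.relu(self.sageConv3(g, h))
        h = self.GAT_conv1(g, h).mean(dim=1)      # average attention heads: (N, num_heads, hsz) -> (N, hsz)
        return self.output_layer(h)               # (N, 3) logits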
Example #2
 def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation,
              dropout, aggregator_type):
     super(GraphSAGE_model, self).__init__()
     self.layers = nn.ModuleList()
     # input layer
     self.layers.append(
         SAGEConv(in_feats,
                  n_hidden,
                  aggregator_type,
                  feat_drop=0.,
                  activation=activation))
     # hidden layers
     for i in range(n_layers - 1):
         self.layers.append(
             SAGEConv(n_hidden,
                      n_hidden,
                      aggregator_type,
                      feat_drop=dropout,
                      activation=activation))
     # output layer
     self.layers.append(
         SAGEConv(n_hidden,
                  n_classes,
                  aggregator_type,
                  feat_drop=dropout,
                  activation=None))
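The forward pass is not part of this snippet; a minimal sketch that threads the node features through the stacked layers (DGL's SAGEConv is called with the graph and the features, and feat_drop/activation are already configured inside each layer):

 def forward(self, g, features):
     # feat_drop and activation are already set on each SAGEConv,
     # so the forward pass only threads the features through the stack
     h = features
     for layer in self.layers:
         h = layer(g, h)
     return h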
Example #3
    def __init__(self):
        super().__init__()
        self.layer1 = SAGEConv(4, 32, aggregator_type='mean')
        self.layer2 = SAGEConv(32, 32, aggregator_type='mean')
        self.layer3 = SAGEConv(32, 32, aggregator_type='mean')
        self.layer4 = SAGEConv(32, 32, aggregator_type='mean')

        self.layer5 = SAGEConv(32, 3, aggregator_type='mean')
Example #4
 def __init__(self, in_feats, h_feats, aggr, num_layers):
     super(GraphSAGE, self).__init__()
     self.conv1 = SAGEConv(in_feats, h_feats, aggr)
     # self.convs = torch.nn.ModuleList()
     # for i in range(num_layers - 2):
     #     self.convs.append(SAGEConv(h_feats, h_feats, aggr))
     self.conv2 = SAGEConv(h_feats, h_feats, aggr)
     self.h_feats = h_feats
Example #5
    def __init__(self, in_features, hidden_features, out_features, activation,
                 dropout, aggregator):
        super().__init__()

        self.conv1 = SAGEConv(in_features, hidden_features, aggregator)
        self.conv2 = SAGEConv(hidden_features, hidden_features, aggregator)
        self.conv3 = SAGEConv(hidden_features, out_features, aggregator)
        self.dropout = nn.Dropout(p=dropout)
        self.bn1 = torch.nn.BatchNorm1d(hidden_features)
        self.bn2 = torch.nn.BatchNorm1d(hidden_features)
        self.activation = F.elu_ if activation == 'Elu' else F.relu
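No forward is shown; one plausible sketch using the modules defined above, where the conv -> batch norm -> activation -> dropout ordering is an assumption:

    def forward(self, g, x):
        # assumed ordering per block: conv -> batch norm -> activation -> dropout
        h = self.dropout(self.activation(self.bn1(self.conv1(g, x))))
        h = self.dropout(self.activation(self.bn2(self.conv2(g, h))))
        return self.conv3(g, h)   # output layer: no norm or activation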
Example #6
 def __init__(self,
              g,
              in_feats,
              n_hidden,
              n_classes,
              aggr,
              activation=F.relu,
              dropout=0.):
     super(GraphSAGE, self).__init__()
     self.layers = nn.ModuleList()
     self.g = g
     self.layers.append(SAGEConv(in_feats, n_hidden, aggr, activation=activation, bias=False))
     self.layers.append(SAGEConv(n_hidden, n_classes, aggr, feat_drop=dropout, activation=None, bias=False))
Example #7
 def __init__(self,
              in_features,
              hidden_features,
              out_features,
              aggregator,
              activation,
              dropout=0.5,
              is_out_layer=True):
     super().__init__()
      # FIXME: handle zero-in-degree nodes
     self.conv1 = SAGEConv(in_features, hidden_features, aggregator)
     self.conv2 = SAGEConv(hidden_features, out_features, aggregator)
     self.dropout = nn.Dropout(p=dropout)
     self.is_out_layer = is_out_layer
     self.activation = F.elu_ if activation == 'Elu' else F.relu
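The effect of is_out_layer only shows up in the forward pass, which is not included; a hedged sketch of how it might be used (the dropout placement is also an assumption):

 def forward(self, g, x):
      # hypothetical: skip the final activation when this block is the output layer
      h = self.activation(self.conv1(g, self.dropout(x)))
      h = self.conv2(g, self.dropout(h))
      if not self.is_out_layer:
          h = self.activation(h)
      return h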
Example #8
    def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation,
                 dropout, aggregator_type):
        super(GraphSAGE, self).__init__()
        self.layers = nn.ModuleList()
        self.dropout = nn.Dropout(dropout)
        self.activation = activation

        # input layer
        self.layers.append(SAGEConv(in_feats, n_hidden, aggregator_type))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator_type))
        # output layer
        self.layers.append(SAGEConv(n_hidden, n_classes,
                                    aggregator_type))  # activation None
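Because activation and dropout are kept on the module rather than passed into SAGEConv, they presumably get applied between layers in the forward pass; a sketch under that assumption:

    def forward(self, g, features):
        h = features
        for i, layer in enumerate(self.layers):
            h = layer(g, h)
            if i != len(self.layers) - 1:
                # no activation/dropout after the output layer
                h = self.dropout(self.activation(h))
        return h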
Example #9
    def __init__(self,
                 in_feats,
                 hidden_feats=None,
                 activation=None,
                 dropout=None,
                 aggregator_type=None):
        super(GraphSAGE, self).__init__()

        if hidden_feats is None:
            hidden_feats = [64, 64]

        n_layers = len(hidden_feats)
        if activation is None:
            activation = [F.relu for _ in range(n_layers)]
        if dropout is None:
            dropout = [0. for _ in range(n_layers)]
        if aggregator_type is None:
            aggregator_type = ['mean' for _ in range(n_layers)]
        lengths = [len(hidden_feats), len(activation), len(dropout), len(aggregator_type)]
        assert len(set(lengths)) == 1, 'Expect the lengths of hidden_feats, activation, ' \
                                       'dropout and aggregator_type to be the same, ' \
                                       'got {}'.format(lengths)

        self.hidden_feats = hidden_feats
        self.gnn_layers = nn.ModuleList()
        for i in range(n_layers):
            self.gnn_layers.append(SAGEConv(in_feats, hidden_feats[i], aggregator_type[i],
                                            dropout[i], activation[i]))
            in_feats = hidden_feats[i]
Example #10
 def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation,
              dropout, aggregator):
     super().__init__()
     self.n_layers = n_layers
     self.n_hidden = n_hidden
     self.n_classes = n_classes
     self.layers = nn.ModuleList()
     self.bns = nn.ModuleList()
     self.layers.append(SAGEConv(in_feats, n_hidden, aggregator))
     self.bns.append(torch.nn.BatchNorm1d(n_hidden))
     for i in range(1, n_layers - 1):
         self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator))
         self.bns.append(torch.nn.BatchNorm1d(n_hidden))
     self.layers.append(SAGEConv(n_hidden, n_classes, aggregator))
     self.dropout = nn.Dropout(dropout)
     self.activation = activation
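A sketch of a forward pass consistent with the per-layer batch norms above; the conv -> batch norm -> activation -> dropout ordering and the bare final layer are assumptions:

 def forward(self, g, features):
      h = features
      for layer, bn in zip(self.layers[:-1], self.bns):
          h = self.dropout(self.activation(bn(layer(g, h))))
      return self.layers[-1](g, h)   # final logits, no batch norm or activation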
Example #11
    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False, 
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.aggregator_type = aggregator_type
        self.batch_norm = batch_norm
        self.residual = residual
        self.dgl_builtin = dgl_builtin
        
        if in_feats != out_feats:
            self.residual = False
        
        self.dropout = nn.Dropout(p=dropout)

        if dgl_builtin == False:
            self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout,
                                   bias=bias)
            if aggregator_type == "maxpool":
                self.aggregator = MaxPoolAggregator(in_feats, in_feats,
                                                    activation, bias)
            elif aggregator_type == "lstm":
                self.aggregator = LSTMAggregator(in_feats, in_feats)
            else:
                self.aggregator = MeanAggregator()
        else:
            self.sageconv = SAGEConv(in_feats, out_feats, aggregator_type,
                    dropout, activation=activation)
        
        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)
Example #12
    def __init__(self, input_dim, target_dim):
        super(NodeClassifier, self).__init__()

        hidden_dim = 32
        self.graph_conv1 = SAGEConv(input_dim,
                                    hidden_dim,
                                    aggregator_type='pool')
        self.graph_conv2 = SAGEConv(hidden_dim,
                                    hidden_dim,
                                    aggregator_type='pool')
        self.graph_conv3 = SAGEConv(hidden_dim,
                                    target_dim,
                                    aggregator_type='pool')

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.2)
        self.sigmoid = nn.Sigmoid()
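The ReLU, dropout, and sigmoid modules stored above suggest a forward pass roughly like this sketch; where dropout sits and whether the final sigmoid is applied inside the model are assumptions:

    def forward(self, g, features):
        h = self.dropout(self.relu(self.graph_conv1(g, features)))
        h = self.dropout(self.relu(self.graph_conv2(g, h)))
        return self.sigmoid(self.graph_conv3(g, h))   # per-node probabilities over target_dim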
Example #13
 def __init__(self, in_feats, hidden_size, hidden_size1, hidden_size2, hidden_size3, hidden_size4, num_classes):
     super(GCN, self).__init__()
     self.conv1 = SAGEConv(in_feats, hidden_size, aggregator_type='pool', activation=torch.tanh)
     self.conv2 = ChebConv(hidden_size, hidden_size1, 4)
     self.conv3 = TAGConv(hidden_size1, hidden_size2, activation=F.leaky_relu, k=3)
     self.conv4 = SAGEConv(hidden_size2, hidden_size3, aggregator_type='pool', activation=torch.tanh)
     self.conv5 = ChebConv(hidden_size3, hidden_size4, 4)
     self.conv6 = TAGConv(hidden_size4, num_classes, activation=F.leaky_relu, k=3)
     x = 150
     self.encoder = nn.Sequential(
         nn.Conv2d(1, x, (3, 3)),
         nn.LeakyReLU(),
         nn.Dropout2d(),
         nn.Conv2d(x, 2*x, (3, 2)),
         nn.LeakyReLU(),
         nn.Dropout2d(),
         nn.Conv2d(2*x, 1, (3, 2)),
     )
Example #14
    def __init__(self, in_feats, hidden_feats, out_feats, num_layers, dropout):
        super(GraphSAGE, self).__init__()

        self.layers = nn.ModuleList()
        self.bns = nn.ModuleList()
        # input layer
        self.layers.append(SAGEConv(in_feats, hidden_feats, 'mean',
                                    bias=False))
        self.bns.append(nn.BatchNorm1d(hidden_feats))
        # hidden layers
        for _ in range(num_layers - 2):
            self.layers.append(
                SAGEConv(hidden_feats, hidden_feats, 'mean', bias=False))
            self.bns.append(nn.BatchNorm1d(hidden_feats))
        # output layer
        self.layers.append(
            SAGEConv(hidden_feats, out_feats, 'mean', bias=False))
        self.dropout = nn.Dropout(p=dropout)
Example #15
 def __init__(self, num_features, hidden_channels, num_layers, num_classes):
     super(SAGE, self).__init__()
     self.num_layers = num_layers
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers):
         inc = outc = hidden_channels
         if i == 0:
             inc = num_features
         if i == num_layers - 1:
             outc = num_classes
         self.convs.append(SAGEConv(inc, outc, "gcn"))
     self.dropout = torch.nn.Dropout()
Example #16
    def __init__(self):
        super(Model, self).__init__()
        ###########shared layers###########
        self.lifting_layer = nn.Embedding(hyperparams["num_features"],
                                          hyperparams["hsz"])
        self.sageConv1 = SAGEConv(in_feats=hyperparams["hsz"],
                                  out_feats=hyperparams["hsz"],
                                  aggregator_type='lstm')
        self.sageConv2 = SAGEConv(in_feats=hyperparams["hsz"],
                                  out_feats=hyperparams["hsz"],
                                  aggregator_type='lstm')
        self.sageConv3 = SAGEConv(in_feats=hyperparams["hsz"],
                                  out_feats=hyperparams["hsz"],
                                  aggregator_type='lstm')
        self.GAT_conv1 = GATConv(in_feats=hyperparams["hsz"],
                                 out_feats=hyperparams["hsz"],
                                 num_heads=hyperparams["num_heads"])
        ###################################

        #######task specific layer(s)#######
        self.readout_layer = nn.Linear(hyperparams["hsz"], 1)
Example #17
    def __init__(self, in_channels, out_channels, model):
        super(CoNet, self).__init__()

        if model == 'AFFN':
            self.layer1 = SAGEConv(in_channels, out_channels, 'mean')
            self.layer2 = GraphConv(in_channels, out_channels)
            self.layer3 = GATConv(in_channels, out_channels, 1)
        elif model == 'GCN':
            self.layer1 = GraphConv(in_channels, out_channels)
            self.layer2 = GraphConv(in_channels, out_channels)
            self.layer3 = GraphConv(in_channels, out_channels)
        elif model == 'SAGE':
            self.layer1 = SAGEConv(in_channels, out_channels, 'mean')
            self.layer2 = SAGEConv(in_channels, out_channels, 'mean')
            self.layer3 = SAGEConv(in_channels, out_channels, 'mean')
        else:
            self.layer1 = GATConv(in_channels, out_channels, 1)
            self.layer2 = GATConv(in_channels, out_channels, 1)
            self.layer3 = GATConv(in_channels, out_channels, 1)

        self.w = nn.Parameter(th.tensor([1, 1, 1], dtype=th.float))
        self.model = model
Example #18
    def __init__(self, g, in_feats, n_hidden, n_classes, n_layers, activation,
                 dropout, aggregator_type):
        super(GraphSAGE, self).__init__()
        self.g = g

        with self.name_scope():
            self.layers = nn.Sequential()
            # input layer
            self.layers.add(
                SAGEConv(in_feats,
                         n_hidden,
                         aggregator_type,
                         feat_drop=dropout,
                         activation=activation))
            # hidden layers
            for i in range(n_layers - 1):
                self.layers.add(
                    SAGEConv(n_hidden,
                             n_hidden,
                             aggregator_type,
                             feat_drop=dropout,
                             activation=activation))
            # output layer
            self.layers.add(nn.Linear(n_hidden, n_classes))
Example #19
 def __init__(self,
              g,
              in_feats,
              n_hidden,
              n_classes,
              n_layers,
              activation,
              dropout,
              aspect_embed_size,
              aggregator_type='pool'):
     super(GraphSAGE_post, self).__init__()
     self.g = g
     self.layers = nn.ModuleList()
     self.n_layers = n_layers
     self.layers.append(SAGEConv(in_feats, n_hidden, aggregator_type, \
                        activation=activation))
     for i in range(n_layers - 2):
         self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator_type, \
                            activation=activation))
     self.layers.append(
         SAGEConv(n_hidden, aspect_embed_size, aggregator_type))
     self.layers.append(
         SAGEConv(aspect_embed_size, n_classes, aggregator_type))
     self.dropout = nn.Dropout(p=dropout)
Example #20
 def __init__(self, device='cpu', dim=64, model_type="gcn"):  # initialization
     super().__init__()
     self.dim = dim
     self.device = device
     self.embed = nn.Embedding(500, dim)
     if model_type == "gcn":
         self.conv = GraphConv(dim, dim)
     elif model_type == "gat":
         self.conv = GATConv(dim, dim, 1)
     elif model_type == "gin":
         print("gin")
         self.conv = GINConv(apply_func=nn.Linear(dim, dim),
                             aggregator_type="mean")
     elif model_type == "sage":
         print("sage")
         self.conv = SAGEConv(dim, dim, aggregator_type="gcn")
     self.func = nn.Linear(dim, 2)
     self.act = nn.ReLU()
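Only the constructor is shown; below is a minimal per-node sketch of a forward pass (whether the 2-way classifier is applied per node or after a graph-level readout is not specified here, so this is an assumption):

 def forward(self, g, node_ids):
      h = self.embed(node_ids)           # (N, dim) learned node embeddings
      h = self.conv(g, h)                # one round of message passing
      if h.dim() == 3:                   # GATConv returns (N, num_heads, dim); flatten the single head
          h = h.flatten(1)
      h = self.act(h)
      return self.func(h)                # (N, 2) per-node logits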
Example #21
File: gnn.py Project: duzx16/lcgnn
    def __init__(self,
                 in_feats,
                 out_feats,
                 conv_type,
                 activation=None,
                 residual=True,
                 batchnorm=True,
                 dropout=0.,
                 num_heads=1,
                 negative_slope=0.2):
        super(ConvLayer, self).__init__()

        self.activation = activation
        self.conv_type = conv_type
        if conv_type == 'gcn':
            self.graph_conv = GraphConv(in_feats=in_feats,
                                        out_feats=out_feats,
                                        norm='both',
                                        activation=activation)
        elif conv_type == 'sage':
            self.graph_conv = SAGEConv(in_feats=in_feats,
                                       out_feats=out_feats,
                                       aggregator_type='mean',
                                       norm=None,
                                       activation=activation)
        elif conv_type == 'gat':
            assert out_feats % num_heads == 0
            self.graph_conv = GATConv(in_feats=in_feats,
                                      out_feats=out_feats // num_heads,
                                      num_heads=num_heads,
                                      feat_drop=dropout,
                                      attn_drop=dropout,
                                      negative_slope=negative_slope,
                                      activation=activation)
        self.dropout = nn.Dropout(dropout)

        self.residual = residual
        if residual:
            self.res_connection = nn.Linear(in_feats, out_feats)

        self.bn = batchnorm
        if batchnorm:
            self.bn_layer = nn.BatchNorm1d(out_feats)
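A hedged sketch of a forward pass for this ConvLayer, showing one way the residual connection and batch norm could wrap the chosen convolution (the head flattening for the GAT branch and the overall ordering are assumptions, not taken from the lcgnn project):

    def forward(self, g, feats):
        h = self.graph_conv(g, feats)
        if self.conv_type == 'gat':
            h = h.flatten(1)                    # (N, num_heads, out/num_heads) -> (N, out_feats)
        if self.residual:
            h = h + self.res_connection(feats)  # linear projection of the block input
        if self.bn:
            h = self.bn_layer(h)
        return self.dropout(h)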
Example #22
def track_time(graph_name, feat_dim, aggr_type):
    device = utils.get_bench_device()
    graph = utils.get_graph(graph_name).to(device)

    feat = torch.randn((graph.num_nodes(), feat_dim), device=device)
    model = SAGEConv(feat_dim,
                     feat_dim,
                     aggr_type,
                     activation=F.relu,
                     bias=False).to(device)

    # dry run
    for i in range(3):
        model(graph, feat)
    # timing
    with utils.Timer() as t:
        for i in range(50):
            model(graph, feat)

    return t.elapsed_secs / 50
Example #23
def track_time(feat_dim, num_relations):
    device = utils.get_bench_device()
    dd = {}
    nn_dict = {}
    candidate_edges = [
        dgl.data.CoraGraphDataset(verbose=False)[0].edges(),
        dgl.data.PubmedGraphDataset(verbose=False)[0].edges(),
        dgl.data.CiteseerGraphDataset(verbose=False)[0].edges()
    ]
    for i in range(num_relations):
        dd[('n1', 'e_{}'.format(i),
            'n2')] = candidate_edges[i % len(candidate_edges)]
        nn_dict['e_{}'.format(i)] = SAGEConv(feat_dim,
                                             feat_dim,
                                             'mean',
                                             activation=F.relu)

    # build the heterograph and the per-type input features
    feat_dict = {}
    graph = dgl.heterograph(dd)
    for i in range(num_relations):
        etype = 'e_{}'.format(i)
        feat_dict[etype] = torch.randn((graph[etype].num_nodes(), feat_dim),
                                       device=device)

    conv = HeteroGraphConv(nn_dict).to(device)

    # dry run
    for i in range(3):
        conv(graph, feat_dict)
    # timing
    with utils.Timer() as t:
        for i in range(50):
            conv(graph, feat_dict)

    return t.elapsed_secs / 50
Example #24
 def __init__(self, vocablen, embedding_dim):
     super(Encode_Graph, self).__init__()
     self.embed = nn.Embedding(vocablen, embedding_dim)
     self.gcn1 = GraphConv(16, 32)
     self.gcn2 = SAGEConv(32, 64, aggregator_type='pool')
     self.linear1 = nn.Linear(64, 64)
Example #25
    def __init__(self,
                 num_layers,
                 hidden_units,
                 k=10,
                 gcn_type='gcn',
                 node_attributes=None,
                 edge_weights=None,
                 node_embedding=None,
                 use_embedding=False,
                 num_nodes=None,
                 dropout=0.5,
                 max_z=1000):
        super(DGCNN, self).__init__()
        self.num_layers = num_layers
        self.dropout = dropout
        self.use_attribute = False if node_attributes is None else True
        self.use_embedding = use_embedding
        self.use_edge_weight = False if edge_weights is None else True

        self.z_embedding = nn.Embedding(max_z, hidden_units)

        if node_attributes is not None:
            self.node_attributes_lookup = nn.Embedding.from_pretrained(
                node_attributes)
            self.node_attributes_lookup.weight.requires_grad = False
        if edge_weights is not None:
            self.edge_weights_lookup = nn.Embedding.from_pretrained(
                edge_weights)
            self.edge_weights_lookup.weight.requires_grad = False
        if node_embedding is not None:
            self.node_embedding = nn.Embedding.from_pretrained(node_embedding)
            self.node_embedding.weight.requires_grad = False
        elif use_embedding:
            self.node_embedding = nn.Embedding(num_nodes, hidden_units)

        initial_dim = hidden_units
        if self.use_attribute:
            initial_dim += self.node_attributes_lookup.embedding_dim
        if self.use_embedding:
            initial_dim += self.node_embedding.embedding_dim

        self.layers = nn.ModuleList()
        if gcn_type == 'gcn':
            self.layers.append(
                GraphConv(initial_dim, hidden_units,
                          allow_zero_in_degree=True))
            for _ in range(num_layers - 1):
                self.layers.append(
                    GraphConv(hidden_units,
                              hidden_units,
                              allow_zero_in_degree=True))
            self.layers.append(
                GraphConv(hidden_units, 1, allow_zero_in_degree=True))
        elif gcn_type == 'sage':
            self.layers.append(
                SAGEConv(initial_dim, hidden_units, aggregator_type='gcn'))
            for _ in range(num_layers - 1):
                self.layers.append(
                    SAGEConv(hidden_units, hidden_units,
                             aggregator_type='gcn'))
            self.layers.append(SAGEConv(hidden_units, 1,
                                        aggregator_type='gcn'))
        else:
            raise ValueError('Gcn type error.')

        self.pooling = SortPooling(k=k)
        conv1d_channels = [16, 32]
        total_latent_dim = hidden_units * num_layers + 1
        conv1d_kws = [total_latent_dim, 5]
        self.conv_1 = nn.Conv1d(1, conv1d_channels[0], conv1d_kws[0],
                                conv1d_kws[0])
        self.maxpool1d = nn.MaxPool1d(2, 2)
        self.conv_2 = nn.Conv1d(conv1d_channels[0], conv1d_channels[1],
                                conv1d_kws[1], 1)
        dense_dim = int((k - 2) / 2 + 1)
        dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]
        self.linear_1 = nn.Linear(dense_dim, 128)
        self.linear_2 = nn.Linear(128, 1)
Example #26
    def __init__(self, k, feature_dims, emb_dims, output_classes, init_points = 512, input_dims=3,
                 dropout_prob=0.5, npart=1, id_skip=False, drop_connect_rate=0, res_scale = 1.0,
                 light = False, bias = False, cluster='xyz', conv='EdgeConv', use_xyz=True, use_se = True, graph_jitter = False):
        super(Model, self).__init__()

        self.npart = npart
        self.graph_jitter = graph_jitter
        self.res_scale = res_scale
        self.id_skip = id_skip
        self.drop_connect_rate = drop_connect_rate
        self.nng = KNNGraphE(k)  # with random neighbor
        self.conv = nn.ModuleList()
        self.conv_s1 = nn.ModuleList()
        self.conv_s2 = nn.ModuleList()
        self.bn = nn.ModuleList()
        self.sa = nn.ModuleList()
        self.cluster = cluster
        self.feature_dims = feature_dims
        self.conv_type = conv
        self.init_points = init_points
        self.k = k
        #self.proj_in = nn.Linear(input_dims, input_dims)

        self.num_layers = len(feature_dims)
        npoint = init_points
        for i in range(self.num_layers):
            if k==1: 
                    self.conv.append(nn.Linear(feature_dims[i-1] if i > 0 else input_dims, 
                                     feature_dims[i] ))
            elif conv == 'EdgeConv':
                if light:
                    self.conv.append(EdgeConv_Light(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i],
                        batch_norm=True))
                else: 
                    self.conv.append(EdgeConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i],
                        batch_norm=True))
            elif conv == 'GATConv':
                    self.conv.append(GATConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i],
                        feat_drop=0.2, attn_drop=0.2,
                        residual=True,
                        num_heads=1))
            elif conv == 'GraphConv':
                    self.conv.append( GraphConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i]))
            elif conv == 'SAGEConv':
                    self.conv.append( SAGEConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i],
                        feat_drop=0.2,
                        aggregator_type='mean', 
                        norm = nn.BatchNorm1d(feature_dims[i])
                        ) )
            elif conv == 'SGConv':
                    self.conv.append( SGConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i]) )
            elif conv == 'GatedGCN': # missing etypes
                    self.conv.append( GatedGCNLayer(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i], 
                        dropout=0.0, 
                        graph_norm=True, batch_norm=True, residual=True)
                        )


            if i>0 and feature_dims[i]>feature_dims[i-1]:
                npoint = npoint//2
                if id_skip and  npoint <= self.init_points//4: # Only work on high level
                    self.conv_s2.append( nn.Linear(feature_dims[i-1], feature_dims[i] ))

            self.sa.append(PointnetSAModule(
                npoint=npoint,
                radius=0.2,
                nsample=64,
                mlp=[feature_dims[i], feature_dims[i], feature_dims[i]],
                fuse = 'add',
                norml = 'bn',
                activation = 'relu',
                use_se = use_se,
                use_xyz = use_xyz,
                use_neighbor = False,
                light = light
            ))
            #if id_skip:
            #    self.conv_s1.append( nn.Linear(feature_dims[i], feature_dims[i] ))

        self.embs = nn.ModuleList()
        self.bn_embs = nn.ModuleList()
        self.dropouts = nn.ModuleList()

        self.partpool =  nn.AdaptiveAvgPool1d(self.npart)
        if self.npart == 1: 
            self.embs.append(nn.Linear(
                # * 2 because of concatenation of max- and mean-pooling
                feature_dims[-1]*2, emb_dims[0], bias=bias))
            self.bn_embs.append(nn.BatchNorm1d(emb_dims[0]))
            self.dropouts.append(nn.Dropout(dropout_prob, inplace=True))
            self.proj_output = nn.Linear(emb_dims[0], output_classes)
            self.proj_output.apply(weights_init_classifier)
        else: 
            self.proj_outputs = nn.ModuleList()
            for i in range(0, self.npart):
                self.embs.append(nn.Linear(512, 512, bias=bias))
                self.bn_embs.append(nn.BatchNorm1d(512))
                self.dropouts.append(nn.Dropout(dropout_prob, inplace=True))
                self.proj_outputs.append(nn.Linear(512, output_classes))
            self.proj_outputs.apply(weights_init_classifier)

        # initial
        #self.proj_in.apply(weights_init_kaiming)
        self.conv.apply(weights_init_kaiming)
        self.conv_s1.apply(weights_init_kaiming)
        self.conv_s2.apply(weights_init_kaiming)
        weights_init_kaiming2 = lambda x:weights_init_kaiming(x,L=self.num_layers)
        self.sa.apply(weights_init_kaiming2) 
        #self.proj.apply(weights_init_kaiming)
        self.embs.apply(weights_init_kaiming)
        self.bn.apply(weights_init_kaiming)
        self.bn_embs.apply(weights_init_kaiming)
        self.npart = npart
Example #27
    def __init__(self,
                 num_layers,
                 hidden_units,
                 gcn_type='gcn',
                 pooling_type='sum',
                 node_attributes=None,
                 edge_weights=None,
                 node_embedding=None,
                 use_embedding=False,
                 num_nodes=None,
                 dropout=0.5,
                 max_z=1000):
        super(GCN, self).__init__()
        self.num_layers = num_layers
        self.dropout = dropout
        self.pooling_type = pooling_type
        self.use_attribute = False if node_attributes is None else True
        self.use_embedding = use_embedding
        self.use_edge_weight = False if edge_weights is None else True

        self.z_embedding = nn.Embedding(max_z, hidden_units)
        if node_attributes is not None:
            self.node_attributes_lookup = nn.Embedding.from_pretrained(
                node_attributes)
            self.node_attributes_lookup.weight.requires_grad = False
        if edge_weights is not None:
            self.edge_weights_lookup = nn.Embedding.from_pretrained(
                edge_weights)
            self.edge_weights_lookup.weight.requires_grad = False
        if node_embedding is not None:
            self.node_embedding = nn.Embedding.from_pretrained(node_embedding)
            self.node_embedding.weight.requires_grad = False
        elif use_embedding:
            self.node_embedding = nn.Embedding(num_nodes, hidden_units)

        initial_dim = hidden_units
        if self.use_attribute:
            initial_dim += self.node_attributes_lookup.embedding_dim
        if self.use_embedding:
            initial_dim += self.node_embedding.embedding_dim

        self.layers = nn.ModuleList()
        if gcn_type == 'gcn':
            self.layers.append(
                GraphConv(initial_dim, hidden_units,
                          allow_zero_in_degree=True))
            for _ in range(num_layers - 1):
                self.layers.append(
                    GraphConv(hidden_units,
                              hidden_units,
                              allow_zero_in_degree=True))
        elif gcn_type == 'sage':
            self.layers.append(
                SAGEConv(initial_dim, hidden_units, aggregator_type='gcn'))
            for _ in range(num_layers - 1):
                self.layers.append(
                    SAGEConv(hidden_units, hidden_units,
                             aggregator_type='gcn'))
        else:
            raise ValueError('Gcn type error.')

        self.linear_1 = nn.Linear(hidden_units, hidden_units)
        self.linear_2 = nn.Linear(hidden_units, 1)
        if pooling_type != 'sum':
            raise ValueError('Pooling type error.')
        self.pooling = SumPooling()