import paddle.fluid as fluid
import paddle.fluid.layers as L

from pgl import message_passing


def gen_layer(gw, nfeat, efeat, hidden_size, name):
    """DeeperGCN layer that combines node and edge features.

    Messages are relu(src_feat + edge_feat), aggregated per destination node
    with a learnable softmax aggregator, normalized with message normalization,
    added back to the input node features, and passed through an MLP.
    """
    def _send_func(src_feat, dst_feat, edge_feat):
        h = src_feat["h"] + edge_feat["h"]
        h = L.relu(h)
        return h

    def _recv_func(msg):
        return L.sequence_pool(msg, "sum")

    beta = L.create_parameter(
        shape=[1],
        dtype='float32',
        default_initializer=fluid.initializer.ConstantInitializer(value=1.0),
        name=name + '_beta')

    # message passing
    msg = gw.send(_send_func, nfeat_list=[("h", nfeat)], efeat_list=[("h", efeat)])
    output = gw.recv(msg, message_passing.softmax_agg(beta))

    # msg norm and residual connection
    output = message_passing.msg_norm(nfeat, output, name)
    output = nfeat + output

    output = mlp(output, [hidden_size * 2, hidden_size],
                 norm="layer_norm", name=name)
    return output
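
# --- Hedged sketch (assumption, not the project's actual helper) ---
# `gen_layer` above calls an external `mlp` helper. A minimal version matching
# that call signature (a stack of fc layers with optional layer normalization)
# might look like the following; parameter names are illustrative only.
def mlp(feature, hidden_sizes, norm=None, name=""):
    """Stack of fully connected layers with optional layer_norm (sketch)."""
    for i, size in enumerate(hidden_sizes):
        feature = L.fc(
            feature, size, act="relu",
            param_attr=fluid.ParamAttr(name="%s_mlp_w_%d" % (name, i)))
        if norm == "layer_norm":
            feature = L.layer_norm(
                feature,
                begin_norm_axis=1,
                param_attr=fluid.ParamAttr(name="%s_mlp_ln_scale_%d" % (name, i)),
                bias_attr=fluid.ParamAttr(name="%s_mlp_ln_bias_%d" % (name, i)))
    return feature
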
def gen_conv(gw, feature, name, beta=None):
    """Implementation of GENeralized Graph Convolution (GENConv), see the paper
    "DeeperGCN: All You Need to Train Deeper GCNs" in
    https://arxiv.org/pdf/2006.07739.pdf

    Args:
        gw: Graph wrapper object (:code:`StaticGraphWrapper` or :code:`GraphWrapper`)

        feature: A tensor with shape (num_nodes, feature_size).

        name: deeper gcn layer name.

        beta: A fixed non-negative float, the string "dynamic" for a learnable
            aggregation temperature, or None.

    Return:
        A tensor with shape (num_nodes, feature_size)
    """
    if beta == "dynamic":
        beta = L.create_parameter(
            shape=[1],
            dtype='float32',
            default_initializer=fluid.initializer.ConstantInitializer(value=1.0),
            name=name + '_beta')

    # message passing: copy source features as messages and aggregate them
    # with the (optionally learnable) softmax aggregator
    msg = gw.send(message_passing.copy_send, nfeat_list=[("h", feature)])
    output = gw.recv(msg, message_passing.softmax_agg(beta))

    # msg norm and residual connection
    output = message_passing.msg_norm(feature, output, name)
    output = feature + output

    # two-layer MLP update
    output = L.fc(output,
                  feature.shape[-1],
                  bias_attr=False,
                  act="relu",
                  param_attr=fluid.ParamAttr(name=name + '_weight1'))
    output = L.fc(output,
                  feature.shape[-1],
                  bias_attr=False,
                  param_attr=fluid.ParamAttr(name=name + '_weight2'))
    return output
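
# --- Hedged usage sketch (illustrative, not part of the library) ---
# Assumes `gw` is an already-built PGL GraphWrapper exposing a node feature
# named "h"; `hidden_size`, `num_layers` and `num_classes` are example
# hyper-parameters, and the "dynamic" beta choice is illustrative only.
def build_gen_conv_sketch(gw, hidden_size, num_layers, num_classes):
    """Stacks gen_conv layers on top of a projected node feature (sketch)."""
    feature = L.fc(gw.node_feat["h"], hidden_size,
                   param_attr=fluid.ParamAttr(name="init_proj_weight"))
    for i in range(num_layers):
        feature = gen_conv(gw, feature, name="gen_conv_%d" % i, beta="dynamic")
    logits = L.fc(feature, num_classes,
                  param_attr=fluid.ParamAttr(name="pred_weight"))
    return logits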