Example #1
    def msg_edge(self, edge):
        # lift the edge distance to the same rank as the node states
        dist = edge.data['dist']
        while len(dist.shape) < len(edge.src['state'].shape):
            dist = nd.expand_dims(dist, axis=1)
        # broadcast over all but the last axis (a 0 in the target shape keeps
        # the original dimension in MXNet)
        dist = nd.broadcast_to(dist,
                               shape=edge.src['state'].shape[:-1] + (0,))
        # attention logits from [src_state, dst_state, dist] via a dense layer
        state = nd.concat(edge.src['state'], edge.dst['state'], dist, dim=-1)
        alpha = nd.LeakyReLU(self.dense(state))
        return {'alpha': alpha, 'state': edge.src['state']}
Example #2
import os
import pickle

from mxnet import gluon, nd

# GCN, GraphSAGE, GAT, HeteroRGCN and NodeEmbeddingGNN are the project's own
# model classes, defined elsewhere in the same repository.
def get_model(g, hyperparams, in_feats, n_classes, ctx, model_dir=None):

    if model_dir:  # load using saved model state
        with open(os.path.join(model_dir, 'model_hyperparams.pkl'), 'rb') as f:
            hyperparams = pickle.load(f)
        with open(os.path.join(model_dir, 'graph.pkl'), 'rb') as f:
            g = pickle.load(f)

    if hyperparams['heterogeneous']:
        model = HeteroRGCN(g,
                           in_feats,
                           hyperparams['n_hidden'],
                           n_classes,
                           hyperparams['n_layers'],
                           hyperparams['embedding_size'],
                           ctx)
    else:
        if hyperparams['model'] == 'gcn':
            model = GCN(g,
                        in_feats,
                        hyperparams['n_hidden'],
                        n_classes,
                        hyperparams['n_layers'],
                        nd.relu,
                        hyperparams['dropout'])
        elif hyperparams['model'] == 'graphsage':
            model = GraphSAGE(g,
                              in_feats,
                              hyperparams['n_hidden'],
                              n_classes,
                          hyperparams['n_layers'],
                              nd.relu,
                              hyperparams['dropout'],
                              hyperparams['aggregator_type'])
        else:
            heads = ([hyperparams['num_heads']] * hyperparams['n_layers']) + [hyperparams['num_out_heads']]
            model = GAT(g,
                        in_feats,
                        hyperparams['n_hidden'],
                        n_classes,
                        hyperparams['n_layers'],
                        heads,
                        gluon.nn.Lambda(lambda data: nd.LeakyReLU(data, act_type='elu')),
                        hyperparams['dropout'],
                        hyperparams['attn_drop'],
                        hyperparams['alpha'],
                        hyperparams['residual'])

    if hyperparams['no_features']:
        model = NodeEmbeddingGNN(model, in_feats, hyperparams['embedding_size'])

    if model_dir:
        model.load_parameters(os.path.join(model_dir, 'model.params'))
    else:
        model.initialize(ctx=ctx)

    return model
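A hypothetical call for orientation (not part of the original project); the keys mirror the ones get_model reads above, and g / features stand in for an already-prepared DGL graph and node-feature NDArray:

import mxnet as mx

hyperparams = {'heterogeneous': False, 'model': 'gcn', 'n_hidden': 16,
               'n_layers': 2, 'dropout': 0.5, 'no_features': False}
model = get_model(g, hyperparams, in_feats=features.shape[1],
                  n_classes=2, ctx=mx.cpu())
logits = model(features)  # the returned Gluon block is callable on node features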
Example #3
    def msg_edge(self, edge):
        state = nd.concat(edge.src['state'], edge.dst['state'], dim=-1)
        ctx = state.context

        # attention logits from the concatenated endpoint states
        alpha = nd.LeakyReLU(nd.dot(state, self.weight.data(ctx)))

        # lift the edge distance to the same rank as alpha
        dist = edge.data['dist']
        while len(dist.shape) < len(alpha.shape):
            dist = nd.expand_dims(dist, axis=-1)

        # scale the logits by the (broadcast) distance
        alpha = alpha * dist
        return {'alpha': alpha, 'state': edge.src['state']}
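Both msg_edge variants above follow DGL's edge-UDF contract: they receive an edge batch and return a dict of per-edge messages. A minimal sketch (not from either project) of wiring such a UDF into message passing, assuming DGL's MXNet backend (DGLBACKEND=mxnet) and a made-up distance-weighted reducer:

import dgl
import mxnet.ndarray as nd

g = dgl.graph(([0, 1, 2], [1, 2, 0]))                # toy 3-node cycle
g.ndata['state'] = nd.random.uniform(shape=(3, 4))   # per-node state
g.edata['dist'] = nd.random.uniform(shape=(3,))      # per-edge distance

def msg_edge(edge):
    # same contract as the methods above: one message dict per edge batch
    return {'alpha': edge.data['dist'], 'state': edge.src['state']}

def reduce_node(node):
    # hypothetical reducer: distance-weighted sum of neighbour states
    w = nd.expand_dims(node.mailbox['alpha'], axis=-1)
    return {'state': nd.sum(w * node.mailbox['state'], axis=1)}

g.update_all(msg_edge, reduce_node)  # runs the UDFs over every edge and node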
Example #4
def leaky_relu(x):
    """slope=0.1 leaky ReLu

    Parameters
    ----------
    x : NDArray
        Input

    Returns
    -------
    y : NDArray
        y = x > 0 ? x : 0.1 * x
    """
    return nd.LeakyReLU(x, slope=.1)
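A quick sanity check of the slope (a usage sketch, not part of the original file):

import mxnet.ndarray as nd

x = nd.array([-2.0, 0.0, 3.0])
print(leaky_relu(x))  # [-0.2  0.  3.]; negatives are scaled by 0.1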
Example #5
    def forward(self, g, features):
        # note: g is a sequence of graphs (one sampled block per layer);
        # g[0] hosts the node-type embeddings and layer i consumes g[i]
        # get embeddings for all node types. for target node type, use passed in target features
        h_dict = {'target': features}
        for ntype in self.embed_dict:
            if g[0].number_of_nodes(ntype) > 0:
                h_dict[ntype] = self.embed_dict[ntype](nd.array(g[0].nodes(ntype), self.ctx))

        # pass through all layers
        for i, layer in enumerate(self.layers[:-1]):
            if i != 0:
                h_dict = {k: nd.LeakyReLU(h) for k, h in h_dict.items()}
            h_dict = layer(g[i], h_dict)

        # get target logits from the final layer
        return self.layers[-1](h_dict['target'])
Example #6
    def msg_edge(self, edge):
        state = nd.concat(edge.src['state'], edge.dst['state'], dim=-1)
        feature = nd.concat(edge.src['feature'], edge.dst['feature'], edge.data['dist'], dim=-1)

        # generate weight by hypernetwork
        weight = self.w_mlp(feature)
        weight = nd.reshape(weight, shape=(-1, self.hidden_size * 2, self.hidden_size))

        # reshape state to [n, b * t, d] for batch_dot (MXNet currently only supports batch_dot on 3-D tensors)
        shape = state.shape
        state = nd.reshape(state, shape=(shape[0], -1, shape[-1]))

        alpha = nd.LeakyReLU(nd.batch_dot(state, weight))

        # reshape alpha to [n, b, t, d]
        alpha = nd.reshape(alpha, shape=shape[:-1] + (self.hidden_size,))
        return {'alpha': alpha, 'state': edge.src['state']}
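Design note: this variant is a small hypernetwork. The edge weight matrix is not a shared parameter; w_mlp generates a separate (2 * hidden_size, hidden_size) matrix per edge from the endpoint features and the distance, and batch_dot applies each edge's own matrix to its state. The reshape round-trip exists only because MXNet's batch_dot is limited to 3-D tensors.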
Example #7
    def forward(self, x):
        y = nd.LeakyReLU(x, act_type='leaky', slope=self._alpha, name='fwd')
        # remember where the input was negative for the custom backward pass
        self.save_for_backward(x < 0.)
        return y
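The saved x < 0. mask suggests this forward lives on a custom mxnet.autograd.Function. A hypothetical companion backward (not from the original project, and assuming MXNet's saved_tensors accessor) would scale the upstream gradient by the slope wherever the input was negative:

    def backward(self, dy):
        neg_mask, = self.saved_tensors  # the x < 0. mask stored in forward
        # leaky ReLU gradient: dy where x >= 0, alpha * dy where x < 0
        return nd.where(neg_mask, dy * self._alpha, dy)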
Example #8
File: base.py  Project: chr5tphr/ecGAN
    def forward(self, x):
        return nd.LeakyReLU(x, act_type='leaky', slope=self._alpha, name='fwd')