Example #1
 def __init__(self, in_feat, hidden_feat, out_feat, rel_names):
     super().__init__()
     self.conv1 = dglnn.HeteroGraphConv({
         rel: dglnn.GATConv(in_feat, hidden_feat, num_heads=4)
         for rel in rel_names
     })
     self.conv2 = dglnn.HeteroGraphConv({
         rel: dglnn.GATConv(hidden_feat, out_feat, num_heads=4)
         for rel in rel_names
     })
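No forward pass is shown in the source. Note that GATConv returns a (num_nodes, num_heads, out_feats) tensor per node type, so the head dimension must be handled between the two layers. A minimal, hedged sketch that keeps the constructor's dimensions consistent by averaging the heads (assuming F is torch.nn.functional):

 def forward(self, graph, inputs):
     h = self.conv1(graph, inputs)
     # GATConv outputs (N, num_heads, hidden_feat); averaging the heads keeps
     # conv2's in_feat=hidden_feat valid (flattening would need hidden_feat * 4)
     h = {k: F.relu(v.mean(dim=1)) for k, v in h.items()}
     # the result still carries the head dimension: (N, num_heads, out_feat)
     return self.conv2(graph, h)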
Example #2
    def __init__(self, in_feats, hid_feats, out_feats, rel_names):
        super().__init__()

        self.conv1 = dglnn.HeteroGraphConv(
            {rel: dglnn.GraphConv(in_feats, hid_feats)
             for rel in rel_names},
            aggregate='sum')
        self.conv2 = dglnn.HeteroGraphConv(
            {rel: dglnn.GraphConv(hid_feats, out_feats)
             for rel in rel_names},
            aggregate='sum')
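The matching forward pass, as in DGL's heterogeneous-graph tutorial (assuming F is torch.nn.functional):

    def forward(self, graph, inputs):
        # inputs: dict mapping node type -> feature tensor
        h = self.conv1(graph, inputs)
        h = {k: F.relu(v) for k, v in h.items()}
        h = self.conv2(graph, h)
        return h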
Example #3
    def __init__(self, hidden_dims, num_ratings):
        super().__init__()

        self.heteroconv = dglnn.HeteroGraphConv(
            {
                'watchedby': GCMCConv(hidden_dims, num_ratings),
                'watched': GCMCConv(hidden_dims, num_ratings),
            },
            aggregate='sum')
Example #4
 def __init__(self, in_dim, h_dim, out_dim, n_layers, activation, dropout, rel_names):
     super().__init__()
     self.h_dim = h_dim
     self.out_dim = out_dim
     self.in_dim = in_dim
     self.layers = nn.ModuleList()
     # input-to-hidden layer
     self.layers.append(dglnn.HeteroGraphConv(
         {rel: dglnn.GraphConv(in_dim, h_dim) for rel in rel_names}))
     # hidden-to-hidden layers
     for i in range(1, n_layers - 1):
         self.layers.append(dglnn.HeteroGraphConv(
             {rel: dglnn.GraphConv(h_dim, h_dim) for rel in rel_names}))
     # hidden-to-output layer
     self.layers.append(dglnn.HeteroGraphConv(
         {rel: dglnn.GraphConv(h_dim, out_dim) for rel in rel_names}))
     self.dropout = nn.Dropout(dropout)
     self.activation = activation
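A hedged sketch of the corresponding forward pass, assuming activation and dropout are applied between (but not after) layers:

 def forward(self, g, inputs):
     h = inputs
     for i, layer in enumerate(self.layers):
         h = layer(g, h)
         if i < len(self.layers) - 1:
             # nonlinearity + dropout on every hidden representation
             h = {k: self.dropout(self.activation(v)) for k, v in h.items()}
     return h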
Example #5
 def __init__(self, in_dim, h_dim, n_layers, activation, dropout, rel_names, label_entity):
     super().__init__()
     self.h_dim = h_dim
     self.in_dim = in_dim
     self.layers = nn.ModuleList()
     # self.batch_norms = nn.ModuleList()
     # input-to-hidden layer
     self.layers.append(dglnn.HeteroGraphConv(
         {rel: dglnn.SAGEConv(in_dim, h_dim, 'mean', activation=activation) for rel in rel_names}))
     # self.batch_norms.append(nn.BatchNorm1d(h_dim))
     # hidden-to-hidden layers
     for i in range(1, n_layers - 1):
         self.layers.append(dglnn.HeteroGraphConv(
             {rel: dglnn.SAGEConv(h_dim, h_dim, 'mean', feat_drop=dropout, activation=activation) for rel in rel_names}))
         # self.batch_norms.append(nn.BatchNorm1d(h_dim))
     # hidden-to-output layer (one score per node)
     self.layers.append(dglnn.HeteroGraphConv(
         {rel: dglnn.SAGEConv(h_dim, 1, 'mean', feat_drop=dropout) for rel in rel_names}))
     self.label_entity = label_entity
Example #6
    def __init__(self,
                 in_feat,
                 out_feat,
                 rel_names,
                 num_bases,
                 *,
                 weight=True,
                 bias=True,
                 activation=None,
                 self_loop=False,
                 dropout=0.0):
        super(RelGraphConvLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.rel_names = rel_names
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.self_loop = self_loop

        self.conv = dglnn.HeteroGraphConv({
            rel: dglnn.GraphConv(in_feat,
                                 out_feat,
                                 norm='right',
                                 weight=False,
                                 bias=False)
            for rel in rel_names
        })

        self.use_weight = weight
        self.use_basis = num_bases < len(self.rel_names) and weight
        if self.use_weight:
            if self.use_basis:
                self.basis = dglnn.WeightBasis((in_feat, out_feat), num_bases,
                                               len(self.rel_names))
            else:
                self.weight = nn.Parameter(
                    torch.Tensor(len(self.rel_names), in_feat, out_feat))
                nn.init.xavier_uniform_(self.weight,
                                        gain=nn.init.calculate_gain('relu'))

        # bias
        if bias:
            self.h_bias = nn.Parameter(torch.Tensor(out_feat))
            nn.init.zeros_(self.h_bias)

        # weight for self loop
        if self.self_loop:
            self.loop_weight = nn.Parameter(torch.Tensor(in_feat, out_feat))
            nn.init.xavier_uniform_(self.loop_weight,
                                    gain=nn.init.calculate_gain('relu'))

        self.dropout = nn.Dropout(dropout)
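The forward pass is not included above; a sketch following DGL's RGCN entity-classification example, which materializes one weight per relation (from the basis decomposition, if used) and injects it into each sub-convolution via mod_kwargs (minibatch/block handling omitted):

    def forward(self, g, inputs):
        g = g.local_var()
        if self.use_weight:
            weight = self.basis() if self.use_basis else self.weight
            # split (num_rels, in_feat, out_feat) into one weight per relation
            wdict = {self.rel_names[i]: {'weight': w.squeeze(0)}
                     for i, w in enumerate(torch.split(weight, 1, dim=0))}
        else:
            wdict = {}
        hs = self.conv(g, inputs, mod_kwargs=wdict)

        def _apply(ntype, h):
            if self.self_loop:
                h = h + torch.matmul(inputs[ntype], self.loop_weight)
            if self.bias:
                h = h + self.h_bias
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)

        return {ntype: _apply(ntype, h) for ntype, h in hs.items()}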
Example #7
    def __init__(
        self,
        in_feats: int,
        out_feats: int,
        rel_names: List[str],
        num_bases: int,
        norm: str = 'right',
        weight: bool = True,
        bias: bool = True,
        activation: Optional[Callable[[torch.Tensor], torch.Tensor]] = None,
        dropout: Optional[float] = None,
        self_loop: bool = False,
    ):
        super().__init__()
        self._rel_names = rel_names
        self._num_rels = len(rel_names)
        self._conv = dglnn.HeteroGraphConv({
            rel: dglnn.GraphConv(in_feats,
                                 out_feats,
                                 norm=norm,
                                 weight=False,
                                 bias=False)
            for rel in rel_names
        })
        self._use_weight = weight
        self._use_basis = num_bases < self._num_rels and weight
        self._use_bias = bias
        self._activation = activation
        self._dropout = nn.Dropout(dropout) if dropout is not None else None
        self._use_self_loop = self_loop

        if weight:
            if self._use_basis:
                self.basis = dglnn.WeightBasis((in_feats, out_feats),
                                               num_bases, self._num_rels)
            else:
                self.weight = nn.Parameter(
                    torch.Tensor(self._num_rels, in_feats, out_feats))
                nn.init.xavier_uniform_(self.weight,
                                        gain=nn.init.calculate_gain('relu'))

        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_feats))
            nn.init.zeros_(self.bias)

        if self_loop:
            self.self_loop_weight = nn.Parameter(
                torch.Tensor(in_feats, out_feats))
            nn.init.xavier_uniform_(self.self_loop_weight,
                                    gain=nn.init.calculate_gain('relu'))
Example #8
    def __init__(self, graph: dgl.DGLHeteroGraph, input_dim: int, hidden_dim: int, n_heads: int = 4,
                 dropout: float = 0.2, residual: bool = True):
        """
        :param graph: a heterogeneous graph
        :param input_dim: int, input dimension
        :param hidden_dim: int, hidden dimension
        :param n_heads: int, number of attention heads
        :param dropout: float, dropout rate
        :param residual: boolean, residual connections or not
        """
        super(HGConvLayer, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.n_heads = n_heads
        self.dropout = dropout
        self.residual = residual

        # hetero conv modules
        self.micro_conv = dglnn.HeteroGraphConv({
            etype: LSTMConv(dim=input_dim)
            for srctype, etype, dsttype in graph.canonical_etypes
        })

        # different types aggregation module
        self.macro_conv = MacroConv(in_feats=hidden_dim * n_heads,
                                    out_feats=hidden_dim,
                                    num_heads=n_heads,
                                    dropout=dropout,
                                    negative_slope=0.2)

        if self.residual:
            # residual connection
            self.res_fc = nn.ModuleDict()
            self.residual_weight = nn.ParameterDict()
            for ntype in graph.ntypes:
                self.res_fc[ntype] = nn.Linear(input_dim, n_heads * hidden_dim, bias=True)
                self.residual_weight[ntype] = nn.Parameter(torch.randn(1))

        self.reset_parameters()
Example #9
 def __init__(self,
              user_in_units,
              movie_in_units,
              msg_units,
              out_units,
              rating_vals,
              dropout_rate=0.0,
              agg='stack',  # or 'sum'
              agg_act=None,
              out_act=None
             ):
     super(MyLayer, self).__init__()
     len_rate = len(rating_vals)
      # with agg='stack', the per-rating messages are concatenated,
      # so the fc layers take msg_units * len_rate inputs
      self.ufc = nn.Linear(msg_units * len_rate, out_units)
      self.ifc = nn.Linear(msg_units * len_rate, out_units)
      self.dropout = nn.Dropout(dropout_rate)
      self.agg = agg
      subConv = {}
      for rating in rating_vals:
          # PyTorch module names can't contain '.', so sanitize the key
          rating = str(rating).replace('.', '_')
          subConv[rating] = MyGraphConv(user_in_units,
                                        msg_units,
                                        dropout_rate=dropout_rate)
          subConv[rating + 'ed'] = MyGraphConv(movie_in_units,
                                               msg_units,
                                               dropout_rate=dropout_rate)
      self.conv = dglnn.HeteroGraphConv(subConv, agg)
     self.agg_act = get_activation(agg_act)
     self.out_act = get_activation(out_act)
     self.reset_parameters()
Example #10
 def __init__(self,
              dim_in,
              dim_out,
              dim_t,
              numr,
              nume,
              g,
              dropout=0,
              deepth=2,
              sampling=None,
              granularity=1,
              r_limit=None):
     super(EmbModule, self).__init__()
     self.dim_in = dim_in
     self.dim_out = dim_out
     self.dim_t = dim_t
     self.numr = numr
     self.nume = nume
     self.deepth = deepth
     self.g = g
     self.granularity = granularity
     mods = dict()
     mods['time_enc'] = TimeEnc(dim_t, nume)
     mods['entity_emb'] = nn.Embedding(nume, dim_in)
     if r_limit is None:
         r_limit = numr
     for l in range(self.deepth):
         mods['norm' + str(l)] = nn.LayerNorm(dim_in + dim_t)
         conv_dict = dict()
         for r in range(r_limit):
             conv_dict['r' + str(r)] = dglnn.GATConv(dim_in + dim_t,
                                                     dim_out // 4,
                                                     4,
                                                     feat_drop=dropout,
                                                     attn_drop=dropout,
                                                     residual=False)
             conv_dict['-r' + str(r)] = dglnn.GATConv(dim_in + dim_t,
                                                      dim_out // 4,
                                                      4,
                                                      feat_drop=dropout,
                                                      attn_drop=dropout,
                                                      residual=False)
          # shared conv for self-loop edges; one instance per layer suffices
          conv_dict['self'] = dglnn.GATConv(dim_in + dim_t,
                                            dim_out // 4,
                                            4,
                                            feat_drop=dropout,
                                            attn_drop=dropout,
                                            residual=False)
         mods['conv' + str(l)] = dglnn.HeteroGraphConv(conv_dict,
                                                       aggregate='mean')
         mods['act' + str(l)] = nn.ReLU()
         dim_in = dim_out
     self.mods = nn.ModuleDict(mods)
     if sampling is not None:
         fanouts = [int(d) for d in sampling.split('/')]
         self.sampler = dgl.dataloading.MultiLayerNeighborSampler(
             fanouts=fanouts)
     else:
         self.sampler = dgl.dataloading.MultiLayerFullNeighborSampler(
             self.deepth)
Example #11
 def __init__(
         self,
         rating_vals,
         user_in_units,
         movie_in_units,
         msg_units,
         out_units,
         dropout_rate=0.0,
         agg='stack',  # or 'sum'
         agg_act=None,
         out_act=None,
         share_user_item_param=False,
         device=None):
     super(GCMCLayer, self).__init__()
     self.rating_vals = rating_vals
     self.agg = agg
     self.share_user_item_param = share_user_item_param
     self.ufc = nn.Linear(msg_units, out_units)
     if share_user_item_param:
         self.ifc = self.ufc
     else:
         self.ifc = nn.Linear(msg_units, out_units)
     if agg == 'stack':
         # divide the original msg unit size by number of ratings to keep
         # the dimensionality
         assert msg_units % len(rating_vals) == 0
         msg_units = msg_units // len(rating_vals)
     self.dropout = nn.Dropout(dropout_rate)
     self.W_r = nn.ParameterDict()
     subConv = {}
     for rating in rating_vals:
         # PyTorch parameter name can't contain "."
         rating = str(rating).replace('.', '_')
         rev_rating = 'rev-%s' % rating
         if share_user_item_param and user_in_units == movie_in_units:
             self.W_r[rating] = nn.Parameter(
                 th.randn(user_in_units, msg_units))
              self.W_r[rev_rating] = self.W_r[rating]
             subConv[rating] = GCMCGraphConv(user_in_units,
                                             msg_units,
                                             weight=False,
                                             device=device,
                                             dropout_rate=dropout_rate)
             subConv[rev_rating] = GCMCGraphConv(user_in_units,
                                                 msg_units,
                                                 weight=False,
                                                 device=device,
                                                 dropout_rate=dropout_rate)
         else:
             self.W_r = None
             subConv[rating] = GCMCGraphConv(user_in_units,
                                             msg_units,
                                             weight=True,
                                             device=device,
                                             dropout_rate=dropout_rate)
             subConv[rev_rating] = GCMCGraphConv(movie_in_units,
                                                 msg_units,
                                                 weight=True,
                                                 device=device,
                                                 dropout_rate=dropout_rate)
     self.conv = dglnn.HeteroGraphConv(subConv, aggregate=agg)
     self.agg_act = get_activation(agg_act)
     self.out_act = get_activation(out_act)
     self.device = device
     self.reset_parameters()
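A condensed, hedged sketch of the corresponding forward pass: the per-rating projection weights are handed to each GCMCGraphConv through mod_args, and with agg='stack' the stacked per-rating outputs are flattened before the shared fc layers:

 def forward(self, graph, ufeat=None, ifeat=None):
     in_feats = {'user': ufeat, 'movie': ifeat}
     mod_args = {}
     for rating in self.rating_vals:
         rating = str(rating).replace('.', '_')
         rev_rating = 'rev-%s' % rating
         mod_args[rating] = (
             self.W_r[rating] if self.W_r is not None else None,)
         mod_args[rev_rating] = (
             self.W_r[rev_rating] if self.W_r is not None else None,)
     out_feats = self.conv(graph, in_feats, mod_args=mod_args)
     # agg='stack': (N, |ratings|, msg_units) -> (N, |ratings| * msg_units)
     ufeat = out_feats['user'].view(out_feats['user'].shape[0], -1)
     ifeat = out_feats['movie'].view(out_feats['movie'].shape[0], -1)
     ufeat = self.ufc(self.dropout(self.agg_act(ufeat)))
     ifeat = self.ifc(self.dropout(self.agg_act(ifeat)))
     return self.out_act(ufeat), self.out_act(ifeat)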
Example #12
 def __init__(
         self,
         rating_vals,
         user_in_units,
         movie_in_units,
         msg_units,
         out_units,
         dropout_rate=0.0,
         agg='stack',  # or 'sum'
         agg_act=None,
         out_act=None,
         share_user_item_param=False,
         ini=True,
         basis_units=4,
         device=None):
     super(GCMCLayer, self).__init__()
     self.rating_vals = rating_vals
     self.agg = agg
     self.share_user_item_param = share_user_item_param
     self.ufc = nn.Linear(msg_units, out_units)
     self.user_in_units = user_in_units
     self.msg_units = msg_units
     if share_user_item_param:
         self.ifc = self.ufc
     else:
         self.ifc = nn.Linear(msg_units, out_units)
     if agg == 'stack':
         # divide the original msg unit size by number of ratings to keep
         # the dimensionality
         assert msg_units % len(rating_vals) == 0
         msg_units = msg_units // len(rating_vals)
     if ini:
         msg_units = msg_units // 3
     self.ini = ini
     self.msg_units = msg_units
     self.dropout = nn.Dropout(dropout_rate)
      self.W_r = {}
     subConv = {}
     self.basis_units = basis_units
     self.att = nn.Parameter(th.randn(len(self.rating_vals), basis_units))
     self.basis = nn.Parameter(
         th.randn(basis_units, user_in_units, msg_units))
     for i, rating in enumerate(rating_vals):
         # PyTorch parameter name can't contain "."
         rating = to_etype_name(rating)
         rev_rating = 'rev-%s' % rating
         if share_user_item_param and user_in_units == movie_in_units:
             subConv[rating] = GCMCGraphConv(user_in_units,
                                             msg_units,
                                             weight=False,
                                             device=device,
                                             dropout_rate=dropout_rate)
             subConv[rev_rating] = GCMCGraphConv(user_in_units,
                                                 msg_units,
                                                 weight=False,
                                                 device=device,
                                                 dropout_rate=dropout_rate)
         else:
             self.W_r = None
             subConv[rating] = GCMCGraphConv(user_in_units,
                                             msg_units,
                                             weight=True,
                                             device=device,
                                             dropout_rate=dropout_rate)
             subConv[rev_rating] = GCMCGraphConv(movie_in_units,
                                                 msg_units,
                                                 weight=True,
                                                 device=device,
                                                 dropout_rate=dropout_rate)
     self.conv = dglnn.HeteroGraphConv(subConv, aggregate=agg)
     self.agg_act = get_activation(agg_act)
     self.out_act = get_activation(out_act)
     self.device = device
     self.reset_parameters()
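The source does not show how att and basis are combined (reset_parameters and forward are omitted), but the shapes suggest each rating's projection is a learned mixture of basis matrices, in the spirit of dglnn.WeightBasis. A hedged sketch of that composition:

 # hypothetical: W[i] = sum_b att[i, b] * basis[b]
 # -> one (user_in_units, msg_units) projection per rating
 W = th.einsum('rb,bio->rio', self.att, self.basis)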
Example #13
    def __init__(
        self,
        g,
        n_layers: int,
        dim_dict,
        norm: bool = True,
        dropout: float = 0.0,
        aggregator_type: str = 'mean',
        pred: str = 'cos',
        aggregator_hetero: str = 'sum',
        embedding_layer: bool = True,
    ):
        """
        Initialize the ConvModel.

        Parameters
        ----------
        g:
            Graph, only used to query graph metastructure (fetch node types and edge types).
        n_layers:
            Number of ConvLayer.
        dim_dict:
            Dictionary with dimension for all input nodes, hidden dimension (aka embedding dimension), output dimension.
        norm, dropout, aggregator_type:
            See ConvLayer for details.
        aggregator_hetero:
            Since we are working with a heterogeneous graph, nodes receive messages from neighbors of
            different types. These neighborhood messages are computed per edge type, so they must be
            aggregated across edge types.
            Choices are 'mean', 'sum', 'max'.
        embedding_layer:
            Some GNN papers explicitly define an embedding layer, whereas other papers consider the first ConvLayer
            as the "embedding" layer. If true, an explicit embedding layer will be defined (using NodeEmbedding). If
            false, the first ConvLayer will have input dimensions equal to node features.

        """
        super().__init__()
        self.embedding_layer = embedding_layer
        if embedding_layer:
            self.user_embed = NodeEmbedding(dim_dict['user'],
                                            dim_dict['hidden'])
            self.item_embed = NodeEmbedding(dim_dict['item'],
                                            dim_dict['hidden'])
            if 'sport' in g.ntypes:
                self.sport_embed = NodeEmbedding(dim_dict['sport'],
                                                 dim_dict['hidden'])

        self.layers = nn.ModuleList()

        # input layer
        if not embedding_layer:
            self.layers.append(
                dglnn.HeteroGraphConv(
                    {
                        etype[1]: ConvLayer(
                            (dim_dict[etype[0]], dim_dict[etype[2]]),
                            dim_dict['hidden'], dropout, aggregator_type, norm)
                        for etype in g.canonical_etypes
                    },
                    aggregate=aggregator_hetero))

        # hidden layers
        for i in range(n_layers - 2):
            self.layers.append(
                dglnn.HeteroGraphConv(
                    {
                        etype[1]: ConvLayer(
                            (dim_dict['hidden'], dim_dict['hidden']),
                            dim_dict['hidden'], dropout, aggregator_type, norm)
                        for etype in g.canonical_etypes
                    },
                    aggregate=aggregator_hetero))

        # output layer
        self.layers.append(
            dglnn.HeteroGraphConv(
                {
                    etype[1]: ConvLayer(
                        (dim_dict['hidden'], dim_dict['hidden']),
                        dim_dict['out'], dropout, aggregator_type, norm)
                    for etype in g.canonical_etypes
                },
                aggregate=aggregator_hetero))

        if pred == 'cos':
            self.pred_fn = CosinePrediction()
        elif pred == 'nn':
            self.pred_fn = PredictingModule(PredictingLayer, dim_dict['out'])
        else:
            raise KeyError(
                'Prediction function {} not recognized.'.format(pred))
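A minimal, hedged sketch of how such a ConvModel is typically driven with message-flow blocks (NodeEmbedding, ConvLayer, and the prediction modules are project-specific):

    def forward(self, blocks, h):
        # embed raw node features first if an explicit embedding layer is used
        if self.embedding_layer:
            h = {ntype: getattr(self, ntype + '_embed')(feat)
                 for ntype, feat in h.items()}
        # one HeteroGraphConv per message-flow block
        for layer, block in zip(self.layers, blocks):
            h = layer(block, h)
        return h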
Example #14
    def __init__(self, args, config):
        super(NumericHGN, self).__init__()
        self.args = args
        self.config = config
        self.encoder = ContextEncoder(self.args, config)
        self.bi_attn = BiAttention(args, self.config.hidden_size)
        self.bi_attn_linear = nn.Linear(self.config.hidden_size * 4,
                                        self.config.hidden_size)
        self.bi_lstm = nn.LSTM(self.config.hidden_size,
                               self.config.hidden_size,
                               bidirectional=True)
        self.para_node_mlp = nn.Linear(self.config.hidden_size * 2,
                                       self.config.hidden_size)
        self.sent_node_mlp = nn.Linear(self.config.hidden_size * 2,
                                       self.config.hidden_size)
        self.ent_node_mlp = nn.Linear(self.config.hidden_size * 2,
                                      self.config.hidden_size)

        # https://docs.dgl.ai/api/python/nn.pytorch.html#dgl.nn.pytorch.HeteroGraphConv
        # one GATConv per edge type, all with identical dimensions
        self.gat = dglnn.HeteroGraphConv(
            {
                etype: dglnn.GATConv(self.config.hidden_size,
                                     self.config.hidden_size,
                                     num_heads=1)
                for etype in ('ps', 'sp', 'se', 'es', 'pp',
                              'ss', 'qp', 'pq', 'qe', 'eq')
                # TODO: Need (i) bi-directional edges and (ii) more edge types
                # (e.g., question-paragraph, paragraph-paragraph, etc.)
            },
            aggregate='sum'
        )  # TODO: May need to change aggregate function (test it!) -
           # 'sum', 'max', 'min', 'mean', 'stack'.

        self.gated_attn = GatedAttention(self.args, self.config)

        self.para_mlp = nn.Sequential(
            nn.Linear(self.config.hidden_size, self.config.hidden_size),
            nn.Linear(self.config.hidden_size, args.num_paragraphs))
        self.sent_mlp = nn.Sequential(
            nn.Linear(self.config.hidden_size, self.config.hidden_size),
            nn.Linear(self.config.hidden_size, args.num_sentences))
        self.ent_mlp = nn.Sequential(
            nn.Linear(self.config.hidden_size, self.config.hidden_size),
            nn.Linear(self.config.hidden_size, args.num_entities))
        self.span_mlp = nn.Sequential(
            nn.Linear(self.config.hidden_size * 4, self.config.hidden_size),
            nn.Linear(self.config.hidden_size, self.config.num_labels))
        self.answer_type_mlp = nn.Sequential(
            nn.Linear(self.config.hidden_size * 4, self.config.hidden_size),
            nn.Linear(self.config.hidden_size, 3))
Example #15
def test_hetero_conv(agg, idtype):
    g = dgl.heterograph(
        {
            ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
            ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
            ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])
        },
        idtype=idtype,
        device=F.ctx())
    conv = nn.HeteroGraphConv(
        {
            'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
            'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
            'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)
        }, agg)
    conv = conv.to(F.ctx())

    # test pickle (tmp_buffer is a file-like buffer defined elsewhere in the test module)
    th.save(conv, tmp_buffer)

    uf = F.randn((4, 2))
    gf = F.randn((4, 4))
    sf = F.randn((2, 3))

    h = conv(g, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    block = dgl.to_block(g.to(F.cpu()), {
        'user': [0, 1, 2, 3],
        'game': [0, 1, 2, 3],
        'store': []
    }).to(F.ctx())
    h = conv(block, ({
        'user': uf,
        'game': gf,
        'store': sf
    }, {
        'user': uf,
        'game': gf,
        'store': sf[0:0]
    }))
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    h = conv(block, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    # test with mod args
    class MyMod(th.nn.Module):
        def __init__(self, s1, s2):
            super(MyMod, self).__init__()
            self.carg1 = 0
            self.carg2 = 0
            self.s1 = s1
            self.s2 = s2

        def forward(self, g, h, arg1=None, *, arg2=None):
            if arg1 is not None:
                self.carg1 += 1
            if arg2 is not None:
                self.carg2 += 1
            return th.zeros((g.number_of_dst_nodes(), self.s2))

    mod1 = MyMod(2, 3)
    mod2 = MyMod(2, 4)
    mod3 = MyMod(3, 4)
    conv = nn.HeteroGraphConv({
        'follows': mod1,
        'plays': mod2,
        'sells': mod3
    }, agg)
    conv = conv.to(F.ctx())
    mod_args = {'follows': (1, ), 'plays': (1, )}
    mod_kwargs = {'sells': {'arg2': 'abc'}}
    h = conv(g, {
        'user': uf,
        'game': gf,
        'store': sf
    },
             mod_args=mod_args,
             mod_kwargs=mod_kwargs)
    assert mod1.carg1 == 1
    assert mod1.carg2 == 0
    assert mod2.carg1 == 1
    assert mod2.carg2 == 0
    assert mod3.carg1 == 0
    assert mod3.carg2 == 1

    # conv on a graph without any edges
    for etype in g.etypes:
        g = dgl.remove_edges(g, g.edges(form='eid', etype=etype), etype=etype)
    assert g.num_edges() == 0
    h = conv(g, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}

    block = dgl.to_block(g.to(F.cpu()), {
        'user': [0, 1, 2, 3],
        'game': [0, 1, 2, 3],
        'store': []
    }).to(F.ctx())
    h = conv(block, ({
        'user': uf,
        'game': gf,
        'store': sf
    }, {
        'user': uf,
        'game': gf,
        'store': sf[0:0]
    }))
    assert set(h.keys()) == {'user', 'game'}
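Note on the 'stack' shapes asserted above: HeteroGraphConv stacks per-edge-type results along dimension 1, one slice per edge type that delivers messages to that node type, so 'user' (reached only via 'follows') gets (4, 1, 3) while 'game' (reached via both 'plays' and 'sells') gets (4, 2, 4).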
Example #16
def test_hetero_conv(agg, idtype):
    g = dgl.heterograph(
        {
            ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
            ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
            ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])
        },
        idtype=idtype,
        device=F.ctx())
    conv = nn.HeteroGraphConv(
        {
            'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
            'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
            'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)
        }, agg)
    conv = conv.to(F.ctx())
    uf = F.randn((4, 2))
    gf = F.randn((4, 4))
    sf = F.randn((2, 3))

    h = conv(g, {'user': uf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 1, 4)

    h = conv(g, {'user': uf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    h = conv(g, {'store': sf})
    assert set(h.keys()) == {'game'}
    if agg != 'stack':
        assert h['game'].shape == (4, 4)
    else:
        assert h['game'].shape == (4, 1, 4)

    # test with pair input
    conv = nn.HeteroGraphConv(
        {
            'follows': nn.SAGEConv(2, 3, 'mean'),
            'plays': nn.SAGEConv((2, 4), 4, 'mean'),
            'sells': nn.SAGEConv(3, 4, 'mean')
        }, agg)
    conv = conv.to(F.ctx())

    h = conv(g, ({'user': uf}, {'user': uf, 'game': gf}))
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 1, 4)

    # with pair input, a relation is computed only when features for both its
    # src and dst node types are provided
    h = conv(g, ({'user': uf}, {'game': gf}))
    assert set(h.keys()) == {'game'}
    if agg != 'stack':
        assert h['game'].shape == (4, 4)
    else:
        assert h['game'].shape == (4, 1, 4)

    # test with mod args
    class MyMod(th.nn.Module):
        def __init__(self, s1, s2):
            super(MyMod, self).__init__()
            self.carg1 = 0
            self.carg2 = 0
            self.s1 = s1
            self.s2 = s2

        def forward(self, g, h, arg1=None, *, arg2=None):
            if arg1 is not None:
                self.carg1 += 1
            if arg2 is not None:
                self.carg2 += 1
            return th.zeros((g.number_of_dst_nodes(), self.s2))

    mod1 = MyMod(2, 3)
    mod2 = MyMod(2, 4)
    mod3 = MyMod(3, 4)
    conv = nn.HeteroGraphConv({
        'follows': mod1,
        'plays': mod2,
        'sells': mod3
    }, agg)
    conv = conv.to(F.ctx())
    mod_args = {'follows': (1, ), 'plays': (1, )}
    mod_kwargs = {'sells': {'arg2': 'abc'}}
    h = conv(g, {
        'user': uf,
        'store': sf
    },
             mod_args=mod_args,
             mod_kwargs=mod_kwargs)
    assert mod1.carg1 == 1
    assert mod1.carg2 == 0
    assert mod2.carg1 == 1
    assert mod2.carg2 == 0
    assert mod3.carg1 == 0
    assert mod3.carg2 == 1
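For reference, the same pattern outside the test harness (F above is DGL's backend shim); a minimal self-contained sketch with arbitrarily chosen feature sizes:

import dgl
import dgl.nn.pytorch as dglnn
import torch as th

g = dgl.heterograph({
    ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
    ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
    ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2]),
})
conv = dglnn.HeteroGraphConv({
    'follows': dglnn.GraphConv(2, 3, allow_zero_in_degree=True),
    'plays': dglnn.GraphConv(2, 4, allow_zero_in_degree=True),
    'sells': dglnn.GraphConv(3, 4, allow_zero_in_degree=True),
}, aggregate='sum')

h = conv(g, {'user': th.randn(4, 2),
             'game': th.randn(4, 4),
             'store': th.randn(2, 3)})
# only destination types that receive messages appear: 'user' and 'game'
print({k: v.shape for k, v in h.items()})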
Example #17

class Multi_level(nn.Module):
    def __init__(self):
        super(Multi_level, self).__init__()
        self.micro_layer = None
        self.macro_layer = None

    def forward(self):
        return


import dgl.nn.pytorch as dglnn
conv = dglnn.HeteroGraphConv({
    'follows' : dglnn.GraphConv(...),
    'plays' : dglnn.GraphConv(...),
    'sells' : dglnn.SAGEConv(...)},
    aggregate='sum')
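The module arguments are elided in the documentation snippet above; a concrete (hypothetical) instantiation with arbitrary feature sizes might look like:

conv = dglnn.HeteroGraphConv({
    'follows' : dglnn.GraphConv(16, 32),
    'plays' : dglnn.GraphConv(16, 32),
    'sells' : dglnn.SAGEConv(16, 32, 'mean')},
    aggregate='sum')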


from openhgnn.models.micro_layer.LSTM_conv import LSTMConv
class HGConvLayer(nn.Module):
    def __init__(self, graph: dgl.DGLHeteroGraph, input_dim: int, hidden_dim: int, n_heads: int = 4,
                 dropout: float = 0.2, residual: bool = True):
        """
        :param graph: a heterogeneous graph
        :param input_dim: int, input dimension
        :param hidden_dim: int, hidden dimension
        :param n_heads: int, number of attention heads
        :param dropout: float, dropout rate
        :param residual: boolean, residual connections or not