Example #1
 def __init__(self, dim, activation=tf.nn.relu, renorm=False, **kwargs):
     super(SingleAttentionAggregator, self).__init__(**kwargs)
     # One projection to `dim`, plus two single-unit layers that
     # produce attention logits for the self and neighbor embeddings.
     self.dense = layers.Dense(dim, use_bias=False)
     self.self_layer = layers.Dense(1, use_bias=False)
     self.neigh_layer = layers.Dense(1, use_bias=False)
     self.activation = activation
     self.renorm = renorm
Example #2
 def __init__(self, dim, activation=tf.nn.relu, concat=False, **kwargs):
     super(MeanAggregator, self).__init__(**kwargs)
     # When self and neighbor outputs are concatenated, each branch
     # gets half of the requested output dimension.
     if concat:
         dim //= 2
     self.concat = concat
     self.self_layer = layers.Dense(dim,
                                    activation=activation,
                                    use_bias=False)
     self.neigh_layer = layers.Dense(dim,
                                     activation=activation,
                                     use_bias=False)
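
For context, these two sub-layers typically implement GraphSAGE-style mean aggregation. Below is a minimal sketch of that pattern, assuming TensorFlow 2 with tf.keras.layers.Dense and toy tensor shapes; the library's actual call logic may differ.

import tensorflow as tf

# Toy shapes: 4 nodes, 3 sampled neighbors each, 8 input features.
self_vecs = tf.random.normal([4, 8])
neigh_vecs = tf.random.normal([4, 3, 8])

dim, concat = 16, True
if concat:
    dim //= 2  # each branch contributes half of the output width

self_layer = tf.keras.layers.Dense(dim, activation=tf.nn.relu, use_bias=False)
neigh_layer = tf.keras.layers.Dense(dim, activation=tf.nn.relu, use_bias=False)

from_self = self_layer(self_vecs)                         # [4, dim]
from_neighs = neigh_layer(tf.reduce_mean(neigh_vecs, 1))  # [4, dim]

out = tf.concat([from_self, from_neighs], 1)  # [4, 16] when concat

Halving dim before building the layers is what keeps the concatenated output at the requested width.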
Example #3
 def __init__(self, dim, activation=tf.nn.relu, concat=False, **kwargs):
     super(BaseAggregator, self).__init__(**kwargs)
     if concat:
         if dim % 2:
             raise ValueError('dim must be divisible by 2 '
                              'when concat is True.')
         dim //= 2
     self.concat = concat
     self.self_layer = layers.Dense(dim,
                                    activation=activation,
                                    use_bias=False)
     self.neigh_layer = layers.Dense(dim,
                                     activation=activation,
                                     use_bias=False)
Example #4
 def fm(self, hidden):
     # FM-style pairwise interactions: multiply the first hidden
     # state with each later one, then concatenate the originals
     # with the interaction terms. Build a new list so the caller's
     # `hidden` is not mutated in place.
     combs = [tf.multiply(hidden[0], h) for h in hidden[1:]]
     fc = tf.concat(hidden + combs, 1)
     # Note: constructing Dense here creates fresh weights per call.
     ld = layers.Dense(self.dims[-1], activation=tf.nn.relu, use_bias=True)
     return ld(fc)
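
To make the interaction step concrete, here is a toy sketch of what the loop above computes, assuming TensorFlow 2 eager mode; shapes and values are illustrative only.

import tensorflow as tf

# Three hypothetical hidden states of width 4 for a batch of 2.
hidden = [tf.fill([2, 4], v) for v in (1.0, 2.0, 3.0)]

# FM-style interactions: hidden[0] * hidden[i] for every i >= 1.
combs = [tf.multiply(hidden[0], h) for h in hidden[1:]]
fc = tf.concat(hidden + combs, 1)
print(fc.shape)  # (2, 20): 3 originals + 2 interaction terms, width 4 each

With three inputs of width 4, the concatenation yields 3 originals plus 2 interaction terms, i.e. width 20, before the final Dense projection.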
Example #5
 def __init__(self, metapath, dim, aggregator='attention',
              feature_idx=-1, feature_dim=0, max_id=-1, use_id=False,
              sparse_feature_idx=-1, sparse_feature_max_id=-1,
              embedding_dim=16, use_hash_embedding=False,
              use_residual=False, head_num=4,
              **kwargs):
     super(GenieEncoder, self).__init__(metapath, dim,
                                        aggregator,
                                        feature_idx,
                                        feature_dim,
                                        max_id,
                                        use_id,
                                        sparse_feature_idx,
                                        sparse_feature_max_id,
                                        embedding_dim,
                                        use_hash_embedding,
                                        use_residual,
                                        head_num, **kwargs)
     self.dim = dim
     self.depth_fc = []
     # One Dense projection per depth (input plus num_layers hops).
     for _ in range(self.num_layers + 1):
         self.depth_fc.append(layers.Dense(dim))
Example #6
 def __init__(self, dim, activation=tf.nn.relu, renorm=False, **kwargs):
     super(GCNAggregator, self).__init__(**kwargs)
     # A single shared Dense transforms the aggregated self and
     # neighbor embeddings; renorm toggles the normalization scheme.
     self.renorm = renorm
     self.dense = layers.Dense(dim, activation=activation, use_bias=False)
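
The renorm flag presumably refers to the renormalization trick from the GCN paper (Kipf & Welling); that interpretation is an assumption here, but the trick itself is standard. A dense-matrix sketch on a toy graph:

import tensorflow as tf

# Toy symmetric adjacency for a 3-node path graph (no self-loops).
adj = tf.constant([[0., 1., 0.],
                   [1., 0., 1.],
                   [0., 1., 0.]])

# Renormalization trick: A_hat = D^-1/2 (A + I) D^-1/2.
adj_hat = adj + tf.eye(3)
deg = tf.reduce_sum(adj_hat, axis=1)
d_inv_sqrt = tf.linalg.diag(tf.math.rsqrt(deg))
adj_norm = d_inv_sqrt @ adj_hat @ d_inv_sqrt  # used to average neighbors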
Example #7
 def fc(self, hidden):
     # Concatenate all hidden states and project to the final width.
     fc = tf.concat(hidden, 1)
     # Note: constructing Dense here creates fresh weights per call.
     ld = layers.Dense(self.dims[-1], activation=tf.nn.relu, use_bias=True)
     return ld(fc)
Example #8
    def __init__(self, dim=None, feature_idx='f1', feature_dim=0, max_id=-1,
                 sparse_feature_idx=-1, sparse_feature_max_id=-1,
                 embedding_dim=16, use_hash_embedding=False, combiner='concat',
                 **kwargs):
        super(ShallowEncoder, self).__init__(**kwargs)

        if combiner not in ['add', 'concat']:
            raise ValueError("combiner must be 'add' or 'concat'.")
        if combiner == 'add' and dim is None:
            raise ValueError("dim must be provided when combiner is 'add'.")

        use_feature = feature_idx != -1
        use_id = max_id != -1
        use_sparse_feature = sparse_feature_idx != -1

        if not isinstance(feature_idx, list) and use_feature:
            feature_idx = [feature_idx]
        if isinstance(feature_dim, int) and use_feature:
            feature_dim = [feature_dim]
        if use_feature and len(feature_idx) != len(feature_dim):
            raise ValueError('feature_dim must be the same length as '
                             'feature_idx. idx: %s, dim: %s' %
                             (str(feature_idx), str(feature_dim)))

        if isinstance(sparse_feature_idx, int) and use_sparse_feature:
            sparse_feature_idx = [sparse_feature_idx]
        if isinstance(sparse_feature_max_id, int) and use_sparse_feature:
            sparse_feature_max_id = [sparse_feature_max_id]
        if use_sparse_feature and \
           len(sparse_feature_idx) != len(sparse_feature_max_id):
            raise ValueError('sparse_feature_idx must be the same length as '
                             'sparse_feature_max_id.')

        embedding_num = (1 if use_id else 0) + \
                        (len(sparse_feature_idx) if use_sparse_feature else 0)

        if combiner == 'add':
            embedding_dim = dim
        if isinstance(embedding_dim, int) and embedding_num:
            embedding_dim = [embedding_dim] * embedding_num
        if embedding_num and len(embedding_dim) != embedding_num:
            raise ValueError('length of embedding_dim must equal '
                             'int(use_id) + len(sparse_feature_idx).')

        if isinstance(use_hash_embedding, bool) and embedding_num:
            use_hash_embedding = [use_hash_embedding] * embedding_num
        if embedding_num and len(use_hash_embedding) != embedding_num:
            raise ValueError('length of use_hash_embedding must equal '
                             'int(use_id) + len(sparse_feature_idx).')

        # model architecture
        self.dim = dim
        self.use_id = use_id
        self.use_feature = use_feature
        self.use_sparse_feature = use_sparse_feature
        self.combiner = combiner

        # feature fetching parameters
        self.feature_idx = feature_idx
        self.feature_dim = feature_dim
        self.sparse_feature_idx = sparse_feature_idx
        self.sparse_feature_max_id = sparse_feature_max_id
        self.embedding_dim = embedding_dim

        # sub-layers
        if dim:
            self.dense = layers.Dense(self.dim, use_bias=False)

        if use_id:
            embedding_class = \
                layers.HashEmbedding if use_hash_embedding[0] \
                else layers.Embedding
            self.embedding = embedding_class(max_id + 1, embedding_dim[0])
            embedding_dim = embedding_dim[1:]
            use_hash_embedding = use_hash_embedding[1:]
        if use_sparse_feature:
            self.sparse_embeddings = []
            # Loop variables renamed so they do not shadow the max_id
            # and dim arguments used earlier in this constructor.
            for sparse_max_id, sparse_dim, use_hash in zip(
                  sparse_feature_max_id, embedding_dim, use_hash_embedding):
                sparse_embedding_class = \
                    layers.HashSparseEmbedding if use_hash \
                    else layers.SparseEmbedding
                self.sparse_embeddings.append(
                    sparse_embedding_class(sparse_max_id + 1, sparse_dim))
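
The constructor above spends most of its length normalizing scalar arguments into per-slot lists. A standalone sketch of that broadcast convention, with hypothetical values and no Euler dependency:

use_id = True
sparse_feature_idx = [2, 5]   # two sparse feature slots (hypothetical)
embedding_dim = 16            # scalar broadcasts to every embedding slot

embedding_num = int(use_id) + len(sparse_feature_idx)
if isinstance(embedding_dim, int):
    embedding_dim = [embedding_dim] * embedding_num
assert embedding_dim == [16, 16, 16]
# Slot 0 feeds the id embedding; remaining slots feed sparse embeddings.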
Example #9
 def __init__(self, dim, *args, **kwargs):
     super(BasePoolAggregator, self).__init__(dim, *args, **kwargs)
     # Single-layer MLP applied to inputs before the pooling step.
     self.layers = [layers.Dense(dim, activation=tf.nn.relu)]
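
In the usual pooling-aggregator pattern, a Dense layer like the one created here is applied to every neighbor independently before a pooling reduction. A hedged sketch of that pattern with toy tensors and tf.keras; the library's actual call method may differ:

import tensorflow as tf

neigh_vecs = tf.random.normal([4, 3, 8])  # 4 nodes, 3 neighbors, 8 features
mlp = tf.keras.layers.Dense(16, activation=tf.nn.relu)

# Transform each neighbor independently (Dense maps the last axis),
# then pool across the neighbor axis; max pooling shown here.
pooled = tf.reduce_max(mlp(neigh_vecs), axis=1)  # [4, 16]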