Code Example #1
    def multi_head_attention(self, inputs, new_embed_size):
        # Shape notation in the comments below: B = batch size, F = number of
        # feature fields, K = new_embed_size (per-head dim), H = self.num_heads.
        multi_embed_size = self.num_heads * new_embed_size
        # B * F * (K*H)
        queries = tf.layers.dense(inputs=inputs,
                                  units=multi_embed_size,
                                  activation=None,
                                  kernel_initializer=truncated_normal(
                                      0.0, 0.01),
                                  use_bias=False)
        keys = tf.layers.dense(inputs=inputs,
                               units=multi_embed_size,
                               activation=None,
                               kernel_initializer=truncated_normal(
                                   0.0, 0.01),
                               use_bias=False)
        values = tf.layers.dense(inputs=inputs,
                                 units=multi_embed_size,
                                 activation=None,
                                 kernel_initializer=truncated_normal(
                                     0.0, 0.01),
                                 use_bias=False)
        if self.use_residual:
            residual = tf.layers.dense(inputs=inputs,
                                       units=multi_embed_size,
                                       activation=None,
                                       kernel_initializer=truncated_normal(
                                           0.0, 0.01),
                                       use_bias=False)

        # H * B * F * K
        queries = tf.stack(tf.split(queries, self.num_heads, axis=2))
        keys = tf.stack(tf.split(keys, self.num_heads, axis=2))
        values = tf.stack(tf.split(values, self.num_heads, axis=2))

        # H * B * F * F
        weights = queries @ tf.transpose(keys, [0, 1, 3, 2])
        # weights = weights / np.sqrt(new_embed_size)
        weights = tf.nn.softmax(weights)
        # H * B * F * K
        outputs = weights @ values
        # 1 * B * F * (K*H)
        outputs = tf.concat(tf.split(outputs, self.num_heads, axis=0), axis=-1)
        # B * F * (K*H)
        outputs = tf.squeeze(outputs, axis=0)
        if self.use_residual:
            outputs += residual
        outputs = tf.nn.relu(outputs)
        return outputs
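
A minimal shape check of the head split/merge trick used above, with assumed toy sizes (B=2, F=5, K=4, H=3); this is an illustration only, not part of the original model:

import tensorflow as tf

x = tf.zeros([2, 5, 3 * 4])                              # B * F * (K*H)
heads = tf.stack(tf.split(x, 3, axis=2))                 # H * B * F * K
merged = tf.concat(tf.split(heads, 3, axis=0), axis=-1)  # 1 * B * F * (K*H)
merged = tf.squeeze(merged, axis=0)                      # B * F * (K*H)
print(heads.shape, merged.shape)                         # (3, 2, 5, 4) (2, 5, 12)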
Code Example #2
File: detnet.py  Project: oarriaga/paz
def net2D(features, num_keypoints, name):
    x = block(features, 256, 3, 1, name + '/project')
    heat_map = Conv2D(num_keypoints,
                      1,
                      strides=1,
                      padding='SAME',
                      activation='sigmoid',
                      name=name + '/prediction/conv2d',
                      kernel_initializer=truncated_normal(stddev=0.01))(x)
    return heat_map
Code Example #3
    def _build_user_item(self):
        self.user_indices = tf.placeholder(tf.int32, shape=[None])
        self.item_indices = tf.placeholder(tf.int32, shape=[None])

        user_feat = tf.get_variable(name="user_feat",
                                    shape=[self.n_users, self.embed_size],
                                    initializer=truncated_normal(0.0, 0.01),
                                    regularizer=self.reg)
        item_feat = tf.get_variable(name="item_feat",
                                    shape=[self.n_items, self.embed_size],
                                    initializer=truncated_normal(0.0, 0.01),
                                    regularizer=self.reg)

        user_embed = tf.expand_dims(
            tf.nn.embedding_lookup(user_feat, self.user_indices), axis=1)
        item_embed = tf.expand_dims(
            tf.nn.embedding_lookup(item_feat, self.item_indices), axis=1)
        self.concat_embed.extend([user_embed, item_embed])
Code Example #4
    def _build_sparse(self):
        self.sparse_indices = tf.placeholder(
            tf.int32, shape=[None, self.sparse_field_size])

        sparse_feat = tf.get_variable(
            name="sparse_feat",
            shape=[self.sparse_feature_size, self.embed_size],
            initializer=truncated_normal(0.0, 0.01),
            regularizer=self.reg)

        sparse_embed = tf.nn.embedding_lookup(sparse_feat, self.sparse_indices)
        self.concat_embed.append(sparse_embed)
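
For reference, a small sketch of what the lookup above produces, with assumed toy sizes (not from the original project): 2-D indices of shape B * F_sparse gather rows of the (sparse_feature_size, K) table, yielding a B * F_sparse * K tensor.

import tensorflow as tf

table = tf.random.normal([10, 4])           # sparse_feature_size=10, K=4
idx = tf.constant([[1, 3, 7], [0, 2, 9]])   # B=2, F_sparse=3
embed = tf.nn.embedding_lookup(table, idx)
print(embed.shape)                          # (2, 3, 4)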
Code Example #5
File: detnet.py  Project: oarriaga/paz
def net3D(features, num_keypoints, name, need_norm=False):
    x = block(features, 256, 3, 1, name + '/project')
    delta_map = Conv2D(num_keypoints * 3,
                       1,
                       strides=1,
                       padding='SAME',
                       name=name + '/prediction/conv2d',
                       kernel_initializer=truncated_normal(stddev=0.01))(x)
    if need_norm:
        delta_map_norm = tf.norm(delta_map, axis=-1, keepdims=True)
        delta_map = delta_map / tf.maximum(delta_map_norm, 1e-6)

    H, W = features.get_shape()[1:3]
    delta_map = Reshape([H, W, num_keypoints, 3])(delta_map)
    if need_norm:
        return delta_map, delta_map_norm
    return delta_map
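
A short shape sketch of the final Reshape in net3D, with assumed sizes (H=W=32, num_keypoints=21; illustration only): the flat delta map becomes one 3-D offset per keypoint per spatial location.

import tensorflow as tf
from tensorflow.keras.layers import Reshape

delta = tf.zeros([1, 32, 32, 21 * 3])       # B * H * W * (num_keypoints*3)
delta = Reshape([32, 32, 21, 3])(delta)     # B * H * W * num_keypoints * 3
print(delta.shape)                          # (1, 32, 32, 21, 3)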
Code Example #6
    def _build_dense(self):
        self.dense_values = tf.placeholder(tf.float32,
                                           shape=[None, self.dense_field_size])
        dense_values_reshape = tf.reshape(self.dense_values,
                                          [-1, self.dense_field_size, 1])

        dense_feat = tf.get_variable(
            name="dense_feat",
            shape=[self.dense_field_size, self.embed_size],
            initializer=truncated_normal(0.0, 0.01),
            regularizer=self.reg)

        batch_size = tf.shape(self.dense_values)[0]
        # 1 * F_dense * K
        dense_embed = tf.expand_dims(dense_feat, axis=0)
        # B * F_dense * K
        dense_embed = tf.tile(dense_embed, [batch_size, 1, 1])
        dense_embed = tf.multiply(dense_embed, dense_values_reshape)
        self.concat_embed.append(dense_embed)
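
A toy illustration of the dense-feature embedding above (assumed sizes, not part of the original class): each dense field owns one learned K-dimensional vector, which is scaled by that field's raw value for every sample in the batch.

import tensorflow as tf

dense_feat = tf.ones([3, 4])                          # F_dense=3, K=4
values = tf.constant([[0.5, 2.0, -1.0]])              # B=1, F_dense=3
embed = tf.expand_dims(dense_feat, axis=0)            # 1 * F_dense * K
embed = tf.tile(embed, [tf.shape(values)[0], 1, 1])   # B * F_dense * K
embed = embed * tf.reshape(values, [-1, 3, 1])        # scale each field vector
print(embed[0, :, 0])                                 # 0.5, 2.0, -1.0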
Code Example #7
    def _build_sparse(self):
        self.sparse_indices = tf.placeholder(
            tf.int32, shape=[None, self.sparse_field_size])

        sparse_feat = tf.get_variable(
            name="sparse_feat",
            shape=[self.sparse_feature_size, self.embed_size],
            initializer=truncated_normal(0.0, 0.01),
            regularizer=self.reg)

        if (self.data_info.multi_sparse_combine_info
                and self.multi_sparse_combiner in ("sum", "mean", "sqrtn")):
            sparse_embed = multi_sparse_combine_embedding(
                self.data_info, sparse_feat, self.sparse_indices,
                self.multi_sparse_combiner, self.embed_size)
        else:
            sparse_embed = tf.nn.embedding_lookup(sparse_feat, self.sparse_indices)

        self.concat_embed.append(sparse_embed)
Code Example #8
def dense(x, num_units):
    x = Dense(num_units,
              activation=None,
              kernel_regularizer=l2(0.5 * 1.0),
              kernel_initializer=truncated_normal(stddev=0.01))(x)
    return x
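
All of these excerpts assume truncated_normal is already bound to TensorFlow's truncated-normal initializer; the import is not shown in the snippets. A minimal, assumed set of aliases under which the last example runs (the truncated_normal(0.0, 0.01) calls in the other examples would instead use tf.compat.v1.truncated_normal_initializer):

import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.regularizers import l2
from tensorflow.keras.initializers import TruncatedNormal as truncated_normal

x = tf.zeros([8, 16])          # a batch of 8 input vectors
y = dense(x, num_units=32)     # -> shape (8, 32)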