Example #1
    def call(self, inputs):
        if not hasattr(self, 'kernel'):
            # Lazily bind the kernel to the named Embedding layer's weights on the first call.
            embedding_layer = inputs._keras_history[0]

            if embedding_layer.name != self.embedding_name:

                def recursive_search(layer):
                    """递归向上搜索,根据名字找Embedding层
                    """
                    last_layer = layer._inbound_nodes[0].inbound_layers
                    if isinstance(last_layer, list):
                        if len(last_layer) == 0:
                            return None
                        else:
                            last_layer = last_layer[0]
                    if last_layer.name == self.embedding_name:
                        return last_layer
                    else:
                        return recursive_search(last_layer)

                embedding_layer = recursive_search(embedding_layer)
                if embedding_layer is None:
                    raise Exception('Embedding layer not found')

            self.kernel = K.transpose(embedding_layer.embeddings)
            self.units = K.int_shape(self.kernel)[1]
            self.bias = self.add_weight(name='bias',
                                        shape=(self.units,),
                                        initializer='zeros')

        outputs = K.dot(inputs, self.kernel)
        outputs = K.bias_add(outputs, self.bias)
        outputs = self.activation(outputs)
        return outputs
Example #2
    def call(self, inputs):
        # Split the last axis into (features_per_group, groups), apply one
        # kernel block per group via einsum, then merge the group outputs
        # back into a single axis of `self.units` features.
        ndim, shape = K.ndim(inputs), K.shape(inputs)
        shape = [shape[i] for i in range(ndim)]
        inputs = K.reshape(
            inputs, shape[:-1] + [shape[-1] // self.groups, self.groups])
        outputs = tf.einsum('...ig,ijg->...gj', inputs, self.kernel)
        outputs = K.reshape(outputs, shape[:-1] + [self.units])
        if self.use_bias:
            outputs = K.bias_add(outputs, self.bias)
        outputs = self.activation(outputs)
        return outputs
Example #3
    def call(self, inputs):
        if not hasattr(self, 'kernel'):
            embedding_layer = search_layer(inputs, self.embedding_name)
            if embedding_layer is None:
                raise Exception('Embedding layer not found')

            self.kernel = K.transpose(embedding_layer.embeddings)
            self.units = K.int_shape(self.kernel)[1]
            self.bias = self.add_weight(name='bias',
                                        shape=(self.units, ),
                                        initializer='zeros')

        outputs = K.dot(inputs, self.kernel)
        outputs = K.bias_add(outputs, self.bias)
        outputs = self.activation(outputs)
        return outputs
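Examples #1 and #3 both implement weight tying: instead of allocating a new projection matrix, the dense kernel is the transpose of an Embedding layer's `embeddings` variable (located by name via the graph search or `search_layer`), so the vocabulary logits share parameters with the input embedding. Below is a stripped-down sketch of the same idea with the embedding layer passed in directly rather than searched for; the class name `TiedOutputDense` and its constructor are illustrative assumptions, not the original class.

from tensorflow import keras
from tensorflow.keras import backend as K


class TiedOutputDense(keras.layers.Layer):
    """Projects hidden states to vocabulary logits with a tied embedding matrix."""

    def __init__(self, embedding_layer, activation='softmax', **kwargs):
        super(TiedOutputDense, self).__init__(**kwargs)
        self.embedding_layer = embedding_layer
        self.activation = keras.activations.get(activation)

    def build(self, input_shape):
        # One bias per vocabulary entry; the kernel itself is borrowed in call().
        self.bias = self.add_weight(name='bias',
                                    shape=(self.embedding_layer.input_dim,),
                                    initializer='zeros')
        super(TiedOutputDense, self).build(input_shape)

    def call(self, inputs):
        # Kernel = transposed embedding matrix, shape (hidden_dim, vocab_size).
        kernel = K.transpose(self.embedding_layer.embeddings)
        outputs = K.dot(inputs, kernel)
        outputs = K.bias_add(outputs, self.bias)
        return self.activation(outputs)


# Hypothetical usage:
# tokens = keras.Input(shape=(None,), dtype='int32')
# emb = keras.layers.Embedding(30000, 768, name='Embedding-Token')
# h = emb(tokens)                      # ... encoder layers in between ...
# logits = TiedOutputDense(emb)(h)     # (batch, seq_len, 30000)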
Example #4
    def call(self, x):
        x, mask = x
        mask = K.squeeze(mask, axis=2)
        # linear
        key = K.bias_add(K.dot(x, self.weight), self.bias)

        # compute attention
        outputs = K.squeeze(K.dot(key, self.query), axis=2)
        outputs -= 1e32 * (1 - mask)  # push masked positions toward -inf before the softmax

        attn_scores = K.softmax(outputs)
        attn_scores *= mask
        attn_scores = K.reshape(attn_scores,
                                shape=(-1, 1, attn_scores.shape[-1]))

        outputs = K.squeeze(K.batch_dot(attn_scores, key), axis=1)

        return outputs
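Example #4 is masked attention pooling over a sequence: a linear projection produces keys, a learned query scores every time step, masked positions are pushed toward -inf before the softmax, and the weighted sum of the keys is returned as one vector per sample. The weights it relies on are not shown, so here is a hypothetical constructor and `build` that are shape-consistent with the `call` above; `attention_units` (the key width) is an assumed hyperparameter and the initializers are guesses.

    def __init__(self, attention_units, **kwargs):
        super().__init__(**kwargs)
        self.attention_units = attention_units  # width of the key space

    def build(self, input_shape):
        x_shape, mask_shape = input_shape  # the layer is called on [x, mask]
        input_dim = x_shape[-1]
        # key = x @ weight + bias  ->  (batch, time, attention_units)
        self.weight = self.add_weight(name='weight',
                                      shape=(input_dim, self.attention_units),
                                      initializer='glorot_uniform')
        self.bias = self.add_weight(name='bias',
                                    shape=(self.attention_units,),
                                    initializer='zeros')
        # query scores each time step: key @ query -> (batch, time, 1)
        self.query = self.add_weight(name='query',
                                     shape=(self.attention_units, 1),
                                     initializer='glorot_uniform')
        super().build(input_shape)

With these shapes, the `call` above returns a tensor of shape (batch, attention_units): one pooled key vector per sample.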
Example #5
    def call(self, x):
        x, mask = x
        # self.weight is only 2-D, so the mask's extra dimension is removed here
        mask = K.squeeze(mask, axis=2)  # drop axis 2; the data itself is unchanged
        # linear transformation
        # K.dot() performs the matrix multiplication, then self.bias is added
        key = K.bias_add(K.dot(x, self.weight), self.bias)

        # compute attention
        outputs = K.squeeze(K.dot(key, self.query), axis=2)  # compute attention scores
        outputs -= 1e32 * (1 - mask)

        attn_scores = K.softmax(outputs)  # normalize the scores with softmax
        attn_scores *= mask
        attn_scores = K.reshape(attn_scores,
                                shape=(-1, 1, attn_scores.shape[-1]))

        outputs = K.squeeze(K.batch_dot(attn_scores, key), axis=1)

        return outputs
Example #6
    def call(self, inputs):
        return K.bias_add(inputs, self.bias)
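Example #6 is the simplest use of `K.bias_add`: the layer just adds a trainable bias vector to its input. For completeness, here is a minimal self-contained sketch of such a layer, with the bias created in `build`; the class name `BiasAdd` is an assumption.

from tensorflow import keras
from tensorflow.keras import backend as K


class BiasAdd(keras.layers.Layer):
    """Adds a trainable bias along the last axis of the input."""

    def build(self, input_shape):
        # One bias value per feature on the last axis.
        self.bias = self.add_weight(name='bias',
                                    shape=(input_shape[-1],),
                                    initializer='zeros')
        super(BiasAdd, self).build(input_shape)

    def call(self, inputs):
        return K.bias_add(inputs, self.bias)


# Hypothetical usage:
# x = keras.Input(shape=(16,))
# y = BiasAdd()(x)                     # same shape, with a learnable offset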