Example #1
    def _tl_similarity(self, x, y):
        # x · W: resulting shape = [batch_size, num_tensor, state_size]
        tmp = tf.tensordot(x, self._tensor_W, axes=[[1], [1]])
        # x · W · y: resulting shape = [batch_size, num_tensor, batch_size]
        tmp = tf.tensordot(tmp, y, axes=[[2], [1]])
        # Transpose for convenience: shape = [num_tensor, batch_size, batch_size]
        tmp = tf.transpose(tmp, [1, 0, 2])

        # Build the identity matrices we need: shape = [num_tensor, batch_size, batch_size]
        eye = tf.eye(self._batch_size, batch_shape=[self._num_tensor])
        # Keep only the diagonal entries (x_i paired with y_i): shape = [num_tensor, batch_size, batch_size]
        tmp = tf.multiply(tmp, eye)
        # Sum out the masked axis to get the first part of the result: shape = [num_tensor, batch_size]
        tmp = tf.reduce_sum(tmp, axis=1)
        # Transpose for the computation below: shape = [batch_size, num_tensor]
        tmp = tf.transpose(tmp, [1, 0])

        # Second part of the tensor-similarity formula: V · [x, y]^T
        cont = tf.concat([x, y], axis=1)
        v = tf.matmul(cont, self._tensor_V)

        result = tmp + v + self._tensor_b
        result = ph.lrelu(result)

        return result
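
For intuition, here is a minimal NumPy sketch (not part of the original; the sizes B, K, S and all names are made up) checking that the tensordot → diagonal-mask → sum pipeline above computes the bilinear term x_b^T W_k y_b of the tensor-similarity formula, per example and per tensor slice:

import numpy as np

B, K, S = 4, 3, 5  # batch_size, num_tensor, state_size (made-up sizes)
rng = np.random.default_rng(0)
x = rng.normal(size=(B, S))
y = rng.normal(size=(B, S))
W = rng.normal(size=(K, S, S))  # plays the role of self._tensor_W

# Direct form: for every example b and slice k, compute x_b^T W_k y_b.
direct = np.einsum('bs,kst,bt->bk', x, W, y)   # [B, K]

# Same result via the tensordot + diagonal-masking route of _tl_similarity.
tmp = np.tensordot(x, W, axes=([1], [1]))      # [B, K, S]
tmp = np.tensordot(tmp, y, axes=([2], [1]))    # [B, K, B]
tmp = tmp.transpose(1, 0, 2)                   # [K, B, B]
eye = np.broadcast_to(np.eye(B), (K, B, B))
masked = (tmp * eye).sum(axis=1).T             # [B, K]

assert np.allclose(direct, masked)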
Example #2
    def _build(self):
        # Network modules --- build
        self._emb = photinia.Linear('EMB', self._voc_size, self._emb_size)
        self._cell = photinia.GRUCell('CELL', self._emb_size, self._state_size)
        self._lin = photinia.Linear('LIN', self._state_size, self._voc_size)
        # Input definition: one-hot encoded sequences, shape = [batch_size, seq_len, voc_size]
        seq = tf.placeholder(
            shape=(None, None, self._voc_size),
            dtype=photinia.dtype
        )
        seq_0 = seq[:, :-1, :]  # input tokens
        seq_1 = seq[:, 1:, :]   # target tokens (inputs shifted by one)
        batch_size = tf.shape(seq)[0]
        # RNN structure
        init_state = tf.zeros(
            shape=(batch_size, self._state_size),
            dtype=photinia.dtype
        )
        states = tf.scan(
            fn=self._rnn_step,
            elems=tf.transpose(seq_0, (1, 0, 2)),  # scan iterates over the time axis
            initializer=init_state
        )
        probs = tf.map_fn(
            fn=self._state_to_prob,
            elems=states
        )
        outputs = tf.map_fn(
            fn=self._prob_to_output,
            elems=probs
        )
        probs = tf.transpose(probs, (1, 0, 2))      # back to batch-major
        outputs = tf.transpose(outputs, (1, 0, 2))
        outputs = tf.concat((seq[:, 0:1, :], outputs), 1)
        # Negative log-likelihood of the true next token, averaged over time and batch
        loss = tf.reduce_mean(-tf.log(1e-5 + tf.reduce_sum(seq_1 * probs, 2)), 1)
        loss = tf.reduce_mean(loss)
        self._add_slot(
            'train',
            outputs=loss,
            inputs=seq,
            updates=tf.train.AdamOptimizer(1e-3).minimize(loss)
        )
        self._add_slot(
            'evaluate',
            outputs=outputs,
            inputs=seq
        )
        # Embedding slot: map a single one-hot word to its embedding vector
        word = tf.placeholder(
            shape=(None, self._voc_size),
            dtype=photinia.dtype
        )
        emb = self._emb.setup(word)
        emb = photinia.lrelu(emb)
        self._add_slot(
            'embedding',
            outputs=emb,
            inputs=word
        )
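
A toy standalone sketch of the scan pattern used in _build (the step function and the feature size 8 are made up for illustration): tf.scan iterates over the leading axis, so the batch-major input is transposed to time-major first and the results are transposed back afterwards.

import tensorflow as tf  # TF1-style API, as in the snippets above

seq = tf.placeholder(shape=(None, None, 8), dtype=tf.float32)  # [batch, time, 8]
batch_size = tf.shape(seq)[0]
init_state = tf.zeros(shape=(batch_size, 8), dtype=tf.float32)

def step(acc, elem):
    # acc: previous state [batch, 8]; elem: input at one time step [batch, 8]
    return acc + elem  # stand-in for the GRU cell update

states = tf.scan(
    fn=step,
    elems=tf.transpose(seq, (1, 0, 2)),  # time-major: [time, batch, 8]
    initializer=init_state
)                                         # [time, batch, 8]
states = tf.transpose(states, (1, 0, 2))  # back to batch-major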
Example #3
    def _state_to_prob(self, state):
        # Project the hidden state to vocabulary size, then normalize
        # into a probability distribution over the vocabulary.
        prob = self._lin.setup(state)
        prob = photinia.lrelu(prob)
        prob = tf.nn.softmax(prob)
        return prob
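
Since seq_1 in _build is one-hot, the probabilities returned here feed the loss through a simple inner-product trick: reduce_sum(seq_1 * probs, 2) picks out the model's probability for the true next token. A small NumPy sketch with made-up numbers:

import numpy as np

probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1]])           # [time, voc] for one example
targets = np.array([[1., 0., 0.],
                    [0., 1., 0.]])            # one-hot next tokens (seq_1)
picked = (targets * probs).sum(axis=1)        # prob. of the true token: [0.7, 0.8]
loss = -np.log(1e-5 + picked).mean()          # mean negative log-likelihood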
Example #4
    def _rnn_step(self, acc, elem):
        # acc: previous hidden state; elem: current input token (one-hot).
        emb = self._emb.setup(elem)
        emb = photinia.lrelu(emb)
        state = self._cell.setup(emb, acc)
        return state
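
A note on why a Linear layer works as an embedding here: with one-hot input, the matrix product inside the layer just selects one row of the weight matrix. A NumPy sketch (sizes made up; whether photinia.Linear also adds a bias is not shown in these snippets, so only the matmul part is illustrated):

import numpy as np

voc_size, emb_size = 6, 4
W = np.arange(voc_size * emb_size, dtype=float).reshape(voc_size, emb_size)
one_hot = np.eye(voc_size)[2]            # one-hot vector for token id 2
assert np.allclose(one_hot @ W, W[2])    # lookup == row selection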