def test_ops(self):
    x = K.variable(np.random.rand(8, 12))
    y = K.variable(np.random.rand(12, 25))
    z = K.placeholder((25, 18, 13))
    w = K.placeholder((18, 18))
    # ====== dot ====== #
    t = K.dot(x, y)
    self.assertEquals(K.get_shape(t), (8, 25))
    self.assertEquals(K.get_shape(t), K.eval(t).shape)
    t = K.dot(t, K.dimshuffle(z, (1, 0, 2)))
    self.assertEquals(K.get_shape(t), (8, 18, 13))
    # ====== transpose ====== #
    self.assertEquals(K.get_shape(K.transpose(z)), (13, 18, 25))
    self.assertEquals(K.get_shape(K.transpose(t, axes=(2, 0, 1))),
                      (13, 8, 18))
    # ====== eye ====== #
    self.assertEquals(K.get_shape(K.eye(5)),
                      K.eval(K.eye(5)).shape)
    # ====== diag ====== #
    self.assertEquals(K.get_shape(K.diag(w)), (18,))
    # self.assertEquals(K.get_shape(K.diag(x)),
    #                   K.eval(K.diag(y)).shape)
    self.assertEquals(K.get_shape(K.square(x)), K.eval(K.square(x)).shape)
    self.assertEquals(K.get_shape(K.abs(x)), K.eval(K.abs(x)).shape)
    self.assertEquals(K.get_shape(K.sqrt(x)), K.eval(K.sqrt(x)).shape)
    self.assertEquals(K.get_shape(K.exp(x)), K.eval(K.exp(x)).shape)
    self.assertEquals(K.get_shape(K.log(x)), K.eval(K.log(x)).shape)
    self.assertEquals(K.get_shape(K.round(x)), K.eval(K.round(x)).shape)
    self.assertEquals(K.get_shape(K.pow(x, 2)), K.eval(K.pow(x, 2)).shape)
    self.assertEquals(K.get_shape(K.clip(x, -1, 1)),
                      K.eval(K.clip(x, -1, 1)).shape)
    self.assertEquals(K.get_shape(K.inv(x)), K.eval(K.inv(x)).shape)
def _apply(self, u, mean, std, z_corr):
    mu = self.a1 * self.activation(self.a2 * u + self.a3) + self.a4 * u + self.a5
    v = self.a6 * self.activation(self.a7 * u + self.a8) + self.a9 * u + self.a10
    z_est = (z_corr - mu) * v + mu
    z_est_bn = (z_est - mean) / K.square(std)
    return z_est_bn
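# Illustrative sketch (assumption, not part of the original class): `_apply`
# matches the vertical denoising combinator g(z_corr, u) of the ladder network
# (Rasmus et al. 2015), with per-unit parameters a1..a10:
#   mu(u) = a1 * act(a2*u + a3) + a4*u + a5
#   v(u)  = a6 * act(a7*u + a8) + a9*u + a10
#   z_est = (z_corr - mu(u)) * v(u) + mu(u)
# The standalone NumPy version below only mirrors that arithmetic; the name
# `denoise_numpy` and the sigmoid activation are assumptions.
import numpy as np

def denoise_numpy(u, z_corr, a):
    """NumPy mirror of the combinator; `a` is a sequence of ten arrays/scalars."""
    act = lambda t: 1.0 / (1.0 + np.exp(-t))              # assumed activation
    mu = a[0] * act(a[1] * u + a[2]) + a[3] * u + a[4]    # estimated mean
    v = a[5] * act(a[6] * u + a[7]) + a[8] * u + a[9]     # estimated scaling
    return (z_corr - mu) * v + mu                         # denoised estimate

# e.g. denoise_numpy(np.zeros(4), np.ones(4), a=[1.0] * 10)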
def test_basic_ops_value(self):
    np.random.seed(12082518)
    x = K.variable(np.random.randn(8, 8))
    y = K.variable(np.random.randn(8, 8))
    z = K.variable(np.random.randint(0, 2, size=(8, 8)), dtype=np.bool)
    w = K.variable(np.random.randint(0, 2, size=(8, 8)), dtype=np.bool)
    self.assertEqual(round(np.sum(K.eval(K.relu(x, alpha=0.12))) * 10000), 276733)
    self.assertEqual(round(np.sum(K.eval(K.elu(x, alpha=0.12))) * 10000), 289202)
    self.assertEqual(np.sum(K.eval(K.softmax(x))), 8.0)
    self.assertEqual(round(np.sum(K.eval(K.softplus(x))) * 10000), 554564)
    self.assertEqual(round(np.sum(K.eval(K.softsign(x))) * 100000), 211582)
    self.assertEqual(round(np.sum(K.eval(K.sigmoid(x))) * 10000), 330427)
    self.assertEqual(round(np.sum(K.eval(K.hard_sigmoid(x))) * 10000), 330836)
    self.assertEqual(round(np.sum(K.eval(K.tanh(x))) * 100000), 290165)
    self.assertEqual(round(np.sum(K.eval(K.square(x))) * 10000), 744492)
    self.assertEqual(round(np.sum(K.eval(K.sqrt(x))) * 10000), 300212)
    self.assertEqual(round(np.sum(K.eval(K.abs(x))) * 10000), 559979)
    self.assertEqual(np.sum(K.eval(K.sign(x))), 6.0)
    self.assertEqual(round(np.sum(K.eval(K.inv(x))) * 1000), 495838)
    self.assertEqual(round(np.sum(K.eval(K.exp(x))) * 1000), 122062)
    self.assertEqual(round(np.sum(K.eval(K.log(K.abs(x)))) * 10000), -344491)
    self.assertEqual(np.sum(K.eval(K.round(x))), 5.0)
    self.assertEqual(round(np.sum(K.eval(K.pow(x, 8))) * 100), 398153)
    self.assertEqual(
        round(np.sum(K.eval(K.clip(x, -0.12, 0.12))) * 1000000), 620529)
    # TODO: pygpu (libgpuarray) still does not support diag
    # self.assertEqual(round(np.sum(K.eval(K.diag(x))) * 100000), 325289)
    self.assertEqual(np.sum(K.eval(K.eye(12, 8))), 8.0)
    self.assertEqual(np.sum(K.eval(K.eq(z, w))), 38)
    self.assertEqual(np.sum(K.eval(K.neq(z, w))), 26)
    self.assertEqual(np.sum(K.eval(K.gt(x, y))), 33)
    self.assertEqual(np.sum(K.eval(K.ge(x, y))), 33)
    self.assertEqual(np.sum(K.eval(K.lt(x, y))), 31)
    self.assertEqual(np.sum(K.eval(K.le(x, y))), 31)
    self.assertEqual(round(np.sum(K.eval(K.switch(z, x, y))) * 100000), 139884)
def score(self, query, key=None, scale=1, window_width=None,
          q_proj=None, target_proj=None):
    r"""
    Arguments:
      query: Query (or target sequence) tensor of shape
        `[batch_size, Tq, dim]`, or `[num_heads, batch_size, Tq, dim]` in
        case of multi-head attention.
      key: Key (or source sequence) tensor of shape
        `[batch_size, Tv, dim]`, or `[num_heads, batch_size, Tv, dim]` in
        case of multi-head attention.
      scale: single `Scalar` or `Tensor` of shape `[dim]` for scaling the
        attention scores; `1/sqrt(dim)` is suggested in (Vaswani et al. 2017).
      window_width: `None`, `Integer` or `Float` in `[0, 1]`. The total
        number of frames of a single window in local attention
        (i.e. `left + 1 + right`). Can be given as a fixed number of frames
        (`int`) or as a fraction of the sequence length (`float`).
        If `None`, use `Tq`.
      q_proj: `Dense`, instance of a dense or fully connected layer
        - for `ScoreLocation`, the number of hidden units is `1`
        - for `ScoreGeneral`, the number of hidden units is `dim`
      target_proj: `Dense`, for predictive local attention, a fully
        connected network applied to the target sequence (i.e. the query)
        to predict the position on the source sequence (i.e. the key).
        The layer must have an output dimension of 1 and return a logit value.

    Returns:
      Tensor of shape `[num_heads, batch_size, Tq, Tv]`, or
      `[num_heads, batch_size, Tq, 1]` in case of `ScoreLocation`.
    """
    ### Check if multi-head attention is used
    num_heads = _get_num_heads(query)
    if num_heads > 0:
        query = bk.reshape(query, [-1] + [i for i in query.shape[2:]])
        if key is not None:
            key = bk.reshape(key, [-1] + [i for i in key.shape[2:]])
    Tq = query.shape[1]
    Tv = Tq if key is None else key.shape[1]
    # scale shape is `[]` or `[dim]`
    scale = bk.array(scale, dtype=query.dtype)
    ### Check the window width
    if window_width is None:
        window_width = Tq
    elif window_width < 1:
        window_width = window_width * Tv
    window_width = int(window_width)
    ### Locative attention
    if AttentionMechanism.ScoreLocation in self:
        if PosLocalM in self or PosLocalP in self:
            raise NotImplementedError(
                "ScoreLocation only supports global attention, but given: %s" %
                str(self))
        # [batch_size * num_heads, Tq, dim]
        scores = bk.reduce_mean(scale) * q_proj(query)
        assert scores.shape[-1] == 1, \
            "q_proj must have only 1 hidden unit, but given %d" % scores.shape[-1]
    ### Other score modes need the key tensor
    else:
        if key is None:
            raise ValueError(
                "key must be provided for attention type: %s" % str(self))
        ### Attention position (local or global)
        if PosLocalM in self:
            key = key[:, -window_width:]
        elif PosLocalP in self:
            pt = bk.sigmoid(target_proj(bk.reshape(query, ([0], -1))))
            assert pt.shape[-1] == 1, \
                "target_proj must project the query [., Tq * dim] to [., 1], i.e. " + \
                "predicting the attention position on the source sequence using " + \
                "knowledge from the target sequence."
            pt = Tv * pt  # `[batch_size * num_heads, 1]`
            # `[batch_size * num_heads, Tv]`
            # Eq (10) (Luong et al. 2015)
            gauss_est = bk.exp(-bk.square(bk.arange(Tv, dtype=pt.dtype) - pt) /
                               (2 * bk.square(window_width / 2)))
            # `[batch_size * num_heads, 1, Tv]`
            gauss_est = bk.expand_dims(gauss_est, axis=1)
        ### Additive or concat method
        if AttentionMechanism.ScoreAdditive in self:
            # [batch_size * num_heads, Tq, 1, dim]
            q = bk.expand_dims(query, axis=2)
            # [batch_size * num_heads, 1, Tv, dim]
            k = bk.expand_dims(key, axis=1)
            # [batch_size * num_heads, Tq, Tv]
            scores = bk.reduce_sum(scale * bk.tanh(q + k), axis=-1)
        ### Dot product or multiplicative scoring
        elif AttentionMechanism.ScoreDotProd in self:
            # this is a trick to make attention_scale broadcastable when
            # scale_tied=False
            scores = bk.matmul(scale * query, bk.swapaxes(key, 1, 2))
        ### Cosine scoring
        elif AttentionMechanism.ScoreCosine in self:
            # [batch_size * num_heads, Tq, 1, dim]
            q = bk.expand_dims(query, axis=2)
            # [batch_size * num_heads, 1, Tv, dim]
            k = bk.expand_dims(key, axis=1)
            # [batch_size * num_heads, Tq, Tv, dim]
            scores = (q * k) / (bk.norm(q, p=2) * bk.norm(k, p=2))
            scores = bk.reduce_sum(scale * scores, axis=-1, keepdims=False)
        ### General method with only a projection on the query
        elif AttentionMechanism.ScoreGeneral in self:
            query = q_proj(query)
            assert query.shape[-1] == key.shape[-1], \
                "q_proj must have %d hidden units, but given %d units" % \
                (key.shape[-1], query.shape[-1])
            scores = bk.matmul(scale * query, bk.swapaxes(key, 1, 2))
        else:
            raise NotImplementedError(
                "No support for attention_type='%s'" % str(self))
        ### Applying the local-predictive attention
        if PosLocalP in self:
            scores = scores * gauss_est
    ### Get back the multi-head shape
    if num_heads > 0:
        scores = bk.reshape(scores,
                            shape=[num_heads, -1] + [i for i in scores.shape[1:]])
    return scores
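# Illustrative sketch (assumption, not part of the original module): the
# predictive local attention branch above follows Eq. (10) of Luong et al.
# (2015): a position p_t = Tv * sigmoid(...) is predicted from the query, and
# the scores are damped by a Gaussian centred at p_t with sigma = D / 2,
# where D is the window width. The standalone NumPy version below only
# illustrates that weighting; the name `gaussian_position_weight` is an
# assumption.
import numpy as np

def gaussian_position_weight(pt, Tv, window_width):
    """Gaussian damping exp(-(s - pt)^2 / (2 * (D/2)^2)) for s = 0..Tv-1."""
    s = np.arange(Tv, dtype=np.float64)  # source positions
    sigma = window_width / 2.0           # sigma = D / 2 as in the paper
    return np.exp(-np.square(s - pt) / (2.0 * sigma ** 2))

# e.g. a predicted centre at frame 7 of a 12-frame source with a 4-frame window
# yields weights close to 1 around position 7 and near 0 far from it.
weights = gaussian_position_weight(pt=7.0, Tv=12, window_width=4)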