import math

import tensorflow as tf

# NOTE: the helper ops used below (mask_for_high_rank, exp_mask, exp_mask_for_high_rank,
# dropout, linear, bn_dense_layer, add_reg_without_bias) are expected to be imported
# from the project's nn-utils module.


def cnn_for_sentence_encoding(
        rep_tensor, rep_mask, filter_sizes=(3, 4, 5), num_filters=200, scope=None,
        is_train=None, keep_prob=1., wd=0.):
    """Sentence encoding with a multi-width CNN followed by max-over-time pooling.

    :param rep_tensor: float32 tensor, [bs, sl, vec] -- token representations
    :param rep_mask: bool tensor, [bs, sl] -- True for real tokens, False for padding
    :param filter_sizes: iterable of int, convolution window widths
    :param num_filters: int, number of feature maps per filter size
    :param scope: optional variable-scope name
    :param is_train: bool tensor, enables dropout at training time
    :param keep_prob: float, dropout keep probability
    :param wd: float, weight-decay coefficient; adds L2 regularization when > 0
    :return: float32 tensor, [bs, num_filters * len(filter_sizes)]
    """
    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    ivec = rep_tensor.get_shape().as_list()[2]
    with tf.variable_scope(scope or 'cnn_for_sentence_encoding'):
        rep_tensor = mask_for_high_rank(rep_tensor, rep_mask)
        rep_tensor_expand = tf.expand_dims(rep_tensor, 3)  # bs, sl, ivec, 1
        rep_tensor_expand_dp = dropout(rep_tensor_expand, keep_prob, is_train)

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.variable_scope("conv-maxpool-%s" % filter_size):
                # Convolution layer
                filter_shape = [filter_size, ivec, 1, num_filters]
                W = tf.get_variable('W', filter_shape, tf.float32)
                b = tf.get_variable('b', [num_filters], tf.float32)
                conv = tf.nn.conv2d(
                    rep_tensor_expand_dp, W, strides=[1, 1, 1, 1],
                    padding="VALID", name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")  # bs, sl-fs+1, 1, fn
                # Max-pooling over time: equivalent to tf.nn.max_pool with
                # ksize=[1, sl - filter_size + 1, 1, 1], but works with dynamic sl
                pooled = tf.reduce_max(h, 1, True)  # bs, 1, 1, fn
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        h_pool = tf.concat(pooled_outputs, 3)
        h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])

        if wd > 0.:
            add_reg_without_bias()

        return h_pool_flat

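# Illustrative usage sketch (an assumption, not part of the original module: the helper
# ops above are importable and a TF1 graph/session is in use; the placeholder names
# below are hypothetical):
#
#   token_emb = tf.placeholder(tf.float32, [None, None, 300])   # bs, sl, word_dim
#   token_mask = tf.placeholder(tf.bool, [None, None])          # bs, sl
#   is_train_ph = tf.placeholder(tf.bool, [])
#   sent_code = cnn_for_sentence_encoding(
#       token_emb, token_mask, filter_sizes=(3, 4, 5), num_filters=200,
#       scope='sent_cnn', is_train=is_train_ph, keep_prob=0.8, wd=1e-4)
#   # sent_code: bs x 600, i.e. num_filters * len(filter_sizes)
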
def hierarchical_cnn_res_gate(
        rep_tensor, rep_mask, n_gram=5, layer_num=5, hn=None, scope=None,
        is_train=None, keep_prob=1., wd=0.):
    """Stacked gated CNN (GLU-style) context fusion with residual connections."""
    # padding so that the output sequence length equals the input length
    if n_gram % 2 == 1:
        padding_front = padding_back = int((n_gram - 1) / 2)
    else:
        padding_front = (n_gram - 1) // 2
        padding_back = padding_front + 1
    padding = [[0, 0], [padding_front, padding_back], [0, 0], [0, 0]]

    # lengths
    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    org_ivec = rep_tensor.get_shape().as_list()[2]
    ivec = hn or org_ivec

    with tf.variable_scope(scope or 'hierarchical_cnn_res_gate'):
        rep_tensor = mask_for_high_rank(rep_tensor, rep_mask)  # bs, sl, hn

        iter_rep = rep_tensor
        layer_res_list = []

        for layer_idx in range(layer_num):
            with tf.variable_scope("conv_maxpool_%s" % layer_idx):
                iter_rep_etd = tf.expand_dims(iter_rep, 3)  # bs,sl,hn,1
                iter_rep_etd_dp = dropout(iter_rep_etd, keep_prob, is_train)

                # Convolution layer
                feature_size = org_ivec if layer_idx == 0 else ivec
                filter_shape = [n_gram, feature_size, 1, 2 * ivec]
                W = tf.get_variable('W', filter_shape, tf.float32)
                b = tf.get_variable('b', [2 * ivec], tf.float32)
                iter_rep_etd_pad = tf.pad(iter_rep_etd_dp, padding)
                conv = tf.nn.conv2d(
                    iter_rep_etd_pad, W, strides=[1, 1, 1, 1],
                    padding="VALID", name="conv")
                map_res = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")  # bs,sl,1,2hn
                map_res = tf.squeeze(map_res, [2])  # bs,sl,2*hn

                # gate: half of the channels are values, the other half a sigmoid gate
                map_res_a, map_res_b = tf.split(map_res, num_or_size_splits=2, axis=2)
                iter_rep = map_res_a * tf.nn.sigmoid(map_res_b)

                # residual connection to the previous layer's output
                if len(layer_res_list) > 0:
                    iter_rep = iter_rep + layer_res_list[-1]

                layer_res_list.append(iter_rep)

        if wd > 0.:
            add_reg_without_bias()

        return iter_rep

def cnn_for_context_fusion(
        rep_tensor, rep_mask, filter_sizes=(3, 4, 5), num_filters=200, scope=None,
        is_train=None, keep_prob=1., wd=0.):
    """Token-level context fusion: multi-width CNN without pooling over time."""
    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    ivec = rep_tensor.get_shape().as_list()[2]
    with tf.variable_scope(scope or 'cnn_for_context_fusion'):
        rep_tensor = mask_for_high_rank(rep_tensor, rep_mask)
        rep_tensor_expand = tf.expand_dims(rep_tensor, 3)  # bs, sl, ivec, 1
        rep_tensor_expand_dp = dropout(rep_tensor_expand, keep_prob, is_train)

        # Create a convolution layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.variable_scope("conv-maxpool-%s" % filter_size):
                # Convolution layer
                filter_shape = [filter_size, ivec, 1, num_filters]
                W = tf.get_variable('W', filter_shape, tf.float32)
                b = tf.get_variable('b', [num_filters], tf.float32)

                # padding along the sequence dimension so the output length stays sl
                if filter_size % 2 == 1:
                    padding_front = padding_back = int((filter_size - 1) / 2)
                else:
                    padding_front = (filter_size - 1) // 2
                    padding_back = padding_front + 1
                padding = [[0, 0], [padding_front, padding_back], [0, 0], [0, 0]]
                rep_tensor_expand_dp_pad = tf.pad(rep_tensor_expand_dp, padding)

                conv = tf.nn.conv2d(
                    rep_tensor_expand_dp_pad, W, strides=[1, 1, 1, 1],
                    padding="VALID", name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")  # bs, sl, 1, fn
                h_squeeze = tf.squeeze(h, [2])  # bs, sl, fn
                pooled_outputs.append(h_squeeze)

        # Combine the feature maps from all filter sizes
        result = tf.concat(pooled_outputs, 2)  # bs, sl, len(filter_sizes) * fn

        if wd > 0.:
            add_reg_without_bias()

        return result

def directional_attention_with_dense(rep_tensor, rep_mask, direction=None, scope=None,
                                     keep_prob=1., is_train=None, wd=0., activation='elu',
                                     tensor_dict=None, name=None, hn=None):
    """Directional (masked) token-to-token self-attention with a fusion gate."""
    def scaled_tanh(x, scale=5.):
        return scale * tf.nn.tanh(1. / scale * x)

    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    ivec = hn or rep_tensor.get_shape().as_list()[2]
    with tf.variable_scope(scope or 'directional_attention_%s' % (direction or 'diag')):
        # mask generation
        sl_indices = tf.range(sl, dtype=tf.int32)
        sl_col, sl_row = tf.meshgrid(sl_indices, sl_indices)
        if direction is None:
            # attend to every other token (only the diagonal is disabled)
            direct_mask = tf.cast(tf.diag(-tf.ones([sl], tf.int32)) + 1, tf.bool)
        else:
            if direction == 'forward':
                direct_mask = tf.greater(sl_row, sl_col)  # attend to preceding tokens
            else:
                direct_mask = tf.greater(sl_col, sl_row)  # attend to following tokens
        direct_mask_tile = tf.tile(tf.expand_dims(direct_mask, 0), [bs, 1, 1])  # bs,sl,sl
        rep_mask_tile = tf.tile(tf.expand_dims(rep_mask, 1), [1, sl, 1])  # bs,sl,sl
        attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile)  # bs,sl,sl

        # non-linear projection
        rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map',
                                 activation, False, wd, keep_prob, is_train)
        rep_map_tile = tf.tile(tf.expand_dims(rep_map, 1), [1, sl, 1, 1])  # bs,sl,sl,vec
        rep_map_dp = dropout(rep_map, keep_prob, is_train)

        # attention
        with tf.variable_scope('attention'):  # bs,sl,sl,vec
            f_bias = tf.get_variable('f_bias', [ivec], tf.float32,
                                     tf.constant_initializer(0.))
            dependent = linear(rep_map_dp, ivec, False, scope='linear_dependent')  # bs,sl,vec
            dependent_etd = tf.expand_dims(dependent, 1)  # bs,1,sl,vec
            head = linear(rep_map_dp, ivec, False, scope='linear_head')  # bs,sl,vec
            head_etd = tf.expand_dims(head, 2)  # bs,sl,1,vec

            logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0)  # bs,sl,sl,vec

            logits_masked = exp_mask_for_high_rank(logits, attn_mask)
            attn_score = tf.nn.softmax(logits_masked, 2)  # bs,sl,sl,vec
            attn_score = mask_for_high_rank(attn_score, attn_mask)

            attn_result = tf.reduce_sum(attn_score * rep_map_tile, 2)  # bs,sl,vec

        with tf.variable_scope('output'):
            o_bias = tf.get_variable('o_bias', [ivec], tf.float32,
                                     tf.constant_initializer(0.))
            # fusion gate between the projected input and the attention result
            fusion_gate = tf.nn.sigmoid(
                linear(rep_map, ivec, True, 0., 'linear_fusion_i', False, wd, keep_prob, is_train) +
                linear(attn_result, ivec, True, 0., 'linear_fusion_a', False, wd, keep_prob, is_train) +
                o_bias)
            output = fusion_gate * rep_map + (1 - fusion_gate) * attn_result
            output = mask_for_high_rank(output, rep_mask)

        # save attention tensors for visualization/debugging
        if tensor_dict is not None and name is not None:
            tensor_dict[name + '_dependent'] = dependent
            tensor_dict[name + '_head'] = head
            tensor_dict[name] = attn_score
            tensor_dict[name + '_gate'] = fusion_gate

        return output

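# A common composition (DiSAN-style) runs the block above in both directions over the
# same inputs and concatenates the results; a minimal sketch, reusing the hypothetical
# placeholders from the earlier usage note:
#
#   fw_res = directional_attention_with_dense(
#       token_emb, token_mask, direction='forward', scope='dir_attn_fw',
#       keep_prob=0.8, is_train=is_train_ph, wd=1e-4, hn=300)
#   bw_res = directional_attention_with_dense(
#       token_emb, token_mask, direction='backward', scope='dir_attn_bw',
#       keep_prob=0.8, is_train=is_train_ph, wd=1e-4, hn=300)
#   context_rep = tf.concat([fw_res, bw_res], -1)   # bs, sl, 2 * hn
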
def multi_head_attention_git(rep_tensor, rep_mask, num_heads=8, num_units=64,
                             scope=None, is_train=None, keep_prob=1., wd=0.):
    '''Applies multi-head self-attention (adapted from a public transformer implementation).

    Args:
      rep_tensor: A 3d tensor with shape of [N, T, C]; used as both queries and keys
        because this is self-attention.
      rep_mask: A 2d boolean tensor with shape of [N, T]; True for real tokens.
      num_heads: An int. Number of heads.
      num_units: A scalar. Attention size; falls back to the input size if None.
      scope: Optional scope for `variable_scope`.
      is_train: Boolean tensor. Controller of mechanism for dropout.
      keep_prob: A floating point number. Dropout keep probability.
      wd: Weight-decay coefficient (unused in this variant).

    Returns:
      A 3d tensor with shape of (N, T, num_units).
    '''
    causality = False
    with tf.variable_scope(scope or "multihead_attention"):
        # because this is self-attention, queries and keys are both rep_tensor
        queries = rep_tensor
        keys = rep_tensor

        # Set the fallback option for num_units
        if num_units is None:  # hn
            num_units = queries.get_shape().as_list()[-1]

        # Linear projections
        Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)  # (N, T_q, C)
        K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)  # (N, T_k, C)
        V = tf.layers.dense(keys, num_units, activation=tf.nn.relu)  # (N, T_k, C)

        # Split and concat
        Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)  # (h*N, T_q, C/h)
        K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)  # (h*N, T_k, C/h)
        V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)  # (h*N, T_k, C/h)

        # Multiplication
        outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))  # (h*N, T_q, T_k)

        # Scale
        outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)

        # Key masking (rep_mask replaces the tf.sign(...) heuristic of the original)
        key_masks = rep_mask  # (N, T_k)
        key_masks = tf.tile(key_masks, [num_heads, 1])  # (h*N, T_k)
        key_masks = tf.tile(tf.expand_dims(key_masks, 1),
                            [1, tf.shape(queries)[1], 1])  # (h*N, T_q, T_k)

        paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)  # exp mask
        outputs = tf.where(key_masks, outputs, paddings)  # (h*N, T_q, T_k)

        # Causality = future blinding (disabled for this self-attention variant)
        if causality:
            diag_vals = tf.ones_like(outputs[0, :, :])  # (T_q, T_k)
            tril = tf.contrib.linalg.LinearOperatorTriL(diag_vals).to_dense()  # (T_q, T_k)
            masks = tf.tile(tf.expand_dims(tril, 0),
                            [tf.shape(outputs)[0], 1, 1])  # (h*N, T_q, T_k)

            paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
            outputs = tf.where(tf.equal(masks, 0), paddings, outputs)  # (h*N, T_q, T_k)

        # Activation
        outputs = tf.nn.softmax(outputs)  # (h*N, T_q, T_k)

        # Query masking
        query_masks = rep_mask  # (N, T_q)
        query_masks = tf.tile(query_masks, [num_heads, 1])  # (h*N, T_q)
        query_masks = tf.tile(tf.expand_dims(query_masks, -1),
                              [1, 1, tf.shape(keys)[1]])  # (h*N, T_q, T_k)
        outputs *= tf.cast(query_masks, tf.float32)  # (h*N, T_q, T_k)

        # Dropout
        outputs = dropout(outputs, keep_prob, is_train)

        # Weighted sum
        outputs = tf.matmul(outputs, V_)  # (h*N, T_q, C/h)

        # Restore shape
        outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)  # (N, T_q, C)

        # Residual connection and layer normalization are omitted in this variant.
        return outputs

def multi_head_attention(rep_tensor, rep_mask, head_num=8, hidden_units_num=64,
                         scope=None, is_train=None, keep_prob=1., wd=0.):
    """Multi-head scaled dot-product self-attention with sinusoidal positional encoding."""
    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]
    ivec = rep_tensor.get_shape().as_list()[2]

    with tf.variable_scope(scope or 'multi_head_attention'):
        with tf.variable_scope('positional_encoding'):
            seq_idxs = tf.tile(tf.expand_dims(tf.range(sl), 1), [1, ivec])  # sl, ivec
            feature_idxs = tf.tile(tf.expand_dims(tf.range(ivec), 0), [sl, 1])  # sl, ivec
            pos_enc = tf.where(
                tf.equal(tf.mod(feature_idxs, 2), 0),
                tf.sin(
                    tf.cast(seq_idxs, tf.float32) /
                    tf.pow(10000., 2.0 * tf.cast(feature_idxs, tf.float32) / (1.0 * ivec))),
                tf.cos(
                    tf.cast(seq_idxs, tf.float32) /
                    tf.pow(10000., 2.0 * tf.cast(feature_idxs - 1, tf.float32) / (1.0 * ivec))),
            )
            rep_tensor_pos = mask_for_high_rank(rep_tensor + pos_enc, rep_mask)  # bs, sl, ivec

        with tf.variable_scope('multi_head_attention'):
            W = tf.get_variable('W', [3, head_num, ivec, hidden_units_num], tf.float32)
            rep_tile = tf.tile(
                tf.expand_dims(tf.expand_dims(rep_tensor_pos, 0), 0),
                [3, head_num, 1, 1, 1])  # 3,head_num,bs,sl,ivec
            rep_tile_reshape = tf.reshape(
                rep_tile, [3, head_num, bs * sl, ivec])  # 3,head_num,bs*sl,ivec

            maps = tf.reshape(  # 3,head_num,bs*sl,hn -> 3,head_num,bs,sl,hn
                tf.matmul(dropout(rep_tile_reshape, keep_prob, is_train), W),
                [3, head_num, bs, sl, hidden_units_num])
            Q_map, K_map, V_map = tf.split(maps, 3, 0)
            Q_map = tf.squeeze(Q_map, [0])  # head_num,bs,sl,hn
            K_map = tf.squeeze(K_map, [0])  # head_num,bs,sl,hn
            V_map = tf.squeeze(V_map, [0])  # head_num,bs,sl,hn

            # scaled dot-product similarity: head_num,bs,sl,sl
            similarity_mat = tf.matmul(
                Q_map, tf.transpose(K_map, [0, 1, 3, 2])) / math.sqrt(1. * hidden_units_num)

            # mask: bs,sl -> head_num,bs,sl,sl
            multi_mask = tf.tile(tf.expand_dims(rep_mask, 0), [head_num, 1, 1])  # head_num,bs,sl
            multi_mask_tile_1 = tf.expand_dims(multi_mask, 2)  # head_num,bs,1,sl
            multi_mask_tile_2 = tf.expand_dims(multi_mask, 3)  # head_num,bs,sl,1
            multi_mask_tile = tf.logical_and(
                multi_mask_tile_1, multi_mask_tile_2)  # head_num,bs,sl,sl

            similarity_mat_masked = exp_mask(similarity_mat, multi_mask_tile)  # head_num,bs,sl,sl
            prob_dist = tf.nn.softmax(similarity_mat_masked)  # head_num,bs,sl,sl
            prob_dist_dp = dropout(prob_dist, keep_prob, is_train)

            attn_res = tf.matmul(prob_dist_dp, V_map)  # head_num,bs,sl,hn

            attn_res_tran = tf.transpose(attn_res, [1, 2, 0, 3])  # bs,sl,head_num,hn
            output = tf.reshape(attn_res_tran, [bs, sl, head_num * hidden_units_num])

            if wd > 0.:
                add_reg_without_bias()

            return output

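# Illustrative usage sketch for the multi-head context-fusion layer above (same
# hypothetical placeholders as in the earlier notes; assumes a TF1 graph/session):
#
#   fused = multi_head_attention(
#       token_emb, token_mask, head_num=8, hidden_units_num=64,
#       scope='mh_attn', is_train=is_train_ph, keep_prob=0.8, wd=1e-4)
#   # fused: bs, sl, head_num * hidden_units_num = bs, sl, 512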