def MultiHeadedAttention(
    feature_depth, num_heads=8, dropout=0.0, mode='train'):
  """Transformer-style multi-headed attention.

  Accepts inputs of the form (x, mask) and constructs (q, k, v) from x.

  Args:
    feature_depth: int: depth of embedding
    num_heads: int: number of attention heads
    dropout: float: dropout rate
    mode: str: 'train' or 'eval'

  Returns:
    Multi-headed self-attention layer.
  """
  return combinators.Serial(
      combinators.Parallel(
          # q = k = v = first input
          combinators.Branch(
              combinators.Copy(), combinators.Copy(), combinators.Copy()),
          combinators.Copy()  # pass the mask
      ),
      MultiHeadedAttentionQKV(  # pylint: disable=no-value-for-parameter
          feature_depth, num_heads=num_heads, dropout=dropout, mode=mode),
  )
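# The (x, mask) -> ((q, k, v), mask) routing above is easy to misread, so here
# is a self-contained sketch (plain Python, not the library's combinators) of
# how Branch and Parallel are assumed to move data around. All names below are
# illustrative only.


def _branch(*fns):
  """Fan a single input out to one copy per sub-function."""
  return lambda x: tuple(f(x) for f in fns)


def _parallel(*fns):
  """Apply the i-th sub-function to the i-th element of a tuple input."""
  return lambda xs: tuple(f(x) for f, x in zip(fns, xs))


_copy = lambda x: x

# Mirrors combinators.Parallel(combinators.Branch(Copy, Copy, Copy), Copy):
# (x, mask) -> ((x, x, x), mask), i.e. q = k = v = x with the mask passed along.
_route = _parallel(_branch(_copy, _copy, _copy), _copy)
assert _route(('x', 'mask')) == (('x', 'x', 'x'), 'mask')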
def MultiHeadedAttentionQKV(
    feature_depth, num_heads=8, dropout=0.0, mode='train'):
  """Transformer-style multi-headed attention.

  Accepts inputs of the form (q, k, v), mask.

  Args:
    feature_depth: int: depth of embedding
    num_heads: int: number of attention heads
    dropout: float: dropout rate
    mode: str: 'train' or 'eval'

  Returns:
    Multi-headed self-attention result and the mask.
  """
  return combinators.Serial(
      combinators.Parallel(
          combinators.Parallel(
              core.Dense(feature_depth),
              core.Dense(feature_depth),
              core.Dense(feature_depth),
          ),
          combinators.Copy()),
      PureMultiHeadedAttention(  # pylint: disable=no-value-for-parameter
          feature_depth=feature_depth, num_heads=num_heads,
          dropout=dropout, mode=mode),
      combinators.Parallel(core.Dense(feature_depth), combinators.Copy()))
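# For reference, a minimal numpy sketch of the per-head scaled dot-product
# attention that PureMultiHeadedAttention is assumed to compute (the real layer
# additionally splits/merges heads and applies dropout; this standalone snippet
# does not touch the library API).
import numpy as np


def _dot_product_attention_sketch(q, k, v, mask):
  """softmax(Q K^T / sqrt(depth)) V, with masked-out positions ignored."""
  depth = q.shape[-1]
  logits = np.matmul(q, np.swapaxes(k, -1, -2)) / np.sqrt(depth)
  logits = np.where(mask, logits, -1e9)  # mask == False means "do not attend"
  weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
  weights /= weights.sum(axis=-1, keepdims=True)
  return np.matmul(weights, v)


# Example: batch of 2, length 5, depth 16 per head, attend everywhere.
_q = _k = _v = np.ones((2, 5, 16))
_out = _dot_product_attention_sketch(_q, _k, _v, np.ones((2, 5, 5), dtype=bool))
assert _out.shape == (2, 5, 16)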
def test_parallel(self):
  input_shape = ((2, 3), (2, 3))
  expected_shape = ((2, 3), (2, 3))
  output_shape = base.check_shape_agreement(
      combinators.Parallel(combinators.Copy(), combinators.Copy()),
      input_shape)
  self.assertEqual(output_shape, expected_shape)
def test_branch_named(self):
  input_shape = (2, 3)
  expected_shape = {'a': (2, 3), 'b': (2, 3)}
  output_shape = base.check_shape_agreement(
      combinators.Branch(a=combinators.Copy(), b=combinators.Copy()),
      input_shape)
  self.assertEqual(output_shape, expected_shape)
def ChunkedCausalMultiHeadedAttention(
    feature_depth, num_heads=8, dropout=0.0, chunk_selector=None,
    mode='train'):
  """Transformer-style causal multi-headed attention operating on chunks.

  Accepts inputs that are a list of chunks and applies causal attention.

  Args:
    feature_depth: int: depth of embedding
    num_heads: int: number of attention heads
    dropout: float: dropout rate
    chunk_selector: a function from chunk number to the list of chunks to
      attend to.
    mode: str: 'train' or 'eval'

  Returns:
    Multi-headed self-attention layer.
  """
  prepare_attention_input = combinators.Serial(
      combinators.Branch(
          combinators.Branch(  # q = k = v = first input
              combinators.Copy(), combinators.Copy(), combinators.Copy()),
          CausalMask(axis=-2),  # pylint: disable=no-value-for-parameter
      ),
      combinators.Parallel(
          combinators.Parallel(
              core.Dense(feature_depth),
              core.Dense(feature_depth),
              core.Dense(feature_depth),
          ),
          combinators.Copy()))
  return combinators.Serial(
      combinators.Map(prepare_attention_input),
      ChunkedAttentionSelector(selector=chunk_selector),  # pylint: disable=no-value-for-parameter
      combinators.Map(
          PureMultiHeadedAttention(  # pylint: disable=no-value-for-parameter
              feature_depth=feature_depth, num_heads=num_heads,
              dropout=dropout, mode=mode),
          check_shapes=False),
      combinators.Map(combinators.Select(0), check_shapes=False),  # drop masks
      combinators.Map(core.Dense(feature_depth)))
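# Illustrative chunk selectors matching the documented contract above
# (chunk number -> list of chunk indices to attend to). These names are ours,
# not part of the library.


def causal_chunk_selector(chunk_number):
  """Attend to the current chunk and every earlier chunk."""
  return list(range(chunk_number + 1))


def windowed_chunk_selector(chunk_number, window=2):
  """Attend only to the current chunk and the `window` chunks before it."""
  return list(range(max(0, chunk_number - window), chunk_number + 1))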
def GeneralGRUCell(candidate_transform,
                   memory_transform=combinators.Copy,
                   gate_nonlinearity=core.Sigmoid,
                   candidate_nonlinearity=core.Tanh,
                   dropout_rate_c=0.1,
                   sigmoid_bias=0.5):
  r"""Parametrized Gated Recurrent Unit (GRU) cell construction.

  GRU update equations:
  $$ Update gate: u_t = \sigma(U' * s_{t-1} + B') $$
  $$ Reset gate: r_t = \sigma(U'' * s_{t-1} + B'') $$
  $$ Candidate memory: c_t = \tanh(U * (r_t \odot s_{t-1}) + B) $$
  $$ New state: s_t = u_t \odot s_{t-1} + (1 - u_t) \odot c_t $$

  See combinators.Gate for details on the gating function.

  Args:
    candidate_transform: Transform to apply inside the Candidate branch.
      Applied before nonlinearities.
    memory_transform: Optional transformation on the memory before gating.
    gate_nonlinearity: Function to use as gate activation; allows trying
      alternatives to Sigmoid, such as HardSigmoid.
    candidate_nonlinearity: Nonlinearity to apply after the candidate branch;
      allows trying alternatives to the traditional Tanh, such as HardTanh.
    dropout_rate_c: Amount of dropout on the transform (c) gate. Dropout works
      best in a GRU when applied exclusively to this branch.
    sigmoid_bias: Constant to add before the sigmoid gates. Generally want to
      start off with a positive bias.

  Returns:
    A model representing a GRU cell with the specified transforms.
  """
  return combinators.Serial(
      combinators.Branch(
          # s_{t-1} branch - optionally transform; typically the identity.
          memory_transform(),
          # u_t (Update gate) branch
          combinators.Serial(
              candidate_transform(),
              # Want bias to start out positive before sigmoids.
              core.AddConstant(constant=sigmoid_bias),
              gate_nonlinearity()
          ),
          # c_t (Candidate) branch
          combinators.Serial(
              combinators.Branch(
                  combinators.Copy(),
                  # r_t (Reset gate) branch
                  combinators.Serial(
                      candidate_transform(),
                      # Want bias to start out positive before sigmoids.
                      core.AddConstant(constant=sigmoid_bias),
                      gate_nonlinearity()
                  )
              ),
              # Gate s_{t-1} with sigmoid(candidate_transform(s_{t-1})).
              combinators.Multiply(),
              # Final projection + nonlinearity to get the candidate c_t.
              candidate_transform(),
              candidate_nonlinearity(),
              # Only apply dropout on the candidate branch; the paper reports
              # that 0.1 is a good default.
              core.Dropout(rate=dropout_rate_c)
          ),
      ),
      # Gate memory and candidate.
      combinators.Gate())
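# A minimal usage sketch: a GRU cell whose transforms are plain dense
# projections. The name DenseGRUCell and the choice of core.Dense here are
# illustrative, not part of this file; `units` should match the depth of the
# recurrent state s_{t-1}.
def DenseGRUCell(units):
  """GRU cell built from GeneralGRUCell with dense candidate transforms."""
  return GeneralGRUCell(
      candidate_transform=lambda: core.Dense(units),
      memory_transform=combinators.Copy,
      gate_nonlinearity=core.Sigmoid,
      candidate_nonlinearity=core.Tanh)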
def test_parallel_named(self):
  input_shape = {'a': (2, 3), 'b': (2, 3)}
  expected_shape = {'a': (2, 3), 'b': (2, 3)}
  output_shape = base.check_shape_agreement(
      combinators.Parallel(a=combinators.Copy()), input_shape)
  self.assertEqual(output_shape, expected_shape)