Example #1
def mtr_lm_v1(num_heads=8, num_memory_heads=0):
    """Model incorporating mixture-of-experts, local and global attention.

  ~6B parameters

  32 experts in 3 hierarchichal moe layers.

  Args:
    num_heads: an optional integer
    num_memory_heads: an optional integer

  Returns:
    a hparams
  """
    hparams = mtr_lm_dense(0)
    local_att = transformer_layers.LocalSelfAttention(
        num_heads=num_heads,
        num_memory_heads=num_memory_heads,
        key_value_size=128)
    att = transformer_layers.SelfAttention(num_heads=num_heads,
                                           num_memory_heads=num_memory_heads,
                                           key_value_size=128)
    drd = transformer_layers.DenseReluDense(hidden_size=2048)
    hmoe = moe.MoE2D(expert_x=8, expert_y=4, hidden_size=32768)
    hparams.layer_stack = transformer.LayerStack(
        ([local_att, local_att, drd, att, drd, local_att, local_att, hmoe] *
         4)[:-1])
    hparams.mesh_shape = "b0:4;b1:8"
    hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
    hparams.outer_batch_size = 4
    return hparams
Example #2
def layer_stack(include_encdec_attention):
  """Create a layer stack.

  Args:
    include_encdec_attention: a boolean
  Returns:
    a LayerStack
  """
  ret = []
  for _ in range(FLAGS.num_layers):
    ret.append(
        transformer_layers.SelfAttention(
            num_heads=FLAGS.num_heads,
            key_value_size=FLAGS.d_kv,
            attention_kwargs={"dropout_rate": FLAGS.dropout}))
    if include_encdec_attention:
      ret.append(
          transformer_layers.EncDecAttention(
              num_heads=FLAGS.num_heads,
              key_value_size=FLAGS.d_kv,
              attention_kwargs={"dropout_rate": FLAGS.dropout}))
    ret.append(
        transformer_layers.DenseReluDense(
            hidden_size=FLAGS.d_ff,
            dropout_rate=FLAGS.dropout))
  return transformer.LayerStack(ret)
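
The FLAGS values referenced in this example are defined elsewhere in the original program. A minimal sketch of what those definitions might look like, assuming the standard absl.flags module (the default values here are illustrative, not taken from the original source):

from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_integer("num_layers", 6, "Number of attention/feed-forward groups in the stack.")
flags.DEFINE_integer("num_heads", 8, "Number of attention heads per layer.")
flags.DEFINE_integer("d_kv", 128, "Key/value dimensionality per attention head.")
flags.DEFINE_integer("d_ff", 2048, "Hidden size of the DenseReluDense layers.")
flags.DEFINE_float("dropout", 0.1, "Dropout rate for attention and feed-forward layers.")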
Example #3
def self_attention_layer(hparams, prefix):
    """Create self-attention layer based on hyperparameters."""
    return transformer_layers.SelfAttention(
        num_heads=hparams.get(prefix + "num_heads"),
        num_memory_heads=hparams.get(prefix + "num_memory_heads"),
        key_value_size=hparams.d_kv,
        shared_kv=hparams.get(prefix + "shared_kv", False),
        attention_kwargs=attention_kwargs_from_hparams(hparams))
Example #4
def my_layer_stack(hparams):
    return transformer.LayerStack([
        transformer_layers.SelfAttention(
            num_heads=hparams.num_heads,
            key_value_size=hparams.d_kv,
            dropout_rate=hparams.attention_dropout),
        transformer_layers.DenseReluDense(
            hidden_size=hparams.d_ff,
            dropout_rate=hparams.layer_prepostprocess_dropout),
    ] * hparams.num_hidden_layers)
Example #5
def mtf_unitransformer_all_layers_tiny():
  """Test out all the layers on local CPU."""
  hparams = mtf_unitransformer_tiny()
  hparams.layer_stack = transformer.LayerStack(
      [transformer_layers.SelfAttention(num_heads=4),
       transformer_layers.LocalSelfAttention(num_heads=4),
       moe.MoE1D(num_experts=4, hidden_size=512),
       moe.MoE2D(expert_x=4, expert_y=4, hidden_size=512),
       transformer_layers.DenseReluDense(hidden_size=512)])
  return hparams
Example #6
  def __init__(self, base_num_heads):
    """Create an DecoderAttentionLayer.

    Args:
      base_num_heads: a positive integer, the base number of heads the attention
        layers are using.
    """
    self._self_attention = transformer_layers.SelfAttention(num_heads=2 *
                                                            base_num_heads)
    self._enc_dec_attention = transformer_layers.EncDecAttention(
        num_heads=base_num_heads)
Example #7
def mtf_transformer2_all_layers_tiny():
    """Test out all the layers on local CPU."""
    hparams = mtf_transformer2_base()
    hparams.batch_size = 2
    hparams.mesh_shape = ""
    hparams.d_model = 128
    hparams.layer_stack = transformer.LayerStack([
        transformer_layers.SelfAttention(num_heads=4),
        transformer_layers.LocalSelfAttention(num_heads=4),
        moe.MoE1D(num_experts=4, hidden_size=512),
        moe.MoE2D(expert_x=4, expert_y=4, hidden_size=512),
        transformer_layers.DenseReluDense(hidden_size=512)
    ])
    return hparams
Example #8
def default_layer_stack_with_encoder_attention(hparams):
    """Create a layer stack with encoder-decoder attention based on hyperparameters."""
    return transformer.LayerStack(
        [
            transformer_layers.SelfAttention(
                num_heads=hparams.num_heads,
                key_value_size=hparams.d_kv,
                dropout_rate=hparams.attention_dropout),
            transformer_layers.EncDecAttention(
                num_heads=hparams.num_heads,
                key_value_size=hparams.d_kv,
                dropout_rate=hparams.attention_dropout),
            transformer_layers.DenseReluDense(
                hidden_size=hparams.d_ff, dropout_rate=hparams.relu_dropout),
        ] * hparams.num_hidden_layers,
        dropout_rate=hparams.layer_prepostprocess_dropout,
        norm_epsilon=hparams.norm_epsilon)
Example #9
def simple_layer_stack(include_encdec_attention,
                       num_layers=6,
                       d_ff=2048,
                       num_heads=8,
                       d_kv=128,
                       dropout_rate=0.1):
    """Create a layer stack.

    Args:
      include_encdec_attention: a boolean
      num_layers: an integer
      d_ff: an integer
      num_heads: an integer
      d_kv: an integer
      dropout_rate: a float

    Returns:
      a LayerStack
    """
    ret = []
    for _ in range(num_layers):
        ret.append(
            transformer_layers.SelfAttention(
                num_heads=num_heads,
                key_value_size=d_kv,
                attention_kwargs={"dropout_rate": dropout_rate}))
        if include_encdec_attention:
            ret.append(
                transformer_layers.EncDecAttention(
                    num_heads=num_heads,
                    key_value_size=d_kv,
                    attention_kwargs={"dropout_rate": dropout_rate}))
        ret.append(
            transformer_layers.DenseReluDense(hidden_size=d_ff,
                                              dropout_rate=dropout_rate))
    return transformer.LayerStack(ret)
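
As a usage sketch (not part of the original example): the LayerStack returned by simple_layer_stack can be passed to transformer.Unitransformer, mirroring the constructor arguments shown in Example #11 below; the vocabulary sizes, max_length and d_model here are illustrative placeholders.

decoder_stack = simple_layer_stack(include_encdec_attention=False)
model = transformer.Unitransformer(
    input_vocab_size=32000,
    output_vocab_size=32000,
    autoregressive=True,
    max_length=256,
    d_model=1024,
    layer_stack=decoder_stack)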
Example #10
def __init__(self, base_num_heads, key_value_size, dropout_rate):
    # Use twice as many heads as the base layer, each with half the
    # key/value size.
    self._self_attention = transformer_layers.SelfAttention(
        num_heads=int(2 * base_num_heads),
        key_value_size=int(key_value_size / 2),
        dropout_rate=dropout_rate)
Example #11
def create_dummy_model(mesh,
                       shapes,
                       n_blocks=2,
                       block_param_size_str="2_2",
                       block_repeat_size_str="1_1"):
    """Creates a dummy model and layer stack with 4-dimensional input."""

    assert len(shapes) == 4
    outer_batch_size, batch_size, length, d_model = shapes
    batch_dim = mtf.Dimension("batch", batch_size)
    outer_batch_dim = mtf.Dimension("outer_batch", outer_batch_size)
    length_dim = mtf.Dimension("length", length)
    block_param_size = list(map(int, block_param_size_str.split("_")))
    block_repeat_size = list(map(int, block_repeat_size_str.split("_")))

    sublayers_initial = [
        transformer.sublayer_dropout,
    ]
    sublayers_per_layer = [
        transformer.sublayer_rms_norm,
        transformer.sublayer_call_layer,
        transformer.sublayer_dropout,
        transformer.sublayer_residual,
    ]
    sublayers_final = [
        transformer.sublayer_rms_norm,
        transformer.sublayer_dropout,
    ]
    submodules = [
        transformer_layers.SelfAttention(),
        transformer_layers.DenseReluDense()
    ]

    n_sublayers = np.array(block_param_size).prod()
    layers = submodules * n_sublayers
    layer_stack = funnel_transformer.FunnelTransformerLayerStack(
        layers=layers,
        n_blocks=n_blocks,
        block_param_size=block_param_size,
        block_repeat_size=block_repeat_size,
        sublayers_initial=sublayers_initial,
        sublayers_per_layer=sublayers_per_layer,
        sublayers_final=sublayers_final)

    model = transformer.Unitransformer(input_vocab_size=10,
                                       output_vocab_size=10,
                                       autoregressive=False,
                                       max_length=8,
                                       d_model=d_model,
                                       layer_stack=layer_stack)

    context = transformer.Context(
        model=model,
        mesh=mesh,
        batch_dims=[batch_dim, outer_batch_dim],
        length_dim=length_dim,
        variable_dtype=mtf.VariableDType(tf.float32),
        sequence_id=mtf.ones(mesh, mtf.Shape([length_dim])),
        position=mtf.range(mesh, length_dim, dtype=tf.int32))
    return layer_stack, context
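
A usage sketch for the helper above, under the assumption that a mesh_tensorflow Graph/Mesh pair is available and that the concrete sizes are arbitrary test values (shapes is the 4-tuple of outer_batch_size, batch_size, length and d_model that the assert expects):

import mesh_tensorflow as mtf

graph = mtf.Graph()
mesh = mtf.Mesh(graph, "dummy_mesh")
layer_stack, context = create_dummy_model(mesh, shapes=(1, 2, 8, 16))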