Example #1
def main(_):
  index = FLAGS.index
  head = FLAGS.head
  vocab_path = FLAGS.vocab_path
  attention_file = FLAGS.attention_file
 
  subtokenizer = tokenization.restore_subtokenizer_from_vocab_files(vocab_path)
  subtoken_list = subtokenizer._subtoken_list

  attn = np.load(attention_file, allow_pickle=True).item()

  src_src = attn['src_src_attention']
  tgt_src = attn['tgt_src_attention']
  tgt_tgt = attn['tgt_tgt_attention']
  src = attn['src']
  tgt = attn['tgt']

  draw_attention_weights(
    src[index], src[index], src_src[index, head], subtoken_list)
  plt.savefig('src_src.png', dpi=256)
  print('src_src_attention saved to "src_src.png".')
  draw_attention_weights(
    tgt[index], src[index], tgt_src[index, head], subtoken_list)
  plt.savefig('tgt_src.png', dpi=256)
  print('tgt_src_attention saved to "tgt_src.png".')
  draw_attention_weights(
    tgt[index], tgt[index], tgt_tgt[index, head], subtoken_list)
  plt.savefig('tgt_tgt.png', dpi=256)
  print('tgt_tgt_attention saved to "tgt_tgt.png".')
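The attention file loaded above is a pickled dict of NumPy arrays, which is why it is read back with np.load(..., allow_pickle=True).item(). A minimal sketch of writing such a file follows; the key names come from the loading code, while the shapes (batch, heads, query length, key length) are assumptions:

import numpy as np

# Hypothetical sizes: 8 sentence pairs, 4 heads, source length 20, target length 15.
attn = {
    'src_src_attention': np.random.rand(8, 4, 20, 20).astype('float32'),
    'tgt_src_attention': np.random.rand(8, 4, 15, 20).astype('float32'),
    'tgt_tgt_attention': np.random.rand(8, 4, 15, 15).astype('float32'),
    'src': np.zeros((8, 20), dtype='int32'),  # source subtoken ids
    'tgt': np.zeros((8, 15), dtype='int32'),  # target subtoken ids
}
# np.save pickles the dict as a 0-d object array; .item() recovers it on load.
np.save('attention.npy', attn)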
Example #2
def main(_):
    vocab_path = FLAGS.vocab_path
    model_dir = FLAGS.model_dir

    encoder_stack_size = FLAGS.encoder_stack_size
    decoder_stack_size = FLAGS.decoder_stack_size
    hidden_size = FLAGS.hidden_size
    num_heads = FLAGS.num_heads
    filter_size = FLAGS.filter_size
    dropout_rate = FLAGS.dropout_rate

    extra_decode_length = FLAGS.extra_decode_length
    beam_width = FLAGS.beam_width
    alpha = FLAGS.alpha
    decode_batch_size = FLAGS.decode_batch_size
    src_max_length = FLAGS.src_max_length

    source_text_filename = FLAGS.source_text_filename
    target_text_filename = FLAGS.target_text_filename
    translation_output_filename = FLAGS.translation_output_filename

    # transformer model
    subtokenizer = tokenization.restore_subtokenizer_from_vocab_files(
        vocab_path)
    vocab_size = subtokenizer.vocab_size
    model = TransformerModel(vocab_size=vocab_size,
                             encoder_stack_size=encoder_stack_size,
                             decoder_stack_size=decoder_stack_size,
                             hidden_size=hidden_size,
                             num_heads=num_heads,
                             filter_size=filter_size,
                             dropout_rate=dropout_rate,
                             extra_decode_length=extra_decode_length,
                             beam_width=beam_width,
                             alpha=alpha)

    ckpt = tf.train.Checkpoint(model=model)
    latest_ckpt = tf.train.latest_checkpoint(model_dir)
    if latest_ckpt is None:
        raise ValueError('No checkpoint is found in %s' % model_dir)
    print('Loaded latest checkpoint ', latest_ckpt)
    ckpt.restore(latest_ckpt).expect_partial()

    # build evaluator
    evaluator = SequenceTransducerEvaluator(model, subtokenizer,
                                            decode_batch_size, src_max_length)

    # translates input sequences, and optionally evaluates BLEU score if
    # groundtruth target sequences are provided
    if target_text_filename is not None:
        case_insensitive_score, case_sensitive_score = evaluator.evaluate(
            source_text_filename, target_text_filename,
            translation_output_filename)
        print('BLEU(case insensitive): %f' % case_insensitive_score)
        print('BLEU(case sensitive): %f' % case_sensitive_score)
    else:
        evaluator.translate(source_text_filename, translation_output_filename)
        print(
            'Inference mode: no groundtruth translations.\nTranslations written '
            'to file "%s"' % translation_output_filename)
Example #3
def main(_):
  filename = FLAGS.filename
  vocab_path = FLAGS.vocab_path
  model_dir = FLAGS.model_dir

  m_seq_len = FLAGS.m_seq_len
  cutoffs = FLAGS.cutoffs
  adaptive_embedding = FLAGS.adaptive_embedding

  stack_size = FLAGS.stack_size
  hidden_size = FLAGS.hidden_size
  num_heads = FLAGS.num_heads
  filter_size = FLAGS.filter_size
  tie_biases = FLAGS.tie_biases

  with tf.io.gfile.GFile(filename + '.json') as f:
    dataset_config = json.load(f)

  subword = dataset_config['subword']
  batch_size = dataset_config['batch_size']
  
  if subword:
    tokenizer = tokenization.restore_subtokenizer_from_vocab_files(vocab_path)
  else:
    tokenizer = tokenization.restore_tokenizer_from_vocab_files(vocab_path)

  vocab_size = tokenizer.vocab_size
  cutoffs = list(map(int, cutoffs))

  model = TransformerXLModel(adaptive_embedding,
                             vocab_size,
                             cutoffs,
                             stack_size,
                             hidden_size,
                             num_heads,
                             filter_size,
                             tie_biases=tie_biases)

  parse_fn = functools.partial(parse_fn_sequence_pair, 
                               keys=('inputs', 'labels'), 
                               dtype='int32')
  dataset = tf.data.TFRecordDataset(filename + '.tfrecord')
  dataset = dataset.map(parse_fn).batch(batch_size)

  ckpt = tf.train.Checkpoint(model=model)
  latest_ckpt = tf.train.latest_checkpoint(model_dir)
  if latest_ckpt is None:
    raise ValueError('No checkpoint is found in %s' % model_dir)
  print('Loaded latest checkpoint', latest_ckpt)
  ckpt.restore(latest_ckpt).expect_partial()

  evaluator = TransformerXLModelEvaluator(model, 
                                          m_seq_len, 
                                          batch_size, 
                                          vocab_size, 
                                          adaptive_embedding)
  print('Evaluating file %s...' % (filename + '.tfrecord'))
  perplexity = evaluator.evaluate(dataset)
  print('Perplexity:', perplexity)
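parse_fn_sequence_pair is defined elsewhere in the repository. A plausible minimal implementation, assuming each TFRecord example stores the token ids as variable-length int64 features under the given keys, could look like this:

import tensorflow as tf

def parse_fn_sequence_pair(serialized, keys=('inputs', 'labels'), dtype='int32'):
  # Each key maps to a variable-length list of token ids.
  features = {key: tf.io.VarLenFeature(tf.int64) for key in keys}
  parsed = tf.io.parse_single_example(serialized, features)
  return tuple(
      tf.cast(tf.sparse.to_dense(parsed[key]), dtype) for key in keys)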
Example #4
def main(_):
    data_dir = FLAGS.data_dir
    vocab_path = FLAGS.vocab_path
    model_dir = FLAGS.model_dir

    encoder_stack_size = FLAGS.encoder_stack_size
    decoder_stack_size = FLAGS.decoder_stack_size
    hidden_size = FLAGS.hidden_size
    num_heads = FLAGS.num_heads
    filter_size = FLAGS.filter_size
    dropout_rate = FLAGS.dropout_rate

    max_num_tokens = FLAGS.max_num_tokens
    max_length = FLAGS.max_length
    num_parallel_calls = FLAGS.num_parallel_calls

    learning_rate = FLAGS.learning_rate
    learning_rate_warmup_steps = FLAGS.learning_rate_warmup_steps
    optimizer_adam_beta1 = FLAGS.optimizer_adam_beta1
    optimizer_adam_beta2 = FLAGS.optimizer_adam_beta2
    optimizer_adam_epsilon = FLAGS.optimizer_adam_epsilon

    label_smoothing = FLAGS.label_smoothing
    num_steps = FLAGS.num_steps
    save_ckpt_per_steps = FLAGS.save_ckpt_per_steps

    # transformer model
    subtokenizer = tokenization.restore_subtokenizer_from_vocab_files(
        vocab_path)
    vocab_size = subtokenizer.vocab_size
    model = TransformerModel(vocab_size=vocab_size,
                             encoder_stack_size=encoder_stack_size,
                             decoder_stack_size=decoder_stack_size,
                             hidden_size=hidden_size,
                             num_heads=num_heads,
                             filter_size=filter_size,
                             dropout_rate=dropout_rate)

    # training dataset
    builder = dataset.DynamicBatchDatasetBuilder(max_num_tokens, True,
                                                 max_length,
                                                 num_parallel_calls)
    filenames = sorted(glob.glob(os.path.join(data_dir, SUFFIX)))
    train_ds = builder.build_dataset(filenames)

    # learning rate and optimizer
    optimizer = tf.keras.optimizers.Adam(utils.LearningRateSchedule(
        learning_rate, hidden_size, learning_rate_warmup_steps),
                                         optimizer_adam_beta1,
                                         optimizer_adam_beta2,
                                         epsilon=optimizer_adam_epsilon)

    # checkpoint
    ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)

    # build trainer and start training
    trainer = SequenceTransducerTrainer(model, label_smoothing)
    trainer.train(train_ds, optimizer, ckpt, model_dir, num_steps,
                  save_ckpt_per_steps)
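utils.LearningRateSchedule is not shown here; given the positional arguments (learning_rate, hidden_size, learning_rate_warmup_steps) passed above, it is presumably the standard Transformer rsqrt schedule with linear warmup. A minimal sketch under that assumption:

import tensorflow as tf

class LearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Rsqrt decay scaled by rsqrt(hidden_size), with linear warmup."""

    def __init__(self, initial_learning_rate, hidden_size, warmup_steps):
        super().__init__()
        self._initial_learning_rate = initial_learning_rate
        self._hidden_size = hidden_size
        self._warmup_steps = warmup_steps

    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        lr = self._initial_learning_rate * self._hidden_size ** -0.5
        return lr * tf.minimum(tf.math.rsqrt(step),
                               step * self._warmup_steps ** -1.5)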
Example #5
def main(_):
    filename = FLAGS.filename
    vocab_path = FLAGS.vocab_path
    model_dir = FLAGS.model_dir

    m_seq_len = FLAGS.m_seq_len
    cutoffs = FLAGS.cutoffs
    adaptive_embedding = FLAGS.adaptive_embedding
    stack_size = FLAGS.stack_size
    hidden_size = FLAGS.hidden_size
    num_heads = FLAGS.num_heads
    filter_size = FLAGS.filter_size
    dropout_rate = FLAGS.dropout_rate
    dropout_rate_attention = FLAGS.dropout_rate_attention
    tie_biases = FLAGS.tie_biases

    learning_rate = FLAGS.learning_rate
    learning_rate_warmup_steps = FLAGS.learning_rate_warmup_steps
    optimizer_adam_beta1 = FLAGS.optimizer_adam_beta1
    optimizer_adam_beta2 = FLAGS.optimizer_adam_beta2
    optimizer_adam_epsilon = FLAGS.optimizer_adam_epsilon

    warmup_lr = FLAGS.warmup_lr
    clip_norm = FLAGS.clip_norm
    alpha = FLAGS.alpha
    num_steps = FLAGS.num_steps
    save_ckpt_per_steps = FLAGS.save_ckpt_per_steps

    with tf.io.gfile.GFile(filename + '.json') as f:
        dataset_config = json.load(f)

    subword = dataset_config['subword']
    batch_size = dataset_config['batch_size']

    # transformerxl model
    if subword:
        tokenizer = tokenization.restore_subtokenizer_from_vocab_files(
            vocab_path)
    else:
        tokenizer = tokenization.restore_tokenizer_from_vocab_files(vocab_path)

    vocab_size = tokenizer.vocab_size
    cutoffs = list(map(int, cutoffs))

    model = TransformerXLModel(adaptive_embedding,
                               vocab_size,
                               cutoffs,
                               stack_size,
                               hidden_size,
                               num_heads,
                               filter_size,
                               dropout_rate=dropout_rate,
                               dropout_rate_attention=dropout_rate_attention,
                               tie_biases=tie_biases)

    # training dataset
    parse_fn = functools.partial(parse_fn_sequence_pair,
                                 keys=('inputs', 'labels'),
                                 dtype='int32')
    dataset = tf.data.TFRecordDataset(filename + '.tfrecord')
    dataset = dataset.map(parse_fn).repeat().batch(batch_size)

    # learning rate and optimizer
    schedule = CosineDecayLearningRateSchedule(
        learning_rate=learning_rate,
        decay_steps=num_steps - learning_rate_warmup_steps,
        alpha=alpha,
        warmup_steps=learning_rate_warmup_steps,
        warmup_lr=warmup_lr)
    optimizer = tf.keras.optimizers.Adam(schedule,
                                         optimizer_adam_beta1,
                                         optimizer_adam_beta2,
                                         epsilon=optimizer_adam_epsilon)

    # checkpoint
    ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)

    # build trainer and start training
    trainer = TransformerXLModelTrainer(model, m_seq_len, batch_size,
                                        vocab_size, adaptive_embedding)
    trainer.train(dataset, optimizer, ckpt, model_dir, num_steps,
                  save_ckpt_per_steps, clip_norm)
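CosineDecayLearningRateSchedule is likewise defined elsewhere. A minimal sketch that matches the keyword arguments used above, combining a linear warmup with tf.keras.optimizers.schedules.CosineDecay (the exact split between warmup and decay phases is an assumption):

import tensorflow as tf

class CosineDecayLearningRateSchedule(
        tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup from warmup_lr to learning_rate, then cosine decay."""

    def __init__(self, learning_rate, decay_steps, alpha, warmup_steps, warmup_lr):
        super().__init__()
        self._learning_rate = learning_rate
        self._warmup_steps = warmup_steps
        self._warmup_lr = warmup_lr
        self._cosine_decay = tf.keras.optimizers.schedules.CosineDecay(
            learning_rate, decay_steps, alpha=alpha)

    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        warmup_steps = tf.cast(self._warmup_steps, tf.float32)
        warmup = self._warmup_lr + (
            self._learning_rate - self._warmup_lr) * step / warmup_steps
        return tf.cond(step < warmup_steps,
                       lambda: warmup,
                       lambda: self._cosine_decay(step - warmup_steps))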
Example #6
def main(_):
  prompt_filename = FLAGS.prompt_filename
  filename = FLAGS.filename
  vocab_path = FLAGS.vocab_path
  model_dir = FLAGS.model_dir

  decoding_method = FLAGS.decoding_method
  m_seq_len = FLAGS.m_seq_len
  cutoffs = FLAGS.cutoffs
  adaptive_embedding = FLAGS.adaptive_embedding
  num_tokens = FLAGS.num_tokens

  stack_size = FLAGS.stack_size
  hidden_size = FLAGS.hidden_size
  num_heads = FLAGS.num_heads
  filter_size = FLAGS.filter_size
  tie_biases = FLAGS.tie_biases

  with tf.io.gfile.GFile(filename + '.json') as f:
    dataset_config = json.load(f)

  subword = dataset_config['subword']

  if subword:
    tokenizer = tokenization.restore_subtokenizer_from_vocab_files(vocab_path)
  else:
    tokenizer = tokenization.restore_tokenizer_from_vocab_files(vocab_path)

  vocab_size = tokenizer.vocab_size
  cutoffs = list(map(int, cutoffs))

  model = TransformerXLModel(adaptive_embedding,
                             vocab_size,
                             cutoffs,
                             stack_size, 
                             hidden_size, 
                             num_heads, 
                             filter_size,
                             tie_biases=tie_biases)

  ckpt = tf.train.Checkpoint(model=model)
  latest_ckpt = tf.train.latest_checkpoint(model_dir)
  if latest_ckpt is None:
    raise ValueError('No checkpoint is found in %s' % model_dir)
  print('Loaded latest checkpoint', latest_ckpt)
  ckpt.restore(latest_ckpt).expect_partial()

  inferencer = TransformerXLModelInferencer(model, 
                                            m_seq_len, 
                                            1, 
                                            vocab_size, 
                                            adaptive_embedding, 
                                            decoding_method,
                                            num_tokens=num_tokens)

  with open(prompt_filename) as f:
    prompt = f.read()

  prompt_token_ids = tokenizer.encode(prompt, add_eos=False)
  token_id_list = inferencer.infer(tf.constant([prompt_token_ids]))
  text = tokenizer.decode(token_id_list) 
  print('\nPrompted Sequence:\n')
  print(prompt, '\n\n')
  print('Predicted sequence:\n')
  print(text)
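The decoding_method flag presumably controls how the inferencer draws each new token id from the model's output logits. Independent of the TransformerXLModelInferencer internals, here is a minimal sketch of the two usual options (greedy argmax versus sampling from the softmax distribution); the helper name is hypothetical:

import tensorflow as tf

def pick_next_token(logits, decoding_method='greedy'):
  # logits: [batch, vocab_size] scores for the next position.
  if decoding_method == 'greedy':
    return tf.argmax(logits, axis=-1, output_type=tf.int32)
  # Sample one token id per batch element from the softmax of the logits.
  return tf.cast(tf.random.categorical(logits, num_samples=1)[:, 0], tf.int32)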