  def testLoadWordVectorsWithoutFileRaisesError(self):
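    """Test that load_word_vectors raises without a GloVe embedding file."""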
    vocab = {"foo", "bar", "baz", "qux", "."}
    with self.assertRaisesRegex(
        ValueError, "Cannot find GloVe embedding file at"):
      data.load_word_vectors(self._temp_data_dir, vocab)

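    # The error should persist when the "glove" directory exists but the
    # embedding file inside it is still missing.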
    os.makedirs(os.path.join(self._temp_data_dir, "glove"))
    with self.assertRaisesRegex(
        ValueError, "Cannot find GloVe embedding file at"):
      data.load_word_vectors(self._temp_data_dir, vocab)

  def testSnliData(self):
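    """Test SnliData batching and the shapes its generator yields."""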
    snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
    fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
    os.makedirs(snli_1_0_dir)
    self._createFakeSnliData(fake_train_file)

    glove_dir = os.path.join(self._temp_data_dir, "glove")
    os.makedirs(glove_dir)
    glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
    self._createFakeGloveData(glove_file)

    vocab = data.load_vocabulary(self._temp_data_dir)
    word2index, _ = data.load_word_vectors(self._temp_data_dir, vocab)

    train_data = data.SnliData(fake_train_file, word2index)
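    # The fake file holds four examples, so the batch count is
    # ceil(4 / batch_size).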
    self.assertEqual(4, train_data.num_batches(1))
    self.assertEqual(2, train_data.num_batches(2))
    self.assertEqual(2, train_data.num_batches(3))
    self.assertEqual(1, train_data.num_batches(4))

    generator = train_data.get_generator(2)()
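    # Tensors are time-major: shaped (sequence_length, batch_size).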
    for _ in range(2):
      label, prem, prem_trans, hypo, hypo_trans = next(generator)
      self.assertEqual(2, len(label))
      self.assertEqual((4, 2), prem.shape)
      self.assertEqual((5, 2), prem_trans.shape)
      self.assertEqual((3, 2), hypo.shape)
      self.assertEqual((3, 2), hypo_trans.shape)

  def testLoadWordVectors(self):
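    """Test loading GloVe word vectors for a vocabulary from a fake file."""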
    glove_dir = os.path.join(self._temp_data_dir, "glove")
    os.makedirs(glove_dir)
    glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")

    words = [".", ",", "foo", "bar", "baz"]
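    # Write a fake GloVe file in which every dimension of word i's vector
    # equals i * 0.1.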
    with open(glove_file, "wt") as f:
      for i, word in enumerate(words):
        f.write("%s " % word)
        for j in range(data.WORD_VECTOR_LEN):
          f.write("%.5f" % (i * 0.1))
          if j < data.WORD_VECTOR_LEN - 1:
            f.write(" ")
          else:
            f.write("\n")

    vocab = {"foo", "bar", "baz", "qux", "."}
    # Notice that "qux" is not present in `words`.
    word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)

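    # "qux" has no GloVe vector and is dropped; the remaining four words plus
    # the special <unk> and <pad> tokens give six entries.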
    self.assertEqual(6, len(word2index))
    self.assertEqual(0, word2index["<unk>"])
    self.assertEqual(1, word2index["<pad>"])
    self.assertEqual(2, word2index["."])
    self.assertEqual(3, word2index["foo"])
    self.assertEqual(4, word2index["bar"])
    self.assertEqual(5, word2index["baz"])
    self.assertEqual((6, data.WORD_VECTOR_LEN), embed.shape)
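    # <unk>, <pad>, and "." (written with value 0.0) map to all-zero rows.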
    self.assertAllClose([0.0] * data.WORD_VECTOR_LEN, embed[0, :])
    self.assertAllClose([0.0] * data.WORD_VECTOR_LEN, embed[1, :])
    self.assertAllClose([0.0] * data.WORD_VECTOR_LEN, embed[2, :])
    self.assertAllClose([0.2] * data.WORD_VECTOR_LEN, embed[3, :])
    self.assertAllClose([0.3] * data.WORD_VECTOR_LEN, embed[4, :])
    self.assertAllClose([0.4] * data.WORD_VECTOR_LEN, embed[5, :])

  def testInferSpinnThrowsErrorIfOnlyOneSentenceIsSpecified(self):
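    """Test that inference fails if only one of the two sentences is given."""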
    snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
    self._create_test_data(snli_1_0_dir)

    vocab = data.load_vocabulary(self._temp_data_dir)
    word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)

    config = _test_spinn_config(
        data.WORD_VECTOR_LEN, 4,
        logdir=os.path.join(self._temp_data_dir, "logdir"),
        inference_sentences=("( foo ( bar . ) )", None))
    with self.assertRaises(ValueError):
      spinn.train_or_infer_spinn(embed, word2index, None, None, None, config)

  def testInferSpinnWorks(self):
    """Test inference with the SPINN model."""
    snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
    self._create_test_data(snli_1_0_dir)

    vocab = data.load_vocabulary(self._temp_data_dir)
    word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)

    config = _test_spinn_config(
        data.WORD_VECTOR_LEN, 4,
        logdir=os.path.join(self._temp_data_dir, "logdir"),
        inference_sentences=("( foo ( bar . ) )", "( bar ( foo . ) )"))
    logits = spinn.train_or_infer_spinn(
        embed, word2index, None, None, None, config)
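    # SNLI has three classes, so inference yields a length-3 logits vector.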
    self.assertEqual(tf.float32, logits.dtype)
    self.assertEqual((3,), logits.shape)

  def testTrainSpinn(self):
    """Test with fake toy SNLI data and GloVe vectors."""

    # 1. Create and load a fake SNLI data file and a fake GloVe embedding file.
    snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
    fake_train_file = self._create_test_data(snli_1_0_dir)

    vocab = data.load_vocabulary(self._temp_data_dir)
    word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)

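    # The same fake file stands in for the train, dev, and test splits.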
    train_data = data.SnliData(fake_train_file, word2index)
    dev_data = data.SnliData(fake_train_file, word2index)
    test_data = data.SnliData(fake_train_file, word2index)

    # 2. Create a fake config.
    config = _test_spinn_config(
        data.WORD_VECTOR_LEN, 4,
        logdir=os.path.join(self._temp_data_dir, "logdir"))

    # 3. Test training of a SPINN model.
    trainer = spinn.train_or_infer_spinn(
        embed, word2index, train_data, dev_data, test_data, config)

    # 4. Load train loss values from the summary files and verify that they
    #    decrease with training.
    summary_file = glob.glob(os.path.join(config.logdir, "events.out.*"))[0]
    events = summary_test_util.events_from_file(summary_file)
    train_losses = [event.summary.value[0].simple_value for event in events
                    if event.summary.value
                    and event.summary.value[0].tag == "train/loss"]
    self.assertEqual(config.epochs, len(train_losses))

    # 5. Verify that checkpoints exist and contain all the expected variables.
    self.assertTrue(glob.glob(os.path.join(config.logdir, "ckpt*")))
    object_graph = trackable_utils.object_metadata(
        checkpoint_management.latest_checkpoint(config.logdir))
    ckpt_variable_names = set()
    for node in object_graph.nodes:
      for attribute in node.attributes:
        ckpt_variable_names.add(attribute.full_name)
    self.assertIn("global_step", ckpt_variable_names)
    for v in trainer.variables:
      variable_name = v.name[:v.name.index(":")] if ":" in v.name else v.name
      self.assertIn(variable_name, ckpt_variable_names)

  def testEncodeSingleSentence(self):
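    """Test encoding a single sentence into word indices and transitions."""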
    snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
    fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
    os.makedirs(snli_1_0_dir)
    self._createFakeSnliData(fake_train_file)
    vocab = data.load_vocabulary(self._temp_data_dir)
    glove_dir = os.path.join(self._temp_data_dir, "glove")
    os.makedirs(glove_dir)
    glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
    self._createFakeGloveData(glove_file)
    word2index, _ = data.load_word_vectors(self._temp_data_dir, vocab)

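    # The same binary parse with varying whitespace; all variants should
    # encode identically.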
    sentence_variants = [
        "( Foo ( ( bar baz ) . ) )",
        " ( Foo ( ( bar baz ) . ) ) ",
        "( Foo ( ( bar baz ) . )  )"]
    for sentence in sentence_variants:
      word_indices, shift_reduce = data.encode_sentence(sentence, word2index)
      self.assertEqual(np.int64, word_indices.dtype)
      self.assertEqual((5, 1), word_indices.shape)
      self.assertAllClose(
          np.array([[3, 3, 3, 2, 3, 2, 2]], dtype=np.int64).T, shift_reduce)