Example #1
0
 def test_single_word(self):
     """A window of 1 token should yield one (input, target) token pair."""
     stream = getStream(self.test_words_short)
     dataset = split_corpus_dataset.TokenizedSplitSingleTarget(
         stream, self.vocab, 1)
     first_item = next(iter(dataset))
     # single-token input paired with the single following token as target
     expected = (torch.LongTensor([0]), torch.LongTensor([1]))
     self.assertEqual(first_item, expected)
Example #2
0
 def test_two_word_seq(self):
     """A window of 2 tokens should yield overlapping (input, target) pairs."""
     stream = getStream(self.test_words_short)
     dataset = split_corpus_dataset.TokenizedSplitSingleTarget(
         stream, self.vocab, 2)
     produced = list(dataset)
     expected = [
         (torch.LongTensor([0, 1]), torch.LongTensor([2])),
         (torch.LongTensor([1, 2]), torch.LongTensor([0])),
     ]
     self.assertEqual(produced, expected)
Example #3
0
 def test_single_word_len(self):
     """With window 1, the dataset length is the corpus length minus one."""
     stream = getStream(self.test_words_short)
     dataset = split_corpus_dataset.TokenizedSplitSingleTarget(
         stream, self.vocab, 1)
     # the last token has no following target, hence the -1
     expected_len = len(self.test_words_short) - 1
     self.assertEqual(len(dataset), expected_len)
Example #4
0
 def test_two_word_retrieval(self):
     """input_words() should expose the raw input word strings per window."""
     stream = getStream(self.test_words_short)
     dataset = split_corpus_dataset.TokenizedSplitSingleTarget(
         stream, self.vocab, 2)
     # we expect the input words
     retrieved = [words for words in dataset.input_words()]
     self.assertEqual(retrieved, ['a b', 'b c'])
Example #5
0
 def test_len_no_output(self):
     """A window longer than the corpus should produce an empty dataset."""
     stream = getStream(self.test_words_short)
     dataset = split_corpus_dataset.TokenizedSplitSingleTarget(
         stream, self.vocab, 5)
     # no 5-token window fits in the short corpus, so nothing is yielded
     self.assertEqual(len(dataset), 0)