def test_serial_with_python(self):
  """Serial accepts plain Python map/filter callables between data layers."""
  def pairs(_):
    return ((n, n + 1) for n in range(10))

  pipeline = data.Serial(
      pairs,
      lambda gen: map(lambda pair: (pair[0], pair[1] + 1), gen),
      lambda gen: filter(lambda pair: pair[0] % 2 == 1, gen),
      data.Batch(2),
  )
  first_batch = next(pipeline())
  self.assertLen(first_batch, 2)
  xs, ys = first_batch
  # After the +1 map each tuple is (i, i + 2); the filter keeps odd i,
  # so the first two surviving tuples are (1, 3) and (3, 5).
  self.assertEqual(xs[0], 1)
  self.assertEqual(ys[0], 3)
  self.assertEqual(xs[1], 3)
  self.assertEqual(ys[1], 5)
def process_c4_with_span_corruption(spm_path=None, extra_ids=0, train=False,
                                    max_length=100, noise_density=0.15,
                                    mean_noise_span_length=3.0, seed1=None,
                                    seed2=None):
  """Builds a span-corruption preprocessing pipeline over the C4 test data.

  The pipeline reads C4 text from the local test-data directory, tokenizes
  it with SentencePiece, chunks it, applies random noise-span masking, and
  emits padded, loss-weighted batches of size 2.

  Args:
    spm_path: Path to the SentencePiece model file.
    extra_ids: Number of sentinel ids appended to the vocabulary.
    train: Whether to read the train split of the dataset.
    max_length: Maximum token-chunk length fed to noise masking.
    noise_density: Fraction of tokens to corrupt.
    mean_noise_span_length: Average length of each corrupted span.
    seed1: First seed for the noise-mask RNG.
    seed2: Second seed for the noise-mask RNG.

  Returns:
    A `data.Serial` pipeline producing batched (inputs, targets, weights).
  """
  steps = [
      data.TFDS('c4/en:2.3.0', data_dir=_TESTDATA, keys=('text',),
                train=train),
      data.SentencePieceTokenize(spm_path=spm_path, extra_ids=extra_ids),
      data.generate_sequential_chunks(max_length=max_length),
      data.generate_random_noise_mask(
          noise_density=noise_density,
          mean_noise_span_length=mean_noise_span_length,
          seed1=seed1,
          seed2=seed2),
      data.consume_noise_mask(vocab_size=32000 + extra_ids),
      data.FilterEmptyExamples(),
      # Append value 1 (presumably EOS — verify against the vocab) to both
      # the input (key 0) and target (key 1) streams.
      data.AppendValue(val={0: [1], 1: [1]}),
      # NOTE(review): pad lengths are fixed at 100/30 rather than derived
      # from `max_length` — confirm this is intentional for callers that
      # override `max_length`.
      data.PadToLength(len_map={0: 100, 1: 30}, pad_value={0: 0, 1: 0}),
      data.AddLossWeights(id_to_mask=0),
      data.Batch(batch_size=2),
  ]
  return data.Serial(*steps)
def test_serial(self):
  """Serial chains a generator source with Shuffle and Batch layers."""
  def source(_):
    return ((n, n + 1) for n in range(10))

  pipeline = data.Serial(source, data.Shuffle(3), data.Batch(10))
  batch = next(pipeline())
  # A batch is a pair of arrays: one for each element of the source tuples.
  self.assertLen(batch, 2)
  self.assertEqual(batch[0].shape, (10,))