def test_ragged(self):
    source = tf.ragged.constant([['X', 'X'], ['X']])
    expected = tf.constant([['<X>', '<X>'], ['<X>', '']])
    result = wrap_with(source, '<', '>').to_tensor(default_value='')
    expected, result = self.evaluate([expected, result])
    self.assertAllEqual(expected, result)

def test_unicode(self):
    expected = u'надо'
    result = wrap_with(u'ад', u'н', u'о')
    expected = tf.convert_to_tensor(expected, dtype=tf.string)
    expected, result = self.evaluate([expected, result])
    self.assertAllEqual(expected, result)

def test_inference_shape(self):
    source = [
        ['1', '2', '3'],
        ['4', '5', '6'],
    ]
    result = wrap_with(source, '<', '>')
    self.assertAllEqual([2, 3], result.shape.as_list())

def test_actual_shape(self):
    source = [
        ['1', '2', '3'],
        ['4', '5', '6'],
    ]
    result = wrap_with(source, '<', '>')
    result = tf.shape(result)
    result = self.evaluate(result)
    self.assertAllEqual([2, 3], result.tolist())

def ngram_features(input_words, minn, maxn):
    # Normalize, strip accent marks, lower-case and zero out digits,
    # then wrap each word in '<'/'>' before extracting character n-grams.
    input_words = normalize_unicode(input_words, 'NFKC')
    input_words = replace_string(  # accentuation
        input_words,
        [u'\u0060', u' \u0301', u'\u02CA', u'\u02CB', u'\u0300', u'\u0301'],
        [''] * 6)
    input_words = lower_case(input_words)
    input_words = zero_digits(input_words)
    input_words = wrap_with(input_words, '<', '>')
    word_ngrams = char_ngrams(input_words, minn, maxn, itself='ALONE')

    return word_ngrams

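# Illustrative sketch of the pipeline above (the input word is hypothetical and
# the ops are assumed to behave as their names suggest): an input like u'Ко́т1'
# is NFKC-normalized, its combining acute accent is stripped, it is lower-cased,
# digits become '0', and it is wrapped to '<кот0>' before char_ngrams extracts
# character n-grams of length minn..maxn.
#
#   features = ngram_features(tf.constant([u'Ко́т1']), minn=3, maxn=5)
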
def test_skip(self):
    result = wrap_with([['X', 'y']], '<', '>', skip=['y'])
    result = self.evaluate(result)
    self.assertAllEqual([[b'<X>', b'y']], result)

def test_2d(self):
    result = wrap_with([['X']], '<', '>')
    result = self.evaluate(result)
    self.assertAllEqual([[b'<X>']], result)

def test_0d(self):
    result = wrap_with('X', '<', '>')
    result = self.evaluate(result)
    self.assertAllEqual(b'<X>', result)

def test_empty_borders(self):
    result = wrap_with('test', '', '')
    result = self.evaluate(result)
    self.assertAllEqual(b'test', result)

def test_empty(self):
    result = wrap_with('', '<', '>')
    result = self.evaluate(result)
    self.assertAllEqual(b'<>', result)