Example #1
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens),
                             [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(tokens, [
            "▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this",
            "▁is", "▁fal", "s", "é", "."
        ])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                "▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and",
                "▁this", "▁is", "▁fal", "s", "<unk>", "."
            ],
        )
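
The method above assumes a surrounding unittest.TestCase and a SAMPLE_VOCAB fixture, neither of which is shown. A minimal harness sketch that would run it, assuming SAMPLE_VOCAB points to a small local SentencePiece model (the path and the class name below are placeholders, not the real fixture or test-suite names):

import unittest

from transformers import AlbertTokenizer

# Placeholder path; any small SentencePiece model file would work here.
SAMPLE_VOCAB = "fixtures/spiece.model"


class AlbertTokenizationTest(unittest.TestCase):
    # paste test_full_tokenizer from above into this class
    pass


if __name__ == "__main__":
    unittest.main()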
Example #2
import json
import os

import numpy as np
from tqdm import tqdm

# `args` (with `args.data`) and `tokenizer` are assumed to be defined earlier in the script.
tag = {}  # POS tag -> accumulator; initialised here so the snippet is self-contained
with open(os.path.join(args.data, 'ontonotes/const/pos/labels.txt')) as f:
  for line in f:
    pos = line.rstrip()
    if pos == "":
      break
    tag[pos] = np.asarray([0, 0])

text_file = open(os.path.join(args.data, 'ontonotes/const/pos/conll-2012-test.json'), 'r')
for i, line in tqdm(enumerate(text_file.readlines())):
  data = json.loads(line)
  tokens = data['text'].split(' ')
  labels = data['targets']
  re_2_o = []       # subword position -> index of the original word it came from
  retokenized = []  # subword ids of the whole sentence
  for word_id, token in enumerate(tokens):
    piece_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(token))
    retokenized.extend(piece_ids)
    re_2_o.extend([word_id] * len(piece_ids))
  model_inputs = []
  for span in labels:
    span1 = []  # subword positions covered by the target span
    indexed_tokens = retokenized.copy()
    for word_pos in range(span['span1'][0], span['span1'][1]):
      select = np.where(np.asarray(re_2_o) == word_pos)[0]
      span1.extend(select)
    # Replace every subword of the span with the mask token, then add special tokens.
    for j in span1:
      indexed_tokens[j] = tokenizer.mask_token_id
    indexed_tokens = [tokenizer.cls_token_id] + indexed_tokens + [tokenizer.sep_token_id]
    model_inputs.append(indexed_tokens)

  retokenized.insert(0, tokenizer.cls_token_id)
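
The re_2_o list is the word-to-subword alignment that makes the span masking work: position i of retokenized belongs to word re_2_o[i]. A small self-contained sketch of that bookkeeping with a made-up tokenization (no real tokenizer; the piece counts are illustrative only):

import numpy as np

# Pretend the tokenizer split three words into 1, 3 and 2 subword pieces.
pieces_per_word = [1, 3, 2]
re_2_o = []
for word_id, n_pieces in enumerate(pieces_per_word):
  re_2_o.extend([word_id] * n_pieces)   # -> [0, 1, 1, 1, 2, 2]

# Selecting word span [1, 2) picks out every subword position of word 1.
span1 = np.where(np.asarray(re_2_o) == 1)[0]
print(span1)                            # -> [1 2 3]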