Code example #1
def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):
    """
    将一个样本进行分析,然后将字转化为id, 标签转化为id,然后结构化到InputFeatures对象中
    :param ex_index: index
    :param example: 一个样本
    :param label_list: 标签列表
    :param max_seq_length:
    :param tokenizer:
    :param output_dir
    :param mode:
    :return:
    """
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i

    tokens_a = tokenizer.tokenize(example.text_a)
    tokens_b = None
    # if example.text_b:
    #     tokens_b = tokenizer.tokenize(example.text_b)

    # Account for [CLS] and [SEP] with "- 2"
    if len(tokens_a) > max_seq_length - 2:
        tokens_a = tokens_a[0:(max_seq_length - 2)]

    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)

    # Locate the four positions pos1_head, pos1_tail, pos2_head, pos2_tail for the position embeddings
    sub, obj = example.text_b.split('&&')
    sub = tokenizer.tokenize(sub)
    obj = tokenizer.tokenize(obj)

    pos1_head = -1
    pos1_tail = -1
    pos2_head = -1
    pos2_tail = -1
    for i in range(len(tokens) - 1):
        cut = tokens[i:min(i + len(sub), len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(''.join(sub).split('##')):
            pos1_head = i
            pos1_tail = i + len(cut) - 1
            break
        cut = tokens[i:min(i + len(sub) - 1, len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(''.join(sub).split('##')):
            pos1_head = i
            pos1_tail = i + len(cut) - 1
            break
        cut = tokens[i:min(i + len(sub) + 1, len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(''.join(sub).split('##')):
            pos1_head = i
            pos1_tail = i + len(cut) - 1
            break
    if pos1_head == -1:
        pos1_head = pos1_tail = len(tokens) - 1
        # print(tokens)
        # print(sub)
        # raise ValueError

    for i in range(len(tokens) - 1):
        cut = tokens[i:min(i + len(obj), len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(''.join(obj).split('##')):
            pos2_head = i
            pos2_tail = i + len(cut) - 1
            break
        cut = tokens[i:min(i + len(obj) - 1, len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(''.join(obj).split('##')):
            pos2_head = i
            pos2_tail = i + len(cut) - 1
            break
        cut = tokens[i:min(i + len(obj) + 1, len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(''.join(obj).split('##')):
            pos2_head = i
            pos2_tail = i + len(cut) - 1
            break
    if pos2_head == -1:
        pos2_head = pos2_tail = len(tokens) - 1
        # print(tokens)
        # print(obj)
        # raise ValueError

    # Fill in the position ids and the PCNN piecewise mask
    position_ids = np.zeros([max_seq_length, 4], dtype=np.int32)
    pcnn_masks = []

    pos_min_h = 0
    pos_min_t = 0
    pos_max_h = 0
    pos_max_t = 0
    if pos1_head < pos2_head:
        pos_min_h = pos1_head
        pos_min_t = pos1_tail
        pos_max_h = pos2_head
        pos_max_t = pos2_tail
    else:
        pos_min_h = pos2_head
        pos_min_t = pos2_tail
        pos_max_h = pos1_head
        pos_max_t = pos1_tail

    for i in range(len(tokens)):
        position_ids[i] = np.array(
            [i - pos_min_h, i - pos_min_t, i - pos_max_h, i - pos_max_t])
        if i < pos_min_h:
            pcnn_masks.append(1)
        elif i <= pos_min_t:
            pcnn_masks.append(0)
        elif i < pos_max_h:
            pcnn_masks.append(2)
        elif i <= pos_max_t:
            pcnn_masks.append(0)
        else:
            pcnn_masks.append(3)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    i = len(tokens)
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        position_ids[i] = np.array(
            [i - pos_min_h, i - pos_min_t, i - pos_max_h, i - pos_max_t])
        pcnn_masks.append(0)
        i += 1

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    assert len(position_ids) == max_seq_length
    assert len(pcnn_masks) == max_seq_length

    label_id = label_map[example.label]
    if ex_index < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" %
                        " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_mask: %s" %
                        " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" %
                        " ".join([str(x) for x in segment_ids]))
        tf.logging.info("position_ids: %s" %
                        " ".join([str(x) for x in position_ids]))
        tf.logging.info("pcnn_masks: %s" %
                        " ".join([str(x) for x in pcnn_masks]))
        tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

    feature = InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        label_id=label_id,
        position_ids=position_ids,
        pcnn_masks=pcnn_masks)
    return feature
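
A note on the span search above: the three overlapping window widths (len, len - 1, len + 1) compensate for WordPiece splitting the entity mention into a slightly different number of pieces inside the sentence than when it is tokenized on its own, and the '##' markers are stripped before the surface strings are compared. A minimal sketch of the same matching idea factored out as a standalone helper (the name find_subword_span is an illustrative assumption, not part of the original code):

def find_subword_span(tokens, entity_tokens):
    """Return (head, tail) indices of entity_tokens inside tokens, comparing
    surface strings with WordPiece '##' markers removed; fall back to the last
    position when no match is found, mirroring the original code."""
    target = ''.join(entity_tokens).replace('##', '')
    for i in range(len(tokens) - 1):
        # Try window widths len, len - 1 and len + 1; the clamp to
        # len(tokens) - 1 keeps the window from covering the trailing [SEP].
        for width in (len(entity_tokens), len(entity_tokens) - 1, len(entity_tokens) + 1):
            cut = tokens[i:min(i + width, len(tokens) - 1)]
            if ''.join(cut).replace('##', '') == target:
                return i, i + len(cut) - 1
    return len(tokens) - 1, len(tokens) - 1

Example #1 above and example #2 below inline this logic twice, once for the subject span and once for the object span.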
Code example #2
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
    """
    ptr Net做主客体标注
    """
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i

    tokens_a = tokenizer.tokenize(example.text_a)

    over = 0
    if len(tokens_a) > max_seq_length - 2:
        tokens_a = tokens_a[0:(max_seq_length - 2)]
        over = 1

    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)
    # Zero-pad up to the sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    # Generate head/tail pointers for the subject and object, plus the relation label id
    sub, obj = example.text_b.split('&&')
    sub = tokenizer.tokenize(sub)
    obj = tokenizer.tokenize(obj)

    sub_head = -1
    sub_tail = -1
    obj_head = -1
    obj_tail = -1

    for i in range(len(tokens) - 1):
        cut = tokens[i:min(i + len(sub), len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(
                ''.join(sub).split('##')):
            sub_head = i
            sub_tail = i + len(cut) - 1
            break
        cut = tokens[i:min(i + len(sub) - 1, len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(
                ''.join(sub).split('##')):
            sub_head = i
            sub_tail = i + len(cut) - 1
            break
        cut = tokens[i:min(i + len(sub) + 1, len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(
                ''.join(sub).split('##')):
            sub_head = i
            sub_tail = i + len(cut) - 1
            break
    if sub_head == -1:
        sub_head = sub_tail = len(tokens) - 1
        # print(tokens)
        # print(sub)
        # raise ValueError

    for i in range(len(tokens) - 1):
        cut = tokens[i:min(i + len(obj), len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(
                ''.join(obj).split('##')):
            obj_head = i
            obj_tail = i + len(cut) - 1
            break
        cut = tokens[i:min(i + len(obj) - 1, len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(
                ''.join(obj).split('##')):
            obj_head = i
            obj_tail = i + len(cut) - 1
            break
        cut = tokens[i:min(i + len(obj) + 1, len(tokens) - 1)]
        if ''.join(''.join(cut).split('##')) == ''.join(
                ''.join(obj).split('##')):
            obj_head = i
            obj_tail = i + len(cut) - 1
            break
    if obj_head == -1:
        obj_head = obj_tail = len(tokens) - 1

    label_id = label_map[example.label]

    if ex_index < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info(
            "tokens: %s" %
            " ".join([tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x)
                                                    for x in input_ids]))
        tf.logging.info("input_mask: %s" %
                        " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" %
                        " ".join([str(x) for x in segment_ids]))
        tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
        tf.logging.info("pointer: %d %d %d %d" %
                        (sub_head, sub_tail, obj_head, obj_tail))

    feature = InputFeatures_ptr(input_ids=input_ids,
                                input_mask=input_mask,
                                segment_ids=segment_ids,
                                label_id=label_id,
                                sub_ptr=[sub_head, sub_tail],
                                obj_ptr=[obj_head, obj_tail])
    return feature, over
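
A minimal usage sketch for this pointer variant, assuming BERT's tokenization.FullTokenizer and a simple stand-in for the example object (the SimpleExample namedtuple, the vocab path, and the label set are illustrative assumptions; InputFeatures_ptr is expected to be defined elsewhere in the same module):

import collections
import tokenization  # BERT's tokenization module

# Hypothetical stand-in for the example class produced by the data processor.
SimpleExample = collections.namedtuple(
    "SimpleExample", ["guid", "text_a", "text_b", "label"])

tokenizer = tokenization.FullTokenizer(
    vocab_file="vocab.txt", do_lower_case=True)  # path is illustrative

example = SimpleExample(
    guid="train-0",
    text_a="Steve Jobs founded Apple in 1976.",
    text_b="Steve Jobs&&Apple",  # "subject&&object", as the code expects
    label="founders")            # must appear in label_list

label_list = ["NA", "founders"]
feature, over = convert_single_example(
    0, example, label_list, max_seq_length=64, tokenizer=tokenizer)
# Assumes InputFeatures_ptr exposes the pointers as attributes.
print(feature.sub_ptr, feature.obj_ptr, over)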
Code example #3
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer, output_dir, mode):
    """
    将一个样本进行分析,然后将字转化为id, 标签转化为id,然后结构化到InputFeatures对象中
    :param ex_index: index
    :param example: 一个样本
    :param label_list: 标签列表
    :param max_seq_length:
    :param tokenizer:
    :param output_dir
    :param mode:
    :return:
    """
    label_map = {}
    # Build the label -> index map (0-based here; some variants start at 1 to reserve 0 for padding)
    for (i, label) in enumerate(label_list):
        label_map[label] = i
    # 保存label->index 的map
    if not os.path.exists(os.path.join(output_dir, 'label2id.pkl')):
        with codecs.open(os.path.join(output_dir, 'label2id.pkl'), 'wb') as w:
            pickle.dump(label_map, w)

    textlist = example.text.split(' ')
    labellist = example.label.split(' ')
    tokens = []
    labels = []
    for i, word in enumerate(textlist):
        # Tokenize the word. For Chinese this amounts to splitting into characters,
        # but characters not in BERT's vocab.txt (e.g. Chinese quotation marks) go
        # through WordPiece; the character splitting could be replaced with list(input)
        token = tokenizer.tokenize(word)
        tokens.extend(token)
        label_1 = labellist[i]
        for m in range(len(token)):
            if m == 0:
                labels.append(label_1)
            else:  # this branch is rarely reached
                labels.append("X")
    # tokens = tokenizer.tokenize(example.text)
    # Truncate the sequence
    if len(tokens) >= max_seq_length - 1:
        tokens = tokens[0:(max_seq_length - 2)]  # -2 leaves room for the sentence-start and sentence-end markers
        labels = labels[0:(max_seq_length - 2)]
    ntokens = []
    segment_ids = []
    label_ids = []
    ntokens.append("[CLS]")  # 句子开始设置CLS 标志
    segment_ids.append(0)
    # append("O") or append("[CLS]") not sure!
    # O OR CLS 没有任何影响,不过我觉得O 会减少标签个数,不过拒收和句尾使用不同的标志来标注,使用LCS 也没毛病
    label_ids.append(label_map["[CLS]"])
    for i, token in enumerate(tokens):
        ntokens.append(token)
        segment_ids.append(0)
        label_ids.append(label_map[labels[i]])
    ntokens.append("[SEP]")  # 句尾添加[SEP] 标志
    segment_ids.append(0)
    # append("O") or append("[SEP]") not sure!
    label_ids.append(label_map["[SEP]"])
    input_ids = tokenizer.convert_tokens_to_ids(
        ntokens)  # convert the tokens (ntokens) to ids
    input_mask = [1] * len(input_ids)
    # label_mask = [1] * len(input_ids)
    # Zero-pad up to the sequence length
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        # we are not concerned about the padded label ids
        label_ids.append(0)
        ntokens.append("**NULL**")
        # label_mask.append(0)
    # print(len(input_ids))
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    assert len(label_ids) == max_seq_length
    # assert len(label_mask) == max_seq_length

    # Log a few example records
    if ex_index < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info(
            "tokens: %s" %
            " ".join([tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x)
                                                    for x in input_ids]))
        tf.logging.info("input_mask: %s" %
                        " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" %
                        " ".join([str(x) for x in segment_ids]))
        tf.logging.info("label_ids: %s" % " ".join([str(x)
                                                    for x in label_ids]))
        # tf.logging.info("label_mask: %s" % " ".join([str(x) for x in label_mask]))

    # Pack everything into an InputFeatures object
    feature = InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        label_ids=label_ids,
        # label_mask = label_mask
    )
    return feature
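
A minimal usage sketch for this NER variant, again assuming BERT's tokenization.FullTokenizer and a simple stand-in example object (the NERExample namedtuple, the label set, and the paths are illustrative assumptions):

import collections
import tokenization  # BERT's tokenization module

# Hypothetical stand-in for the example class produced by the NER data processor.
NERExample = collections.namedtuple("NERExample", ["guid", "text", "label"])

tokenizer = tokenization.FullTokenizer(
    vocab_file="vocab.txt", do_lower_case=True)  # path is illustrative

# Space-separated tokens with an aligned, space-separated label sequence.
example = NERExample(
    guid="train-0",
    text="John lives in Berlin",
    label="B-PER O O B-LOC")

# The label list must contain the special labels the function indexes:
# "X" for WordPiece continuations and "[CLS]"/"[SEP]" for the boundary tokens.
label_list = ["O", "B-PER", "I-PER", "B-LOC", "I-LOC", "X", "[CLS]", "[SEP]"]

# output_dir must already exist; label2id.pkl is written there on the first call.
feature = convert_single_example(
    0, example, label_list, max_seq_length=32,
    tokenizer=tokenizer, output_dir="./output", mode="train")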
Code example #4
def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):
    """
    将一个样本进行分析,然后将字转化为id, 标签转化为id,然后结构化到InputFeatures对象中
    :param ex_index: index
    :param example: 一个样本
    :param label_list: 标签列表
    :param max_seq_length:
    :param tokenizer:
    :param output_dir
    :param mode:
    :return:
    """
    label_ids = list(example.label)
    tokens_a = tokenizer.tokenize(example.text_a)
    tokens_b = None
    if example.text_b:
        tokens_b = tokenizer.tokenize(example.text_b)

    if tokens_b:
        # Modifies `tokens_a` and `tokens_b` in place so that the total
        # length is less than the specified length.
        # Account for [CLS], [SEP], [SEP] with "- 3"
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    else:
        # Account for [CLS] and [SEP] with "- 2"
        if len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[0:(max_seq_length - 2)]

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids: 0     0   0   0  0     0 0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    if ex_index < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" %
                        " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_mask: %s" %
                        " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" %
                        " ".join([str(x) for x in segment_ids]))
        tf.logging.info("label: %s" %
                        " ".join([str(x) for x in label_ids]))

    feature = InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        label_id=label_ids)
    return feature
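
Example #4 calls _truncate_seq_pair, which is not shown above. For reference, a sketch of that helper as it appears in the original BERT run_classifier.py (the surrounding repository's copy may differ slightly):

def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncate a pair of token lists in place to a maximum total length."""
    # Trim one token at a time from the longer list, so the longer sequence
    # absorbs more of the truncation and short sequences are preserved.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()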