def process_sentence(sentence_id, annotations, lines):
    """Process one sentence: merge its tokens, normalize numerical FEs,
    re-number token ids sequentially, and drop duplicate rows while
    keeping first-seen order.
    """
    rows = normalize_numerical_fes(
        sentence_id, merge_tokens(sentence_id, annotations, lines)
    )

    # rewrite column 1 so token ids are consecutive after merging
    for token_id, row in enumerate(rows):
        row[1] = str(token_id)

    # join each row into a TSV line; OrderedSet deduplicates in order
    deduplicated = OrderedSet()
    for row in rows:
        deduplicated.add('\t'.join(row))
    return deduplicated
def process_sentence(sentence_id, annotations, lines):
    """Merge tokens and normalize numerical FEs for a single sentence,
    then return the rows as an order-preserving set of TSV lines with
    freshly assigned sequential token ids.
    """
    merged = merge_tokens(sentence_id, annotations, lines)
    rows = normalize_numerical_fes(sentence_id, merged)

    # token ids must be re-assigned after merging; column 1 holds the id
    for index, row in enumerate(rows):
        row[1] = str(index)

    result = OrderedSet()
    for tsv_line in ('\t'.join(row) for row in rows):
        result.add(tsv_line)
    return result
def process_sentence(sentence_id, annotations, lines):
    """
    Process a single sentence by merging the tokens and normalizing numerical expressions.

    Token ids (column 1 of each row) are rewritten to be sequential after
    merging, and the rows are joined into tab-separated lines, deduplicated
    in first-seen order via an OrderedSet.
    """
    merged = merge_tokens(sentence_id, annotations, lines)
    normalized = normalize_numerical_fes(sentence_id, merged)

    # insert correct token ids (merging may have collapsed rows, so re-number)
    for i, p in enumerate(normalized):
        p[1] = str(i)

    # join each row into a TSV line; OrderedSet drops duplicates, keeps order
    clean = OrderedSet()
    for line in normalized:
        clean.add('\t'.join(line))

    return clean
def process_sentence(sentence_id, annotations, lines):
    """ process a single sentence by merging the tokens and normalizing numerical expressions

    Token ids (column 1) are re-numbered sequentially after merging, and the
    rows are returned as tab-joined lines, deduplicated in first-seen order.

    :param str sentence_id: The ID of this sentence
    :param dict annotations: The data about this sentence's FEs
    :param list lines: The POS tagging of this sentence
    :return: The processed sentence as an ordered set of tab-separated lines
    :rtype: OrderedSet
    """
    merged = merge_tokens(sentence_id, annotations, lines)
    normalized = normalize_numerical_fes(sentence_id, merged)

    # insert correct token ids (re-number after merging collapsed rows)
    for i, p in enumerate(normalized):
        p[1] = str(i)

    # OrderedSet deduplicates the TSV lines while preserving order
    clean = OrderedSet()
    for line in normalized:
        clean.add('\t'.join(line))

    return clean
def process_sentence(sentence_id, annotations, lines):
    """ process a single sentence by merging the tokens and normalizing numerical expressions

    After merging, token ids in column 1 are rewritten to be consecutive,
    then each row is joined with tabs and duplicates are dropped while
    preserving insertion order.

    :param str sentence_id: The ID of this sentence
    :param dict annotations: The data about this sentence's FEs
    :param list lines: The POS tagging of this sentence
    :return: The processed sentence as an ordered set of tab-separated lines
    :rtype: OrderedSet
    """
    merged = merge_tokens(sentence_id, annotations, lines)
    normalized = normalize_numerical_fes(sentence_id, merged)

    # insert correct token ids (sequential re-numbering after merge)
    for i, p in enumerate(normalized):
        p[1] = str(i)

    # join rows into TSV lines; OrderedSet removes duplicates, keeps order
    clean = OrderedSet()
    for line in normalized:
        clean.add('\t'.join(line))

    return clean