Example #1
def create_spt_model(
    data_file: str,
    vocab_size: int,
    sample_size: int,
    do_lower_case: bool,
    tokenizer_type: str = 'unigram',
    output_dir: Optional[str] = None,
    character_coverage: float = 1.0,
):
    """
    Creates sentence piece tokenizer model from data file.
    Args:
        data_file: data file
        vocab_size: vocabulary size
        sample_size: maximum size of sentences the trainer loads
        do_lower_case: if text should be lower cased before tokenizer model is created
        character_coverage: float value between 0 and 1 (as a percentage). For languages with a vast charset,
            can be < 1.0, but for all other languages, it should be set as 1.0
        output_dir: folder to save created tokenizer model. If not specified will store model at data_file/../spt folder
    """

    if not data_file or not os.path.exists(data_file):
        raise ValueError(
            f"data_file must be a valid file path, but got {data_file}")
    data_dir = os.path.dirname(data_file)
    vocab = []
    if not output_dir:
        output_dir = f'{data_dir}/spt'
    if if_exist(output_dir, ['tokenizer.model']):
        logging.info(
            f"tokenizer model {output_dir}/tokenizer.model already exists")
        return f'{output_dir}/tokenizer.model', f'{output_dir}/vocab.txt'
    logging.info(f'Processing {data_file} and storing the result at {output_dir}')
    os.makedirs(output_dir, exist_ok=True)

    cmd = (f"--input={data_file} --model_prefix={output_dir}/tokenizer "
           f"--vocab_size={vocab_size} "
           f"--shuffle_input_sentence=true --hard_vocab_limit=false "
           f"--model_type={tokenizer_type} "
           f"--character_coverage={character_coverage} "
           f"--bos_id=-1 --eos_id=-1")
    if do_lower_case:
        cmd += " --normalization_rule_name=nmt_nfkc_cf"

    if sample_size > 0:
        cmd += f" --input_sentence_size={sample_size}"

    sentencepiece.SentencePieceTrainer.Train(cmd)

    # Convert the trained SentencePiece vocabulary into a BERT-style vocab list
    tokens = []

    with open(f"{output_dir}/tokenizer.vocab", "r") as f:
        f.readline()  # skip first <unk> token

        # Read tokens from each line and parse for vocab. SentencePiece marks
        # word-initial pieces with "▁"; strip it, and give word-internal pieces
        # the BERT/WordPiece "##" continuation prefix.
        for line in f:
            piece = line.split("\t")[0]
            token = piece[1:] if piece.startswith("▁") else f"##{piece}"

            if len(token) > 0:
                tokens.append(token)
            else:
                tokens.append(piece[0])

    vocab.extend(tokens)

    # Save vocabulary to output file
    vocab_file = f'{output_dir}/vocab.txt'
    with open(vocab_file, "w") as f:
        for token in vocab:
            f.write(f"{token}\n")
    return f'{output_dir}/tokenizer.model', vocab_file
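
A minimal usage sketch for Example #1; the corpus path, vocabulary size, and output directory below are placeholder assumptions, and the round trip uses only the standard sentencepiece.SentencePieceProcessor API:

import sentencepiece

# Hypothetical inputs: a plain-text corpus with one sentence per line.
model_path, vocab_path = create_spt_model(
    data_file="data/corpus.txt",  # assumed corpus location
    vocab_size=8000,
    sample_size=-1,               # <= 0: load the whole file
    do_lower_case=True,
    output_dir="spt_unigram",     # assumed output folder
)

# Load the trained model and tokenize a sample sentence into pieces.
sp = sentencepiece.SentencePieceProcessor()
sp.Load(model_path)
print(sp.EncodeAsPieces("hello world"))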
Example #2
def create_spt_model(
    data_file: str,
    vocab_size: int,
    sample_size: int,
    do_lower_case: bool,
    tokenizer_type: str = 'unigram',
    output_dir: Optional[str] = None,
    character_coverage: float = 1.0,
    train_extremely_large_corpus: bool = False,
    max_sentencepiece_length: int = -1,
    bos: bool = False,
    eos: bool = False,
    pad: bool = False,
    control_symbols: List[str] = None,
    user_defined_symbols: List[str] = None,
):
    """
    Creates sentence piece tokenizer model from data file.
    Args:
        data_file: data file
        vocab_size: vocabulary size
        sample_size: maximum size of sentences the trainer loads
        do_lower_case: if text should be lower cased before tokenizer model is created
        character_coverage: float value between 0 and 1 (as a percentage). For languages with a vast charset,
            can be < 1.0, but for all other languages, it should be set as 1.0
        output_dir: folder to save created tokenizer model. If not specified will store model at data_file/../spt folder
        train_extremely_large_corpus: If training on huge datasets, pass this flag to allow SentencePiece
            to build the tokenizer.
        max_sentencepiece_length: Limits the maximum length of the SentencePiece subword that can be constructed.
            By default, no limit is placed.
        bos: when True, bos token "<s>" is added to the vocabulary.
        eos: when True, eos token "</s>" is added to the vocabulary.
        pad: when True, pad token "<pad>" is added to the vocabulary.
        control_symbols: control symbols to add to tokenizer, as defined by sentencepiece.
            These tokens get removed at decode time and are not encoded from the text - can only be added to the input programatically.
        user_defined_symbols: user symbols to add to tokenizer, as defined by sentencepiece.
            These tokens remain in the decoded text and are encoded automatically when present in the input text.
    """

    if not data_file or not os.path.exists(data_file):
        raise ValueError(f"data_file must be valid file path, but got {data_file}")
    data_dir = os.path.dirname(data_file)
    vocab = []
    if not output_dir:
        output_dir = f'{data_dir}/spt'
    if if_exist(output_dir, ['tokenizer.model']):
        logging.info(f"tokenizer model {output_dir}/tokenizer.model already exists")
        return f'{output_dir}/tokenizer.model', f'{output_dir}/vocab.txt'
    logging.info(f'Processing {data_file} and storing the result at {output_dir}')
    os.makedirs(output_dir, exist_ok=True)

    cmd = (
        f"--input={data_file} --model_prefix={output_dir}/tokenizer "
        f"--vocab_size={vocab_size} "
        f"--shuffle_input_sentence=true --hard_vocab_limit=false "
        f"--model_type={tokenizer_type} "
        f"--character_coverage={character_coverage}"
    )

    # pad_id starts at 3 (the slot after <unk>=0, <s>=1, </s>=2) and is decremented
    # once for each of bos/eos that is disabled below.
    pad_id = 3
    if not bos:
        pad_id -= 1
        cmd += " --bos_id=-1"

    if not eos:
        pad_id -= 1
        cmd += " --eos_id=-1"

    if pad:
        cmd += f" --pad_id={pad_id}"

    if control_symbols:
        control_string = ",".join(control_symbols)
        cmd += f" --control_symbols={control_string}"

    if user_defined_symbols:
        user_string = ",".join(user_defined_symbols)
        cmd += f" --user_defined_symbols={user_string}"

    if do_lower_case:
        cmd += " --normalization_rule_name=nmt_nfkc_cf"

    if sample_size > 0:
        cmd += f" --input_sentence_size={sample_size}"

    if train_extremely_large_corpus:
        cmd += " --train_extremely_large_corpus=true"

    if max_sentencepiece_length >= 0:
        cmd += f" --max_sentencepiece_length={max_sentencepiece_length}"

    sentencepiece.SentencePieceTrainer.Train(cmd)

    # Convert the trained SentencePiece vocabulary into a BERT-style vocab list,
    # skipping special and user-added symbols.
    tokens = []
    special_tokens = ["<s>", "</s>", "<pad>", "<unk>"]
    if control_symbols:
        special_tokens += control_symbols
    if user_defined_symbols:
        special_tokens += user_defined_symbols

    with open(f"{output_dir}/tokenizer.vocab", "r") as f:
        # Read tokens from each line and parse for vocab, skipping special tokens.
        # SentencePiece marks word-initial pieces with "▁"; strip it, and give
        # word-internal pieces the BERT/WordPiece "##" continuation prefix.
        for line in f:
            piece = line.split("\t")[0]
            if piece in special_tokens:
                # skip special tokens
                continue
            token = piece[1:] if piece.startswith("▁") else f"##{piece}"

            if len(token) > 0:
                tokens.append(token)
            else:
                tokens.append(piece[0])

    vocab.extend(tokens)

    # Save vocabulary to output file
    vocab_file = f'{output_dir}/vocab.txt'
    with open(vocab_file, "w") as f:
        for token in vocab:
            f.write(f"{token}\n")
    return f'{output_dir}/tokenizer.model', vocab_file
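
A similar sketch for Example #2, keeping <s>, </s>, and <pad> in the vocabulary and registering an extra user-defined symbol; all paths and sizes are illustrative assumptions:

# Hypothetical call: BPE model with bos/eos/pad retained and a custom marker token
# that survives encode/decode round trips.
model_path, vocab_path = create_spt_model(
    data_file="data/corpus.txt",     # assumed corpus location
    vocab_size=16000,
    sample_size=1000000,             # cap the number of sentences the trainer loads
    do_lower_case=False,
    tokenizer_type="bpe",
    output_dir="spt_bpe",            # assumed output folder
    bos=True,
    eos=True,
    pad=True,
    user_defined_symbols=["<sep>"],  # hypothetical extra symbol
)

# vocab.txt lists the BERT-style tokens, one per line, with special tokens skipped.
with open(vocab_path) as f:
    print(f.readline().strip())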