Example #1
import logging
import os
import pickle

# BRCDataset, CMRCDataset, and Vocab come from this project's own dataset and
# vocabulary modules; the import paths below are assumptions, adjust them to
# the actual package layout.
# from dataset import BRCDataset, CMRCDataset
# from vocab import Vocab


def prepare(config):
    """
    checks data, creates the directories, 
    prepare the vocabulary and embeddings
    """
    logger = logging.getLogger('qarc')
    logger.info('Checking the data files...')
    for data_path in config.train_files + config.dev_files + config.test_files:
        assert os.path.exists(data_path),\
            '{} file does not exist.'.format(data_path)
    logger.info('Preparing the directories...')
    train_summary_dir = os.path.join(config.summary_dir, 'train')
    dev_summary_dir = os.path.join(config.summary_dir, 'dev')
    for dir_path in [config.vocab_dir, config.model_dir, config.result_dir, train_summary_dir, dev_summary_dir]:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

    logger.info('Loading the dataset...')
    if config.dataset_name.startswith('cmrc2018'):
        qarc_data = CMRCDataset(config.max_p_len, config.max_q_len, config.max_char_len, config.max_py_len,
                                config.train_files, config.dev_files, config.test_files)
    else:
        qarc_data = BRCDataset(config.max_p_num, config.max_p_len, config.max_q_len, config.max_char_len,
                               config.train_files, config.dev_files, config.test_files)

    logger.info('Building vocabulary...')
    vocab = Vocab(lower=True)
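    # Collect word, character, and pinyin (py) tokens from the training split
    # to build the vocabulary.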
    for word in qarc_data.word_iter('train'):
        vocab.add_word(word)
    for char in qarc_data.char_iter('train'):
        vocab.add_char(char)
    for py in qarc_data.py_iter('train'):
        vocab.add_py(py)

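    # Drop tokens that occur fewer than min_cnt times in the training data;
    # the same threshold is applied to words, characters, and pinyin tokens.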
    unfiltered_vocab_word_size = vocab.word_size()
    vocab.filter_tokens_by_cnt(min_cnt=2)
    filtered_word_num = unfiltered_vocab_word_size - vocab.word_size()
    logger.info('After filtering {} tokens, the final vocab size is {}'.format(
        filtered_word_num, vocab.word_size()))

    unfiltered_vocab_char_size = vocab.char_size()
    vocab.filter_chars_by_cnt(min_cnt=2)
    filtered_char_num = unfiltered_vocab_char_size - vocab.char_size()
    logger.info('After filtering {} chars, the final char vocab size is {}'.format(
        filtered_char_num, vocab.char_size()))

    unfiltered_vocab_py_size = vocab.py_size()
    vocab.filter_pys_by_cnt(min_cnt=2)
    filtered_py_num = unfiltered_vocab_py_size - vocab.py_size()
    logger.info('After filtering {} pinyin tokens, the final pinyin vocab size is {}'.format(
        filtered_py_num, vocab.py_size()))

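    # Word and character embeddings are both looked up in the pretrained
    # word2vec file; pinyin embeddings have no pretrained source here and are
    # randomly initialized below.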
    logger.info('Assigning word embeddings...')
    vocab.load_pretrained_word_embeddings(
        config.word2vec, config.word_embed_dim)

    logger.info('Assigning char embeddings...')
    # vocab.randomly_init_char_embeddings(config.char_embed_dim)
    vocab.load_pretrained_char_embeddings(
        config.word2vec, config.char_embed_dim)

    logger.info('Assigning py embeddings...')
    vocab.randomly_init_py_embeddings(config.py_embed_dim)

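    # Serialize the whole Vocab object (tokens plus embedding matrices) so
    # later stages can reload it instead of rebuilding it.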
    logger.info('Saving vocab...')
    with open(os.path.join(config.vocab_dir, config.dataset_name + '_vocab.data'), 'wb') as fout:
        pickle.dump(vocab, fout)

    logger.info('Done with preparing!')
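
A minimal usage sketch: the attribute names on config are the ones prepare reads
above, but the argparse.Namespace wrapper and all concrete values are
illustrative assumptions rather than part of the original example.

if __name__ == '__main__':
    from argparse import Namespace

    logging.basicConfig(level=logging.INFO)

    # Illustrative values only; point these at your own data, embedding, and
    # output paths.
    config = Namespace(
        dataset_name='cmrc2018',
        train_files=['data/cmrc2018/train.json'],
        dev_files=['data/cmrc2018/dev.json'],
        test_files=['data/cmrc2018/test.json'],
        vocab_dir='output/vocab',
        model_dir='output/models',
        result_dir='output/results',
        summary_dir='output/summary',
        word2vec='data/embeddings/word2vec.txt',
        max_p_num=5, max_p_len=500, max_q_len=60,
        max_char_len=8, max_py_len=8,
        word_embed_dim=300, char_embed_dim=300, py_embed_dim=64,
    )
    prepare(config)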