Example No. 1
class Corpus(object):
    def __init__(self, path, dataset, *args, **kwargs):
        self.dataset = dataset
        self.vocab = Vocab(*args, **kwargs)

        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))
        elif self.dataset == 'wt103':
            self.vocab.count_file(os.path.join(path, 'train.txt'))
        elif self.dataset == 'lm1b':
            train_path_pattern = os.path.join(
                path, '1-billion-word-language-modeling-benchmark-r13output',
                'training-monolingual.tokenized.shuffled', 'news.en-*')
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called

        self.vocab.build_vocab()

        if self.dataset in ['ptb', 'wt2', 'wt103']:
            self.train = self.vocab.encode_file(
                os.path.join(path, 'train.txt'), ordered=True)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=True)
            self.test  = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=True)
        elif self.dataset in ['enwik8', 'text8']:
            self.train = self.vocab.encode_file(
                os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
            self.test  = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
        elif self.dataset == 'lm1b':
            self.train = train_paths
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
            self.test  = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)

    def get_iterator(self, split, *args, **kwargs):
        if split == 'train':
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif self.dataset == 'lm1b':
                kwargs['shuffle'] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
        elif split in ['valid', 'test']:
            data = self.valid if split == 'valid' else self.test
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
            elif self.dataset == 'lm1b':
                data_iter = LMShuffledIterator(data, *args, **kwargs)

        return data_iter
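A minimal usage sketch for the class above, assuming it sits in a data_utils module next to Vocab and the LM*Iterator helpers (as in the Transformer-XL reference code); the data path, special tokens, batch size and bptt below are placeholder assumptions.

# Hypothetical usage; module name, paths and Vocab kwargs are assumptions.
from data_utils import Corpus

# 'wt103' counts only train.txt for the vocab, then encodes all three splits
# as ordered token streams.
corpus = Corpus('./data/wikitext-103', 'wt103', special=['<eos>'])

# Positional args (batch size, bptt) are forwarded to LMOrderedIterator.
train_iter = corpus.get_iterator('train', 60, 150)
valid_iter = corpus.get_iterator('valid', 10, 150)

for data, target, seq_len in train_iter:
    pass  # data and target are LongTensors of shape (bptt, batch_size)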
Example No. 2
class Corpus(object):
    def __init__(self, path, *args, **kwargs):
        self.vocab = Vocab(*args, **kwargs)

        # Load the words from the vocabulary file
        self.vocab.build_vocab()

        # Training set
        self.train = self.vocab.encode_file(os.path.join(path, 'train.txt'),
                                            verbose=True)
        self.train_label = self.vocab.encode_file_only_for_lables(os.path.join(
            path, 'train.label'),
                                                                  verbose=True)

        # Validation set
        self.valid = self.vocab.encode_file(os.path.join(path, 'valid.txt'),
                                            verbose=True)
        self.valid_label = self.vocab.encode_file_only_for_lables(os.path.join(
            path, 'valid.label'),
                                                                  verbose=True)

        # self.test = self.vocab.encode_file(
        #     os.path.join(path, 'test.txt'), ordered=True)

    # Xu Haiming
    def get_batch_iterator(self, split, *args, **kwargs):
        '''Return a batch iterator over the requested split.

        :param split: 'train' or 'valid'
        :param args: positional arguments forwarded to BatchIteratorHelper
        :param kwargs: keyword arguments forwarded to BatchIteratorHelper
        :return: a BatchIteratorHelper over the split's data and labels
        '''
        if split == 'train':
            # data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            batch_iter = BatchIteratorHelper(self.train, self.train_label,
                                             *args, **kwargs)

        elif split == 'valid':
            batch_iter = BatchIteratorHelper(self.valid, self.valid_label,
                                             *args, **kwargs)

        return batch_iter
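A hedged usage sketch for this labelled-data variant. The constructor reads train.txt/train.label and valid.txt/valid.label from the data directory; the module name, path and the arguments passed on to BatchIteratorHelper are assumptions, since that helper is project specific.

# Hypothetical usage; module name, paths and iterator args are assumptions.
from data_utils import Corpus

corpus = Corpus('./data/my_task', special=['<eos>'])

train_iter = corpus.get_batch_iterator('train', 32)  # 32: assumed batch size
valid_iter = corpus.get_batch_iterator('valid', 32)

for batch in train_iter:
    pass  # batch layout is defined by BatchIteratorHelper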
Example No. 3
class Corpus(object):
    def __init__(self, path, dataset, *args, **kwargs):
        self.dataset = dataset
        self.vocab = Vocab(*args, **kwargs)

        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8', 'smiles']:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))

        self.vocab.build_vocab()

        if self.dataset in ['ptb', 'wt2', 'wt103']:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=True)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=True)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=True)
        elif self.dataset in ['enwik8', 'text8', 'smiles']:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=True,
                                                add_eos=False)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=True,
                                                add_eos=False)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=True,
                                               add_eos=False)

    def get_iterator(self, split, *args, **kwargs):
        if split == 'train':
            if self.dataset in [
                    'ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'smiles'
            ]:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif self.dataset == 'lm1b':
                kwargs['shuffle'] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args,
                                                **kwargs)
        elif split in ['valid', 'test']:
            data = self.valid if split == 'valid' else self.test
            if self.dataset in [
                    'ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'smiles'
            ]:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
            elif self.dataset == 'lm1b':
                data_iter = LMShuffledIterator(data, *args, **kwargs)

        return data_iter
Example No. 4
class Corpus:
    train_files: List[str]
    vocab: Vocab
    train: Optional[torch.LongTensor]
    valid: Optional[torch.LongTensor]
    test: Optional[torch.LongTensor]

    def __init__(self, path, dataset, use_bpe, valid_custom=None, *args, **kwargs):
        self.dataset = dataset
        train_paths = None
        file_paths = None
        self.valid_custom = None

        if use_bpe:
            self.vocab = OpenAIVocab(kwargs['max_size'], kwargs.get('vocab_file'))
        else:
            self.vocab = Vocab(*args, **kwargs)

        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))
        elif self.dataset == 'wt103' or self.dataset == 'wt2':
            self.vocab.count_file(os.path.join(path, 'train.txt'))
        elif self.dataset == 'wt103-normal':
            self.vocab.count_file(os.path.join(path, 'wiki.train.tokens'))
        elif self.dataset == 'lm1b':
            train_path_pattern = os.path.join(
                path, '1-billion-word-language-modeling-benchmark-r13output',
                'training-monolingual.tokenized.shuffled', 'news.en-*')
            train_paths = glob.glob(train_path_pattern)
        elif self.dataset == 'wiki':
            file_path_pattern = os.path.join(path, '*/wiki_*.txt')
            file_paths = glob.glob(file_path_pattern)
            assert file_paths, f'Nothing found at {file_path_pattern}'
        elif self.dataset == 'git':
            file_path_pattern = os.path.join(path, 'git_*.txt')
            file_paths = glob.glob(file_path_pattern)
            valid_path = os.path.join(path, 'valid.txt')
            test_path = os.path.join(path, 'test.txt')
            assert file_paths, f'Nothing found at {file_path_pattern}'

        # file_paths is only set for the 'wiki' and 'git' datasets.
        if file_paths is not None:
            file_paths = natsort.natsorted(file_paths)

        # the vocab will load from file when build_vocab() is called
        self.vocab.build_vocab()

        if self.dataset in ['ptb', 'wt2', 'wt103']:
            self.train = self.vocab.encode_file(
                os.path.join(path, 'train.txt'), ordered=True)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=True)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=True)
        elif self.dataset in ['enwik8', 'text8']:
            self.train = self.vocab.encode_file(
                os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
        elif self.dataset == 'lm1b':
            self.train = natsort.natsorted(train_paths)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)
        elif self.dataset == 'wiki':
            if g.args.test:  # in testing mode we use a smaller dataset
                valid_path = sorted(file_paths)[-1]
                test_path = sorted(file_paths)[-1]
            else:
                valid_path = sorted(file_paths)[42]
                test_path = sorted(file_paths)[1337]
            self.valid = self.vocab.encode_file(valid_path, ordered=True)
            self.test = self.vocab.encode_file(test_path, ordered=True)
            self.train = None
            self.train_files = list(set(file_paths) - {valid_path, test_path})
        elif self.dataset == 'git':
            if g.args.test:  # in testing mode we use a smaller dataset
                valid_path = sorted(file_paths)[-1]
                test_path = sorted(file_paths)[-1]
            if valid_custom:
                g.logger.info(f"Using file {valid_custom} as additional validation file")
                self.valid_custom = self.vocab.encode_file(valid_custom, ordered=True)
            self.valid = self.vocab.encode_file(valid_path, ordered=True)
            self.test = self.vocab.encode_file(test_path, ordered=True)
            self.train = None
            self.train_files = file_paths
        elif self.dataset in ['wt103-normal']:
            self.train = self.vocab.encode_file(
                os.path.join(path, 'wiki.train.tokens'), ordered=True, add_eos=False)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'wiki.valid.tokens'), ordered=True, add_eos=False)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'wiki.test.tokens'), ordered=True, add_eos=False)

        # Only the file-based datasets ('wiki', 'git') populate train_files.
        if getattr(self, 'train_files', None) is not None:
            self.train_files = natsort.natsorted(self.train_files)

    def get_dist_iterator(self, split: str, *args, rank: int = 0, max_rank: int = 1, skip_files: float = .0, **kwargs):
        """Get an iterator that only operates on rank'th independent subset of the data."""
        if split == 'train':
            data = self.train
        elif split == 'valid':
            data = self.valid
        elif split == 'valid_custom':
            assert self.valid_custom is not None, "Custom validation file was not specified during Corpus initialization"
            data = self.valid_custom
        else:
            assert split == 'test'
            data = self.test

        # special handling for large datasets, don't load training set in memory
        if self.dataset in ['lm1b', 'wiki', 'git'] and split == 'train':
            file_subset = list(chunk(self.train_files, max_rank))[rank]
            return LMMultiFileIterator(file_subset, self.vocab, skip_files=skip_files, *args, **kwargs)

        # noinspection PyTypeChecker
        assert len(data), f"data attribute '{split}' empty for iterator.dataset={self.dataset}"
        # noinspection PyTypeChecker
        subset = list(chunk(data, max_rank))[rank]
        return LMOrderedIterator(subset, *args, **kwargs)

    def get_iterator(self, split: str, *args, **kwargs):
        """Get an iterator over the corpus.

        Each next() returns (data, target, seq_length).
        data and target have shape (bptt, bsz) and seq_length is a scalar.
        """
        data = self.__getattribute__(split)
        if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'wt103-normal']:
            return LMOrderedIterator(data, *args, **kwargs)
        if self.dataset == 'lm1b':
            if split in ['valid', 'test']:
                return LMShuffledIterator(data, *args, **kwargs)

            kwargs['shuffle'] = True
            return LMMultiFileIterator(data, self.vocab, *args, **kwargs)
        if self.dataset in ['wiki', 'git']:
            if split == 'train':
                return LMMultiFileIterator(data, self.vocab, *args, **kwargs)
            return LMOrderedIterator(data, *args, **kwargs)
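A hedged multi-process sketch for the variant above: each rank asks get_dist_iterator for its own independent chunk of the data. The module name, data path, vocab kwargs, batch size and bptt are assumptions, and the process group is assumed to be initialised already.

# Hypothetical distributed usage; paths and hyper-parameters are assumptions.
import torch.distributed as dist
from data_utils import Corpus

corpus = Corpus('./data/wikitext-103', 'wt103', use_bpe=False, special=['<eos>'])

rank, world_size = dist.get_rank(), dist.get_world_size()
# In-memory datasets give each rank a slice of the encoded tensor; the
# file-based ones ('lm1b', 'wiki', 'git') stream a per-rank subset of files
# through LMMultiFileIterator.
train_iter = corpus.get_dist_iterator('train', 16, 512, rank=rank, max_rank=world_size)
valid_iter = corpus.get_dist_iterator('valid', 16, 512, rank=rank, max_rank=world_size)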
Example No. 5
class Corpus(object):
    def __init__(self, path, dataset, *args, **kwargs):
        self.dataset = dataset
        self.vocab = Vocab(*args, **kwargs)
        # self.order = kwargs.get('order', True)

        # if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8', 'bilingual_ted']:
        #     self.vocab.count_file(os.path.join(path, 'train.txt'))
        #     self.vocab.count_file(os.path.join(path, 'valid.txt'))
        #     self.vocab.count_file(os.path.join(path, 'test.txt'))
        # elif self.dataset == 'wt103':
        #     self.vocab.count_file(os.path.join(path, 'train.txt'))
        # elif self.dataset == 'lm1b':
        #     train_path_pattern = os.path.join(
        #         path, '1-billion-word-language-modeling-benchmark-r13output',
        #         'training-monolingual.tokenized.shuffled', 'news.en-*')
        #     train_paths = glob.glob(train_path_pattern)
        #     # the vocab will load from file when build_vocab() is called

        self.vocab.count_file(os.path.join(path, 'train.txt'))
        self.vocab.build_vocab()

        self.train = self.vocab.encode_file(
            os.path.join(path, 'train.txt'))
        self.valid = self.vocab.encode_file(
            os.path.join(path, 'valid.txt'))
        self.test = self.vocab.encode_file(
            os.path.join(path, 'test.txt'))

        # if self.dataset in ['ptb', 'wt2', 'wt103']:
        #
        # elif self.dataset in ['enwik8', 'text8', 'bilingual_ted']:
        #     print("Creating %s dataset" % self.dataset)
        #     self.train = self.vocab.encode_file(
        #         os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
        #     self.valid = self.vocab.encode_file(
        #         os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
        #     self.test  = self.vocab.encode_file(
        #         os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
        # elif self.dataset == 'lm1b':
        #     self.train = train_paths
        #     self.valid = self.vocab.encode_file(
        #         os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
        #     self.test  = self.vocab.encode_file(
        #         os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)

    def get_iterator(self, split, *args, **kwargs):
        # if split == 'train':
        #     if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'bilingual_ted']:
        #         data_iter = LMOrderedIterator(self.train, *args, **kwargs)
        #     elif self.dataset == 'lm1b':
        #         kwargs['shuffle'] = True
        #         data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
        # elif split in ['valid', 'test']:
        #     data = self.valid if split == 'valid' else self.test
        #     if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'bilingual_ted']:
        #         data_iter = LMOrderedIterator(data, *args, **kwargs)
        #     elif self.dataset == 'lm1b':
        #         data_iter = LMShuffledIterator(data, *args, **kwargs)

        # if not hasattr(self, 'order'):
        #     self.order = True
        order = kwargs.get('order', True)

        if order:
            if split == 'train':
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif split in ['valid', 'test']:
                data_iter = LMOrderedIterator(self.valid, *args, **kwargs)
        else:
            if split == 'train':
                data_iter = LMShuffledIterator(self.train, *args, **kwargs)
            elif split in ['valid', 'test']:
                data_iter = LMShuffledIterator(self.valid, *args, **kwargs)

        return data_iter
Example No. 6
class Corpus(object):
    def __init__(self, path, dataset, *args, **kwargs):
        self.dataset = dataset
        self.vocab = Vocab(*args, **kwargs)

        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8', 'wddev']:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))
        elif self.dataset == 'wt103':
            self.vocab.count_file(os.path.join(path, 'train.txt'))
        elif self.dataset == 'wdtrain' or self.dataset == 'wdtrain-morph':
            train_path_pattern = os.path.join(path, 'train.txt')
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called

        self.vocab.build_vocab()

        if self.dataset in ['ptb', 'wt2', 'wt103', 'wddev']:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=True)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=True)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=True)
        elif self.dataset in ['enwik8', 'text8']:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=True,
                                                add_eos=False)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=True,
                                                add_eos=False)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=True,
                                               add_eos=False)
        elif self.dataset == 'lm1b':
            self.train = train_paths
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=False,
                                                add_double_eos=True)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=False,
                                               add_double_eos=True)
        elif self.dataset == 'wdtrain' or self.dataset == 'wdtrain-morph':
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=False,
                                                add_double_eos=True)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=False,
                                                add_double_eos=True)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=False,
                                               add_double_eos=True)

    def get_iterator(self, split, *args, **kwargs):
        if split == 'train':
            if self.dataset in [
                    'ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'wddev'
            ]:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif self.dataset == 'wdtrain' or self.dataset == 'wdtrain-morph':
                kwargs['shuffle'] = True
                #data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
                data_iter = LMShuffledIterator(self.train, *args, **kwargs)
                #data_iter = LMOrderedIterator(self.train, *args, **kwargs)
        elif split in ['valid', 'test']:
            data = self.valid if split == 'valid' else self.test
            if self.dataset in [
                    'ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'wddev'
            ]:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
            elif self.dataset == 'wdtrain' or self.dataset == 'wdtrain-morph':
                data_iter = LMShuffledIterator(data, *args, **kwargs)
                #data_iter = RescoreIter(data, *args, **kwargs)
                #data_iter = LMOrderedIterator(data, *args, **kwargs)
        return data_iter
Example No. 7
class Corpus(object):
    # never called with extra args or kwargs
    def __init__(self, path, dataset, *args, **kwargs):
        self.dataset = dataset  # the string storing the dataset name
        self.vocab = Vocab(*args, **kwargs)

        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
            # Add words to the counter object of vocab
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))

        elif self.dataset == 'wt103':
            # Add words to the counter object of vocab
            self.vocab.count_file(os.path.join(path, 'train.txt'))

        elif self.dataset == 'lm1b':
            train_path_pattern = os.path.join(
                path, '1-billion-word-language-modeling-benchmark-r13output',
                'training-monolingual.tokenized.shuffled', 'news.en-*')
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called

        # Add words to idx2sym and sym2idx of vocab
        self.vocab.build_vocab()

        if self.dataset in ['ptb', 'wt2', 'wt103']:
            # Add LongTensors of the full corpus consisting only of words.
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=True)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=True)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=True)

        elif self.dataset in ['enwik8', 'text8']:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=True,
                                                add_eos=False)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=True,
                                                add_eos=False)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=True,
                                               add_eos=False)

        elif self.dataset == 'lm1b':
            self.train = train_paths
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=False,
                                                add_double_eos=True)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=False,
                                               add_double_eos=True)

    def get_iterator(self, split, *args, **kwargs):
        # batch_size, bptt, device and extended context length are passed as args/kwargs
        if split == 'train':
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)

            elif self.dataset == 'lm1b':
                kwargs['shuffle'] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args,
                                                **kwargs)

        elif split in ['valid', 'test']:
            data = self.valid if split == 'valid' else self.test

            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(data, *args, **kwargs)

            elif self.dataset == 'lm1b':
                data_iter = LMShuffledIterator(data, *args, **kwargs)

        return data_iter
Example No. 8
class Corpus:
    def __init__(self, path, dataset, use_bpe, *args, **kwargs):
        self.dataset = dataset
        if use_bpe:
            self.vocab = OpenAIVocab(kwargs['max_size'],
                                     kwargs.get('vocab_file'))
        else:
            self.vocab = Vocab(*args, **kwargs)

        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))
        elif self.dataset == 'wt103' or self.dataset == 'wt2':
            self.vocab.count_file(os.path.join(path, 'train.txt'))
        elif self.dataset == 'wt103-normal':
            self.vocab.count_file(os.path.join(path, 'wiki.train.tokens'))
        elif self.dataset == 'lm1b':
            train_path_pattern = os.path.join(
                path, '1-billion-word-language-modeling-benchmark-r13output',
                'training-monolingual.tokenized.shuffled', 'news.en-*')
            train_paths = glob.glob(train_path_pattern)
        elif self.dataset == 'wiki':
            file_path_pattern = os.path.join(path, '*/wiki_*.txt')
            file_paths = glob.glob(file_path_pattern)
            assert file_paths, f'Nothing found at {file_path_pattern}'

        # the vocab will load from file when build_vocab() is called
        self.vocab.build_vocab()

        if self.dataset in ['ptb', 'wt2', 'wt103']:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=True)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=True)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=True)
        elif self.dataset in ['enwik8', 'text8']:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=True,
                                                add_eos=False)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=True,
                                                add_eos=False)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=True,
                                               add_eos=False)
        elif self.dataset == 'lm1b':
            self.train = train_paths
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=False,
                                                add_double_eos=True)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=False,
                                               add_double_eos=True)
        elif self.dataset == 'wiki':
            # Take the first and second file of each alphabetical directory for valid and test.
            self.valid = [x for x in file_paths if x.endswith('00.txt')]
            self.test = [x for x in file_paths if x.endswith('01.txt')]
            self.train = list(
                set(file_paths) - set(self.valid) - set(self.test))
        elif self.dataset in ['wt103-normal']:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'wiki.train.tokens'),
                                                ordered=True,
                                                add_eos=False)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'wiki.valid.tokens'),
                                                ordered=True,
                                                add_eos=False)
            self.test = self.vocab.encode_file(os.path.join(
                path, 'wiki.test.tokens'),
                                               ordered=True,
                                               add_eos=False)

    def get_dist_iterator(self, split, rank, max_rank, *args, **kwargs):
        """Get an iterator that only operates on rank//max_rank independent subset of the data."""
        data = self.__getattribute__(split)
        subset = list(chunk(data, max_rank))[rank]
        if self.dataset in ['lm1b', 'wiki']:
            return LMMultiFileIterator(subset, self.vocab, *args, **kwargs)

        return LMOrderedIterator(subset, *args, **kwargs)

    def get_iterator(self, split, *args, **kwargs):
        """Get an iterator over the corpus.

        Each next() returns (data, target, seq_length).
        data and target have shape (bptt, bsz) and seq_length is a scalar.
        """
        data = self.__getattribute__(split)
        if self.dataset in [
                'ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'wt103-normal'
        ]:
            return LMOrderedIterator(data, *args, **kwargs)
        elif self.dataset == 'lm1b':
            if split in ['valid', 'test']:
                return LMShuffledIterator(data, *args, **kwargs)
            else:
                kwargs['shuffle'] = True
                return LMMultiFileIterator(data, self.vocab, *args, **kwargs)
        elif self.dataset == 'wiki':
            return LMMultiFileIterator(data, self.vocab, *args, **kwargs)
Example No. 9
class Corpus(object):
    def __init__(self, vocab=None, *args, **kwargs):

        if vocab is None:
            self.vocab = Vocab(*args, **kwargs)
        else:
            self.vocab = vocab

        self.train, self.valid = [], []

    def generate_data(self, path, update_vocab=True):
        # self.order = kwargs.get('order', True)

        # if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8', 'bilingual_ted']:
        #     self.vocab.count_file(os.path.join(path, 'train.txt'))
        #     self.vocab.count_file(os.path.join(path, 'valid.txt'))
        #     self.vocab.count_file(os.path.join(path, 'test.txt'))
        # elif self.dataset == 'wt103':
        #     self.vocab.count_file(os.path.join(path, 'train.txt'))
        # elif self.dataset == 'lm1b':
        #     train_path_pattern = os.path.join(
        #         path, '1-billion-word-language-modeling-benchmark-r13output',
        #         'training-monolingual.tokenized.shuffled', 'news.en-*')
        #     train_paths = glob.glob(train_path_pattern)
        #     # the vocab will load from file when build_vocab() is called

        if update_vocab:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.build_vocab()

        self.train = self.vocab.encode_file(os.path.join(path, 'train.txt'))
        self.valid = self.vocab.encode_file(os.path.join(path, 'valid.txt'))
        # self.test = self.vocab.encode_file(
        #     os.path.join(path, 'test.txt'))

    def save(self, datadir):

        data = dict()

        data['train'] = self.train
        data['valid'] = self.valid
        data['vocab'] = self.vocab

        fn = os.path.join(datadir, 'cache.pt')
        torch.save(data, fn)

        vn = os.path.join(datadir, 'vocab.txt')
        self.vocab.write_to_file(vn)

    def load(self, datadir):

        fn = os.path.join(datadir, 'cache.pt')
        cache = torch.load(fn)

        self.train = cache['train']
        self.valid = cache['valid']
        self.vocab = cache['vocab']

    def get_iterator(self, split, *args, **kwargs):

        order = kwargs.get('order', True)

        if order:
            if split == 'train':
                data_iter = LMOrderedIterator(self.vocab, self.train, *args,
                                              **kwargs)
            elif split in ['valid', 'test']:
                data_iter = LMOrderedIterator(self.vocab, self.valid, *args,
                                              **kwargs)
        else:
            if split == 'train':
                data_iter = LMShuffledIterator(self.vocab, self.train, *args,
                                               **kwargs)
            elif split in ['valid', 'test']:
                data_iter = LMShuffledIterator(self.vocab, self.valid, *args,
                                               **kwargs)

        return data_iter
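A hedged sketch of the cache workflow this variant supports: encode the corpus once, persist it with save(), and reload it from cache.pt on later runs. The module name, directory layout and iterator arguments are assumptions.

# Hypothetical caching workflow; module name and paths are assumptions.
import os

from data_utils import Corpus

datadir = './data/my_corpus'  # must contain train.txt and valid.txt
corpus = Corpus(special=['<eos>'])

if os.path.exists(os.path.join(datadir, 'cache.pt')):
    corpus.load(datadir)            # restore train/valid tensors and the vocab
else:
    corpus.generate_data(datadir)   # count train.txt, build the vocab, encode both splits
    corpus.save(datadir)            # write cache.pt and vocab.txt

train_iter = corpus.get_iterator('train', 32, 128)  # forwarded to LMOrderedIterator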
Example No. 10
class Corpus(object):
    # def __init__(self, path, dataset, order=True, *args, **kwargs):
    def __init__(self,
                 train_src,
                 train_tgt,
                 valid_src,
                 valid_tgt,
                 order=True,
                 *args,
                 **kwargs):

        # if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8', 'bilingual_ted']:
        #     self.vocab.count_file(os.path.join(path, 'train.txt'))
        #     self.vocab.count_file(os.path.join(path, 'valid.txt'))
        #     self.vocab.count_file(os.path.join(path, 'test.txt'))
        # elif self.dataset == 'wt103':
        #     self.vocab.count_file(os.path.join(path, 'train.txt'))
        # elif self.dataset == 'lm1b':
        #     train_path_pattern = os.path.join(
        #         path, '1-billion-word-language-modeling-benchmark-r13output',
        #         'training-monolingual.tokenized.shuffled', 'news.en-*')
        #     train_paths = glob.glob(train_path_pattern)
        #     # the vocab will load from file when build_vocab() is called

        if kwargs.get('share_vocab'):
            self.src_vocab = Vocab(*args, **kwargs)
            self.src_vocab.count_file(train_src)
            self.src_vocab.count_file(train_tgt)
            self.src_vocab.build_vocab()
            self.tgt_vocab = self.src_vocab
        else:
            print("Two vocabularies are not supported at the moment")
            raise NotImplementedError

        self.train = dict()
        self.valid = dict()

        self.train['src'] = self.src_vocab.encode_file(train_src)

        self.train['tgt'] = self.tgt_vocab.encode_file(train_tgt,
                                                       bos=True,
                                                       eos=True)

        self.valid['src'] = self.src_vocab.encode_file(valid_src)

        self.valid['tgt'] = self.tgt_vocab.encode_file(valid_tgt,
                                                       bos=True,
                                                       eos=True)

        # self.train = self.vocab.encode_file(
        #     os.path.join(path, 'train.txt'), ordered=order)
        # self.valid = self.vocab.encode_file(
        #     os.path.join(path, 'valid.txt'), ordered=order)
        # self.test = self.vocab.encode_file(
        #     os.path.join(path, 'test.txt'), ordered=order)

        # if self.dataset in ['ptb', 'wt2', 'wt103']:
        #
        # elif self.dataset in ['enwik8', 'text8', 'bilingual_ted']:
        #     print("Creating %s dataset" % self.dataset)
        #     self.train = self.vocab.encode_file(
        #         os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
        #     self.valid = self.vocab.encode_file(
        #         os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
        #     self.test  = self.vocab.encode_file(
        #         os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
        # elif self.dataset == 'lm1b':
        #     self.train = train_paths
        #     self.valid = self.vocab.encode_file(
        #         os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
        #     self.test  = self.vocab.encode_file(
        #         os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)

    def get_iterator(self, split, *args, **kwargs):
        # if split == 'train':
        #     if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'bilingual_ted']:
        #         data_iter = LMOrderedIterator(self.train, *args, **kwargs)
        #     elif self.dataset == 'lm1b':
        #         kwargs['shuffle'] = True
        #         data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
        # elif split in ['valid', 'test']:
        #     data = self.valid if split == 'valid' else self.test
        #     if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'bilingual_ted']:
        #         data_iter = LMOrderedIterator(data, *args, **kwargs)
        #     elif self.dataset == 'lm1b':
        #         data_iter = LMShuffledIterator(data, *args, **kwargs)
        if split == 'train':
            data_iter = LMOrderedIterator(self.train, *args, **kwargs)
        elif split in ['valid', 'test']:
            data_iter = LMOrderedIterator(self.valid, *args, **kwargs)

        return data_iter
Example No. 11
class Corpus(object):
    def __init__(self, path, dataset, *args, **kwargs):
        self.dataset = dataset
        self.vocab = Vocab(*args, **kwargs)

        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))
        elif self.dataset == 'wt103':
            self.vocab.count_file(os.path.join(path, 'train.txt'))
        elif self.dataset == 'lm1b':
            train_path_pattern = os.path.join(
                path, '1-billion-word-language-modeling-benchmark-r13output',
                'training-monolingual.tokenized.shuffled', 'news.en-*')
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called
        elif self.dataset == 'nesmdb':
            print("nesmdb path", path)
            train_paths = glob.glob(os.path.join(path, 'train', '*.txt'))
            valid_paths = glob.glob(os.path.join(path, 'valid', '*.txt'))
            test_paths = glob.glob(os.path.join(path, 'test', '*.txt'))

        self.vocab.build_vocab()

        if self.dataset in ['ptb', 'wt2', 'wt103']:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=True)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=True)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=True)
        elif self.dataset in ['enwik8', 'text8']:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=True,
                                                add_eos=False)
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=True,
                                                add_eos=False)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=True,
                                               add_eos=False)
        elif self.dataset == 'lm1b':
            self.train = train_paths
            self.valid = self.vocab.encode_file(os.path.join(
                path, 'valid.txt'),
                                                ordered=False,
                                                add_double_eos=True)
            self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                               ordered=False,
                                               add_double_eos=True)
        elif self.dataset == 'nesmdb':
            self.train = train_paths
            self.valid = valid_paths
            self.test = test_paths

    def get_iterator(self, split, *args, **kwargs):
        if split == 'train':
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif self.dataset in ['lm1b', 'nesmdb']:
                kwargs['shuffle'] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args,
                                                **kwargs)
        elif split in ['valid', 'test']:
            data = self.valid if split == 'valid' else self.test
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
            elif self.dataset == 'lm1b':
                data_iter = LMShuffledIterator(data, *args, **kwargs)
            elif self.dataset == 'nesmdb':
                kwargs['shuffle'] = False
                # I've decided to let these both always be true for evaluation
                kwargs['skip_short'] = True
                kwargs['trim_padding'] = True
                data_iter = LMMultiFileIterator(data, self.vocab, *args,
                                                **kwargs)
                print(split, "data_iter.paths", data_iter.paths)

        return data_iter
Example No. 12
class Corpus(object):
    def __init__(self, path, dataset, *args, **kwargs):
        self.dataset = dataset
        self.params = dict(kwargs)
        if self.dataset == 'generic_dataset':
            if kwargs.get('vocab_file') is not None:
                kwargs['vocab_file'] = os.path.join(path, kwargs['vocab_file'])
        ordered = True
        if self.dataset in ['ptb', 'wt2', 'wt103']:
            kwargs.setdefault('add_eos', True)
        elif self.dataset == 'lm1b':
            kwargs.setdefault('add_double_eos', True)
            ordered = False

        print(self.dataset, 'vocab params', kwargs)
        self.vocab = Vocab(*args, **kwargs)
        train_paths = None

        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8', 'papers']:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))

        elif self.dataset == 'generic_dataset' and not self.vocab.vocab_file:
            train_path = os.path.join(path, 'train.txt')
            if os.path.exists(train_path):
                self.vocab.count_file(train_path, verbose=True)
            else:
                train_paths = glob.glob(os.path.join(path, '**',
                                                     'train-*.txt'))
                for train_path in tqdm.tqdm(train_paths,
                                            desc='counting train'):
                    self.vocab.count_file(train_path)
            self.vocab.count_file(os.path.join(path, 'valid.txt'),
                                  verbose=True)
            self.vocab.count_file(os.path.join(path, 'test.txt'), verbose=True)

        elif self.dataset == 'wt103':
            self.vocab.count_file(os.path.join(path, 'train.txt'))
        elif self.dataset == 'lm1b':
            train_path_pattern = os.path.join(
                path, '1-billion-word-language-modeling-benchmark-r13output',
                'training-monolingual.tokenized.shuffled', 'news.en-*')
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called,
            # because it has a vocab file set
            assert self.vocab.vocab_file

        self.vocab.build_vocab()

        if train_paths is not None:
            self.train = train_paths
        else:
            self.train = self.vocab.encode_file(os.path.join(
                path, 'train.txt'),
                                                ordered=ordered,
                                                verbose=True)
        self.valid = self.vocab.encode_file(os.path.join(path, 'valid.txt'),
                                            ordered=ordered,
                                            verbose=True)
        self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'),
                                           ordered=ordered,
                                           verbose=True)

    def get_iterator(self, split, *args, **kwargs):
        if split == 'train':
            if self.dataset in [
                    'ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'papers'
            ]:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif self.dataset == 'lm1b':
                kwargs['shuffle'] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args,
                                                **kwargs)
            elif self.dataset == 'generic_dataset':
                if isinstance(self.train, list):
                    kwargs.update({
                        'shuffle': True,
                        'shuffle_sent': False,
                    })
                    data_iter = LMMultiFileIterator(self.train, self.vocab,
                                                    *args, **kwargs)
                else:
                    data_iter = LMOrderedIterator(self.train, *args, **kwargs)
        elif split in ['valid', 'test']:
            data = self.valid if split == 'valid' else self.test
            if self.dataset in [
                    'ptb', 'wt2', 'wt103', 'enwik8', 'text8', 'papers',
                    'generic_dataset'
            ]:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
            elif self.dataset == 'lm1b':
                data_iter = LMShuffledIterator(data, *args, **kwargs)

        print(f'Using data_iter {data_iter} for {split} split')
        return data_iter
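A hedged usage sketch for the generic_dataset variant above. When vocab_file is given it is resolved relative to the data path and the vocabulary is loaded from it instead of being counted from train.txt; the module name, paths and values below are assumptions.

# Hypothetical usage; module name, paths and vocab parameters are assumptions.
from data_utils import Corpus

corpus = Corpus('./data/my_corpus', 'generic_dataset', vocab_file='vocab.txt')

# A single train.txt is encoded in memory and served by LMOrderedIterator;
# sharded train-*.txt files would instead be streamed by LMMultiFileIterator.
train_iter = corpus.get_iterator('train', 32, 256)
valid_iter = corpus.get_iterator('valid', 32, 256)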
Example No. 13
class Corpus(object):
    def __init__(self, path, dataset, *args, **kw):
        self.dataset = dataset
        self.vocab = Vocab(*args, **kw)

        if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
            self.vocab.count_file(os.path.join(path, "train.txt"))
            self.vocab.count_file(os.path.join(path, "valid.txt"))
            self.vocab.count_file(os.path.join(path, "test.txt"))
        elif self.dataset == "wt103":
            self.vocab.count_file(os.path.join(path, "train.txt"))
        elif self.dataset == "lm1b":
            train_path_pattern = os.path.join(
                path,
                "1-billion-word-language-modeling-benchmark-r13output",
                "training-monolingual.tokenized.shuffled",
                "news.en-*",
            )
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called

        self.vocab.build_vocab()

        if self.dataset in ["ptb", "wt2", "wt103"]:
            self.train = self.vocab.encode_file(os.path.join(
                path, "train.txt"),
                                                ordered=True)
            self.valid = self.vocab.encode_file(os.path.join(
                path, "valid.txt"),
                                                ordered=True)
            self.test = self.vocab.encode_file(os.path.join(path, "test.txt"),
                                               ordered=True)
        elif self.dataset in ["enwik8", "text8"]:
            self.train = self.vocab.encode_file(os.path.join(
                path, "train.txt"),
                                                ordered=True,
                                                add_eos=False)
            self.valid = self.vocab.encode_file(os.path.join(
                path, "valid.txt"),
                                                ordered=True,
                                                add_eos=False)
            self.test = self.vocab.encode_file(os.path.join(path, "test.txt"),
                                               ordered=True,
                                               add_eos=False)
        elif self.dataset == "lm1b":
            self.train = train_paths
            self.valid = self.vocab.encode_file(os.path.join(
                path, "valid.txt"),
                                                ordered=False,
                                                add_double_eos=True)
            self.test = self.vocab.encode_file(os.path.join(path, "test.txt"),
                                               ordered=False,
                                               add_double_eos=True)

    def get_iterator(self, split, *args, **kw):
        if split == "train":
            if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
                data_iter = LMOrderedIterator(self.train, *args, **kw)
            elif self.dataset == "lm1b":
                kw["shuffle"] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args,
                                                **kw)
        elif split in ["valid", "test"]:
            data = self.valid if split == "valid" else self.test
            if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
                data_iter = LMOrderedIterator(data, *args, **kw)
            elif self.dataset == "lm1b":
                data_iter = LMShuffledIterator(data, *args, **kw)

        return data_iter