Code Example #1
    def create_iterator(self):
        spacy_de = spacy.load("de")
        spacy_en = spacy.load("en")

        def tokenize_de(text):
            return [tok.text for tok in spacy_de.tokenizer(text)][::-1]

        def tokenize_en(text):
            return [tok.text for tok in spacy_en.tokenizer(text)][::-1]

        if self.nopad:
            self.source_field = Field(tokenize=tokenize_de,
                                      init_token="<sos>",
                                      eos_token="<eos>",
                                      lower=True,
                                      batch_first=True)
        else:
            self.source_field = Field(tokenize=tokenize_de,
                                      init_token="<sos>",
                                      eos_token="<eos>",
                                      lower=True,
                                      batch_first=True)

        self.target_field = Field(tokenize=tokenize_en,
                                  init_token="<sos>",
                                  eos_token="<eos>",
                                  lower=True,
                                  batch_first=True)

        train_data, valid_data, test_data = Multi30k.splits(
            exts=(".de", ".en"), fields=(self.source_field, self.target_field))

        print(f"Number of training examples = {len(train_data.examples)}")
        print(f"Number of validation examples = {len(valid_data.examples)}")
        print(f"Number of testing examples = {len(test_data.examples)}")

        self.source_field.build_vocab(train_data, min_freq=2)
        self.target_field.build_vocab(train_data, min_freq=2)

        if self.nopad:
            train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
                (train_data, valid_data, test_data),
                batch_size=self.batch_size,
                sort_within_batch=True,
                sort_key=lambda x: len(x.src),
                device=self.device)
        else:
            train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
                (train_data, valid_data, test_data),
                batch_size=self.batch_size,
                device=self.device)

        return train_iterator, valid_iterator, test_iterator
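The iterators returned above are consumed batch by batch; a minimal usage sketch (the loop itself is an assumption, not part of the original snippet):

for batch in train_iterator:
    src = batch.src  # padded source-token indices, shape (batch, src_len) because batch_first=True
    trg = batch.trg  # padded target-token indices
    # model forward pass and loss computation would go here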
Code Example #2
File: data.py Project: jlrussin/transformer_scan
def build_scan(split, batch_size, device):
    # Get paths and filenames of each partition of split
    if split == 'simple':
        path = 'data/scan/simple/'
    elif split == 'addjump':
        path = 'data/scan/addjump/'
    else:
        raise ValueError("Unknown split: {}".format(split))
    train_path = os.path.join(path,'train')
    dev_path = os.path.join(path,'dev')
    test_path = os.path.join(path,'test')
    exts = ('.src','.trg')

    # Fields for source (SRC) and target (TRG) sequences
    SRC = Field(init_token='<sos>',eos_token='<eos>')
    TRG = Field(init_token='<sos>',eos_token='<eos>')
    fields = (SRC,TRG)

    # Build datasets
    train_ = TranslationDataset(train_path,exts,fields)
    dev_ = TranslationDataset(dev_path,exts,fields)
    test_ = TranslationDataset(test_path,exts,fields)

    # Build vocabs: fields ensure same vocab used for each partition
    SRC.build_vocab(train_)
    TRG.build_vocab(train_)

    # BucketIterator ensures similar sequence lengths to minimize padding
    train, dev, test = BucketIterator.splits((train_, dev_, test_),
        batch_size = batch_size, device = device)

    return SRC, TRG, train, dev, test
Code Example #3
File: torchtext_intro.py Project: AI-Core/tutorials
def iterator_construction(
        train: TabularDataset,
        valid: TabularDataset,
        test: TabularDataset,
        feature: str,  # the column name of the input text data
        batch_sizes: Tuple[int, int, int] = (64, 64, 64),  # order: train, valid, test
        device: torch.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
) -> Tuple[BucketIterator, BucketIterator, BucketIterator]:
    """
    This function takes torchtext.data.TabularDataset objects as input and outputs the corresponding
    BucketIterators, splitting the datasets into batches. Each iterator batches examples of similar lengths
    together, minimizing the amount of padding needed while producing freshly shuffled batches for each new epoch.
    :param train: a torchtext.data.TabularDataset representing the training dataset
    :param valid: a torchtext.data.TabularDataset representing the validation dataset
    :param test: a torchtext.data.TabularDataset representing the testing dataset
    :param feature: a string giving the name of the input feature to the model. Multiple inputs are not supported.
    :param batch_sizes: a tuple of 3 integers, each representing the batch size for the train, validation and test set
           respectively. Default: (64, 64, 64)
    :param device: the torch.device to be used, either 'cpu' or 'cuda' (GPU) if available
    :return: train_iter: a torchtext.data.BucketIterator, the iterator for the training dataset
             valid_iter: a torchtext.data.BucketIterator, the iterator for the validation dataset
             test_iter: a torchtext.data.BucketIterator, the iterator for the testing dataset
    """

    train_iter, valid_iter, test_iter = BucketIterator.splits(
        (train, valid, test),
        batch_sizes=(batch_sizes[0], batch_sizes[1], batch_sizes[2]),
        device=device,
        sort_key=lambda x: len(getattr(x, feature)),
        sort_within_batch=True
    )

    return train_iter, valid_iter, test_iter
Code Example #4
File: utils.py Project: Kaixin-Wu/NEU_NMT
def load_dataset(batch_size):
    '''
    Load the datasets and create bucketized iterators.
    '''

    Lang1 = Field(include_lengths=True, init_token='<sos>', eos_token='<eos>')
    Lang2 = Field(include_lengths=True, init_token='<sos>', eos_token='<eos>')

    train = TranslationDataset(path='data/40w/train',
                               exts=('.ch', '.en'),
                               fields=(Lang1, Lang2))
    val = TranslationDataset(path='data/40w/valid',
                             exts=('.ch', '.en'),
                             fields=(Lang1, Lang2))
    test = TranslationDataset(path='data/40w/test',
                              exts=('.ch', '.en'),
                              fields=(Lang1, Lang2))

    Lang1.build_vocab(train.src, max_size=30000)
    Lang2.build_vocab(train.trg, max_size=30000)

    train_iter, val_iter, test_iter = BucketIterator.splits(
        (train, val, test), batch_size=batch_size, repeat=False)

    return train_iter, val_iter, test_iter, Lang1, Lang2
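Because both fields are built with include_lengths=True, each batch attribute is a (tensor, lengths) pair; a minimal consumption sketch (the loop body is an assumption, not part of the original):

for batch in train_iter:
    src, src_lengths = batch.src  # padded token indices plus true lengths, handy for pack_padded_sequence
    trg, trg_lengths = batch.trg
    # encoder/decoder forward pass would go here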
Code Example #5
def task2_iterators(train, val, device, vectors, batch_size=256):
    TEXT = Field(sequential=True,
                 tokenize=None,
                 init_token="<sos>",
                 eos_token="<eos>")
    LABELS = Field(sequential=True,
                   init_token="<sos>",
                   eos_token="<eos>",
                   is_target=True)

    tv_datafields = [("text", TEXT), ("labels", LABELS)]

    trn, vld = TabularDataset.splits(
        path=config.TASK2['csv_files'],  # the root directory where the data lies
        train=train,
        validation=val,
        format='csv',
        skip_header=True,
        fields=tv_datafields)

    TEXT.build_vocab(trn, vld, vectors=vectors)
    LABELS.build_vocab(trn, vld)
    train_iterator, val_iterator = BucketIterator.splits(
        (trn, vld),
        batch_sizes=(batch_size, batch_size),
        device=device,
        sort_within_batch=False,
        sort_key=lambda x: len(x.text))
    return train_iterator, val_iterator, TEXT, LABELS
Code Example #6
def get_bucketized_iterators(path_to_data: PathOrStr, batch_size: int = 16,
                             len_context_vocab: int = 30000,
                             len_title_vocab: int = 30000,
                             len_aut_vocab: int = 30000) -> IteratorData:
    """
    Gets path_to_data and delegates tasks to generate bucketized training iterators.  
    
    ## Parameters:  
    
    - **path_to_data** *(PathOrStr)*:  Path object or string to a .csv dataset.  
    - **batch_size** *(int=16)*: BucketIterator minibatch size.  
    - **len_context_vocab** *(int=30000)*:  Maximum size of the context vocabulary before adding special tokens.  
    - **len_title_vocab** *(int=30000)*:  Maximum size of the title vocabulary before adding special tokens.  
    - **len_aut_vocab** *(int=30000)*:  Maximum size of the author vocabulary before adding special tokens.  
    
    ## Output:  
    
    - **Training data** *(IteratorData)*:  Container holding CNTXT (*Field*), TTL (*Field*), AUT (*Field*), 
        train_iterator (*BucketIterator*), valid_iterator (*BucketIterator*), test_iterator (*BucketIterator*) objects.
    """
    
    data = get_datasets(path_to_data=path_to_data, len_context_vocab=len_context_vocab,
                        len_title_vocab=len_title_vocab, len_aut_vocab=len_aut_vocab)

    # create bucketed iterators for each dataset
    train_iterator, valid_iterator, test_iterator = BucketIterator.splits((data.train, data.valid, data.test), 
                                                                          batch_size = batch_size,
                                                                          sort_within_batch = True,
                                                                          sort_key = lambda x : len(x.title_cited))
    
    return IteratorData(data.cntxt, data.ttl, data.aut, train_iterator, valid_iterator, test_iterator)
Code Example #7
    def __init__(self,
                 batch_size=64,
                 max_vocab=999999,
                 min_freq=1,
                 tokenizer=sp,
                 shuffle=True):
        super().__init__()
        self.TEXT = Field(sequential=True,
                          use_vocab=True,
                          tokenize=tokenizer,
                          lower=True,
                          batch_first=True,
                          fix_length=20)
        self.LABEL = Field(sequential=False, use_vocab=False, is_target=True)

        train_data, valid_data, test_data = TabularDataset.splits(
            path='data/',
            train='train.txt',
            validation='validation.txt',
            test='test.txt',
            format='tsv',
            fields=[('text', self.TEXT), ('label', self.LABEL)],
            skip_header=True)
        self.TEXT.build_vocab(train_data,
                              max_size=max_vocab,
                              min_freq=min_freq)
        self.LABEL.build_vocab(train_data)

        # self.train_loader = BucketIterator(dataset=train_data, batch_size=batch_size)
        # self.test_loader = BucketIterator(dataset=valid_data, batch_size=batch_size)
        self.tr_dl, self.val_dl, self.test_dl = BucketIterator.splits(
            (train_data, valid_data, test_data),
            sort_key=lambda x: len(x.text),
            batch_size=batch_size,
            shuffle=True)
Code Example #8
def data_iter(data_path, vec_path, fix_length):
    TEXT = data.Field(sequential=True,
                      lower=True,
                      fix_length=fix_length,
                      batch_first=True)

    LABEL = data.Field(sequential=False, use_vocab=False)

    train, test = TabularDataset.splits(path=data_path,
                                        train='train.csv',
                                        test='test.csv',
                                        format='csv',
                                        fields=[('label', LABEL),
                                                ('title', None),
                                                ('text', TEXT)],
                                        skip_header=True)

    train_iter, test_iter = BucketIterator.splits(
        (train, test),  # the datasets used to build the iterators
        batch_sizes=(8, 8),
        sort_within_batch=False,
        repeat=False)

    cache = '.vector_cache'
    if not os.path.exists(cache):
        os.mkdir(cache)

    vectors = Vectors(name=vec_path, cache=cache)
    TEXT.build_vocab(train, vectors=vectors)
    vocab = TEXT.vocab

    return train_iter, test_iter, vocab
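A common downstream step, not shown in the original, is to copy the pretrained vectors attached to the returned vocab into an embedding layer; a minimal sketch with hypothetical arguments:

import torch.nn as nn

train_iter, test_iter, vocab = data_iter('data/', 'vectors.txt', fix_length=100)  # hypothetical path, file and length
embedding = nn.Embedding(len(vocab), vocab.vectors.size(1), padding_idx=vocab.stoi['<pad>'])
embedding.weight.data.copy_(vocab.vectors)  # initialize the layer from the loaded word vectors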
Code Example #9
def get_dataset(args):
    TEXT = Field(sequential=True,
                 tokenize=tokenize_line_en,
                 lower=True,
                 batch_first=True)
    LABEL = Field(sequential=False, use_vocab=False, batch_first=True)
    train, val, test = TabularDataset.splits(path='WikiQACorpus',
                                             root='',
                                             train='WikiQA-train.tsv',
                                             validation='WikiQA-dev.tsv',
                                             test='WikiQA-test.tsv',
                                             format='tsv',
                                             fields=[('question_id', None),
                                                     ('question', TEXT),
                                                     ('document_id', None),
                                                     ('document_title', None),
                                                     ('sentence_id', None),
                                                     ('sentence', TEXT),
                                                     ('label', LABEL)],
                                             skip_header=True)
    TEXT.build_vocab(train, vectors='glove.840B.300d')
    device = torch.device(
        'cuda', args.device) if args.device > -1 else torch.device('cpu')
    train_iter, dev_iter, test_iter = BucketIterator.splits(
        (train, val, test),
        batch_size=args.batch_size,
        sort=False,
        shuffle=True,
        repeat=False,
        device=device)
    return train_iter, dev_iter, test_iter, TEXT.vocab
Code Example #10
File: heb_data.py Project: alonshoa/signLang_DLProg
def create_street_names_data_iterators(path,
                                       char_max_size=50,
                                       names_max_size=60000,
                                       batch_size=32,
                                       device='cuda'):
    chars = Field(sequential=True,
                  use_vocab=True,
                  tokenize=lambda x: x.split(),
                  lower=True,
                  fix_length=18)
    names = Field(sequential=False,
                  use_vocab=True,
                  tokenize=lambda x: x,
                  lower=True)

    fields = {"chars": ("chars", chars), "street_name": ("names", names)}

    train_data, test_data = TabularDataset.splits(
        path="",
        train=os.path.join(path, "train_.csv"),
        test=os.path.join(path, "test_.csv"),
        format="csv",
        fields=fields)

    chars.build_vocab(train_data, max_size=char_max_size, min_freq=2)
    names.build_vocab(train_data, max_size=names_max_size, min_freq=1)

    train_iterator, test_iterator = BucketIterator.splits(
        (train_data, test_data), batch_size=batch_size, device=device)
    return train_iterator, test_iterator, chars.vocab, names.vocab, train_data, test_data
Code Example #11
def load_dataset(path="./data", train_csv="train.csv", val_csv="val.csv", 
                 init_token='^', eos_token='$', batch_size=32):
    INDEX = Field(sequential=False, 
                  use_vocab=False, 
                  pad_token=None, 
                  unk_token=None)
    EN = Field(tokenize=tokenize, 
               include_lengths=True,
               init_token=init_token, 
               eos_token=eos_token)
    JP = Field(tokenize=tokenize, 
               include_lengths=True,
               init_token=init_token, 
               eos_token=eos_token, 
               is_target=True)
    FREQ = Field(sequential=False, 
                   use_vocab=False, 
                   pad_token=None, 
                   unk_token=None,
                   dtype=torch.float32)
    data_fields = [('index', INDEX), ('english', EN), 
                   ('japanese', JP), ('frequency', FREQ)]
    train, val = TabularDataset.splits(path=path, 
                                       train=train_csv, 
                                       validation=val_csv, 
                                       skip_header = True,
                                       format='csv', fields=data_fields)
    EN.build_vocab(train.english)
    JP.build_vocab(train.japanese)
    train_iter, val_iter = BucketIterator.splits((train, val),
                                                 batch_size=batch_size, 
                                                 sort=False,
                                                 repeat=False)
    return train_iter, val_iter, EN, JP
Code Example #12
    def iters(cls,
              batch_size=64,
              device=-1,
              shuffle=True,
              vectors='glove.840B.300d'):
        cls.TEXT = Field(sequential=True,
                         tokenize='spacy',
                         lower=True,
                         batch_first=True)
        cls.LABEL = Field(sequential=False,
                          use_vocab=False,
                          batch_first=True,
                          tensor_type=torch.FloatTensor,
                          postprocessing=Pipeline(get_class_probs))
        cls.ID = RawField()

        train, val, test = cls.splits(cls.TEXT, cls.LABEL, cls.ID)

        cls.TEXT.build_vocab(train, vectors=vectors)

        return BucketIterator.splits((train, val, test),
                                     batch_size=batch_size,
                                     shuffle=shuffle,
                                     repeat=False,
                                     device=device)
Code Example #13
File: data.py Project: cyc1am3n/transformer
    def get_train_valid_iter(self, batch_size):
        train_data, valid_data = self._get_train_valid_data()
        train_iter, valid_iter = BucketIterator.splits(
            (train_data, valid_data),
            batch_size=batch_size,
            device=self.device)
        return train_iter, valid_iter
Code Example #14
def get_data():

    SEED = 1234

    random.seed(SEED)
    torch.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True

    SRC = Field(tokenize=tokenize_de,
                init_token='<sos>',
                eos_token='<eos>',
                lower=True)
    TRG = Field(tokenize=tokenize_en,
                init_token='<sos>',
                eos_token='<eos>',
                lower=True)
    train_data, valid_data, test_data = Multi30k.splits(exts=('.de', '.en'),
                                                        fields=(SRC, TRG))
    SRC.build_vocab(train_data, min_freq=2)
    TRG.build_vocab(train_data, min_freq=2)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    BATCH_SIZE = 128
    INPUT_DIM = len(SRC.vocab)
    OUTPUT_DIM = len(TRG.vocab)
    PAD_IDX = TRG.vocab.stoi['<pad>']

    train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=BATCH_SIZE,
        device=device)
    return train_iterator, valid_iterator, test_iterator, INPUT_DIM, OUTPUT_DIM, PAD_IDX
Code Example #15
    def _get_iterators(self, train_data, valid_data, model_name):
        return BucketIterator.splits((train_data, valid_data),
                                     repeat=False,
                                     batch_size=self.batch_size,
                                     sort_within_batch=False,
                                     sort_key=lambda x: len(x.src),
                                     device=self.device)
Code Example #16
    def initialize_iterators(self):
        ''' build iterators for data (by batches) using the bucket iterator '''
        self.train_iter, self.valid_iter, self.test_iter = BucketIterator.splits(
            datasets=(self.train_set, self.valid_set, self.test_set),
            batch_size=self.batch_size,
            device=self.device,
            random_state=seed)
Code Example #17
    def get_dataloader(self,
                       proc_id=0,
                       n_gpus=1,
                       device=torch.device('cpu'),
                       batch_size=64):
        def _distribute_dataset(dataset):
            n = len(dataset)
            part = dataset[n * proc_id // n_gpus:n * (proc_id + 1) // n_gpus]
            return torchtext.data.Dataset(part, dataset.fields)

        train_ds = _distribute_dataset(self.train_ds)
        self.verbose = self.verbose and (proc_id == 0)
        train_iter, valid_iter = BucketIterator.splits(
            (train_ds, self.valid_ds),
            batch_sizes=(batch_size, batch_size),
            sort_within_batch=True,
            sort_key=lambda x: len(x.input),
            device=device,
            repeat=False,
        )

        test_iter = Iterator(
            self.test_ds,
            batch_size=1,
            sort=False,
            sort_within_batch=False,
            device=device,
            repeat=False,
        )
        train_dl = BatchWrapper(train_iter)
        valid_dl = BatchWrapper(valid_iter)
        test_dl = BatchWrapper(test_iter)
        return train_dl, valid_dl, test_dl
Code Example #18
def load_dataset(batch_size):

    ASK = Field(tokenize=tokenizer,
                include_lengths=True,
                eos_token='<eos>',
                init_token='<sos>')

    ANS = Field(tokenize=tokenizer,
                include_lengths=True,
                eos_token='<eos>',
                init_token='<sos>')
    file_path = r"D:\pycharm_project\datasets\NLP\cn_chat/"
    train, val, test = ttd.TabularDataset.splits(
        path=file_path,
        train='weibo_mini_train.csv',
        test='weibo_mini_test.csv',
        validation='weibo_mini_val.csv',
        format='csv',
        fields=[('Ask', ASK), ('Ans', ANS)])

    ASK.build_vocab(train.Ask, min_freq=1)
    ANS.build_vocab(train.Ans, max_size=20000)

    torch.save(ASK, file_path + 'ASK')
    torch.save(ANS, file_path + 'ANS')
    # train_iter, val_iter, test_iter = BucketIterator.splits(
    #         (train, val, test), batch_size=batch_size, repeat=False)
    train_iter, val_iter, test_iter = BucketIterator.splits(
        (train, val, test), batch_size=batch_size, repeat=False, sort=False)
    return train_iter, val_iter, test_iter, ASK, ANS
Code Example #19
    def get_splits(self, device, batch_size):
        train_dataset = torchtext.data.Dataset(self.train_examples,
                                               self.fields)
        train_dataset.sort_key = lambda example: len(example.input)

        dev_dataset = torchtext.data.Dataset(self.dev_examples, self.fields)
        dev_dataset.sort_key = lambda example: len(example.input)

        test_dataset = torchtext.data.Dataset(self.test_examples, self.fields)
        test_dataset.sort_key = lambda example: len(example.input)

        vectors = Vectors(name=WORD2VEC_EMBEDDING_FILE,
                          cache=WORD2VEC_EMBEDDING_DIR,
                          unk_init=torch.Tensor.zero_)
        self.input_field.build_vocab(train_dataset,
                                     dev_dataset,
                                     test_dataset,
                                     vectors=vectors)
        self.query_field.build_vocab(train_dataset,
                                     dev_dataset,
                                     test_dataset,
                                     vectors=vectors)

        return BucketIterator.splits(
            (train_dataset, dev_dataset, test_dataset),
            batch_size=batch_size,
            repeat=False,
            shuffle=True,
            sort_within_batch=True,
            device=device)
Code Example #20
File: cat.py Project: oahzxl/SeedCup2019
def main():
    train, test, field = dataset_reader(train=True, stop=900000)
    evl, _ = dataset_reader(train=False, fields=field)
    field.build_vocab(train, evl)
    _, evl_iter = BucketIterator.splits((train, evl),
                                        batch_sizes=(1024, 1024),
                                        device=device,
                                        sort_within_batch=False,
                                        repeat=False,
                                        sort=False)

    with open('data/result.txt', 'w+') as f:
        f.write('')

    with open('data/2.txt', "r") as f1:
        with open('data/3.txt', "r") as f2:
            for i, data in tqdm.tqdm(enumerate(evl_iter),
                                     total=evl_iter.__len__()):
                with open('data/result.txt', 'a+') as f:
                    for b in range(data.create_time.size(0)):
                        start_day = field.vocab.itos[data.create_time[b]][:-2]
                        start_day = arrow.get('2019-' + start_day).timestamp
                        day = (float(f1.readline()) + float(f2.readline())) / 2
                        sign_day = int('%.0f' % day)
                        sign_hour = '15'
                        final = str(
                            arrow.get(start_day +
                                      sign_day * 24 * 60 * 60))[:10]
                        final = final + ' ' + sign_hour
                        f.write(final + '\n')
Code Example #21
File: utils.py Project: glennkroegel/imdb
def make_small_imdb(batch_size=8, device=-1, vectors=None):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # TEXT = data.Field(include_lengths=False, lower=True, batch_first=True)
    TEXT = data.Field(tokenize=get_tokenizer("basic_english"),
                      init_token='<sos>',
                      eos_token='<eos>',
                      lower=True,
                      batch_first=False)
    LABEL = data.LabelField()

    datafields = [('text', TEXT), ('label', LABEL)]
    train, test = TabularDataset.splits(path='.',
                                        train='train.csv',
                                        validation='cv.csv',
                                        format='csv',
                                        skip_header=True,
                                        fields=datafields)

    TEXT.build_vocab(train, test, vectors=vectors, max_size=30000)
    LABEL.build_vocab(train, test)
    train_iter, test_iter = BucketIterator.splits(
        (train, test),
        batch_sizes=(128, 128),
        device=device,
        sort_key=lambda x: len(x.text),
        sort_within_batch=False,
        repeat=False)

    return train_iter, test_iter, TEXT, LABEL
Code Example #22
    def main_handler(cls, config, data_dir, shuffle=True):

        # Getting Data Splits: train, dev, test
        print("\n\n==>> Loading Data splits and tokenizing each document....")
        train, val, test = cls.get_dataset_splits(data_dir)

        # Build Vocabulary and obtain embeddings for each word in Vocabulary
        print("\n==>> Building Vocabulary and obtaining embeddings....")
        glove_embeds = torchtext.vocab.Vectors(name=config['glove_path'],
                                               max_vectors=int(4e5))
        cls.TEXT.build_vocab(train, val, test, vectors=glove_embeds)

        # Setting 'unk' token as the average of all other embeddings
        if config['model_name'] != 'han':
            cls.TEXT.vocab.vectors[cls.TEXT.vocab.stoi['<unk>']] = torch.mean(
                cls.TEXT.vocab.vectors, dim=0)

        # Getting iterators for each set
        print("\n==>> Preparing Iterators....")
        train_iter, val_iter, test_iter = BucketIterator.splits(
            (train, val, test),
            batch_size=config['batch_size'],
            repeat=False,
            shuffle=shuffle,
            sort_within_batch=False,
            device=device)
        return cls.TEXT, cls.LABEL, train_iter, val_iter, test_iter, train, val, test
Code Example #23
def iterator_construction(
    train: TranslationDataset, valid: TranslationDataset,
    test: TranslationDataset, batch_sizes: Tuple[int, int, int],
    device: Union[str, torch.device]
) -> Tuple[BucketIterator, BucketIterator, BucketIterator]:
    """
    This function takes torchtext.data.TranslationDataset objects as input and outputs the corresponding
    BucketIterators, splitting the datasets into batches. Each iterator batches examples of similar lengths
    together, minimizing the amount of padding needed while producing freshly shuffled batches for each new training epoch.
    :param train: a torchtext.data.TranslationDataset representing the training dataset.
    :param valid: a torchtext.data.TranslationDataset representing the validation dataset.
    :param test: a torchtext.data.TranslationDataset representing the test dataset.
    :param batch_sizes: a tuple of 3 integers, each representing the batch size for the train, validation and test set
           respectively.
    :param device: the device to be used for the calculations. Can be a str (e.g. 'cuda') or torch.device object.
    :return: train_iter: a torchtext.data.BucketIterator, the iterator for the training dataset.
             valid_iter: a torchtext.data.BucketIterator, the iterator for the validation dataset.
             test_iter: a torchtext.data.BucketIterator, the iterator for the test dataset.
    """
    train_iter, valid_iter, test_iter = BucketIterator.splits(
        datasets=(train, valid, test),
        batch_sizes=batch_sizes,
        sort=True,
        device=device)
    return train_iter, valid_iter, test_iter
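A hypothetical call to the function above, assuming TranslationDataset objects named train_data, valid_data and test_data already exist (the batch sizes are illustrative, not from the original):

train_iter, valid_iter, test_iter = iterator_construction(
    train=train_data, valid=valid_data, test=test_data,
    batch_sizes=(128, 256, 256),
    device='cuda')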
Code Example #24
def get_data():
    SRC = Field(tokenize=tokenize_de,
                init_token='<sos>',
                eos_token='<eos>',
                pad_token='<pad>',
                unk_token='<unk>',
                lower=True)
    TRG = Field(tokenize=tokenize_en,
                init_token='<sos>',
                eos_token='<eos>',
                pad_token='<pad>',
                unk_token='<unk>',
                lower=True)

    train_data, valid_data, test_data = Multi30k.splits(exts=('.de', '.en'),
                                                        fields=(SRC, TRG))

    print("train: {}".format(len(train_data.examples)))
    print("valid: {}".format(len(valid_data.examples)))
    print("test: {}".format(len(test_data.examples)))

    SRC.build_vocab(train_data, min_freq=params.MIN_FREQ)
    TRG.build_vocab(train_data, min_freq=params.MIN_FREQ)

    print("Source vocabulary size: {}".format(len(SRC.vocab)))
    print("Target vocabulary size: {}".format(len(TRG.vocab)))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=params.BATCH_SIZE,
        device=device)

    return train_iterator, valid_iterator, test_iterator, SRC, TRG
Code Example #25
def get_dataset(train_df, valid_df, batch_size, cache=None, gpus=1, vectors=None):
    TEXT = data.Field(init_token='<START>', eos_token='<END>', tokenize=None, tokenizer_language='en',
                      batch_first=True, lower=True, stop_words=set(string.punctuation))
    LABEL = data.Field(dtype=torch.float, is_target=True, unk_token=None, sequential=False, use_vocab=False)

    train_dataset = DataFrameDataset(train_df, {
        'text': TEXT,
        'label': LABEL
    })

    val_dataset = DataFrameDataset(valid_df, {
        'text': TEXT,
        'label': LABEL
    })

    train_loader, val_loader = BucketIterator.splits(
        (train_dataset, val_dataset),
        batch_size=batch_size,
        sort_key=lambda x: len(x.text),
        device='cuda' if torch.cuda.is_available() and gpus else 'cpu'
    )

    embeddings = vectors if vectors is not None else GloVe('42B', cache=cache)
    TEXT.build_vocab(train_dataset.text, vectors=embeddings)

    return TEXT, LABEL, train_loader, val_loader
Code Example #26
    def build_iterator(self, train, val, test):
        train_iter, valid_iter, test_iter = \
            BucketIterator.splits((train, val, test), batch_size=32,
                                  sort_key=lambda x: (len(x.orig), len(x.para)),
                                  sort_within_batch=True, repeat=False,
                                  device=self.device)
        return train_iter, valid_iter, test_iter
Code Example #27
def build_data(train_file, test_file):
    TEXT = Field(sequential=True, tokenize=tokenize, lower=False)
    LABELS = Field(sequential=False, use_vocab=True)

    datafields = [('word', TEXT), ('label', LABELS), ('left', TEXT),
                  ('right', TEXT)]
    train_data, valid_data = TabularDataset.splits(
        path='data',
        train='sample_train.txt',
        validation='sample_train.txt',
        format='tsv',
        skip_header=False,
        fields=datafields)
    TEXT.build_vocab(train_data)
    LABELS.build_vocab(train_data)

    train_iter, valid_iter = BucketIterator.splits(
        (train_data, valid_data),
        batch_sizes=(64, 64),
        device=-1,
        sort_key=lambda x: len(x.word),
        sort_within_batch=False,
        repeat=False)

    # print(next(train_iter.__iter__()))
    return train_iter, valid_iter
Code Example #28
def create_dataset(path_to_dataset,batch_size,split_ratio=0.7,min_vocab_freq=10,max_vocab_size=4000):
	text_field = Field(tokenize="spacy",tokenizer_language="en",batch_first=True,init_token="<sos>",eos_token="<eos>",lower=True)

	def transform(caption):
		caption = caption.strip().lower().split()
		return caption

	dataset = CocoCaptions(annFile=os.path.join(path_to_dataset,"captions_train2014.json"),text_field=text_field,transform=transform)
	train,val = dataset.split(split_ratio=split_ratio)
	test = CocoCaptions(annFile=os.path.join(path_to_dataset,"captions_val2014.json"),text_field=text_field,transform=transform)

	print("Dataset loaded")
	print("Train set size:",len(train))

	text_field.build_vocab(dataset.text,min_freq=min_vocab_freq,max_size=max_vocab_size)
	SOS_TOKEN = text_field.vocab.stoi['<sos>']
	EOS_TOKEN = text_field.vocab.stoi['<eos>']
	UNK_TOKEN = text_field.vocab.stoi['<unk>']
	PAD_TOKEN = text_field.vocab.stoi['<pad>']

	print("Vocabulary built")

	print("Vocabulary statistics")

	print("\nMost common words in the vocabulary:\n",text_field.vocab.freqs.most_common(10))
	print("Size of the vocabulary:",len(text_field.vocab))
	print("Max sequence length", dataset.max_seq_len)

	train_iter,val_iter = BucketIterator.splits((train,val),repeat=False,batch_size=batch_size)
	test_iter = BucketIterator(test,batch_size=batch_size,repeat=False,train=False)
	vocab_dict = text_field.vocab.stoi
	return {"data_iters":(train_iter,val_iter,test_iter),"fields":text_field,
	"word_to_num_vocab":vocab_dict,"num_to_word_vocab":{y:x for x,y in vocab_dict.items()},
	"num_classes":len(text_field.vocab),"tokens":(SOS_TOKEN,EOS_TOKEN,UNK_TOKEN,PAD_TOKEN),"max_seq_len":dataset.max_seq_len}
Code Example #29
def get_iterator(dataset, device, batch_size, shuffle=True, repeat=False):
    train, val, test = dataset

    train_iter, val_iter = BucketIterator.splits(
        (train, val),
        batch_size=batch_size,
        device=device,
        sort_key=lambda x: len(x.comment_text),
        sort_within_batch=False,
        shuffle=shuffle,
        repeat=repeat)

    test_iter = Iterator(test,
                         batch_size=batch_size,
                         device=device,
                         sort_within_batch=False,
                         repeat=repeat,
                         sort=False)

    train_dl = BatchWrapper(train_iter, "comment_text", [
        "toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"
    ])
    valid_dl = BatchWrapper(val_iter, "comment_text", [
        "toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"
    ])
    test_dl = BatchWrapper(test_iter, "comment_text", None)

    return train_dl, valid_dl, test_dl
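BatchWrapper is project-specific and not defined in this snippet; a minimal sketch of the usual pattern, assuming it only re-exposes each torchtext batch as an (x, y) pair matching the three-argument usage above:

import torch

class BatchWrapper:
    """Yield (x, y) pairs: x is the text tensor, y stacks the label columns (or None for test data)."""
    def __init__(self, iterator, x_var, y_vars):
        self.iterator, self.x_var, self.y_vars = iterator, x_var, y_vars

    def __iter__(self):
        for batch in self.iterator:
            x = getattr(batch, self.x_var)
            if self.y_vars is None:
                yield x, None
            else:
                y = torch.stack([getattr(batch, v).float() for v in self.y_vars], dim=1)
                yield x, y

    def __len__(self):
        return len(self.iterator)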
Code Example #30
def load_dataset(batch_size):
    spacy_de = spacy.load('de')
    spacy_en = spacy.load('en')
    url = re.compile('(<url>.*</url>)')

    def tokenize_de(text):
        return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))]

    def tokenize_en(text):
        return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))]

    DE = Field(tokenize=tokenize_de,
               include_lengths=True,
               init_token='<sos>',
               eos_token='<eos>')
    EN = Field(tokenize=tokenize_en,
               include_lengths=True,
               init_token='<sos>',
               eos_token='<eos>')
    train, val, test = Multi30k.splits(exts=('.de', '.en'), fields=(DE, EN))
    DE.build_vocab(train.src, min_freq=2)
    EN.build_vocab(train.trg, max_size=10000)
    train_iter, val_iter, test_iter = BucketIterator.splits(
        (train, val, test), batch_size=batch_size, repeat=False)

    return train_iter, val_iter, test_iter, DE, EN
Code Example #31
    def create_iterator(self, device):

        # create iterators that outputs a batch of data samples
        return BucketIterator.splits(
            (self.train_data, self.valid_data, self.test_data),
            batch_size=self.batch_size,
            device=device)
Code Example #32
File: utils.py Project: gwli/seq2seq
def load_dataset(batch_size):
    spacy_de = spacy.load('de')
    spacy_en = spacy.load('en')
    url = re.compile('(<url>.*</url>)')

    def tokenize_de(text):
        return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))]

    def tokenize_en(text):
        return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))]

    DE = Field(tokenize=tokenize_de, include_lengths=True,
               init_token='<sos>', eos_token='<eos>')
    EN = Field(tokenize=tokenize_en, include_lengths=True,
               init_token='<sos>', eos_token='<eos>')
    train, val, test = Multi30k.splits(exts=('.de', '.en'), fields=(DE, EN))
    DE.build_vocab(train.src, min_freq=2)
    EN.build_vocab(train.trg, max_size=10000)
    train_iter, val_iter, test_iter = BucketIterator.splits(
            (train, val, test), batch_size=batch_size, repeat=False)
    return train_iter, val_iter, test_iter, DE, EN
Code Example #33
 skip_header=True, 
 fields=[('id',None),('text',TEXT),('label',LABEL)], 
 filter_pred = lambda x: True if len(x.text) > 1 else False) 
# keep only examples whose token-level sentence length is greater than 1

TEXT.build_vocab(train_data,min_freq=2)
LABEL.build_vocab(train_data)

# print (TEXT.vocab)
# print (len(TEXT.vocab),len(LABEL.vocab))

# print (TEXT.vocab.itos[:5])
# print (LABEL.vocab.itos)

train_loader, test_loader = BucketIterator.splits((train_data, test_data),
                                                  sort_key=lambda x: len(x.text),
                                                  sort_within_batch=True,
                                                  repeat=False, shuffle=True,
                                                  batch_size=32, device=DEVICE)

for batch in train_loader:
    break

EPOCH = 5
BATCH_SIZE = 32
EMBED = 300
KERNEL_SIZES = [3,4,5]
KERNEL_DIM = 100
LR = 0.001

# model = CNNClassifier(len(TEXT.vocab), EMBED, 1, KERNEL_DIM, KERNEL_SIZES)
model = RNN(len(TEXT.vocab), EMBED, KERNEL_DIM, 1, bidirec=False)
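The fragment stops after constructing the model; a minimal training-loop sketch under assumptions not stated in the original (binary labels, a model returning one logit per example, Adam plus BCE-with-logits):

import torch.nn as nn
import torch.optim as optim

model = model.to(DEVICE)
optimizer = optim.Adam(model.parameters(), lr=LR)
criterion = nn.BCEWithLogitsLoss()

for epoch in range(EPOCH):
    for batch in train_loader:
        optimizer.zero_grad()
        logits = model(batch.text).squeeze(1)          # assumes output shape (batch, 1)
        loss = criterion(logits, batch.label.float())  # label handling depends on how LABEL was defined
        loss.backward()
        optimizer.step()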
Code Example #34
# multi30k dataloader
train,val,test = datasets.Multi30k.splits(exts=(".en",".de"),fields=(EN,DE),root=data_path)

# wmt14 dataloader (better than using datasets.WMT14.splits since it's slow)
#train,val,test = datasets.TranslationDataset.splits(exts=(".en",".de"),fields=[("src",EN),("trg",DE)],path=os.path.join(data_path,"wmt14"),
#	train="train.tok.clean.bpe.32000",validation="newstest2013.tok.bpe.32000",test="newstest2014.tok.bpe.32000")

print("Dataset loaded")

EN.build_vocab(train.src,min_freq=3)
DE.build_vocab(train.trg,max_size=50000)

print("Vocabularies built")

train_iter,val_iter = BucketIterator.splits((train, val),batch_size=3)
test_iter = BucketIterator(test,batch_size=3)

print("Start iterating through data")

for i,batch in enumerate(train_iter):
	print(batch.src) # the source language
	print(batch.trg) # the target language
	break

for i,batch in enumerate(val_iter):
	print(batch.src) # the source language
	print(batch.trg) # the target language
	break

for i,batch in enumerate(test_iter):