def _setup_datasets(dataset_name, train_filenames, valid_filenames, test_filenames, root='.data'): if not isinstance(train_filenames, tuple) and not isinstance(valid_filenames, tuple) \ and not isinstance(test_filenames, tuple): raise ValueError("All filenames must be tuples") src_train, tgt_train = train_filenames src_eval, tgt_eval = valid_filenames src_test, tgt_test = test_filenames extracted_files = [] if isinstance(URLS[dataset_name], list): for f in URLS[dataset_name]: dataset_tar = download_from_url(f, root=root) extracted_files.extend(extract_archive(dataset_tar)) elif isinstance(URLS[dataset_name], str): dataset_tar = download_from_url(URLS[dataset_name], root=root) extracted_files.extend(extract_archive(dataset_tar)) else: raise ValueError( "URLS for {} has to be in a form or list or string".format( dataset_name)) # Clean the xml and tag file in the archives file_archives = [] for fname in extracted_files: if 'xml' in fname: _clean_xml_file(fname) file_archives.append(os.path.splitext(fname)[0]) elif "tags" in fname: _clean_tags_file(fname) file_archives.append(fname.replace('.tags', '')) else: file_archives.append(fname) data_filenames = defaultdict(dict) data_filenames = { "train": _construct_filepaths(file_archives, src_train, tgt_train), "valid": _construct_filepaths(file_archives, src_eval, tgt_eval), "test": _construct_filepaths(file_archives, src_test, tgt_test) } for key in data_filenames.keys(): if len(data_filenames[key]) == 0 or data_filenames[key] is None: raise FileNotFoundError( "Files are not found for data type {}".format(key)) datasets = [] for key in data_filenames.keys(): src_data_iter = _read_text_iterator(data_filenames[key][0]) tgt_data_iter = _read_text_iterator(data_filenames[key][1]) def _iter(src_data_iter, tgt_data_iter): for item in zip(src_data_iter, tgt_data_iter): yield item datasets.append( RawTextIterableDataset(dataset_name, NUM_LINES[dataset_name], _iter(src_data_iter, tgt_data_iter))) return tuple(datasets)
def WMT14(root, split, language_pair=('de', 'en'), train_set='train.tok.clean.bpe.32000', valid_set='newstest2013.tok.bpe.32000', test_set='newstest2014.tok.bpe.32000'): """WMT14 Dataset The available datasets include following: **Language pairs**: +-----+-----+-----+ | |'en' |'de' | +-----+-----+-----+ |'en' | | x | +-----+-----+-----+ |'de' | x | | +-----+-----+-----+ Args: root: Directory where the datasets are saved. Default: ".data" split: split or splits to be returned. Can be a string or tuple of strings. Default: (‘train’, ‘valid’, ‘test’) language_pair: tuple or list containing src and tgt language train_set: A string to identify train set. valid_set: A string to identify validation set. test_set: A string to identify test set. Examples: >>> from torchtext.datasets import WMT14 >>> train_iter, valid_iter, test_iter = WMT14() >>> src_sentence, tgt_sentence = next(train_iter) """ supported_language = ['en', 'de'] supported_train_set = [s for s in NUM_LINES if 'train' in s] supported_valid_set = [s for s in NUM_LINES if 'test' in s] supported_test_set = [s for s in NUM_LINES if 'test' in s] assert ( len(language_pair) == 2 ), 'language_pair must contain only 2 elements: src and tgt language respectively' if language_pair[0] not in supported_language: raise ValueError( "Source language '{}' is not supported. Valid options are {}". format(language_pair[0], supported_language)) if language_pair[1] not in supported_language: raise ValueError( "Target language '{}' is not supported. Valid options are {}". format(language_pair[1], supported_language)) if train_set not in supported_train_set: raise ValueError( "'{}' is not a valid train set identifier. valid options are {}". format(train_set, supported_train_set)) if valid_set not in supported_valid_set: raise ValueError( "'{}' is not a valid valid set identifier. valid options are {}". format(valid_set, supported_valid_set)) if test_set not in supported_test_set: raise ValueError( "'{}' is not a valid valid set identifier. valid options are {}". format(test_set, supported_test_set)) train_filenames = '{}.{}'.format(train_set, language_pair[0]), '{}.{}'.format( train_set, language_pair[1]) valid_filenames = '{}.{}'.format(valid_set, language_pair[0]), '{}.{}'.format( valid_set, language_pair[1]) test_filenames = '{}.{}'.format(test_set, language_pair[0]), '{}.{}'.format( test_set, language_pair[1]) if split == 'train': src_file, tgt_file = train_filenames elif split == 'valid': src_file, tgt_file = valid_filenames else: src_file, tgt_file = test_filenames dataset_tar = download_from_url(URL, root=root, hash_value=MD5, path=os.path.join(root, _PATH), hash_type='md5') extracted_files = extract_archive(dataset_tar) data_filenames = { split: _construct_filepaths(extracted_files, src_file, tgt_file), } for key in data_filenames: if len(data_filenames[key]) == 0 or data_filenames[key] is None: raise FileNotFoundError( "Files are not found for data type {}".format(key)) assert data_filenames[split][ 0] is not None, "Internal Error: File not found for reading" assert data_filenames[split][ 1] is not None, "Internal Error: File not found for reading" src_data_iter = _read_text_iterator(data_filenames[split][0]) tgt_data_iter = _read_text_iterator(data_filenames[split][1]) def _iter(src_data_iter, tgt_data_iter): for item in zip(src_data_iter, tgt_data_iter): yield item return _RawTextIterableDataset(DATASET_NAME, NUM_LINES[os.path.splitext(src_file)[0]], _iter(src_data_iter, tgt_data_iter))
def Multi30k(root, split, task='task1', language_pair=('de', 'en'), train_set="train", valid_set="val", test_set="test_2016_flickr"): """Multi30k Dataset The available datasets include following: **Language pairs (task1)**: +-----+-----+-----+-----+-----+ | |'en' |'cs' |'de' |'fr' | +-----+-----+-----+-----+-----+ |'en' | | x | x | x | +-----+-----+-----+-----+-----+ |'cs' | x | | x | x | +-----+-----+-----+-----+-----+ |'de' | x | x | | x | +-----+-----+-----+-----+-----+ |'fr' | x | x | x | | +-----+-----+-----+-----+-----+ **Language pairs (task2)**: +-----+-----+-----+ | |'en' |'de' | +-----+-----+-----+ |'en' | | x | +-----+-----+-----+ |'de' | x | | +-----+-----+-----+ For additional details refer to source: https://github.com/multi30k/dataset Args: root: Directory where the datasets are saved. Default: ".data" split: split or splits to be returned. Can be a string or tuple of strings. Default: (‘train’, ‘valid’, ‘test’) task: Indicate the task language_pair: tuple or list containing src and tgt language train_set: A string to identify train set. valid_set: A string to identify validation set. test_set: A string to identify test set. Examples: >>> from torchtext.experimental.datasets.raw import Multi30k >>> train_iter, valid_iter, test_iter = Multi30k() >>> src_sentence, tgt_sentence = next(train_iter) """ if task not in SUPPORTED_DATASETS.keys(): raise ValueError( 'task {} is not supported. Valid options are {}'.format( task, SUPPORTED_DATASETS.keys())) assert ( len(language_pair) == 2 ), 'language_pair must contain only 2 elements: src and tgt language respectively' if language_pair[0] not in SUPPORTED_DATASETS[task].keys(): raise ValueError( "Source language '{}' is not supported. Valid options for task '{}' are {}" .format(language_pair[0], task, list(SUPPORTED_DATASETS[task].keys()))) if language_pair[1] not in SUPPORTED_DATASETS[task].keys(): raise ValueError( "Target language '{}' is not supported. Valid options for task '{}' are {}" .format(language_pair[1], task, list(SUPPORTED_DATASETS[task].keys()))) if train_set not in SUPPORTED_DATASETS[task][ language_pair[0]].keys() or 'train' not in train_set: raise ValueError( "'{}' is not a valid train set identifier. valid options for task '{}' and language pair {} are {}" .format(train_set, task, language_pair, [ k for k in SUPPORTED_DATASETS[task][language_pair[0]].keys() if 'train' in k ])) if valid_set not in SUPPORTED_DATASETS[task][ language_pair[0]].keys() or 'val' not in valid_set: raise ValueError( "'{}' is not a valid valid set identifier. valid options for task '{}' and language pair {} are {}" .format(valid_set, task, language_pair, [ k for k in SUPPORTED_DATASETS[task][language_pair[0]].keys() if 'val' in k ])) if test_set not in SUPPORTED_DATASETS[task][ language_pair[0]].keys() or 'test' not in test_set: raise ValueError( "'{}' is not a valid test set identifier. 
valid options for task '{}' and language pair {} are {}" .format(test_set, task, language_pair, [ k for k in SUPPORTED_DATASETS[task][language_pair[0]].keys() if 'test' in k ])) train_filenames = [ "{}.{}".format(train_set, language_pair[0]), "{}.{}".format(train_set, language_pair[1]) ] valid_filenames = [ "{}.{}".format(valid_set, language_pair[0]), "{}.{}".format(valid_set, language_pair[1]) ] test_filenames = [ "{}.{}".format(test_set, language_pair[0]), "{}.{}".format(test_set, language_pair[1]) ] if split == 'train': src_file, tgt_file = train_filenames elif split == 'valid': src_file, tgt_file = valid_filenames else: src_file, tgt_file = test_filenames extracted_files = [] # list of paths to the extracted files current_url = [] current_md5 = [] current_filenames = [src_file, tgt_file] for url, md5 in zip(URL[split], MD5[split]): if any(f in url for f in current_filenames): current_url.append(url) current_md5.append(md5) for url, md5 in zip(current_url, current_md5): dataset_tar = download_from_url(url, path=os.path.join( root, os.path.basename(url)), root=root, hash_value=md5, hash_type='md5') extracted_files.extend(extract_archive(dataset_tar)) file_archives = extracted_files data_filenames = { split: _construct_filepaths(file_archives, src_file, tgt_file), } for key in data_filenames: if len(data_filenames[key]) == 0 or data_filenames[key] is None: raise FileNotFoundError( "Files are not found for data type {}".format(key)) assert data_filenames[split][ 0] is not None, "Internal Error: File not found for reading" assert data_filenames[split][ 1] is not None, "Internal Error: File not found for reading" src_data_iter = _read_text_iterator(data_filenames[split][0]) tgt_data_iter = _read_text_iterator(data_filenames[split][1]) def _iter(src_data_iter, tgt_data_iter): for item in zip(src_data_iter, tgt_data_iter): yield item set_identifier = { 'train': train_set, 'valid': valid_set, 'test': test_set, } return RawTextIterableDataset( "Multi30k", SUPPORTED_DATASETS[task][language_pair[0]][ set_identifier[split]]['NUM_LINES'], _iter(src_data_iter, tgt_data_iter))
import torchtext
import torch
from torchtext.data.utils import get_tokenizer
from collections import Counter
from torchtext.vocab import Vocab
from torchtext.utils import download_from_url, extract_archive
import io

url_base = 'https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/'
train_urls = ('train.de.gz', 'train.en.gz')
val_urls = ('val.de.gz', 'val.en.gz')
test_urls = ('test_2016_flickr.de.gz', 'test_2016_flickr.en.gz')

train_filepaths = [extract_archive(download_from_url(url_base + url))[0]
                   for url in train_urls]
val_filepaths = [extract_archive(download_from_url(url_base + url))[0]
                 for url in val_urls]
test_filepaths = [extract_archive(download_from_url(url_base + url))[0]
                  for url in test_urls]

de_tokenizer = get_tokenizer('spacy', language='de')
en_tokenizer = get_tokenizer('spacy', language='en')


def build_vocab(filepath, tokenizer):
    counter = Counter()
    with io.open(filepath, encoding='utf8') as f:
        for string_ in f:
            counter.update(tokenizer(string_))
    return Vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])


de_vocab = build_vocab(train_filepaths[0], de_tokenizer)
en_vocab = build_vocab(train_filepaths[1], en_tokenizer)
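# A minimal follow-up sketch (not part of the original snippet): numericalize the
# raw Multi30k files into (src, tgt) tensor pairs with the vocabs built above.
# It assumes Vocab lookups work as vocab[token] (falling back to the '<unk>'
# index for unseen tokens) and that the .de/.en files are line-aligned, one
# sentence per line; it reuses io, torch, and the tokenizers imported above.
def data_process(filepaths):
    raw_de_iter = iter(io.open(filepaths[0], encoding='utf8'))
    raw_en_iter = iter(io.open(filepaths[1], encoding='utf8'))
    data = []
    for raw_de, raw_en in zip(raw_de_iter, raw_en_iter):
        de_tensor = torch.tensor([de_vocab[token] for token in de_tokenizer(raw_de)],
                                 dtype=torch.long)
        en_tensor = torch.tensor([en_vocab[token] for token in en_tokenizer(raw_en)],
                                 dtype=torch.long)
        data.append((de_tensor, en_tensor))
    return data


train_data = data_process(train_filepaths)
val_data = data_process(val_filepaths)
test_data = data_process(test_filepaths)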
        self.register_buffer('pe', pe)

    def forward(self, x):
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)

#%%
import io
import torch
from torchtext.utils import download_from_url, extract_archive
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator

url = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'
test_filepath, valid_filepath, train_filepath = extract_archive(
    download_from_url(url))
tokenizer = get_tokenizer('basic_english')
vocab = build_vocab_from_iterator(
    map(tokenizer, iter(io.open(train_filepath, encoding="utf8"))))


def data_process(raw_text_iter):
    data = [torch.tensor([vocab[token] for token in tokenizer(item)],
                         dtype=torch.long)
            for item in raw_text_iter]
    return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))


train_data = data_process(iter(io.open(train_filepath, encoding="utf8")))
val_data = data_process(iter(io.open(valid_filepath, encoding="utf8")))
def run_worker(rank, world_size): ###################################################################### # Load and batch data # ------------------- # ###################################################################### # The training process uses Wikitext-2 dataset from ``torchtext``. The # vocab object is built based on the train dataset and is used to numericalize # tokens into tensors. Starting from sequential data, the ``batchify()`` # function arranges the dataset into columns, trimming off any tokens remaining # after the data has been divided into batches of size ``batch_size``. # For instance, with the alphabet as the sequence (total length of 26) # and a batch size of 4, we would divide the alphabet into 4 sequences of # length 6: # # .. math:: # \begin{bmatrix} # \text{A} & \text{B} & \text{C} & \ldots & \text{X} & \text{Y} & \text{Z} # \end{bmatrix} # \Rightarrow # \begin{bmatrix} # \begin{bmatrix}\text{A} \\ \text{B} \\ \text{C} \\ \text{D} \\ \text{E} \\ \text{F}\end{bmatrix} & # \begin{bmatrix}\text{G} \\ \text{H} \\ \text{I} \\ \text{J} \\ \text{K} \\ \text{L}\end{bmatrix} & # \begin{bmatrix}\text{M} \\ \text{N} \\ \text{O} \\ \text{P} \\ \text{Q} \\ \text{R}\end{bmatrix} & # \begin{bmatrix}\text{S} \\ \text{T} \\ \text{U} \\ \text{V} \\ \text{W} \\ \text{X}\end{bmatrix} # \end{bmatrix} # # These columns are treated as independent by the model, which means that # the dependence of ``G`` and ``F`` can not be learned, but allows more # efficient batch processing. # # In 'run_worker' def print_with_rank(msg): print('[RANK {}]: {}'.format(rank, msg)) import io from torchtext.utils import download_from_url, extract_archive from torchtext.data.utils import get_tokenizer from torchtext.vocab import build_vocab_from_iterator url = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip' test_filepath, valid_filepath, train_filepath = extract_archive( download_from_url(url, root=".data{}".format(rank))) tokenizer = get_tokenizer('basic_english') vocab = build_vocab_from_iterator( map(tokenizer, iter(io.open(train_filepath, encoding="utf8")))) def data_process(raw_text_iter): data = [ torch.tensor([vocab[token] for token in tokenizer(item)], dtype=torch.long) for item in raw_text_iter ] return torch.cat(tuple(filter(lambda t: t.numel() > 0, data))) train_data = data_process(iter(io.open(train_filepath, encoding="utf8"))) val_data = data_process(iter(io.open(valid_filepath, encoding="utf8"))) test_data = data_process(iter(io.open(test_filepath, encoding="utf8"))) device = torch.device(2 * rank) def batchify(data, bsz, rank, world_size, is_train=False): # Divide the dataset into bsz parts. nbatch = data.size(0) // bsz # Trim off any extra elements that wouldn't cleanly fit (remainders). data = data.narrow(0, 0, nbatch * bsz) # Evenly divide the data across the bsz batches. data = data.view(bsz, -1).t().contiguous() # Divide the data across the ranks only for training data. 
if is_train: data_per_rank = data.size(0) // world_size data = data[rank * data_per_rank:(rank + 1) * data_per_rank] return data.to(device) batch_size = 20 eval_batch_size = 10 train_data = batchify(train_data, batch_size, rank, world_size, True) val_data = batchify(val_data, eval_batch_size, rank, world_size) test_data = batchify(test_data, eval_batch_size, rank, world_size) ###################################################################### # Functions to generate input and target sequence # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ###################################################################### # ``get_batch()`` function generates the input and target sequence for # the transformer model. It subdivides the source data into chunks of # length ``bptt``. For the language modeling task, the model needs the # following words as ``Target``. For example, with a ``bptt`` value of 2, # we’d get the following two Variables for ``i`` = 0: # # .. image:: ../_static/img/transformer_input_target.png # # It should be noted that the chunks are along dimension 0, consistent # with the ``S`` dimension in the Transformer model. The batch dimension # ``N`` is along dimension 1. # # In 'run_worker' bptt = 35 def get_batch(source, i): seq_len = min(bptt, len(source) - 1 - i) data = source[i:i + seq_len] target = source[i + 1:i + 1 + seq_len].view(-1) return data, target ###################################################################### # Model scale and Pipe initialization # ----------------------------------- # ###################################################################### # To demonstrate training large Transformer models using pipeline parallelism, # we scale up the Transformer layers appropriately. We use an embedding # dimension of 4096, hidden size of 4096, 16 attention heads and 8 total # transformer layers (``nn.TransformerEncoderLayer``). This creates a model with # **~1 billion** parameters. # # We need to initialize the `RPC Framework <https://pytorch.org/docs/stable/rpc.html>`__ # since Pipe depends on the RPC framework via `RRef <https://pytorch.org/docs/stable/rpc.html#rref>`__ # which allows for future expansion to cross host pipelining. We need to # initialize the RPC framework with only a single worker since we're using a # single process to drive multiple GPUs. # # The pipeline is then initialized with 8 transformer layers on one GPU and 8 # transformer layers on the other GPU. One pipe is setup across GPUs 0 and 1 and # another across GPUs 2 and 3. Both pipes are then replicated using DistributedDataParallel. # In 'run_worker' ntokens = len(vocab.stoi) # the size of vocabulary emsize = 4096 # embedding dimension nhid = 4096 # the dimension of the feedforward network model in nn.TransformerEncoder nlayers = 8 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder nhead = 16 # the number of heads in the multiheadattention models dropout = 0.2 # the dropout value from torch.distributed import rpc tmpfile = tempfile.NamedTemporaryFile() rpc.init_rpc( name="worker", rank=0, world_size=1, rpc_backend_options=rpc.TensorPipeRpcBackendOptions( init_method="file://{}".format(tmpfile.name), # Specifying _transports and _channels is a workaround and we no longer # will have to specify _transports and _channels for PyTorch # versions >= 1.8.1 _transports=["ibv", "uv"], _channels=["cuda_ipc", "cuda_basic"], )) # Num gpus for model parallelism. num_gpus = 2 partition_len = ((nlayers - 1) // num_gpus) + 1 # Add encoder in the beginning. 
tmp_list = [Encoder(ntokens, emsize, dropout).cuda(2 * rank)] module_list = [] # Add all the necessary transformer blocks. for i in range(nlayers): transformer_block = TransformerEncoderLayer(emsize, nhead, nhid, dropout) if i != 0 and i % (partition_len) == 0: module_list.append(nn.Sequential(*tmp_list)) tmp_list = [] device = i // (partition_len) tmp_list.append(transformer_block.to(2 * rank + device)) # Add decoder in the end. tmp_list.append(Decoder(ntokens, emsize).cuda(2 * rank + num_gpus - 1)) module_list.append(nn.Sequential(*tmp_list)) # Need to use 'checkpoint=never' since as of PyTorch 1.8, Pipe checkpointing # doesn't work with DDP. from torch.distributed.pipeline.sync import Pipe model = Pipe(torch.nn.Sequential(*module_list), chunks=8, checkpoint="never") # Initialize process group and wrap model in DDP. from torch.nn.parallel import DistributedDataParallel import torch.distributed as dist os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '29500' dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) model = DistributedDataParallel(model) def get_total_params(module: torch.nn.Module): total_params = 0 for param in module.parameters(): total_params += param.numel() return total_params print_with_rank('Total parameters in model: {:,}'.format( get_total_params(model))) ###################################################################### # Run the model # ------------- # ###################################################################### # `CrossEntropyLoss <https://pytorch.org/docs/master/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss>`__ # is applied to track the loss and # `SGD <https://pytorch.org/docs/master/optim.html?highlight=sgd#torch.optim.SGD>`__ # implements stochastic gradient descent method as the optimizer. The initial # learning rate is set to 5.0. `StepLR <https://pytorch.org/docs/master/optim.html?highlight=steplr#torch.optim.lr_scheduler.StepLR>`__ is # applied to adjust the learn rate through epochs. During the # training, we use # `nn.utils.clip_grad_norm\_ <https://pytorch.org/docs/master/nn.html?highlight=nn%20utils%20clip_grad_norm#torch.nn.utils.clip_grad_norm_>`__ # function to scale all the gradient together to prevent exploding. # # In 'run_worker' criterion = nn.CrossEntropyLoss() lr = 5.0 # learning rate optimizer = torch.optim.SGD(model.parameters(), lr=lr) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95) import time def train(): model.train() # Turn on the train mode total_loss = 0. start_time = time.time() ntokens = len(vocab.stoi) # Train only for 50 batches to keep script execution time low. nbatches = min(50 * bptt, train_data.size(0) - 1) for batch, i in enumerate(range(0, nbatches, bptt)): data, targets = get_batch(train_data, i) optimizer.zero_grad() # Since the Pipe is only within a single host and process the ``RRef`` # returned by forward method is local to this node and can simply # retrieved via ``RRef.local_value()``. output = model(data).local_value() # Need to move targets to the device where the output of the # pipeline resides. 
loss = criterion(output.view(-1, ntokens), targets.cuda(2 * rank + 1)) loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5) optimizer.step() total_loss += loss.item() log_interval = 10 if batch % log_interval == 0 and batch > 0: cur_loss = total_loss / log_interval elapsed = time.time() - start_time print_with_rank('| epoch {:3d} | {:5d}/{:5d} batches | ' 'lr {:02.2f} | ms/batch {:5.2f} | ' 'loss {:5.2f} | ppl {:8.2f}'.format( epoch, batch, nbatches // bptt, scheduler.get_lr()[0], elapsed * 1000 / log_interval, cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time() def evaluate(eval_model, data_source): eval_model.eval() # Turn on the evaluation mode total_loss = 0. ntokens = len(vocab.stoi) # Evaluate only for 50 batches to keep script execution time low. nbatches = min(50 * bptt, data_source.size(0) - 1) with torch.no_grad(): for i in range(0, nbatches, bptt): data, targets = get_batch(data_source, i) output = eval_model(data).local_value() output_flat = output.view(-1, ntokens) # Need to move targets to the device where the output of the # pipeline resides. total_loss += len(data) * criterion( output_flat, targets.cuda(2 * rank + 1)).item() return total_loss / (len(data_source) - 1) ###################################################################### # Loop over epochs. Save the model if the validation loss is the best # we've seen so far. Adjust the learning rate after each epoch. # In 'run_worker' best_val_loss = float("inf") epochs = 3 # The number of epochs best_model = None for epoch in range(1, epochs + 1): epoch_start_time = time.time() train() val_loss = evaluate(model, val_data) print_with_rank('-' * 89) print_with_rank( '| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | ' 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss))) print_with_rank('-' * 89) if val_loss < best_val_loss: best_val_loss = val_loss best_model = model scheduler.step() ###################################################################### # Evaluate the model with the test dataset # ------------------------------------- # # Apply the best model to check the result with the test dataset. # In 'run_worker' test_loss = evaluate(best_model, test_data) print_with_rank('=' * 89) print_with_rank( '| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format( test_loss, math.exp(test_loss))) print_with_rank('=' * 89)
def _setup_datasets( url, top_n=-1, local_cache_path=".data", prepare_extractive=True ): FILE_NAME = "cnndm.tar.gz" maybe_download(url, FILE_NAME, local_cache_path) dataset_tar = os.path.join(local_cache_path, FILE_NAME) extracted_files = extract_archive(dataset_tar) for fname in extracted_files: if fname.endswith("train.txt.src"): train_source_file = fname if fname.endswith("train.txt.tgt.tagged"): train_target_file = fname if fname.endswith("test.txt.src"): test_source_file = fname if fname.endswith("test.txt.tgt.tagged"): test_target_file = fname if prepare_extractive: return ( SummarizationDataset( train_source_file, target_file=train_target_file, source_preprocessing=[_clean, tokenize.sent_tokenize], target_preprocessing=[ _clean, _remove_ttags, _target_sentence_tokenization, ], word_tokenize=nltk.word_tokenize, top_n=top_n, ), SummarizationDataset( test_source_file, target_file=test_target_file, source_preprocessing=[_clean, tokenize.sent_tokenize], target_preprocessing=[ _clean, _remove_ttags, _target_sentence_tokenization, ], word_tokenize=nltk.word_tokenize, top_n=top_n, ), ) else: return ( SummarizationDataset( train_source_file, target_file=train_target_file, source_preprocessing=[_clean, tokenize.sent_tokenize], target_preprocessing=[ _clean, _remove_ttags, _target_sentence_tokenization, ], top_n=top_n, ), SummarizationDataset( test_source_file, target_file=test_target_file, source_preprocessing=[_clean, tokenize.sent_tokenize], target_preprocessing=[ _clean, _remove_ttags, _target_sentence_tokenization, ], top_n=top_n, ), )
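# Hedged usage sketch for the helper above (not from the original source). The
# URL is a placeholder for wherever the preprocessed cnndm.tar.gz archive lives,
# and the top_n value is purely illustrative; both must be supplied by the caller.
# train_ds, test_ds = _setup_datasets(
#     url="<url to cnndm.tar.gz>",   # hypothetical location of the archive
#     top_n=1000,                    # illustrative limit; -1 uses the full data
#     local_cache_path=".data",
#     prepare_extractive=True,       # sentence-tokenize targets for extractive summarization
# )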
def _setup_datasets(dataset_name, train_filenames, valid_filenames, test_filenames, data_select, root): ''' train_filenames=('train.de-en.de', 'train.de-en.en'), valid_filenames=('IWSLT16.TED.tst2013.de-en.de', 'IWSLT16.TED.tst2013.de-en.en'), test_filenames=('IWSLT16.TED.tst2014.de-en.de', 'IWSLT16.TED.tst2014.de-en.en'), data_select=('train', 'valid', 'test'), root='.data' ''' print("## entered _setup_datasets") data_select = check_default_set(data_select, ('train', 'valid', 'test')) if not isinstance(train_filenames, tuple) and not isinstance(valid_filenames, tuple) \ and not isinstance(test_filenames, tuple): raise ValueError("All filenames must be tuples") src_train, tgt_train = train_filenames src_eval, tgt_eval = valid_filenames src_test, tgt_test = test_filenames extracted_files = [] # list of paths to the extracted files if isinstance(URLS[dataset_name], list): for idx, f in enumerate(URLS[dataset_name]): dataset_tar = download_from_url( f, root=root, hash_value=MD5[dataset_name][idx], hash_type='md5') extracted_files.extend(extract_archive(dataset_tar)) # IWSLT will go into this one elif isinstance(URLS[dataset_name], str): dataset_tar = download_from_url(URLS[dataset_name]) print("#dataset_tar: ", dataset_tar) extracted_dataset_tar = extract_archive(dataset_tar) if dataset_name == 'IWSLT': print('## It is IWSLT!!!') src_language = train_filenames[0].split(".")[-1] tgt_language = train_filenames[1].split(".")[-1] languages = "-".join([src_language, tgt_language]) # this is what was downloaded from the original iwslt url. now we need to pick this out from all the languages downloaded iwslt_tar_name = '.data/2016-01/texts/{}/{}/{}.tgz' iwslt_tar_name = iwslt_tar_name.format( src_language, tgt_language, languages) print('## iwslt_tar_name: ', iwslt_tar_name) extracted_iwslt_tar = extract_archive(iwslt_tar_name) # extracted_iwslt_tar = extract_archive('.data/2016-01/texts/de/en/de-en.tgz') print('## extracted_iwslt_tar', extracted_iwslt_tar) extracted_files.extend(extracted_iwslt_tar) else: extracted_files.extend(extracted_dataset_tar) # print("#extracted_files: ", extracted_files) # print('extracted_dataset_tar', extracted_dataset_tar) else: raise ValueError( "URLS for {} has to be in a form or list or string".format( dataset_name)) # Clean the xml and tag file in the archives file_archives = [] for fname in extracted_files: if 'xml' in fname: _clean_xml_file(fname) file_archives.append(os.path.splitext(fname)[0]) elif "tags" in fname: _clean_tags_file(fname) file_archives.append(fname.replace('.tags', '')) else: file_archives.append(fname)
def _setup_datasets(dataset_name, train_filenames, valid_filenames, test_filenames, split, root, offset): if not isinstance(train_filenames, tuple) and not isinstance(valid_filenames, tuple) \ and not isinstance(test_filenames, tuple): raise ValueError("All filenames must be tuples") src_train, tgt_train = train_filenames src_eval, tgt_eval = valid_filenames src_test, tgt_test = test_filenames extracted_files = [] # list of paths to the extracted files if isinstance(URLS[dataset_name], list): for idx, f in enumerate(URLS[dataset_name]): dataset_tar = download_from_url(f, root=root, hash_value=MD5[dataset_name][idx], hash_type='md5') extracted_files.extend(extract_archive(dataset_tar)) elif isinstance(URLS[dataset_name], str): dataset_tar = download_from_url(URLS[dataset_name], root=root, hash_value=MD5[dataset_name], hash_type='md5') extracted_dataset_tar = extract_archive(dataset_tar) if dataset_name == 'IWSLT': # IWSLT dataset's url downloads a multilingual tgz. # We need to take an extra step to pick out the specific language pair from it. src_language = train_filenames[0].split(".")[-1] tgt_language = train_filenames[1].split(".")[-1] languages = "-".join([src_language, tgt_language]) iwslt_tar = '.data/2016-01/texts/{}/{}/{}.tgz' iwslt_tar = iwslt_tar.format(src_language, tgt_language, languages) extracted_dataset_tar = extract_archive(iwslt_tar) extracted_files.extend(extracted_dataset_tar) else: raise ValueError( "URLS for {} has to be in a form or list or string".format( dataset_name)) # Clean the xml and tag file in the archives file_archives = [] for fname in extracted_files: if 'xml' in fname: _clean_xml_file(fname) file_archives.append(os.path.splitext(fname)[0]) elif "tags" in fname: _clean_tags_file(fname) file_archives.append(fname.replace('.tags', '')) else: file_archives.append(fname) data_filenames = defaultdict(dict) data_filenames = { "train": _construct_filepaths(file_archives, src_train, tgt_train), "valid": _construct_filepaths(file_archives, src_eval, tgt_eval), "test": _construct_filepaths(file_archives, src_test, tgt_test) } for key in data_filenames.keys(): if len(data_filenames[key]) == 0 or data_filenames[key] is None: raise FileNotFoundError( "Files are not found for data type {}".format(key)) datasets = [] for key in split: src_data_iter = _read_text_iterator(data_filenames[key][0]) tgt_data_iter = _read_text_iterator(data_filenames[key][1]) def _iter(src_data_iter, tgt_data_iter): for item in zip(src_data_iter, tgt_data_iter): yield item datasets.append( RawTextIterableDataset(dataset_name, NUM_LINES[dataset_name][key], _iter(src_data_iter, tgt_data_iter), offset=offset)) return datasets
def IWSLT2017(root='.data', split=('train', 'valid', 'test'), language_pair=('de', 'en')): """IWSLT2017 dataset The available datasets include following: **Language pairs**: +-----+-----+-----+-----+-----+-----+ | |'en' |'nl' |'de' |'it' |'ro' | +-----+-----+-----+-----+-----+-----+ |'en' | | x | x | x | x | +-----+-----+-----+-----+-----+-----+ |'nl' | x | | x | x | x | +-----+-----+-----+-----+-----+-----+ |'de' | x | x | | x | x | +-----+-----+-----+-----+-----+-----+ |'it' | x | x | x | | x | +-----+-----+-----+-----+-----+-----+ |'ro' | x | x | x | x | | +-----+-----+-----+-----+-----+-----+ For additional details refer to source website: https://wit3.fbk.eu/2017-01 Args: root: Directory where the datasets are saved. Default: ".data" split: split or splits to be returned. Can be a string or tuple of strings. Default: (‘train’, ‘valid’, ‘test’) language_pair: tuple or list containing src and tgt language Examples: >>> from torchtext.datasets import IWSLT2017 >>> train_iter, valid_iter, test_iter = IWSLT2017() >>> src_sentence, tgt_sentence = next(train_iter) """ valid_set = 'dev2010' test_set = 'tst2010' num_lines_set_identifier = { 'train': 'train', 'valid': valid_set, 'test': test_set } if not isinstance(language_pair, list) and not isinstance( language_pair, tuple): raise ValueError( "language_pair must be list or tuple but got {} instead".format( type(language_pair))) assert ( len(language_pair) == 2 ), 'language_pair must contain only 2 elements: src and tgt language respectively' src_language, tgt_language = language_pair[0], language_pair[1] if src_language not in SUPPORTED_DATASETS['language_pair']: raise ValueError( "src_language '{}' is not valid. Supported source languages are {}" .format(src_language, list(SUPPORTED_DATASETS['language_pair']))) if tgt_language not in SUPPORTED_DATASETS['language_pair'][src_language]: raise ValueError( "tgt_language '{}' is not valid for give src_language '{}'. Supported target language are {}" .format(tgt_language, src_language, SUPPORTED_DATASETS['language_pair'][src_language])) train_filenames = ('train.{}-{}.{}'.format(src_language, tgt_language, src_language), 'train.{}-{}.{}'.format(src_language, tgt_language, tgt_language)) valid_filenames = ('IWSLT{}.TED.{}.{}-{}.{}'.format( SUPPORTED_DATASETS['year'], valid_set, src_language, tgt_language, src_language), 'IWSLT{}.TED.{}.{}-{}.{}'.format( SUPPORTED_DATASETS['year'], valid_set, src_language, tgt_language, tgt_language)) test_filenames = ('IWSLT{}.TED.{}.{}-{}.{}'.format( SUPPORTED_DATASETS['year'], test_set, src_language, tgt_language, src_language), 'IWSLT{}.TED.{}.{}-{}.{}'.format( SUPPORTED_DATASETS['year'], test_set, src_language, tgt_language, tgt_language)) src_train, tgt_train = train_filenames src_eval, tgt_eval = valid_filenames src_test, tgt_test = test_filenames extracted_files = [] # list of paths to the extracted files dataset_tar = download_from_url(SUPPORTED_DATASETS['URL'], root=root, hash_value=SUPPORTED_DATASETS['MD5'], path=os.path.join( root, SUPPORTED_DATASETS['_PATH']), hash_type='md5') extracted_dataset_tar = extract_archive(dataset_tar) # IWSLT dataset's url downloads a multilingual tgz. # We need to take an extra step to pick out the specific language pair from it. 
src_language = train_filenames[0].split(".")[-1] tgt_language = train_filenames[1].split(".")[-1] iwslt_tar = os.path.join(root, SUPPORTED_DATASETS['_PATH'].split(".")[0], 'texts/DeEnItNlRo/DeEnItNlRo', 'DeEnItNlRo-DeEnItNlRo.tgz') extracted_dataset_tar = extract_archive(iwslt_tar) extracted_files.extend(extracted_dataset_tar) # Clean the xml and tag file in the archives file_archives = [] for fname in extracted_files: if 'xml' in fname: _clean_xml_file(fname) file_archives.append(os.path.splitext(fname)[0]) elif "tags" in fname: _clean_tags_file(fname) file_archives.append(fname.replace('.tags', '')) else: file_archives.append(fname) data_filenames = { "train": _construct_filepaths(file_archives, src_train, tgt_train), "valid": _construct_filepaths(file_archives, src_eval, tgt_eval), "test": _construct_filepaths(file_archives, src_test, tgt_test) } for key in data_filenames: if len(data_filenames[key]) == 0 or data_filenames[key] is None: raise FileNotFoundError( "Files are not found for data type {}".format(key)) src_data_iter = _read_text_iterator(data_filenames[split][0]) tgt_data_iter = _read_text_iterator(data_filenames[split][1]) def _iter(src_data_iter, tgt_data_iter): for item in zip(src_data_iter, tgt_data_iter): yield item return _RawTextIterableDataset( DATASET_NAME, NUM_LINES[split][num_lines_set_identifier[split]][tuple( sorted(language_pair))], _iter(src_data_iter, tgt_data_iter))
parser.add_argument( "--mlpipeline_ui_metadata", type=str, help="Path to write mlpipeline-ui-metadata.json", ) args = vars(parser.parse_args()) dataset_url = args["dataset_url"] output_path = args["output_path"] Path(output_path).mkdir(parents=True, exist_ok=True) dataset_tar = download_from_url(dataset_url, root="./") extracted_files = extract_archive(dataset_tar) ag_news_csv = pv.read_csv("ag_news_csv/train.csv") pq.write_table(ag_news_csv, os.path.join(output_path, "ag_news_data.parquet")) entry_point = ["ls", "-R", output_path] run_code = subprocess.run(entry_point, stdout=subprocess.PIPE) print(run_code.stdout) visualization_arguments = { "inputs": { "dataset_url": args["dataset_url"] }, "output": {
def GloVe(name="840B", dim=300, unk_tensor=None, root=".data", validate_file=True, num_cpus=32): r"""Create a GloVe Vectors object. Args: name (str): the name of the GloVe dataset to use. Options are: - 42B - 840B - twitter.27B - 6B dim (int): the dimension for the GloVe dataset to load. Options are: 42B: - 300 840B: - 300 twitter.27B: - 25 - 50 - 100 - 200 6B: - 50 - 100 - 200 - 300 unk_tensor (Tensor): a 1d tensor representing the vector associated with an unknown token. root (str): folder used to store downloaded files in (.data) validate_file (bool): flag to determine whether to validate the downloaded files checksum. Should be `False` when running tests with a local asset. num_cpus (int): the number of cpus to use when loading the vectors from file. Default: 10. Returns: Vectors: a Vectors object. Raises: ValueError: if unexpected duplicate tokens are found in GloVe file. """ dup_token_glove_840b = [ "����������������������������������������������������������������������" "����������������������������������������������������������������������" "����������������������������������������������������������������������" "����������������������������������������������������������������������" "������������������������������������������������������" ] urls = { "42B": "https://nlp.stanford.edu/data/glove.42B.300d.zip", "840B": "https://nlp.stanford.edu/data/glove.840B.300d.zip", "twitter.27B": "https://nlp.stanford.edu/data/glove.twitter.27B.zip", "6B": "https://nlp.stanford.edu/data/glove.6B.zip", } valid_glove_file_names = { "glove.42B.300d.txt", "glove.840B.300d.txt", "glove.twitter.27B.25d.txt", "glove.twitter.27B.50d.txt", "glove.twitter.27B.100d.txt", "glove.twitter.27B.200d.txt", "glove.6B.50d.txt", "glove.6B.100d.txt", "glove.6B.200d.txt", "glove.6B.300d.txt" } file_name = "glove.{}.{}d.txt".format(name, str(dim)) if file_name not in valid_glove_file_names: raise ValueError( "Could not find GloVe file with name {}. Please check that `name` and `dim`" "are valid.".format(str(file_name))) url = urls[name] checksum = None if validate_file: checksum = CHECKSUMS_GLOVE.get(url, None) downloaded_file_path = download_from_url(url, root=root, hash_value=checksum) extracted_file_paths = extract_archive(downloaded_file_path) # need to get the full path to the correct file in the case when multiple files are extracted with different dims extracted_file_path_with_correct_dim = [ path for path in extracted_file_paths if file_name in path ][0] cpp_vectors_obj, dup_tokens = _load_token_and_vectors_from_file( extracted_file_path_with_correct_dim, ' ', num_cpus, unk_tensor) # Ensure there is only 1 expected duplicate token present for 840B dataset if dup_tokens and dup_tokens != dup_token_glove_840b: raise ValueError("Found duplicate tokens in file: {}".format( str(dup_tokens))) vectors_obj = Vectors(cpp_vectors_obj) return vectors_obj
import logging
import argparse

from torchtext.utils import download_from_url, extract_archive

parser = argparse.ArgumentParser(
    description='Download and extract a given dataset')
parser.add_argument('--url',
                    default='http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/'
                            'validation.tar.gz')
parser.add_argument('--data', default='validation.tar.gz')
parser.add_argument('--logging-level', default='WARNING')
args = parser.parse_args()

logging.basicConfig(level=getattr(logging, args.logging_level))

# Download the archive to the path given by --data, then extract its contents.
tar_file = download_from_url(args.url, args.data)
extracted_files = extract_archive(tar_file, 'extracted_files')
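# Hedged usage note (the script name is hypothetical): run from the command
# line, the archive is saved to the path given by --data and unpacked into
# 'extracted_files/'.
#
#   python download_extract.py \
#       --url http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz \
#       --data validation.tar.gz \
#       --logging-level INFO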
    def forward(self, x):
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)


import io
import torch
from torchtext.utils import download_from_url, extract_archive
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator

# url = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'
root = 'C:/myapp/OneDrive/Mycode/python/pytorch/data/wikitext-2-v1.zip'
# test_filepath, valid_filepath, train_filepath = extract_archive(download_from_url(url, root))
test_filepath, valid_filepath, train_filepath = extract_archive(root)

tokenizer = get_tokenizer('basic_english')
vocab = build_vocab_from_iterator(
    map(tokenizer, iter(io.open(train_filepath, encoding="utf8"))))


def data_process(raw_text_iter):
    data = [torch.tensor([vocab[token] for token in tokenizer(item)],
                         dtype=torch.long)
            for item in raw_text_iter]
    return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))


train_data = data_process(iter(io.open(train_filepath, encoding="utf8")))
val_data = data_process(iter(io.open(valid_filepath, encoding="utf8")))
test_data = data_process(iter(io.open(test_filepath, encoding="utf8")))

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def _setup_datasets(dataset_name, tokenizer=tokenizer, root='.data', vocab=None, removed_tokens=[], data_select=('train', 'test', 'valid'), bptt=None, batch_size=64): if isinstance(data_select, str): data_select = [data_select] if not set(data_select).issubset(set(('train', 'test', 'valid'))): raise TypeError('data_select is not supported!') print(tokenizer) if tokenizer is None: tokenizer = get_tokenizer('basic_english') if dataset_name == 'PennTreebank': extracted_files = [] select_to_index = {'train': 0, 'test': 1, 'valid': 2} extracted_files = [ download_from_url(URLS['PennTreebank'][select_to_index[key]], root=root) for key in data_select ] elif dataset_name == 'HumanNumbers': extracted_files = [ '/Users/ssaurabh/.fastai/data/human_numbers/train.txt', '/Users/ssaurabh/.fastai/data/human_numbers/valid.txt' ] else: dataset_tar = download_from_url(URLS[dataset_name], root=root) extracted_files = extract_archive(dataset_tar) _path = {} for item in data_select: _path[item] = _get_datafile_path(item, extracted_files) #print(_path) if vocab is None: if 'train' not in _path.keys(): raise TypeError("Must pass a vocab if train is not selected.") logging.info('Building Vocab based on {}'.format(_path['train'])) txt_iter = iter( tokenizer(row) for row in io.open(_path['train'], encoding="utf8")) vocab = build_vocab_from_iterator(txt_iter) logging.info('Vocab has {} entries'.format(len(vocab))) else: if not isinstance(vocab, Vocab): raise TypeError("Passed vocabulary is not of type Vocab") data = {} raw_data = {} for item in _path.keys(): data[item] = [] raw_data[item] = [] logging.info('Creating {} data'.format(item)) txt_iter = iter( tokenizer(row) for row in io.open(_path[item], encoding="utf8")) for txt in txt_iter: raw_data[item] += txt txt_iter = iter( tokenizer(row) for row in io.open(_path[item], encoding="utf8")) _iter = numericalize_tokens_from_iterator(vocab, txt_iter, removed_tokens) for tokens in _iter: data[item] += [token_id for token_id in tokens] for key in data_select: if data[key] == []: raise TypeError('Dataset {} is empty!'.format(key)) if bptt is None: return tuple( LanguageModelingDataset( torch.tensor(data[d]).long(), vocab, raw_data[d]) for d in data_select) else: #### generate input and labels input_data = {} label_data = {} for key in data_select: #### Extend the dataset such that the last batch is not left out recycled_data_len = (bptt * batch_size) - (len(data[key]) % (bptt * batch_size)) + bptt data[key] = data[key] + data[key][0:recycled_data_len] input_d = [] label_d = [] for i in range(len(data[key]) - bptt): input_d.append(data[key][i:i + bptt]) label_d.append(data[key][i + 1:i + bptt + 1]) print(len(input_d)) print(len(label_d)) input_data[key] = torch.tensor(input_d) label_data[key] = torch.tensor(label_d) print(input_data[key].shape) print(label_data[key].shape) #input_d = torch.tensor(data[key]).long() ###reshape the input data #if input_d.shape[0]%bptt > 0: # pad_len_input = bptt - input_d.shape[0]%bptt #else: # pad_len_input = 0 #input_d = torch.nn.functional.pad(input_d, (0, pad_len_input), mode='constant', value=1) #input_data[key] = input_d.reshape(int(input_d.shape[0]/bptt),bptt) #if input_d[1:].shape[0]%bptt > 0: # pad_len_output = bptt - input_d[1:].shape[0]%bptt #else: # pad_len_output = 0 #input_d = torch.nn.functional.pad(input_d[1:], (0, pad_len_output), mode='constant', value=1) #label_data[key] = input_d.reshape(int(input_d.shape[0]/bptt),bptt) return tuple( HumanLanguageModelingDataset(input_data[d], vocab, label_data[d]) for d in data_select)