def run(basedir=None, train_file=None, valid_file=None, dataset_key='tlm', embed_type='default',
        d_model=512, d_ff=2048, d_k=None, num_heads=8, num_layers=8, num_train_workers=4,
        nctx=256, file_type='json', batch_size=256, subword_model_file=None, subword_vocab_file=None,
        dropout=0.1, ffn_pdrop=0.0, layer_drop=0.0, lr_scheduler='cosine', lr_decay_steps=None,
        lr_decay_rate=None, lr_alpha=0.0, optim='adamw', lr=4.0e-4, clip=1.0, weight_decay=1.0e-2,
        epochs=32, restart_from=None, restart_tt=None, warmup_steps=10000, saves_per_epoch=10,
        mlm=True, preprocessed=True, rpr_k=[8], rpr_value_on=False, windowed_ra=False,
        device="cuda", distributed=False, local_rank=-1, extra_tokens=["[CLS]", "[MASK]"],
        do_early_stopping=False, model_type='transformer-mlm', modules=[], ra_type=None,
        transformer_type=None, **kwargs):
    if basedir is None:
        basedir = 'lm-{}-bpe-{}'.format(dataset_key, os.getpid())
    logging.basicConfig(level=logging.INFO if local_rank in [-1, 0] else logging.WARN)

    for module in modules:
        import_user_module(module)

    num_gpus = get_num_gpus_multiworker()
    distributed = distributed or num_gpus > 1
    logger.info(f"Using {num_gpus} GPUs in this job.")

    do_on_demand_masking = mlm and not preprocessed
    if do_on_demand_masking:
        logger.info("On-demand masking is turned on")
    if distributed:
        device, updated_local_rank = init_distributed(local_rank)
        local_rank = updated_local_rank

    if file_type == 'tfrecord':
        reader_type = 'tfrecord'
    elif preprocessed:
        reader_type = 'preprocessed'
    else:
        reader_type = 'lang'
    reader = MultiFileDatasetReader(src_nctx=nctx, model_file=subword_model_file,
                                    vocab_file=subword_vocab_file, file_type=file_type,
                                    reader_type=reader_type, record_keys=['x', 'y'] if mlm else ['x'],
                                    extra_tokens=extra_tokens)

    # This looks a bit funny, but the streaming reader ignores our vocab and gives us the one from
    # the subword model.  However, we do need counts from our dataset for validation so we can
    # calculate the perplexity
    vocab = reader.build_vocab([valid_file])
    # If we are not using chars, then use 'x' for both input and output
    preproc_data = baseline.embeddings.load_embeddings('x', dsz=d_model, known_vocab=vocab['x'],
                                                       preserve_vocab_indices=True,
                                                       embed_type=embed_type)
    vocabs = preproc_data['vocab']

    os.makedirs(basedir, exist_ok=True)
    # We want to make sure to save our input vocab into the basedir for reuse later
    write_json(vocabs, os.path.join(basedir, 'vocabs.json'))
    embeddings = {'x': preproc_data['embeddings']}
    logger.info("Loaded embeddings")

    train_set = reader.load(train_file, vocabs)
    valid_set = reader.load(valid_file, vocabs, distribute=False, shuffle=False)

    train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=num_train_workers)
    valid_loader = DataLoader(valid_set, batch_size=batch_size)
    logger.info("Loaded datasets")
    logger.info("Using embedding type [%s]", embed_type)

    if 'mlm' in model_type:
        mask_from = vocabs
        vocab_size = len(mask_from)
        # Default to -1 so the error branch below actually fires when the token is missing
        mask_value = mask_from.get("[MASK]", -1)
        if mask_value == -1:
            logger.error("We could not find a suitable masking token in the vocab")
            return

    if len(rpr_k) == 0 or rpr_k[0] < 1:
        rpr_k = None
    elif len(rpr_k) == 1:
        rpr_k = None if rpr_k[0] == 0 else rpr_k[0]

    if ra_type is not None and ra_type != 'shaw' and rpr_k is not None:
        logger.warning(f"Relative attention mismatch: you requested {ra_type} with rpr_k set. Disabling rpr_k.")
        rpr_k = None

    model = create_lang_model(embeddings,
                              hsz=d_model,
                              nctx=nctx,  # Only for gMLP
                              d_ff=d_ff,
                              tie_weights=True,
                              dropout=dropout,
                              gpu=False,
                              num_heads=num_heads,
                              layers=num_layers,
                              rpr_k=rpr_k,
                              d_k=d_k,
                              ffn_pdrop=ffn_pdrop,
                              windowed_ra=windowed_ra,
                              rpr_value_on=rpr_value_on,
                              layer_drop=layer_drop,
                              model_type=model_type,
                              ra_type=ra_type,
                              transformer_type=transformer_type,
                              src_keys=['x'], tgt_key='x')
    model.to(device)
    loss_function = model.create_loss()
    loss_function.to(device)

    logger.info("Loaded model and loss")

    steps_per_epoch = len(train_loader) // num_gpus
    update_on = steps_per_epoch // saves_per_epoch
    report_on = max(10, update_on) // 10
    logger.info(f"Steps per epoch per GPU: {steps_per_epoch}. Saving checkpoint every {update_on} steps.")
    lr_decay = get_lr_decay(lr_scheduler, lr, steps_per_epoch, epochs, logger,
                            decay_steps=lr_decay_steps, decay_rate=lr_decay_rate, alpha=lr_alpha)
    linear_warmup = WarmupLinearSchedulerPyTorch(warmup_steps, lr=lr)
    lr_sched = CompositeLRScheduler(linear_warmup, lr_decay, lr=lr)

    global_step = 0
    start_epoch = 0
    if restart_from:
        if restart_from.endswith('npz'):
            load_tlm_npz(model, restart_from)
        else:
            model.load_state_dict(torch.load(restart_from))
        vec = restart_from.split("-")

        if restart_tt:
            tick_type = restart_tt
        else:
            tick_type = vec[-2]
        step_num = int(vec[-1].split(".")[0])
        if tick_type == 'epoch':
            start_epoch = step_num
            global_step = start_epoch * steps_per_epoch
        elif tick_type == 'step':
            start_epoch = step_num // steps_per_epoch
            global_step = step_num
        else:
            logger.warning(f"The previous tick was {step_num} but command-line specifies to ignore, setting to 0")

        logger.info("Restarting from a previous checkpoint %s.\n\tStarting at global_step=%d, epoch=%d",
                    restart_from, global_step, start_epoch + 1)

    optimizer = OptimizerManager(model, global_step, optim=optim, lr=lr, lr_function=lr_sched,
                                 weight_decay=weight_decay)
    logger.info("Model has {:,} parameters".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))

    # Prepare model for distributed training if needed
    if distributed:
        # This program assumes pure data parallelism: each model replica lives on a single GPU.
        # If we wanted to support model and data parallelism we would need to update the selection
        # of GPUs based on rank, selecting multiple ids per rank; here we select only a single GPU
        # and use it for both input and output.
        model = DistributedDataParallel(model, device_ids=[device], output_device=device,
                                        find_unused_parameters=True)
        logger.info("Model located on %s", device)

    model_base = os.path.join(basedir, 'checkpoint')
    steps = global_step
    best_valid_loss = np.inf

    timer = Timer()

    for epoch in range(start_epoch, epochs):
        avg_loss = Average('average_train_loss')
        metrics = {}
        optimizer.zero_grad()
        timer.start()
        model.train()
        train_itr = iter(train_loader)
        for i in range(steps_per_epoch):
            batch = next(train_itr)
            steps += 1
            x, y = batch
            inputs = x.to(device)
            labels = y.to(device)

            if do_on_demand_masking:
                inputs, labels, _ = on_demand_mlm_masking(inputs, labels, mask_value, vocab_size)
            inputs = {'x': inputs}

            labels = labels.contiguous()
            logits = model(inputs, None)[0].contiguous()
            if mlm:
                loss = loss_function(logits, labels)
            else:
                # Causal LM case: drop the last logit and the first label so position t predicts token t+1
                shift_logits = logits[:, :-1]
                shift_labels = labels[:, 1:]
                loss = loss_function(shift_logits, shift_labels)
            loss.backward()
            avg_loss.update(loss.item())

            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            optimizer.step()
            optimizer.zero_grad()
            if (i + 1) % report_on == 0:
                logging.info(avg_loss)

            if (i + 1) % update_on == 0 and local_rank < 1:
                elapsed = timer.elapsed(True)
                logging.info('elapsed time this epoch %d min', elapsed)
                logging.info('elapsed step time %f steps/min', i / elapsed)
                logging.info('LR: %f', optimizer.current_lr)

                if not do_early_stopping:
                    save_checkpoint(model, model_base, steps, tick_type='step')
                else:
                    valid_token_loss = validate(model, loss_function, valid_loader, avg_loss, timer, metrics,
                                                do_on_demand_masking, mlm, mask_value, vocab_size, device)
                    if valid_token_loss < best_valid_loss:
                        best_valid_loss = valid_token_loss
                        logger.info(f"New best valid loss: {best_valid_loss}. Saving checkpoint...")
                        save_checkpoint(model, model_base, steps, tick_type='step')
                    model.train()

        if not do_early_stopping:
            _ = validate(model, loss_function, valid_loader, avg_loss, timer, metrics,
                         do_on_demand_masking, mlm, mask_value, vocab_size, device)
            save_checkpoint(model, model_base, epoch, tick_type='epoch')
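# Usage sketch (not part of the original script): one way to kick off masked-LM pretraining with
# the `run` function above, assuming its module-level dependencies (torch, baseline, the reader,
# scheduler and checkpoint helpers) are imported.  The file paths below are placeholders, not real data.
if __name__ == '__main__':
    run(train_file='/data/tlm/train-shards',         # placeholder: preprocessed training shards
        valid_file='/data/tlm/valid-shards',         # placeholder: shard used for validation perplexity
        subword_model_file='/data/bpe/codes.model',  # placeholder: subword (BPE) model
        subword_vocab_file='/data/bpe/vocab.txt',    # placeholder: subword vocab
        dataset_key='tlm',
        mlm=True,
        preprocessed=True,
        epochs=32)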
def run(basedir=None, train_file=None, valid_file=None, dataset_key='paired', embed_type='default',
        d_model=512, d_ff=2048, d_k=None, num_heads=8, num_layers=8, num_train_workers=4,
        nctx=256, tgt_nctx=None, file_type='json', record_keys=['x', 'y'], batch_size=256,
        subword_model_file=None, subword_vocab_file=None, dropout=0.1, lr_scheduler='cosine',
        lr_decay_steps=None, lr_decay_rate=None, lr_alpha=None, optim='adamw', lr=4.0e-4, clip=1.0,
        weight_decay=1.0e-2, epochs=32, restart_from=None, restart_tt=None, warmup_steps=10000,
        saves_per_epoch=10, layer_drop=0.0, reader_type='preprocessed', src_begin_tok=[],
        src_end_tok=['<EOS>'], tgt_begin_tok=['<GO>'], tgt_end_tok=['<EOS>'], lower=False,
        rpr_k=[8], device='cuda', distributed=False, local_rank=-1, save_npz=False,
        extra_tokens=["[CLS]", "[MASK]"], subword_type='bpe', label_smoothing=None, ra_type=None,
        transformer_type=None, **kwargs):
    if basedir is None:
        basedir = f's2s-{reader_type}-paired-{dataset_key}-bpe-{os.getpid()}'
    logging.basicConfig(level=logging.INFO if local_rank in [-1, 0] else logging.WARN)
    num_gpus = get_num_gpus_multiworker()
    distributed = distributed or num_gpus > 1
    logger.info(f"Using {num_gpus} GPUs in this job.")

    if distributed:
        device, updated_local_rank = init_distributed(local_rank)
        local_rank = updated_local_rank

    if not tgt_nctx:
        tgt_nctx = nctx
    reader = MultiFileDatasetReader(nctx, tgt_nctx, src_begin_tok, src_end_tok, tgt_begin_tok, tgt_end_tok,
                                    subword_model_file, subword_vocab_file, file_type,
                                    reader_type=reader_type, record_keys=record_keys, lower=lower,
                                    extra_tokens=extra_tokens, subword_type=subword_type)

    vocab = reader.build_vocab()
    # If we are not using chars, then use 'x' for both input and output
    preproc_data = baseline.embeddings.load_embeddings('x', dsz=d_model, known_vocab=vocab['x'],
                                                       preserve_vocab_indices=True,
                                                       embed_type=embed_type)
    vocabs = preproc_data['vocab']

    os.makedirs(basedir, exist_ok=True)
    # We want to make sure to save our input vocab into the basedir for reuse later
    write_json(vocabs, os.path.join(basedir, 'vocabs.json'))
    embeddings = preproc_data['embeddings']
    logger.info("Loaded embeddings")

    train_set = reader.load(train_file, vocabs)
    valid_set = reader.load(valid_file, vocabs, distribute=False, shuffle=False)

    train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=num_train_workers)
    valid_loader = DataLoader(valid_set, batch_size=batch_size)
    logger.info("Loaded datasets")
    logger.info("Using embedding type [%s]", embed_type)

    if len(rpr_k) == 0 or rpr_k[0] < 1:
        rpr_k = None
    elif len(rpr_k) == 1:
        rpr_k = rpr_k[0]

    hps = {"dsz": d_model,
           "hsz": d_model,
           "d_ff": d_ff,
           "dropout": dropout,
           "num_heads": num_heads,
           "layers": num_layers,
           "encoder_type": "transformer",
           "decoder_type": "transformer",
           "src_lengths_key": "x_lengths",
           "d_k": d_k,
           "layer_drop": layer_drop,
           "rpr_k": rpr_k,
           "ra_type": ra_type,
           "transformer_type": transformer_type}

    model = TiedEmbeddingsSeq2SeqModel({'x': embeddings}, None, **hps)
    model.to(device)

    loss_function = model.create_loss(label_smoothing=label_smoothing)
    loss_function.to(device)

    logger.info("Created model and loss")

    steps_per_epoch = len(train_loader) // num_gpus
    valid_steps = len(valid_loader)
    update_on = steps_per_epoch // saves_per_epoch
    report_on = max(10, update_on) // 10
    logger.info(f"Steps per epoch per GPU: {steps_per_epoch}. Saving checkpoint every {update_on} steps.")
    lr_decay = get_lr_decay(lr_scheduler, lr, steps_per_epoch, epochs, logger,
                            decay_steps=lr_decay_steps, decay_rate=lr_decay_rate, alpha=lr_alpha)
    linear_warmup = WarmupLinearSchedulerPyTorch(warmup_steps, lr=lr)
    lr_sched = CompositeLRScheduler(linear_warmup, lr_decay, lr=lr)

    global_step = 0
    start_epoch = 0
    if restart_from:
        global_step, start_epoch = reload_from_checkpoint(restart_from, restart_tt, model, steps_per_epoch)
        logger.info("Restarting from a previous checkpoint %s.\n\tStarting at global_step=%d, epoch=%d",
                    restart_from, global_step, start_epoch + 1)

    optimizer = OptimizerManager(model, global_step, optim=optim, lr=lr, lr_function=lr_sched,
                                 weight_decay=weight_decay)
    logger.info("Model has {:,} parameters".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))

    # Prepare model for distributed training if needed
    if distributed:
        model = DistributedDataParallel(model, device_ids=[device], output_device=device)
        logger.info("Model located on %d", local_rank)

    model_base = os.path.join(basedir, 'checkpoint')
    steps = global_step

    timer = Timer()

    for epoch in range(start_epoch, epochs):
        avg_loss = Average('average_train_loss')
        metrics = {}
        optimizer.zero_grad()
        timer.start()
        model.train()
        train_itr = iter(train_loader)
        for i in range(steps_per_epoch):
            batch = next(train_itr)
            steps += 1
            x, y = batch
            loss = run_step(x, y, model, loss_function, distributed)
            loss.backward()
            avg_loss.update(loss.item())

            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            optimizer.step()
            optimizer.zero_grad()
            if (i + 1) % report_on == 0:
                logging.info(avg_loss)
            if (i + 1) % update_on == 0 and local_rank < 1:
                elapsed = timer.elapsed(True)
                logging.info('elapsed time this epoch %d min', elapsed)
                logging.info('elapsed step time %f steps/min', i / elapsed)
                logging.info('LR: %f', optimizer.current_lr)
                save_checkpoint(model, model_base, steps, tick_type='step', save_npz=save_npz)

        # How much time elapsed in minutes
        elapsed = timer.elapsed(True)
        # This is the average training token-level loss across all machines
        # This is the token-level training perplexity
        train_avg_loss = avg_loss.avg
        metrics['train_elapsed_min'] = elapsed
        metrics['average_train_loss'] = train_avg_loss
        if local_rank < 1:
            avg_valid_loss = Average('average_valid_loss')
            timer.start()
            model.eval()
            valid_itr = iter(valid_loader)
            for j in range(valid_steps):
                with torch.no_grad():
                    batch = next(valid_itr)
                    x, y = batch
                    loss = run_step(x, y, model, loss_function, distributed)
                    avg_valid_loss.update(loss.item())

            valid_avg_loss = avg_valid_loss.avg
            elapsed = timer.elapsed(True)
            metrics['valid_elapsed_min'] = elapsed
            metrics['average_valid_loss'] = valid_avg_loss
            logger.info(metrics)
            save_checkpoint(model, model_base, epoch, tick_type='epoch', save_npz=save_npz)
def train():
    parser = ArgumentParser()
    parser.add_argument("--basedir", type=str)
    parser.add_argument("--train_file", type=str, help='Optional file path to use for train file')
    parser.add_argument("--valid_file", type=str, help='Optional file path to use for valid file')
    parser.add_argument("--preprocessed", type=str2bool, default=True, help="Has the data already been preprocessed?")
    parser.add_argument("--gen_d_model", type=int, default=256, help="Model dimension (and embedding dsz)")
    parser.add_argument("--gen_d_ff", type=int, default=1024, help="FFN dimension")
    parser.add_argument("--gen_d_k", type=int, default=None,
                        help="Dimension per head.  Use if num_heads=1 to reduce dims")
    parser.add_argument("--gen_num_heads", type=int, default=8, help="Number of heads")
    parser.add_argument("--gen_num_layers", type=int, default=8, help="Number of layers")
    parser.add_argument("--gen_dropout", type=float, default=0.1, help="Dropout")
    parser.add_argument('--gen_rpr_k', type=int, default=[8], nargs='+',
                        help="Relative attention positional sizes; pass 0 if you don't want relative attention")
    parser.add_argument("--discrim_d_model", type=int, default=512, help="Model dimension (and embedding dsz)")
    parser.add_argument("--discrim_d_ff", type=int, default=2048, help="FFN dimension")
    parser.add_argument("--discrim_d_k", type=int, default=None,
                        help="Dimension per head.  Use if num_heads=1 to reduce dims")
    parser.add_argument("--discrim_num_heads", type=int, default=8, help="Number of heads")
    parser.add_argument("--discrim_num_layers", type=int, default=8, help="Number of layers")
    parser.add_argument("--discrim_dropout", type=float, default=0.1, help="Dropout")
    parser.add_argument('--discrim_rpr_k', type=int, default=[8], nargs='+',
                        help="Relative attention positional sizes; pass 0 if you don't want relative attention")
    parser.add_argument("--num_train_workers", type=int, default=4, help="Number train workers")
    parser.add_argument("--nctx", type=int, default=256, help="Max context length (for both encoder and decoder)")
    parser.add_argument("--embed_type", type=str, default='default',
                        choices=["default", "positional", "learned-positional"],
                        help="register label of the embeddings")
    parser.add_argument("--pattern", default='*.json',
                        help="Glob pattern for files, defaults to *.json if preprocessed, *.txt otherwise")
    parser.add_argument("--batch_size", type=int, default=256, help="Batch Size")
    parser.add_argument("--dataset_key", default="reddit", help="dataset key for basedir")
    parser.add_argument("--subword_model_file", type=str, required=True)
    parser.add_argument("--subword_vocab_file", type=str, required=True)
    parser.add_argument("--lr_scheduler", type=str, default='cosine', help="The type of learning rate decay scheduler")
    parser.add_argument("--lr_decay_steps", type=int, help="decay steps of lr scheduler")
    parser.add_argument("--lr_decay_rate", type=float, help="decay rate of lr scheduler")
    parser.add_argument("--lr_alpha", type=float, help="parameter alpha for cosine decay scheduler")
    parser.add_argument("--optim", default="adam", type=str, help="Optimizer to use (defaults to adam)")
    parser.add_argument("--lr", type=float, default=4.0e-4, help="Learning rate")
    parser.add_argument("--clip", type=float, default=1.0, help="Clipping gradient norm")
    parser.add_argument("--gen_loss_scale", type=float, default=50.0, help="Scaling for loss function")
    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay")
    parser.add_argument("--epochs", type=int, default=32, help="Num training epochs")
    parser.add_argument("--restart_from", type=str,
                        help="Option allows you to restart from the latest checkpoint in a directory")
    parser.add_argument("--restart_tt", type=str, choices=['step', 'epoch'], default='step',
                        help="Optional param for legacy checkpoints (step|epoch)")
    parser.add_argument("--warmup_steps", type=int, default=10000, help="Num warmup steps")
    parser.add_argument("--saves_per_epoch", type=int, default=100, help="The number of checkpoints to save per epoch")
    parser.add_argument("--print", type=str2bool, default=True, help="Print some output")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
                        help="Device (cuda or cpu)")
    parser.add_argument("--distributed", type=str2bool, default=False, help="Are we doing distributed training?")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="Local rank for distributed training (-1 means use the environment variables to find)")
    args = parser.parse_args()

    if args.train_file and not args.valid_file:
        logger.error("If you provide a train_file, you must provide a valid_file")
        return

    if not args.train_file and args.valid_file:
        logger.error("If you provide a valid_file, you must also provide a train_file")
        return

    if args.basedir is None:
        args.basedir = 'gd-{}-bpe-{}'.format(args.dataset_key, os.getpid())
    logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s",
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    num_gpus = get_num_gpus_multiworker()
    args.distributed = args.distributed or num_gpus > 1
    logger.info(f"Using {num_gpus} GPUs in this job.")

    if args.distributed:
        args.device, args.local_rank = init_distributed(args.local_rank)

    if not args.preprocessed:
        reader_type = "lang"
        args.pattern = "*.txt"
    else:
        reader_type = "preprocessed"
    reader = MultiFileDatasetReader(args.nctx, args.subword_model_file, args.subword_vocab_file,
                                    args.pattern, reader_type=reader_type)

    # Just return the vocab from the BPE vectorizer
    vocab = reader.build_vocab([])
    gen_embed = baseline.embeddings.load_embeddings('x', dsz=args.gen_d_model, known_vocab=vocab['x'],
                                                    embed_type=args.embed_type)
    vocabs = gen_embed['vocab']
    index2word = revlut(vocabs)

    discrim_embed = baseline.embeddings.load_embeddings('x', dsz=args.discrim_d_model, known_vocab=vocab['x'],
                                                        embed_type=args.embed_type)

    os.makedirs(args.basedir, exist_ok=True)
    # We want to make sure to save our input vocab into the basedir for reuse later
    write_json(vocabs, os.path.join(args.basedir, 'vocabs.json'))
    gen_embeddings = {'x': gen_embed['embeddings']}
    discrim_embeddings = {'x': discrim_embed['embeddings']}
    logger.info("Loaded embeddings")

    train_set = reader.load(args.train_file, vocabs)
    valid_set = reader.load(args.valid_file, vocabs)

    train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=args.num_train_workers)
    valid_loader = DataLoader(valid_set, batch_size=args.batch_size)
    train_steps_per_epoch = len(train_loader) // (args.batch_size * num_gpus)
    valid_steps_per_epoch = len(valid_loader) // args.batch_size
    logger.info("Loaded datasets")
    logger.info("Using embedding type [%s]", args.embed_type)

    mask_value = vocabs.get("[MASK]", vocabs.get("<MASK>", -1))
    if mask_value == -1:
        logger.error("We could not find a suitable masking token in the vocab")
        return
    os.makedirs(args.basedir, exist_ok=True)
    vocab_size = len(vocabs)

    if len(args.gen_rpr_k) == 0 or args.gen_rpr_k[0] < 1:
        gen_rpr_k = None
    elif len(args.gen_rpr_k) == 1:
        gen_rpr_k = args.gen_rpr_k[0]
    else:
        gen_rpr_k = args.gen_rpr_k

    # Note: this branch checks the discriminator's rpr_k settings (the original checked gen_rpr_k here by mistake)
    if len(args.discrim_rpr_k) == 0 or args.discrim_rpr_k[0] < 1:
        discrim_rpr_k = None
    elif len(args.discrim_rpr_k) == 1:
        discrim_rpr_k = args.discrim_rpr_k[0]
    else:
        discrim_rpr_k = args.discrim_rpr_k

    gen_model = TransformerMaskedLanguageModel.create(gen_embeddings,
                                                      hsz=args.gen_d_model,
                                                      d_ff=args.gen_d_ff,
                                                      tie_weights=True,
                                                      dropout=args.gen_dropout,
                                                      num_heads=args.gen_num_heads,
                                                      layers=args.gen_num_layers,
                                                      rpr_k=gen_rpr_k,
                                                      d_k=args.gen_d_k,
                                                      src_keys=['x'], tgt_key='x')
    discrim_model = TransformerDiscriminator(discrim_embeddings,
                                             d_model=args.discrim_d_model,
                                             d_ff=args.discrim_d_ff,
                                             dropout=args.discrim_dropout,
                                             num_heads=args.discrim_num_heads,
                                             layers=args.discrim_num_layers,
                                             activation='gelu',
                                             layer_norm_eps=1.0e-12,
                                             rpr_k=discrim_rpr_k,
                                             d_k=args.discrim_d_k)
    gen_model.to(args.device)
    gen_loss_fn = gen_model.create_loss()

    discrim_model.to(args.device)
    discrim_loss_fn = discrim_model.create_loss()
    logger.info("Loaded model and loss")

    update_on = train_steps_per_epoch // args.saves_per_epoch
    report_on = update_on // 10
    logger.info(f"Steps per epoch per GPU: {train_steps_per_epoch}. Saving checkpoint every {update_on} steps.")
    lr_decay = get_lr_decay(args.lr_scheduler, args.lr, train_steps_per_epoch, args.epochs, logger,
                            decay_steps=args.lr_decay_steps, decay_rate=args.lr_decay_rate, alpha=args.lr_alpha)
    linear_warmup = WarmupLinearSchedulerPyTorch(args.warmup_steps, lr=args.lr)
    lr_sched = CompositeLRScheduler(linear_warmup, lr_decay, lr=args.lr)

    global_step = 0
    start_epoch = 0
    if args.restart_from:
        if not os.path.isdir(args.restart_from):
            raise Exception(f"Cannot restart from {args.restart_from}, directory not found")
        tick_type = args.restart_tt
        discrim_latest, step_num = find_latest_checkpoint(args.restart_from,
                                                          wildcard=f'checkpoint-discrim-{tick_type}')
        gen_latest, _ = find_latest_checkpoint(args.restart_from, wildcard=f'checkpoint-gen-{tick_type}')
        discrim_model.load_state_dict(torch.load(discrim_latest))
        gen_model.load_state_dict(torch.load(gen_latest))
        if tick_type == 'step':
            start_epoch = step_num // train_steps_per_epoch
            global_step = step_num
        else:
            start_epoch = step_num
            global_step = train_steps_per_epoch * start_epoch

    parameters = list(discrim_model.parameters()) + list(gen_model.parameters())
    optz = OptimizerManager(parameters, global_step, optim=args.optim, lr=args.lr,
                            lr_function=lr_sched, weight_decay=args.weight_decay)
    logger.info("Generator has {:,} parameters".format(
        sum(p.numel() for p in gen_model.parameters() if p.requires_grad)))
    logger.info("Discriminator has {:,} parameters".format(
        sum(p.numel() for p in discrim_model.parameters() if p.requires_grad)))

    # Prepare model for distributed training if needed
    if args.distributed:
        # This program assumes pure data parallelism: each model replica lives on a single GPU.
        # If we wanted to support model and data parallelism we would need to update the selection
        # of GPUs based on rank, selecting multiple ids per rank; here we select only a single GPU
        # and use it for both input and output.
        gen_model = DistributedDataParallel(gen_model, device_ids=[args.device], output_device=args.device)
        discrim_model = DistributedDataParallel(discrim_model, device_ids=[args.device], output_device=args.device)
        logger.info("Model located on %s", args.device)

    # This is the training loop
    steps = global_step
    model_base = os.path.join(args.basedir, 'checkpoint')
    discrim_base = f'{model_base}-discrim'
    gen_base = f'{model_base}-gen'
    do_on_demand_masking = not args.preprocessed
    if do_on_demand_masking:
        logger.info("On-demand masking is turned on")

    timer = Timer()

    for epoch in range(start_epoch, args.epochs):
        gen_model.train()
        discrim_model.train()
        avg_gen_loss = Average('average_train_gen_loss')
        avg_discrim_loss = Average('average_train_discrim_loss')
        avg_discrim_acc = Average('average_train_discrim_acc')
        avg_train_loss = Average('average_train_loss')
        metrics = {}
        optz.zero_grad()
        timer.start()
        print(f'Starting epoch {epoch + 1}')
        train_iter = iter(train_loader)
        valid_iter = iter(valid_loader)

        for i in range(train_steps_per_epoch):
            steps += 1
            x, y = next(train_iter)
            do_report = (i + 1) % report_on == 0 and args.print
            gen_loss_step, discrim_loss_step, acc = gen_vs_discrim(x, y, args.device, gen_model, gen_loss_fn,
                                                                   discrim_model, discrim_loss_fn, mask_value,
                                                                   vocab_size, index2word, do_report,
                                                                   do_on_demand_masking)
            avg_gen_loss.update(gen_loss_step.item())
            total_loss_step = gen_loss_step + args.gen_loss_scale * discrim_loss_step
            total_loss_step.backward()
            avg_discrim_loss.update(discrim_loss_step.item())
            avg_train_loss.update(total_loss_step.item())
            avg_discrim_acc.update(acc)
            torch.nn.utils.clip_grad_norm_(parameters, args.clip)
            optz.step()
            optz.zero_grad()

            if (i + 1) % report_on == 0:
                logging.info('Loss g=%f, d=%f total=%f, Per token acc=%f',
                             avg_gen_loss.avg, avg_discrim_loss.avg, avg_train_loss.avg, avg_discrim_acc.avg)
            if (i + 1) % update_on == 0 and args.local_rank < 1:
                elapsed = timer.elapsed(True)
                logging.info('elapsed time this epoch %d min', elapsed)
                logging.info('elapsed step time %f steps/min', i / elapsed)
                logging.info('LR: %f', optz.current_lr)
                save_checkpoint(gen_model, gen_base, steps, tick_type='step')
                save_checkpoint(discrim_model, discrim_base, steps, tick_type='step')

        # How much time elapsed in minutes
        elapsed = timer.elapsed(True)
        # These are the average training losses and per-token discriminator accuracy across all machines
        metrics['train_elapsed_min'] = elapsed
        metrics['average_train_gen_loss'] = avg_gen_loss.avg
        metrics['average_train_discrim_loss'] = avg_discrim_loss.avg
        metrics['average_train_discrim_per_token_accuracy'] = avg_discrim_acc.avg
        metrics['average_train_loss'] = avg_train_loss.avg

        if args.local_rank < 1:
            avg_valid_gen_loss = Average('average_valid_gen_loss')
            avg_valid_discrim_loss = Average('average_valid_discrim_loss')
            avg_valid_discrim_acc = Average('average_valid_discrim_acc')
            avg_valid_loss = Average('average_valid_loss')
            timer.start()
            gen_model.eval()
            discrim_model.eval()
            for i in range(valid_steps_per_epoch):
                with torch.no_grad():
                    x, y = next(valid_iter)
                    do_report = (i + 1) % report_on == 0 and args.print
                    gen_loss_step, discrim_loss_step, acc = gen_vs_discrim(x, y, args.device, gen_model, gen_loss_fn,
                                                                           discrim_model, discrim_loss_fn, mask_value,
                                                                           vocab_size, index2word, do_report,
                                                                           do_on_demand_masking)
                    avg_valid_gen_loss.update(gen_loss_step.item())
                    avg_valid_discrim_acc.update(acc)
                    avg_valid_discrim_loss.update(discrim_loss_step.item())
                    total_loss_step = gen_loss_step + args.gen_loss_scale * discrim_loss_step
                    avg_valid_loss.update(total_loss_step.item())

            elapsed = timer.elapsed(True)
            metrics['valid_elapsed_min'] = elapsed
            metrics['average_valid_gen_loss'] = avg_valid_gen_loss.avg
            metrics['average_valid_discrim_loss'] = avg_valid_discrim_loss.avg
            metrics['average_valid_discrim_per_token_accuracy'] = avg_valid_discrim_acc.avg
            metrics['average_valid_loss'] = avg_valid_loss.avg
            logger.info(metrics)

        save_checkpoint(discrim_model, discrim_base, epoch, tick_type='epoch', save_npz=True)
        save_checkpoint(gen_model, gen_base, epoch, tick_type='epoch', save_npz=True)
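# Entry-point sketch (not in the original excerpt): this argparse-driven trainer would normally be
# launched through a conventional main guard, e.g.
#   python <this_script>.py --subword_model_file codes.model --subword_vocab_file vocab.txt \
#       --train_file /data/train --valid_file /data/valid
# where the script name and file paths are placeholders; only the subword model/vocab flags are required.
if __name__ == '__main__':
    train()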