def create_model(embeddings, **kwargs):
    # TensorFlow variant: builds the LM inside a uniformly-initialized variable scope,
    # creating a session if the caller did not supply one
    unif = kwargs['unif']
    if 'sess' not in kwargs:
        kwargs['sess'] = tf.Session()
    weight_initializer = tf.random_uniform_initializer(-unif, unif)
    with tf.variable_scope('Model', initializer=weight_initializer):
        lm = create_lang_model(BASELINE_LM_MODELS, embeddings, **kwargs)
    return lm
def create_model(embeddings, **kwargs):
    # Variant without any TensorFlow session/scope setup; delegates directly to the model registry
    lm = create_lang_model(BASELINE_LM_MODELS, embeddings, **kwargs)
    return lm
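# Illustrative only: a minimal sketch of how create_model might be called once embeddings have
# been loaded with baseline.embeddings.load_embeddings (run() below shows the full pipeline).
# The hyperparameter values and the 'unif' setting here are placeholders, not defaults from this file.
def _example_create_model(preproc_data):
    embeddings = {'x': preproc_data['embeddings']}
    return create_model(embeddings, unif=0.1, hsz=512, layers=8, tgt_key='x')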
def run(basedir=None, train_file=None, valid_file=None, dataset_key='tlm', embed_type='default',
        d_model=512, d_ff=2048, d_k=None, num_heads=8, num_layers=8, num_train_workers=4,
        nctx=256, file_type='json', batch_size=256, subword_model_file=None, subword_vocab_file=None,
        dropout=0.1, ffn_pdrop=0.0, layer_drop=0.0, lr_scheduler='cosine', lr_decay_steps=None,
        lr_decay_rate=None, lr_alpha=0.0, optim='adamw', lr=4.0e-4, clip=1.0, weight_decay=1.0e-2,
        epochs=32, restart_from=None, restart_tt=None, warmup_steps=10000, saves_per_epoch=10,
        mlm=True, preprocessed=True, rpr_k=[8], rpr_value_on=False, windowed_ra=False,
        device="cuda", distributed=False, local_rank=-1, extra_tokens=["[CLS]", "[MASK]"],
        do_early_stopping=False, model_type='transformer-mlm', modules=[], ra_type=None,
        transformer_type=None, **kwargs):
    if basedir is None:
        basedir = 'lm-{}-bpe-{}'.format(dataset_key, os.getpid())
    logging.basicConfig(level=logging.INFO if local_rank in [-1, 0] else logging.WARN)
    for module in modules:
        import_user_module(module)
    num_gpus = get_num_gpus_multiworker()
    distributed = distributed or num_gpus > 1
    logger.info(f"Using {num_gpus} GPUs in this job.")

    do_on_demand_masking = mlm and not preprocessed
    if do_on_demand_masking:
        logger.info("On-demand masking is turned on")

    if distributed:
        device, updated_local_rank = init_distributed(local_rank)
        local_rank = updated_local_rank

    if file_type == 'tfrecord':
        reader_type = 'tfrecord'
    elif preprocessed:
        reader_type = 'preprocessed'
    else:
        reader_type = 'lang'
    reader = MultiFileDatasetReader(src_nctx=nctx, model_file=subword_model_file,
                                    vocab_file=subword_vocab_file, file_type=file_type,
                                    reader_type=reader_type,
                                    record_keys=['x', 'y'] if mlm else ['x'],
                                    extra_tokens=extra_tokens)

    # This looks a bit funny, but the streaming reader ignores our vocab and gives us the one
    # from the subword model.  However, we do need counts from our dataset for validation so
    # we can calculate the perplexity
    vocab = reader.build_vocab([valid_file])
    # If we are not using chars, then use 'x' for both input and output
    preproc_data = baseline.embeddings.load_embeddings('x', dsz=d_model, known_vocab=vocab['x'],
                                                       preserve_vocab_indices=True,
                                                       embed_type=embed_type)
    vocabs = preproc_data['vocab']

    os.makedirs(basedir, exist_ok=True)
    # We want to make sure to save our input vocab into the basedir for reuse later
    write_json(vocabs, os.path.join(basedir, 'vocabs.json'))
    embeddings = {'x': preproc_data['embeddings']}
    logger.info("Loaded embeddings")

    train_set = reader.load(train_file, vocabs)
    valid_set = reader.load(valid_file, vocabs, distribute=False, shuffle=False)

    train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=num_train_workers)
    valid_loader = DataLoader(valid_set, batch_size=batch_size)
    logger.info("Loaded datasets")
    logger.info("Using embedding type [%s]", embed_type)

    if 'mlm' in model_type:
        mask_from = vocabs
        vocab_size = len(mask_from)
        # Default to -1 so the check below fires when no masking token is present in the vocab
        mask_value = mask_from.get("[MASK]", -1)
        if mask_value == -1:
            logger.error("We could not find a suitable masking token in the vocab")
            return

    if len(rpr_k) == 0 or rpr_k[0] < 1:
        rpr_k = None
    elif len(rpr_k) == 1:
        rpr_k = None if rpr_k[0] == 0 else rpr_k[0]

    if ra_type is not None and ra_type != 'shaw' and rpr_k is not None:
        print(f"Relative attention mismatch. You requested {ra_type} with rpr set. Setting it to 0")
        rpr_k = None

    model = create_lang_model(
        embeddings,
        hsz=d_model,
        nctx=nctx,  # Only for gMLP
        d_ff=d_ff,
        tie_weights=True,
        dropout=dropout,
        gpu=False,
        num_heads=num_heads,
        layers=num_layers,
        rpr_k=rpr_k,
        d_k=d_k,
        ffn_pdrop=ffn_pdrop,
        windowed_ra=windowed_ra,
        rpr_value_on=rpr_value_on,
        layer_drop=layer_drop,
        model_type=model_type,
        ra_type=ra_type,
        transformer_type=transformer_type,
        src_keys=['x'], tgt_key='x')
    model.to(device)
    loss_function = model.create_loss()
    loss_function.to(device)
    logger.info("Loaded model and loss")

    steps_per_epoch = len(train_loader) // num_gpus
    update_on = steps_per_epoch // saves_per_epoch
    report_on = max(10, update_on) // 10
    logger.info(f"Steps per epoch per GPU: {steps_per_epoch}. Saving checkpoint every {update_on} steps.")
    lr_decay = get_lr_decay(lr_scheduler, lr, steps_per_epoch, epochs, logger,
                            decay_steps=lr_decay_steps, decay_rate=lr_decay_rate, alpha=lr_alpha)
    linear_warmup = WarmupLinearSchedulerPyTorch(warmup_steps, lr=lr)
    lr_sched = CompositeLRScheduler(linear_warmup, lr_decay, lr=lr)

    global_step = 0
    start_epoch = 0
    if restart_from:
        if restart_from.endswith('npz'):
            load_tlm_npz(model, restart_from)
        else:
            model.load_state_dict(torch.load(restart_from))
        # Checkpoint names end with -<tick_type>-<num>.<ext>, so recover where we left off
        vec = restart_from.split("-")
        if restart_tt:
            tick_type = restart_tt
        else:
            tick_type = vec[-2]
        step_num = int(vec[-1].split(".")[0])
        if tick_type == 'epoch':
            start_epoch = step_num
            global_step = start_epoch * steps_per_epoch
        elif tick_type == 'step':
            start_epoch = step_num // steps_per_epoch
            global_step = step_num
        else:
            logger.warning(f"The previous tick was {step_num} but command-line specifies to ignore, setting to 0")

        logger.info("Restarting from a previous checkpoint %s.\n\tStarting at global_step=%d, epoch=%d",
                    restart_from, global_step, start_epoch + 1)

    optimizer = OptimizerManager(model, global_step, optim=optim, lr=lr, lr_function=lr_sched,
                                 weight_decay=weight_decay)
    logger.info("Model has {:,} parameters".format(
        sum(p.numel() for p in model.parameters() if p.requires_grad)))

    # Prepare model for distributed training if needed
    if distributed:
        # This program assumes pure data parallelism: each replica of the model lives on a single
        # GPU.  Supporting model parallelism as well would require selecting multiple device ids
        # based on rank; here we select only a single GPU and use it for both input and output.
        model = DistributedDataParallel(model, device_ids=[device], output_device=device,
                                        find_unused_parameters=True)
        logger.info("Model located on %s", device)

    model_base = os.path.join(basedir, 'checkpoint')
    steps = global_step
    best_valid_loss = np.inf

    timer = Timer()

    for epoch in range(start_epoch, epochs):
        avg_loss = Average('average_train_loss')
        metrics = {}
        optimizer.zero_grad()
        timer.start()
        model.train()
        train_itr = iter(train_loader)
        for i in range(steps_per_epoch):
            batch = next(train_itr)
            steps += 1
            x, y = batch
            inputs = x.to(device)
            labels = y.to(device)

            if do_on_demand_masking:
                inputs, labels, _ = on_demand_mlm_masking(inputs, labels, mask_value, vocab_size)
            inputs = {'x': inputs}
            labels = labels.contiguous()
            logits = model(inputs, None)[0].contiguous()
            if mlm:
                loss = loss_function(logits, labels)
            else:
                # For a causal LM, drop the last logit and the first label so that position t
                # predicts token t+1
                shift_logits = logits[:, :-1]
                shift_labels = labels[:, 1:]
                loss = loss_function(shift_logits, shift_labels)

            loss.backward()
            avg_loss.update(loss.item())

            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            optimizer.step()
            optimizer.zero_grad()
            if (i + 1) % report_on == 0:
                logging.info(avg_loss)

            if (i + 1) % update_on == 0 and local_rank < 1:
                elapsed = timer.elapsed(True)
                logging.info('elapsed time this epoch %d min', elapsed)
                logging.info('elapsed step time %f steps/min', i / elapsed)
                logging.info('LR: %f', optimizer.current_lr)

                if not do_early_stopping:
                    save_checkpoint(model, model_base, steps, tick_type='step')
                else:
                    valid_token_loss = validate(model, loss_function, valid_loader, avg_loss, timer,
                                                metrics, do_on_demand_masking, mlm, mask_value,
                                                vocab_size, device)
                    if valid_token_loss < best_valid_loss:
                        best_valid_loss = valid_token_loss
                        logger.info(f"New best valid loss: {best_valid_loss}. Saving checkpoint...")
                        save_checkpoint(model, model_base, steps, tick_type='step')
                    model.train()

        # Without early stopping, validate (and checkpoint) once at the end of every epoch
        if not do_early_stopping:
            _ = validate(model, loss_function, valid_loader, avg_loss, timer, metrics,
                         do_on_demand_masking, mlm, mask_value, vocab_size, device)
            save_checkpoint(model, model_base, epoch, tick_type='epoch')
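# Illustrative only: a minimal sketch of how run() might be invoked programmatically, assuming
# JSON shards and a pre-trained subword (BPE) model/vocab.  All of the paths below are
# placeholders; in practice this script is typically driven from the command line with the
# parsed arguments forwarded to run().
def _example_run():
    run(basedir='./lm-tlm-example',
        train_file='/path/to/train.json',          # placeholder path
        valid_file='/path/to/valid.json',          # placeholder path
        subword_model_file='/path/to/bpe.model',   # placeholder subword model
        subword_vocab_file='/path/to/bpe.vocab',   # placeholder subword vocab
        d_model=512, num_heads=8, num_layers=8,
        batch_size=256, epochs=32, mlm=True)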