def train():
    """Jointly pretrain a masked-LM generator and a Transformer discriminator (ELECTRA-style) with a TF distribution strategy."""
    parser = ArgumentParser()
    parser.add_argument("--basedir", type=str)
    parser.add_argument("--train_dir", type=str, required=True, help="Training directory")
    parser.add_argument("--valid_dir", type=str, required=True, help="Validation directory")
    parser.add_argument("--train_md", type=str, help="Training metadata YAML, defaults to `{train_dir}/md.yml`")
    parser.add_argument("--valid_md", type=str, help="Validation metadata YAML, defaults to `{valid_dir}/md.yml`")
    parser.add_argument("--dataset_key", default="tlm", help="dataset key for basedir")
    parser.add_argument("--embed_type", type=str, default='default',
                        choices=["default", "positional", "learned-positional"],
                        help="register label of the embeddings")
    parser.add_argument("--gen_d_model", type=int, default=256, help="Model dimension (and embedding dsz)")
    parser.add_argument("--gen_d_ff", type=int, default=1024, help="FFN dimension")
    parser.add_argument("--gen_d_k", type=int, default=None, help="Dimension per head. Use if num_heads=1 to reduce dims")
    parser.add_argument("--gen_num_heads", type=int, default=8, help="Number of heads")
    parser.add_argument("--gen_num_layers", type=int, default=8, help="Number of layers")
    parser.add_argument('--gen_rpr_k', type=int, default=[8], nargs='+',
                        help="Relative attention positional sizes, pass 0 if you don't want relative attention")
    parser.add_argument('--windowed_ra', type=str2bool, default=False, help="Whether to prevent attention beyond rpr_k")
    parser.add_argument("--gen_loss_scale", type=float, default=50.0, help="Scaling for loss function")
    parser.add_argument("--gen_dropout", type=float, default=0.1, help="Dropout")
    parser.add_argument('--discrim_rpr_k', type=int, default=[8], nargs='+',
                        help="Relative attention positional sizes, pass 0 if you don't want relative attention")
    parser.add_argument("--discrim_d_model", type=int, default=512, help="Model dimension (and embedding dsz)")
    parser.add_argument("--discrim_d_ff", type=int, default=2048, help="FFN dimension")
    parser.add_argument("--discrim_d_k", type=int, default=None, help="Dimension per head. Use if num_heads=1 to reduce dims")
    parser.add_argument("--discrim_num_heads", type=int, default=8, help="Number of heads")
    parser.add_argument("--discrim_num_layers", type=int, default=8, help="Number of layers")
    parser.add_argument("--discrim_dropout", type=float, default=0.1, help="Dropout")
    parser.add_argument("--num_train_workers", type=int, default=4, help="Number of train workers")
    parser.add_argument("--distribute", type=str, default="mirror", choices=["mirror", "tpu", "nccl"])
    parser.add_argument("--tpu_ep", type=str, help="The TPU endpoint if using `distribute=tpu`")
    parser.add_argument("--nctx", type=int, default=256, help="Max input length")
    parser.add_argument("--file_type", default='tfrecord', choices=['json', 'tfrecord'], help="File type of the data")
    parser.add_argument("--batch_size", type=int, default=256, help="Batch Size")
    parser.add_argument("--subword_model_file", type=str, help="The BPE model file", required=True)
    parser.add_argument("--subword_vocab_file", type=str, help="The BPE subword vocab", required=True)
    parser.add_argument("--optim", default="adam", type=str, help="Optimizer to use (defaults to adam)")
    parser.add_argument("--lr", type=float, default=4.0e-4, help="Learning rate")
    parser.add_argument("--clip", type=float, default=1.0, help="Clipping gradient norm")
    parser.add_argument("--weight_decay", type=float, default=1.0e-2, help="Weight decay")
    parser.add_argument("--epochs", type=int, default=32, help="Num training epochs")
    parser.add_argument("--restart", type=str2bool, help="Option allows you to restart from a previous checkpoint")
    parser.add_argument("--warmup_steps", type=int, default=10000, help="Num warmup steps")
    parser.add_argument("--causal", type=str2bool, default=False, help="Use CLM (causal) instead of MLM")
    parser.add_argument("--saves_per_epoch", type=int, default=10, help="The number of checkpoints to save per epoch")
    parser.add_argument("--strategy", help="Training strategy, defaults to `mirror`", choices=["mirror"])
    parser.add_argument("--npz", help="Should we write out NPZ files?", type=str2bool, default=False)
    parser.add_argument("--tb", help="Turn on tensorboard?", type=str2bool, default=False)
    parser.add_argument("--convert_only", help="Should we just convert this file to NPZ and exit?",
                        type=str2bool, default=False)
    args = parser.parse_args()
    SET_TRAIN_FLAG(True)

    if args.convert_only:
        args.restart = True
        args.npz = True

    if args.basedir is None:
        args.basedir = f'discrim-{args.dataset_key}-bpe-{os.getpid()}'
    logging.basicConfig(level=logging.INFO)
    logger.info(f"Writing results to {args.basedir}")

    if args.tb:
        logdir = f"logs/scalars/{os.getpid()}"
        file_writer = tf.summary.create_file_writer(logdir + "/metrics")
        file_writer.set_as_default()
        logger.info(f"Set up tensorboard logdir {logdir}")

    strategy = create_distribute_strategy(args.distribute, args.tpu_ep)
    num_replicas = strategy.num_replicas_in_sync
    logger.info(f"Using {num_replicas} replicas in this job.")

    vectorizer = BPEVectorizer1D(model_file=args.subword_model_file, vocab_file=args.subword_vocab_file, mxlen=args.nctx)
    vocab = {'x': vectorizer.vocab}
    gen_preproc_data = baseline.embeddings.load_embeddings('x', dsz=args.gen_d_model, known_vocab=vocab['x'],
                                                           preserve_vocab_indices=True, embed_type=args.embed_type)
    vocabs = gen_preproc_data['vocab']
    discrim_preproc_data = baseline.embeddings.load_embeddings('x', dsz=args.discrim_d_model, known_vocab=vocab['x'],
                                                               preserve_vocab_indices=True, embed_type=args.embed_type)

    def dataset_train_fn(input_context):
        batch_size = input_context.get_per_replica_batch_size(args.batch_size)
        ds = get_dataset(args.train_dir, args.file_type, args.num_train_workers).batch(batch_size)
        return ds.shard(input_context.num_input_pipelines, input_context.input_pipeline_id)

    train_loader = strategy.experimental_distribute_datasets_from_function(dataset_train_fn)

    def dataset_test_fn(input_context):
        batch_size = input_context.get_per_replica_batch_size(args.batch_size)
        ds = get_dataset(args.valid_dir, args.file_type, args.num_train_workers, shuffle=False).batch(batch_size)
        return ds.shard(input_context.num_input_pipelines, input_context.input_pipeline_id)

    valid_loader = strategy.experimental_distribute_datasets_from_function(dataset_test_fn)

    train_md = args.train_md if args.train_md else os.path.join(args.train_dir, 'md.yml')
    num_train_samples = get_num_samples(train_md)
    valid_md = args.valid_md if args.valid_md else os.path.join(args.valid_dir, 'md.yml')
    num_valid_samples = get_num_samples(valid_md)

    os.makedirs(args.basedir, exist_ok=True)
    # We want to make sure to save our input vocab into the basedir for reuse later
    write_json(vocabs, os.path.join(args.basedir, 'vocabs.json'))
    gen_embeddings = {'x': gen_preproc_data['embeddings']}
    discrim_embeddings = {'x': discrim_preproc_data['embeddings']}
    logger.info("Loaded embeddings")
    logger.info("Loaded datasets")
    logger.info("Using embedding type [%s]", args.embed_type)

    if len(args.gen_rpr_k) == 0 or args.gen_rpr_k[0] < 1:
        gen_rpr_k = None
    elif len(args.gen_rpr_k) == 1:
        gen_rpr_k = args.gen_rpr_k[0]
    else:
        gen_rpr_k = args.gen_rpr_k

    if len(args.discrim_rpr_k) == 0 or args.discrim_rpr_k[0] < 1:
        discrim_rpr_k = None
    elif len(args.discrim_rpr_k) == 1:
        discrim_rpr_k = args.discrim_rpr_k[0]
    else:
        discrim_rpr_k = args.discrim_rpr_k

    gen_model = TransformerMaskedLanguageModel.create(gen_embeddings,
                                                      hsz=args.gen_d_model,
                                                      d_ff=args.gen_d_ff,
                                                      tie_weights=True,
                                                      dropout=args.gen_dropout,
                                                      gpu=False,
                                                      num_heads=args.gen_num_heads,
                                                      layers=args.gen_num_layers,
                                                      rpr_k=gen_rpr_k,
                                                      d_k=args.gen_d_k,
                                                      windowed_ra=args.windowed_ra,
                                                      src_keys=['x'],
                                                      tgt_key='x')

    discrim_model = TransformerDiscriminator(discrim_embeddings,
                                             d_model=args.discrim_d_model,
                                             d_ff=args.discrim_d_ff,
                                             dropout=args.discrim_dropout,
                                             num_heads=args.discrim_num_heads,
                                             layers=args.discrim_num_layers,
                                             rpr_k=discrim_rpr_k,
                                             d_k=args.discrim_d_k)
    logger.info("Loaded model and loss")

    steps_per_epoch = num_train_samples // args.batch_size
    steps_per_valid_epoch = num_valid_samples // args.batch_size
    update_on = steps_per_epoch // args.saves_per_epoch
    report_on = max(10, update_on) // 10
    logger.info(f"Steps per epoch: {steps_per_epoch}. Saving checkpoint every {update_on} steps.")

    lr_decay = CosineDecaySchedulerTensorFlow(steps_per_epoch * args.epochs, lr=args.lr)
    linear_warmup = WarmupLinearSchedulerTensorFlow(args.warmup_steps, lr=args.lr)
    lr_sched = CompositeLRSchedulerTensorFlow(linear_warmup, lr_decay)

    mask_value = vocabs.get("[MASK]", vocabs.get("<MASK>", -1))
    if mask_value == -1:
        logger.error("We could not find a suitable masking token in the vocab")
        return

    optimizer, clip = create_keras_optimizer(**vars(args))

    discrim_checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=discrim_model)
    discrim_checkpoint_manager = tf.train.CheckpointManager(discrim_checkpoint,
                                                            directory=os.path.join(args.basedir, 'discrim'),
                                                            max_to_keep=5)
    # The generator checkpoint tracks the generator model (not the discriminator)
    gen_checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=gen_model)
    gen_checkpoint_manager = tf.train.CheckpointManager(gen_checkpoint,
                                                        directory=os.path.join(args.basedir, 'gen'),
                                                        max_to_keep=5)

    if args.restart:
        # The global step gets automatically updated here
        # so we dont have to worry about our LR regimen
        gen_checkpoint.restore(gen_checkpoint_manager.latest_checkpoint)
        discrim_checkpoint.restore(discrim_checkpoint_manager.latest_checkpoint)

    def _replicated_train_step(inputs):
        """This runs on a single replica"""
        noised_x, labels = inputs
        with tf.GradientTape() as tape:
            gen_loss_step, discrim_loss_step, acc = gen_vs_discrim(noised_x, labels, gen_model, discrim_model, mask_value)
            loss_value = (args.gen_loss_scale * gen_loss_step + discrim_loss_step) / num_replicas
        grads = tape.gradient(loss_value, gen_model.trainable_variables + discrim_model.trainable_variables)
        grads, _ = tf.clip_by_global_norm(grads, clip)
        optimizer.apply_gradients(zip(grads, gen_model.trainable_variables + discrim_model.trainable_variables))
        return loss_value, gen_loss_step, discrim_loss_step, acc

    @tf.function
    def _distributed_train_step(inputs: Tuple[tf.Tensor, tf.Tensor]):
        """Runs across multiple replicas and aggregates the results.

        :param inputs:
        :return:
        """
        loss, gen_loss, discrim_loss, acc = strategy.run(_replicated_train_step, args=(inputs,))
        sum_loss = strategy.reduce(tf.distribute.ReduceOp.SUM, loss, axis=None)
        sum_gen_loss = strategy.reduce(tf.distribute.ReduceOp.SUM, gen_loss, axis=None)
        sum_discrim_loss = strategy.reduce(tf.distribute.ReduceOp.SUM, discrim_loss, axis=None)
        sum_acc = strategy.reduce(tf.distribute.ReduceOp.SUM, acc, axis=None)
        return sum_loss, sum_gen_loss, sum_discrim_loss, sum_acc

    def _replicated_test_step(inputs):
        """This runs on a single replica"""
        noised_x, labels = inputs
        gen_loss_step, discrim_loss_step, acc = gen_vs_discrim(noised_x, labels, gen_model, discrim_model, mask_value)
        loss_value = (args.gen_loss_scale * gen_loss_step + discrim_loss_step) / num_replicas
        return loss_value, gen_loss_step, discrim_loss_step, acc

    @tf.function
    def _distributed_test_step(inputs: Tuple[tf.Tensor, tf.Tensor]):
        """Runs across multiple replicas and aggregates the results.

        :param inputs:
        :return:
        """
        loss, gen_loss, discrim_loss, acc = strategy.run(_replicated_test_step, args=(inputs,))
        sum_loss = strategy.reduce(tf.distribute.ReduceOp.SUM, loss, axis=None)
        sum_gen_loss = strategy.reduce(tf.distribute.ReduceOp.SUM, gen_loss, axis=None)
        sum_discrim_loss = strategy.reduce(tf.distribute.ReduceOp.SUM, discrim_loss, axis=None)
        sum_acc = strategy.reduce(tf.distribute.ReduceOp.SUM, acc, axis=None)
        return sum_loss, sum_gen_loss, sum_discrim_loss, sum_acc

    # This is the training loop
    start_epoch = 0
    timer = Timer()

    with strategy.scope():
        for epoch in range(start_epoch, args.epochs):
            SET_TRAIN_FLAG(True)
            logger.info('Starting epoch %d', epoch + 1)
            avg_loss = Average('average_train_loss')
            avg_gen_loss = Average('average_gen_loss')
            avg_discrim_loss = Average('average_discrim_loss')
            avg_acc = Average('average_train_acc')
            metrics = {}
            timer.start()
            train_iter = iter(train_loader)
            for i in range(steps_per_epoch):
                loss, gen_loss, discrim_loss, acc = _distributed_train_step(next(train_iter))
                avg_loss.update(loss.numpy().item())
                avg_gen_loss.update(gen_loss.numpy().item())
                avg_discrim_loss.update(discrim_loss.numpy().item())
                avg_acc.update(acc.numpy().item())

                tf.summary.scalar("train_loss", data=loss, step=optimizer.iterations)
                tf.summary.scalar("train_gen_loss", data=gen_loss, step=optimizer.iterations)
                tf.summary.scalar("train_discrim_loss", data=discrim_loss, step=optimizer.iterations)
                tf.summary.scalar("train_acc", data=acc, step=optimizer.iterations)

                if args.convert_only:
                    logger.warning("Convert only flag specified. Stopping after one step")
                    steps = optimizer.iterations.numpy()
                    npz_checkpoint = os.path.join(args.basedir, f'discrim-step-{steps}.npz')
                    save_tlm_npz(discrim_model, npz_checkpoint)
                    npz_checkpoint = os.path.join(args.basedir, f'gen-step-{steps}.npz')
                    save_tlm_npz(gen_model, npz_checkpoint)
                    return

                if (i + 1) % report_on == 0:
                    logger.info(avg_loss)
                    logger.info(avg_gen_loss)
                    logger.info(avg_discrim_loss)
                    logger.info(avg_acc)
                if (i + 1) % update_on == 0:
                    elapsed = timer.elapsed(True)
                    logger.info('elapsed time this epoch %d min', elapsed)
                    logger.info('elapsed step time %f steps/min', i / elapsed)
                    gen_checkpoint_manager.save()
                    discrim_checkpoint_manager.save()
                    if args.npz:
                        steps = optimizer.iterations.numpy()
                        npz_checkpoint = os.path.join(args.basedir, f'discrim-step-{steps}.npz')
                        save_tlm_npz(discrim_model, npz_checkpoint)
                        npz_checkpoint = os.path.join(args.basedir, f'gen-step-{steps}.npz')
                        save_tlm_npz(gen_model, npz_checkpoint)

            # These are the average training losses across all replicas for this epoch
            metrics['train_elapsed_min'] = timer.elapsed(True)
            metrics['average_train_loss'] = avg_loss.avg
            metrics['average_gen_loss'] = avg_gen_loss.avg
            metrics['average_discrim_loss'] = avg_discrim_loss.avg
            metrics['average_train_acc'] = avg_acc.avg
            metrics['lr'] = float(lr_sched(tf.cast(optimizer.iterations, tf.float32)).numpy().item())

            avg_valid_loss = Average('average_valid_loss')
            avg_valid_gen_loss = Average('average_valid_gen_loss')
            avg_valid_discrim_loss = Average('average_valid_discrim_loss')
            avg_valid_acc = Average('average_valid_acc')
            timer.start()
            SET_TRAIN_FLAG(False)
            valid_iter = iter(valid_loader)
            for i in range(steps_per_valid_epoch):
                valid_loss, valid_gen_loss, valid_discrim_loss, valid_acc = _distributed_test_step(next(valid_iter))
                tf.summary.scalar('valid_loss', data=valid_loss, step=optimizer.iterations)
                tf.summary.scalar('valid_gen_loss', data=valid_gen_loss, step=optimizer.iterations)
                tf.summary.scalar('valid_discrim_loss', data=valid_discrim_loss, step=optimizer.iterations)
                tf.summary.scalar('valid_acc', data=valid_acc, step=optimizer.iterations)
                avg_valid_loss.update(valid_loss.numpy().item())
                avg_valid_gen_loss.update(valid_gen_loss.numpy().item())
                avg_valid_discrim_loss.update(valid_discrim_loss.numpy().item())
                avg_valid_acc.update(valid_acc.numpy().item())

            metrics['valid_elapsed_min'] = timer.elapsed(True)
            metrics['average_valid_loss'] = avg_valid_loss.avg
            metrics['average_valid_gen_loss'] = avg_valid_gen_loss.avg
            metrics['average_valid_discrim_loss'] = avg_valid_discrim_loss.avg
            metrics['average_valid_acc'] = avg_valid_acc.avg
            logger.info(json.dumps(metrics, indent=4))
def main():
    """Pretrain a Transformer language model (MLM or causal) with a TF distribution strategy."""
    parser = ArgumentParser()
    parser.add_argument("--basedir", type=str)
    parser.add_argument("--train_dir", type=str, required=True, help="Training directory")
    parser.add_argument("--valid_dir", type=str, required=True, help="Validation directory")
    parser.add_argument("--train_md", type=str, help="Training metadata YAML, defaults to `{train_dir}/md.yml`")
    parser.add_argument("--valid_md", type=str, help="Validation metadata YAML, defaults to `{valid_dir}/md.yml`")
    parser.add_argument("--dataset_key", default="tlm", help="dataset key for basedir")
    parser.add_argument("--embed_type", type=str, default='default',
                        choices=["default", "positional", "learned-positional"],
                        help="register label of the embeddings")
    parser.add_argument("--d_model", type=int, default=512, help="Model dimension (and embedding dsz)")
    parser.add_argument("--d_ff", type=int, default=2048, help="FFN dimension")
    parser.add_argument("--d_k", type=int, default=None, help="Dimension per head. Use if num_heads=1 to reduce dims")
    parser.add_argument("--num_heads", type=int, default=8, help="Number of heads")
    parser.add_argument("--num_layers", type=int, default=8, help="Number of layers")
    parser.add_argument("--num_train_workers", type=int, default=4, help="Number of train workers")
    parser.add_argument("--distribute", type=str, default="mirror", choices=["mirror", "tpu", "nccl"])
    parser.add_argument("--tpu_ep", type=str, help="The TPU endpoint if using `distribute=tpu`")
    parser.add_argument("--nctx", type=int, default=256, help="Max input length")
    parser.add_argument("--file_type", default='tfrecord', choices=['json', 'tfrecord'], help="File type of the data")
    parser.add_argument("--batch_size", type=int, default=256, help="Batch Size")
    parser.add_argument("--subword_model_file", type=str, help="The BPE model file", required=False)
    parser.add_argument("--subword_vocab_file", type=str, help="The BPE subword vocab", required=True)
    parser.add_argument("--subword_type", type=str, choices=["bpe", "wordpiece"], default="bpe")
    parser.add_argument("--dropout", type=float, default=0.1, help="Dropout")
    parser.add_argument("--ffn_pdrop", type=float, default=0.0, help="Dropout in the dense stack")
    parser.add_argument("--layer_drop", type=float, default=0.0, help="LayerDrop to apply")
    parser.add_argument("--optim", default="adamw", type=str, help="Optimizer to use (defaults to adamw)")
    parser.add_argument("--lr", type=float, default=4.0e-4, help="Learning rate")
    parser.add_argument("--clip", type=float, default=1.0, help="Clipping gradient norm")
    parser.add_argument("--weight_decay", type=float, default=1.0e-2, help="Weight decay")
    parser.add_argument("--epochs", type=int, default=32, help="Num training epochs")
    parser.add_argument("--restart", type=str2bool, help="Option allows you to restart from a previous checkpoint")
    parser.add_argument("--warmup_steps", type=int, default=10000, help="Num warmup steps")
    parser.add_argument("--causal", type=str2bool, default=False, help="Use CLM (causal) instead of MLM")
    parser.add_argument("--mlp", type=str2bool, default=False, help="Use Gated MLP")
    parser.add_argument("--saves_per_epoch", type=int, default=10, help="The number of checkpoints to save per epoch")
    parser.add_argument('--rpr_k', type=int, default=[8], nargs='+',
                        help="Relative attention positional sizes, pass 0 if you don't want relative attention")
    parser.add_argument('--rpr_value_on', type=str2bool, default=True,
                        help="In relative attention, whether to add the positional correction to the values "
                             "in addition to the correction to the attention matrix")
    parser.add_argument('--windowed_ra', type=str2bool, default=False, help="Whether to prevent attention beyond rpr_k")
    parser.add_argument("--strategy", help="Training strategy, defaults to `mirror`", choices=["mirror"])
    parser.add_argument("--npz", help="Should we write out NPZ files?", type=str2bool, default=False)
    parser.add_argument("--tb", help="Turn on tensorboard?", type=str2bool, default=False)
    parser.add_argument("--convert_only", help="Should we just convert this file to NPZ and exit?",
                        type=str2bool, default=False)
    parser.add_argument("--extra_tokens", help="What extra tokens should we use", nargs="+", default=["[CLS]", "[MASK]"])
    args = parser.parse_args()
    SET_TRAIN_FLAG(True)

    if args.convert_only:
        args.restart = True

    if args.basedir is None:
        args.basedir = f'lm-{args.dataset_key}-bpe-{os.getpid()}'
    logging.basicConfig(level=logging.INFO)
    logger.info(f"Writing results to {args.basedir}")

    if args.tb:
        logdir = f"{args.basedir}/scalars/{os.getpid()}"
        file_writer = tf.summary.create_file_writer(logdir + "/metrics")
        file_writer.set_as_default()
        logger.info(f"Set up tensorboard logdir {logdir}")

    strategy = create_distribute_strategy(args.distribute, args.tpu_ep)
    num_replicas = strategy.num_replicas_in_sync
    logger.info(f"Using {num_replicas} replicas in this job.")

    Vec1D = BPEVectorizer1D if args.subword_type == 'bpe' else WordpieceVectorizer1D
    vectorizer = Vec1D(model_file=args.subword_model_file, vocab_file=args.subword_vocab_file,
                       mxlen=args.nctx, extra_tokens=args.extra_tokens)
    vocab = {'x': vectorizer.vocab}
    preproc_data = baseline.embeddings.load_embeddings('x', dsz=args.d_model, known_vocab=vocab['x'],
                                                       preserve_vocab_indices=True, embed_type=args.embed_type)
    vocabs = preproc_data['vocab']

    train_md = args.train_md if args.train_md else os.path.join(args.train_dir, 'md.yml')
    num_train_samples = get_num_samples(train_md)
    valid_md = args.valid_md if args.valid_md else os.path.join(args.valid_dir, 'md.yml')
    num_valid_samples = get_num_samples(valid_md)

    is_curriculum = isinstance(num_train_samples, Mapping)

    def dataset_train_fn(input_context):
        global_batchsz = args.batch_size
        base_batchsz = input_context.get_per_replica_batch_size(global_batchsz)
        ds = None
        if is_curriculum:
            for sub in num_train_samples.keys():
                train_curr_dir = os.path.join(args.train_dir, str(sub))
                batchsz_scale_factor = args.nctx // sub
                this_batchsz = base_batchsz * batchsz_scale_factor
                curr_ds = get_dataset(train_curr_dir, args.file_type, args.num_train_workers,
                                      causal=args.causal).batch(this_batchsz, drop_remainder=True)
                if ds is None:
                    ds = curr_ds
                else:
                    ds = ds.concatenate(curr_ds)
        else:
            ds = get_dataset(args.train_dir, args.file_type, args.num_train_workers,
                             causal=args.causal).batch(base_batchsz)
        return ds.shard(input_context.num_input_pipelines, input_context.input_pipeline_id)

    train_loader = strategy.experimental_distribute_datasets_from_function(dataset_train_fn)

    def dataset_test_fn(input_context):
        global_batchsz = args.batch_size
        base_batchsz = input_context.get_per_replica_batch_size(global_batchsz)
        ds = None
        if is_curriculum:
            for sub in num_valid_samples.keys():
                valid_curr_dir = os.path.join(args.valid_dir, str(sub))
                batchsz_scale_factor = args.nctx // sub
                this_batchsz = base_batchsz * batchsz_scale_factor
                curr_ds = get_dataset(valid_curr_dir, args.file_type, args.num_train_workers,
                                      causal=args.causal).batch(this_batchsz, drop_remainder=True)
                if ds is None:
                    ds = curr_ds
                else:
                    ds = ds.concatenate(curr_ds)
        else:
            ds = get_dataset(args.valid_dir, args.file_type, args.num_train_workers,
                             shuffle=False, causal=args.causal).batch(base_batchsz)
        return ds.shard(input_context.num_input_pipelines, input_context.input_pipeline_id)

    valid_loader = strategy.experimental_distribute_datasets_from_function(dataset_test_fn)

    os.makedirs(args.basedir, exist_ok=True)
    # We want to make sure to save our input vocab into the basedir for reuse later
    write_json(vocabs, os.path.join(args.basedir, 'vocabs.json'))
    embeddings = {'x': preproc_data['embeddings']}
    logger.info("Loaded embeddings")
    logger.info("Loaded datasets")
    logger.info("Using embedding type [%s]", args.embed_type)

    model = create_model(args, embeddings)
    if isinstance(model, GatedMLPLanguageModel) and is_curriculum:
        raise Exception("Variable tensor lengths not currently supported for gMLP")
    logger.info("Loaded model and loss")

    if is_curriculum:
        steps_per_epoch = 0
        steps_per_valid_epoch = 0
        for k, v in num_train_samples.items():
            steps_per_epoch += int(num_train_samples[k] // (args.batch_size * (args.nctx / k)))
        for k, v in num_valid_samples.items():
            steps_per_valid_epoch += int(num_valid_samples[k] // (args.batch_size * (args.nctx / k)))
    else:
        steps_per_epoch = num_train_samples // args.batch_size
        steps_per_valid_epoch = num_valid_samples // args.batch_size

    update_on = steps_per_epoch // args.saves_per_epoch
    report_on = max(10, update_on) // 10
    logger.info(f"Steps per epoch: {steps_per_epoch}. Saving checkpoint every {update_on} steps.")

    lr_decay = CosineDecaySchedulerTensorFlow(steps_per_epoch * args.epochs, lr=args.lr)
    linear_warmup = WarmupLinearSchedulerTensorFlow(args.warmup_steps, lr=args.lr)
    lr_sched = CompositeLRSchedulerTensorFlow(linear_warmup, lr_decay)
    optimizer = EagerOptimizer(loss_function, optim=args.optim, lr_function=lr_sched,
                               weight_decay=args.weight_decay, clip=args.clip, lr=args.lr)
    checkpoint = tf.train.Checkpoint(optimizer=optimizer.optimizer, model=model)
    checkpoint_manager = tf.train.CheckpointManager(checkpoint, directory=args.basedir, max_to_keep=5)

    start_epoch = 0
    if args.restart:
        # The global step gets automatically updated here
        # so we dont have to worry about our LR regimen
        checkpoint.restore(checkpoint_manager.latest_checkpoint)
        current_step = optimizer.global_step
        start_epoch = current_step // steps_per_epoch

    def _replicated_train_step(inputs):
        """This runs on a single replica"""
        x, y = inputs
        per_replica_loss = optimizer.update(model, {'x': x}, y, num_replicas)
        return per_replica_loss

    @tf.function
    def _distributed_train_step(inputs: Tuple[tf.Tensor, tf.Tensor]):
        """Runs across multiple replicas and aggregates the results.

        :param inputs:
        :return:
        """
        per_replica_loss = strategy.run(_replicated_train_step, args=(inputs,))
        return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_loss, axis=None)

    def _replicated_test_step(inputs):
        """This runs on a single replica"""
        x, y = inputs
        per_replica_loss = loss_function(model, {'x': x}, y) / num_replicas
        return per_replica_loss

    @tf.function
    def _distributed_test_step(inputs: Tuple[tf.Tensor, tf.Tensor]):
        """Runs across multiple replicas and aggregates the results.

        :param inputs:
        :return:
        """
        per_replica_loss = strategy.run(_replicated_test_step, args=(inputs,))
        return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_loss, axis=None)

    timer = Timer()

    with strategy.scope():
        for epoch in range(start_epoch, args.epochs):
            timer.start()
            SET_TRAIN_FLAG(True)
            logger.info('Starting epoch %d', epoch + 1)
            avg_loss = Average('average_train_loss')
            metrics = {}
            train_iter = iter(train_loader)
            for i in range(steps_per_epoch):
                try:
                    loss = _distributed_train_step(next(train_iter))
                    avg_loss.update(loss.numpy().item())
                    tf.summary.scalar("train_loss", data=loss, step=optimizer.global_step)
                except Exception as e:
                    logger.error(f"Exception at training step {i+1}/{steps_per_epoch}. Skipping")

                if args.convert_only:
                    logger.warning("Convert only flag specified. Stopping after one step")
                    steps = optimizer.global_step.numpy()
                    npz_checkpoint = os.path.join(args.basedir, f'checkpoint-step-{steps}.npz')
                    save_tlm_npz(model, npz_checkpoint)
                    return

                steps = optimizer.global_step.numpy()
                if (steps + 1) % report_on == 0:
                    logger.info(avg_loss)
                if (steps + 1) % update_on == 0:
                    elapsed = timer.elapsed(True)
                    logger.info('elapsed time this epoch %d min', elapsed)
                    logger.info('elapsed step time %f steps/min', i / elapsed)
                    checkpoint_manager.save()
                    if args.npz:
                        npz_checkpoint = os.path.join(args.basedir, f'checkpoint-step-{steps}.npz')
                        save_tlm_npz(model, npz_checkpoint)

            # This is the average training token-level loss across all machines
            train_token_loss = avg_loss.avg
            # This is the token-level training perplexity
            train_token_ppl = math.exp(train_token_loss)
            # How much time elapsed in minutes
            metrics['train_elapsed_min'] = timer.elapsed(True)
            metrics['average_train_loss'] = train_token_loss
            metrics['train_ppl'] = train_token_ppl
            metrics['lr'] = float(lr_sched(tf.cast(optimizer.global_step, tf.float32)).numpy().item())

            avg_valid_loss = Average('average_valid_loss')
            timer.start()
            SET_TRAIN_FLAG(False)
            valid_iter = iter(valid_loader)
            for i in range(steps_per_valid_epoch):
                try:
                    valid_loss = _distributed_test_step(next(valid_iter))
                    tf.summary.scalar('valid_loss', data=valid_loss, step=optimizer.global_step)
                    avg_valid_loss.update(valid_loss.numpy().item())
                except Exception as e:
                    logger.error(f"Exception at validation step {i+1}/{steps_per_valid_epoch}. Skipping")

            valid_token_loss = avg_valid_loss.avg
            valid_token_ppl = math.exp(valid_token_loss)
            metrics['valid_elapsed_min'] = timer.elapsed(True)
            metrics['average_valid_loss'] = valid_token_loss
            metrics['average_valid_word_ppl'] = valid_token_ppl
            logger.info(json.dumps(metrics, indent=4))
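

# Example invocation sketch for main(). The script name and data paths below are
# hypothetical placeholders, not taken from the source; only --train_dir, --valid_dir,
# and --subword_vocab_file are required by the parser above.
#
#   python pretrain_lm.py \
#       --train_dir /path/to/train_tfrecords --valid_dir /path/to/valid_tfrecords \
#       --subword_model_file subwords.model --subword_vocab_file subwords.vocab \
#       --distribute mirror --tb true --npz true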