# This function relies on the trainer script's module-level imports
# (os, tensorflow as tf, and the project's dataset, record, models,
# distribute, parallel, optimizers, hooks, inference, cache and optimize
# modules), which are not shown in this excerpt.
def main(args):
    if args.distribute:
        distribute.enable_distributed_training()

    tf.logging.set_verbosity(tf.logging.INFO)
    model_cls = models.get_model(args.model)
    params = default_parameters()

    # Import and override parameters
    # Priorities (low -> high):
    # default -> saved -> command
    params = merge_parameters(params, model_cls.get_parameters())
    params = import_params(args.output, args.model, params)
    override_parameters(params, args)

    # Export all parameters and model specific parameters
    if distribute.rank() == 0:
        export_params(params.output, "params.json", params)
        export_params(params.output, "%s.json" % args.model,
                      collect_params(params, model_cls.get_parameters()))

    # Build Graph
    with tf.Graph().as_default():
        if not params.record:
            # Build input queue
            features = dataset.get_training_input(params.input, params)
        else:
            features = record.get_input_features(
                os.path.join(params.record, "*train*"), "train", params)

        # Build model
        initializer = get_initializer(params)
        regularizer = tf.contrib.layers.l1_l2_regularizer(
            scale_l1=params.scale_l1, scale_l2=params.scale_l2)
        model = model_cls(params)

        # Create global step
        global_step = tf.train.get_or_create_global_step()
        dtype = tf.float16 if args.half else None

        # Multi-GPU setting
        sharded_losses = parallel.parallel_model(
            model.get_training_func(initializer, regularizer, dtype),
            features, params.device_list)
        loss = tf.add_n(sharded_losses) / len(sharded_losses)
        loss = loss + tf.losses.get_regularization_loss()

        if distribute.rank() == 0:
            print_variables()

        learning_rate = get_learning_rate_decay(params.learning_rate,
                                                global_step, params)
        learning_rate = tf.convert_to_tensor(learning_rate, dtype=tf.float32)

        tf.summary.scalar("loss", loss)
        tf.summary.scalar("learning_rate", learning_rate)

        # Create optimizer
        if params.optimizer == "Adam":
            opt = tf.train.AdamOptimizer(learning_rate,
                                         beta1=params.adam_beta1,
                                         beta2=params.adam_beta2,
                                         epsilon=params.adam_epsilon)
        elif params.optimizer == "LazyAdam":
            opt = tf.contrib.opt.LazyAdamOptimizer(learning_rate,
                                                   beta1=params.adam_beta1,
                                                   beta2=params.adam_beta2,
                                                   epsilon=params.adam_epsilon)
        else:
            raise RuntimeError("Optimizer %s not supported" % params.optimizer)

        opt = optimizers.MultiStepOptimizer(opt, params.update_cycle)

        if args.half:
            opt = optimizers.LossScalingOptimizer(opt, params.loss_scale)

        # Optimization
        grads_and_vars = opt.compute_gradients(
            loss, colocate_gradients_with_ops=True)

        if params.clip_grad_norm:
            grads, var_list = list(zip(*grads_and_vars))
            grads, _ = tf.clip_by_global_norm(grads, params.clip_grad_norm)
            grads_and_vars = zip(grads, var_list)

        train_op = opt.apply_gradients(grads_and_vars,
                                       global_step=global_step)

        # Validation
        if params.validation and params.references[0]:
            files = [params.validation] + list(params.references)
            eval_inputs = dataset.sort_and_zip_files(files)
            eval_input_fn = dataset.get_evaluation_input
        else:
            eval_input_fn = None

        # Hooks
        train_hooks = [
            tf.train.StopAtStepHook(last_step=params.train_steps),
            tf.train.NanTensorHook(loss),
            tf.train.LoggingTensorHook(
                {
                    "step": global_step,
                    "loss": loss,
                    "source": tf.shape(features["source"]),
                    "target": tf.shape(features["target"])
                },
                every_n_iter=1)
        ]

        broadcast_hook = distribute.get_broadcast_hook()

        if broadcast_hook:
            train_hooks.append(broadcast_hook)

        if distribute.rank() == 0:
            # Add hooks
            save_vars = tf.trainable_variables() + [global_step]
            saver = tf.train.Saver(
                var_list=save_vars if params.only_save_trainable else None,
                max_to_keep=params.keep_checkpoint_max,
                sharded=False)
            tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
            train_hooks.append(
                hooks.MultiStepHook(
                    tf.train.CheckpointSaverHook(
                        checkpoint_dir=params.output,
                        save_secs=params.save_checkpoint_secs or None,
                        save_steps=params.save_checkpoint_steps or None,
                        saver=saver),
                    step=params.update_cycle))

            if eval_input_fn is not None:
                train_hooks.append(
                    hooks.MultiStepHook(
                        hooks.EvaluationHook(
                            lambda f: inference.create_inference_graph(
                                [model], f, params),
                            lambda: eval_input_fn(eval_inputs, params),
                            lambda x: decode_target_ids(x, params),
                            params.output,
                            session_config(params),
                            device_list=params.device_list,
                            max_to_keep=params.keep_top_checkpoint_max,
                            eval_secs=params.eval_secs,
                            eval_steps=params.eval_steps),
                        step=params.update_cycle))
            checkpoint_dir = params.output
        else:
            checkpoint_dir = None

        restore_op = restore_variables(args.checkpoint)

        def restore_fn(step_context):
            step_context.session.run(restore_op)

        # Create session, do not use default CheckpointSaverHook
        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=checkpoint_dir,
                hooks=train_hooks,
                save_checkpoint_secs=None,
                config=session_config(params)) as sess:
            # Restore pre-trained variables
            sess.run_step_fn(restore_fn)

            while not sess.should_stop():
                sess.run(train_op)
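
# Illustration (not part of the trainer): the effect of wrapping the optimizer
# in optimizers.MultiStepOptimizer above. A minimal sketch in plain Python,
# assuming SGD-style updates on a scalar parameter; the function name and
# learning rate are hypothetical, and the exact reduction MultiStepOptimizer
# applies (mean vs. sum) may differ.
def _accumulated_update_sketch(grads, update_cycle, lr=0.1):
    """Apply one update per `update_cycle` gradients, using their mean."""
    param = 0.0
    buffered = []
    for g in grads:
        buffered.append(g)
        if len(buffered) == update_cycle:
            # One real optimizer step after update_cycle accumulation steps,
            # emulating a batch that is update_cycle times larger.
            param -= lr * sum(buffered) / len(buffered)
            buffered = []
    return param

# For example, _accumulated_update_sketch([1.0, 1.0, 3.0, 3.0], 2) performs
# two updates with mean gradients 1.0 and 3.0, returning -0.4.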
def main(args):
    if args.distribute:
        distribute.enable_distributed_training()

    tf.logging.set_verbosity(tf.logging.INFO)
    model_cls = models.get_model(args.model)
    params = default_parameters()

    # Import and override parameters
    # Priorities (low -> high):
    # default -> saved -> command
    params = merge_parameters(params, model_cls.get_parameters())
    params = import_params(args.output, args.model, params)
    override_parameters(params, args)

    # Export all parameters and model specific parameters
    if not args.distribute or distribute.rank() == 0:
        export_params(params.output, "params.json", params)
        export_params(params.output, "%s.json" % args.model,
                      collect_params(params, model_cls.get_parameters()))

    # The third input stream is expected to be the right-to-left (r2l)
    # target side.
    assert "r2l" in params.input[2]

    # Build Graph
    use_all_devices(params)
    with tf.Graph().as_default():
        if not params.record:
            # Build input queue
            features = dataset.abd_get_training_input(params.input, params)
        else:
            features = record.get_input_features(
                os.path.join(params.record, "*train*"), "train", params)

        # Cache update_cycle batches so that one outer step consumes
        # update_cycle inputs
        update_cycle = params.update_cycle
        features, init_op = cache.cache_features(features, update_cycle)

        # Build model
        initializer = get_initializer(params)
        regularizer = tf.contrib.layers.l1_l2_regularizer(
            scale_l1=params.scale_l1, scale_l2=params.scale_l2)
        model = model_cls(params)

        # Create global step
        global_step = tf.train.get_or_create_global_step()
        dtype = tf.float16 if args.fp16 else None

        if args.distribute:
            training_func = model.get_training_func(initializer, regularizer,
                                                    dtype)
            loss = training_func(features)
        else:
            # Multi-GPU setting
            sharded_losses = parallel.parallel_model(
                model.get_training_func(initializer, regularizer, dtype),
                features, params.device_list)
            loss = tf.add_n(sharded_losses) / len(sharded_losses)
            loss = loss + tf.losses.get_regularization_loss()

        # Print parameters
        if not args.distribute or distribute.rank() == 0:
            print_variables()

        learning_rate = get_learning_rate_decay(params.learning_rate,
                                                global_step, params)
        learning_rate = tf.convert_to_tensor(learning_rate, dtype=tf.float32)

        tf.summary.scalar("learning_rate", learning_rate)

        # Create optimizer
        if params.optimizer == "Adam":
            opt = tf.train.AdamOptimizer(learning_rate,
                                         beta1=params.adam_beta1,
                                         beta2=params.adam_beta2,
                                         epsilon=params.adam_epsilon)
        elif params.optimizer == "LazyAdam":
            opt = tf.contrib.opt.LazyAdamOptimizer(learning_rate,
                                                   beta1=params.adam_beta1,
                                                   beta2=params.adam_beta2,
                                                   epsilon=params.adam_epsilon)
        else:
            raise RuntimeError("Optimizer %s not supported" % params.optimizer)

        loss, ops = optimize.create_train_op(
            loss, opt, global_step,
            distribute.all_reduce if args.distribute else None,
            args.fp16, params)

        restore_op = restore_variables(args.checkpoint)

        # Validation
        if params.validation and params.references[0]:
            files = params.validation + list(params.references)
            eval_inputs = dataset.sort_and_zip_files(files)
            eval_input_fn = dataset.abd_get_evaluation_input
        else:
            eval_input_fn = None

        # Add hooks
        # Logged shapes are scaled by update_cycle to reflect the
        # effective batch
        multiplier = tf.convert_to_tensor([update_cycle, 1])

        train_hooks = [
            tf.train.StopAtStepHook(last_step=params.train_steps),
            tf.train.NanTensorHook(loss),
            tf.train.LoggingTensorHook(
                {
                    "step": global_step,
                    "loss": loss,
                    "source": tf.shape(features["source"]) * multiplier,
                    "target": tf.shape(features["target"]) * multiplier
                },
                every_n_iter=1)
        ]

        if args.distribute:
            train_hooks.append(distribute.get_broadcast_hook())

        config = session_config(params)

        if not args.distribute or distribute.rank() == 0:
            # Add hooks
            save_vars = tf.trainable_variables() + [global_step]
            saver = tf.train.Saver(
                var_list=save_vars if params.only_save_trainable else None,
                max_to_keep=params.keep_checkpoint_max,
                sharded=False)
            tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
            train_hooks.append(
                tf.train.CheckpointSaverHook(
                    checkpoint_dir=params.output,
                    save_secs=params.save_checkpoint_secs or None,
                    save_steps=params.save_checkpoint_steps or None,
                    saver=saver))

        if eval_input_fn is not None:
            if not args.distribute or distribute.rank() == 0:
                train_hooks.append(
                    hooks.EvaluationHook(
                        lambda f: inference.create_inference_graph(
                            [model], f, params),
                        lambda: eval_input_fn(eval_inputs, params),
                        lambda x: decode_target_ids(x, params),
                        params.output,
                        config,
                        params.keep_top_checkpoint_max,
                        eval_secs=params.eval_secs,
                        eval_steps=params.eval_steps))

        def restore_fn(step_context):
            step_context.session.run(restore_op)

        def step_fn(step_context):
            # One outer step: reset the accumulators, collect
            # update_cycle - 1 partial gradients while bypassing hook calls,
            # then apply the combined update (hooks fire only on this
            # final run)
            step_context.session.run([init_op, ops["zero_op"]])
            for i in range(update_cycle - 1):
                step_context.session.run(ops["collect_op"])
            return step_context.run_with_hooks(ops["train_op"])

        # Create session, do not use default CheckpointSaverHook
        if not args.distribute or distribute.rank() == 0:
            checkpoint_dir = params.output
        else:
            checkpoint_dir = None

        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=checkpoint_dir,
                hooks=train_hooks,
                save_checkpoint_secs=None,
                config=config) as sess:
            # Restore pre-trained variables
            sess.run_step_fn(restore_fn)

            while not sess.should_stop():
                sess.run_step_fn(step_fn)
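
# Illustration (not part of the trainer): the control flow of step_fn above,
# mocked without TensorFlow. MockStepContext is hypothetical; it mirrors only
# the subset of tf.train.StepContext used by step_fn (session.run and
# run_with_hooks). It shows that accumulation ops run without triggering
# hooks, and hooks fire exactly once per outer step, on the final train_op.
class MockStepContext(object):
    def __init__(self):
        self.log = []
        self.session = self

    def run(self, fetches):
        # Plays the role of session.run: no hooks involved.
        self.log.append(("no-hooks", fetches))

    def run_with_hooks(self, fetches):
        # Plays the role of StepContext.run_with_hooks: hooks fire here.
        self.log.append(("with-hooks", fetches))
        return fetches

def mock_step_fn(step_context, update_cycle):
    step_context.session.run(["init_op", "zero_op"])
    for _ in range(update_cycle - 1):
        step_context.session.run("collect_op")
    return step_context.run_with_hooks("train_op")

# mock_step_fn(MockStepContext(), 3) logs the init/zero run and two collect
# runs without hooks, then a single hooked train_op run.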