def run_loop(session,
             modes,
             models,
             summary_ops=None,
             logging=False,
             name_modifier=""):
    """
    Runs models for the specified number of epochs.
    :param session: MonitoredTrainingSession to use
    :param modes: List of modes. Whether to train, evaluate, or test
    :param models: List of models
    :param summary_ops: List of summary operations with one operation per model, or None.
    :param logging: Whether to write summaries for tensorboard.
    :param name_modifier: Suffix to identify the model in tensorboard. Creates a subdirectory.
    :return: List of lists, where each sub-list is the loss/epoch.
    """
    assert len(modes) == len(models)
    assert summary_ops is None or len(summary_ops) == len(models)
    assert summary_ops is None or logging

    nr_models = len(modes)
    # Variables assumed identical across models are read from the first one.
    representative_config = models[0].config

    writers = []
    losses = []
    for i in range(nr_models):
        losses.append([])
        if logging:
            writers.append(
                tf.summary.FileWriter(
                    os.path.join(
                        log_path,
                        Mode.to_string(modes[i]).lower() + name_modifier),
                    session.graph))
        else:
            writers.append(None)

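    # Run every model once per epoch, in the given order, so e.g. a training
    # pass and an evaluation pass stay aligned epoch by epoch.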
    for i in range(representative_config.nr_epochs):
        for m in range(nr_models):
            model = models[m]
            perplexity = run_epoch(
                session,
                model,
                writer=writers[m],
                # Guard the None case permitted by the assertions above;
                # indexing summary_ops directly would crash when it is None.
                summary_op=summary_ops[m] if summary_ops is not None else None,
                eval_op=model.train_op if modes[m] == Mode.train else None)
            losses[m].append(perplexity)
            log("%s Epoch: %d Perplexity: %.3f" %
                (Mode.to_string(modes[m]), i + 1, perplexity))

    return losses
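

# A minimal run_loop usage sketch. `config`, `train_data`, and the session
# setup are placeholders for whatever the surrounding project provides:
#
#     train_model, train_summary = instantiate_model(Mode.train, config,
#                                                    train_data)
#     with tf.train.MonitoredTrainingSession() as sess:
#         losses = run_loop(sess,
#                           modes=[Mode.train],
#                           models=[train_model],
#                           summary_ops=[train_summary],
#                           logging=True)
#     # losses[0] holds the training perplexity for each epoch.
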
def instantiate_model(mode, config, data_set):
    """
    Instantiates the model with the given configuration and data.
    :param mode: Mode enum specifying train/test/validation.
    :param config:
    :param data_set: a tf.data.dataset
    :return: model, summary operation
    """
    name = Mode.to_string(mode)

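    # Every model variable is initialized from U(-init_scale, init_scale).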
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope(name=name):
        model_input = ModelInput(config=config, data=data_set)
        with tf.variable_scope("Model",
                               reuse=tf.AUTO_REUSE,
                               initializer=initializer):
            model = SeqModel(is_training=(mode == Mode.train),
                             config=config,
                             dataset=model_input)

            summary = tf.summary.scalar(name + " Loss", model.normalized_cost)
            if mode == Mode.train:
                summ_lr = tf.summary.scalar("Learning Rate",
                                            model.config.learning_rate)
                summary = tf.summary.merge([summary, summ_lr])
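            # Record static hyperparameters as scalar summaries so the run's
            # configuration shows up next to its curves in TensorBoard.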
            summ_layers = tf.summary.scalar("Layers", model.config.num_layers)
            summ_bs = tf.summary.scalar("Batch Size", model.config.batch_size)
            summ_grad_norm = tf.summary.scalar("Gradient Norm",
                                               model.config.max_grad_norm)
            summ_keep_prob = tf.summary.scalar("Dropout Keep Probability",
                                               model.config.keep_prob)
            summ_hidden_size = tf.summary.scalar("State Size",
                                                 model.config.hidden_size)
            summary = tf.summary.merge([
                summary, summ_layers, summ_bs, summ_grad_norm, summ_keep_prob,
                summ_hidden_size
            ])

    return model, summary
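

# Sketch of how the variable scope makes paired models share weights: because
# the "Model" scope is opened with tf.AUTO_REUSE, a second call reuses the
# variables created by the first instead of allocating new ones. `config`,
# `train_set`, and `valid_set` are placeholders; `Mode.evaluate` is a guess
# at the enum member's exact name:
#
#     train_model, train_summ = instantiate_model(Mode.train, config, train_set)
#     eval_model, eval_summ = instantiate_model(Mode.evaluate, config, valid_set)
#     # Both models now read the same weights; pass them to run_loop together.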