Example #1
def run(job_dir,
        estimator: tf.estimator.Estimator,
        model_cls,
        dataset,
        step,
        eval_index,
        eval_batch_size):
    ############
    # Datasets #
    ############
    dataset_test = dataset.read(split='test')

    #######
    # Run #
    #######
    checkpoints = tf.train.get_checkpoint_state(job_dir).all_model_checkpoint_paths

    def index_filter(fn, col):
        # Keep the elements whose (index, element) pair satisfies fn.
        return [el for i, el in enumerate(col) if fn(i, el)]

    if step is not None:
        checkpoints = [p for p in checkpoints
                       if os.path.basename(p) == 'model.ckpt-{}'.format(step)]
    elif eval_index is not None:
        checkpoints = index_filter(lambda i, _: i % 8 == eval_index, checkpoints)

    # Evaluate the newest checkpoints first; sort by step number rather than
    # lexicographically so that step 1000 counts as newer than step 999.
    for checkpoint_path in sorted(checkpoints,
                                  key=lambda p: int(os.path.basename(p).split('-')[-1]),
                                  reverse=True):
        step = os.path.basename(checkpoint_path).split('-')[-1]

        # Run evaluation
        tf.logging.info('Start evaluation for {}.'.format(step))
        estimator.evaluate(
            build_input_fn(dataset_test, eval_batch_size, map_fn=strip_dict_arg(model_cls.eval_map_fn),
                           shuffle_and_repeat=False),
            hooks=[],
            checkpoint_path=checkpoint_path)
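The eval_index branch above appears to shard the checkpoint list across eight parallel evaluation workers, each taking every eighth entry. A minimal standalone sketch of that selection logic, using made-up checkpoint paths:

def index_filter(fn, col):
    return [el for i, el in enumerate(col) if fn(i, el)]

checkpoint_paths = ['model.ckpt-{}'.format(s) for s in range(0, 1600, 100)]
# Worker 2 of 8 picks the checkpoints at list positions 2 and 10.
print(index_filter(lambda i, _: i % 8 == 2, checkpoint_paths))
# ['model.ckpt-200', 'model.ckpt-1000']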
Example #2
def train(model: tf.estimator.Estimator,
          nb_epochs: int,
          train_data_path: str,
          val_data_path: str,
          batch_size: int = 32):

    # Record pre-training metrics as an epoch-0 baseline.
    train_epoch_history = [
        model.evaluate(
            input_fn=lambda: load_dataset(train_data_path, shuffle=False))
    ]
    validation_epoch_history = [
        model.evaluate(
            input_fn=lambda: load_dataset(val_data_path, shuffle=False))
    ]
    for epoch in range(nb_epochs):
        # Train for one epoch on the training data.
        model.train(
            input_fn=lambda: load_dataset(train_data_path,
                                          epochs=1,
                                          shuffle=True,
                                          batch_size=batch_size))

        train_epoch_history.append(
            model.evaluate(
                input_fn=lambda: load_dataset(train_data_path, shuffle=False)))
        validation_epoch_history.append(
            model.evaluate(
                input_fn=lambda: load_dataset(val_data_path, shuffle=False)))

        logging.info(f"EPOCH: {epoch}:\n"
                     f"\tval_loss: {validation_epoch_history[-1]['loss']}\n"
                     f"\ttrain_loss: {train_epoch_history[-1]['loss']}\n")

    return train_epoch_history, validation_epoch_history
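Example #2 relies on a project-specific load_dataset helper that is not shown. A minimal sketch of what such a helper could look like, assuming TFRecord input and an invented feature spec (the real feature names, shapes, and types would come from how the records were written):

import tensorflow as tf

def load_dataset(path, epochs=1, shuffle=False, batch_size=32):
    # Hypothetical schema; adjust to the actual serialized examples.
    feature_spec = {
        'x': tf.io.FixedLenFeature([10], tf.float32),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }

    def parse(record):
        parsed = tf.io.parse_single_example(record, feature_spec)
        return {'x': parsed['x']}, parsed['label']

    ds = tf.data.TFRecordDataset(path).map(parse)
    if shuffle:
        ds = ds.shuffle(buffer_size=10000)
    return ds.repeat(epochs).batch(batch_size)

Because each input_fn above calls load_dataset lazily, the tf.data pipeline is built inside the Estimator's own graph, which is what tf.estimator expects.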
Example #3
def train_and_test(estimator: tf.estimator.Estimator, train_input_fn,
                   test_input_fn, steps, steps_between_evals, eval_steps):
    # Evaluate once before any training as a baseline.
    eval_results = estimator.evaluate(input_fn=test_input_fn, steps=eval_steps)
    print(eval_results)

    # Alternate between training for steps_between_evals steps and evaluating.
    for _ in range(steps // steps_between_evals):
        estimator.train(
            input_fn=train_input_fn,
            steps=steps_between_evals,
        )
        eval_results = estimator.evaluate(input_fn=test_input_fn,
                                          steps=eval_steps)
        print(eval_results)
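A hedged usage sketch for train_and_test, assuming TF 1.x, a canned DNNClassifier, and synthetic NumPy data (all names and numbers below are illustrative):

import numpy as np
import tensorflow as tf

feature_columns = [tf.feature_column.numeric_column('x', shape=[4])]
estimator = tf.estimator.DNNClassifier(hidden_units=[16, 16],
                                       feature_columns=feature_columns,
                                       n_classes=3)

x_train = np.random.rand(120, 4).astype(np.float32)
y_train = np.random.randint(0, 3, size=120)
x_test = np.random.rand(30, 4).astype(np.float32)
y_test = np.random.randint(0, 3, size=30)

train_input_fn = tf.estimator.inputs.numpy_input_fn(
    {'x': x_train}, y_train, batch_size=16, num_epochs=None, shuffle=True)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
    {'x': x_test}, y_test, batch_size=16, num_epochs=1, shuffle=False)

train_and_test(estimator, train_input_fn, test_input_fn,
               steps=1000, steps_between_evals=200, eval_steps=None)

With num_epochs=1 on the test input and eval_steps=None, each evaluation runs once over the whole test set.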
Example #4
def evaluate(model: tf.estimator.Estimator,
             features: pd.DataFrame,
             labels: pd.DataFrame,
             steps: int = None):
    '''Check the MSE on the validation set.'''

    ds = make_dataset(features, labels)

    results = model.evaluate(train_fn(ds, shuffle=False, repeat=1),
                             steps=steps)

    for stat_name, stat_value in results.items():
        print(f"{stat_name:>20} | {stat_value}")

    return results
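Example #4 also assumes two project helpers, make_dataset and train_fn. One possible sketch, written so the tf.data pipeline is constructed inside the input_fn (as Estimators expect); the column handling is an assumption:

import pandas as pd
import tensorflow as tf

def make_dataset(features: pd.DataFrame, labels: pd.DataFrame):
    # Keep the raw arrays together; the graph-side pipeline is built later.
    return {name: col.values for name, col in features.items()}, labels.values

def train_fn(data, shuffle=True, repeat=None, batch_size=32):
    feature_arrays, label_array = data

    def input_fn():
        ds = tf.data.Dataset.from_tensor_slices((feature_arrays, label_array))
        if shuffle:
            ds = ds.shuffle(buffer_size=10000)
        return ds.repeat(repeat).batch(batch_size)

    return input_fn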