Code Example #1
def train(model):
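    """Custom train and eval function, eval between epochs."""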
    print('Evaluate every {} epochs'.format(FLAGS.epochs_per_eval))
    for n in range(FLAGS.train_epochs // FLAGS.epochs_per_eval):
        tf.logging.info('START TRAIN AT EPOCH {}'.format(FLAGS.epochs_per_eval*n + 1))
        t0 = time.time()
        model.train(input_fn=lambda: input_fn(FLAGS.train_data, FLAGS.epochs_per_eval, FLAGS.batch_size),
                    hooks=None,
                    steps=None,
                    max_steps=None,
                    saving_listeners=None)
        tf.logging.info('Finish train {} epochs, take {} mins'.format(n + 1, elapse_time(t0)))
        print('-' * 80)

        t0 = time.time()
        results = model.evaluate(input_fn=lambda: input_fn(FLAGS.test_data, 1, FLAGS.batch_size, False),
                                 steps=None,  # Number of steps for which to evaluate model.
                                 hooks=None,
                                 checkpoint_path=None,  # If None, the latest checkpoint in model_dir is used.
                                 name=None)
        tf.logging.info('Finish evaluation, take {} mins'.format(elapse_time(t0)))
        print('-' * 80)

        # Display evaluation metrics
        for key in sorted(results):
            print('{}: {}'.format(key, results[key]))
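Both of the loops in this listing report timings through an elapse_time(t0) helper that is not shown in the snippets. A minimal sketch, assuming the helper simply returns the minutes elapsed since t0; the name and return format are inferred from the call sites, not taken from the source:

import time

def elapse_time(start_time):
    # Minutes elapsed since start_time, rounded to two decimals.
    return round((time.time() - start_time) / 60.0, 2)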
Code Example #2
def train(model):
    """Custom train and eval function, eval between epochs."""
    tf.logging.info('Evaluate every {} epochs'.format(FLAGS.epochs_per_eval))
    best_auc, best_logloss, best_epoch = 0, 10000, 0  # saving best auc and logloss
    for n in range(FLAGS.train_epochs // FLAGS.epochs_per_eval):
        tf.logging.info(
            'START TRAIN AT EPOCH {}'.format(FLAGS.epochs_per_eval * n + 1))
        t0 = time.time()
        model.train(input_fn=lambda:
                    input_fn(CONFIG, FLAGS.train_data, FLAGS.epochs_per_eval,
                             FLAGS.batch_size, True, FLAGS.num_samples),
                    hooks=None,
                    steps=None,
                    max_steps=None,
                    saving_listeners=None)
        tf.logging.info('Finish train {} epochs, take {} mins'.format(
            n + 1, elapse_time(t0)))
        print('-' * 80)

        t0 = time.time()
        results = model.evaluate(
            input_fn=lambda: input_fn(CONFIG, FLAGS.dev_data, 1,
                                      FLAGS.batch_size, False),
            steps=None,  # Number of steps for which to evaluate model.
            hooks=None,
            checkpoint_path=None,  # If None, the latest checkpoint in model_dir is used.
            name=None)
        tf.logging.info('Finish evaluation, take {} mins'.format(
            elapse_time(t0)))
        print('-' * 80)

        # Display evaluation metrics
        print('Evaluation metrics at epoch {}: (* means improve)'.format(n + 1))
        improve_auc_token, improve_loss_token = "", ""
        for key in sorted(results):
            value = results[key]
            print('\t{}: {}'.format(key, value))
            if key == "auc" and value > best_auc:
                best_auc = value
                improve_auc_token = "*"
            elif key == "average_loss" and value < best_logloss:
                best_logloss = value
                improve_loss_token = "*"

        if improve_loss_token or improve_auc_token:
            best_epoch = n + 1
        print("\nMAX AUC={:.6f} {}\nMIN LOSS={:.6f} {}".format(
            best_auc, improve_auc_token, best_logloss, improve_loss_token))
        print('-' * 80)

        # Early stopping after 3 epochs with no improvement.
        if n + 1 - best_epoch >= 3:
            exit("No improvement for 3 epochs, early stopping.")
Code Example #3
def train_and_eval(model):
    """tf.estimator train and eval function, eval between steps."""
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: input_fn(CONFIG, FLAGS.train_data, FLAGS.train_epochs,
                                  FLAGS.batch_size, True, FLAGS.num_samples),
        max_steps=None)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=lambda: input_fn(CONFIG, FLAGS.dev_data, 1, FLAGS.batch_size,
                                  False),
        steps=FLAGS.steps_per_eval,
        start_delay_secs=180,
        throttle_secs=60,
    )
    tf.estimator.train_and_evaluate(model, train_spec, eval_spec)
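For reference, a hedged sketch of how such a train_and_eval function is typically driven from a TF 1.x entry point; build_custom_estimator, input_fn, and the FLAGS definitions are assumed to live elsewhere in the same repository and are not defined here:

import os

def main(_):
    model_dir = os.path.join(FLAGS.model_dir, FLAGS.model_type)
    model = build_custom_estimator(model_dir, FLAGS.model_type)
    train_and_eval(model)

if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main=main)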
Code Example #4
def main(_):
    print("Using TensorFlow version %s" % tf.__version__)
    assert "1.4" <= tf.__version__, "TensorFlow r1.4 or later is needed"
    print('Model type: {}'.format(FLAGS.model_type))
    model_dir = os.path.join(FLAGS.model_dir, FLAGS.model_type)
    print('Model directory: {}'.format(model_dir))

    model = build_custom_estimator(model_dir, FLAGS.model_type)
    tf.logging.info('Build estimator: {}'.format(model))

    tf.logging.info('=' * 30 + ' START TESTING ' + '=' * 30)
    s_time = time.time()
    results = model.evaluate(
        input_fn=lambda: input_fn(FLAGS.test_data, 1, FLAGS.batch_size, False),
        steps=None,  # Number of steps for which to evaluate model.
        hooks=None,
        checkpoint_path=FLAGS.checkpoint_path,  # If None, the latest checkpoint is used.
        name=None)
    tf.logging.info('=' * 30 + ' FINISH TESTING, TAKE {} '.format(
        elapse_time(s_time)) + '=' * 30)
    # Display evaluation metrics
    print('-' * 80)
    for key in sorted(results):
        print('%s: %s' % (key, results[key]))
Code Example #5
def train_and_eval(model):
    train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(FLAGS.train_data, 1, FLAGS.batch_size), max_steps=10000)
    eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(FLAGS.eval_data, 1, FLAGS.batch_size, False))
    tf.estimator.train_and_evaluate(model, train_spec, eval_spec)
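Every example above calls an input_fn that is defined elsewhere in the repository. A minimal sketch of what a matching TF 1.x tf.data input function could look like for the four-argument form used in examples #1, #4, and #5; the CSV column names and defaults below are purely illustrative assumptions, not taken from the source:

import tensorflow as tf

_CSV_COLUMNS = ['feature_a', 'feature_b', 'label']  # hypothetical column layout
_CSV_DEFAULTS = [[0.0], [0.0], [0]]

def input_fn(data_file, num_epochs, batch_size, shuffle=True):
    """Build a tf.data pipeline that yields (features, label) batches."""
    def parse_csv(line):
        columns = tf.decode_csv(line, record_defaults=_CSV_DEFAULTS)
        features = dict(zip(_CSV_COLUMNS, columns))
        label = features.pop('label')
        return features, label

    dataset = tf.data.TextLineDataset(data_file).map(parse_csv, num_parallel_calls=4)
    if shuffle:
        dataset = dataset.shuffle(buffer_size=10000)
    dataset = dataset.repeat(num_epochs).batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()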