def testMultiEvalStepIncrements(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')

    # Train a model for a single step to get a checkpoint.
    self._train_model(checkpoint_dir, num_steps=1)
    checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

    # Create the model so we have something to restore.
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    logistic_classifier(inputs)

    num_evals = 6

    my_var = local_variable(0.0, name='MyVar')
    # eval_ops also increments the eval step, on top of the increment that
    # _evaluate_once appends after each run, so the step advances by 2 per
    # run and only num_evals // 2 runs execute.
    eval_ops = [state_ops.assign_add(my_var, 1.0),
                state_ops.assign_add(
                    evaluation._get_or_create_eval_step(), 1, use_locking=True)]
    expect_eval_update_counts = num_evals // 2

    final_ops = array_ops.identity(my_var)

    final_ops_values = evaluation._evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=eval_ops,
        final_ops={'value': final_ops},
        hooks=[evaluation._StopAfterNEvalsHook(num_evals),])
    self.assertEqual(final_ops_values['value'], expect_eval_update_counts)
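
Why num_evals // 2: _evaluate_once appends its own assign_add on the eval step
after each run of eval_ops, so with the extra increment above the step advances
by 2 per run. A minimal plain-Python sketch of that arithmetic (no TensorFlow;
the variable names are illustrative):

num_evals = 6
eval_step, my_var = 0, 0.0
while eval_step < num_evals:      # _StopAfterNEvalsHook's stopping condition
    my_var += 1.0                 # first op in eval_ops
    eval_step += 1                # the extra increment added to eval_ops above
    eval_step += 1                # the increment _evaluate_once appends per run
assert my_var == num_evals // 2   # 3 runs, not 6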
  def testEvaluateWithFiniteInputs(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_with_finite_inputs')

    # Train a model to completion.
    self._train_model(checkpoint_dir, num_steps=300)

    # Run evaluation. Inputs are fed through an input producer for one epoch.
    all_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    all_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    single_input, single_label = training.slice_input_producer(
        [all_inputs, all_labels], num_epochs=1)
    inputs, labels = training.batch([single_input, single_label], batch_size=6,
                                    allow_smaller_final_batch=True)

    logits = logistic_classifier(inputs)
    predictions = math_ops.round(logits)

    accuracy, update_op = metrics.accuracy(
        predictions=predictions, labels=labels)

    checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

    final_ops_values = evaluation._evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy,
                   'eval_steps': evaluation._get_or_create_eval_step()},
        hooks=[evaluation._StopAfterNEvalsHook(None),])
    self.assertTrue(final_ops_values['accuracy'] > .99)
    # Evaluation runs for 4 iterations: the first two process a full batch of 6
    # inputs each, the third processes the remaining 4 inputs, and the fourth
    # hits the end of the one-epoch input queue, which stops evaluation.
    self.assertEqual(final_ops_values['eval_steps'], 4)
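
The eval_steps arithmetic as a standalone sketch (plain Python; the 16-example
count comes from the comment above, and the final run is counted per the
behavior this test documents):

num_examples, batch_size = 16, 6
full_batches, remainder = divmod(num_examples, batch_size)  # 2 full, 4 left
runs_with_data = full_batches + (1 if remainder else 0)     # 3 runs see data
eval_steps = runs_with_data + 1   # the 4th run hits end-of-epoch and stops
assert eval_steps == 4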
 def after_run(self, run_context, run_values):
     # To avoid a race between the eval-step read and its increment in the
     # evaluation graph, read the value explicitly here.
     eval_steps = run_context.session.run(
         evaluation._get_or_create_eval_step())
     self._test_case.assertEqual(expected_eval_steps, eval_steps)
     self._test_case.assertFalse(self._invoked)
     self._invoked = True
Example #5
 def begin(self):
     if self._summary_writer is None and self._output_dir:
         self._summary_writer = SummaryWriterCache.get(self._output_dir)
     self._next_step = None
     self._global_step_tensor = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
     if self._global_step_tensor is None:
         raise RuntimeError(
             "Global step should be created to use SummarySaverHook.")
Example #7
 def before_run(self, run_context):
   return session_run_hook.SessionRunArgs({
       'eval_steps': evaluation._get_or_create_eval_step()
   })
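
A hypothetical companion after_run (not from the source repo), showing where
the fetches requested above arrive under the SessionRunHook contract:

 def after_run(self, run_context, run_values):
   # Fetches returned from before_run come back in run_values.results.
   eval_steps = run_values.results['eval_steps']
   print('eval steps so far:', eval_steps)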
Example #8
        def build_graph():

            encoder_decoder = config.encoder_decoder
            input_size = encoder_decoder.input_size
            num_classes = encoder_decoder.num_classes
            default_event_label = encoder_decoder.default_event_label

            batch_size = config.batch_size
            label_shape = []
            learning_rate = config.learning_rate
            inputs, labels, lengths, composers = None, None, None, None
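            # Input handling depends on mode: 'train' and 'eval' read padded
            # batches from examples_path, while 'generate' feeds inputs through
            # a placeholder instead.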
            if mode in ('train', 'eval'):
                if config.label_classifier_units:
                    inputs, labels, lengths, composers = mg.common.get_padded_batch_metadata(
                        examples_path,
                        batch_size,
                        input_size,
                        label_shape=label_shape,
                        shuffle=mode == 'train',
                        composer_shape=config.label_classifier_units,
                        num_enqueuing_threads=config.threads)
                    inputs = tf.debugging.check_numerics(
                        inputs, "Inputs invalid")
                    labels = tf.cast(
                        tf.debugging.check_numerics(
                            tf.cast(labels, tf.float32), "Labels invalid"),
                        tf.int64)
                    lengths = tf.cast(
                        tf.debugging.check_numerics(
                            tf.cast(lengths, tf.float32), "Lengths invalid"),
                        tf.int32)
                    composers = tf.cast(
                        tf.debugging.check_numerics(
                            tf.cast(composers, tf.float32),
                            "Composers invalid"), tf.int64)
                else:
                    inputs, labels, lengths = mg.common.get_padded_batch(
                        examples_path,
                        batch_size,
                        input_size,
                        label_shape=label_shape,
                        shuffle=mode == 'train',
                        num_enqueuing_threads=config.threads)
                    # assert not tf.debugging.is_nan(inputs)
                    # assert not tf.debugging.is_nan(labels)
                    # assert not tf.debugging.is_nan(lengths)
            else:
                inputs = tf.placeholder(tf.float32,
                                        [batch_size, None, input_size])
            config.dropout = 1.0 if mode == 'generate' else config.dropout

            outputs, initial_state, final_state = None, None, None
            if config.gpu:
                tf.logging.info("Using CudNN")
                outputs, initial_state, final_state = get_cudnn(
                    inputs, config.rnn_layers, config.dropout, batch_size,
                    mode)
            else:
                cell = get_deep_lstm(config.rnn_layers, config.dropout)
                initial_state = cell.zero_state(batch_size, tf.float32)
                outputs, final_state = tf.nn.dynamic_rnn(
                    cell,
                    inputs,
                    sequence_length=lengths,
                    initial_state=initial_state,
                    swap_memory=True)

            outputs = tf.debugging.check_numerics(outputs, "outputs invalid")
            outputs_flat = mg.common.flatten_maybe_padded_sequences(
                outputs, lengths)
            outputs_flat = tf.debugging.check_numerics(outputs_flat,
                                                       "outputs_flat invalid")

            num_logits = num_classes

            logits_flat = tf.contrib.layers.linear(outputs_flat, num_logits)

            if config.label_classifier_weight:
                composer_logits = tf.layers.dense(
                    final_state[-1].h, config.label_classifier_units)
                composer_predictions = tf.argmax(composer_logits, axis=1)

            if mode in ('train', 'eval'):
                labels_flat = mg.common.flatten_maybe_padded_sequences(
                    labels, lengths)
                labels_flat = tf.cast(
                    tf.debugging.check_numerics(
                        tf.cast(labels_flat, tf.float32),
                        "labels_flat invalid"), tf.int64)
                logits_flat = tf.debugging.check_numerics(
                    logits_flat, "logits_flat invalid")
                softmax_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=labels_flat, logits=logits_flat)
                softmax_cross_entropy = tf.debugging.check_numerics(
                    softmax_cross_entropy, "softmax_cross_entropy invalid")

                if config.label_classifier_weight:
                    composer_softmax_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
                        labels=composers, logits=composer_logits)

                predictions_flat = tf.argmax(logits_flat, axis=1)
                correct_predictions = tf.to_float(
                    tf.equal(labels_flat, predictions_flat))

                loss = None

                # Predict our composer to enforce structure on embeddings
                if mode == 'train':
                    global_step = tf.Variable(-1, trainable=False)
                    if config.label_classifier_weight:

                        tf.add_to_collection('composer_logits',
                                             tf.nn.softmax(composer_logits))

                        composer_loss = tf.reduce_mean(
                            composer_softmax_cross_entropy)
                        lstm_loss = tf.reduce_mean(softmax_cross_entropy)

                        tf.add_to_collection('composer_loss', composer_loss)
                        tf.add_to_collection('lstm_loss', lstm_loss)
                        tf.summary.scalar('composer_loss', composer_loss)
                        tf.summary.scalar('lstm_loss', lstm_loss)

                        decay_steps = config.decay_steps
                        classifier_weight = tf.Variable(
                            config.label_classifier_weight, trainable=False)
                        classifier_weight = classifier_weight - tf.train.polynomial_decay(
                            config.label_classifier_weight,
                            global_step,
                            decay_steps,
                            0.0,
                            power=0.2)
                        composer_loss = classifier_weight * composer_loss
                        lstm_loss = (1 - classifier_weight) * lstm_loss

                        tf.add_to_collection('composer_weighting',
                                             classifier_weight)
                        tf.summary.scalar('composer_weight', classifier_weight)
                        # composer_loss = tf.maximum(tf.Variable(1e-07), composer_loss)

                        composer_loss = tf.debugging.check_numerics(
                            composer_loss, "composer_loss invalid")
                        lstm_loss = tf.debugging.check_numerics(
                            lstm_loss, "lstm_loss invalid")

                        loss = tf.add(lstm_loss, composer_loss)
                        loss = tf.debugging.check_numerics(
                            loss, "loss invalid")

                        tf.add_to_collection('loss', loss)
                        tf.summary.scalar('loss', loss)

                    else:
                        tf.logging.info("Building normal graph.")
                        loss = tf.reduce_mean(softmax_cross_entropy)
                        tf.add_to_collection('loss', loss)
                        tf.summary.scalar('loss', loss)

                    perplexity = tf.exp(loss)
                    accuracy = tf.reduce_mean(correct_predictions)
                    tf.add_to_collection('perplexity', perplexity)
                    tf.add_to_collection('accuracy', accuracy)

                    tf.summary.scalar('perplexity', perplexity)
                    tf.summary.scalar('accuracy', accuracy)

                    optimizer = config.optimizer(learning_rate=learning_rate,
                                                 momentum=config.momentum)
                    train_op = tf.contrib.slim.learning.create_train_op(
                        loss,
                        optimizer,
                        global_step=global_step,
                        clip_gradient_norm=config.norm)

                    tf.add_to_collection('global_step', global_step)
                    tf.add_to_collection('train_op', train_op)
                    tf.add_to_collection('optimizer', optimizer)
                elif mode == 'eval':
                    global_step = evaluation._get_or_create_eval_step()
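                    # No training loop runs in this branch, so the eval step
                    # from evaluation._get_or_create_eval_step() stands in for
                    # the global step when exporting metrics.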
                    metric_map = {
                        'loss':
                        tf.metrics.mean(softmax_cross_entropy),
                        'metrics/accuracy':
                        tf.metrics.accuracy(labels_flat, predictions_flat),
                        'metrics/per_class_accuracy':
                        tf.metrics.mean_per_class_accuracy(
                            labels_flat, predictions_flat, num_classes)
                    }
                    if config.label_classifier_weight:
                        metric_map['composer_loss'] = tf.metrics.mean(
                            composer_softmax_cross_entropy)
                        metric_map[
                            'metrics/composer_accuracy'] = tf.metrics.accuracy(
                                composers, composer_logits)
                        metric_map[
                            'metrics/composer_per_class_accuracy'] = tf.metrics.mean_per_class_accuracy(
                                composers, composer_logits,
                                config.label_classifier_units)

                    vars_to_summarize, update_ops = tf.contrib.metrics.aggregate_metric_map(
                        metric_map)
                    for updates_op in update_ops.values():
                        tf.add_to_collection('eval_ops', updates_op)
                    vars_to_summarize['metrics/perplexity'] = tf.exp(
                        vars_to_summarize['loss'])
                    if config.label_classifier_weight:
                        vars_to_summarize[
                            'metrics/composer_perplexity'] = tf.exp(
                                vars_to_summarize['composer_loss'])
                    for var_name, var_value in six.iteritems(
                            vars_to_summarize):
                        tf.summary.scalar(var_name, var_value)
                        tf.add_to_collection(var_name, var_value)

                    tf.add_to_collection('global_step', global_step)
            elif mode == 'generate':
                if config.label_classifier_weight:
                    composer_softmax = tf.nn.softmax(composer_logits)
                    tf.add_to_collection("composer_softmax", composer_softmax)

                temperature = tf.placeholder(tf.float32, [])
                softmax_flat = tf.nn.softmax(
                    tf.div(logits_flat, tf.fill([num_classes], temperature)))
                softmax = tf.reshape(softmax_flat,
                                     [batch_size, -1, num_classes])

                # if config.label_classifier_weight:

                tf.add_to_collection('inputs', inputs)
                tf.add_to_collection('temperature', temperature)
                tf.add_to_collection('softmax', softmax)

                for state in tf_nest.flatten(initial_state):
                    tf.add_to_collection('initial_state', state)
                for state in tf_nest.flatten(final_state):
                    tf.add_to_collection('final_state', state)
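
One non-obvious detail above is the classifier-weight schedule:
tf.train.polynomial_decay decays from label_classifier_weight toward 0.0, so
subtracting the decayed value ramps the composer-loss weight up from 0 to its
configured value over decay_steps. A small standalone check of that arithmetic
(plain Python, assuming polynomial_decay's documented formula with an end value
of 0.0):

def classifier_weight(w0, step, decay_steps, power=0.2):
    # Mirrors: w0 - polynomial_decay(w0, step, decay_steps, 0.0, power=power)
    frac = min(step, decay_steps) / decay_steps
    return w0 - w0 * (1.0 - frac) ** power

assert classifier_weight(0.5, 0, 1000) == 0.0                 # starts at 0
assert abs(classifier_weight(0.5, 1000, 1000) - 0.5) < 1e-12  # ends at w0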
Example #9
 def before_run(self, run_context):
     return tf.compat.v1.train.SessionRunArgs(
         {'eval_steps': evaluation._get_or_create_eval_step()})