def evaluate_once():
        """Evaluates the model for one time."""
        _, status, _ = utils.create_checkpoint(log_dir_path,
                                               model=classifier,
                                               ema_model=ema_classifier,
                                               global_step=global_step)
        status.expect_partial()
        logging.info('Last checkpoint [iteration: %d] restored from %s.',
                     global_step.numpy(), log_dir_path)

        if global_step.numpy() >= FLAGS.max_iteration:
            nonlocal evaluated_last_ckpt
            evaluated_last_ckpt = True

        top_1_accuracy = tf.keras.metrics.CategoricalAccuracy()
        top_5_accuracy = tf.keras.metrics.TopKCategoricalAccuracy(k=5)
        for inputs in dataset:
            if FLAGS.input_features_dim > 0:
                features = inputs[common_module.KEY_FEATURES]
            else:
                features, _ = pipelines.create_model_input(
                    inputs, common_module.MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT,
                    keypoint_profile_2d)
            features = tf.squeeze(features, axis=1)
            features = features[:, ::FLAGS.downsample_rate, :]
            labels = inputs[common_module.KEY_CLASS_TARGETS]
            labels = tf.squeeze(labels, axis=1)

            if FLAGS.use_moving_average:
                outputs = ema_classifier(features, training=False)
            else:
                outputs = classifier(features, training=False)
            top_1_accuracy.update_state(y_true=labels, y_pred=outputs)
            top_5_accuracy.update_state(y_true=labels, y_pred=outputs)

        nonlocal top_1_best_accuracy
        if (top_1_best_accuracy is None
                or top_1_accuracy.result().numpy() > top_1_best_accuracy):
            top_1_best_accuracy = top_1_accuracy.result().numpy()

        nonlocal top_5_best_accuracy
        if (top_5_best_accuracy is None
                or top_5_accuracy.result().numpy() > top_5_best_accuracy):
            top_5_best_accuracy = top_5_accuracy.result().numpy()

        tf.summary.scalar('eval/Basic/Top1_Accuracy',
                          top_1_accuracy.result(),
                          step=global_step.numpy())
        tf.summary.scalar('eval/Best/Top1_Accuracy',
                          top_1_best_accuracy,
                          step=global_step.numpy())
        tf.summary.scalar('eval/Basic/Top5_Accuracy',
                          top_5_accuracy.result(),
                          step=global_step.numpy())
        tf.summary.scalar('eval/Best/Top5_Accuracy',
                          top_5_best_accuracy,
                          step=global_step.numpy())
        logging.info('Top-1 accuracy: {:.2f}'.format(
            top_1_accuracy.result().numpy()))
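
The function above is meant to be called repeatedly, restoring the newest checkpoint on every call. A minimal sketch of an outer polling loop that could drive it, using tf.train.checkpoints_iterator (this driver, its names, and the timeout value are illustrative assumptions, not part of the snippet; it also assumes it shares the enclosing scope that defines evaluate_once, log_dir_path, and evaluated_last_ckpt):

    # Illustrative driver loop (assumption, not the original code).
    # tf.train.checkpoints_iterator blocks until a new checkpoint file
    # appears in log_dir_path, then yields its path.
    def run_evaluation_loop(timeout_secs=3600):
        for _ in tf.train.checkpoints_iterator(log_dir_path,
                                               timeout=timeout_secs):
            evaluate_once()
            if evaluated_last_ckpt:  # Set once global_step hits max_iteration.
                break
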
    def train_one_iteration(inputs):
        """Trains the model for one iteration.

        Args:
          inputs: A dictionary for training inputs.

        Returns:
          A dictionary of training losses for this iteration.
        """
        _, side_outputs = pipelines.create_model_input(
            inputs, common_module.MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT,
            keypoint_profile_2d)

        keypoints_2d = side_outputs[
            common_module.KEY_PREPROCESSED_KEYPOINTS_2D]
        keypoints_2d = tf.squeeze(keypoints_2d, axis=1)
        features = keypoints_2d[:, ::FLAGS.downsample_rate, Ellipsis]
        labels = inputs[common_module.KEY_CLASS_TARGETS]
        labels = tf.squeeze(labels, axis=1)

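        # Fold the time dimension into the batch so the per-frame encoder
        # sees [batch * frames, joints, dim]; the frames are restored below.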
        batch_size, num_frames, num_joints, feature_dim = features.shape
        features = tf.reshape(features, (-1, num_joints, feature_dim))
        _, features = encoder(features, training=False)
        features = features[FLAGS.encoder_output_activation]
        features = tf.reshape(features, (batch_size, num_frames, -1))
        if (FLAGS.encoder_output_activation
                == 'embedder') and (FLAGS.encoder_algorithm_type !=
                                    algorithms.TYPE_ALGORITHM_ALIGN):
            features, _ = tf.split(features,
                                   num_or_size_splits=[
                                       FLAGS.encoder_pose_embedding_dim,
                                       FLAGS.encoder_view_embedding_dim
                                   ],
                                   axis=-1)

        with tf.GradientTape() as tape:
            outputs = classifier(features, training=True)
            regularization_loss = sum(classifier.losses)
            crossentropy_loss = loss_object(labels, outputs)
            total_loss = crossentropy_loss + regularization_loss

        trainable_variables = classifier.trainable_variables
        grads = tape.gradient(total_loss, trainable_variables)
        optimizer.apply_gradients(zip(grads, trainable_variables))

        for grad, trainable_variable in zip(grads, trainable_variables):
            tf.summary.scalar('summarize_grads/' + trainable_variable.name,
                              tf.linalg.norm(grad),
                              step=global_step)

        return dict(total_loss=total_loss,
                    crossentropy_loss=crossentropy_loss,
                    regularization_loss=regularization_loss)
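
The tf.split at the end of the feature pipeline above separates the concatenated embedding into its pose and view components by size. A self-contained demonstration of that call pattern (the dimensions 16 and 8 are made-up stand-ins for encoder_pose_embedding_dim and encoder_view_embedding_dim):

import tensorflow as tf

embedding = tf.random.normal([4, 10, 24])  # [batch, frames, 16 + 8]
pose, view = tf.split(embedding, num_or_size_splits=[16, 8], axis=-1)
print(pose.shape, view.shape)  # (4, 10, 16) (4, 10, 8)
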
  def train_one_iteration(inputs):
    """Trains the model for one iteration.

    Args:
      inputs: A dictionary for training inputs.

    Returns:
      A dictionary of training losses for this iteration.
    """
    if FLAGS.input_features_dim > 0:
      features = inputs[common_module.KEY_FEATURES]
    else:
      features, _ = pipelines.create_model_input(
          inputs, common_module.MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT,
          keypoint_profile_2d)

    features = tf.squeeze(features, axis=1)
    features = features[:, ::FLAGS.downsample_rate, :]
    labels = inputs[common_module.KEY_CLASS_TARGETS]
    labels = tf.squeeze(labels, axis=1)

    with tf.GradientTape() as tape:
      outputs = classifier(features, training=True)
      regularization_loss = sum(classifier.losses)
      crossentropy_loss = loss_object(labels, outputs)
      total_loss = crossentropy_loss + regularization_loss

    trainable_variables = classifier.trainable_variables
    grads = tape.gradient(total_loss, trainable_variables)
    optimizer.apply_gradients(zip(grads, trainable_variables))

    for grad, trainable_variable in zip(grads, trainable_variables):
      tf.summary.scalar(
          'summarize_grads/' + trainable_variable.name,
          tf.linalg.norm(grad),
          step=global_step)

    return dict(
        total_loss=total_loss,
        crossentropy_loss=crossentropy_loss,
        regularization_loss=regularization_loss)
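
All of the train_one_iteration variants on this page follow the same TF2 custom-training pattern: a forward pass under tf.GradientTape, the model's regularization losses added to the data loss, then gradients computed and applied. A stripped-down, self-contained version of the pattern with a toy model (every name here is illustrative, not from the original code):

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(
        10, kernel_regularizer=tf.keras.regularizers.l2(1e-4)),
])
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()

def train_step(features, labels):
  with tf.GradientTape() as tape:
    outputs = model(features, training=True)
    # model.losses collects the per-layer regularization terms.
    total_loss = loss_object(labels, outputs) + sum(model.losses)
  grads = tape.gradient(total_loss, model.trainable_variables)
  optimizer.apply_gradients(zip(grads, model.trainable_variables))
  return total_loss
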
Example #4
  def train_one_iteration(inputs):
    """Trains the model for one iteration.

    Args:
      inputs: A dictionary for training inputs.

    Returns:
      The training loss for this iteration.
    """
    _, side_outputs = pipelines.create_model_input(
        inputs, FLAGS.model_input_keypoint_type, keypoint_profile_2d,
        keypoint_profile_3d)

    keypoints_2d = side_outputs[common_module.KEY_PREPROCESSED_KEYPOINTS_2D]
    keypoints_3d, _ = keypoint_preprocessor_3d(
        inputs[common_module.KEY_KEYPOINTS_3D],
        keypoint_profile_3d,
        normalize_keypoints_3d=True)
    keypoints_2d, keypoints_3d = data_utils.shuffle_batches(
        [keypoints_2d, keypoints_3d])

    return model.train((keypoints_2d, keypoints_3d), **optimizers)
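
data_utils.shuffle_batches shuffles the 2D and 3D keypoint batches with a single shared permutation, so corresponding 2D/3D pairs stay aligned. A plausible re-implementation of that behavior (an assumption about the helper, not its actual source):

import tensorflow as tf

def shuffle_batches(tensors):
  # One permutation of the batch dimension, applied to every tensor.
  indices = tf.random.shuffle(tf.range(tf.shape(tensors[0])[0]))
  return [tf.gather(t, indices, axis=0) for t in tensors]
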
  def test_create_model_input(self):
    keypoint_profile_2d = keypoint_profiles.KeypointProfile2D(
        name='Dummy',
        keypoint_names=[('A', keypoint_profiles.LeftRightType.UNKNOWN),
                        ('B', keypoint_profiles.LeftRightType.UNKNOWN),
                        ('C', keypoint_profiles.LeftRightType.UNKNOWN)],
        offset_keypoint_names=['A', 'B'],
        scale_keypoint_name_pairs=[(['A', 'B'], ['B']), (['A'], ['B', 'C'])],
        segment_name_pairs=[],
        scale_distance_reduction_fn=tf.math.reduce_sum,
        scale_unit=1.0)

    # Shape = [2, 3, 2].
    keypoints_2d = tf.constant([[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]],
                                [[10.0, 11.0], [12.0, 13.0], [14.0, 15.0]]])
    keypoint_scores_2d = tf.ones(keypoints_2d.shape, dtype=tf.float32)

    inputs = {
        common.KEY_KEYPOINTS_2D: keypoints_2d,
        common.KEY_KEYPOINTS_3D: None,
        common.KEY_KEYPOINT_SCORES_2D: keypoint_scores_2d,
        common.KEY_IMAGE_SIZES: tf.ones((1, 2))
    }
    features, side_outputs = pipelines.create_model_input(
        inputs,
        model_input_keypoint_type=common.MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT,
        normalize_keypoints_2d=True,
        keypoint_profile_2d=keypoint_profile_2d)

    sqrt_2 = 1.414213562
    self.assertAllClose(features,
                        [[
                            -0.25 / sqrt_2, -0.25 / sqrt_2, 0.25 / sqrt_2,
                            0.25 / sqrt_2, 0.75 / sqrt_2, 0.75 / sqrt_2
                        ],
                         [
                             -0.25 / sqrt_2, -0.25 / sqrt_2, 0.25 / sqrt_2,
                             0.25 / sqrt_2, 0.75 / sqrt_2, 0.75 / sqrt_2
                         ]])
    self.assertCountEqual(side_outputs.keys(), [
        'preprocessed_keypoints_2d', 'preprocessed_keypoint_masks_2d',
        'offset_points_2d', 'scale_distances_2d', 'keypoints_2d',
        'keypoint_masks_2d'
    ])
    self.assertAllClose(side_outputs['keypoints_2d'],
                        [[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]],
                         [[10.0, 11.0], [12.0, 13.0], [14.0, 15.0]]])
    self.assertAllClose(
        side_outputs['preprocessed_keypoints_2d'],
        [[[-0.25 / sqrt_2, -0.25 / sqrt_2], [0.25 / sqrt_2, 0.25 / sqrt_2],
          [0.75 / sqrt_2, 0.75 / sqrt_2]],
         [[-0.25 / sqrt_2, -0.25 / sqrt_2], [0.25 / sqrt_2, 0.25 / sqrt_2],
          [0.75 / sqrt_2, 0.75 / sqrt_2]]])
    self.assertAllClose(side_outputs['keypoint_masks_2d'],
                        [[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]],
                         [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]])
    self.assertAllClose(side_outputs['preprocessed_keypoint_masks_2d'],
                        [[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]],
                         [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]])
    self.assertAllClose(side_outputs['offset_points_2d'],
                        [[[1.0, 2.0]], [[11.0, 12.0]]])
    self.assertAllClose(side_outputs['scale_distances_2d'],
                        [[[4.0 * sqrt_2]], [[4.0 * sqrt_2]]])
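
The expected values in this test can be reproduced by hand: with offset_keypoint_names=['A', 'B'] the offset is the midpoint of A and B, and with reduce_sum over the two scale pairs the scale is the distance from midpoint(A, B) to B plus the distance from A to midpoint(B, C) (assuming each name list is reduced to its centroid, which the asserted numbers bear out). A quick NumPy check of the first batch row:

import numpy as np

a, b, c = np.array([0.0, 1.0]), np.array([2.0, 3.0]), np.array([4.0, 5.0])
offset = (a + b) / 2.0                         # [1.0, 2.0]
scale = (np.linalg.norm((a + b) / 2.0 - b)     # sqrt(2)
         + np.linalg.norm(a - (b + c) / 2.0))  # 3 * sqrt(2)
print(offset, scale)         # [1. 2.] 5.6568... == 4 * sqrt(2)
print((a - offset) / scale)  # [-0.1767... -0.1767...] == -0.25 / sqrt(2)
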
Example #6
    def evaluate_once():
        """Evaluates the model for one time."""
        _, status, _ = utils.create_checkpoint(log_dir_path,
                                               model=classifier,
                                               ema_model=ema_classifier,
                                               global_step=global_step)
        status.expect_partial()
        logging.info('Last checkpoint [iteration: %d] restored from %s.',
                     global_step.numpy(), log_dir_path)

        if global_step.numpy() >= FLAGS.max_iteration:
            nonlocal evaluated_last_ckpt
            evaluated_last_ckpt = True

        top_1_accuracy = tf.keras.metrics.CategoricalAccuracy()
        top_5_accuracy = tf.keras.metrics.TopKCategoricalAccuracy(k=5)
        for inputs in dataset:
            _, side_outputs = pipelines.create_model_input(
                inputs, common_module.MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT,
                keypoint_profile_2d)

            keypoints_2d = side_outputs[
                common_module.KEY_PREPROCESSED_KEYPOINTS_2D]
            keypoints_2d = tf.squeeze(keypoints_2d, axis=1)
            features = keypoints_2d[:, ::FLAGS.downsample_rate, Ellipsis]
            labels = inputs[common_module.KEY_CLASS_TARGETS]
            labels = tf.squeeze(labels, axis=1)

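            # Fold the time dimension into the batch so the per-frame encoder
            # sees [batch * frames, joints, dim]; the frames are restored below.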
            batch_size, num_frames, num_joints, feature_dim = features.shape
            features = tf.reshape(features, (-1, num_joints, feature_dim))
            _, features = encoder(features, training=False)
            features = features[FLAGS.encoder_output_activation]
            features = tf.reshape(features, (batch_size, num_frames, -1))
            if (FLAGS.encoder_output_activation
                    == 'embedder') and (FLAGS.encoder_algorithm_type !=
                                        algorithms.TYPE_ALGORITHM_ALIGN):
                features, _ = tf.split(features,
                                       num_or_size_splits=[
                                           FLAGS.encoder_pose_embedding_dim,
                                           FLAGS.encoder_view_embedding_dim
                                       ],
                                       axis=-1)

            if FLAGS.use_moving_average:
                outputs = ema_classifier(features, training=False)
            else:
                outputs = classifier(features, training=False)
            top_1_accuracy.update_state(y_true=labels, y_pred=outputs)
            top_5_accuracy.update_state(y_true=labels, y_pred=outputs)

        nonlocal top_1_best_accuracy
        if (top_1_best_accuracy is None
                or top_1_accuracy.result().numpy() > top_1_best_accuracy):
            top_1_best_accuracy = top_1_accuracy.result().numpy()

        nonlocal top_5_best_accuracy
        if (top_5_best_accuracy is None
                or top_5_accuracy.result().numpy() > top_5_best_accuracy):
            top_5_best_accuracy = top_5_accuracy.result().numpy()

        tf.summary.scalar('eval/Basic/Top1_Accuracy',
                          top_1_accuracy.result(),
                          step=global_step.numpy())
        tf.summary.scalar('eval/Best/Top1_Accuracy',
                          top_1_best_accuracy,
                          step=global_step.numpy())
        tf.summary.scalar('eval/Basic/Top5_Accuracy',
                          top_5_accuracy.result(),
                          step=global_step.numpy())
        tf.summary.scalar('eval/Best/Top5_Accuracy',
                          top_5_best_accuracy,
                          step=global_step.numpy())
        logging.info('Top-1 accuracy: {:.2f}'.format(
            top_1_accuracy.result().numpy()))
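
When use_moving_average is set, evaluation runs through ema_classifier, a shadow copy of the classifier whose weights track an exponential moving average of the training weights. A minimal sketch of how such a shadow model can be kept up to date during training (an illustration, not the repository's utils implementation):

import tensorflow as tf

def update_ema_model(model, ema_model, decay=0.999):
  # Shadow update after each train step: ema = decay * ema + (1 - decay) * w.
  for w, ema_w in zip(model.weights, ema_model.weights):
    ema_w.assign(decay * ema_w + (1.0 - decay) * w)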