  def test_multi_task_prediction(self):
    features = np.array([[0.2, 0.4]])
    taskname_and_labels = [('task1', np.array([0])), ('task2', np.array([1]))]
    input_fn = _create_input_fn(
        features=features, taskname_and_labels=taskname_and_labels)
    tasknames_to_num_classes = {'task1': 3, 'task2': 2}
    tasknames_to_kernels = {
        'task1': np.array([[0.5, 0.6, 0.7], [0.1, 0.2, 0.3]]),
        'task2': np.array([[0.2, 0.4], [0.3, 0.5]])
    }
    model_fn = multi_task_utils.create_model_fn(
        multi_task_utils.multi_task_estimator_spec_fn, tasknames_to_kernels,
        tasknames_to_num_classes)
    estimator = tf.estimator.Estimator(model_fn=model_fn)
    # Estimator.predict returns a generator that iterates through the input
    # examples. Given that we have passed one example, prediction_result
    # should not be None.
    prediction_result = next(estimator.predict(input_fn=input_fn), None)
    with self.test_session():
      self.assertAllClose(
          prediction_result['task1/probabilities'],
          tf.squeeze(
              tf.nn.softmax(features.dot(
                  tasknames_to_kernels['task1']))).eval())
      self.assertAllClose(
          prediction_result['task2/probabilities'],
          tf.squeeze(
              tf.nn.softmax(features.dot(
                  tasknames_to_kernels['task2']))).eval())
    self.assertEqual(prediction_result['task1/top_class'], 2)
    self.assertEqual(prediction_result['task2/top_class'], 1)
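
  # A hedged cross-check of the expectation above: the same probabilities can
  # be computed in pure NumPy, with no TF session. This helper is a
  # hypothetical addition for illustration; it is not part of
  # multi_task_utils.
  def _numpy_softmax_probabilities(self, features, kernel):
    # Row-wise softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))); the
    # max-subtraction is the usual numerical-stability trick.
    logits = features.dot(kernel)
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    exp = np.exp(shifted)
    return np.squeeze(exp / np.sum(exp, axis=1, keepdims=True))

  # Usage would mirror the session-based assertion, e.g.:
  #   self.assertAllClose(
  #       prediction_result['task1/probabilities'],
  #       self._numpy_softmax_probabilities(features,
  #                                         tasknames_to_kernels['task1']))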

  def test_multi_task_evaluation(self):
    features = np.array([[0.2, 0.4], [0.1, 0.3], [0.5, 0.8], [0.9, 0.95]])
    taskname_and_labels = [('task1', np.array([0, 2, -1, -1])),
                           ('task2', np.array([-1, -1, 0, 1]))]
    input_fn = _create_input_fn(
        features=features, taskname_and_labels=taskname_and_labels)
    tasknames_to_num_classes = {'task1': 3, 'task2': 2}
    tasknames_to_kernels = {
        'task1': np.array([[0.5, 0.6, 0.7], [0.1, 0.2, 0.3]]),
        'task2': np.array([[0.2, 0.4], [0.3, 0.5]])
    }
    model_fn = multi_task_utils.create_model_fn(
        multi_task_utils.multi_task_estimator_spec_fn, tasknames_to_kernels,
        tasknames_to_num_classes)
    estimator = tf.estimator.Estimator(model_fn=model_fn)
    # The total loss is the sum of the cross-entropy losses for both tasks,
    # plus each task's regularization loss weighted by its normalized
    # valid-example count (0.5 each here, since each task has two valid
    # examples out of four).
    expected_loss = tf_testing_utils.softmax_cross_entropy_loss(
        features.dot(tasknames_to_kernels['task1']),
        taskname_and_labels[0][1]) + tf_testing_utils.softmax_cross_entropy_loss(
            features.dot(tasknames_to_kernels['task2']),
            taskname_and_labels[1][1]) + 0.5 * 0.1 * np.sum(
                tasknames_to_kernels['task1']**2) / 2 + 0.5 * 0.1 * np.sum(
                    tasknames_to_kernels['task2']**2) / 2
    result_metrics = estimator.evaluate(input_fn=input_fn, steps=1)
    self.assertAlmostEqual(result_metrics['loss'], expected_loss, places=6)
    self.assertAlmostEqual(
        result_metrics['task1/Eval/Accuracy/validation'], 0.5)
    self.assertAlmostEqual(
        result_metrics['task2/Eval/Accuracy/validation'], 0.5)
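
  # For reference, the regularization terms above work out as follows (a
  # manual cross-check, assuming the L2 scale of 0.1 used throughout these
  # tests):
  #   reg(task1) = 0.1 * sum(kernel1**2) / 2 = 0.1 * 1.24 / 2 = 0.062
  #   reg(task2) = 0.1 * sum(kernel2**2) / 2 = 0.1 * 0.54 / 2 = 0.027
  # With the 0.5 weights they contribute 0.031 + 0.0135 to the total loss.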

  def test_multi_task_training_with_task_regularization_loss(self):
    features = np.array([[0.2, 0.4], [0.1, 0.3], [0.5, 0.8], [0.9, 0.7]])
    input_fn = _create_input_fn(
        features=features,
        taskname_and_labels=[('task1', np.array([-1, -1, -1, -1])),
                             ('task2', np.array([-1, -1, -1, -1]))])
    tasknames_to_num_classes = {'task1': 3, 'task2': 2}
    tasknames_to_kernels = {
        'task1': np.array([[0.5, 0.6, 0.7], [0.1, 0.2, 0.3]]),
        'task2': np.array([[0.2, 0.4], [0.3, 0.5]])
    }
    model_fn = multi_task_utils.create_model_fn(
        multi_task_utils.multi_task_estimator_spec_fn, tasknames_to_kernels,
        tasknames_to_num_classes, no_optimizer=True)
    estimator = tf.estimator.Estimator(model_fn=model_fn)
    # Checks that each task's individual regularization loss matches our
    # manual computation.
    tensor_name_to_expected_value = {
        'task1_reg_loss:0': 0.1 * np.sum(tasknames_to_kernels['task1']**2) / 2,
        'task2_reg_loss:0': 0.1 * np.sum(tasknames_to_kernels['task2']**2) / 2
    }
    estimator.train(
        input_fn=input_fn,
        steps=1,
        hooks=[AssertLossHook(self, tensor_name_to_expected_value)])
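
  # AssertLossHook is defined elsewhere in this file. As a rough sketch of
  # the mechanics (hypothetical name and details, not the actual definition),
  # a hook with equivalent behavior could fetch the named tensors on each
  # step and assert their values:
  class _ExampleAssertTensorsHook(tf.train.SessionRunHook):
    """Hypothetical illustration: asserts named tensors match expectations."""

    def __init__(self, test_case, tensor_name_to_expected_value):
      self._test_case = test_case
      self._expected = tensor_name_to_expected_value

    def before_run(self, run_context):
      # Fetch each named tensor alongside the training step; session.run
      # accepts tensor names such as 'task1_reg_loss:0' directly.
      return tf.train.SessionRunArgs(
          fetches={name: name for name in self._expected})

    def after_run(self, run_context, run_values):
      for name, expected in self._expected.items():
        self._test_case.assertAlmostEqual(
            run_values.results[name], expected, places=6)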

  def test_multi_task_model_export(self):
    # Use a fresh temporary directory so the test does not depend on a
    # hard-coded local path.
    model_dir = os.path.join(self.get_temp_dir(), 'models')
    export_dir = os.path.join(model_dir, 'exports')
    checkpoint_path = os.path.join(model_dir, 'model.ckpt-0')
    features = np.array([[0.2, 0.4], [0.1, 0.3], [0.5, 0.8],
                         [0.9, 0.7]]).astype(float)
    input_fn = _create_input_fn(
        features=features,
        taskname_and_labels=[('task1', np.array([-1, -1, -1, -1])),
                             ('task2', np.array([1, 0, 0, 1]))])
    tasknames_to_num_classes = {'task1': 3, 'task2': 2}
    tasknames_to_kernels = {
        'task1': np.array([[0.5, 0.6, 0.7], [0.1, 0.2, 0.3]]),
        'task2': np.array([[0.2, 0.4], [0.3, 0.5]])
    }
    model_fn = multi_task_utils.create_model_fn(
        multi_task_utils.multi_task_estimator_spec_fn, tasknames_to_kernels,
        tasknames_to_num_classes)
    estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir)
    estimator.train(input_fn=input_fn, steps=1)

    def serving_input_fn():
      input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
          {
              multi_task_utils.FEATURE_KEY: tf.FixedLenFeature([2], tf.float32)
          },
          default_batch_size=None)
      raw_features, receiver_tensors, _ = input_fn()
      return tf.estimator.export.ServingInputReceiver(raw_features,
                                                      receiver_tensors)

    exported_model_path = estimator.export_saved_model(
        export_dir_base=export_dir,
        checkpoint_path=checkpoint_path,
        serving_input_receiver_fn=serving_input_fn)
    # export_saved_model returns a bytes path in TF 1.x, so normalize it
    # before joining.
    self.assertTrue(
        os.path.isfile(
            os.path.join(
                tf.compat.as_str_any(exported_model_path), 'saved_model.pb')))
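
  # A possible follow-up smoke check on the export (hedged sketch; assumes
  # the TF 1.x tf.contrib.predictor convenience API and the default
  # 'examples' receiver key of build_parsing_serving_input_receiver_fn):
  #
  #   example = tf.train.Example(features=tf.train.Features(feature={
  #       multi_task_utils.FEATURE_KEY: tf.train.Feature(
  #           float_list=tf.train.FloatList(value=[0.2, 0.4]))}))
  #   predict_fn = tf.contrib.predictor.from_saved_model(
  #       tf.compat.as_str_any(exported_model_path))
  #   outputs = predict_fn({'examples': [example.SerializeToString()]})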

  def test_multi_task_training_total_reg_loss_with_optimization(self):
    features = np.array([[0.2, 0.4], [0.1, 0.3], [0.5, 0.8], [0.9, 0.7]])
    input_fn = _create_input_fn(
        features=features,
        taskname_and_labels=[('task1', np.array([-1, -1, -1, -1])),
                             ('task2', np.array([0, 1, 0, 1]))])
    tasknames_to_num_classes = {'task1': 3, 'task2': 2}
    tasknames_to_kernels = {
        'task1': np.array([[0.5, 0.6, 0.7], [0.1, 0.2, 0.3]]),
        'task2': np.array([[0.2, 0.4], [0.3, 0.5]])
    }
    model_fn = multi_task_utils.create_model_fn(
        multi_task_utils.multi_task_estimator_spec_fn, tasknames_to_kernels,
        tasknames_to_num_classes)
    estimator = tf.estimator.Estimator(model_fn=model_fn)
    estimator.train(input_fn=input_fn, steps=1)
    # Given that we do not provide any valid training examples for task1,
    # kernel1 should stay at its initial values.
    self.assertAllClose(
        estimator.get_variable_value('task1_logit/kernel'),
        np.array([[0.5, 0.6, 0.7], [0.1, 0.2, 0.3]]))
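    # A hedged complementary check: task2 did receive valid examples, so one
    # optimizer step should have moved its kernel away from the initializer.
    self.assertNotAllClose(
        estimator.get_variable_value('task2_logit/kernel'),
        np.array([[0.2, 0.4], [0.3, 0.5]]))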

  def test_multi_task_training_total_loss(self):
    features = np.array([[0.2, 0.4], [0.1, 0.3], [0.5, 0.8], [0.9, 0.7]])
    input_fn = _create_input_fn(
        features=features,
        taskname_and_labels=[('task1', np.array([-1, -1, -1, -1])),
                             ('task2', np.array([1, 0, 0, 1]))])
    tasknames_to_num_classes = {'task1': 3, 'task2': 2}
    tasknames_to_kernels = {
        'task1': np.array([[0.5, 0.6, 0.7], [0.1, 0.2, 0.3]]),
        'task2': np.array([[0.2, 0.4], [0.3, 0.5]])
    }
    model_fn = multi_task_utils.create_model_fn(
        multi_task_utils.multi_task_estimator_spec_fn, tasknames_to_kernels,
        tasknames_to_num_classes, no_optimizer=True)
    estimator = tf.estimator.Estimator(model_fn=model_fn)
    # Since there are no valid examples for task1, the total loss reduces to
    # the cross-entropy loss plus the regularization loss for task2.
    expected_loss = tf_testing_utils.softmax_cross_entropy_loss(
        features.dot(tasknames_to_kernels['task2']),
        np.array([1, 0, 0, 1])) + 0.1 * np.sum(
            tasknames_to_kernels['task2']**2) / 2
    tensor_name_to_expected_value = {'total_loss:0': expected_loss}
    estimator.train(
        input_fn=input_fn,
        steps=1,
        hooks=[AssertLossHook(self, tensor_name_to_expected_value)])
    # Since optimization is disabled, the model parameters should not change.
    self.assertAllClose(
        estimator.get_variable_value('task1_logit/kernel'),
        np.array([[0.5, 0.6, 0.7], [0.1, 0.2, 0.3]]))
    self.assertAllClose(
        estimator.get_variable_value('task2_logit/kernel'),
        np.array([[0.2, 0.4], [0.3, 0.5]]))
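
  # _create_input_fn is a module-level helper defined elsewhere in this file.
  # One plausible implementation, sketched here as an assumption about its
  # contract (a features dict keyed by multi_task_utils.FEATURE_KEY plus a
  # labels dict keyed by task name, with -1 marking examples that are invalid
  # for a task):
  #
  #   def _create_input_fn(features, taskname_and_labels):
  #     def input_fn():
  #       labels = {name: label for name, label in taskname_and_labels}
  #       return tf.data.Dataset.from_tensor_slices(
  #           ({multi_task_utils.FEATURE_KEY: features.astype(np.float32)},
  #            labels)).batch(len(features))
  #     return input_fn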