def test_eval_job(self):
  # Create test context.
  summary_dir = self.create_tempdir().full_path
  environment = test_envs.CountingEnv(steps_per_episode=4)
  action_tensor_spec = tensor_spec.from_spec(environment.action_spec())
  time_step_tensor_spec = tensor_spec.from_spec(environment.time_step_spec())
  policy = py_tf_eager_policy.PyTFEagerPolicy(
      random_tf_policy.RandomTFPolicy(time_step_tensor_spec,
                                      action_tensor_spec))
  mock_variable_container = mock.create_autospec(
      reverb_variable_container.ReverbVariableContainer)

  with mock.patch.object(
      tf.summary, 'scalar',
      autospec=True) as mock_scalar_summary, mock.patch.object(
          train_utils, 'wait_for_predicate', autospec=True):
    # Run the function tested.
    eval_job.evaluate(
        summary_dir=summary_dir,
        policy=policy,
        environment_name=None,
        suite_load_fn=lambda _: environment,
        variable_container=mock_variable_container,
        is_running=_NTimesReturnTrue(n=2))

    # Check if the expected calls happened.
    # As an input, an eval job is expected to fetch data from the variable
    # container.
    mock_variable_container.assert_has_calls([mock.call.update(mock.ANY)])

    # As an output, an eval job is expected to write at least the average
    # return corresponding to the first step.
    mock_scalar_summary.assert_any_call(
        name='eval_actor/AverageReturn', data=mock.ANY, step=mock.ANY)
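# Note: `_NTimesReturnTrue` is referenced by the eval_job tests in this excerpt
# but its definition is not shown. Below is a minimal sketch of what such a
# predicate could look like, assuming it simply reports "still running" for the
# first `n` calls and False afterwards (an illustrative assumption, not the
# original implementation).
class _NTimesReturnTrue(object):
  """Callable that returns True for the first `n` invocations, then False."""

  def __init__(self, n):
    self._n = n
    self._num_calls = 0

  def __call__(self):
    self._num_calls += 1
    return self._num_calls <= self._n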
def testZeroState(self):
  policy_state_length = 5
  batch_size = 3
  mock_py_policy = mock.create_autospec(py_policy.Base)
  observation_spec = array_spec.ArraySpec((3,), np.float32)
  mock_py_policy.time_step_spec = ts.time_step_spec(observation_spec)
  mock_py_policy.action_spec = array_spec.BoundedArraySpec(
      (7,), np.int32, 1, 1)
  py_policy_state_spec = array_spec.BoundedArraySpec((policy_state_length,),
                                                     np.int32, 1, 1)

  # Set up the mock policy's state spec and its reset (initial state) return
  # value.
  mock_py_policy.policy_state_spec = py_policy_state_spec
  mock_py_policy.info_spec = ()
  expected_py_policy_state = np.zeros(
      [batch_size] + list(py_policy_state_spec.shape),
      py_policy_state_spec.dtype)
  mock_py_policy.get_initial_state.return_value = expected_py_policy_state

  tf_mock_py_policy = tf_py_policy.TFPyPolicy(mock_py_policy)
  initial_state = tf_mock_py_policy.get_initial_state(batch_size=batch_size)
  initial_state_ = self.evaluate(initial_state)

  self.assertEqual(1, mock_py_policy.get_initial_state.call_count)
  np.testing.assert_equal(initial_state_, expected_py_policy_state)
def setUp(self):
  super(AsyncPipelineTaskGeneratorTest, self).setUp()
  pipeline_root = os.path.join(
      os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
      self.id())
  self._pipeline_root = pipeline_root

  # Makes sure multiple connections within a test always connect to the same
  # MLMD instance.
  metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
  self._metadata_path = metadata_path
  connection_config = metadata.sqlite_metadata_connection_config(metadata_path)
  connection_config.sqlite.SetInParent()
  self._mlmd_connection = metadata.Metadata(
      connection_config=connection_config)

  # Sets up the pipeline.
  pipeline = pipeline_pb2.Pipeline()
  self.load_proto_from_text(
      os.path.join(
          os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),
      pipeline)
  self._pipeline = pipeline
  self._pipeline_info = pipeline.pipeline_info
  self._pipeline_runtime_spec = pipeline.runtime_spec
  self._pipeline_runtime_spec.pipeline_root.field_value.string_value = (
      pipeline_root)

  # Extracts components.
  self._example_gen = pipeline.nodes[0].pipeline_node
  self._transform = pipeline.nodes[1].pipeline_node
  self._trainer = pipeline.nodes[2].pipeline_node

  self._task_queue = tq.TaskQueue()

  self._mock_service_job_manager = mock.create_autospec(
      service_jobs.ServiceJobManager, instance=True)

  def _is_pure_service_node(unused_pipeline_state, node_id):
    return node_id == self._example_gen.node_info.id

  def _is_mixed_service_node(unused_pipeline_state, node_id):
    return node_id == self._transform.node_info.id

  self._mock_service_job_manager.is_pure_service_node.side_effect = (
      _is_pure_service_node)
  self._mock_service_job_manager.is_mixed_service_node.side_effect = (
      _is_mixed_service_node)

  def _default_ensure_node_services(unused_pipeline_state, node_id):
    self.assertIn(
        node_id,
        (self._example_gen.node_info.id, self._transform.node_info.id))
    return service_jobs.ServiceStatus.RUNNING

  self._mock_service_job_manager.ensure_node_services.side_effect = (
      _default_ensure_node_services)
def _get_mock_py_policy(self):
  mock_py_policy = mock.create_autospec(py_policy.Base)
  observation_spec = tensor_spec.TensorSpec([5], dtype=tf.float32)
  mock_py_policy.time_step_spec = ts.time_step_spec(observation_spec)
  mock_py_policy.action_spec = tensor_spec.BoundedTensorSpec(
      [3], tf.float32, -1.0, 1.0)
  mock_py_policy.policy_state_spec = ()
  mock_py_policy.info_spec = ()
  return mock_py_policy
def testSave(self):
  saver = mock.create_autospec(policy_saver.PolicySaver, instance=True)
  async_saver = async_policy_saver.AsyncPolicySaver(saver)
  self.evaluate(tf.compat.v1.global_variables_initializer())
  save_path = os.path.join(self.get_temp_dir(), 'policy')
  async_saver.save(save_path)
  async_saver.flush()

  saver.save.assert_called_once_with(save_path)
def setUp(self):
  super().setUp()
  self._mock_service_job_manager = mock.create_autospec(
      service_jobs.ServiceJobManager, instance=True)
  self._mock_service_job_manager.ensure_node_services.return_value = (
      service_jobs.ServiceStatus.SUCCESS)
  self._mock_service_job_manager.stop_node_services.return_value = True
  self._mock_service_job_manager.is_pure_service_node.return_value = True
  self._mock_service_job_manager.is_mixed_service_node.return_value = False
  self._wrapper = service_jobs.ServiceJobManagerCleanupWrapper(
      self._mock_service_job_manager)
def testBlockingSave(self):
  saver = mock.create_autospec(policy_saver.PolicySaver, instance=True)
  async_saver = async_policy_saver.AsyncPolicySaver(saver)
  path1 = os.path.join(self.get_temp_dir(), 'save_model')
  path2 = os.path.join(self.get_temp_dir(), 'save_model2')
  self.evaluate(tf.compat.v1.global_variables_initializer())
  async_saver.save(path1)
  async_saver.save(path2, blocking=True)

  saver.save.assert_has_calls([mock.call(path1), mock.call(path2)])
def testCheckpointSave(self):
  saver = mock.create_autospec(policy_saver.PolicySaver, instance=True)
  async_saver = async_policy_saver.AsyncPolicySaver(saver)
  path = os.path.join(self.get_temp_dir(), 'save_model')
  self.evaluate(tf.compat.v1.global_variables_initializer())
  async_saver.save(path)
  async_saver.flush()

  checkpoint_path = os.path.join(self.get_temp_dir(), 'checkpoint')
  async_saver.save_checkpoint(checkpoint_path)
  async_saver.flush()

  saver.save_checkpoint.assert_called_once_with(checkpoint_path)
def testSave(self):
  saver = mock.create_autospec(policy_saver.PolicySaver, instance=True)
  async_saver = async_policy_saver.AsyncPolicySaver(saver)
  self.evaluate(tf.compat.v1.global_variables_initializer())
  save_path = os.path.join(self.get_temp_dir(), 'policy')
  async_saver.save(save_path)
  async_saver.flush()

  saver.save.assert_called_once_with(save_path)
  # Have to close the saver to avoid hanging threads that will prevent OSS
  # tests from finishing.
  async_saver.close()
def setUp(self):
  super().setUp()
  pipeline_root = os.path.join(
      os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
      self.id())
  self._pipeline_root = pipeline_root

  # Makes sure multiple connections within a test always connect to the same
  # MLMD instance.
  metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
  self._metadata_path = metadata_path
  connection_config = metadata.sqlite_metadata_connection_config(metadata_path)
  connection_config.sqlite.SetInParent()
  self._mlmd_connection = metadata.Metadata(
      connection_config=connection_config)

  # Sets up the pipeline.
  pipeline = self._make_pipeline(self._pipeline_root, str(uuid.uuid4()))
  self._pipeline = pipeline

  # Extracts components.
  self._example_gen = test_utils.get_node(pipeline, 'my_example_gen')
  self._stats_gen = test_utils.get_node(pipeline, 'my_statistics_gen')
  self._schema_gen = test_utils.get_node(pipeline, 'my_schema_gen')
  self._transform = test_utils.get_node(pipeline, 'my_transform')
  self._example_validator = test_utils.get_node(pipeline,
                                                'my_example_validator')
  self._trainer = test_utils.get_node(pipeline, 'my_trainer')
  self._evaluator = test_utils.get_node(pipeline, 'my_evaluator')
  self._chore_a = test_utils.get_node(pipeline, 'chore_a')
  self._chore_b = test_utils.get_node(pipeline, 'chore_b')

  self._task_queue = tq.TaskQueue()

  self._mock_service_job_manager = mock.create_autospec(
      service_jobs.ServiceJobManager, instance=True)
  self._mock_service_job_manager.is_pure_service_node.side_effect = (
      lambda _, node_id: node_id == self._example_gen.node_info.id)
  self._mock_service_job_manager.is_mixed_service_node.side_effect = (
      lambda _, node_id: node_id == self._transform.node_info.id)

  def _default_ensure_node_services(unused_pipeline_state, node_id):
    self.assertIn(
        node_id,
        (self._example_gen.node_info.id, self._transform.node_info.id))
    return service_jobs.ServiceStatus.SUCCESS

  self._mock_service_job_manager.ensure_node_services.side_effect = (
      _default_ensure_node_services)
def testAction(self):
  py_observation_spec = array_spec.BoundedArraySpec((3,), np.int32, 1, 1)
  py_time_step_spec = ts.time_step_spec(py_observation_spec)
  py_action_spec = array_spec.BoundedArraySpec((7,), np.int32, 1, 1)
  py_policy_state_spec = array_spec.BoundedArraySpec((5,), np.int32, 0, 1)
  py_policy_info_spec = array_spec.BoundedArraySpec((3,), np.int32, 0, 1)

  mock_py_policy = mock.create_autospec(py_policy.PyPolicy)
  mock_py_policy.time_step_spec = py_time_step_spec
  mock_py_policy.action_spec = py_action_spec
  mock_py_policy.policy_state_spec = py_policy_state_spec
  mock_py_policy.info_spec = py_policy_info_spec

  expected_py_policy_state = np.ones(py_policy_state_spec.shape,
                                     py_policy_state_spec.dtype)
  expected_py_time_step = tf.nest.map_structure(
      lambda arr_spec: np.ones((1,) + arr_spec.shape, arr_spec.dtype),
      py_time_step_spec)
  expected_py_action = np.ones((1,) + py_action_spec.shape,
                               py_action_spec.dtype)
  expected_new_py_policy_state = np.zeros(py_policy_state_spec.shape,
                                          py_policy_state_spec.dtype)
  expected_py_info = np.zeros(py_policy_info_spec.shape,
                              py_policy_info_spec.dtype)

  mock_py_policy.action.return_value = policy_step.PolicyStep(
      nest_utils.unbatch_nested_array(expected_py_action),
      expected_new_py_policy_state, expected_py_info)

  tf_mock_py_policy = tf_py_policy.TFPyPolicy(mock_py_policy)
  time_step = tf.nest.map_structure(
      lambda arr_spec: tf.ones((1,) + arr_spec.shape, arr_spec.dtype),
      py_time_step_spec)
  action_step = tf_mock_py_policy.action(
      time_step, tf.ones(py_policy_state_spec.shape, tf.int32))
  py_action_step = self.evaluate(action_step)

  self.assertEqual(1, mock_py_policy.action.call_count)
  np.testing.assert_equal(
      mock_py_policy.action.call_args[1]['time_step'],
      nest_utils.unbatch_nested_array(expected_py_time_step))
  np.testing.assert_equal(
      mock_py_policy.action.call_args[1]['policy_state'],
      expected_py_policy_state)
  np.testing.assert_equal(py_action_step.action, expected_py_action)
  np.testing.assert_equal(py_action_step.state, expected_new_py_policy_state)
  np.testing.assert_equal(py_action_step.info, expected_py_info)
def testBlockingCheckpointSave(self):
  saver = mock.create_autospec(policy_saver.PolicySaver, instance=True)
  async_saver = async_policy_saver.AsyncPolicySaver(saver)
  path1 = os.path.join(self.get_temp_dir(), 'save_model')
  path2 = os.path.join(self.get_temp_dir(), 'save_model2')
  self.evaluate(tf.compat.v1.global_variables_initializer())
  async_saver.save_checkpoint(path1)
  async_saver.save_checkpoint(path2, blocking=True)

  saver.save_checkpoint.assert_has_calls([mock.call(path1), mock.call(path2)])
  # Have to close the saver to avoid hanging threads that will prevent OSS
  # tests from finishing.
  async_saver.close()
def test_eval_job_constant_eval(self):
  """Tests eval every step for 2 steps.

  This test's `variable_container` passes the same train step twice to test
  that `is_train_step_the_same_or_behind` is working as expected. If it were
  not working, the number of train steps processed would be incorrect (2x
  higher).
  """
  summary_dir = self.create_tempdir().full_path
  environment = test_envs.CountingEnv(steps_per_episode=4)
  action_tensor_spec = tensor_spec.from_spec(environment.action_spec())
  time_step_tensor_spec = tensor_spec.from_spec(environment.time_step_spec())
  policy = py_tf_eager_policy.PyTFEagerPolicy(
      random_tf_policy.RandomTFPolicy(time_step_tensor_spec,
                                      action_tensor_spec))
  mock_variable_container = mock.create_autospec(
      reverb_variable_container.ReverbVariableContainer)

  class VCUpdateIncrementEveryOtherTrainStep(object):
    """Side effect that updates train_step on every other call."""

    def __init__(self):
      self.fake_train_step = -1
      self.call_count = 0

    def __call__(self, variables):
      if self.call_count % 2:
        self.fake_train_step += 1
        variables[reverb_variable_container.TRAIN_STEP_KEY].assign(
            self.fake_train_step)
      self.call_count += 1

  fake_update = VCUpdateIncrementEveryOtherTrainStep()
  mock_variable_container.update.side_effect = fake_update

  with mock.patch.object(
      tf.summary, 'scalar', autospec=True) as mock_scalar_summary:
    eval_job.evaluate(
        summary_dir=summary_dir,
        policy=policy,
        environment_name=None,
        suite_load_fn=lambda _: environment,
        variable_container=mock_variable_container,
        eval_interval=1,
        is_running=_NTimesReturnTrue(n=2))

    summary_count = self.count_summary_scalar_tags_in_call_list(
        mock_scalar_summary, 'Metrics/eval_actor/AverageReturn')
    self.assertEqual(summary_count, 2)
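# Note: `count_summary_scalar_tags_in_call_list` is a helper on the test case
# that is not shown in this excerpt. A minimal sketch under the assumption that
# it counts how many calls to the mocked `tf.summary.scalar` used a given tag
# name (hypothetical implementation, for illustration only):
def count_summary_scalar_tags_in_call_list(self, mock_scalar_summary, tag):
  # Each entry in `call_args_list` unpacks into (args, kwargs); the tests above
  # pass the tag via the `name` keyword argument.
  tags = [
      kwargs.get('name') for _, kwargs in mock_scalar_summary.call_args_list
  ]
  return tags.count(tag)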
def setUp(self):
  super(SyncPipelineTaskGeneratorTest, self).setUp()
  pipeline_root = os.path.join(
      os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
      self.id())
  self._pipeline_root = pipeline_root

  # Makes sure multiple connections within a test always connect to the same
  # MLMD instance.
  metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
  self._metadata_path = metadata_path
  connection_config = metadata.sqlite_metadata_connection_config(metadata_path)
  connection_config.sqlite.SetInParent()
  self._mlmd_connection = metadata.Metadata(
      connection_config=connection_config)

  # Sets up the pipeline.
  pipeline = pipeline_pb2.Pipeline()
  self.load_proto_from_text(
      os.path.join(
          os.path.dirname(__file__), 'testdata', 'sync_pipeline.pbtxt'),
      pipeline)
  self._pipeline_run_id = str(uuid.uuid4())
  runtime_parameter_utils.substitute_runtime_parameter(
      pipeline, {
          'pipeline_root': pipeline_root,
          'pipeline_run_id': self._pipeline_run_id
      })
  self._pipeline = pipeline

  # Extracts components.
  self._example_gen = _get_node(pipeline, 'my_example_gen')
  self._stats_gen = _get_node(pipeline, 'my_statistics_gen')
  self._schema_gen = _get_node(pipeline, 'my_schema_gen')
  self._transform = _get_node(pipeline, 'my_transform')
  self._example_validator = _get_node(pipeline, 'my_example_validator')
  self._trainer = _get_node(pipeline, 'my_trainer')

  self._task_queue = tq.TaskQueue()

  self._mock_service_job_manager = mock.create_autospec(
      service_jobs.ServiceJobManager, instance=True)
  self._mock_service_job_manager.is_pure_service_node.side_effect = (
      lambda _, node_id: node_id == self._example_gen.node_info.id)
  self._mock_service_job_manager.is_mixed_service_node.side_effect = (
      lambda _, node_id: node_id == self._transform.node_info.id)
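# Note: `_get_node` (like the `test_utils.get_node` used in the other setUp
# above) is not defined in this excerpt. A minimal sketch, assuming it looks up
# a pipeline node by its `node_info.id` and returns the `pipeline_node` proto,
# consistent with how nodes are accessed elsewhere in these tests (illustrative
# only):
def _get_node(pipeline, node_id):
  for node in pipeline.nodes:
    if node.pipeline_node.node_info.id == node_id:
      return node.pipeline_node
  raise ValueError('Node %s not found in pipeline.' % node_id)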
def testClose(self):
  saver = mock.create_autospec(policy_saver.PolicySaver, instance=True)
  async_saver = async_policy_saver.AsyncPolicySaver(saver)
  path = os.path.join(self.get_temp_dir(), 'save_model')
  self.evaluate(tf.compat.v1.global_variables_initializer())
  async_saver.save(path)

  self.assertTrue(async_saver._save_thread.is_alive())

  async_saver.close()

  saver.save.assert_called_once()
  self.assertFalse(async_saver._save_thread.is_alive())

  with self.assertRaises(ValueError):
    async_saver.save(path)
def test_eval_job(self):
  """Tests the eval job doing an eval every 5 steps for 10 train steps."""
  summary_dir = self.create_tempdir().full_path
  environment = test_envs.CountingEnv(steps_per_episode=4)
  action_tensor_spec = tensor_spec.from_spec(environment.action_spec())
  time_step_tensor_spec = tensor_spec.from_spec(environment.time_step_spec())
  policy = py_tf_eager_policy.PyTFEagerPolicy(
      random_tf_policy.RandomTFPolicy(time_step_tensor_spec,
                                      action_tensor_spec))

  class VCUpdateIncrementTrainStep(object):
    """Side effect that updates train_step."""

    def __init__(self):
      self.fake_train_step = -1

    def __call__(self, variables):
      self.fake_train_step += 1
      variables[reverb_variable_container.TRAIN_STEP_KEY].assign(
          self.fake_train_step)

  mock_variable_container = mock.create_autospec(
      reverb_variable_container.ReverbVariableContainer)
  fake_update = VCUpdateIncrementTrainStep()
  mock_variable_container.update.side_effect = fake_update

  with mock.patch.object(
      tf.summary, 'scalar', autospec=True) as mock_scalar_summary:
    # Run the function tested. 11 loops are needed to do 10 steps because the
    # eval occurs on the loop after the train_step is found.
    eval_job.evaluate(
        summary_dir=summary_dir,
        policy=policy,
        environment_name=None,
        suite_load_fn=lambda _: environment,
        variable_container=mock_variable_container,
        eval_interval=5,
        is_running=_NTimesReturnTrue(n=11))

    summary_count = self.count_summary_scalar_tags_in_call_list(
        mock_scalar_summary, 'Metrics/eval_actor/AverageReturn')
    self.assertEqual(summary_count, 3)
def test_active_pipelines_with_stop_initiated_nodes(self,
                                                    mock_gen_task_from_active,
                                                    mock_async_task_gen):
  with self._mlmd_connection as m:
    pipeline = _test_pipeline('pipeline')
    pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
    pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
    pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
    pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'

    mock_service_job_manager = mock.create_autospec(
        service_jobs.ServiceJobManager, instance=True)
    mock_service_job_manager.is_pure_service_node.side_effect = (
        lambda _, node_id: node_id == 'ExampleGen')

    example_gen_node_uid = task_lib.NodeUid.from_pipeline_node(
        pipeline, pipeline.nodes[0].pipeline_node)

    transform_node_uid = task_lib.NodeUid.from_pipeline_node(
        pipeline, pipeline.nodes[1].pipeline_node)
    transform_task = test_utils.create_exec_node_task(
        node_uid=transform_node_uid)

    trainer_node_uid = task_lib.NodeUid.from_pipeline_node(
        pipeline, pipeline.nodes[2].pipeline_node)
    trainer_task = test_utils.create_exec_node_task(node_uid=trainer_node_uid)

    evaluator_node_uid = task_lib.NodeUid.from_pipeline_node(
        pipeline, pipeline.nodes[3].pipeline_node)
    evaluator_task = test_utils.create_exec_node_task(
        node_uid=evaluator_node_uid)
    cancelled_evaluator_task = test_utils.create_exec_node_task(
        node_uid=evaluator_node_uid, is_cancelled=True)

    pipeline_ops.initiate_pipeline_start(m, pipeline)
    with pstate.PipelineState.load(
        m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
      # Stop example-gen, trainer and evaluator.
      pipeline_state.initiate_node_stop(
          example_gen_node_uid,
          status_lib.Status(code=status_lib.Code.CANCELLED))
      pipeline_state.initiate_node_stop(
          trainer_node_uid, status_lib.Status(code=status_lib.Code.CANCELLED))
      pipeline_state.initiate_node_stop(
          evaluator_node_uid, status_lib.Status(code=status_lib.Code.ABORTED))

    task_queue = tq.TaskQueue()

    # Simulate a new transform execution being triggered.
    mock_async_task_gen.return_value.generate.return_value = [transform_task]
    # Simulate ExecNodeTask for trainer already present in the task queue.
    task_queue.enqueue(trainer_task)
    # Simulate Evaluator having an active execution in MLMD.
    mock_gen_task_from_active.side_effect = [evaluator_task]

    pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
    self.assertEqual(1, mock_async_task_gen.return_value.generate.call_count)

    # stop_node_services should be called on example-gen which is a pure
    # service node.
    mock_service_job_manager.stop_node_services.assert_called_once_with(
        mock.ANY, 'ExampleGen')

    # Verify that tasks are enqueued in the expected order:

    # Pre-existing trainer task.
    task = task_queue.dequeue()
    task_queue.task_done(task)
    self.assertEqual(trainer_task, task)

    # CancelNodeTask for trainer.
    task = task_queue.dequeue()
    task_queue.task_done(task)
    self.assertTrue(task_lib.is_cancel_node_task(task))
    self.assertEqual(trainer_node_uid, task.node_uid)

    # ExecNodeTask with is_cancelled=True for evaluator.
    task = task_queue.dequeue()
    task_queue.task_done(task)
    self.assertEqual(cancelled_evaluator_task, task)

    # ExecNodeTask for newly triggered transform node.
    task = task_queue.dequeue()
    task_queue.task_done(task)
    self.assertEqual(transform_task, task)

    # No more tasks.
    self.assertTrue(task_queue.is_empty())
def test_stop_initiated_pipelines(self, pipeline, mock_gen_task_from_active,
                                  mock_async_task_gen, mock_sync_task_gen):
  with self._mlmd_connection as m:
    pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
    pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
    pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
    pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'

    mock_service_job_manager = mock.create_autospec(
        service_jobs.ServiceJobManager, instance=True)
    mock_service_job_manager.is_pure_service_node.side_effect = (
        lambda _, node_id: node_id == 'ExampleGen')
    mock_service_job_manager.is_mixed_service_node.side_effect = (
        lambda _, node_id: node_id == 'Transform')

    pipeline_ops.initiate_pipeline_start(m, pipeline)
    with pstate.PipelineState.load(
        m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
      pipeline_state.initiate_stop(
          status_lib.Status(code=status_lib.Code.CANCELLED))
      pipeline_execution = pipeline_state.execution

    task_queue = tq.TaskQueue()

    # For the stop-initiated pipeline, "Transform" execution task is in queue,
    # "Trainer" has an active execution in MLMD but no task in queue,
    # "Evaluator" has no active execution.
    task_queue.enqueue(
        test_utils.create_exec_node_task(
            task_lib.NodeUid(
                pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
                node_id='Transform')))
    transform_task = task_queue.dequeue()  # Simulates task being processed.
    mock_gen_task_from_active.side_effect = [
        test_utils.create_exec_node_task(
            node_uid=task_lib.NodeUid(
                pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
                node_id='Trainer'),
            is_cancelled=True), None, None, None, None
    ]

    pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)

    # There are no active pipelines, so these shouldn't be called.
    mock_async_task_gen.assert_not_called()
    mock_sync_task_gen.assert_not_called()

    # stop_node_services should be called for ExampleGen which is a pure
    # service node.
    mock_service_job_manager.stop_node_services.assert_called_once_with(
        mock.ANY, 'ExampleGen')
    mock_service_job_manager.reset_mock()

    task_queue.task_done(transform_task)  # Pop out transform task.

    # CancelNodeTask for the "Transform" ExecNodeTask should be next.
    task = task_queue.dequeue()
    task_queue.task_done(task)
    self.assertTrue(task_lib.is_cancel_node_task(task))
    self.assertEqual('Transform', task.node_uid.node_id)

    # ExecNodeTask (with is_cancelled=True) for "Trainer" is next.
    task = task_queue.dequeue()
    task_queue.task_done(task)
    self.assertTrue(task_lib.is_exec_node_task(task))
    self.assertEqual('Trainer', task.node_uid.node_id)
    self.assertTrue(task.is_cancelled)

    self.assertTrue(task_queue.is_empty())

    mock_gen_task_from_active.assert_has_calls([
        mock.call(
            m,
            pipeline_state.pipeline,
            pipeline.nodes[2].pipeline_node,
            mock.ANY,
            is_cancelled=True),
        mock.call(
            m,
            pipeline_state.pipeline,
            pipeline.nodes[3].pipeline_node,
            mock.ANY,
            is_cancelled=True)
    ])
    self.assertEqual(2, mock_gen_task_from_active.call_count)

    # Pipeline execution should continue to be active since active node
    # executions were found in the last call to `orchestrate`.
    [execution] = m.store.get_executions_by_id([pipeline_execution.id])
    self.assertTrue(execution_lib.is_execution_active(execution))

    # Call `orchestrate` again; this time there are no more active node
    # executions so the pipeline should be marked as cancelled.
    pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
    self.assertTrue(task_queue.is_empty())
    [execution] = m.store.get_executions_by_id([pipeline_execution.id])
    self.assertEqual(metadata_store_pb2.Execution.CANCELED,
                     execution.last_known_state)

    # stop_node_services should be called on both ExampleGen and Transform
    # which are service nodes.
    mock_service_job_manager.stop_node_services.assert_has_calls(
        [mock.call(mock.ANY, 'ExampleGen'),
         mock.call(mock.ANY, 'Transform')],
        any_order=True)