Example #1
def _generate_task_from_execution(
        metadata_handler: metadata.Metadata, pipeline: pipeline_pb2.Pipeline,
        node: pipeline_pb2.PipelineNode,
        execution: metadata_store_pb2.Execution) -> task_lib.Task:
    """Generates `ExecNodeTask` given execution."""
    contexts = metadata_handler.store.get_contexts_by_execution(execution.id)
    exec_properties = _extract_properties(execution)
    input_artifacts = execution_lib.get_artifacts_dict(
        metadata_handler, execution.id, metadata_store_pb2.Event.INPUT)
    outputs_resolver = outputs_utils.OutputsResolver(node,
                                                     pipeline.pipeline_info,
                                                     pipeline.runtime_spec,
                                                     pipeline.execution_mode)
    return task_lib.ExecNodeTask(
        node_uid=task_lib.NodeUid.from_pipeline_node(pipeline, node),
        execution=execution,
        contexts=contexts,
        exec_properties=exec_properties,
        input_artifacts=input_artifacts,
        output_artifacts=outputs_resolver.generate_output_artifacts(
            execution.id),
        executor_output_uri=outputs_resolver.get_executor_output_uri(
            execution.id),
        stateful_working_dir=outputs_resolver.get_stateful_working_directory(
            execution.id))
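The `_extract_properties` helper called above is not shown in this example. A minimal sketch of what such a helper might do, assuming MLMD's `Value` proto keeps its payload in a oneof named `value` (the behavior is inferred from the call site, not copied from the source):

import itertools
from typing import Any, Dict

from ml_metadata.proto import metadata_store_pb2


def _extract_properties(
    execution: metadata_store_pb2.Execution) -> Dict[str, Any]:
  """Flattens `properties` and `custom_properties` into a plain dict."""
  result = {}
  for name, value in itertools.chain(execution.properties.items(),
                                     execution.custom_properties.items()):
    which = value.WhichOneof('value')  # e.g. 'int_value' or 'string_value'
    result[name] = getattr(value, which) if which else None
  return result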
Example #2
 def test_task_ids(self):
     node_uid = task_lib.NodeUid(pipeline_id='pipeline',
                                 pipeline_run_id='run0',
                                 node_id='Trainer')
     exec_node_task = task_lib.ExecNodeTask(node_uid=node_uid,
                                            execution_id=123)
     self.assertEqual(('ExecNodeTask', node_uid), exec_node_task.task_id)
     cancel_node_task = task_lib.CancelNodeTask(node_uid=node_uid)
     self.assertEqual(('CancelNodeTask', node_uid),
                      cancel_node_task.task_id)
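The assertions suggest that `task_id` is just a tuple of the concrete task class name and the node uid. A self-contained model consistent with this test (the real `task_lib` classes carry many more fields; this is only a sketch):

import dataclasses
from typing import NamedTuple, Optional


class NodeUid(NamedTuple):
  pipeline_id: str
  pipeline_run_id: Optional[str]
  node_id: str


@dataclasses.dataclass(frozen=True)
class ExecNodeTask:
  node_uid: NodeUid
  execution_id: int

  @property
  def task_id(self):
    # Matches the assertion: ('ExecNodeTask', node_uid).
    return (type(self).__name__, self.node_uid)


uid = NodeUid(pipeline_id='pipeline', pipeline_run_id='run0', node_id='Trainer')
task = ExecNodeTask(node_uid=uid, execution_id=123)
assert task.task_id == ('ExecNodeTask', uid)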
Example #3
    def _generate_task(
        self, node: pipeline_pb2.PipelineNode,
        node_executions: Sequence[metadata_store_pb2.Execution]
    ) -> task_lib.Task:
        """Generates a node execution task.

    If node execution is not feasible, `None` is returned.

    Args:
      node: The pipeline node for which to generate a task.
      node_executions: Node executions fetched from MLMD.

    Returns:
      Returns an `ExecNodeTask` if node can be executed. If an error occurs,
      a `FinalizePipelineTask` is returned to abort the pipeline execution.
    """
        result = task_gen_utils.generate_task_from_active_execution(
            self._mlmd_handle, self._pipeline, node, node_executions)
        if result:
            return result

        node_uid = task_lib.NodeUid.from_pipeline_node(self._pipeline, node)
        resolved_info = task_gen_utils.generate_resolved_info(
            self._mlmd_handle, node)
        if resolved_info.input_artifacts is None:
            return task_lib.FinalizePipelineTask(
                pipeline_uid=self._pipeline_state.pipeline_uid,
                status=status_lib.Status(
                    code=status_lib.Code.ABORTED,
                    message=('Aborting pipeline execution due to failure to '
                             'resolve inputs; problematic node uid: '
                             f'{node_uid}')))

        execution = execution_publish_utils.register_execution(
            metadata_handler=self._mlmd_handle,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)
        return task_lib.ExecNodeTask(
            node_uid=node_uid,
            execution=execution,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties,
            output_artifacts=outputs_resolver.generate_output_artifacts(
                execution.id),
            executor_output_uri=outputs_resolver.get_executor_output_uri(
                execution.id),
            stateful_working_dir=(
                outputs_resolver.get_stateful_working_directory(execution.id)),
            pipeline=self._pipeline)
Example #4
    def _generate_task(
            self, metadata_handler: metadata.Metadata,
            node: pipeline_pb2.PipelineNode) -> Optional[task_lib.Task]:
        """Generates a node execution task.

    If node execution is not feasible, `None` is returned.

    Args:
      metadata_handler: A handler to access MLMD db.
      node: The pipeline node for which to generate a task.

    Returns:
      Returns a `Task` or `None` if task generation is deemed infeasible.
    """
        if not task_gen_utils.is_feasible_node(node):
            return None

        executions = task_gen_utils.get_executions(metadata_handler, node)
        result = task_gen_utils.generate_task_from_active_execution(
            metadata_handler, self._pipeline, node, executions)
        if result:
            return result

        resolved_info = task_gen_utils.generate_resolved_info(
            metadata_handler, node)
        if resolved_info.input_artifacts is None:
            # TODO(goutham): If the pipeline can't make progress, there should be a
            # standard mechanism to surface it to the user.
            logging.warning(
                'Task cannot be generated for node %s since no input artifacts '
                'are resolved.', node.node_info.id)
            return None

        execution = execution_publish_utils.register_execution(
            metadata_handler=metadata_handler,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)
        return task_lib.ExecNodeTask(
            node_uid=task_lib.NodeUid.from_pipeline_node(self._pipeline, node),
            execution=execution,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties,
            output_artifacts=outputs_resolver.generate_output_artifacts(
                execution.id),
            executor_output_uri=outputs_resolver.get_executor_output_uri(
                execution.id),
            stateful_working_dir=(
                outputs_resolver.get_stateful_working_directory(execution.id)))
Example #5
 def test_exec_node_task_create(self):
     pipeline = pipeline_pb2.Pipeline()
     pipeline.pipeline_info.id = 'pipeline'
     pipeline.runtime_spec.pipeline_run_id.field_value.string_value = 'run0'
     node = pipeline_pb2.PipelineNode()
     node.node_info.id = 'Trainer'
     self.assertEqual(
         task_lib.ExecNodeTask(
             node_uid=task_lib.NodeUid(
                 pipeline_id='pipeline',
                 pipeline_run_id='run0',
                 node_id='Trainer'),
             execution_id=123),
         task_lib.ExecNodeTask.create(pipeline, node, 123))
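`ExecNodeTask.create` and `NodeUid.from_pipeline_node` are exercised but not defined here. Extending the sketch after Example #2, they plausibly reduce to the following (an assumption inferred from the fields this test sets, not the actual implementation):

from typing import NamedTuple, Optional


class NodeUid(NamedTuple):
  pipeline_id: str
  pipeline_run_id: Optional[str]
  node_id: str

  @classmethod
  def from_pipeline_node(cls, pipeline, node) -> 'NodeUid':
    # 'run0' in the test comes from runtime_spec.pipeline_run_id; pipelines
    # without a run id presumably map to None, hence the `or None`.
    run_id = pipeline.runtime_spec.pipeline_run_id.field_value.string_value
    return cls(
        pipeline_id=pipeline.pipeline_info.id,
        pipeline_run_id=run_id or None,
        node_id=node.node_info.id)


class ExecNodeTask(NamedTuple):
  node_uid: NodeUid
  execution_id: int

  @classmethod
  def create(cls, pipeline, node, execution_id) -> 'ExecNodeTask':
    return cls(
        node_uid=NodeUid.from_pipeline_node(pipeline, node),
        execution_id=execution_id)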
Example #6
def create_exec_node_task(node_uid,
                          execution=None,
                          contexts=None,
                          exec_properties=None,
                          input_artifacts=None,
                          output_artifacts=None,
                          executor_output_uri=None,
                          stateful_working_dir=None) -> task_lib.ExecNodeTask:
    """Creates an `ExecNodeTask` for testing."""
    return task_lib.ExecNodeTask(
        node_uid=node_uid,
        execution=execution or mock.Mock(),
        contexts=contexts or [],
        exec_properties=exec_properties or {},
        input_artifacts=input_artifacts or {},
        output_artifacts=output_artifacts or {},
        executor_output_uri=executor_output_uri or '',
        stateful_working_dir=stateful_working_dir or '')
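A hypothetical call of this helper: every omitted argument falls back to a cheap stand-in, so tests only spell out the fields they care about.

task = create_exec_node_task(
    task_lib.NodeUid(
        pipeline_id='pipeline', pipeline_run_id='run0', node_id='Trainer'))
# task.execution is a mock.Mock(), task.contexts == [], the artifact dicts
# are empty, and both executor_output_uri and stateful_working_dir are ''.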
Example #7
  def test_registration_and_creation(self):
    # Create a pipeline IR containing deployment config for testing.
    deployment_config = pipeline_pb2.IntermediateDeploymentConfig()
    executor_spec = pipeline_pb2.ExecutorSpec.PythonClassExecutorSpec(
        class_path='trainer.TrainerExecutor')
    deployment_config.executor_specs['Trainer'].Pack(executor_spec)
    pipeline = pipeline_pb2.Pipeline()
    pipeline.deployment_config.Pack(deployment_config)

    # Register a fake task scheduler.
    spec_type_url = deployment_config.executor_specs['Trainer'].type_url
    ts.TaskSchedulerRegistry.register(spec_type_url, _FakeTaskScheduler)

    # Create a task and verify that the correct scheduler is instantiated.
    task = task_lib.ExecNodeTask(
        node_uid=task_lib.NodeUid(
            pipeline_id='pipeline', pipeline_run_id=None, node_id='Trainer'),
        execution_id=123)
    task_scheduler = ts.TaskSchedulerRegistry.create_task_scheduler(
        mock.Mock(), pipeline, task)
    self.assertIsInstance(task_scheduler, _FakeTaskScheduler)
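`_FakeTaskScheduler` is referenced but not defined in this snippet. Assuming `ts.TaskScheduler` is the usual abstract base with `schedule` and `cancel` methods, a minimal fake might look like this sketch:

class _FakeTaskScheduler(ts.TaskScheduler):
  """No-op scheduler; just enough for registry/instantiation tests."""

  def schedule(self) -> ts.TaskSchedulerResult:
    # Report immediate success without running anything.
    return ts.TaskSchedulerResult(
        status=status_lib.Status(code=status_lib.Code.OK))

  def cancel(self) -> None:
    pass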
Example #8
def create_exec_node_task(node_uid,
                          execution=None,
                          contexts=None,
                          exec_properties=None,
                          input_artifacts=None,
                          output_artifacts=None,
                          executor_output_uri=None,
                          stateful_working_dir=None,
                          pipeline=None,
                          is_cancelled=False) -> task_lib.ExecNodeTask:
  """Creates an `ExecNodeTask` for testing."""
  return task_lib.ExecNodeTask(
      node_uid=node_uid,
      execution_id=execution.id if execution else 1,
      contexts=contexts or [],
      exec_properties=exec_properties or {},
      input_artifacts=input_artifacts or {},
      output_artifacts=output_artifacts or {},
      executor_output_uri=executor_output_uri or '',
      stateful_working_dir=stateful_working_dir or '',
      pipeline=pipeline or mock.Mock(),
      is_cancelled=is_cancelled)
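Compared with Example #6, this variant stores only the execution id and adds `pipeline` and `is_cancelled`. A hypothetical call for a cancellation test:

task = create_exec_node_task(
    task_lib.NodeUid(
        pipeline_id='pipeline', pipeline_run_id='run0', node_id='Trainer'),
    is_cancelled=True)
assert task.is_cancelled
assert task.execution_id == 1  # no execution was passed, so the default id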
Example #9
    def _resolve_inputs_and_generate_tasks_for_node(
        self,
        node: pipeline_pb2.PipelineNode,
    ) -> List[task_lib.Task]:
        """Generates tasks for a node by freshly resolving inputs."""
        result = []
        node_uid = task_lib.NodeUid.from_pipeline_node(self._pipeline, node)
        resolved_info = task_gen_utils.generate_resolved_info(
            self._mlmd_handle, node)
        if resolved_info is None:
            result.append(
                task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                             state=pstate.NodeState.SKIPPED))
            return result

        if not resolved_info.input_artifacts:
            error_msg = f'failure to resolve inputs; node uid: {node_uid}'
            result.append(
                task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                             state=pstate.NodeState.FAILED,
                                             status=status_lib.Status(
                                                 code=status_lib.Code.ABORTED,
                                                 message=error_msg)))
            return result
        # TODO(b/207038460): Update sync pipeline to support ForEach.
        input_artifacts = resolved_info.input_artifacts[0]

        execution = execution_publish_utils.register_execution(
            metadata_handler=self._mlmd_handle,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)
        output_artifacts = outputs_resolver.generate_output_artifacts(
            execution.id)

        # For mixed service nodes, we ensure node services and check service
        # status; pipeline is aborted if the service jobs have failed.
        service_status = self._ensure_node_services_if_mixed(node.node_info.id)
        if service_status == service_jobs.ServiceStatus.FAILED:
            error_msg = f'associated service job failed; node uid: {node_uid}'
            result.append(
                task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                             state=pstate.NodeState.FAILED,
                                             status=status_lib.Status(
                                                 code=status_lib.Code.ABORTED,
                                                 message=error_msg)))
            return result

        outputs_utils.make_output_dirs(output_artifacts)
        result.append(
            task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                         state=pstate.NodeState.RUNNING))
        result.append(
            task_lib.ExecNodeTask(
                node_uid=node_uid,
                execution_id=execution.id,
                contexts=resolved_info.contexts,
                input_artifacts=input_artifacts,
                exec_properties=resolved_info.exec_properties,
                output_artifacts=output_artifacts,
                executor_output_uri=outputs_resolver.get_executor_output_uri(
                    execution.id),
                stateful_working_dir=(
                    outputs_resolver.get_stateful_working_directory(
                        execution.id)),
                tmp_dir=outputs_resolver.make_tmp_dir(execution.id),
                pipeline=self._pipeline))
        return result
Example #10
    def _maybe_generate_task(
        self,
        node: pipeline_pb2.PipelineNode,
        node_executions: Sequence[metadata_store_pb2.Execution],
        successful_node_ids: MutableSet[str],
    ) -> Optional[task_lib.Task]:
        """Generates a task to execute or `None` if no action is required.

    If node is executable, `ExecNodeTask` is returned.

    If node execution is infeasible due to unsatisfied preconditions such as
    missing inputs or service job failure, task to abort the pipeline is
    returned.

    If cache is enabled and previously computed outputs are found, those outputs
    are used to finish the execution. Since node execution can be elided, `None`
    is returned after adding the node_id to `successful_node_ids` set.

    Args:
      node: The pipeline node for which to generate a task.
      node_executions: Node executions fetched from MLMD.
      successful_node_ids: Set that tracks successful node ids.

    Returns:
      Returns an `ExecNodeTask` if node can be executed. If an error occurs,
      a `FinalizePipelineTask` is returned to abort the pipeline execution.
    """
        result = task_gen_utils.generate_task_from_active_execution(
            self._mlmd_handle, self._pipeline, node, node_executions)
        if result:
            return result

        node_uid = task_lib.NodeUid.from_pipeline_node(self._pipeline, node)
        resolved_info = task_gen_utils.generate_resolved_info(
            self._mlmd_handle, node)
        if resolved_info.input_artifacts is None:
            return self._abort_task(
                f'failure to resolve inputs; node uid: {node_uid}')

        execution = execution_publish_utils.register_execution(
            metadata_handler=self._mlmd_handle,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)
        output_artifacts = outputs_resolver.generate_output_artifacts(
            execution.id)

        # Check if we can elide node execution by reusing previously computed
        # outputs for the node.
        cache_context = cache_utils.get_cache_context(
            self._mlmd_handle,
            pipeline_node=node,
            pipeline_info=self._pipeline.pipeline_info,
            executor_spec=_get_executor_spec(self._pipeline,
                                             node.node_info.id),
            input_artifacts=resolved_info.input_artifacts,
            output_artifacts=output_artifacts,
            parameters=resolved_info.exec_properties)
        contexts = resolved_info.contexts + [cache_context]
        if node.execution_options.caching_options.enable_cache:
            cached_outputs = cache_utils.get_cached_outputs(
                self._mlmd_handle, cache_context=cache_context)
            if cached_outputs is not None:
                logging.info(
                    'Eliding node execution, using cached outputs; node uid: %s',
                    node_uid)
                execution_publish_utils.publish_cached_execution(
                    self._mlmd_handle,
                    contexts=contexts,
                    execution_id=execution.id,
                    output_artifacts=cached_outputs)
                successful_node_ids.add(node.node_info.id)
                pstate.record_state_change_time()
                return None

        # For mixed service nodes, we ensure node services and check service
        # status; pipeline is aborted if the service jobs have failed.
        service_status = self._ensure_node_services_if_mixed(node.node_info.id)
        if service_status == service_jobs.ServiceStatus.FAILED:
            return self._abort_task(
                f'associated service job failed; node uid: {node_uid}')

        return task_lib.ExecNodeTask(
            node_uid=node_uid,
            execution_id=execution.id,
            contexts=contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties,
            output_artifacts=output_artifacts,
            executor_output_uri=outputs_resolver.get_executor_output_uri(
                execution.id),
            stateful_working_dir=(
                outputs_resolver.get_stateful_working_directory(execution.id)),
            pipeline=self._pipeline)
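`_get_executor_spec` above is not shown. Given how Example #7 packs executor specs into `IntermediateDeploymentConfig`, it plausibly just looks up the packed `Any` proto for the node; a sketch under that assumption:

def _get_executor_spec(pipeline: pipeline_pb2.Pipeline, node_id: str):
  """Returns the packed executor spec for `node_id`, or None if absent."""
  depl_config = pipeline_pb2.IntermediateDeploymentConfig()
  pipeline.deployment_config.Unpack(depl_config)
  return depl_config.executor_specs.get(node_id)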
Example #11
    def _generate_task(
            self, metadata_handler: metadata.Metadata,
            node: pipeline_pb2.PipelineNode) -> Optional[task_lib.Task]:
        """Generates a node execution task.

    If a node execution is not feasible, `None` is returned.

    Args:
      metadata_handler: A handler to access MLMD db.
      node: The pipeline node for which to generate a task.

    Returns:
      Returns a `Task` or `None` if task generation is deemed infeasible.
    """
        executions = task_gen_utils.get_executions(metadata_handler, node)
        result = task_gen_utils.generate_task_from_active_execution(
            metadata_handler, self._pipeline, node, executions)
        if result:
            return result

        resolved_info = task_gen_utils.generate_resolved_info(
            metadata_handler, node)
        if resolved_info.input_artifacts is None or not any(
                resolved_info.input_artifacts.values()):
            logging.info(
                'Task cannot be generated for node %s since no input artifacts '
                'are resolved.', node.node_info.id)
            return None

        # If the latest successful execution had the same resolved input
        # artifacts, the component should not be triggered, so no task is
        # generated.
        # TODO(b/170231077): This logic should be handled by the resolver when it's
        # implemented. Also, currently only the artifact ids of previous execution
        # are checked to decide if a new execution is warranted but it may also be
        # necessary to factor in the difference of execution properties.
        latest_exec = task_gen_utils.get_latest_successful_execution(
            executions)
        if latest_exec:
            artifact_ids_by_event_type = (
                execution_lib.get_artifact_ids_by_event_type_for_execution_id(
                    metadata_handler, latest_exec.id))
            latest_exec_input_artifact_ids = artifact_ids_by_event_type.get(
                metadata_store_pb2.Event.INPUT, set())
            current_exec_input_artifact_ids = set(
                a.id for a in itertools.chain(
                    *resolved_info.input_artifacts.values()))
            if latest_exec_input_artifact_ids == current_exec_input_artifact_ids:
                return None

        node_uid = task_lib.NodeUid.from_pipeline_node(self._pipeline, node)
        execution = execution_publish_utils.register_execution(
            metadata_handler=metadata_handler,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)

        # For mixed service nodes, we ensure node services and check service
        # status; the node is aborted if its service jobs have failed.
        service_status = self._ensure_node_services_if_mixed(node.node_info.id)
        if service_status is not None:
            if service_status != service_jobs.ServiceStatus.RUNNING:
                return self._abort_node_task(node_uid)

        return task_lib.ExecNodeTask(
            node_uid=node_uid,
            execution=execution,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties,
            output_artifacts=outputs_resolver.generate_output_artifacts(
                execution.id),
            executor_output_uri=outputs_resolver.get_executor_output_uri(
                execution.id),
            stateful_working_dir=(
                outputs_resolver.get_stateful_working_directory(execution.id)),
            pipeline=self._pipeline)
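The re-trigger check in this variant boils down to set equality over input artifact ids. A runnable toy version with hypothetical ids:

import itertools
from typing import NamedTuple


class _Artifact(NamedTuple):  # stand-in for an MLMD artifact
  id: int


# INPUT-event artifact ids of the latest successful execution:
latest_exec_input_artifact_ids = {3, 5, 8}

# Shape of resolved_info.input_artifacts: input key -> list of artifacts.
input_artifacts = {
    'examples': [_Artifact(3), _Artifact(5)],
    'schema': [_Artifact(8)],
}
current_exec_input_artifact_ids = {
    a.id for a in itertools.chain(*input_artifacts.values())
}

# Identical input sets: the component is not re-triggered.
assert current_exec_input_artifact_ids == latest_exec_input_artifact_ids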
Example #12
    def _generate_tasks_for_node(
            self, metadata_handler: metadata.Metadata,
            node: pipeline_pb2.PipelineNode) -> List[task_lib.Task]:
        """Generates a node execution task.

    If a node execution is not feasible, `None` is returned.

    Args:
      metadata_handler: A handler to access MLMD db.
      node: The pipeline node for which to generate a task.

    Returns:
      Returns a `Task` or `None` if task generation is deemed infeasible.
    """
        result = []
        node_uid = task_lib.NodeUid.from_pipeline_node(self._pipeline, node)

        executions = task_gen_utils.get_executions(metadata_handler, node)
        exec_node_task = task_gen_utils.generate_task_from_active_execution(
            metadata_handler, self._pipeline, node, executions)
        if exec_node_task:
            result.append(
                task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                             state=pstate.NodeState.RUNNING))
            result.append(exec_node_task)
            return result

        resolved_info = task_gen_utils.generate_resolved_info(
            metadata_handler, node)
        # TODO(b/207038460): Update async pipeline to support ForEach.
        if (resolved_info is None or not resolved_info.input_artifacts
                or resolved_info.input_artifacts[0] is None
                or not any(resolved_info.input_artifacts[0].values())):
            logging.info(
                'Task cannot be generated for node %s since no input artifacts '
                'are resolved.', node.node_info.id)
            return result
        input_artifacts = resolved_info.input_artifacts[0]

        executor_spec_fingerprint = hashlib.sha256()
        executor_spec = task_gen_utils.get_executor_spec(
            self._pipeline_state.pipeline, node.node_info.id)
        if executor_spec is not None:
            executor_spec_fingerprint.update(
                executor_spec.SerializeToString(deterministic=True))
        resolved_info.exec_properties[
            constants.EXECUTOR_SPEC_FINGERPRINT_KEY] = (
                executor_spec_fingerprint.hexdigest())

        # If the latest execution had the same resolved input artifacts, execution
        # properties and executor specs, we should not trigger a new execution.
        latest_exec = task_gen_utils.get_latest_execution(executions)
        if latest_exec:
            artifact_ids_by_event_type = (
                execution_lib.get_artifact_ids_by_event_type_for_execution_id(
                    metadata_handler, latest_exec.id))
            latest_exec_input_artifact_ids = artifact_ids_by_event_type.get(
                metadata_store_pb2.Event.INPUT, set())
            current_exec_input_artifact_ids = set(
                a.id for a in itertools.chain(*input_artifacts.values()))
            latest_exec_properties = task_gen_utils.extract_properties(
                latest_exec)
            current_exec_properties = resolved_info.exec_properties
            latest_exec_executor_spec_fp = latest_exec_properties[
                constants.EXECUTOR_SPEC_FINGERPRINT_KEY]
            current_exec_executor_spec_fp = resolved_info.exec_properties[
                constants.EXECUTOR_SPEC_FINGERPRINT_KEY]
            if (latest_exec_input_artifact_ids
                    == current_exec_input_artifact_ids
                    and _exec_properties_match(latest_exec_properties,
                                               current_exec_properties)
                    and latest_exec_executor_spec_fp
                    == current_exec_executor_spec_fp):
                result.append(
                    task_lib.UpdateNodeStateTask(
                        node_uid=node_uid, state=pstate.NodeState.STARTED))
                return result

        execution = execution_publish_utils.register_execution(
            metadata_handler=metadata_handler,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)

        # For mixed service nodes, we ensure node services and check service
        # status; the node is aborted if its service jobs have failed.
        service_status = self._ensure_node_services_if_mixed(node.node_info.id)
        if service_status is not None:
            if service_status != service_jobs.ServiceStatus.RUNNING:
                error_msg = f'associated service job failed; node uid: {node_uid}'
                result.append(
                    task_lib.UpdateNodeStateTask(
                        node_uid=node_uid,
                        state=pstate.NodeState.FAILED,
                        status=status_lib.Status(code=status_lib.Code.ABORTED,
                                                 message=error_msg)))
                return result

        output_artifacts = outputs_resolver.generate_output_artifacts(
            execution.id)
        outputs_utils.make_output_dirs(output_artifacts)
        result.append(
            task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                         state=pstate.NodeState.RUNNING))
        result.append(
            task_lib.ExecNodeTask(
                node_uid=node_uid,
                execution_id=execution.id,
                contexts=resolved_info.contexts,
                input_artifacts=input_artifacts,
                exec_properties=resolved_info.exec_properties,
                output_artifacts=output_artifacts,
                executor_output_uri=outputs_resolver.get_executor_output_uri(
                    execution.id),
                stateful_working_dir=(
                    outputs_resolver.get_stateful_working_directory(
                        execution.id)),
                tmp_dir=outputs_resolver.make_tmp_dir(execution.id),
                pipeline=self._pipeline))
        return result
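The executor-spec fingerprint in this example is just a SHA-256 hex digest over the deterministically serialized spec proto. A standalone equivalent of that computation:

import hashlib


def executor_spec_fingerprint(serialized_spec: bytes) -> str:
  """Digest stored under constants.EXECUTOR_SPEC_FINGERPRINT_KEY."""
  # `SerializeToString(deterministic=True)` keeps map field ordering stable,
  # so equal specs always produce equal fingerprints.
  return hashlib.sha256(serialized_spec).hexdigest()


assert (executor_spec_fingerprint(b'spec') ==
        executor_spec_fingerprint(b'spec'))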
Example #13
def _test_task(node_id, pipeline_id, pipeline_run_id=None):
    node_uid = task_lib.NodeUid(pipeline_id=pipeline_id,
                                pipeline_run_id=pipeline_run_id,
                                node_id=node_id)
    return task_lib.ExecNodeTask(node_uid=node_uid, execution_id=123)
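Usage is straightforward and lines up with the `task_id` behavior asserted in Example #2:

task = _test_task('Trainer', 'pipeline', pipeline_run_id='run0')
assert task.execution_id == 123
assert task.task_id == ('ExecNodeTask', task.node_uid)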