Code Example #1
File: task_gen_utils.py (Project: hamzamaiot/tfx)
def _generate_task_from_execution(
        metadata_handler: metadata.Metadata, pipeline: pipeline_pb2.Pipeline,
        node: pipeline_pb2.PipelineNode,
        execution: metadata_store_pb2.Execution) -> task_lib.Task:
    """Generates `ExecNodeTask` given execution."""
    contexts = metadata_handler.store.get_contexts_by_execution(execution.id)
    exec_properties = _extract_properties(execution)
    input_artifacts = execution_lib.get_artifacts_dict(
        metadata_handler, execution.id, metadata_store_pb2.Event.INPUT)
    outputs_resolver = outputs_utils.OutputsResolver(node,
                                                     pipeline.pipeline_info,
                                                     pipeline.runtime_spec,
                                                     pipeline.execution_mode)
    return task_lib.ExecNodeTask(
        node_uid=task_lib.NodeUid.from_pipeline_node(pipeline, node),
        execution=execution,
        contexts=contexts,
        exec_properties=exec_properties,
        input_artifacts=input_artifacts,
        output_artifacts=outputs_resolver.generate_output_artifacts(
            execution.id),
        executor_output_uri=outputs_resolver.get_executor_output_uri(
            execution.id),
        stateful_working_dir=outputs_resolver.get_stateful_working_directory(
            execution.id))
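Every example on this page follows the same pattern: construct an OutputsResolver from the node and pipeline protos, then derive the per-execution output artifacts and working paths from an MLMD execution id. A condensed sketch of that shared pattern, assuming `node`, `pipeline`, and `execution` are already in scope as in the example above:

    # Minimal sketch of the shared OutputsResolver call pattern; `node`,
    # `pipeline`, and `execution` are assumed to be loaded as in the example.
    resolver = outputs_utils.OutputsResolver(
        pipeline_node=node,
        pipeline_info=pipeline.pipeline_info,
        pipeline_runtime_spec=pipeline.runtime_spec,
        execution_mode=pipeline.execution_mode)

    # Everything the resolver produces is keyed by the MLMD execution id.
    output_artifacts = resolver.generate_output_artifacts(execution.id)
    executor_output_uri = resolver.get_executor_output_uri(execution.id)
    stateful_working_dir = resolver.get_stateful_working_directory(execution.id)
    tmp_dir = resolver.make_tmp_dir(execution.id)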
Code Example #2
    def _generate_task(
        self, node: pipeline_pb2.PipelineNode,
        node_executions: Sequence[metadata_store_pb2.Execution]
    ) -> task_lib.Task:
        """Generates a node execution task.

    If node execution is not feasible, `None` is returned.

    Args:
      node: The pipeline node for which to generate a task.
      node_executions: Node executions fetched from MLMD.

    Returns:
      An `ExecNodeTask` if the node can be executed. If an error occurs,
      a `FinalizePipelineTask` is returned to abort the pipeline execution.
    """
        result = task_gen_utils.generate_task_from_active_execution(
            self._mlmd_handle, self._pipeline, node, node_executions)
        if result:
            return result

        node_uid = task_lib.NodeUid.from_pipeline_node(self._pipeline, node)
        resolved_info = task_gen_utils.generate_resolved_info(
            self._mlmd_handle, node)
        if resolved_info.input_artifacts is None:
            return task_lib.FinalizePipelineTask(
                pipeline_uid=self._pipeline_state.pipeline_uid,
                status=status_lib.Status(
                    code=status_lib.Code.ABORTED,
                    message=
                    (f'Aborting pipeline execution due to failure to resolve '
                     f'inputs; problematic node uid: {node_uid}')))

        execution = execution_publish_utils.register_execution(
            metadata_handler=self._mlmd_handle,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)
        return task_lib.ExecNodeTask(
            node_uid=node_uid,
            execution=execution,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties,
            output_artifacts=outputs_resolver.generate_output_artifacts(
                execution.id),
            executor_output_uri=outputs_resolver.get_executor_output_uri(
                execution.id),
            stateful_working_dir=(
                outputs_resolver.get_stateful_working_directory(execution.id)),
            pipeline=self._pipeline)
Code Example #3
File: outputs_utils_test.py (Project: kp425/tfx)
    def setUp(self):
        super().setUp()
        pipeline_runtime_spec = pipeline_pb2.PipelineRuntimeSpec()
        pipeline_runtime_spec.pipeline_root.field_value.string_value = self.tmp_dir
        pipeline_runtime_spec.pipeline_run_id.field_value.string_value = (
            'test_run_0')

        self._output_resolver = outputs_utils.OutputsResolver(
            pipeline_node=_PIPELINE_NODE,
            pipeline_info=_PIPELINE_INFO,
            pipeline_runtime_spec=pipeline_runtime_spec)
Code Example #4
    def _generate_task(
            self, metadata_handler: metadata.Metadata,
            node: pipeline_pb2.PipelineNode) -> Optional[task_lib.Task]:
        """Generates a node execution task.

    If node execution is not feasible, `None` is returned.

    Args:
      metadata_handler: A handler to access MLMD db.
      node: The pipeline node for which to generate a task.

    Returns:
      A `Task`, or `None` if task generation is deemed infeasible.
    """
        if not task_gen_utils.is_feasible_node(node):
            return None

        executions = task_gen_utils.get_executions(metadata_handler, node)
        result = task_gen_utils.generate_task_from_active_execution(
            metadata_handler, self._pipeline, node, executions)
        if result:
            return result

        resolved_info = task_gen_utils.generate_resolved_info(
            metadata_handler, node)
        if resolved_info.input_artifacts is None:
            # TODO(goutham): If the pipeline can't make progress, there should be a
            # standard mechanism to surface it to the user.
            logging.warning(
                'Task cannot be generated for node %s since no input artifacts '
                'are resolved.', node.node_info.id)
            return None

        execution = execution_publish_utils.register_execution(
            metadata_handler=metadata_handler,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)
        return task_lib.ExecNodeTask(
            node_uid=task_lib.NodeUid.from_pipeline_node(self._pipeline, node),
            execution=execution,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties,
            output_artifacts=outputs_resolver.generate_output_artifacts(
                execution.id),
            executor_output_uri=outputs_resolver.get_executor_output_uri(
                execution.id),
            stateful_working_dir=(
                outputs_resolver.get_stateful_working_directory(execution.id)))
Code Example #5
    def __init__(self,
                 pipeline_node: pipeline_pb2.PipelineNode,
                 mlmd_connection: metadata.Metadata,
                 pipeline_info: pipeline_pb2.PipelineInfo,
                 pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec,
                 executor_spec: Optional[message.Message] = None,
                 custom_driver_spec: Optional[message.Message] = None,
                 platform_spec: Optional[message.Message] = None,
                 custom_executor_operators: Optional[Dict[
                     Any, Type[ExecutorOperator]]] = None):
        """Initializes a Launcher.

    Args:
      pipeline_node: The specification of the node that this launcher launches.
      mlmd_connection: ML metadata connection.
      pipeline_info: The information of the pipeline that this node runs in.
      pipeline_runtime_spec: The runtime information of the pipeline that this
        node runs in.
      executor_spec: Specification for the executor of the node. This is
        expected for all component nodes. It is used to determine the specific
        ExecutorOperator class to use for execution, and is passed into the
        ExecutorOperator.
      custom_driver_spec: Specification for custom driver. This is expected only
        for advanced use cases.
      platform_spec: Platform config that will be used as auxiliary info of the
        node execution. This will be passed to ExecutorOperator along with the
        `executor_spec`.
      custom_executor_operators: A map from ExecutorSpec type to its
        ExecutorOperator implementation.

    Raises:
      ValueError: When the given specs are not launchable by this launcher.
    """
        del custom_driver_spec

        self._pipeline_node = pipeline_node
        self._mlmd_connection = mlmd_connection
        self._pipeline_info = pipeline_info
        self._pipeline_runtime_spec = pipeline_runtime_spec
        self._executor_operators = {}
        self._executor_operators.update(DEFAULT_EXECUTOR_OPERATORS)
        self._executor_operators.update(custom_executor_operators or {})

        self._executor_operator = self._executor_operators[
            type(executor_spec)](executor_spec, platform_spec)
        self._output_resolver = outputs_utils.OutputsResolver(
            pipeline_node=self._pipeline_node,
            pipeline_info=self._pipeline_info,
            pipeline_runtime_spec=self._pipeline_runtime_spec)
Code Example #6
    def __init__(self,
                 pipeline_node: pipeline_pb2.PipelineNode,
                 mlmd_connection: metadata.Metadata,
                 pipeline_info: pipeline_pb2.PipelineInfo,
                 pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec,
                 custom_executor_operators: Optional[Dict[
                     Any, Type[ExecutorOperator]]] = None):
        """Initializes a Launcher.

    Args:
      pipeline_node: The specification of the node that this launcher launches.
      mlmd_connection: ML metadata connection. The connection is expected not
        to be opened before the launcher is initialized.
      pipeline_info: The information of the pipeline that this node runs in.
      pipeline_runtime_spec: The runtime information of the pipeline that this
        node runs in.
      custom_executor_operators: A map from ExecutorSpec type to its
        ExecutorOperator implementation.

    Raises:
      ValueError: When the given specs are not launchable by this launcher.
    """
        self._pipeline_node = pipeline_node
        self._mlmd_connection = mlmd_connection
        self._pipeline_info = pipeline_info
        self._pipeline_runtime_spec = pipeline_runtime_spec
        self._executor_operators = {}
        self._executor_operators.update(DEFAULT_EXECUTOR_OPERATORS)
        self._executor_operators.update(custom_executor_operators or {})

        executor_spec_name = self._pipeline_node.executor.WhichOneof('spec')
        self._executor_spec = getattr(self._pipeline_node.executor,
                                      executor_spec_name)

        self._executor_operator = self._executor_operators[
            type(self._executor_spec)](self._executor_spec)
        self._output_resolver = outputs_utils.OutputsResolver(
            pipeline_node=self._pipeline_node,
            pipeline_info=self._pipeline_info,
            pipeline_runtime_spec=self._pipeline_runtime_spec)
Code Example #7
File: outputs_utils_test.py (Project: jay90099/tfx)
    def _output_resolver(self, execution_mode=pipeline_pb2.Pipeline.SYNC):
        return outputs_utils.OutputsResolver(
            pipeline_node=_PIPELINE_NODE,
            pipeline_info=_PIPELINE_INFO,
            pipeline_runtime_spec=self._pipeline_runtime_spec,
            execution_mode=execution_mode)
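The helper above parameterizes the resolver's execution mode, defaulting to SYNC. A minimal sketch of how a test might request the complementary ASYNC mode (the helper itself is from the snippet above; invoking it against a placeholder execution id is an assumption):

        # Hypothetical usage inside a test method of the same TestCase.
        resolver = self._output_resolver(pipeline_pb2.Pipeline.ASYNC)
        output_artifacts = resolver.generate_output_artifacts(1)  # placeholder id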
Code Example #8
    def _resolve_inputs_and_generate_tasks_for_node(
        self,
        node: pipeline_pb2.PipelineNode,
    ) -> List[task_lib.Task]:
        """Generates tasks for a node by freshly resolving inputs."""
        result = []
        node_uid = task_lib.NodeUid.from_pipeline_node(self._pipeline, node)
        resolved_info = task_gen_utils.generate_resolved_info(
            self._mlmd_handle, node)
        if resolved_info is None:
            result.append(
                task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                             state=pstate.NodeState.SKIPPED))
            return result

        if not resolved_info.input_artifacts:
            error_msg = f'failure to resolve inputs; node uid: {node_uid}'
            result.append(
                task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                             state=pstate.NodeState.FAILED,
                                             status=status_lib.Status(
                                                 code=status_lib.Code.ABORTED,
                                                 message=error_msg)))
            return result
        # TODO(b/207038460): Update sync pipeline to support ForEach.
        input_artifacts = resolved_info.input_artifacts[0]

        execution = execution_publish_utils.register_execution(
            metadata_handler=self._mlmd_handle,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)
        output_artifacts = outputs_resolver.generate_output_artifacts(
            execution.id)

        # For mixed service nodes, we ensure node services and check service
        # status; pipeline is aborted if the service jobs have failed.
        service_status = self._ensure_node_services_if_mixed(node.node_info.id)
        if service_status == service_jobs.ServiceStatus.FAILED:
            error_msg = f'associated service job failed; node uid: {node_uid}'
            result.append(
                task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                             state=pstate.NodeState.FAILED,
                                             status=status_lib.Status(
                                                 code=status_lib.Code.ABORTED,
                                                 message=error_msg)))
            return result

        outputs_utils.make_output_dirs(output_artifacts)
        result.append(
            task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                         state=pstate.NodeState.RUNNING))
        result.append(
            task_lib.ExecNodeTask(
                node_uid=node_uid,
                execution_id=execution.id,
                contexts=resolved_info.contexts,
                input_artifacts=input_artifacts,
                exec_properties=resolved_info.exec_properties,
                output_artifacts=output_artifacts,
                executor_output_uri=outputs_resolver.get_executor_output_uri(
                    execution.id),
                stateful_working_dir=(
                    outputs_resolver.get_stateful_working_directory(
                        execution.id)),
                tmp_dir=outputs_resolver.make_tmp_dir(execution.id),
                pipeline=self._pipeline))
        return result
Code Example #9
    def _maybe_generate_task(
        self,
        node: pipeline_pb2.PipelineNode,
        node_executions: Sequence[metadata_store_pb2.Execution],
        successful_node_ids: MutableSet[str],
    ) -> Optional[task_lib.Task]:
        """Generates a task to execute or `None` if no action is required.

    If node is executable, `ExecNodeTask` is returned.

    If node execution is infeasible due to unsatisfied preconditions such as
    missing inputs or service job failure, task to abort the pipeline is
    returned.

    If caching is enabled and previously computed outputs are found, those
    outputs are used to finish the execution. Since node execution can be
    elided, `None` is returned after adding the node_id to the
    `successful_node_ids` set.

    Args:
      node: The pipeline node for which to generate a task.
      node_executions: Node executions fetched from MLMD.
      successful_node_ids: Set that tracks successful node ids.

    Returns:
      An `ExecNodeTask` if the node can be executed, or `None` if no action is
      required. If an error occurs, a `FinalizePipelineTask` is returned to
      abort the pipeline execution.
    """
        result = task_gen_utils.generate_task_from_active_execution(
            self._mlmd_handle, self._pipeline, node, node_executions)
        if result:
            return result

        node_uid = task_lib.NodeUid.from_pipeline_node(self._pipeline, node)
        resolved_info = task_gen_utils.generate_resolved_info(
            self._mlmd_handle, node)
        if resolved_info.input_artifacts is None:
            return self._abort_task(
                f'failure to resolve inputs; node uid {node_uid}')

        execution = execution_publish_utils.register_execution(
            metadata_handler=self._mlmd_handle,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)
        output_artifacts = outputs_resolver.generate_output_artifacts(
            execution.id)

        # Check if we can elide node execution by reusing previously computed
        # outputs for the node.
        cache_context = cache_utils.get_cache_context(
            self._mlmd_handle,
            pipeline_node=node,
            pipeline_info=self._pipeline.pipeline_info,
            executor_spec=_get_executor_spec(self._pipeline,
                                             node.node_info.id),
            input_artifacts=resolved_info.input_artifacts,
            output_artifacts=output_artifacts,
            parameters=resolved_info.exec_properties)
        contexts = resolved_info.contexts + [cache_context]
        if node.execution_options.caching_options.enable_cache:
            cached_outputs = cache_utils.get_cached_outputs(
                self._mlmd_handle, cache_context=cache_context)
            if cached_outputs is not None:
                logging.info(
                    'Eliding node execution, using cached outputs; node uid: %s',
                    node_uid)
                execution_publish_utils.publish_cached_execution(
                    self._mlmd_handle,
                    contexts=contexts,
                    execution_id=execution.id,
                    output_artifacts=cached_outputs)
                successful_node_ids.add(node.node_info.id)
                pstate.record_state_change_time()
                return None

        # For mixed service nodes, we ensure node services and check service
        # status; pipeline is aborted if the service jobs have failed.
        service_status = self._ensure_node_services_if_mixed(node.node_info.id)
        if service_status == service_jobs.ServiceStatus.FAILED:
            return self._abort_task(
                f'associated service job failed; node uid: {node_uid}')

        return task_lib.ExecNodeTask(
            node_uid=node_uid,
            execution_id=execution.id,
            contexts=contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties,
            output_artifacts=output_artifacts,
            executor_output_uri=outputs_resolver.get_executor_output_uri(
                execution.id),
            stateful_working_dir=(
                outputs_resolver.get_stateful_working_directory(execution.id)),
            pipeline=self._pipeline)
Code Example #10
    def _generate_task(
            self, metadata_handler: metadata.Metadata,
            node: pipeline_pb2.PipelineNode) -> Optional[task_lib.Task]:
        """Generates a node execution task.

    If a node execution is not feasible, `None` is returned.

    Args:
      metadata_handler: A handler to access MLMD db.
      node: The pipeline node for which to generate a task.

    Returns:
      A `Task`, or `None` if task generation is deemed infeasible.
    """
        executions = task_gen_utils.get_executions(metadata_handler, node)
        result = task_gen_utils.generate_task_from_active_execution(
            metadata_handler, self._pipeline, node, executions)
        if result:
            return result

        resolved_info = task_gen_utils.generate_resolved_info(
            metadata_handler, node)
        if resolved_info.input_artifacts is None or not any(
                resolved_info.input_artifacts.values()):
            logging.info(
                'Task cannot be generated for node %s since no input artifacts '
                'are resolved.', node.node_info.id)
            return None

        # If the latest successful execution had the same resolved input artifacts,
        # the component should not be triggered, so task is not generated.
        # TODO(b/170231077): This logic should be handled by the resolver when it's
        # implemented. Also, currently only the artifact ids of previous execution
        # are checked to decide if a new execution is warranted but it may also be
        # necessary to factor in the difference of execution properties.
        latest_exec = task_gen_utils.get_latest_successful_execution(
            executions)
        if latest_exec:
            artifact_ids_by_event_type = (
                execution_lib.get_artifact_ids_by_event_type_for_execution_id(
                    metadata_handler, latest_exec.id))
            latest_exec_input_artifact_ids = artifact_ids_by_event_type.get(
                metadata_store_pb2.Event.INPUT, set())
            current_exec_input_artifact_ids = set(
                a.id for a in itertools.chain(
                    *resolved_info.input_artifacts.values()))
            if latest_exec_input_artifact_ids == current_exec_input_artifact_ids:
                return None

        node_uid = task_lib.NodeUid.from_pipeline_node(self._pipeline, node)
        execution = execution_publish_utils.register_execution(
            metadata_handler=metadata_handler,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)

        # For mixed service nodes, we ensure node services and check service
        # status; the node is aborted if its service jobs have failed.
        service_status = self._ensure_node_services_if_mixed(node.node_info.id)
        if service_status is not None:
            if service_status != service_jobs.ServiceStatus.RUNNING:
                return self._abort_node_task(node_uid)

        return task_lib.ExecNodeTask(
            node_uid=node_uid,
            execution=execution,
            contexts=resolved_info.contexts,
            input_artifacts=resolved_info.input_artifacts,
            exec_properties=resolved_info.exec_properties,
            output_artifacts=outputs_resolver.generate_output_artifacts(
                execution.id),
            executor_output_uri=outputs_resolver.get_executor_output_uri(
                execution.id),
            stateful_working_dir=(
                outputs_resolver.get_stateful_working_directory(execution.id)),
            pipeline=self._pipeline)
Code Example #11
    def _generate_tasks_for_node(
            self, metadata_handler: metadata.Metadata,
            node: pipeline_pb2.PipelineNode) -> List[task_lib.Task]:
        """Generates a node execution task.

    If a node execution is not feasible, `None` is returned.

    Args:
      metadata_handler: A handler to access MLMD db.
      node: The pipeline node for which to generate a task.

    Returns:
      Returns a `Task` or `None` if task generation is deemed infeasible.
    """
        result = []
        node_uid = task_lib.NodeUid.from_pipeline_node(self._pipeline, node)

        executions = task_gen_utils.get_executions(metadata_handler, node)
        exec_node_task = task_gen_utils.generate_task_from_active_execution(
            metadata_handler, self._pipeline, node, executions)
        if exec_node_task:
            result.append(
                task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                             state=pstate.NodeState.RUNNING))
            result.append(exec_node_task)
            return result

        resolved_info = task_gen_utils.generate_resolved_info(
            metadata_handler, node)
        # TODO(b/207038460): Update async pipeline to support ForEach.
        if (resolved_info is None or not resolved_info.input_artifacts
                or resolved_info.input_artifacts[0] is None
                or not any(resolved_info.input_artifacts[0].values())):
            logging.info(
                'Task cannot be generated for node %s since no input artifacts '
                'are resolved.', node.node_info.id)
            return result
        input_artifact = resolved_info.input_artifacts[0]

        executor_spec_fingerprint = hashlib.sha256()
        executor_spec = task_gen_utils.get_executor_spec(
            self._pipeline_state.pipeline, node.node_info.id)
        if executor_spec is not None:
            executor_spec_fingerprint.update(
                executor_spec.SerializeToString(deterministic=True))
        resolved_info.exec_properties[
            constants.EXECUTOR_SPEC_FINGERPRINT_KEY] = (
                executor_spec_fingerprint.hexdigest())

        # If the latest execution had the same resolved input artifacts, execution
        # properties and executor specs, we should not trigger a new execution.
        latest_exec = task_gen_utils.get_latest_execution(executions)
        if latest_exec:
            artifact_ids_by_event_type = (
                execution_lib.get_artifact_ids_by_event_type_for_execution_id(
                    metadata_handler, latest_exec.id))
            latest_exec_input_artifact_ids = artifact_ids_by_event_type.get(
                metadata_store_pb2.Event.INPUT, set())
            current_exec_input_artifact_ids = set(
                a.id for a in itertools.chain(*input_artifact.values()))
            latest_exec_properties = task_gen_utils.extract_properties(
                latest_exec)
            current_exec_properties = resolved_info.exec_properties
            latest_exec_executor_spec_fp = latest_exec_properties[
                constants.EXECUTOR_SPEC_FINGERPRINT_KEY]
            current_exec_executor_spec_fp = resolved_info.exec_properties[
                constants.EXECUTOR_SPEC_FINGERPRINT_KEY]
            if (latest_exec_input_artifact_ids
                    == current_exec_input_artifact_ids
                    and _exec_properties_match(latest_exec_properties,
                                               current_exec_properties)
                    and latest_exec_executor_spec_fp
                    == current_exec_executor_spec_fp):
                result.append(
                    task_lib.UpdateNodeStateTask(
                        node_uid=node_uid, state=pstate.NodeState.STARTED))
                return result

        execution = execution_publish_utils.register_execution(
            metadata_handler=metadata_handler,
            execution_type=node.node_info.type,
            contexts=resolved_info.contexts,
            input_artifacts=input_artifact,
            exec_properties=resolved_info.exec_properties)
        outputs_resolver = outputs_utils.OutputsResolver(
            node, self._pipeline.pipeline_info, self._pipeline.runtime_spec,
            self._pipeline.execution_mode)

        # For mixed service nodes, we ensure node services and check service
        # status; the node is aborted if its service jobs have failed.
        service_status = self._ensure_node_services_if_mixed(node.node_info.id)
        if service_status is not None:
            if service_status != service_jobs.ServiceStatus.RUNNING:
                error_msg = f'associated service job failed; node uid: {node_uid}'
                result.append(
                    task_lib.UpdateNodeStateTask(
                        node_uid=node_uid,
                        state=pstate.NodeState.FAILED,
                        status=status_lib.Status(code=status_lib.Code.ABORTED,
                                                 message=error_msg)))
                return result

        output_artifacts = outputs_resolver.generate_output_artifacts(
            execution.id)
        outputs_utils.make_output_dirs(output_artifacts)
        result.append(
            task_lib.UpdateNodeStateTask(node_uid=node_uid,
                                         state=pstate.NodeState.RUNNING))
        result.append(
            task_lib.ExecNodeTask(
                node_uid=node_uid,
                execution_id=execution.id,
                contexts=resolved_info.contexts,
                input_artifacts=input_artifact,
                exec_properties=resolved_info.exec_properties,
                output_artifacts=output_artifacts,
                executor_output_uri=outputs_resolver.get_executor_output_uri(
                    execution.id),
                stateful_working_dir=(
                    outputs_resolver.get_stateful_working_directory(
                        execution.id)),
                tmp_dir=outputs_resolver.make_tmp_dir(execution.id),
                pipeline=self._pipeline))
        return result