def _publish_failed_execution(
    self,
    execution_id: int,
    contexts: List[metadata_store_pb2.Context],
    executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> None:
  """Publishes a failed execution to ML metadata.

  Args:
    execution_id: Id of the MLMD execution to mark as FAILED.
    contexts: MLMD contexts to be associated with the execution.
    executor_output: Optional executor output carrying error details;
      forwarded verbatim to the publish utility. Defaults to None, which
      preserves the previous behavior of publishing without error details.
  """
  with self._mlmd_connection as m:
    execution_publish_utils.publish_failed_execution(
        metadata_handler=m,
        execution_id=execution_id,
        contexts=contexts,
        executor_output=executor_output)
def testPublishFailedExecution(self):
  """Verifies that publishing a failure marks the execution FAILED."""
  with metadata.Metadata(connection_config=self._connection_config) as m:
    contexts = self._generate_contexts(m)
    registered_execution = execution_publish_utils.register_execution(
        m, self._execution_type, contexts)
    execution_publish_utils.publish_failed_execution(
        m, contexts, registered_execution.id)
    [execution] = m.store.get_executions_by_id([registered_execution.id])
    self.assertProtoPartiallyEquals(
        """
        id: 1
        type_id: 3
        last_known_state: FAILED
        """,
        execution,
        ignored_fields=[
            'create_time_since_epoch', 'last_update_time_since_epoch'
        ])
    # A failed execution publishes no artifacts, hence no events.
    self.assertEmpty(m.store.get_events_by_execution_ids([execution.id]))
    # Every prepared context must be linked to the execution.
    self.assertCountEqual(
        [c.id for c in contexts],
        [c.id for c in m.store.get_contexts_by_execution(execution.id)])
def testPublishSuccessExecutionDropsEmptyResult(self):
  """Checks that an OK (code 0) executor result is not stored on the execution."""
  # NOTE(review): despite "Success" in the name, this goes through
  # publish_failed_execution — confirm the intended test name.
  with metadata.Metadata(connection_config=self._connection_config) as m:
    executor_output = text_format.Parse(
        """
        execution_result {
          code: 0
        }
        """, execution_result_pb2.ExecutorOutput())
    contexts = self._generate_contexts(m)
    registered_execution = execution_publish_utils.register_execution(
        m, self._execution_type, contexts)
    execution_publish_utils.publish_failed_execution(
        m, contexts, registered_execution.id, executor_output)
    [execution] = m.store.get_executions_by_id([registered_execution.id])
    # The expected proto has no custom_properties: an empty/OK
    # execution_result must be dropped rather than recorded.
    self.assertProtoPartiallyEquals(
        """
        id: 1
        last_known_state: FAILED
        """,
        execution,
        ignored_fields=[
            'type_id', 'create_time_since_epoch', 'last_update_time_since_epoch'
        ])
def _publish_failed_execution(
    self,
    execution_id: int,
    contexts: List[metadata_store_pb2.Context],
    executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> None:
  """Records the given execution as FAILED in ML metadata.

  Args:
    execution_id: Id of the MLMD execution to mark as FAILED.
    contexts: MLMD contexts associated with the execution.
    executor_output: Optional executor output with error details, passed
      through to the publish utility unchanged.
  """
  with self._mlmd_connection as mlmd_handle:
    execution_publish_utils.publish_failed_execution(
        metadata_handler=mlmd_handle,
        contexts=contexts,
        execution_id=execution_id,
        executor_output=executor_output)
def testPublishFailedExecution(self):
  """Verifies FAILED state plus serialized error details on the execution."""
  with metadata.Metadata(connection_config=self._connection_config) as m:
    executor_output = text_format.Parse(
        """
        execution_result {
          code: 1
          result_message: 'error message.'
        }
        """, execution_result_pb2.ExecutorOutput())
    contexts = self._generate_contexts(m)
    registered_execution = execution_publish_utils.register_execution(
        m, self._execution_type, contexts)
    execution_publish_utils.publish_failed_execution(
        m, contexts, registered_execution.id, executor_output)
    [execution] = m.store.get_executions_by_id([registered_execution.id])
    self.assertProtoPartiallyEquals(
        """
        id: 1
        last_known_state: FAILED
        custom_properties {
          key: '__execution_result__'
          value {
            string_value: '{\\n  "resultMessage": "error message.",\\n  "code": 1\\n}'
          }
        }
        """,
        execution,
        ignored_fields=[
            'type_id', 'create_time_since_epoch', 'last_update_time_since_epoch'
        ])
    # No artifacts were published, so no events should exist.
    self.assertEmpty(m.store.get_events_by_execution_ids([execution.id]))
    # Context-execution edges must be present for every prepared context.
    self.assertCountEqual(
        [c.id for c in contexts],
        [c.id for c in m.store.get_contexts_by_execution(execution.id)])
def run(
    self, mlmd_connection: metadata.Metadata,
    pipeline_node: pipeline_pb2.PipelineNode,
    pipeline_info: pipeline_pb2.PipelineInfo,
    pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec
) -> data_types.ExecutionInfo:
  """Runs Resolver specific logic.

  Args:
    mlmd_connection: ML metadata connection.
    pipeline_node: The specification of the node that this launcher launches.
    pipeline_info: The information of the pipeline that this node runs in.
    pipeline_runtime_spec: The runtime information of the pipeline that this
      node runs in.

  Returns:
    The execution of the run.
  """
  # Fixed grammar in the log message ("an resolver" -> "a resolver").
  logging.info('Running as a resolver node.')
  with mlmd_connection as m:
    # 1. Prepares all contexts.
    contexts = context_lib.prepare_contexts(
        metadata_handler=m, node_contexts=pipeline_node.contexts)

    # 2. Resolves inputs and execution properties.
    exec_properties = data_types_utils.build_parsed_value_dict(
        inputs_utils.resolve_parameters_with_schema(
            node_parameters=pipeline_node.parameters))
    try:
      resolved_inputs = inputs_utils.resolve_input_artifacts_v2(
          pipeline_node=pipeline_node, metadata_handler=m)
    except exceptions.InputResolutionError as e:
      # Input resolution failed: still register the execution so the failure
      # is visible in MLMD, then immediately mark it FAILED with the error
      # code attached for downstream inspection.
      execution = execution_publish_utils.register_execution(
          metadata_handler=m,
          execution_type=pipeline_node.node_info.type,
          contexts=contexts,
          exec_properties=exec_properties)
      execution_publish_utils.publish_failed_execution(
          metadata_handler=m,
          contexts=contexts,
          execution_id=execution.id,
          executor_output=self._build_error_output(code=e.grpc_code_value))
      return data_types.ExecutionInfo(
          execution_id=execution.id,
          exec_properties=exec_properties,
          pipeline_node=pipeline_node,
          pipeline_info=pipeline_info)

    # 2a. If Skip (i.e. inside conditional), no execution should be made.
    # TODO(b/197907821): Publish special execution for Skip?
    if isinstance(resolved_inputs, inputs_utils.Skip):
      return data_types.ExecutionInfo()

    # 3. Registers execution in metadata.
    execution = execution_publish_utils.register_execution(
        metadata_handler=m,
        execution_type=pipeline_node.node_info.type,
        contexts=contexts,
        exec_properties=exec_properties)

    # TODO(b/197741942): Support len > 1.
    if len(resolved_inputs) > 1:
      # Multiple resolved input dicts are not supported yet; publish the
      # execution as FAILED with an UNIMPLEMENTED error rather than silently
      # dropping the extra inputs.
      execution_publish_utils.publish_failed_execution(
          metadata_handler=m,
          contexts=contexts,
          execution_id=execution.id,
          executor_output=self._build_error_output(
              _ERROR_CODE_UNIMPLEMENTED,
              'Handling more than one input dicts not implemented yet.'))
      return data_types.ExecutionInfo(
          execution_id=execution.id,
          exec_properties=exec_properties,
          pipeline_node=pipeline_node,
          pipeline_info=pipeline_info)

    input_artifacts = resolved_inputs[0]

    # 4. Publish the execution as a cached execution with
    # resolved input artifact as the output artifacts.
    execution_publish_utils.publish_internal_execution(
        metadata_handler=m,
        contexts=contexts,
        execution_id=execution.id,
        output_artifacts=input_artifacts)

    return data_types.ExecutionInfo(
        execution_id=execution.id,
        input_dict=input_artifacts,
        output_dict=input_artifacts,
        exec_properties=exec_properties,
        pipeline_node=pipeline_node,
        pipeline_info=pipeline_info)