def execute(self, trans, progress, invocation, step):
    tool = trans.app.toolbox.get_tool(step.tool_id, tool_version=step.tool_version)
    tool_state = step.state
    # Not strictly needed - but keep Tool state clean by stripping runtime
    # metadata parameters from it.
    if RUNTIME_STEP_META_STATE_KEY in tool_state.inputs:
        del tool_state.inputs[RUNTIME_STEP_META_STATE_KEY]
    collections_to_match = self._find_collections_to_match(tool, progress, step)
    # Have implicit collections...
    if collections_to_match.has_collections():
        collection_info = self.trans.app.dataset_collections_service.match_collections(collections_to_match)
    else:
        collection_info = None
    param_combinations = []
    if collection_info:
        iteration_elements_iter = collection_info.slice_collections()
    else:
        iteration_elements_iter = [None]
    for iteration_elements in iteration_elements_iter:
        execution_state = tool_state.copy()
        # TODO: Move next step into copy()
        execution_state.inputs = make_dict_copy(execution_state.inputs)

        # Connect up
        def callback(input, value, prefixed_name, prefixed_label):
            replacement = None
            if isinstance(input, DataToolParameter) or isinstance(input, DataCollectionToolParameter):
                if iteration_elements and prefixed_name in iteration_elements:
                    if isinstance(input, DataToolParameter):
                        # Pull out dataset instance from element.
                        replacement = iteration_elements[prefixed_name].dataset_instance
                    else:
                        # If collection - just use element model object.
                        replacement = iteration_elements[prefixed_name]
                else:
                    replacement = progress.replacement_for_tool_input(step, input, prefixed_name)
            return replacement

        try:
            # Replace DummyDatasets with HistoryDatasetAssociations
            visit_input_values(tool.inputs, execution_state.inputs, callback)
        except KeyError as k:
            message_template = "Error due to input mapping of '%s' in '%s'. A common cause of this is conditional outputs that cannot be determined until runtime, please review your workflow."
            message = message_template % (tool.name, k.message)
            raise exceptions.MessageException(message)
        param_combinations.append(execution_state.inputs)
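The variant above delegates the actual substitution to visit_input_values, which walks the tool's (possibly nested) input state and hands each parameter to the callback, whose return value either replaces the stored value or leaves it untouched. The standalone sketch below is not Galaxy's implementation; assuming only that general shape, it illustrates how a recursive visitor plus a callback can swap connected datasets into a nested state dictionary, with None meaning "leave the value alone" as in the older callback above. The visit_state() helper and the sample data are hypothetical.

# Illustrative sketch only: visit_state() and the sample data are hypothetical
# stand-ins for visit_input_values() and Galaxy's tool state, not the real API.
def visit_state(state, callback, prefix=""):
    for name, value in state.items():
        prefixed_name = prefix + name
        if isinstance(value, dict):
            # Recurse into nested sections (conditionals, repeats, ...).
            visit_state(value, callback, prefix=prefixed_name + "|")
        else:
            replacement = callback(value, prefixed_name)
            if replacement is not None:
                state[name] = replacement


state = {"reference": "__dummy__", "settings": {"query": "__dummy__", "threshold": 0.5}}
connected = {"reference": "hda_1", "settings|query": "hda_2"}

# The callback plays the same role as the closures above: return a replacement
# for connected inputs, or None to leave the value untouched.
visit_state(state, lambda value, name: connected.get(name))
# state is now {'reference': 'hda_1', 'settings': {'query': 'hda_2', 'threshold': 0.5}}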
def execute(self, trans, progress, invocation, step):
    tool = trans.app.toolbox.get_tool(step.tool_id, tool_version=step.tool_version)
    tool_state = step.state
    # Not strictly needed - but keep Tool state clean by stripping runtime
    # metadata parameters from it.
    if RUNTIME_STEP_META_STATE_KEY in tool_state.inputs:
        del tool_state.inputs[RUNTIME_STEP_META_STATE_KEY]
    collections_to_match = self._find_collections_to_match(tool, progress, step)
    # Have implicit collections...
    if collections_to_match.has_collections():
        collection_info = self.trans.app.dataset_collections_service.match_collections(collections_to_match)
    else:
        collection_info = None
    param_combinations = []
    if collection_info:
        iteration_elements_iter = collection_info.slice_collections()
    else:
        iteration_elements_iter = [None]
    for iteration_elements in iteration_elements_iter:
        execution_state = tool_state.copy()
        # TODO: Move next step into copy()
        execution_state.inputs = make_dict_copy(execution_state.inputs)
        expected_replacement_keys = set(step.input_connections_by_name.keys())
        found_replacement_keys = set()

        # Connect up
        def callback(input, prefixed_name, **kwargs):
            replacement = NO_REPLACEMENT
            if isinstance(input, DataToolParameter) or isinstance(input, DataCollectionToolParameter):
                if iteration_elements and prefixed_name in iteration_elements:
                    if isinstance(input, DataToolParameter):
                        # Pull out dataset instance from element.
                        replacement = iteration_elements[prefixed_name].dataset_instance
                        if hasattr(iteration_elements[prefixed_name], u'element_identifier') and iteration_elements[prefixed_name].element_identifier:
                            replacement.element_identifier = iteration_elements[prefixed_name].element_identifier
                    else:
                        # If collection - just use element model object.
                        replacement = iteration_elements[prefixed_name]
                else:
                    replacement = progress.replacement_for_tool_input(step, input, prefixed_name)
            else:
                replacement = progress.replacement_for_tool_input(step, input, prefixed_name)
            if replacement is not NO_REPLACEMENT:
                found_replacement_keys.add(prefixed_name)
            return replacement

        try:
            # Replace DummyDatasets with historydatasetassociations
            visit_input_values(tool.inputs, execution_state.inputs, callback, no_replacement_value=NO_REPLACEMENT)
        except KeyError as k:
            message_template = "Error due to input mapping of '%s' in '%s'. A common cause of this is conditional outputs that cannot be determined until runtime, please review your workflow."
            message = message_template % (tool.name, k.message)
            raise exceptions.MessageException(message)

        unmatched_input_connections = expected_replacement_keys - found_replacement_keys
        if unmatched_input_connections:
            log.warn("Failed to use input connections for inputs [%s]" % unmatched_input_connections)

        param_combinations.append(execution_state.inputs)

    try:
        execution_tracker = execute(
            trans=self.trans,
            tool=tool,
            param_combinations=param_combinations,
            history=invocation.history,
            collection_info=collection_info,
            workflow_invocation_uuid=invocation.uuid.hex
        )
    except ToolInputsNotReadyException:
        delayed_why = "tool [%s] inputs are not ready, this special tool requires inputs to be ready" % tool.id
        raise DelayedWorkflowEvaluation(why=delayed_why)

    if collection_info:
        step_outputs = dict(execution_tracker.implicit_collections)
    else:
        step_outputs = dict(execution_tracker.output_datasets)
    step_outputs.update(execution_tracker.output_collections)
    progress.set_step_outputs(step, step_outputs)
    jobs = execution_tracker.successful_jobs
    for job in jobs:
        self._handle_post_job_actions(step, job, invocation.replacement_dict)
    if execution_tracker.execution_errors:
        failed_count = len(execution_tracker.execution_errors)
        success_count = len(execution_tracker.successful_jobs)
        all_count = failed_count + success_count
        message = "Failed to create %d out of %s job(s) for workflow step." % (failed_count, all_count)
        raise Exception(message)
    return jobs
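The newer variant above no longer uses None as the "no replacement" marker: a dedicated NO_REPLACEMENT sentinel lets the callback return falsy but legitimate replacements, and the expected_replacement_keys / found_replacement_keys sets flag input connections that never produced one. The following is a minimal, hypothetical sketch of that sentinel pattern over a flat dictionary; it is not code from the project.

# Hypothetical sketch of the sentinel pattern used above; Galaxy's
# NO_REPLACEMENT is analogous, but this code is not taken from the project.
NO_REPLACEMENT = object()

def replace_inputs(state, replacements):
    expected_keys = set(replacements)
    found_keys = set()

    def callback(value, name):
        # Return a replacement if one is connected, otherwise the sentinel.
        replacement = replacements.get(name, NO_REPLACEMENT)
        if replacement is not NO_REPLACEMENT:
            found_keys.add(name)
        return replacement

    for name in list(state):
        new_value = callback(state[name], name)
        if new_value is not NO_REPLACEMENT:
            state[name] = new_value

    unmatched = expected_keys - found_keys
    if unmatched:
        print("Failed to use input connections for inputs [%s]" % unmatched)
    return state

replace_inputs({"a": 1, "b": None}, {"b": 0, "c": 2})
# 'b' is replaced with the falsy-but-valid value 0; 'c' is reported as unmatched.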
def execute( self, trans, progress, invocation, step ):
    tool = trans.app.toolbox.get_tool( step.tool_id, tool_version=step.tool_version )
    tool_state = step.state
    # Not strictly needed - but keep Tool state clean by stripping runtime
    # metadata parameters from it.
    if RUNTIME_STEP_META_STATE_KEY in tool_state.inputs:
        del tool_state.inputs[ RUNTIME_STEP_META_STATE_KEY ]
    collections_to_match = self._find_collections_to_match( tool, progress, step )
    # Have implicit collections...
    if collections_to_match.has_collections():
        collection_info = self.trans.app.dataset_collections_service.match_collections( collections_to_match )
    else:
        collection_info = None
    param_combinations = []
    if collection_info:
        iteration_elements_iter = collection_info.slice_collections()
    else:
        iteration_elements_iter = [ None ]
    for iteration_elements in iteration_elements_iter:
        execution_state = tool_state.copy()
        # TODO: Move next step into copy()
        execution_state.inputs = make_dict_copy( execution_state.inputs )

        # Connect up
        def callback( input, prefixed_name, **kwargs ):
            replacement = NO_REPLACEMENT
            if isinstance( input, DataToolParameter ) or isinstance( input, DataCollectionToolParameter ):
                if iteration_elements and prefixed_name in iteration_elements:
                    if isinstance( input, DataToolParameter ):
                        # Pull out dataset instance from element.
                        replacement = iteration_elements[ prefixed_name ].dataset_instance
                        if hasattr(iteration_elements[ prefixed_name ], u'element_identifier') and iteration_elements[ prefixed_name ].element_identifier:
                            replacement.element_identifier = iteration_elements[ prefixed_name ].element_identifier
                    else:
                        # If collection - just use element model object.
                        replacement = iteration_elements[ prefixed_name ]
                else:
                    replacement = progress.replacement_for_tool_input( step, input, prefixed_name )
            else:
                replacement = progress.replacement_for_tool_input( step, input, prefixed_name )
            return replacement

        try:
            # Replace DummyDatasets with historydatasetassociations
            visit_input_values( tool.inputs, execution_state.inputs, callback, no_replacement_value=NO_REPLACEMENT )
        except KeyError as k:
            message_template = "Error due to input mapping of '%s' in '%s'. A common cause of this is conditional outputs that cannot be determined until runtime, please review your workflow."
            message = message_template % (tool.name, k.message)
            raise exceptions.MessageException( message )
        param_combinations.append( execution_state.inputs )

    try:
        execution_tracker = execute(
            trans=self.trans,
            tool=tool,
            param_combinations=param_combinations,
            history=invocation.history,
            collection_info=collection_info,
            workflow_invocation_uuid=invocation.uuid.hex
        )
    except ToolInputsNotReadyException:
        delayed_why = "tool [%s] inputs are not ready, this special tool requires inputs to be ready" % tool.id
        raise DelayedWorkflowEvaluation(why=delayed_why)

    if collection_info:
        step_outputs = dict( execution_tracker.implicit_collections )
    else:
        step_outputs = dict( execution_tracker.output_datasets )
    step_outputs.update( execution_tracker.output_collections )
    progress.set_step_outputs( step, step_outputs )
    jobs = execution_tracker.successful_jobs
    for job in jobs:
        self._handle_post_job_actions( step, job, invocation.replacement_dict )
    if execution_tracker.execution_errors:
        failed_count = len(execution_tracker.execution_errors)
        success_count = len(execution_tracker.successful_jobs)
        all_count = failed_count + success_count
        message = "Failed to create %d out of %s job(s) for workflow step." % (failed_count, all_count)
        raise Exception(message)
    return jobs
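Across all variants, slice_collections() drives the mapping-over-collections behaviour: each slice yields one dict of elements keyed by input name, and the loop copies the base tool state once per slice to build param_combinations, while the no-collection case falls back to a single [None] iteration and therefore exactly one parameter set. A toy sketch of that pattern follows, assuming plain lists stand in for matched collections; the names are illustrative and do not correspond to Galaxy's dataset collection API.

# Toy sketch of building one parameter set per collection slice.
def slice_collections(collections):
    # Zip matched collections element-wise: one dict of elements per slice.
    names = list(collections)
    for elements in zip(*collections.values()):
        yield dict(zip(names, elements))

base_state = {"threshold": 0.5}
matched = {"input1": ["hda_a1", "hda_a2"], "input2": ["hda_b1", "hda_b2"]}

param_combinations = []
iteration_elements_iter = slice_collections(matched) if matched else [None]
for iteration_elements in iteration_elements_iter:
    execution_state = dict(base_state)
    if iteration_elements:
        execution_state.update(iteration_elements)
    param_combinations.append(execution_state)

# param_combinations -> one tool execution per slice:
# [{'threshold': 0.5, 'input1': 'hda_a1', 'input2': 'hda_b1'},
#  {'threshold': 0.5, 'input1': 'hda_a2', 'input2': 'hda_b2'}]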