def _initialize_metadata_inputs(dataset, path_for_part, tmp_dir, kwds, real_metadata_object=True):
    filename_out = path_for_part("out")
    filename_results_code = path_for_part("results")
    filename_kwds = path_for_part("kwds")
    filename_override_metadata = path_for_part("override")
    open(filename_out, 'wt+')  # create the file on disk, so it cannot be reused by tempfile (unlikely, but possible)
    # create the file on disk, so it cannot be reused by tempfile (unlikely, but possible)
    json.dump((False, 'External set_meta() not called'), open(filename_results_code, 'wt+'))
    json.dump(kwds, open(filename_kwds, 'wt+'), ensure_ascii=True)
    override_metadata = []
    for meta_key, spec_value in dataset.metadata.spec.items():
        if isinstance(spec_value.param, FileParameter) and dataset.metadata.get(meta_key, None) is not None:
            if not real_metadata_object:
                metadata_temp = MetadataTempFile()
                metadata_temp.tmp_dir = tmp_dir
                shutil.copy(dataset.metadata.get(meta_key, None).file_name, metadata_temp.file_name)
                override_metadata.append((meta_key, metadata_temp.to_JSON()))
    json.dump(override_metadata, open(filename_override_metadata, 'wt+'))
    return filename_out, filename_results_code, filename_kwds, filename_override_metadata
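# The helper above swaps each FileParameter metadata value for a MetadataTempFile
# placeholder so it can be shipped to, and rewritten on, a remote node. A minimal,
# hedged sketch of that round trip; the scratch directory, the "bam_index" key and
# the file contents are assumptions for illustration, and the import path can
# differ between Galaxy versions.
import json
import tempfile

from galaxy.model.metadata import MetadataTempFile

temp = MetadataTempFile()
temp.tmp_dir = tempfile.mkdtemp()  # assumed scratch directory
with open(temp.file_name, "w") as fh:
    fh.write("placeholder index contents")

# The (key, JSON) pairs written to the override file by _initialize_metadata_inputs...
serialized = json.dumps([("bam_index", temp.to_JSON())])

# ...are restored on the compute node before set_meta() runs.
for name, value in json.loads(serialized):
    if MetadataTempFile.is_JSONified_value(value):
        restored = MetadataTempFile.from_JSON(value)
        print(name, restored.file_name)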
def test_edit_metadata_files():
    app = _mock_app(store_by="uuid")
    sa_session = app.model.context
    u = model.User(email="*****@*****.**", password="******")
    h = model.History(name="Test History", user=u)
    d1 = _create_datasets(sa_session, h, 1, extension="bam")[0]
    sa_session.add_all((h, d1))
    sa_session.flush()
    index = NamedTemporaryFile("w")
    index.write("cool bam index")
    metadata_dict = {"bam_index": MetadataTempFile.from_JSON({"kwds": {}, "filename": index.name})}
    d1.metadata.from_JSON_dict(json_dict=metadata_dict)
    assert d1.metadata.bam_index
    assert isinstance(d1.metadata.bam_index, model.MetadataFile)

    temp_directory = mkdtemp()
    with store.DirectoryModelExportStore(temp_directory, app=app, for_edit=True, strip_metadata_files=False) as export_store:
        export_store.add_dataset(d1)

    import_history = model.History(name="Test History for Import", user=u)
    sa_session.add(import_history)
    sa_session.flush()
    _perform_import_from_directory(temp_directory, app, u, import_history, store.ImportOptions(allow_edit=True))
def _create_hda(model, object_store, history, path, visible=True, include_metadata_file=False):
    hda = HistoryDatasetAssociation(extension="bam", create_dataset=True, sa_session=model.context)
    hda.visible = visible
    model.context.add(hda)
    model.context.flush([hda])
    object_store.update_from_file(hda, file_name=path, create=True)
    if include_metadata_file:
        hda.metadata.from_JSON_dict(json_dict={"bam_index": MetadataTempFile.from_JSON({"kwds": {}, "filename": path})})
        _check_metadata_file(hda)
    hda.set_size()
    history.add_dataset(hda)
    hda.add_item_annotation(model.context, history.user, hda, "annotation #%d" % hda.hid)
    return hda
def cleanup_external_metadata(self, sa_session):
    log.debug('Cleaning up external metadata files')
    for metadata_files in sa_session.query(galaxy.model.Job).get(self.job_id).external_output_metadata:
        # we need to confirm that any MetadataTempFile files were removed, if not we need to remove them
        # can occur if the job was stopped before completion, but a MetadataTempFile is used in the set_meta
        MetadataTempFile.cleanup_from_JSON_dict_filename(metadata_files.filename_out)
        dataset_key = self._get_dataset_metadata_key(metadata_files.dataset)
        for key, fname in [('filename_in', metadata_files.filename_in),
                           ('filename_out', metadata_files.filename_out),
                           ('filename_results_code', metadata_files.filename_results_code),
                           ('filename_kwds', metadata_files.filename_kwds),
                           ('filename_override_metadata', metadata_files.filename_override_metadata)]:
            try:
                os.remove(fname)
            except Exception as e:
                log.debug('Failed to cleanup external metadata file (%s) for %s: %s' % (key, dataset_key, e))
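# cleanup_from_JSON_dict_filename(), used above, scans a JSON metadata dump for
# serialized MetadataTempFile entries and deletes their backing temp files. A
# stand-alone, illustrative call; the path is a placeholder, not a real job file,
# and the import path may vary by Galaxy version.
from galaxy.model.metadata import MetadataTempFile

MetadataTempFile.cleanup_from_JSON_dict_filename("metadata/metadata_out_output1")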
def _create_hda(model, object_store, history, path, visible=True, include_metadata_file=False):
    hda = model.HistoryDatasetAssociation(extension="bam", create_dataset=True, sa_session=model.context)
    hda.visible = visible
    model.context.add(hda)
    model.context.flush([hda])
    object_store.update_from_file(hda, file_name=path, create=True)
    if include_metadata_file:
        hda.metadata.from_JSON_dict(json_dict={"bam_index": MetadataTempFile.from_JSON({"kwds": {}, "filename": path})})
        _check_metadata_file(hda)
    hda.set_size()
    history.add_dataset(hda)
    hda.add_item_annotation(model.context, history.user, hda, "annotation #%d" % hda.hid)
    return hda
def test_nested_collection_attributes(self):
    model = self.model
    u = model.User(email="*****@*****.**", password="******")
    h1 = model.History(name="History 1", user=u)
    d1 = model.HistoryDatasetAssociation(extension="bam", history=h1, create_dataset=True, sa_session=model.session)
    index = NamedTemporaryFile("w")
    index.write("cool bam index")
    index2 = NamedTemporaryFile("w")
    index2.write("cool bam index 2")
    metadata_dict = {"bam_index": MetadataTempFile.from_JSON({"kwds": {}, "filename": index.name}),
                     "bam_csi_index": MetadataTempFile.from_JSON({"kwds": {}, "filename": index2.name})}
    d1.metadata.from_JSON_dict(json_dict=metadata_dict)
    assert d1.metadata.bam_index
    assert d1.metadata.bam_csi_index
    assert isinstance(d1.metadata.bam_index, model.MetadataFile)
    assert isinstance(d1.metadata.bam_csi_index, model.MetadataFile)
    d2 = model.HistoryDatasetAssociation(extension="txt", history=h1, create_dataset=True, sa_session=model.session)
    c1 = model.DatasetCollection(collection_type='paired')
    dce1 = model.DatasetCollectionElement(collection=c1, element=d1, element_identifier="forward", element_index=0)
    dce2 = model.DatasetCollectionElement(collection=c1, element=d2, element_identifier="reverse", element_index=1)
    c2 = model.DatasetCollection(collection_type="list:paired")
    dce3 = model.DatasetCollectionElement(collection=c2, element=c1, element_identifier="inner_list", element_index=0)
    c3 = model.DatasetCollection(collection_type="list:list")
    c4 = model.DatasetCollection(collection_type="list:list:paired")
    dce4 = model.DatasetCollectionElement(collection=c4, element=c2, element_identifier="outer_list", element_index=0)
    model.session.add_all([d1, d2, c1, dce1, dce2, c2, dce3, c3, c4, dce4])
    model.session.flush()
    q = c2._get_nested_collection_attributes(element_attributes=('element_identifier',), hda_attributes=('extension',), dataset_attributes=('state',))
    assert [(r.keys()) for r in q] == [['element_identifier_0', 'element_identifier_1', 'extension', 'state'],
                                       ['element_identifier_0', 'element_identifier_1', 'extension', 'state']]
    assert q.all() == [('inner_list', 'forward', 'bam', 'new'), ('inner_list', 'reverse', 'txt', 'new')]
    q = c2._get_nested_collection_attributes(return_entities=(model.HistoryDatasetAssociation,))
    assert q.all() == [d1, d2]
    q = c2._get_nested_collection_attributes(return_entities=(model.HistoryDatasetAssociation, model.Dataset))
    assert q.all() == [(d1, d1.dataset), (d2, d2.dataset)]
    # Assert properties that use _get_nested_collection_attributes return correct content
    assert c2.dataset_instances == [d1, d2]
    assert c2.dataset_elements == [dce1, dce2]
    assert c2.dataset_action_tuples == []
    assert c2.populated_optimized
    assert c2.dataset_states_and_extensions_summary == ({'new'}, {'txt', 'bam'})
    assert c2.element_identifiers_extensions_paths_and_metadata_files == [
        [('inner_list', 'forward'), 'bam', 'mock_dataset_14.dat', [('bai', 'mock_dataset_14.dat'), ('bam.csi', 'mock_dataset_14.dat')]],
        [('inner_list', 'reverse'), 'txt', 'mock_dataset_14.dat', []],
    ]
    assert c3.dataset_instances == []
    assert c3.dataset_elements == []
    assert c3.dataset_states_and_extensions_summary == (set(), set())
    q = c4._get_nested_collection_attributes(element_attributes=('element_identifier',))
    assert q.all() == [('outer_list', 'inner_list', 'forward'), ('outer_list', 'inner_list', 'reverse')]
    assert c4.dataset_elements == [dce1, dce2]
    assert c4.element_identifiers_extensions_and_paths == [
        (('outer_list', 'inner_list', 'forward'), 'bam', 'mock_dataset_14.dat'),
        (('outer_list', 'inner_list', 'reverse'), 'txt', 'mock_dataset_14.dat'),
    ]
def set_metadata_portable():
    tool_job_working_directory = os.path.abspath(os.getcwd())
    metadata_tmp_files_dir = os.path.join(tool_job_working_directory, "metadata")
    MetadataTempFile.tmp_dir = metadata_tmp_files_dir

    metadata_params_path = os.path.join("metadata", "params.json")
    try:
        with open(metadata_params_path) as f:
            metadata_params = json.load(f)
    except OSError:
        raise Exception(f"Failed to find metadata/params.json from cwd [{tool_job_working_directory}]")
    datatypes_config = metadata_params["datatypes_config"]
    job_metadata = metadata_params["job_metadata"]
    provided_metadata_style = metadata_params.get("provided_metadata_style")
    max_metadata_value_size = metadata_params.get("max_metadata_value_size") or 0
    outputs = metadata_params["outputs"]

    datatypes_registry = validate_and_load_datatypes_config(datatypes_config)
    tool_provided_metadata = load_job_metadata(job_metadata, provided_metadata_style)

    def set_meta(new_dataset_instance, file_dict):
        set_meta_with_tool_provided(new_dataset_instance, file_dict, set_meta_kwds, datatypes_registry, max_metadata_value_size)

    object_store_conf_path = os.path.join("metadata", "object_store_conf.json")
    extended_metadata_collection = os.path.exists(object_store_conf_path)

    object_store = None
    job_context = None
    version_string = ""

    export_store = None
    final_job_state = Job.states.OK
    if extended_metadata_collection:
        tool_dict = metadata_params["tool"]
        stdio_exit_code_dicts, stdio_regex_dicts = tool_dict["stdio_exit_codes"], tool_dict["stdio_regexes"]
        stdio_exit_codes = list(map(ToolStdioExitCode, stdio_exit_code_dicts))
        stdio_regexes = list(map(ToolStdioRegex, stdio_regex_dicts))

        with open(object_store_conf_path) as f:
            config_dict = json.load(f)
        assert config_dict is not None
        object_store = build_object_store_from_config(None, config_dict=config_dict)
        Dataset.object_store = object_store

        outputs_directory = os.path.join(tool_job_working_directory, "outputs")
        if not os.path.exists(outputs_directory):
            outputs_directory = tool_job_working_directory

        # TODO: constants...
        if os.path.exists(os.path.join(outputs_directory, "tool_stdout")):
            with open(os.path.join(outputs_directory, "tool_stdout"), "rb") as f:
                tool_stdout = f.read()
            with open(os.path.join(outputs_directory, "tool_stderr"), "rb") as f:
                tool_stderr = f.read()
        elif os.path.exists(os.path.join(tool_job_working_directory, "stdout")):
            with open(os.path.join(tool_job_working_directory, "stdout"), "rb") as f:
                tool_stdout = f.read()
            with open(os.path.join(tool_job_working_directory, "stderr"), "rb") as f:
                tool_stderr = f.read()
        elif os.path.exists(os.path.join(outputs_directory, "stdout")):
            # Pulsar style output directory? Was this ever used - did this ever work?
            with open(os.path.join(outputs_directory, "stdout"), "rb") as f:
                tool_stdout = f.read()
            with open(os.path.join(outputs_directory, "stderr"), "rb") as f:
                tool_stderr = f.read()
        else:
            wdc = os.listdir(tool_job_working_directory)
            odc = os.listdir(outputs_directory)
            error_desc = "Failed to find tool_stdout or tool_stderr for this job, cannot collect metadata"
            error_extra = f"Working dir contents [{wdc}], output directory contents [{odc}]"
            log.warn(f"{error_desc}. {error_extra}")
            raise Exception(error_desc)

        job_id_tag = metadata_params["job_id_tag"]
        exit_code_file = default_exit_code_file(".", job_id_tag)
        tool_exit_code = read_exit_code_from(exit_code_file, job_id_tag)

        check_output_detected_state, tool_stdout, tool_stderr, job_messages = check_output(stdio_regexes, stdio_exit_codes, tool_stdout, tool_stderr, tool_exit_code, job_id_tag)
        if check_output_detected_state == DETECTED_JOB_STATE.OK and not tool_provided_metadata.has_failed_outputs():
            final_job_state = Job.states.OK
        else:
            final_job_state = Job.states.ERROR

        version_string = ""
        if os.path.exists(COMMAND_VERSION_FILENAME):
            version_string = open(COMMAND_VERSION_FILENAME).read()

        expression_context = ExpressionContext(dict(stdout=tool_stdout, stderr=tool_stderr))

        # Load outputs.
        export_store = store.DirectoryModelExportStore('metadata/outputs_populated', serialize_dataset_objects=True, for_edit=True, strip_metadata_files=False, serialize_jobs=False)
    try:
        import_model_store = store.imported_store_for_metadata('metadata/outputs_new', object_store=object_store)
    except AssertionError:
        # Remove in 21.09, this should only happen for jobs that started on <= 20.09 and finish now
        import_model_store = None

    job_context = SessionlessJobContext(
        metadata_params,
        tool_provided_metadata,
        object_store,
        export_store,
        import_model_store,
        os.path.join(tool_job_working_directory, "working"),
        final_job_state=final_job_state,
    )

    unnamed_id_to_path = {}
    for unnamed_output_dict in job_context.tool_provided_metadata.get_unnamed_outputs():
        destination = unnamed_output_dict["destination"]
        elements = unnamed_output_dict["elements"]
        destination_type = destination["type"]
        if destination_type == 'hdas':
            for element in elements:
                filename = element.get('filename')
                if filename:
                    unnamed_id_to_path[element['object_id']] = os.path.join(job_context.job_working_directory, filename)

    for output_name, output_dict in outputs.items():
        dataset_instance_id = output_dict["id"]
        klass = getattr(galaxy.model, output_dict.get('model_class', 'HistoryDatasetAssociation'))
        dataset = None
        if import_model_store:
            dataset = import_model_store.sa_session.query(klass).find(dataset_instance_id)
        if dataset is None:
            # legacy check for jobs that started before 21.01, remove on 21.05
            filename_in = os.path.join(f"metadata/metadata_in_{output_name}")
            import pickle
            dataset = pickle.load(open(filename_in, 'rb'))  # load DatasetInstance
        assert dataset is not None

        filename_kwds = os.path.join(f"metadata/metadata_kwds_{output_name}")
        filename_out = os.path.join(f"metadata/metadata_out_{output_name}")
        filename_results_code = os.path.join(f"metadata/metadata_results_{output_name}")
        override_metadata = os.path.join(f"metadata/metadata_override_{output_name}")
        dataset_filename_override = output_dict["filename_override"]
        # pre-20.05 this was a per job parameter and not a per dataset parameter, drop in 21.XX
        legacy_object_store_store_by = metadata_params.get("object_store_store_by", "id")

        # Same block as below...
        set_meta_kwds = stringify_dictionary_keys(json.load(open(filename_kwds)))  # load kwds; need to ensure our keywords are not unicode
        try:
            dataset.dataset.external_filename = unnamed_id_to_path.get(dataset_instance_id, dataset_filename_override)
            store_by = output_dict.get("object_store_store_by", legacy_object_store_store_by)
            extra_files_dir_name = f"dataset_{getattr(dataset.dataset, store_by)}_files"
            files_path = os.path.abspath(os.path.join(tool_job_working_directory, "working", extra_files_dir_name))
            dataset.dataset.external_extra_files_path = files_path
            file_dict = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid)
            if 'ext' in file_dict:
                dataset.extension = file_dict['ext']
            # Metadata FileParameter types may not be writable on a cluster node, and are therefore temporarily substituted with MetadataTempFiles
            override_metadata = json.load(open(override_metadata))
            for metadata_name, metadata_file_override in override_metadata:
                if MetadataTempFile.is_JSONified_value(metadata_file_override):
                    metadata_file_override = MetadataTempFile.from_JSON(metadata_file_override)
                setattr(dataset.metadata, metadata_name, metadata_file_override)
            if output_dict.get("validate", False):
                set_validated_state(dataset)
            if dataset_instance_id not in unnamed_id_to_path:
                # We're going to run through set_metadata in collect_dynamic_outputs with more contextual metadata,
                # so skip set_meta here.
                set_meta(dataset, file_dict)

            if extended_metadata_collection:
                meta = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid)
                if meta:
                    context = ExpressionContext(meta, expression_context)
                else:
                    context = expression_context

                # Lazy and unattached
                # if getattr(dataset, "hidden_beneath_collection_instance", None):
                #     dataset.visible = False
                dataset.blurb = 'done'
                dataset.peek = 'no peek'
                dataset.info = (dataset.info or '')
                if context['stdout'].strip():
                    # Ensure white space between entries
                    dataset.info = f"{dataset.info.rstrip()}\n{context['stdout'].strip()}"
                if context['stderr'].strip():
                    # Ensure white space between entries
                    dataset.info = f"{dataset.info.rstrip()}\n{context['stderr'].strip()}"
                dataset.tool_version = version_string
                dataset.set_size()
                if 'uuid' in context:
                    dataset.dataset.uuid = context['uuid']
                if dataset_filename_override and dataset_filename_override != dataset.file_name:
                    # This has to be a job with outputs_to_working_directory set.
                    # We update the object store with the created output file.
                    object_store.update_from_file(dataset.dataset, file_name=dataset_filename_override, create=True)
                collect_extra_files(object_store, dataset, ".")
                if Job.states.ERROR == final_job_state:
                    dataset.blurb = "error"
                    dataset.mark_unhidden()
                else:
                    # If the tool was expected to set the extension, attempt to retrieve it
                    if dataset.ext == 'auto':
                        dataset.extension = context.get('ext', 'data')
                        dataset.init_meta(copy_from=dataset)
                    # This has already been done:
                    # else:
                    #     self.external_output_metadata.load_metadata(dataset, output_name, self.sa_session, working_directory=self.working_directory, remote_metadata_directory=remote_metadata_directory)
                    line_count = context.get('line_count', None)
                    try:
                        # Certain datatype's set_peek methods contain a line_count argument
                        dataset.set_peek(line_count=line_count)
                    except TypeError:
                        # ... and others don't
                        dataset.set_peek()
                for context_key in TOOL_PROVIDED_JOB_METADATA_KEYS:
                    if context_key in context:
                        context_value = context[context_key]
                        setattr(dataset, context_key, context_value)
                # We never want to persist the external_filename.
                dataset.dataset.external_filename = None
                export_store.add_dataset(dataset)
            else:
                dataset.metadata.to_JSON_dict(filename_out)  # write out results of set_meta

            json.dump((True, 'Metadata has been set successfully'), open(filename_results_code, 'wt+'))  # setting metadata has succeeded
        except Exception:
            json.dump((False, traceback.format_exc()), open(filename_results_code, 'wt+'))  # setting metadata has failed somehow

    if extended_metadata_collection:
        # discover extra outputs...
        output_collections = {}
        for name, output_collection in metadata_params["output_collections"].items():
            output_collections[name] = import_model_store.sa_session.query(HistoryDatasetCollectionAssociation).find(output_collection["id"])
        outputs = {}
        for name, output in metadata_params["outputs"].items():
            klass = getattr(galaxy.model, output.get('model_class', 'HistoryDatasetAssociation'))
            outputs[name] = import_model_store.sa_session.query(klass).find(output["id"])

        input_ext = json.loads(metadata_params["job_params"].get("__input_ext", '"data"'))
        collect_primary_datasets(
            job_context,
            outputs,
            input_ext=input_ext,
        )
        collect_dynamic_outputs(job_context, output_collections)

    if export_store:
        export_store._finalize()
    write_job_metadata(tool_job_working_directory, job_metadata, set_meta, tool_provided_metadata)
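# set_metadata_portable() above records per-output success or failure as a JSON
# (flag, message) pair in the metadata_results_<output_name> file, the same file
# that _initialize_metadata_inputs() seeds with (False, 'External set_meta() not
# called'). A hypothetical reader for that convention; the function name and the
# path handling are illustrative, not Galaxy API.
import json


def external_set_meta_succeeded(results_path):
    """Return True if the external set_meta() reported success for one output."""
    try:
        with open(results_path) as fh:
            flag, message = json.load(fh)
    except OSError:
        return False
    if not flag:
        print(f"external set_meta() failed: {message}")
    return bool(flag)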
def set_metadata_portable():
    tool_job_working_directory = os.path.abspath(os.getcwd())
    metadata_tmp_files_dir = os.path.join(tool_job_working_directory, "metadata")
    MetadataTempFile.tmp_dir = metadata_tmp_files_dir

    metadata_params = get_metadata_params(tool_job_working_directory)
    datatypes_config = metadata_params["datatypes_config"]
    job_metadata = metadata_params["job_metadata"]
    provided_metadata_style = metadata_params.get("provided_metadata_style")
    max_metadata_value_size = metadata_params.get("max_metadata_value_size") or 0
    max_discovered_files = metadata_params.get("max_discovered_files")
    outputs = metadata_params["outputs"]

    datatypes_registry = validate_and_load_datatypes_config(datatypes_config)
    tool_provided_metadata = load_job_metadata(job_metadata, provided_metadata_style)

    def set_meta(new_dataset_instance, file_dict):
        set_meta_with_tool_provided(new_dataset_instance, file_dict, set_meta_kwds, datatypes_registry, max_metadata_value_size)

    try:
        object_store = get_object_store(tool_job_working_directory=tool_job_working_directory)
    except (FileNotFoundError, AssertionError):
        object_store = None
    extended_metadata_collection = bool(object_store)
    job_context = None
    version_string = None

    export_store = None
    final_job_state = Job.states.OK
    job_messages = []
    if extended_metadata_collection:
        tool_dict = metadata_params["tool"]
        stdio_exit_code_dicts, stdio_regex_dicts = tool_dict["stdio_exit_codes"], tool_dict["stdio_regexes"]
        stdio_exit_codes = list(map(ToolStdioExitCode, stdio_exit_code_dicts))
        stdio_regexes = list(map(ToolStdioRegex, stdio_regex_dicts))

        outputs_directory = os.path.join(tool_job_working_directory, "outputs")
        if not os.path.exists(outputs_directory):
            outputs_directory = tool_job_working_directory

        # TODO: constants...
        locations = [
            (outputs_directory, 'tool_'),
            (tool_job_working_directory, ''),
            (outputs_directory, ''),  # Pulsar style output directory? Was this ever used - did this ever work?
        ]
        for directory, prefix in locations:
            if os.path.exists(os.path.join(directory, f"{prefix}stdout")):
                with open(os.path.join(directory, f"{prefix}stdout"), 'rb') as f:
                    tool_stdout = f.read(MAX_STDIO_READ_BYTES)
                with open(os.path.join(directory, f"{prefix}stderr"), 'rb') as f:
                    tool_stderr = f.read(MAX_STDIO_READ_BYTES)
                break
        else:
            if os.path.exists(os.path.join(tool_job_working_directory, 'task_0')):
                # We have a task splitting job
                tool_stdout = b''
                tool_stderr = b''
                paths = Path(tool_job_working_directory).glob('task_*')
                for path in paths:
                    with open(path / 'outputs' / 'tool_stdout', 'rb') as f:
                        task_stdout = f.read(MAX_STDIO_READ_BYTES)
                        if task_stdout:
                            tool_stdout = b"%s[%s stdout]\n%s\n" % (tool_stdout, path.name.encode(), task_stdout)
                    with open(path / 'outputs' / 'tool_stderr', 'rb') as f:
                        task_stderr = f.read(MAX_STDIO_READ_BYTES)
                        if task_stderr:
                            tool_stderr = b"%s[%s stderr]\n%s\n" % (tool_stderr, path.name.encode(), task_stderr)
            else:
                wdc = os.listdir(tool_job_working_directory)
                odc = os.listdir(outputs_directory)
                error_desc = "Failed to find tool_stdout or tool_stderr for this job, cannot collect metadata"
                error_extra = f"Working dir contents [{wdc}], output directory contents [{odc}]"
                log.warn(f"{error_desc}. {error_extra}")
                raise Exception(error_desc)

        job_id_tag = metadata_params["job_id_tag"]
        exit_code_file = default_exit_code_file(".", job_id_tag)
        tool_exit_code = read_exit_code_from(exit_code_file, job_id_tag)

        check_output_detected_state, tool_stdout, tool_stderr, job_messages = check_output(stdio_regexes, stdio_exit_codes, tool_stdout, tool_stderr, tool_exit_code, job_id_tag)
        if check_output_detected_state == DETECTED_JOB_STATE.OK and not tool_provided_metadata.has_failed_outputs():
            final_job_state = Job.states.OK
        else:
            final_job_state = Job.states.ERROR

        version_string_path = os.path.join('outputs', COMMAND_VERSION_FILENAME)
        version_string = collect_shrinked_content_from_path(version_string_path)

        expression_context = ExpressionContext(dict(stdout=tool_stdout[:255], stderr=tool_stderr[:255]))

        # Load outputs.
        export_store = store.DirectoryModelExportStore('metadata/outputs_populated', serialize_dataset_objects=True, for_edit=True, strip_metadata_files=False, serialize_jobs=True)
    try:
        import_model_store = store.imported_store_for_metadata('metadata/outputs_new', object_store=object_store)
    except AssertionError:
        # Remove in 21.09, this should only happen for jobs that started on <= 20.09 and finish now
        import_model_store = None

    tool_script_file = os.path.join(tool_job_working_directory, 'tool_script.sh')
    job = None
    if import_model_store and export_store:
        job = next(iter(import_model_store.sa_session.objects[Job].values()))

    job_context = SessionlessJobContext(
        metadata_params,
        tool_provided_metadata,
        object_store,
        export_store,
        import_model_store,
        os.path.join(tool_job_working_directory, "working"),
        final_job_state=final_job_state,
        max_discovered_files=max_discovered_files,
    )

    if extended_metadata_collection:
        # discover extra outputs...
        output_collections = {}
        for name, output_collection in metadata_params["output_collections"].items():
            # TODO: remove HistoryDatasetCollectionAssociation fallback on 22.01, model_class used to not be serialized prior to 21.09
            model_class = output_collection.get('model_class', 'HistoryDatasetCollectionAssociation')
            collection = import_model_store.sa_session.query(getattr(galaxy.model, model_class)).find(output_collection["id"])
            output_collections[name] = collection
        output_instances = {}
        for name, output in metadata_params["outputs"].items():
            klass = getattr(galaxy.model, output.get('model_class', 'HistoryDatasetAssociation'))
            output_instances[name] = import_model_store.sa_session.query(klass).find(output["id"])

        input_ext = json.loads(metadata_params["job_params"].get("__input_ext") or '"data"')
        try:
            collect_primary_datasets(
                job_context,
                output_instances,
                input_ext=input_ext,
            )
            collect_dynamic_outputs(job_context, output_collections)
        except MaxDiscoveredFilesExceededError as e:
            final_job_state = Job.states.ERROR
            job_messages.append(str(e))
        if job:
            job.job_messages = job_messages
            job.state = final_job_state
            if os.path.exists(tool_script_file):
                with open(tool_script_file) as command_fh:
                    command_line_lines = []
                    for i, line in enumerate(command_fh):
                        if i == 0 and line.endswith('COMMAND_VERSION 2>&1;'):
                            # Don't record version command as part of command line
                            continue
                        command_line_lines.append(line)
                job.command_line = "".join(command_line_lines).strip()
            export_store.export_job(job, include_job_data=False)

    unnamed_id_to_path = {}
    for unnamed_output_dict in job_context.tool_provided_metadata.get_unnamed_outputs():
        destination = unnamed_output_dict["destination"]
        elements = unnamed_output_dict["elements"]
        destination_type = destination["type"]
        if destination_type == 'hdas':
            for element in elements:
                filename = element.get('filename')
                object_id = element.get('object_id')
                if filename and object_id:
                    unnamed_id_to_path[object_id] = os.path.join(job_context.job_working_directory, filename)

    for output_name, output_dict in outputs.items():
        dataset_instance_id = output_dict["id"]
        klass = getattr(galaxy.model, output_dict.get('model_class', 'HistoryDatasetAssociation'))
        dataset = None
        if import_model_store:
            dataset = import_model_store.sa_session.query(klass).find(dataset_instance_id)
        if dataset is None:
            # legacy check for jobs that started before 21.01, remove on 21.05
            filename_in = os.path.join(f"metadata/metadata_in_{output_name}")
            import pickle
            dataset = pickle.load(open(filename_in, 'rb'))  # load DatasetInstance
        assert dataset is not None

        filename_kwds = os.path.join(f"metadata/metadata_kwds_{output_name}")
        filename_out = os.path.join(f"metadata/metadata_out_{output_name}")
        filename_results_code = os.path.join(f"metadata/metadata_results_{output_name}")
        override_metadata = os.path.join(f"metadata/metadata_override_{output_name}")
        dataset_filename_override = output_dict["filename_override"]
        # pre-20.05 this was a per job parameter and not a per dataset parameter, drop in 21.XX
        legacy_object_store_store_by = metadata_params.get("object_store_store_by", "id")

        # Same block as below...
        set_meta_kwds = stringify_dictionary_keys(json.load(open(filename_kwds)))  # load kwds; need to ensure our keywords are not unicode
        try:
            external_filename = unnamed_id_to_path.get(dataset_instance_id, dataset_filename_override)
            if not os.path.exists(external_filename):
                matches = glob.glob(external_filename)
                assert len(matches) == 1, f"More than one file matched by output glob '{external_filename}'"
                external_filename = matches[0]
                assert safe_contains(tool_job_working_directory, external_filename), f"Cannot collect output '{external_filename}' from outside of working directory"
                created_from_basename = os.path.relpath(external_filename, os.path.join(tool_job_working_directory, 'working'))
                dataset.dataset.created_from_basename = created_from_basename
            # override filename if we're dealing with outputs to working directory and dataset is not linked to
            link_data_only = metadata_params.get("link_data_only")
            if not link_data_only:
                # Only set external filename if we're dealing with files in job working directory.
                # Fixes link_data_only uploads
                dataset.dataset.external_filename = external_filename
                store_by = output_dict.get("object_store_store_by", legacy_object_store_store_by)
                extra_files_dir_name = f"dataset_{getattr(dataset.dataset, store_by)}_files"
                files_path = os.path.abspath(os.path.join(tool_job_working_directory, "working", extra_files_dir_name))
                dataset.dataset.external_extra_files_path = files_path
            file_dict = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid)
            if 'ext' in file_dict:
                dataset.extension = file_dict['ext']
            # Metadata FileParameter types may not be writable on a cluster node, and are therefore temporarily substituted with MetadataTempFiles
            override_metadata = json.load(open(override_metadata))
            for metadata_name, metadata_file_override in override_metadata:
                if MetadataTempFile.is_JSONified_value(metadata_file_override):
                    metadata_file_override = MetadataTempFile.from_JSON(metadata_file_override)
                setattr(dataset.metadata, metadata_name, metadata_file_override)
            if output_dict.get("validate", False):
                set_validated_state(dataset)
            if dataset_instance_id not in unnamed_id_to_path:
                # We're going to run through set_metadata in collect_dynamic_outputs with more contextual metadata,
                # so skip set_meta here.
                set_meta(dataset, file_dict)

            if extended_metadata_collection:
                collect_extra_files(object_store, dataset, ".")
                dataset.state = dataset.dataset.state = final_job_state

            if extended_metadata_collection:
                if not link_data_only and os.path.getsize(external_filename):
                    # Here we might be updating a disk based objectstore when outputs_to_working_directory is used,
                    # or a remote object store from its cache path.
                    object_store.update_from_file(dataset.dataset, file_name=external_filename, create=True)

                # TODO: merge expression_context into tool_provided_metadata so we don't have to special case this (here and in _finish_dataset)
                meta = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid)
                if meta:
                    context = ExpressionContext(meta, expression_context)
                else:
                    context = expression_context

                dataset.blurb = 'done'
                dataset.peek = 'no peek'
                dataset.info = (dataset.info or '')
                if context['stdout'].strip():
                    # Ensure white space between entries
                    dataset.info = f"{dataset.info.rstrip()}\n{context['stdout'].strip()}"
                if context['stderr'].strip():
                    # Ensure white space between entries
                    dataset.info = f"{dataset.info.rstrip()}\n{context['stderr'].strip()}"
                dataset.tool_version = version_string
                if 'uuid' in context:
                    dataset.dataset.uuid = context['uuid']
                if not final_job_state == Job.states.ERROR:
                    line_count = context.get('line_count', None)
                    try:
                        # Certain datatype's set_peek methods contain a line_count argument
                        dataset.set_peek(line_count=line_count)
                    except TypeError:
                        # ... and others don't
                        dataset.set_peek()
                for context_key in TOOL_PROVIDED_JOB_METADATA_KEYS:
                    if context_key in context:
                        context_value = context[context_key]
                        setattr(dataset, context_key, context_value)
                # We only want to persist the external_filename if the dataset has been linked in.
                if not link_data_only:
                    dataset.dataset.external_filename = None
                    dataset.dataset.extra_files_path = None
                export_store.add_dataset(dataset)
            else:
                dataset.metadata.to_JSON_dict(filename_out)  # write out results of set_meta

            json.dump((True, 'Metadata has been set successfully'), open(filename_results_code, 'wt+'))  # setting metadata has succeeded
        except Exception:
            json.dump((False, traceback.format_exc()), open(filename_results_code, 'wt+'))  # setting metadata has failed somehow

    if export_store:
        export_store._finalize()
    write_job_metadata(tool_job_working_directory, job_metadata, set_meta, tool_provided_metadata)
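# set_metadata_portable() resolves everything relative to the current working directory
# (metadata/params.json, metadata/outputs_new, outputs/, working/), so a driver only needs
# to chdir into the staged job working directory before calling it. A hypothetical
# invocation; the path is a placeholder and the directory layout is assumed to have been
# staged by the Galaxy job runner.
import os

job_working_directory = "/path/to/job_working_directory"  # placeholder
os.chdir(job_working_directory)
set_metadata_portable()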