Example no. 1
    def __populate_output_dataset_wrappers(self, param_dict, output_datasets, job_working_directory):
        for name, hda in output_datasets.items():
            # Write outputs to the working directory (for security purposes)
            # if desired.
            param_dict[name] = DatasetFilenameWrapper(hda, compute_environment=self.compute_environment, io_type="output")
            output_path = str(param_dict[name])
            # Conditionally create empty output:
            # - may already exist (e.g. symlink output)
            # - parent directory might not exist (e.g. Pulsar)
            if not os.path.exists(output_path) and os.path.exists(os.path.dirname(output_path)):
                open(output_path, 'w').close()

            # Provide access to a path to store additional files
            # TODO: move compute path logic into compute environment, move setting files_path
            # logic into DatasetFilenameWrapper. Currently this sits in the middle and glues
            # stuff together inconsistently with the way the rest of path rewriting works.
            file_name = hda.dataset.extra_files_path_name
            param_dict[name].files_path = os.path.abspath(os.path.join(job_working_directory, "working", file_name))
        for out_name, output in self.tool.outputs.items():
            if out_name not in param_dict and output.filters:
                # Assume the reason we lack this output is because a filter
            # failed to pass; for tool writing convenience, provide a
                # NoneDataset
                ext = getattr(output, "format", None)  # populate only for output datasets (not collections)
                param_dict[out_name] = NoneDataset(datatypes_registry=self.app.datatypes_registry, ext=ext)
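
The guarded open(output_path, 'w').close() above only creates the placeholder when the file does not already exist and its parent directory does. A minimal standalone sketch of that pattern (the function name and path below are hypothetical, not Galaxy API):

import os

def ensure_empty_output(output_path):
    # Skip if the output already exists (e.g. a symlinked output), and
    # skip if the parent directory is missing (e.g. a Pulsar staging
    # directory that the remote runner creates later).
    if not os.path.exists(output_path) and os.path.exists(os.path.dirname(output_path)):
        open(output_path, 'w').close()

ensure_empty_output("/tmp/job/working/out.dat")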
Example no. 2
    def __populate_output_dataset_wrappers(self, param_dict, output_datasets, output_paths, job_working_directory):
        output_dataset_paths = dataset_path_rewrites(output_paths)
        for name, hda in output_datasets.items():
            # Write outputs to the working directory (for security purposes)
            # if desired.
            real_path = hda.file_name
            if real_path in output_dataset_paths:
                dataset_path = output_dataset_paths[real_path]
                param_dict[name] = DatasetFilenameWrapper(hda, dataset_path=dataset_path)
                try:
                    open(dataset_path.false_path, 'w').close()
                except EnvironmentError:
                    pass  # May well not exist - e.g. Pulsar.
            else:
                param_dict[name] = DatasetFilenameWrapper(hda)
            # Provide access to a path to store additional files
            # TODO: path munging for cluster/dataset server relocatability
            param_dict[name].files_path = os.path.abspath(os.path.join(job_working_directory, "dataset_%s_files" % hda.dataset.id))
            for child in hda.children:
                param_dict["_CHILD___%s___%s" % (name, child.designation)] = DatasetFilenameWrapper(child)
        for out_name, output in self.tool.outputs.items():
            if out_name not in param_dict and output.filters:
                # Assume the reason we lack this output is because a filter
                # failed to pass; for tool writing convenience, provide a
                # NoneDataset
                ext = getattr(output, "format", None)  # populate only for output datasets (not collections)
                param_dict[out_name] = NoneDataset(datatypes_registry=self.app.datatypes_registry, ext=ext)
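
Here dataset_path_rewrites is expected to return a mapping from each dataset's real path to an object carrying the false_path the job actually writes to. A minimal sketch of that contract, with DatasetPath as a hypothetical stand-in for Galaxy's real type:

from collections import namedtuple

# Hypothetical stand-in; only the two attributes the loop above reads
# (real_path and false_path) are modeled here.
DatasetPath = namedtuple("DatasetPath", ["real_path", "false_path"])

def dataset_path_rewrites_sketch(output_paths):
    # Index only the paths that actually carry a rewrite, keyed by the
    # real path so the membership test above stays a dict lookup.
    return {p.real_path: p for p in output_paths if p.false_path}

paths = [DatasetPath("/data/dataset_1.dat", "/work/dataset_1.dat")]
assert dataset_path_rewrites_sketch(paths)["/data/dataset_1.dat"].false_path == "/work/dataset_1.dat"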