def test_input_value_wrapper(tool):
    """An integer parameter's InputValueWrapper stringifies to its raw value."""
    param_xml = XML('<param name="blah" type="integer" size="4" value="10" min="0" />')
    parameter = IntegerToolParameter(tool, param_xml)
    wrapped = InputValueWrapper(parameter, "5")
    assert str(wrapped) == "5"
def wrap_values(self, inputs, input_values, skip_missing_values=False):
    """Wrap the raw tool input values in ``input_values`` in place.

    Recurses into Repeat/Conditional/Section containers and replaces each
    leaf value with the wrapper class appropriate for its parameter type,
    so downstream template code can treat all values uniformly.

    :param inputs: mapping of input name -> tool parameter definition
    :param input_values: dict of raw values; mutated in place
    :param skip_missing_values: when True, inputs absent from
        ``input_values`` are silently skipped instead of raising KeyError
    """
    trans = self.trans
    tool = self.tool
    incoming = self.incoming
    # Wrap tool inputs as necessary.  Use .values() rather than the
    # Python-2-only .itervalues() -- identical iteration behavior on
    # both Python 2 and 3 (``inputs`` is not mutated in this loop).
    for input in inputs.values():
        if input.name not in input_values and skip_missing_values:
            continue
        value = input_values[input.name]
        if isinstance(input, Repeat):
            # Each repeat element carries its own dict of nested values.
            for d in value:
                self.wrap_values(input.inputs, d, skip_missing_values=skip_missing_values)
        elif isinstance(input, Conditional):
            current = value["__current_case__"]
            self.wrap_values(input.cases[current].inputs, value, skip_missing_values=skip_missing_values)
        elif isinstance(input, Section):
            self.wrap_values(input.inputs, value, skip_missing_values=skip_missing_values)
        elif isinstance(input, DataToolParameter) and input.multiple:
            dataset_instances = DatasetListWrapper.to_dataset_instances(value)
            input_values[input.name] = DatasetListWrapper(
                None,
                dataset_instances,
                datatypes_registry=trans.app.datatypes_registry,
                tool=tool,
                name=input.name,
            )
        elif isinstance(input, DataToolParameter):
            input_values[input.name] = DatasetFilenameWrapper(
                value,
                datatypes_registry=trans.app.datatypes_registry,
                tool=tool,
                name=input.name,
            )
        elif isinstance(input, SelectToolParameter):
            input_values[input.name] = SelectToolParameterWrapper(
                input, value, tool.app, other_values=incoming)
        elif isinstance(input, DataCollectionToolParameter):
            input_values[input.name] = DatasetCollectionWrapper(
                None,
                value,
                datatypes_registry=trans.app.datatypes_registry,
                tool=tool,
                name=input.name,
            )
        else:
            input_values[input.name] = InputValueWrapper(input, value, incoming)
def wrap_values(self, inputs, input_values, skip_missing_values=False):
    """Wrap each raw tool input value in ``input_values`` in place.

    Container parameters (Repeat/Conditional/Section) are recursed into;
    leaf parameters are replaced with the matching wrapper class, with
    element identifiers propagated downward where available.
    """
    trans = self.trans
    tool = self.tool
    incoming = self.incoming
    identifier_mapper = ElementIdentifierMapper(self._input_datasets)
    # Wrap tool inputs as necessary.
    for input in inputs.values():
        if input.name not in input_values and skip_missing_values:
            continue
        value = input_values[input.name]
        # Propagate element identifiers from the enclosing level down.
        copy_identifiers(destination=value, source=input_values)
        if isinstance(input, Repeat):
            for element in value:
                copy_identifiers(destination=element, source=value)
                self.wrap_values(input.inputs, element, skip_missing_values=skip_missing_values)
        elif isinstance(input, Conditional):
            case_index = value["__current_case__"]
            self.wrap_values(input.cases[case_index].inputs, value, skip_missing_values=skip_missing_values)
        elif isinstance(input, Section):
            self.wrap_values(input.inputs, value, skip_missing_values=skip_missing_values)
        elif isinstance(input, DataToolParameter) and input.multiple:
            instances = DatasetListWrapper.to_dataset_instances(value)
            input_values[input.name] = DatasetListWrapper(
                None,
                instances,
                datatypes_registry=trans.app.datatypes_registry,
                tool=tool,
                name=input.name,
                formats=input.formats,
            )
        elif isinstance(input, DataToolParameter):
            filename_kwds = dict(
                datatypes_registry=trans.app.datatypes_registry,
                tool=tool,
                name=input.name,
                formats=input.formats,
            )
            identifier = identifier_mapper.identifier(value, input_values)
            if identifier:
                filename_kwds["identifier"] = identifier
            input_values[input.name] = DatasetFilenameWrapper(value, **filename_kwds)
        elif isinstance(input, SelectToolParameter):
            input_values[input.name] = SelectToolParameterWrapper(input, value, other_values=incoming)
        elif isinstance(input, DataCollectionToolParameter):
            input_values[input.name] = DatasetCollectionWrapper(
                None,
                value,
                datatypes_registry=trans.app.datatypes_registry,
                tool=tool,
                name=input.name,
            )
        else:
            input_values[input.name] = InputValueWrapper(input, value, incoming)
def test_input_value_wrapper_comparison_optional(tool):
    """Optional integer params: None wraps as empty/falsy, 0 wraps as a real zero."""
    xml = XML('<param name="blah" type="integer" min="0" optional="true"/>')
    empty = InputValueWrapper(IntegerToolParameter(tool, xml), None)
    assert not empty
    with pytest.raises(ValueError):
        int(empty)
    assert str(empty) == ""
    assert empty == ""  # for backward-compatibility
    xml = XML('<param name="blah" type="integer" min="0" optional="true"/>')
    zero = InputValueWrapper(IntegerToolParameter(tool, xml), 0)
    assert zero == 0
    assert int(zero) == 0
    assert str(zero)
    # for backward-compatibility, the correct way to check if an optional
    # integer param is not empty is to use str(wrapper)
    assert zero != ""
def valuewrapper(tool, value, paramtype, optional=False):
    """Build an ``InputValueWrapper`` around a freshly parsed tool parameter.

    :param tool: tool the parameter belongs to
    :param value: raw value to wrap
    :param paramtype: one of ``"integer"``, ``"text"``, ``"float"``,
        ``"boolean"``
    :param optional: when True the parameter is declared ``optional="true"``
        instead of carrying a default ``value`` attribute
    :raises ValueError: for an unrecognized ``paramtype``
    """
    if paramtype == "integer":
        attr = 'optional="true"' if optional else 'value="10"'
        parameter = IntegerToolParameter(tool, XML('<param name="blah" type="integer" %s min="0" />' % attr))
    elif paramtype == "text":
        attr = 'optional="true"' if optional else 'value="foo"'
        parameter = TextToolParameter(tool, XML('<param name="blah" type="text" %s/>' % attr))
    elif paramtype == "float":
        attr = 'optional="true"' if optional else 'value="10.0"'
        parameter = FloatToolParameter(tool, XML('<param name="blah" type="float" %s/>' % attr))
    elif paramtype == "boolean":
        attr = 'optional="true"' if optional else 'value=""'
        parameter = BooleanToolParameter(tool, XML('<param name="blah" type="boolean" truevalue="truevalue" falsevalue="falsevalue" %s/>' % attr))
    else:
        # Previously an unknown paramtype fell through and crashed with
        # UnboundLocalError on ``parameter``; fail with a clear error instead.
        raise ValueError("unsupported paramtype: %r" % (paramtype,))
    return InputValueWrapper(parameter, value)
def wrap_input(input_values, input):
    """Replace the raw value of ``input`` in ``input_values`` with a wrapper."""
    value = input_values[input.name]
    if isinstance(input, DataToolParameter) and input.multiple:
        instances = DatasetListWrapper.to_dataset_instances(value)
        input_values[input.name] = DatasetListWrapper(
            job_working_directory,
            instances,
            compute_environment=self.compute_environment,
            datatypes_registry=self.app.datatypes_registry,
            tool=self.tool,
            name=input.name,
            formats=input.formats,
        )
    elif isinstance(input, DataToolParameter):
        filename_kwds = dict(
            datatypes_registry=self.app.datatypes_registry,
            tool=self,
            name=input.name,
            compute_environment=self.compute_environment,
        )
        identifier = element_identifier_mapper.identifier(value, param_dict)
        if identifier:
            filename_kwds["identifier"] = identifier
        input_values[input.name] = DatasetFilenameWrapper(value, **filename_kwds)
    elif isinstance(input, DataCollectionToolParameter):
        collection_kwds = dict(
            datatypes_registry=self.app.datatypes_registry,
            compute_environment=self.compute_environment,
            tool=self,
            name=input.name,
        )
        input_values[input.name] = DatasetCollectionWrapper(
            job_working_directory, value, **collection_kwds)
    elif isinstance(input, SelectToolParameter):
        select_value = listify(value) if input.multiple else value
        input_values[input.name] = SelectToolParameterWrapper(
            input, select_value, other_values=param_dict, compute_environment=self.compute_environment)
    else:
        input_values[input.name] = InputValueWrapper(input, value, param_dict)
def valuewrapper(tool, value, paramtype):
    """Parse a minimal parameter of ``paramtype`` and wrap ``value`` with it.

    :param tool: tool the parameter belongs to
    :param value: raw value to wrap
    :param paramtype: one of ``"integer"``, ``"text"``, ``"float"``,
        ``"boolean"``
    :raises ValueError: for an unrecognized ``paramtype``
    """
    if paramtype == "integer":
        parameter = IntegerToolParameter(tool, XML('<param name="blah" type="integer" value="10" min="0" />'))
    elif paramtype == "text":
        parameter = TextToolParameter(tool, XML('<param name="blah" type="text" value="foo"/>'))
    elif paramtype == "float":
        parameter = FloatToolParameter(tool, XML('<param name="blah" type="float" value="10.0"/>'))
    elif paramtype == "boolean":
        parameter = BooleanToolParameter(tool, XML('<param name="blah" type="boolean" truevalue="truevalue" falsevalue="falsevalue"/>'))
    else:
        # Fail loudly rather than the UnboundLocalError the old
        # fall-through produced when ``parameter`` was never assigned.
        raise ValueError("unsupported paramtype: %r" % (paramtype,))
    return InputValueWrapper(parameter, value)
def wrap_input(input_values, input):
    """Swap the raw value for ``input`` in ``input_values`` for a wrapper object."""
    value = input_values[input.name]
    if isinstance(input, DataToolParameter) and input.multiple:
        instances = DatasetListWrapper.to_dataset_instances(value)
        input_values[input.name] = DatasetListWrapper(
            job_working_directory,
            instances,
            compute_environment=self.compute_environment,
            datatypes_registry=self.app.datatypes_registry,
            tool=self.tool,
            name=input.name,
            formats=input.formats,
        )
    elif isinstance(input, DataToolParameter):
        # FIXME: We're populating param_dict with conversions when
        # wrapping values, this should happen as a separate
        # step before wrapping (or call this wrapping step
        # something more generic) (but iterating this same
        # list twice would be wasteful)
        # Register each explicit datatype conversion under its own name on
        # the current parent.
        for conversion_name, conversion_extensions, conversion_datatypes in input.conversions:
            # By command-line build time the converters have already run,
            # so just look up their results here.
            direct_match, conv_ext, converted_dataset = input_values[input.name].find_conversion_destination(conversion_datatypes)
            # For optional inputs provide a valid extension to be used for
            # a None converted dataset.
            if not direct_match and not conv_ext:
                conv_ext = conversion_extensions[0]
            # input_values[input.name] is None for an optional dataset;
            # 'conversion' of it should wrap a NoneDataset instead.
            if input_values[input.name] and not converted_dataset:
                # The source dataset has a value but no converted dataset exists.
                raise Exception('A path for explicit datatype conversion has not been found: %s --/--> %s' % (input_values[input.name].extension, conversion_extensions))
            else:
                # Trick the wrapper into using the target conversion
                # extension (when None) without being a real tool parameter.
                input_values[conversion_name] = DatasetFilenameWrapper(
                    converted_dataset,
                    datatypes_registry=self.app.datatypes_registry,
                    tool=Bunch(conversion_name=Bunch(extensions=conv_ext)),
                    name=conversion_name,
                )
        # Wrap the actual input dataset itself.
        dataset = input_values[input.name]
        filename_kwds = dict(
            datatypes_registry=self.app.datatypes_registry,
            tool=self,
            name=input.name,
            compute_environment=self.compute_environment,
        )
        identifier = element_identifier_mapper.identifier(dataset, param_dict)
        if identifier:
            filename_kwds["identifier"] = identifier
        input_values[input.name] = DatasetFilenameWrapper(dataset, **filename_kwds)
    elif isinstance(input, DataCollectionToolParameter):
        collection_kwds = dict(
            datatypes_registry=self.app.datatypes_registry,
            compute_environment=self.compute_environment,
            tool=self,
            name=input.name,
        )
        input_values[input.name] = DatasetCollectionWrapper(
            job_working_directory, value, **collection_kwds)
    elif isinstance(input, SelectToolParameter):
        select_value = listify(value) if input.multiple else value
        input_values[input.name] = SelectToolParameterWrapper(
            input, select_value, other_values=param_dict, compute_environment=self.compute_environment)
    else:
        input_values[input.name] = InputValueWrapper(input, value, param_dict)
def wrap_input(input_values, input):
    """Wrap the value stored under ``input.name`` in ``input_values`` in place."""
    if isinstance(input, DataToolParameter) and input.multiple:
        dataset_instances = input_values[input.name]
        # A collection association stands in for a list of datasets; flatten it.
        if isinstance(dataset_instances, model.HistoryDatasetCollectionAssociation):
            dataset_instances = dataset_instances.collection.dataset_instances[:]
        input_values[input.name] = DatasetListWrapper(
            dataset_instances,
            dataset_paths=input_dataset_paths,
            datatypes_registry=self.app.datatypes_registry,
            tool=self.tool,
            name=input.name,
        )
    elif isinstance(input, DataToolParameter):
        # FIXME: We're populating param_dict with conversions when
        # wrapping values, this should happen as a separate
        # step before wrapping (or call this wrapping step
        # something more generic) (but iterating this same
        # list twice would be wasteful)
        # Register explicit conversions by name on the current parent.
        for conversion_name, conversion_extensions, conversion_datatypes in input.conversions:
            # Converters have already executed by command-line build time.
            conv_ext, converted_dataset = input_values[input.name].find_conversion_destination(conversion_datatypes)
            # For optional inputs, provide a valid extension for a None
            # converted dataset.
            if not conv_ext:
                conv_ext = conversion_extensions[0]
            # input_values[input.name] is None for an optional dataset;
            # 'conversion' of it should wrap a NoneDataset instead.
            if input_values[input.name] and not converted_dataset:
                # The source dataset has a value but no converted dataset exists.
                raise Exception(
                    'A path for explicit datatype conversion has not been found: %s --/--> %s'
                    % (input_values[input.name].extension, conversion_extensions))
            else:
                # Trick the wrapper into using the target conversion
                # extension (when None) without being a real tool parameter.
                input_values[conversion_name] = DatasetFilenameWrapper(
                    converted_dataset,
                    datatypes_registry=self.app.datatypes_registry,
                    tool=Bunch(conversion_name=Bunch(extensions=conv_ext)),
                    name=conversion_name,
                )
        # Wrap the actual input dataset.
        dataset = input_values[input.name]
        filename_kwds = dict(
            datatypes_registry=self.app.datatypes_registry,
            tool=self,
            name=input.name,
        )
        if dataset:  # a None dataset does not have a filename
            real_path = dataset.file_name
            if real_path in input_dataset_paths:
                filename_kwds["dataset_path"] = input_dataset_paths[real_path]
        input_values[input.name] = DatasetFilenameWrapper(dataset, **filename_kwds)
    elif isinstance(input, DataCollectionToolParameter):
        collection_kwds = dict(
            datatypes_registry=self.app.datatypes_registry,
            dataset_paths=input_dataset_paths,
            tool=self,
            name=input.name,
        )
        input_values[input.name] = DatasetCollectionWrapper(input_values[input.name], **collection_kwds)
    elif isinstance(input, SelectToolParameter):
        input_values[input.name] = SelectToolParameterWrapper(
            input,
            input_values[input.name],
            self.app,
            other_values=param_dict,
            path_rewriter=self.unstructured_path_rewriter,
        )
    elif isinstance(input, LibraryDatasetToolParameter):
        # TODO: Handle input rewrites in here? How to test LibraryDatasetToolParameters?
        input_values[input.name] = LibraryDatasetValueWrapper(input, input_values[input.name], param_dict)
    else:
        input_values[input.name] = InputValueWrapper(input, input_values[input.name], param_dict)