def _deserializeFromHdf5(self, topGroup, groupVersion, hdf5File, projectFilePath):
    with Tracer(traceLogger):
        self._projectFilePath = projectFilePath
        self.initWithoutTopGroup(hdf5File, projectFilePath)

        infoDir = topGroup['infos']
        self.mainOperator.Dataset.resize(len(infoDir))
        for index, (infoGroupName, infoGroup) in enumerate(sorted(infoDir.items())):
            datasetInfo = DatasetInfo()

            # Make a reverse-lookup of the location storage strings
            LocationLookup = {v: k for k, v in self.LocationStrings.items()}
            datasetInfo.location = LocationLookup[str(infoGroup['location'].value)]

            # Write to the 'private' members to avoid resetting the dataset id
            datasetInfo._filePath = str(infoGroup['filePath'].value)
            datasetInfo._datasetId = str(infoGroup['datasetId'].value)

            # Deserialize the "allow labels" flag
            try:
                datasetInfo.allowLabels = infoGroup['allowLabels'].value
            except KeyError:
                pass

            # Deserialize the axisorder (if present)
            try:
                datasetInfo.axisorder = infoGroup['axisorder'].value
            except KeyError:
                if ilastik.utility.globals.ImportOptions.default_axis_order is not None:
                    datasetInfo.axisorder = ilastik.utility.globals.ImportOptions.default_axis_order

            # If the data is supposed to be in the project, check for it now.
            if datasetInfo.location == DatasetInfo.Location.ProjectInternal:
                if datasetInfo.datasetId not in topGroup['local_data'].keys():
                    raise RuntimeError("Corrupt project file. Could not find data for " + infoGroupName)

            # If the data is supposed to exist outside the project, make sure it really does.
            if datasetInfo.location == DatasetInfo.Location.FileSystem:
                filePath = PathComponents(datasetInfo.filePath,
                                          os.path.split(projectFilePath)[0]).externalPath
                if not os.path.exists(filePath):
                    raise RuntimeError("Could not find external data: " + filePath)

            # Give the new info to the operator
            self.mainOperator.Dataset[index].setValue(datasetInfo)

        self._dirty = False
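# --- Illustrative sketch (not from the original source) ---
# A minimal, self-contained demonstration of the location round-trip used by
# _deserializeFromHdf5 above: enum-like location values are serialized as
# strings, and a reversed dict maps them back on load.  The numeric values
# below are assumptions for illustration only.
class Location(object):
    FileSystem = 0       # hypothetical value
    ProjectInternal = 1  # hypothetical value

LocationStrings = {Location.FileSystem: 'FileSystem',
                   Location.ProjectInternal: 'ProjectInternal'}
LocationLookup = {v: k for k, v in LocationStrings.items()}  # same trick as above

assert LocationLookup['FileSystem'] == Location.FileSystem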
def configure_operator_with_parsed_args(self, parsed_args):
    """
    Helper function for headless workflows.
    Configures this applet's top-level operator according to the settings provided in ``parsed_args``.

    :param parsed_args: Must be an ``argparse.Namespace`` as returned by :py:meth:`parse_known_cmdline_args()`.
    """
    # TODO: Support image stack inputs by checking for globstrings and converting to hdf5.
    input_paths = parsed_args.input_files

    input_infos = []
    for p in input_paths:
        info = DatasetInfo()
        info.location = DatasetInfo.Location.FileSystem

        # Convert all paths to absolute
        # (otherwise they are relative to the project file, which probably isn't what the user meant)
        comp = PathComponents(p)
        comp.externalPath = os.path.abspath(comp.externalPath)
        info.filePath = comp.totalPath()
        info.nickname = comp.filenameBase
        input_infos.append(info)

    opDataSelection = self.topLevelOperator
    opDataSelection.DatasetGroup.resize(len(input_infos))
    for lane_index, info in enumerate(input_infos):
        # Only one dataset role in pixel classification
        opDataSelection.DatasetGroup[lane_index][0].setValue(info)
def configure_operator_with_parsed_args(self, parsed_args):
    """
    Helper function for headless workflows.
    Configures this applet's top-level operator according to the settings provided in ``parsed_args``.

    :param parsed_args: Must be an ``argparse.Namespace`` as returned by :py:meth:`parse_known_cmdline_args()`.
    """
    input_paths = parsed_args.input_files

    # If the user doesn't want image stacks to be copied into the project file,
    # we generate hdf5 volumes in a temporary directory and use those files instead.
    if parsed_args.preconvert_stacks:
        import tempfile
        input_paths = self.convertStacksToH5(input_paths, tempfile.gettempdir())

    input_infos = []
    for p in input_paths:
        info = DatasetInfo()
        info.location = DatasetInfo.Location.FileSystem

        # Convert all paths to absolute
        # (otherwise they are relative to the project file, which probably isn't what the user meant)
        comp = PathComponents(p)
        comp.externalPath = os.path.abspath(comp.externalPath)
        info.filePath = comp.totalPath()
        info.nickname = comp.filenameBase
        input_infos.append(info)

    opDataSelection = self.topLevelOperator
    opDataSelection.DatasetGroup.resize(len(input_infos))
    for lane_index, info in enumerate(input_infos):
        # Only one dataset role in pixel classification
        opDataSelection.DatasetGroup[lane_index][0].setValue(info)
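# --- Illustrative sketch (not from the original source) ---
# In a headless script, parsed_args can be any argparse.Namespace that carries
# the attributes read above.  The path and the 'applet' variable are
# hypothetical; normally the namespace comes from parse_known_cmdline_args().
import argparse

parsed_args = argparse.Namespace(
    input_files=['/data/raw_volume.h5/volume'],  # hypothetical input path
    preconvert_stacks=False,
)
# applet.configure_operator_with_parsed_args(parsed_args)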
def _deserializeFromHdf5(self, topGroup, groupVersion, hdf5File, projectFilePath, headless):
    self._projectFilePath = projectFilePath
    self.initWithoutTopGroup(hdf5File, projectFilePath)

    # Normally the serializer is not dirty after loading a project file.
    # However, when the file was corrupted, the user has the possibility
    # to save the fixed file after loading it.
    dirty = False

    infoDir = topGroup['infos']
    self.topLevelOperator.Dataset.resize(len(infoDir))
    for index, (infoGroupName, infoGroup) in enumerate(sorted(infoDir.items())):
        datasetInfo = DatasetInfo()

        # Make a reverse-lookup of the location storage strings
        LocationLookup = {v: k for k, v in self.LocationStrings.items()}
        datasetInfo.location = LocationLookup[str(infoGroup['location'].value)]

        # Write to the 'private' members to avoid resetting the dataset id
        datasetInfo._filePath = str(infoGroup['filePath'].value)
        datasetInfo._datasetId = str(infoGroup['datasetId'].value)

        # Deserialize the "allow labels" flag
        try:
            datasetInfo.allowLabels = infoGroup['allowLabels'].value
        except KeyError:
            pass

        # Deserialize the axisorder (if present)
        try:
            datasetInfo.axisorder = infoGroup['axisorder'].value
        except KeyError:
            pass

        # If the data is supposed to be in the project, check for it now.
        if datasetInfo.location == DatasetInfo.Location.ProjectInternal:
            if datasetInfo.datasetId not in topGroup['local_data'].keys():
                raise RuntimeError("Corrupt project file. Could not find data for " + infoGroupName)

        # If the data is supposed to exist outside the project, make sure it really does.
        if datasetInfo.location == DatasetInfo.Location.FileSystem:
            pathData = PathComponents(datasetInfo.filePath, os.path.split(projectFilePath)[0])
            filePath = pathData.externalPath
            if not os.path.exists(filePath):
                if headless:
                    raise RuntimeError("Could not find data at " + filePath)
                filt = "Image files (" + ' '.join('*.' + x for x in OpDataSelection.SupportedExtensions) + ')'
                newpath = self.repairFile(filePath, filt)
                newpath = newpath + pathData.internalPath
                datasetInfo._filePath = getPathVariants(newpath, os.path.split(projectFilePath)[0])[0]
                dirty = True

        # Give the new info to the operator
        self.topLevelOperator.Dataset[index].setValue(datasetInfo)

    self._dirty = dirty
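# --- Illustrative sketch (not from the original source) ---
# The repair logic above leans on PathComponents splitting a path into an
# external file path and an internal HDF5 path.  The import location and the
# exact attribute semantics are assumptions based on lazyflow's pathHelpers.
from lazyflow.utility.pathHelpers import PathComponents  # assumed module layout

comp = PathComponents('/data/stack.h5/volume/data')  # hypothetical path
print(comp.externalPath)  # expected: '/data/stack.h5'
print(comp.internalPath)  # expected: '/volume/data'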
def deserializeFromHdf5(self, hdf5File, projectFilePath, headless=False):
    # Check the overall file version
    ilastikVersion = hdf5File["ilastikVersion"].value

    # This is the v0.5 import deserializer.  Don't work with 0.6 projects (or anything else).
    if ilastikVersion != 0.5:
        return

    # The 'working directory' for the purpose of constructing absolute
    # paths from relative paths is the project file's directory.
    projectDir = os.path.split(projectFilePath)[0]
    self.topLevelOperator.WorkingDirectory.setValue(projectDir)

    # Access the top group and the info group
    try:
        #dataset = hdf5File["DataSets"]["dataItem00"]["data"]
        dataDir = hdf5File["DataSets"]
    except KeyError:
        # If our group (or subgroup) doesn't exist, then make sure the operator is empty
        self.topLevelOperator.DatasetGroup.resize(0)
        return

    self.topLevelOperator.DatasetGroup.resize(len(dataDir))
    for index, (datasetDirName, datasetDir) in enumerate(sorted(dataDir.items())):
        datasetInfo = DatasetInfo()

        # We'll set up the link to the dataset in the old project file,
        # but we'll set the location to ProjectInternal so that it will
        # be copied to the new file when the project is saved.
        datasetInfo.location = DatasetInfo.Location.ProjectInternal

        # Some older versions of ilastik 0.5 stored the data in tzyxc order.
        # Some power-users can enable a command-line flag that tells us to
        # transpose the data back to txyzc order when we import the old project.
        default_axis_order = ilastik.utility.globals.ImportOptions.default_axis_order
        if default_axis_order is not None:
            import warnings
            warnings.warn("Using a strange axis order to import ilastik 0.5 projects: {}"
                          .format(default_axis_order))
            datasetInfo.axistags = vigra.defaultAxistags(default_axis_order)

        # Write to the 'private' members to avoid resetting the dataset id
        totalDatasetPath = str(projectFilePath + '/DataSets/' + datasetDirName + '/data')
        datasetInfo._filePath = totalDatasetPath
        datasetInfo._datasetId = datasetDirName  # Use the old dataset name as the new dataset id
        datasetInfo.nickname = "{} (imported from v0.5)".format(datasetDirName)

        # Give the new info to the operator
        self.topLevelOperator.DatasetGroup[index][0].setValue(datasetInfo)
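# --- Illustrative sketch (not from the original source) ---
# The axis-order override above boils down to one vigra call; this shows the
# expected result for the 'txyzc' order mentioned in the comments, assuming
# vigra is installed.
import vigra

tags = vigra.defaultAxistags('txyzc')  # same call the importer makes
print([tag.key for tag in tags])       # expected: ['t', 'x', 'y', 'z', 'c']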
def deserializeFromHdf5(self, hdf5File, projectFilePath):
    with Tracer(traceLogger):
        # Check the overall file version
        ilastikVersion = hdf5File["ilastikVersion"].value

        # This is the v0.5 import deserializer.  Don't work with 0.6 projects (or anything else).
        if ilastikVersion != 0.5:
            return

        # The 'working directory' for the purpose of constructing absolute
        # paths from relative paths is the project file's directory.
        projectDir = os.path.split(projectFilePath)[0]
        self.mainOperator.WorkingDirectory.setValue(projectDir)

        # These project file inputs are required, but are not used because the data is treated as "external"
        self.mainOperator.ProjectDataGroup.setValue('DataSets')
        self.mainOperator.ProjectFile.setValue(hdf5File)

        # Access the top group and the info group
        try:
            #dataset = hdf5File["DataSets"]["dataItem00"]["data"]
            dataDir = hdf5File["DataSets"]
        except KeyError:
            # If our group (or subgroup) doesn't exist, then make sure the operator is empty
            self.mainOperator.Dataset.resize(0)
            return

        self.mainOperator.Dataset.resize(len(dataDir))
        for index, (datasetDirName, datasetDir) in enumerate(sorted(dataDir.items())):
            datasetInfo = DatasetInfo()

            # Since we are importing from a 0.5 file, all datasets will be external
            # to the project (pulled in from the old file as hdf5 datasets)
            datasetInfo.location = DatasetInfo.Location.FileSystem

            # Some older versions of ilastik 0.5 stored the data in tzyxc order.
            # Some power-users can enable a command-line flag that tells us to
            # transpose the data back to txyzc order when we import the old project.
            if ilastik.utility.globals.ImportOptions.default_axis_order is not None:
                datasetInfo.axisorder = ilastik.utility.globals.ImportOptions.default_axis_order

            # Write to the 'private' members to avoid resetting the dataset id
            totalDatasetPath = projectFilePath + '/DataSets/' + datasetDirName + '/data'
            datasetInfo._filePath = str(totalDatasetPath)
            datasetInfo._datasetId = datasetDirName  # Use the old dataset name as the new dataset id

            # Give the new info to the operator
            self.mainOperator.Dataset[index].setValue(datasetInfo)
def create_default_headless_dataset_info(cls, filepath):
    """
    filepath may be a globstring or a full hdf5 path+dataset
    """
    comp = PathComponents(filepath)
    nickname = comp.filenameBase

    # Remove globstring syntax.
    if '*' in nickname:
        nickname = nickname.replace('*', '')
    if os.path.pathsep in nickname:
        nickname = PathComponents(nickname.split(os.path.pathsep)[0]).filenameBase

    info = DatasetInfo()
    info.location = DatasetInfo.Location.FileSystem
    info.nickname = nickname
    info.filePath = filepath

    # Convert all (non-url) paths to absolute
    # (otherwise they are relative to the project file, which probably isn't what the user meant)
    if not isUrl(filepath):
        comp.externalPath = os.path.abspath(comp.externalPath)
        info.filePath = comp.totalPath()
    return info
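# --- Illustrative sketch (not from the original source) ---
# The nickname cleanup above strips globstring syntax before the DatasetInfo
# is built; a typical call is sketched below (the class name is an assumption).
nickname = 'slices_*'  # hypothetical filenameBase of a globstring
if '*' in nickname:
    nickname = nickname.replace('*', '')
print(nickname)  # -> 'slices_'

# info = DataSelectionApplet.create_default_headless_dataset_info('/data/slices_*.png')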
def _readDatasetInfo(self, infoGroup, localDataGroup, projectFilePath, headless):
    # Unready datasets are represented with an empty group.
    if len(infoGroup) == 0:
        return None, False

    datasetInfo = DatasetInfo()

    # Make a reverse-lookup of the location storage strings
    LocationLookup = {v: k for k, v in self.LocationStrings.items()}
    datasetInfo.location = LocationLookup[str(infoGroup['location'].value)]

    # Write to the 'private' members to avoid resetting the dataset id
    datasetInfo._filePath = infoGroup['filePath'].value
    datasetInfo._datasetId = infoGroup['datasetId'].value

    try:
        datasetInfo.allowLabels = infoGroup['allowLabels'].value
    except KeyError:
        pass

    try:
        datasetInfo.drange = tuple(infoGroup['drange'].value)
    except KeyError:
        pass

    try:
        datasetInfo.nickname = infoGroup['nickname'].value
    except KeyError:
        datasetInfo.nickname = PathComponents(datasetInfo.filePath).filenameBase

    try:
        tags = vigra.AxisTags.fromJSON(infoGroup['axistags'].value)
        datasetInfo.axistags = tags
    except KeyError:
        # Old projects just have an 'axisorder' field instead of full axistags
        try:
            axisorder = infoGroup['axisorder'].value
            datasetInfo.axistags = vigra.defaultAxistags(axisorder)
        except KeyError:
            pass

    # If the data is supposed to be in the project, check for it now.
    if datasetInfo.location == DatasetInfo.Location.ProjectInternal:
        if datasetInfo.datasetId not in localDataGroup.keys():
            raise RuntimeError("Corrupt project file. Could not find data for " + infoGroup.name)

    dirty = False
    # If the data is supposed to exist outside the project, make sure it really does.
    if datasetInfo.location == DatasetInfo.Location.FileSystem and not isUrl(datasetInfo.filePath):
        pathData = PathComponents(datasetInfo.filePath, os.path.split(projectFilePath)[0])
        filePath = pathData.externalPath
        if not os.path.exists(filePath):
            if headless:
                raise RuntimeError("Could not find data at " + filePath)
            filt = "Image files (" + ' '.join('*.' + x for x in OpDataSelection.SupportedExtensions) + ')'
            newpath = self.repairFile(filePath, filt)
            if pathData.internalPath is not None:
                newpath += pathData.internalPath
            datasetInfo._filePath = getPathVariants(newpath, os.path.split(projectFilePath)[0])[0]
            dirty = True

    return datasetInfo, dirty
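# --- Illustrative sketch (not from the original source) ---
# The "empty group means unready dataset" convention checked at the top of
# _readDatasetInfo, demonstrated with an in-memory h5py file.
import h5py

with h5py.File('scratch.h5', 'w', driver='core', backing_store=False) as f:
    infoGroup = f.create_group('infos/lane0000/Raw Data')
    # An empty info group would make _readDatasetInfo return (None, False).
    print(len(infoGroup) == 0)  # -> True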
def _readDatasetInfo(self, infoGroup, localDataGroup, projectFilePath, headless):
    # Unready datasets are represented with an empty group.
    if len(infoGroup) == 0:
        return None, False

    datasetInfo = DatasetInfo()

    # Make a reverse-lookup of the location storage strings
    LocationLookup = {v: k for k, v in self.LocationStrings.items()}
    datasetInfo.location = LocationLookup[str(infoGroup['location'].value)]

    # Write to the 'private' members to avoid resetting the dataset id
    datasetInfo._filePath = infoGroup['filePath'].value
    datasetInfo._datasetId = infoGroup['datasetId'].value

    try:
        datasetInfo.allowLabels = infoGroup['allowLabels'].value
    except KeyError:
        pass

    try:
        datasetInfo.drange = tuple(infoGroup['drange'].value)
    except KeyError:
        pass

    try:
        datasetInfo.nickname = infoGroup['nickname'].value
    except KeyError:
        datasetInfo.nickname = PathComponents(datasetInfo.filePath).filenameBase

    try:
        datasetInfo.fromstack = infoGroup['fromstack'].value
    except KeyError:
        # Guess based on the storage setting and original filepath
        datasetInfo.fromstack = (datasetInfo.location == DatasetInfo.Location.ProjectInternal
                                 and (('?' in datasetInfo._filePath)
                                      or (os.path.pathsep in datasetInfo._filePath)))

    try:
        tags = vigra.AxisTags.fromJSON(infoGroup['axistags'].value)
        datasetInfo.axistags = tags
    except KeyError:
        # Old projects just have an 'axisorder' field instead of full axistags
        try:
            axisorder = infoGroup['axisorder'].value
            datasetInfo.axistags = vigra.defaultAxistags(axisorder)
        except KeyError:
            pass

    try:
        start, stop = map(tuple, infoGroup['subvolume_roi'].value)
        datasetInfo.subvolume_roi = (start, stop)
    except KeyError:
        pass

    # If the data is supposed to be in the project, check for it now.
    if datasetInfo.location == DatasetInfo.Location.ProjectInternal:
        if datasetInfo.datasetId not in localDataGroup.keys():
            raise RuntimeError("Corrupt project file. Could not find data for " + infoGroup.name)

    dirty = False
    # If the data is supposed to exist outside the project, make sure it really does.
    if datasetInfo.location == DatasetInfo.Location.FileSystem and not isUrl(datasetInfo.filePath):
        pathData = PathComponents(datasetInfo.filePath, os.path.split(projectFilePath)[0])
        filePath = pathData.externalPath
        if not os.path.exists(filePath):
            if headless:
                raise RuntimeError("Could not find data at " + filePath)
            filt = "Image files (" + ' '.join('*.' + x for x in OpDataSelection.SupportedExtensions) + ')'
            newpath = self.repairFile(filePath, filt)
            if pathData.internalPath is not None:
                newpath += pathData.internalPath
            datasetInfo._filePath = getPathVariants(newpath, os.path.split(projectFilePath)[0])[0]
            dirty = True

    return datasetInfo, dirty
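# --- Illustrative sketch (not from the original source) ---
# The axistags round-trip handled above: the serializer stores tags.toJSON(),
# and the reader restores them with vigra.AxisTags.fromJSON.  Assumes vigra is
# installed.
import vigra

tags = vigra.defaultAxistags('xyc')
json_str = tags.toJSON()                      # what the project file stores
restored = vigra.AxisTags.fromJSON(json_str)  # what the reader does above
assert [t.key for t in restored] == ['x', 'y', 'c']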
def configure_operator_with_parsed_args(self, parsed_args):
    """
    Helper function for headless workflows.
    Configures this applet's top-level operator according to the settings provided in ``parsed_args``.

    :param parsed_args: Must be an ``argparse.Namespace`` as returned by :py:meth:`parse_known_cmdline_args()`.
    """
    role_names = self.topLevelOperator.DatasetRoles.value
    role_paths = collections.OrderedDict()
    if role_names:
        for role_index, role_name in enumerate(role_names):
            arg_name = self._role_name_to_arg_name(role_name)
            input_paths = getattr(parsed_args, arg_name)
            role_paths[role_index] = input_paths

    if parsed_args.input_files:
        # We allow the file list to go to the 'default' role, but only if no other roles were explicitly configured.
        for role_index, input_paths in role_paths.items():
            if input_paths:
                # FIXME: This error message could be more helpful.
                role_args = map(self._role_name_to_arg_name, role_names)
                role_args = map(lambda s: '--' + s, role_args)
                role_args_str = ", ".join(role_args)
                raise Exception("Invalid command line arguments: All roles must be configured explicitly.\n"
                                "Use the following flags to specify which files are matched with which inputs:\n"
                                + role_args_str)
        role_paths = {0: parsed_args.input_files}

    for role_index, input_paths in role_paths.items():
        # If the user doesn't want image stacks to be copied into the project file,
        # we generate hdf5 volumes in a temporary directory and use those files instead.
        if parsed_args.preconvert_stacks:
            import tempfile
            input_paths = self.convertStacksToH5(input_paths, tempfile.gettempdir())

        input_infos = []
        for p in input_paths:
            info = DatasetInfo()
            info.location = DatasetInfo.Location.FileSystem
            info.filePath = p

            comp = PathComponents(p)

            # Convert all (non-url) paths to absolute
            # (otherwise they are relative to the project file, which probably isn't what the user meant)
            if not isUrl(p):
                comp.externalPath = os.path.abspath(comp.externalPath)
                info.filePath = comp.totalPath()
            info.nickname = comp.filenameBase

            # Remove globstring syntax.
            if '*' in info.nickname:
                info.nickname = info.nickname.replace('*', '')
            if os.path.pathsep in info.nickname:
                info.nickname = PathComponents(info.nickname.split(os.path.pathsep)[0]).filenameBase

            input_infos.append(info)

        opDataSelection = self.topLevelOperator
        existing_lanes = len(opDataSelection.DatasetGroup)
        opDataSelection.DatasetGroup.resize(max(len(input_infos), existing_lanes))
        for lane_index, info in enumerate(input_infos):
            opDataSelection.DatasetGroup[lane_index][role_index].setValue(info)

        need_warning = False
        for lane_index in range(len(input_infos)):
            output_slot = opDataSelection.ImageGroup[lane_index][role_index]
            if output_slot.meta.prefer_2d:
                need_warning = True
                break

        if need_warning:
            logger.warn("*******************************************************************************************")
            logger.warn("Some of your input data is stored in a format that is not efficient for 3D access patterns.")
            logger.warn("Performance may suffer as a result. For best performance, use a chunked HDF5 volume.")
            logger.warn("*******************************************************************************************")
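# --- Illustrative sketch (not from the original source) ---
# The role-to-flag convention assumed by the error message above: each dataset
# role becomes its own command-line flag.  The lowercase/underscore rule below
# mirrors what _role_name_to_arg_name is expected to do, and the role names
# are hypothetical examples.
role_names = ['Raw Data', 'Prediction Maps']

def role_name_to_arg_name(role_name):
    return role_name.lower().replace(' ', '_').replace('-', '_')

print(['--' + role_name_to_arg_name(name) for name in role_names])
# expected: ['--raw_data', '--prediction_maps']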