def test_metainfo_from_h5(self):
    """
    Metadata derived from an h5 dataset must match default-constructed
    metadata built from the same shape/dtype/axes.
    """
    shape = (3, 9, 10, 11)
    expected = VoxelsMetadata.create_default_metadata(
        shape, numpy.float32, "cxyz", 1.0, "nanometers")
    # driver='core' with backing_store=False: purely in-memory, nothing touches disk.
    h5file = h5py.File("dummy.h5", mode='w', driver='core', backing_store=False)
    dataset = h5file.create_dataset('dset', shape=shape, dtype=numpy.float32, chunks=True)
    metadata = VoxelsMetadata.create_from_h5_dataset(dataset)
    assert metadata.dtype.type is numpy.float32
    shape_msg = "Wrong shape: {} vs. {}".format(metadata.shape, expected.shape)
    assert metadata.shape == expected.shape, shape_msg
def test_create_axistags(self):
    """
    create_axistags() must copy per-axis resolutions and channel labels
    from the metadata json. Skipped when vigra is unavailable.
    """
    try:
        import vigra
    except ImportError:
        raise nose.SkipTest
    tags = VoxelsMetadata(self.metadata_json).create_axistags()
    for axiskey, expected_resolution in (('x', 3.1), ('y', 3.1), ('z', 40)):
        assert tags[axiskey].resolution == expected_resolution
    assert tags.channelLabels == ["intensity-R", "intensity-G", "intensity-B"]
def _do_modify_data(self, uuid, dataname, dims, shape, offset):
    """
    Respond to a POST request to modify a subvolume of data.
    All parameters are strings from the REST string.
    """
    dataset = self._get_h5_dataset(uuid, dataname)
    roi_start, roi_stop = self._determine_request_roi( dataset, dims, shape, offset )
    # Prepend channel to make "full" roi -- the channel axis is always written in full.
    full_roi_start = (0,) + roi_start
    full_roi_stop = (dataset.shape[0],) + roi_stop
    full_roi_shape = numpy.subtract(full_roi_stop, full_roi_start)
    slicing = tuple( slice(x,y) for x,y in zip(full_roi_start, full_roi_stop) )
    # Prefer metadata previously stored in the h5 attrs; otherwise derive it
    # from the dataset. The attr is removed here and re-written below once
    # its shape/minindex have been brought up to date with this write.
    if 'dvid_metadata' in dataset.attrs:
        voxels_metadata = VoxelsMetadata(dataset.attrs['dvid_metadata'])
        del dataset.attrs['dvid_metadata']
    else:
        voxels_metadata = VoxelsMetadata.create_from_h5_dataset(dataset)
    # If the user is writing data beyond the current extents of the dataset,
    # resize the dataset first. numpy.maximum ensures no axis is ever shrunk
    # by a write that only exceeds the extent along some other axis.
    if (numpy.array(full_roi_stop) > dataset.shape).any():
        dataset.resize( numpy.maximum(full_roi_stop, dataset.shape) )
        voxels_metadata.shape = tuple( map(int, numpy.maximum( voxels_metadata.shape, full_roi_stop )) )
    # Overwrite minindex if needed (i.e. the write starts below the current minimum).
    if (numpy.array(full_roi_start) < voxels_metadata.minindex).any():
        voxels_metadata.minindex = tuple( numpy.minimum( voxels_metadata.minindex, full_roi_start ) )
    dataset.attrs['dvid_metadata'] = voxels_metadata.to_json()
    # Must read the entire message body, even if it isn't used below.
    codec = VoxelsNddataCodec( dataset.dtype )
    data = codec.decode_to_ndarray(self.rfile, full_roi_shape)
    if (numpy.array(roi_start) < 0).any():
        # We don't support negative coordinates in this mock server.
        # But as a compromise, we don't choke here.
        # Instead, we simply do nothing.
        pass
    else:
        dataset[slicing] = data
    self.server.h5_file.flush()
    #self.send_response(httplib.NO_CONTENT) # "No Content" (accepted)
    self.send_response(httplib.OK)
    self.send_header("Content-length", 0 )
    self.end_headers()
def test_create_axistags(self):
    """
    Axistags created from metadata must carry the json's resolutions and
    channel labels. Skipped when vigra can't be imported.
    """
    try:
        import vigra
    except ImportError:
        raise nose.SkipTest
    metadata = VoxelsMetadata(self.metadata_json)
    tags = metadata.create_axistags()
    expected_resolutions = {'x': 3.1, 'y': 3.1, 'z': 40}
    for key in ('x', 'y', 'z'):
        assert tags[key].resolution == expected_resolutions[key]
    expected_labels = ["intensity-R", "intensity-G", "intensity-B"]
    assert tags.channelLabels == expected_labels
def test_metainfo_from_h5(self):
    """
    Building metadata from an h5 dataset yields the same shape and dtype
    as default metadata constructed for the equivalent volume.
    """
    shape = (3, 9, 10, 11)
    reference = VoxelsMetadata.create_default_metadata(
        shape, numpy.float32, "cxyz", 1.0, "nanometers")
    # In-memory h5 file: no on-disk artifacts are created.
    memfile = h5py.File("dummy.h5", mode='w', driver='core', backing_store=False)
    dset = memfile.create_dataset('dset', shape=shape, dtype=numpy.float32, chunks=True)
    derived = VoxelsMetadata.create_from_h5_dataset(dset)
    assert derived.dtype.type is numpy.float32
    assert derived.shape == reference.shape, \
        "Wrong shape: {} vs. {}".format(derived.shape, reference.shape)
def _do_modify_data(self, uuid, dataname, dims, shape, offset):
    """
    Respond to a POST request to modify a subvolume of data.
    All parameters are strings from the REST string.
    """
    dataset = self._get_h5_dataset(uuid, dataname)
    roi_start, roi_stop = self._determine_request_roi( dataset, dims, shape, offset )
    # Prepend channel to make "full" roi -- the channel axis is always written in full.
    full_roi_start = (0,) + roi_start
    full_roi_stop = (dataset.shape[0],) + roi_stop
    full_roi_shape = numpy.subtract(full_roi_stop, full_roi_start)
    slicing = tuple( slice(x,y) for x,y in zip(full_roi_start, full_roi_stop) )
    # If the user is writing data beyond the current extents of the dataset,
    # resize the dataset first.
    # BUGFIX: resize to the elementwise maximum of the requested stop and the
    # current shape.  Resizing directly to full_roi_stop would SHRINK (and
    # truncate) any axis whose requested stop is below the current extent.
    if (numpy.array(full_roi_stop) > dataset.shape).any():
        dataset.resize( numpy.maximum(full_roi_stop, dataset.shape) )
    voxels_metadata = VoxelsMetadata.create_from_h5_dataset(dataset)
    codec = VoxelsNddataCodec( voxels_metadata )
    data = codec.decode_to_ndarray(self.rfile, full_roi_shape)
    dataset[slicing] = data
    self.server.h5_file.flush()
    #self.send_response(httplib.NO_CONTENT) # "No Content" (accepted)
    self.send_response(httplib.OK)
    self.send_header("Content-length", 0 )
    self.end_headers()
def test_parse(self):
    """
    Parsing the metadata json must produce the expected shape, dtype,
    axiskeys, and per-axis resolutions.
    """
    metadata = VoxelsMetadata(self.metadata_json)
    assert metadata.shape == (3, 100, 200, 400), \
        "Wrong shape: {}".format(metadata.shape)
    assert metadata.dtype == numpy.uint8
    assert metadata.axiskeys == 'cxyz'
    for axis_index, resolution in enumerate([3.1, 3.1, 40]):
        assert metadata['Axes'][axis_index]["Resolution"] == resolution
def test_basic_roundtrip(self):
    """
    Encoding an ndarray to a stream and decoding it back must reproduce
    the original data (as a Fortran-contiguous array).
    """
    original = numpy.random.randint(0,255, (3, 100, 200)).astype(numpy.uint8)
    metadata = VoxelsMetadata.create_default_metadata(
        original.shape, original.dtype, 'cxy', 1.0, "nanometers")
    codec = VoxelsNddataCodec( metadata )
    stream = StringIO.StringIO()
    codec.encode_from_ndarray(stream, original)
    stream.seek(0)
    decoded = codec.decode_to_ndarray(stream, original.shape)
    # The codec yields Fortran-ordered arrays.
    assert decoded.flags['F_CONTIGUOUS']
    self._assert_matching(decoded, original)
def _do_get_volume_schema(self, uuid, dataname):
    """
    Respond to a query for dataset info.
    Serializes the dataset's metadata as json and writes it as the body.
    """
    dataset = self._get_h5_dataset(uuid, dataname)
    metadata = VoxelsMetadata.create_from_h5_dataset(dataset)
    body = json.dumps(metadata)
    self.send_response(httplib.OK)
    for header, value in (("Content-type", "text/json"),
                          ("Content-length", str(len(body)))):
        self.send_header(header, value)
    self.end_headers()
    self.wfile.write(body)
def _do_get_volume_schema(self, uuid, dataname):
    """
    Respond to a query for dataset info.
    The response body is the dataset's metadata serialized as json.
    """
    metadata = VoxelsMetadata.create_from_h5_dataset(
        self._get_h5_dataset(uuid, dataname))
    body = json.dumps(metadata)
    self.send_response(httplib.OK)
    self.send_header("Content-type", "text/json")
    self.send_header("Content-length", str(len(body)))
    self.end_headers()
    self.wfile.write(body)
def _create_volume(self, dataset_name, uuid, dataname, volume_path, typename, instance_params):
    """
    Create a new, empty, resizable volume dataset in the server's h5 file,
    then soft-link it into the node group for the given uuid.
    """
    # Must read exact bytes.
    # Apparently rfile.read() just hangs.
    # NOTE(review): body_len is currently unused because the metadata-parsing
    # code below is commented out; it is kept for when that code returns.
    body_len = self.headers.get("Content-Length")
    ## Current DVID API does not use metadata json for creating the volume.
    ## This may change soon...
    ##
    #metadata_json = self.rfile.read( int(body_len) )
    #try:
    #    voxels_metadata = VoxelsMetadata( metadata_json )
    #except ValueError as ex:
    #    raise self.RequestError( httplib.BAD_REQUEST, 'Can\'t create volume. '
    #                             'Error parsing volume metadata: {}\n'
    #                             'Invalid metadata response body was:\n{}'
    #                             ''.format( ex.args[0], metadata_json ) )
    #expected_typename = voxels_metadata.determine_dvid_typename()
    #if typename != expected_typename:
    #    raise self.RequestError( httplib.BAD_REQUEST,
    #                             "Cannot create volume. "
    #                             "REST typename was {}, but metadata JSON implies typename {}"
    #                             "".format( typename, expected_typename ) )

    # Instead, the json contains some other parameters that we don't really care about...
    # But we need to read at least one of them to determine the dimensionality of the data.
    try:
        num_axes = len(instance_params["VoxelSize"].split(','))
    except KeyError:
        raise self.RequestError(
            httplib.BAD_REQUEST,
            "Cannot create volume. Config data in message body is missing 'VoxelSize' parameter: \n"
            + str(instance_params))

    # Create the new volume in the appropriate 'volumes' group,
    # and then link to it in the node group.
    dtypename, channels = VoxelsMetadata.determine_channels_from_dvid_typename( typename)
    # Channel axis comes first; spatial axes start at size 0 but are
    # unbounded (maxshape=None) so later writes can grow them.
    shape = (channels, ) + (0, ) * num_axes
    maxshape = (None, ) * len(shape)  # No maxsize
    dtype = numpy.dtype(dtypename)
    self.server.h5_file.create_dataset(volume_path, shape=shape, dtype=dtype, maxshape=maxshape)
    linkname = '/datasets/{dataset_name}/nodes/{uuid}/{dataname}'.format( **locals())
    self.server.h5_file[linkname] = h5py.SoftLink(volume_path)
    self.server.h5_file.flush()
def test_create_default_metadata(self):
    """
    Default metadata must reflect the requested shape/axes/dtype.
    Channel labels are writable after construction.
    """
    metadata = VoxelsMetadata.create_default_metadata(
        (2,10,11), numpy.int64, "cxy", 1.5, "nanometers" )
    metadata["Properties"]["Values"][0]["Label"] = "R"
    metadata["Properties"]["Values"][1]["Label"] = "G"
    # Only the non-channel axes appear in "Axes".
    axes = metadata["Axes"]
    assert len(axes) == 2
    assert (axes[0]["Label"], axes[0]["Size"]) == ("X", 10)
    assert (axes[1]["Label"], axes[1]["Size"]) == ("Y", 11)
    channel_values = metadata["Properties"]["Values"]
    assert len(channel_values) == 2 # 2 channels
    assert [v["DataType"] for v in channel_values] == ["int64", "int64"]
    assert [v["Label"] for v in channel_values] == ["R", "G"]
def test_create_default_metadata(self):
    """
    create_default_metadata() must record axis labels/sizes for the
    non-channel axes and one per-channel DataType entry.
    """
    metadata = VoxelsMetadata.create_default_metadata(
        (2, 10, 11), numpy.int64, "cxy", 1.5, "nanometers")
    values = metadata["Properties"]["Values"]
    values[0]["Label"] = "R"
    values[1]["Label"] = "G"
    # The channel axis is excluded from "Axes".
    assert len(metadata["Axes"]) == 2
    for axis, (label, size) in zip(metadata["Axes"], [("X", 10), ("Y", 11)]):
        assert axis["Label"] == label
        assert axis["Size"] == size
    assert len(values) == 2  # 2 channels
    assert values[0]["DataType"] == "int64"
    assert values[1]["DataType"] == "int64"
    assert values[0]["Label"] == "R"
    assert values[1]["Label"] == "G"
def _do_modify_data(self, uuid, dataname, dims, shape, offset):
    """
    Respond to a POST request to modify a subvolume of data.
    All parameters are strings from the REST string.
    """
    dataset = self._get_h5_dataset(uuid, dataname)
    roi_start, roi_stop = self._determine_request_roi(
        dataset, dims, shape, offset)
    # Prepend channel to make "full" roi -- channel axis is always written in full.
    full_roi_start = (0, ) + roi_start
    full_roi_stop = (dataset.shape[0], ) + roi_stop
    full_roi_shape = numpy.subtract(full_roi_stop, full_roi_start)
    slicing = tuple(
        slice(x, y) for x, y in zip(full_roi_start, full_roi_stop))
    # Prefer metadata previously stored in the h5 attrs; otherwise derive it
    # from the dataset.  The attr is removed here and re-written below once
    # its shape/minindex have been brought up to date with this write.
    if 'dvid_metadata' in dataset.attrs:
        voxels_metadata = VoxelsMetadata(dataset.attrs['dvid_metadata'])
        del dataset.attrs['dvid_metadata']
    else:
        voxels_metadata = VoxelsMetadata.create_from_h5_dataset(dataset)
    # If the user is writing data beyond the current extents of the dataset,
    # resize the dataset first. numpy.maximum ensures no axis is ever shrunk.
    if (numpy.array(full_roi_stop) > dataset.shape).any():
        dataset.resize(numpy.maximum(full_roi_stop, dataset.shape))
        voxels_metadata.shape = tuple(
            map(int, numpy.maximum(voxels_metadata.shape, full_roi_stop)))
    # Overwrite minindex if needed (write starts below the current minimum).
    if (numpy.array(full_roi_start) < voxels_metadata.minindex).any():
        voxels_metadata.minindex = tuple(
            numpy.minimum(voxels_metadata.minindex, full_roi_start))
    dataset.attrs['dvid_metadata'] = voxels_metadata.to_json()
    # Must read the entire message body, even if it isn't used below.
    codec = VoxelsNddataCodec(dataset.dtype)
    data = codec.decode_to_ndarray(self.rfile, full_roi_shape)
    if (numpy.array(roi_start) < 0).any():
        # We don't support negative coordinates in this mock server.
        # But as a compromise, we don't choke here.
        # Instead, we simply do nothing.
        pass
    else:
        dataset[slicing] = data
    self.server.h5_file.flush()
    #self.send_response(httplib.NO_CONTENT) # "No Content" (accepted)
    self.send_response(httplib.OK)
    self.send_header("Content-length", 0)
    self.end_headers()
def _create_volume( self, dataset_name, uuid, dataname, volume_path, typename, instance_params ):
    """
    Create a new, empty, resizable volume dataset in the server's h5 file,
    then soft-link it into the node group for the given uuid.
    """
    # Must read exact bytes.
    # Apparently rfile.read() just hangs.
    # NOTE(review): body_len is currently unused because the metadata-parsing
    # code below is commented out; it is kept for when that code returns.
    body_len = self.headers.get("Content-Length")
    ## Current DVID API does not use metadata json for creating the volume.
    ## This may change soon...
    ##
    #metadata_json = self.rfile.read( int(body_len) )
    #try:
    #    voxels_metadata = VoxelsMetadata( metadata_json )
    #except ValueError as ex:
    #    raise self.RequestError( httplib.BAD_REQUEST, 'Can\'t create volume. '
    #                             'Error parsing volume metadata: {}\n'
    #                             'Invalid metadata response body was:\n{}'
    #                             ''.format( ex.args[0], metadata_json ) )
    #expected_typename = voxels_metadata.determine_dvid_typename()
    #if typename != expected_typename:
    #    raise self.RequestError( httplib.BAD_REQUEST,
    #                             "Cannot create volume. "
    #                             "REST typename was {}, but metadata JSON implies typename {}"
    #                             "".format( typename, expected_typename ) )

    # Instead, the json contains some other parameters that we don't really care about...
    # But we need to read at least one of them to determine the dimensionality of the data.
    try:
        num_axes = len(instance_params["VoxelSize"].split(','))
    except KeyError:
        raise self.RequestError( httplib.BAD_REQUEST,
                                 "Cannot create volume. Config data in message body is missing 'VoxelSize' parameter: \n"
                                 + str(instance_params) )

    # Create the new volume in the appropriate 'volumes' group,
    # and then link to it in the node group.
    dtypename, channels = VoxelsMetadata.determine_channels_from_dvid_typename(typename)
    # Channel axis comes first; spatial axes start at size 0 but are
    # unbounded (maxshape=None) so later writes can grow them.
    shape = (channels,) + (0,)*num_axes
    maxshape = (None,)*len(shape) # No maxsize
    dtype = numpy.dtype(dtypename)
    self.server.h5_file.create_dataset( volume_path, shape=shape, dtype=dtype, maxshape=maxshape )
    linkname = '/datasets/{dataset_name}/nodes/{uuid}/{dataname}'.format( **locals() )
    self.server.h5_file[linkname] = h5py.SoftLink( volume_path )
    self.server.h5_file.flush()
def _get_format_selection_error_msg(self, *args):
    """
    If the currently selected format does not support the input image format,
    return an error message stating why. Otherwise, return an empty string.
    """
    if not self.Input.ready():
        return "Input not ready"
    selected_format = self.OutputFormat.value
    # These formats support every shape/dtype combination.
    if selected_format in ('hdf5', 'npy', 'blockwise hdf5'):
        return ""
    input_meta = self.Input.meta
    tagged_shape = input_meta.getTaggedShape()
    axes = OpStackWriter.get_nonsingleton_axes_for_tagged_shape(tagged_shape)
    dtype = input_meta.dtype
    if selected_format == 'dvid':
        # dvid requires a channel axis, which must come last.
        # Internally, we transpose it before sending it over the wire
        if tagged_shape.keys()[-1] != 'c':
            return "DVID requires the last axis to be channel."
        # Make sure DVID supports this dtype/channel combo.
        from pydvid.voxels import VoxelsMetadata
        # The export operator (see below) uses transpose_axes=True, so the
        # check is performed against the reversed axiskeys and shape.
        reversed_keys = "".join(reversed(input_meta.getAxisKeys()))
        reversed_shape = tuple(reversed(input_meta.shape))
        metainfo = VoxelsMetadata.create_default_metadata(
            reversed_shape, dtype, reversed_keys, 0.0, 'nanometers')
        try:
            metainfo.determine_dvid_typename()
            return ""
        except Exception as ex:
            return str(ex)
    return FormatValidity.check(input_meta.getTaggedShape(),
                                input_meta.dtype, selected_format)
def _get_format_selection_error_msg(self, *args):
    """
    If the currently selected format does not support the input image format,
    return an error message stating why. Otherwise, return an empty string.
    """
    if not self.Input.ready():
        return "Input not ready"
    chosen_format = self.OutputFormat.value
    # These formats support all shape/dtype combinations.
    if chosen_format in ('hdf5', 'npy', 'blockwise hdf5'):
        return ""
    meta = self.Input.meta
    tagged_shape = meta.getTaggedShape()
    axes = OpStackWriter.get_nonsingleton_axes_for_tagged_shape(tagged_shape)
    dtype = meta.dtype
    if chosen_format == 'dvid':
        # dvid requires a channel axis, which must come last.
        # Internally, we transpose it before sending it over the wire
        if tagged_shape.keys()[-1] != 'c':
            return "DVID requires the last axis to be channel."
        # Make sure DVID supports this dtype/channel combo.
        from pydvid.voxels import VoxelsMetadata
        # The export operator (see below) uses transpose_axes=True,
        # hence the reversed axiskeys/shape below.
        rev_keys = "".join(reversed(meta.getAxisKeys()))
        rev_shape = tuple(reversed(meta.shape))
        metainfo = VoxelsMetadata.create_default_metadata(
            rev_shape, dtype, rev_keys, 0.0, 'nanometers')
        try:
            metainfo.determine_dvid_typename()
            return ""
        except Exception as ex:
            return str(ex)
    return FormatValidity.check(meta.getTaggedShape(), meta.dtype, chosen_format)
def __init__(self, connection, uuid, data_name, *args, **kwargs):
    """
    Create a new VoxelsAccessor with all the same properties as the
    current instance, except that it accesses a roi mask volume.
    """
    # Default mask metadata: a single uint8 channel labeled 'roi-mask'.
    mask_metadata = {
        "Properties": {
            "Values": [{ "DataType": "uint8", "Label": "roi-mask" }]
        }
    }
    # For now, we hardcode XYZ order.
    # Size/Offset are placeholders here, since they don't apply to ROI data.
    axis_template = { "Label": "",
                      "Resolution": 1,
                      "Units": "",
                      "Size": 0,
                      "Offset": 0 }
    axes = []
    for label in ("X", "Y", "Z"):
        axis_info = copy.copy(axis_template)
        axis_info["Label"] = label
        axes.append(axis_info)
    mask_metadata["Axes"] = axes
    # Callers must not supply their own metadata/access type for a mask accessor.
    assert '_metadata' not in kwargs or kwargs['_metadata'] is None
    kwargs['_metadata'] = VoxelsMetadata(mask_metadata)
    assert '_access_type' not in kwargs or kwargs['_access_type'] is None
    kwargs['_access_type'] = 'mask'
    # Init base class with pre-formed metadata instead of querying for it.
    super(RoiMaskAccessor, self).__init__(connection, uuid, data_name, *args, **kwargs)
def _do_get_data(self, uuid, dataname, dims, shape, offset):
    """
    Respond to a query for volume data.
    All parameters are strings from the REST string.
    """
    dataset = self._get_h5_dataset(uuid, dataname)
    roi_start, roi_stop = self._determine_request_roi(dataset, dims, shape, offset)
    # Prepend channel slicing: the channel axis is always read in full.
    spatial_slices = tuple(slice(start, stop)
                           for start, stop in zip(roi_start, roi_stop))
    slicing = (slice(None),) + spatial_slices
    data = dataset[slicing]
    metadata = VoxelsMetadata.create_from_h5_dataset(dataset)
    codec = VoxelsNddataCodec(metadata)
    # Content-length must be computed up front, before streaming the body.
    buffer_len = codec.calculate_buffer_len(data.shape)
    self.send_response(httplib.OK)
    self.send_header("Content-type", VoxelsNddataCodec.VOLUME_MIMETYPE)
    self.send_header("Content-length", str(buffer_len))
    self.end_headers()
    codec.encode_from_ndarray(self.wfile, data)
def _get_format_selection_error_msg(self, *args):
    """
    If the currently selected format does not support the input image format,
    return an error message stating why. Otherwise, return an empty string.
    """
    if not self.Input.ready():
        return "Input not ready"
    output_format = self.OutputFormat.value
    # These cases support all combinations
    if output_format in ('hdf5', 'npy'):
        return ""
    tagged_shape = self.Input.meta.getTaggedShape()
    axes = OpStackWriter.get_nonsingleton_axes_for_tagged_shape(
        tagged_shape)
    output_dtype = self.Input.meta.dtype
    if output_format == 'dvid':
        # dvid requires a channel axis, which must come last.
        # Internally, we transpose it before sending it over the wire
        if tagged_shape.keys()[-1] != 'c':
            return "DVID requires the last axis to be channel."
        # Make sure DVID supports this dtype/channel combo.
        from pydvid.voxels import VoxelsMetadata
        axiskeys = self.Input.meta.getAxisKeys()
        # We reverse the axiskeys because the export operator (see below) uses transpose_axes=True
        reverse_axiskeys = "".join(reversed(axiskeys))
        reverse_shape = tuple(reversed(self.Input.meta.shape))
        metainfo = VoxelsMetadata.create_default_metadata(
            reverse_shape, output_dtype, reverse_axiskeys, 0.0, 'nanometers')
        try:
            metainfo.determine_dvid_typename()
        except Exception as ex:
            return str(ex)
        else:
            return ""
    # None of the remaining formats support more than 4 channels.
    # NOTE(review): relies on Python 2 filter() returning a list (truthiness/len).
    if 'c' in tagged_shape and tagged_shape['c'] > 4 and not filter(
            lambda fmt: fmt.name == output_format, self._3d_sequence_formats):
        return "Too many channels."
    # HDR format supports float32 only, and must have exactly 3 channels
    if output_format == 'hdr' or output_format == 'hdr sequence':
        if output_dtype == numpy.float32 and\
           'c' in tagged_shape and tagged_shape['c'] == 3:
            return ""
        else:
            return "HDR volumes must be float32, with exactly 3 channels."
    # Apparently, TIFF supports everything but signed byte
    if 'tif' in output_format and output_dtype == numpy.int8:
        return "TIF output does not support signed byte (int8). Try unsigned (uint8)."
    # Apparently, these formats support everything except uint32
    # See http://github.com/ukoethe/vigra/issues/153
    if output_dtype == numpy.uint32 and \
       ( 'pbm' in output_format or \
         'pgm' in output_format or \
         'pnm' in output_format or \
         'ppm' in output_format ):
        return "PBM/PGM/PNM/PPM do not support the uint32 pixel type."
    # These formats don't support 2 channels (must be either 1 or 3)
    non_dualband_formats = ['bmp', 'gif', 'jpg', 'jpeg', 'ras']
    for fmt in non_dualband_formats:
        if fmt in output_format and axes[0] != 'c' and 'c' in tagged_shape:
            if 'c' in tagged_shape and tagged_shape[
                    'c'] != 1 and tagged_shape['c'] != 3:
                return "Invalid number of channels (must be exactly 1 or 3)."
    # 2D formats only support 2D images (singleton/channel axes excepted)
    if filter(lambda fmt: fmt.name == output_format, self._2d_formats):
        # Examples:
        #  OK: 'xy', 'xyc'
        #  NOT OK: 'xc', 'xyz'
        nonchannel_axes = filter(lambda a: a != 'c', axes)
        if len(nonchannel_axes) == 2:
            return ""
        else:
            return "Input has too many dimensions for a 2D output format."
    # For sequence formats, the first axis is the 'step' axis and is not
    # counted toward the per-image dimensionality.
    nonstep_axes = axes[1:]
    nonchannel_axes = filter(lambda a: a != 'c', nonstep_axes)
    # 3D sequences of 2D images require a 3D image
    # (singleton/channel axes excepted, unless channel is the 'step' axis)
    if filter(lambda fmt: fmt.name == output_format, self._3d_sequence_formats)\
       or output_format == 'multipage tiff':
        # Examples:
        #  OK: 'xyz', 'xyzc', 'cxy'
        #  NOT OK: 'cxyz'
        if len(nonchannel_axes) == 2:
            return ""
        else:
            return "Can't export 3D stack: Input is not 3D or axis are in the wrong order."
    # 4D sequences of 3D images require a 4D image
    # (singleton/channel axes excepted, unless channel is the 'step' axis)
    if output_format == 'multipage tiff sequence':
        # Examples:
        #  OK: 'txyz', 'txyzc', 'cxyz'
        #  NOT OK: 'xyzc', 'xyz', 'xyc'
        if len(nonchannel_axes) == 3:
            return ""
        else:
            return "Can't export 4D stack: Input is not 4D."
    assert False, "Unknown format case: {}".format(output_format)
def _get_format_selection_error_msg(self, *args):
    """
    If the currently selected format does not support the input image format,
    return an error message stating why. Otherwise, return an empty string.
    """
    if not self.Input.ready():
        return "Input not ready"
    output_format = self.OutputFormat.value
    # These cases support all combinations
    if output_format in ('hdf5', 'npy'):
        return ""
    tagged_shape = self.Input.meta.getTaggedShape()
    axes = OpStackWriter.get_nonsingleton_axes_for_tagged_shape( tagged_shape )
    output_dtype = self.Input.meta.dtype
    if output_format == 'dvid':
        # dvid requires a channel axis, which must come last.
        # Internally, we transpose it before sending it over the wire
        if tagged_shape.keys()[-1] != 'c':
            return "DVID requires the last axis to be channel."
        # Make sure DVID supports this dtype/channel combo.
        from pydvid.voxels import VoxelsMetadata
        axiskeys = self.Input.meta.getAxisKeys()
        # We reverse the axiskeys because the export operator (see below) uses transpose_axes=True
        reverse_axiskeys = "".join(reversed( axiskeys ))
        reverse_shape = tuple(reversed(self.Input.meta.shape))
        metainfo = VoxelsMetadata.create_default_metadata( reverse_shape, output_dtype, reverse_axiskeys, 0.0, 'nanometers' )
        try:
            metainfo.determine_dvid_typename()
        except Exception as ex:
            return str(ex)
        else:
            return ""
    # None of the remaining formats support more than 4 channels.
    # NOTE(review): relies on Python 2 filter() returning a list (truthiness/len).
    if 'c' in tagged_shape and tagged_shape['c'] > 4 and not filter(lambda fmt: fmt.name == output_format, self._3d_sequence_formats):
        return "Too many channels."
    # HDR format supports float32 only, and must have exactly 3 channels
    if output_format == 'hdr' or output_format == 'hdr sequence':
        if output_dtype == numpy.float32 and\
           'c' in tagged_shape and tagged_shape['c'] == 3:
            return ""
        else:
            return "HDR volumes must be float32, with exactly 3 channels."
    # Apparently, TIFF supports everything but signed byte
    if 'tif' in output_format and output_dtype == numpy.int8:
        return "TIF output does not support signed byte (int8). Try unsigned (uint8)."
    # Apparently, these formats support everything except uint32
    # See http://github.com/ukoethe/vigra/issues/153
    if output_dtype == numpy.uint32 and \
       ( 'pbm' in output_format or \
         'pgm' in output_format or \
         'pnm' in output_format or \
         'ppm' in output_format ):
        return "PBM/PGM/PNM/PPM do not support the uint32 pixel type."
    # These formats don't support 2 channels (must be either 1 or 3)
    non_dualband_formats = ['bmp', 'gif', 'jpg', 'jpeg', 'ras']
    for fmt in non_dualband_formats:
        if fmt in output_format and axes[0] != 'c' and 'c' in tagged_shape:
            if 'c' in tagged_shape and tagged_shape['c'] != 1 and tagged_shape['c'] != 3:
                return "Invalid number of channels (must be exactly 1 or 3)."
    # 2D formats only support 2D images (singleton/channel axes excepted)
    if filter(lambda fmt: fmt.name == output_format, self._2d_formats):
        # Examples:
        #  OK: 'xy', 'xyc'
        #  NOT OK: 'xc', 'xyz'
        nonchannel_axes = filter(lambda a: a != 'c', axes)
        if len(nonchannel_axes) == 2:
            return ""
        else:
            return "Input has too many dimensions for a 2D output format."
    # For sequence formats, the first axis is the 'step' axis and is not
    # counted toward the per-image dimensionality.
    nonstep_axes = axes[1:]
    nonchannel_axes = filter( lambda a: a != 'c', nonstep_axes )
    # 3D sequences of 2D images require a 3D image
    # (singleton/channel axes excepted, unless channel is the 'step' axis)
    if filter(lambda fmt: fmt.name == output_format, self._3d_sequence_formats)\
       or output_format == 'multipage tiff':
        # Examples:
        #  OK: 'xyz', 'xyzc', 'cxy'
        #  NOT OK: 'cxyz'
        if len(nonchannel_axes) == 2:
            return ""
        else:
            return "Can't export 3D stack: Input is not 3D or axis are in the wrong order."
    # 4D sequences of 3D images require a 4D image
    # (singleton/channel axes excepted, unless channel is the 'step' axis)
    if output_format == 'multipage tiff sequence':
        # Examples:
        #  OK: 'txyz', 'txyzc', 'cxyz'
        #  NOT OK: 'xyzc', 'xyz', 'xyc'
        if len(nonchannel_axes) == 3:
            return ""
        else:
            return "Can't export 4D stack: Input is not 4D."
    assert False, "Unknown format case: {}".format( output_format )