def setUpClass(cls):
    """
    Override.  Called by nosetests.
    """
    # Choose names
    cls.dvid_repo = "datasetA"
    cls.data_name = "random_data"
    cls.volume_location = "/repos/{dvid_repo}/volumes/{data_name}".format(**cls.__dict__)

    cls.data_uuid = get_testrepo_root_uuid()
    cls.node_location = "/repos/{dvid_repo}/nodes/{data_uuid}".format(**cls.__dict__)

    # Generate some test data
    #data = numpy.random.randint(0, 255, (128, 256, 512, 1)).astype( numpy.uint8 )
    data = numpy.zeros((128, 256, 512, 1), dtype=numpy.uint8)
    data.flat[:] = numpy.arange(numpy.prod((128, 256, 512, 1)))
    cls.original_data = data
    cls.voxels_metadata = VoxelsMetadata.create_default_metadata(data.shape, data.dtype, "zyxc", 1.0, "")

    # Write it to a new data instance
    node_service = DVIDNodeService(TEST_DVID_SERVER, cls.data_uuid)
    node_service.create_grayscale8(cls.data_name)
    node_service.put_gray3D(cls.data_name, data[..., 0], (0, 0, 0))
def __init__(self, hostname, uuid, data_name, *args, **kwargs):
    """
    Create a new accessor with all the same properties as a VoxelsAccessor,
    except that it accesses an ROI mask volume.
    """
    # Create default mask metadata.
    mask_metadata = {}
    mask_metadata["Properties"] = {"Values": [{"DataType": "uint8", "Label": "roi-mask"}]}

    # For now, we hardcode XYZ order.
    # Size/offset don't really apply to ROI data, so they are left as placeholders.
    default_axis_info = {"Label": "", "Resolution": 1, "Units": "", "Size": 0, "Offset": 0}
    mask_metadata["Axes"] = [copy.copy(default_axis_info),
                             copy.copy(default_axis_info),
                             copy.copy(default_axis_info)]
    mask_metadata["Axes"][0]["Label"] = "X"
    mask_metadata["Axes"][1]["Label"] = "Y"
    mask_metadata["Axes"][2]["Label"] = "Z"

    assert '_metadata' not in kwargs or kwargs['_metadata'] is None
    kwargs['_metadata'] = VoxelsMetadata(mask_metadata)

    assert '_access_type' not in kwargs or kwargs['_access_type'] is None
    kwargs['_access_type'] = 'mask'

    # Init the base class with pre-formed metadata instead of querying the server for it.
    super(RoiMaskAccessor, self).__init__(hostname, uuid, data_name, *args, **kwargs)
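# Added sketch (not part of the original source): illustrates the effect of the
# pre-formed metadata above.  The server address and "my_roi" data name are
# hypothetical placeholders; the .dtype attribute is the one advertised by the
# VoxelsAccessor base class.
def example_roi_mask_read(uuid):
    """
    Hedged usage sketch.  Constructing a RoiMaskAccessor never queries the
    server for metadata, because __init__ injects the uint8 mask metadata
    directly; reads then go through the inherited VoxelsAccessor interface.
    """
    import numpy
    mask_accessor = RoiMaskAccessor("127.0.0.1:8000", uuid, "my_roi")
    # The mask metadata declares uint8 values, so the accessor reports uint8.
    assert mask_accessor.dtype == numpy.uint8
    return mask_accessor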
def run_export(self):
    self.progressSignal(0)

    url = self.NodeDataUrl.value
    url_path = url.split('://')[1]
    hostname, api, node, uuid, dataname = url_path.split('/')
    assert api == 'api'
    assert node == 'node'

    axiskeys = self.Input.meta.getAxisKeys()
    shape = self.Input.meta.shape

    if self._transpose_axes:
        axiskeys = reversed(axiskeys)
        shape = tuple(reversed(shape))

    axiskeys = "".join(axiskeys)

    if self.OffsetCoord.ready():
        offset_start = self.OffsetCoord.value
    else:
        offset_start = (0,) * len(self.Input.meta.shape)

    self.progressSignal(5)

    # Get the dataset details
    try:
        metadata = VoxelsAccessor.get_metadata(hostname, uuid, dataname)
    except VoxelsAccessor.BadRequestError:
        # Dataset doesn't exist yet.  Let's create it.
        metadata = VoxelsMetadata.create_default_metadata(shape, self.Input.meta.dtype, axiskeys, 0.0, "")
        VoxelsAccessor.create_new(hostname, uuid, dataname, metadata)

    # Since this class is generally used to push large blocks of data,
    # we'll be nice and set throttle=True
    client = VoxelsAccessor(hostname, uuid, dataname, throttle=True)

    def handle_block_result(roi, data):
        # Send it to dvid
        roi = numpy.asarray(roi)
        roi += offset_start
        start, stop = roi
        if self._transpose_axes:
            data = data.transpose()
            start = tuple(reversed(start))
            stop = tuple(reversed(stop))
        client.post_ndarray(start, stop, data)

    requester = BigRequestStreamer(self.Input, roiFromShape(self.Input.meta.shape))
    requester.resultSignal.subscribe(handle_block_result)
    requester.progressSignal.subscribe(self.progressSignal)
    requester.execute()

    self.progressSignal(100)
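# Added note (not part of the original source): the URL parsing at the top of
# run_export assumes NodeDataUrl has the form
# <protocol>://<hostname>/api/node/<uuid>/<dataname>.  A quick check with
# placeholder values:
#
#     url = "http://localhost:8000/api/node/abc123/grayscale"
#     hostname, api, node, uuid, dataname = url.split('://')[1].split('/')
#     # hostname == "localhost:8000", api == "api", node == "node",
#     # uuid == "abc123", dataname == "grayscale"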
def _update_subvol_widget(self, node_uuid, dataname, typename):
    """
    Update the subvolume widget with the min/max extents of the given node and dataname.
    Note: The node and dataname do not necessarily have to match the currently
          selected node and dataname.  This enables the right-click behavior,
          which can be used to limit your data volume to the size of a
          different data volume.
    """
    error_msg = None
    try:
        if typename == "roi":
            node_service = DVIDNodeService(self._hostname, str(node_uuid))
            roi_blocks_zyx = numpy.array(node_service.get_roi(str(dataname)))
            maxindex = tuple(DVID_BLOCK_WIDTH * (1 + numpy.max(roi_blocks_zyx, axis=0)))
            minindex = (0, 0, 0)  # ROIs are always 3D
            axiskeys = "zyx"
            # If the current selection is a dataset, then include a channel dimension
            if self.get_selection().typename != "roi":
                axiskeys = "zyxc"
                minindex = minindex + (0,)
                maxindex = maxindex + (1,)  # FIXME: This assumes that the selected data has only 1 channel...
        else:
            # Query the server
            raw_metadata = VoxelsAccessor.get_metadata(self._hostname, node_uuid, dataname)
            voxels_metadata = VoxelsMetadata(raw_metadata)
            maxindex = voxels_metadata.shape
            minindex = voxels_metadata.minindex
            axiskeys = voxels_metadata.axiskeys
            # If the current selection is a roi, then remove the channel dimension
            if self.get_selection().typename == "roi":
                axiskeys = "zyx"
                minindex = minindex[:-1]
                maxindex = maxindex[:-1]
    except (DVIDException, ErrMsg) as ex:
        error_msg = str(ex)
        log_exception(logger)
    else:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    if error_msg:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        QMessageBox.critical(self, "DVID Error", error_msg)
        self._subvol_widget.initWithExtents("", (), (), ())
        return

    self._subvol_widget.initWithExtents(axiskeys, maxindex, minindex, maxindex)
def get_metadata(hostname, uuid, data_name):
    """
    Query the voxels metadata for the given node/data_name.
    """
    connection = DVIDConnection(hostname)
    rest_query = "/node/{uuid}/{data_name}/metadata".format(uuid=uuid, data_name=data_name)
    status, response_body, _err_msg = connection.make_request(rest_query, ConnectionMethod.GET)
    try:
        json_data = json.loads(response_body)
    except ValueError:
        raise RuntimeError("Response body could not be parsed as valid json:\n"
                           "GET " + rest_query + "\n" + response_body)
    return VoxelsMetadata(json_data)
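# Added usage sketch (not part of the original source): elsewhere in this code
# get_metadata is invoked via VoxelsAccessor, and its result exposes the
# shape/dtype/axiskeys/minindex properties used by the GUI and export code.
# The server address, uuid, and "grayscale" name below are placeholders.
#
#     metadata = VoxelsAccessor.get_metadata("127.0.0.1:8000", uuid, "grayscale")
#     print(metadata.shape, metadata.dtype, metadata.axiskeys, metadata.minindex)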
def _get_format_selection_error_msg(self, *args):
    """
    If the currently selected format does not support the input image format,
    return an error message stating why.  Otherwise, return an empty string.
    """
    if not self.Input.ready():
        return "Input not ready"
    output_format = self.OutputFormat.value

    # These cases support all combinations
    if output_format in ('hdf5', 'npy', 'blockwise hdf5'):
        return ""

    tagged_shape = self.Input.meta.getTaggedShape()
    axes = OpStackWriter.get_nonsingleton_axes_for_tagged_shape(tagged_shape)
    output_dtype = self.Input.meta.dtype

    if output_format == 'dvid':
        # dvid requires a channel axis, which must come last.
        # Internally, we transpose it before sending it over the wire.
        if list(tagged_shape.keys())[-1] != 'c':
            return "DVID requires the last axis to be channel."

        # Make sure DVID supports this dtype/channel combo.
        from libdvid.voxels import VoxelsMetadata
        axiskeys = self.Input.meta.getAxisKeys()
        # We reverse the axiskeys because the export operator (see below) uses transpose_axes=True
        reverse_axiskeys = "".join(reversed(axiskeys))
        reverse_shape = tuple(reversed(self.Input.meta.shape))
        metainfo = VoxelsMetadata.create_default_metadata(reverse_shape,
                                                          output_dtype,
                                                          reverse_axiskeys,
                                                          0.0,
                                                          'nanometers')
        try:
            metainfo.determine_dvid_typename()
        except Exception as ex:
            return str(ex)
        else:
            return ""

    return FormatValidity.check(self.Input.meta.getTaggedShape(),
                                self.Input.meta.dtype,
                                output_format)
def test_zz_quickstart_usage(self):
    import json
    import numpy
    from libdvid import DVIDConnection, ConnectionMethod
    from libdvid.voxels import VoxelsAccessor, VoxelsMetadata

    # Open a connection to DVID
    connection = DVIDConnection("127.0.0.1:8000")

    # Get detailed dataset info: /api/repos/info (note: /api is prepended automatically)
    status, body, _error_message = connection.make_request("/repos/info", ConnectionMethod.GET)
    dataset_details = json.loads(body)
    # print(json.dumps( dataset_details, indent=4 ))

    # Create a new remote volume (assuming you already know the uuid of the node)
    uuid = UUID
    voxels_metadata = VoxelsMetadata.create_default_metadata((0, 0, 0, 1), numpy.uint8, 'zyxc', 1.0, "")
    VoxelsAccessor.create_new("127.0.0.1:8000", uuid, "my_volume", voxels_metadata)

    # Use the VoxelsAccessor convenience class to manipulate a particular data volume
    accessor = VoxelsAccessor("127.0.0.1:8000", uuid, "my_volume")
    # print(accessor.axiskeys, accessor.dtype, accessor.minindex, accessor.shape)

    # Add some data (must be block-aligned)
    # Must include all channels.
    updated_data = numpy.ones((256, 192, 128, 1), dtype=numpy.uint8)
    accessor[256:512, 32:224, 0:128, 0] = updated_data
    # OR:
    #accessor.post_ndarray( (0,10,20,30), (1,110,120,130), updated_data )

    # Read from it (last axis is channel)
    cutout_array = accessor[300:330, 40:120, 10:110, 0]
    # OR:
    cutout_array = accessor.get_ndarray((300, 40, 10, 0), (330, 120, 110, 1))

    assert isinstance(cutout_array, numpy.ndarray)
    assert cutout_array.shape == (30, 80, 100, 1)
def test_zz_quickstart_usage(self):
    import json
    import numpy
    from libdvid import DVIDConnection, ConnectionMethod
    from libdvid.voxels import VoxelsAccessor, VoxelsMetadata

    # Open a connection to DVID
    connection = DVIDConnection( "localhost:8000" )

    # Get detailed dataset info: /api/repos/info (note: /api is prepended automatically)
    status, body, error_message = connection.make_request( "/repos/info", ConnectionMethod.GET)
    dataset_details = json.loads(body)
    # print json.dumps( dataset_details, indent=4 )

    # Create a new remote volume (assuming you already know the uuid of the node)
    uuid = UUID
    voxels_metadata = VoxelsMetadata.create_default_metadata( (1,0,0,0), numpy.uint8, 'cxyz', 1.0, "" )
    VoxelsAccessor.create_new( "localhost:8000", uuid, "my_volume", voxels_metadata )

    # Use the VoxelsAccessor convenience class to manipulate a particular data volume
    accessor = VoxelsAccessor( "localhost:8000", uuid, "my_volume" )
    # print dvid_volume.axiskeys, dvid_volume.dtype, dvid_volume.minindex, dvid_volume.shape

    # Add some data (must be block-aligned)
    # Must include all channels.
    # Must be a FORTRAN array, using FORTRAN indexing order conventions
    # (Use order='F', and make sure you're indexing it as cxyz)
    updated_data = numpy.ones( (1,128,192,256), dtype=numpy.uint8, order='F' )
    updated_data = numpy.asfortranarray(updated_data)
    accessor[:, 0:128, 32:224, 256:512] = updated_data
    # OR:
    #accessor.post_ndarray( (0,10,20,30), (1,110,120,130), updated_data )

    # Read from it (First axis is channel.)
    cutout_array = accessor[:, 10:110, 40:120, 300:330]
    # OR:
    cutout_array = accessor.get_ndarray( (0,10,40,300), (1,110,120,330) )

    assert isinstance(cutout_array, numpy.ndarray)
    assert cutout_array.shape == (1,100,80,30)
def setUpClass(cls):
    """
    Override.  Called by nosetests.
    """
    # Choose names
    cls.dvid_repo = "datasetA"
    cls.data_name = "indices_data"
    cls.volume_location = "/repos/{dvid_repo}/volumes/{data_name}".format(**cls.__dict__)

    cls.data_uuid = get_testrepo_root_uuid()
    cls.node_location = "/repos/{dvid_repo}/nodes/{data_uuid}".format(**cls.__dict__)

    # Generate some test data
    data = numpy.random.randint(0, 255, (1, 128, 256, 512))
    data = numpy.asfortranarray(data, numpy.uint8)
    cls.original_data = data
    cls.voxels_metadata = VoxelsMetadata.create_default_metadata(data.shape, data.dtype, "cxyz", 1.0, "")

    # Write it to a new data instance
    node_service = DVIDNodeService(TEST_DVID_SERVER, cls.data_uuid)
    node_service.create_grayscale8(cls.data_name)
    node_service.put_gray3D(cls.data_name, data[0, ...], (0, 0, 0))
def _update_display(self):
    super(DvidDataSelectionBrowser, self)._update_display()
    hostname, dset_uuid, dataname, node_uuid = self.get_selection()

    enable_contents = self._repos_info is not None and dataname != "" and node_uuid != ""
    self._roi_groupbox.setEnabled(enable_contents)

    if not dataname or not node_uuid:
        self._roi_widget.initWithExtents("", (), (), ())
        return

    error_msg = None
    try:
        # Query the server
        raw_metadata = VoxelsAccessor.get_metadata(hostname, node_uuid, dataname)
        voxels_metadata = VoxelsMetadata(raw_metadata)
    except DVIDException as ex:
        error_msg = ex.message
    except ErrMsg as ex:
        error_msg = str(ex)
    except VoxelsAccessor.BadRequestError:
        # DVID will return an error if the selected dataset
        # isn't a 'voxels' dataset and thus has no voxels metadata
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        return
    else:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    if error_msg:
        QMessageBox.critical(self, "DVID Error", error_msg)
        self._roi_widget.initWithExtents("", (), (), ())
        return

    self._roi_widget.initWithExtents(voxels_metadata.axiskeys,
                                     voxels_metadata.shape,
                                     voxels_metadata.minindex,
                                     voxels_metadata.shape)
def main():
    # Read cmd-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("--hostname", default="localhost:8000")
    parser.add_argument("--uuid", required=False,
                        help="The node to upload to.  If not provided, a new repo will be created (see --new-repo-alias).")
    parser.add_argument("--data-name", required=False,
                        help="The name of the data instance to modify.  If it doesn't exist, it will be created first.")
    parser.add_argument("--new-repo-alias", required=False,
                        help="If no uuid is provided, a new repo is created, with this name.")
    parser.add_argument("input_file", help="For example: /tmp/myfile.h5/dataset")
    args = parser.parse_args()

    if '.h5' not in args.input_file:
        sys.stderr.write("File name does not indicate hdf5.\n")
        sys.exit(1)

    filepath, dset_name = args.input_file.split('.h5')
    filepath += '.h5'
    if not dset_name:
        sys.stderr.write("You must provide a dataset name, e.g. myfile.h5/mydataset\n")
        sys.exit(1)

    if not os.path.exists(filepath):
        sys.stderr.write("File doesn't exist: {}\n".format(filepath))
        sys.exit(1)

    # If no uuid given, create a new repo on the server
    uuid = args.uuid
    if uuid is None:
        alias = args.new_repo_alias or "testrepo"
        server = DVIDServerService(args.hostname)
        uuid = server.create_new_repo(alias, "This is a test repo loaded with data from {}".format(args.input_file))
        uuid = str(uuid)

    # Read the input data from the file
    print("Reading {}{}".format(filepath, dset_name))
    with h5py.File(filepath, 'r') as f_in:
        data = f_in[dset_name][:]

    # We assume data is 3D or 4D, in C-order.
    # We adjust it to 4D, fortran-order.
    if data.ndim == 3:
        data = data[..., None]
    data = data.transpose()
    assert data.flags['F_CONTIGUOUS'], "Data is not contiguous!"
    assert data.ndim == 4, "Data must be 3D with axes zyx or 4D with axes zyxc (C-order)"
    assert data.shape[0] == 1, "Data must have exactly 1 channel, not {}".format(data.shape[0])

    # Choose a default data instance name if necessary
    if data.dtype == numpy.uint8:
        data_name = args.data_name or "grayscale"
    elif data.dtype == numpy.uint64:
        data_name = args.data_name or "segmentation"
    else:
        sys.stderr.write("Unsupported dtype: {}\n".format(data.dtype))
        sys.exit(1)

    # Create the new data instance if it doesn't exist already
    try:
        metadata = VoxelsAccessor.get_metadata(args.hostname, uuid, data_name)
        print("Data instance '{}' already exists.  Will update.".format(data_name))
    except DVIDException:
        print("Creating new data instance: {}".format(data_name))
        metadata = VoxelsMetadata.create_default_metadata(data.shape, data.dtype, 'cxyz', 1.0, 'nanometers')
        VoxelsAccessor.create_new(args.hostname, uuid, data_name, metadata)

    # Finally, push the data to the server
    print("Pushing data to {}".format('{}/api/node/{}/{}'.format(args.hostname, uuid, data_name)))
    accessor = VoxelsAccessor(args.hostname, uuid, data_name)
    accessor.post_ndarray((0, 0, 0, 0), data.shape, data)
    print("DONE.")
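# Added usage sketch (not part of the original source): this uploader is driven
# by the argparse options defined above.  The script filename and all values
# below are hypothetical placeholders.
#
#     # Create a new repo and upload into it:
#     python load_hdf5_into_dvid.py --hostname localhost:8000 \
#         --new-repo-alias testrepo /tmp/myfile.h5/dataset
#
#     # Upload into an existing node, naming the data instance explicitly:
#     python load_hdf5_into_dvid.py --uuid abc123 --data-name grayscale \
#         /tmp/myfile.h5/dataset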