def run_export(self):
    """
    Export this operator's Input slot to a DVID voxels instance.

    The destination is parsed from self.NodeDataUrl (expected form:
    protocol://hostname/api/node/<uuid>/<dataname>).  If the remote
    dataset does not exist yet, it is created with default metadata.
    The volume is then streamed block-by-block via BigRequestStreamer,
    with progress reported through self.progressSignal (0..100).
    """
    self.progressSignal(0)

    # Split the URL into its REST path components and sanity-check them.
    url = self.NodeDataUrl.value
    url_path = url.split('://')[1]
    hostname, api, node, uuid, dataname = url_path.split('/')
    assert api == 'api'
    assert node == 'node'

    # Axis order / shape to announce to DVID.  When _transpose_axes is
    # set, both the axiskeys and the shape are reversed before upload.
    axiskeys = self.Input.meta.getAxisKeys()
    shape = self.Input.meta.shape
    if self._transpose_axes:
        axiskeys = reversed(axiskeys)
        shape = tuple(reversed(shape))
    axiskeys = "".join( axiskeys )

    # Optional coordinate offset applied to every uploaded block.
    if self.OffsetCoord.ready():
        offset_start = self.OffsetCoord.value
    else:
        offset_start = (0,) * len( self.Input.meta.shape )

    self.progressSignal(5)

    # Get the dataset details
    try:
        metadata = VoxelsAccessor.get_metadata(hostname, uuid, dataname)
    except VoxelsAccessor.BadRequestError as ex:
        # Dataset doesn't exist yet.  Let's create it.
        metadata = VoxelsMetadata.create_default_metadata( shape, self.Input.meta.dtype, axiskeys, 0.0, "" )
        VoxelsAccessor.create_new(hostname, uuid, dataname, metadata)

    # Since this class is generally used to push large blocks of data,
    # we'll be nice and set throttle=True
    client = VoxelsAccessor( hostname, uuid, dataname, throttle=True )

    def handle_block_result(roi, data):
        # Send one computed block to dvid, shifted by offset_start and
        # (optionally) transposed to match the reversed axis order.
        roi = numpy.asarray(roi)
        roi += offset_start
        start, stop = roi
        if self._transpose_axes:
            data = data.transpose()
            start = tuple(reversed(start))
            stop = tuple(reversed(stop))
        client.post_ndarray( start, stop, data )

    # Stream the entire input ROI through the handler above.
    requester = BigRequestStreamer( self.Input, roiFromShape( self.Input.meta.shape ) )
    requester.resultSignal.subscribe( handle_block_result )
    requester.progressSignal.subscribe( self.progressSignal )
    requester.execute()
    self.progressSignal(100)
def test_get_ndarray(self):
    """
    Get some data from the server and check it.
    """
    roi_start, roi_stop = (0,9,5,50), (1,10,20,150)
    accessor = VoxelsAccessor( TEST_DVID_SERVER, self.data_uuid, self.data_name )
    fetched = accessor.get_ndarray( roi_start, roi_stop )
    expected = self.original_data[roi_to_slice(roi_start, roi_stop)]
    assert (expected == fetched).all()
def run_export(self):
    """
    Export this operator's Input slot to a DVID voxels instance.

    The destination is parsed from self.NodeDataUrl (expected form:
    protocol://hostname/api/node/<uuid>/<dataname>).  A missing remote
    dataset (DVID 404) is created with default metadata; any other
    DVIDException is re-raised.  The volume is then streamed
    block-by-block via BigRequestStreamer, with progress reported
    through self.progressSignal (0..100).
    """
    self.progressSignal(0)

    # Split the URL into its REST path components and sanity-check them.
    url = self.NodeDataUrl.value
    url_path = url.split('://')[1]
    hostname, api, node, uuid, dataname = url_path.split('/')
    assert api == 'api'
    assert node == 'node'

    # Axis order / shape to announce to DVID.  When _transpose_axes is
    # set, both the axiskeys and the shape are reversed before upload.
    axiskeys = self.Input.meta.getAxisKeys()
    shape = self.Input.meta.shape
    if self._transpose_axes:
        axiskeys = reversed(axiskeys)
        shape = tuple(reversed(shape))
    axiskeys = "".join(axiskeys)

    # Optional coordinate offset applied to every uploaded block.
    if self.OffsetCoord.ready():
        offset_start = self.OffsetCoord.value
    else:
        offset_start = (0, ) * len(self.Input.meta.shape)

    self.progressSignal(5)

    # Get the dataset details
    try:
        metadata = VoxelsAccessor.get_metadata(hostname, uuid, dataname)
    except DVIDException as ex:
        if ex.status != 404:
            raise
        # Dataset doesn't exist yet.  Let's create it.
        metadata = VoxelsMetadata.create_default_metadata(
            shape, self.Input.meta.dtype, axiskeys, 0.0, "")
        VoxelsAccessor.create_new(hostname, uuid, dataname, metadata)

    # Since this class is generally used to push large blocks of data,
    # we'll be nice and set throttle=True
    client = VoxelsAccessor(hostname, uuid, dataname, throttle=True)

    def handle_block_result(roi, data):
        # Send one computed block to dvid, shifted by offset_start and
        # (optionally) transposed to match the reversed axis order.
        roi = numpy.asarray(roi)
        roi += offset_start
        start, stop = roi
        if self._transpose_axes:
            data = data.transpose()
            start = tuple(reversed(start))
            stop = tuple(reversed(stop))
        client.post_ndarray(start, stop, data)

    # Stream the entire input ROI through the handler above.
    requester = BigRequestStreamer(self.Input,
                                   roiFromShape(self.Input.meta.shape))
    requester.resultSignal.subscribe(handle_block_result)
    requester.progressSignal.subscribe(self.progressSignal)
    requester.execute()
    self.progressSignal(100)
def test_get_ndarray(self):
    """
    Get some data from the server and check it.
    """
    roi_start = (50, 5, 9, 0)
    roi_stop = (150, 20, 10, 1)
    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    fetched = accessor.get_ndarray(roi_start, roi_stop)
    assert (self.original_data[roi_to_slice(roi_start, roi_stop)] == fetched).all()
def test_get_ndarray_throttled_2(self):
    """
    Get some data from the server and check it.
    Enable throttle via query_args

    Note: This test doesn't really exercise our handling of 503 responses...
    """
    roi_start, roi_stop = (0,9,5,50), (1,10,20,150)
    accessor = VoxelsAccessor( TEST_DVID_SERVER, self.data_uuid, self.data_name,
                               query_args={'throttle' : 'on'} )
    fetched = accessor.get_ndarray( roi_start, roi_stop )
    expected = self.original_data[roi_to_slice(roi_start, roi_stop)]
    assert (expected == fetched).all()
def test_get_ndarray_throttled(self):
    """
    Get some data from the server and check it.
    Enable throttle with throttle=True

    Note: This test doesn't really exercise our handling of 503 responses...
    """
    roi_start, roi_stop = (50,5,9,0), (150,20,10,1)
    accessor = VoxelsAccessor( TEST_DVID_SERVER, self.data_uuid, self.data_name, throttle=True )
    fetched = accessor.get_ndarray( roi_start, roi_stop )
    expected = self.original_data[roi_to_slice(roi_start, roi_stop)]
    assert (expected == fetched).all()
def test_extra_query_args(self):
    """
    Create a VoxelsAccessor that uses extra query args
    They come after the '?' in the REST URI.
    For example:
    http://localhost/api/node/mydata/_0_1_2/10_10_10/0_0_0?roi=whatever&attenuation=3
    """
    roi_start, roi_stop = (0,9,5,50), (1,10,20,150)
    extra_args = {'roi' : 'some_ref', 'attenuation' : 5}

    # Retrieve from server
    accessor = VoxelsAccessor( TEST_DVID_SERVER, self.data_uuid, self.data_name,
                               query_args=extra_args )
    fetched = accessor.get_ndarray( roi_start, roi_stop )

    # Compare
    assert (fetched == self.original_data[roi_to_slice(roi_start, roi_stop)]).all()
def test_get_ndarray_throttled_2(self):
    """
    Get some data from the server and check it.
    Enable throttle via query_args

    Note: This test doesn't really exercise our handling of 503 responses...
    """
    roi_start = (50, 5, 9, 0)
    roi_stop = (150, 20, 10, 1)
    accessor = VoxelsAccessor(TEST_DVID_SERVER,
                              self.data_uuid,
                              self.data_name,
                              query_args={'throttle': 'on'})
    fetched = accessor.get_ndarray(roi_start, roi_stop)
    assert (self.original_data[roi_to_slice(roi_start, roi_stop)] == fetched).all()
def test_export(self):
    """
    Round-trip test: push a random zyxc volume through OpExportDvidVolume
    (with transpose_axes=True) and read it back with a VoxelsAccessor to
    verify the server stored it correctly.
    """
    # For now, we require block-aligned POST
    data = numpy.random.randint(0, 255, (32, 128, 256, 1)).astype(numpy.uint8)
    data = numpy.asfortranarray(data, numpy.uint8)
    assert data.shape == (32, 128, 256, 1)
    data = data.astype(numpy.uint8)
    data = vigra.taggedView(data, vigra.defaultAxistags("zyxc"))

    # Feed the test volume into a trivial operator graph.
    graph = Graph()
    opPiper = OpArrayPiper(graph=graph)
    opPiper.Input.setValue(data)

    opExport = OpExportDvidVolume(transpose_axes=True, graph=graph)  # Reverse data order for dvid export
    opExport.Input.connect(opPiper.Output)
    opExport.NodeDataUrl.setValue(
        "http://localhost:8000/api/node/{uuid}/{dataname}".format(
            uuid=self.data_uuid, dataname=self.data_name))

    # Export!
    opExport.run_export()

    # Read back. (transposed, because of transposed_axes, above)
    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    read_data = accessor[:]

    # Compare.
    assert (data.view(numpy.ndarray) == read_data.transpose()
            ).all(), "Exported data is not correct"
def test_get_ellipsis_slicing(self):
    """
    Verify that an Ellipsis in the slicing expression expands to the
    omitted interior dimensions and returns the full volume.
    """
    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    full_slicing = roi_to_slice((0, ) * 4, self.original_data.shape)

    # Replace the interior slices with an Ellipsis.
    ellipsis_slicing = full_slicing[:-2] + (Ellipsis, ) + full_slicing[-1:]
    fetched = accessor[ellipsis_slicing]
    assert (fetched == self.original_data).all()
def init_client(self):
    """
    Ideally, this would be run within the __init__ function,
    but operators should never raise non-fatal exceptions within Operator.__init__()
    (See OperatorMetaClass.__call__)
    This serves as an alternative init function, from which we are allowed to raise exceptions.
    """
    try:
        # One plain accessor, plus a throttled one for large/bulk requests.
        self._default_accessor = VoxelsAccessor( self._hostname, self._uuid, self._dataname, self._query_args )
        self._throttled_accessor = VoxelsAccessor( self._hostname, self._uuid, self._dataname, self._query_args, throttle=True )
    except DVIDException as ex:
        # A 404 means the dataset doesn't exist; surface it as a read error.
        # NOTE(review): uses py2-era `httplib` and `ex.message` — confirm
        # this module still targets Python 2 conventions.
        if ex.status == httplib.NOT_FOUND:
            raise OpDvidVolume.DatasetReadError("DVIDException: " + ex.message)
        raise
    except ErrMsg as ex:
        raise OpDvidVolume.DatasetReadError("ErrMsg: " + str(ex))
def download_to_h5( hostname, uuid, instance, roi, output_filepath, dset_name=None, compression='lzf', overlap_px=0):
    """
    Download the voxels belonging to a DVID ROI into an HDF5 dataset.

    Args:
        hostname, uuid, instance: Identify the DVID voxels instance to read.
        roi: Name of a server-side ROI used to partition the download
             into substacks.  Required.
        output_filepath: Path of the HDF5 file to write (opened in 'a' mode;
             any existing dataset of the same name is deleted first).
        dset_name: Name of the output dataset; defaults to `instance`.
        compression: Passed to h5py's create_dataset (default 'lzf').
        overlap_px: Extra pixels fetched around each substack.

    NOTE(review): the overlap is applied as `substacks_zyx[:,0] -= overlap_px`
    and `substacks_zyx[:,1] += overlap_px`, i.e. to the z and y *columns* of
    the substack start coordinates, not to a start/stop pair — confirm this
    is intended.
    """
    ns = DVIDNodeService(hostname, uuid)
    va = VoxelsAccessor(hostname, uuid, instance, throttle=True)

    dset_name = dset_name or instance

    assert roi, "Must provide a ROI"
    logger.info("Downloading {hostname}/api/node/{uuid}/{instance}?roi={roi} to {output_filepath}/{dset_name}".format(**locals()))

    # Ask DVID to partition the ROI into substacks.
    # NOTE(review): `SUBSTACK_SIZE / DVID_BLOCK_SIZE` is float division on
    # Python 3 — confirm get_roi_partition accepts that (or needs `//`).
    substacks, _packing_factor = ns.get_roi_partition(roi, SUBSTACK_SIZE / DVID_BLOCK_SIZE)

    # Substack tuples are (size, z, y, x)
    substacks_zyx = np.array(substacks)[:, 1:]

    # If the user specified an 'overlap', we add it to all substacks.
    # Technically, this isn't very efficient, because a lot of overlapping
    # pixels on the interior of the ROI will be fetched twice.
    substacks_zyx[:,0] -= overlap_px
    substacks_zyx[:,1] += overlap_px

    # Overall bounding box of everything we'll write.
    roi_bb = ( np.min(substacks_zyx, axis=0), np.max(substacks_zyx, axis=0)+SUBSTACK_SIZE )

    with h5py.File(output_filepath, 'a') as output_file:
        # Replace any pre-existing dataset of the same name.
        try:
            del output_file[dset_name]
        except KeyError:
            pass

        dset = output_file.create_dataset( dset_name, shape=roi_bb[1], dtype=va.dtype, chunks=True, compression=compression )

        for i, substack_zyx in enumerate(substacks_zyx):
            logger.info("Substack {}/{} {}: Downloading...".format( i, len(substacks_zyx), list(substack_zyx) ))

            # Append a singleton channel axis
            substack_bb = np.array(( tuple(substack_zyx) + (0,), tuple(substack_zyx + SUBSTACK_SIZE) + (1,) ))

            # Includes singleton channel
            substack_data = va.get_ndarray(*substack_bb)

            logger.info("Substack {}/{} {}: Writing...".format( i, len(substacks_zyx), list(substack_zyx) ))
            # Drop the channel axis when writing into the 3D output dataset.
            dset[bb_to_slicing(*substack_bb[:,:-1])] = substack_data[...,0]

    logger.info("DONE Downloading {hostname}/api/node/{uuid}/{instance}?roi={roi} to {output_filepath}/{dset_name}".format(**locals()))
def test_extra_query_args(self):
    """
    Create a VoxelsAccessor that uses extra query args
    They come after the '?' in the REST URI.
    For example:
    http://localhost/api/node/mydata/_0_1_2/10_10_10/0_0_0?roi=whatever&attenuation=3
    """
    roi_start = (50, 5, 9, 0)
    roi_stop = (150, 20, 10, 1)
    extra_args = {'roi': 'some_ref', 'attenuation': 5}

    # Retrieve from server
    accessor = VoxelsAccessor(TEST_DVID_SERVER,
                              self.data_uuid,
                              self.data_name,
                              query_args=extra_args)
    fetched = accessor.get_ndarray(roi_start, roi_stop)

    # Compare
    assert (fetched == self.original_data[roi_to_slice(roi_start, roi_stop)]).all()
def test_post_reduced_dim_slicing(self):
    """
    Write a subvolume using slicing that drops the channel axis,
    then read it back as a full 4D cutout and verify.
    """
    # Cutout dims
    roi_start, roi_stop = (64,32,0,0), (96,64,32,1)
    cutout_shape = numpy.subtract( roi_stop, roi_start )

    # Generate test data
    replacement = numpy.random.randint( 0,1000, cutout_shape ).astype( numpy.uint8 )

    # Send to server, with the trailing (channel) axis dropped.
    accessor = VoxelsAccessor( TEST_DVID_SERVER, self.data_uuid, self.data_name )
    accessor[64:96, 32:64, 0:32, 0] = replacement[...,0]

    # Now read it back
    round_trip = accessor.get_ndarray( roi_start, roi_stop )
    assert (round_trip == replacement).all()

    # Modify our master copy so other tests don't get messed up.
    self.original_data[roi_to_slice(roi_start, roi_stop)] = replacement
def test_zy_post_negative_coordinates(self):
    """
    Just make sure nothing blows up if we post to negative coordinates.
    """
    # Cutout dims (must be block-aligned for the POST)
    roi_start, roi_stop = (-64,0,-32,0), (128,32,32,1)
    cutout_shape = numpy.subtract( roi_stop, roi_start )

    # Generate test data
    payload = numpy.random.randint( 0,1000, cutout_shape ).astype(numpy.uint8)

    accessor = VoxelsAccessor( TEST_DVID_SERVER, self.data_uuid, self.data_name )

    # Send to server
    accessor.post_ndarray(roi_start, roi_stop, payload)

    # Now try to 'get' data from negative coords
    round_trip = accessor.get_ndarray(roi_start, roi_stop)
    assert (round_trip == payload).all()
def test_zy_post_negative_coordinates(self):
    """
    Just make sure nothing blows up if we post to negative coordinates.
    """
    # Cutout dims (must be block-aligned for the POST)
    roi_start = (-64, 0, -32, 0)
    roi_stop = (128, 32, 32, 1)
    cutout_shape = numpy.subtract(roi_stop, roi_start)

    # Generate test data
    payload = numpy.random.randint(0, 1000, cutout_shape).astype(numpy.uint8)

    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)

    # Send to server
    accessor.post_ndarray(roi_start, roi_stop, payload)

    # Now try to 'get' data from negative coords
    round_trip = accessor.get_ndarray(roi_start, roi_stop)
    assert (round_trip == payload).all()
def test_post_reduced_dim_slicing(self):
    """
    Write a subvolume using slicing that drops the channel axis,
    then read it back as a full 4D cutout and verify.
    """
    # Cutout dims
    roi_start = (64, 32, 0, 0)
    roi_stop = (96, 64, 32, 1)
    cutout_shape = numpy.subtract(roi_stop, roi_start)

    # Generate test data
    replacement = numpy.random.randint(0, 1000, cutout_shape).astype(numpy.uint8)

    # Send to server, with the trailing (channel) axis dropped.
    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    accessor[64:96, 32:64, 0:32, 0] = replacement[..., 0]

    # Now read it back
    round_trip = accessor.get_ndarray(roi_start, roi_stop)
    assert (round_trip == replacement).all()

    # Modify our master copy so other tests don't get messed up.
    self.original_data[roi_to_slice(roi_start, roi_stop)] = replacement
def test_get_full_volume_via_slicing(self):
    """
    `accessor[:]` and `accessor[...]` should both return the full volume.
    """
    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)

    # Two slicing syntaxes for the same thing
    via_colon = accessor[:]
    via_ellipsis = accessor[...]

    # Results should match
    assert (via_colon == self.original_data).all()
    assert (via_colon == via_ellipsis).all()
def test_zz_quickstart_usage(self):
    """
    End-to-end smoke test that mirrors the library's quickstart docs:
    connect, inspect repos, create a volume, write data, read it back.
    """
    import json
    import numpy
    from libdvid import DVIDConnection, ConnectionMethod
    from libdvid.voxels import VoxelsAccessor, VoxelsMetadata

    # Open a connection to DVID
    connection = DVIDConnection("127.0.0.1:8000")

    # Get detailed dataset info: /api/repos/info (note: /api is prepended automatically)
    status, body, _error_message = connection.make_request(
        "/repos/info", ConnectionMethod.GET)
    dataset_details = json.loads(body)
    # print(json.dumps( dataset_details, indent=4 ))

    # Create a new remote volume (assuming you already know the uuid of the node)
    # NOTE(review): UUID is assumed to be a module-level constant defined
    # elsewhere in this test module — confirm.
    uuid = UUID
    voxels_metadata = VoxelsMetadata.create_default_metadata(
        (0, 0, 0, 1), numpy.uint8, 'zyxc', 1.0, "")
    VoxelsAccessor.create_new("127.0.0.1:8000", uuid, "my_volume",
                              voxels_metadata)

    # Use the VoxelsAccessor convenience class to manipulate a particular data volume
    accessor = VoxelsAccessor("127.0.0.1:8000", uuid, "my_volume")
    # print(dvid_volume.axiskeys, dvid_volume.dtype, dvid_volume.minindex, dvid_volume.shape)

    # Add some data (must be block-aligned)
    # Must include all channels.
    updated_data = numpy.ones((256, 192, 128, 1), dtype=numpy.uint8)
    accessor[256:512, 32:224, 0:128, 0] = updated_data
    # OR:
    #accessor.post_ndarray( (0,10,20,30), (1,110,120,130), updated_data )

    # Read from it (First axis is channel.)
    cutout_array = accessor[300:330, 40:120, 10:110, 0]
    # OR:
    cutout_array = accessor.get_ndarray((300, 40, 10, 0), (330, 120, 110, 1))
    assert isinstance(cutout_array, numpy.ndarray)
    assert cutout_array.shape == (30, 80, 100, 1)
def test_post_ndarray(self):
    """
    Modify a remote subvolume and verify that the server wrote it.
    """
    # Cutout dims
    roi_start, roi_stop = (64,32,0,0), (96,64,32,1)
    cutout_shape = numpy.subtract( roi_stop, roi_start )

    # Generate test data
    replacement = numpy.random.randint( 0,1000, cutout_shape ).astype( numpy.uint8 )

    # Send to server
    accessor = VoxelsAccessor( TEST_DVID_SERVER, self.data_uuid, self.data_name )
    accessor.post_ndarray(roi_start, roi_stop, replacement)

    # Now read it back
    round_trip = accessor.get_ndarray( roi_start, roi_stop )
    assert (round_trip == replacement).all()

    # Modify our master copy so other tests don't get messed up.
    self.original_data[roi_to_slice(roi_start, roi_stop)] = replacement
def test_zz_quickstart_usage(self):
    """
    End-to-end smoke test that mirrors the library's quickstart docs
    (Python-2-era, cxyz/Fortran-order variant): connect, inspect repos,
    create a volume, write data, read it back.
    """
    import json
    import numpy
    from libdvid import DVIDConnection, ConnectionMethod
    from libdvid.voxels import VoxelsAccessor, VoxelsMetadata

    # Open a connection to DVID
    connection = DVIDConnection( "localhost:8000" )

    # Get detailed dataset info: /api/repos/info (note: /api is prepended automatically)
    status, body, error_message = connection.make_request( "/repos/info", ConnectionMethod.GET)
    dataset_details = json.loads(body)
    # print json.dumps( dataset_details, indent=4 )

    # Create a new remote volume (assuming you already know the uuid of the node)
    # NOTE(review): UUID is assumed to be a module-level constant defined
    # elsewhere in this test module — confirm.
    uuid = UUID
    voxels_metadata = VoxelsMetadata.create_default_metadata( (1,0,0,0), numpy.uint8, 'cxyz', 1.0, "" )
    VoxelsAccessor.create_new( "localhost:8000", uuid, "my_volume", voxels_metadata )

    # Use the VoxelsAccessor convenience class to manipulate a particular data volume
    accessor = VoxelsAccessor( "localhost:8000", uuid, "my_volume" )
    # print dvid_volume.axiskeys, dvid_volume.dtype, dvid_volume.minindex, dvid_volume.shape

    # Add some data (must be block-aligned)
    # Must include all channels.
    # Must be FORTRAN array, using FORTRAN indexing order conventions
    # (Use order='F', and make sure you're indexing it as cxyz)
    updated_data = numpy.ones( (1,128,192,256), dtype=numpy.uint8, order='F' )
    updated_data = numpy.asfortranarray(updated_data)
    accessor[:, 0:128, 32:224, 256:512] = updated_data
    # OR:
    #accessor.post_ndarray( (0,10,20,30), (1,110,120,130), updated_data )

    # Read from it (First axis is channel.)
    cutout_array = accessor[:, 10:110, 40:120, 300:330]
    # OR:
    cutout_array = accessor.get_ndarray( (0,10,40,300), (1,110,120,330) )
    assert isinstance(cutout_array, numpy.ndarray)
    assert cutout_array.shape == (1,100,80,30)
def _update_subvol_widget(self, node_uuid, dataname, typename):
    """
    Update the subvolume widget with the min/max extents of the given node and dataname.
    Note: The node and dataname do not necessarily have to match the currently
          selected node and dataname.
          This enables the right-click behavior, which can be used to
          limit your data volume to the size of a different data volume.
    """
    error_msg = None
    try:
        if typename == "roi":
            # ROI extents come from the block coordinates of the ROI itself.
            node_service = DVIDNodeService(self._hostname, str(node_uuid))
            roi_blocks_zyx = numpy.array(
                node_service.get_roi(str(dataname)))
            maxindex = tuple(DVID_BLOCK_WIDTH *
                             (1 + numpy.max(roi_blocks_zyx, axis=0)))
            minindex = (0, 0, 0)  # Rois are always 3D
            axiskeys = "zyx"
            # If the current selection is a dataset, then include a channel dimension
            if self.get_selection().typename != "roi":
                axiskeys = "zyxc"
                minindex = minindex + (0, )
                maxindex = maxindex + (
                    1,
                )  # FIXME: This assumes that the selected data has only 1 channel...
        else:
            # Query the server
            raw_metadata = VoxelsAccessor.get_metadata(
                self._hostname, node_uuid, dataname)
            voxels_metadata = VoxelsMetadata(raw_metadata)
            maxindex = voxels_metadata.shape
            minindex = voxels_metadata.minindex
            axiskeys = voxels_metadata.axiskeys
            # If the current selection is a roi, then remove the channel dimension
            if self.get_selection().typename == "roi":
                axiskeys = "zyx"
                minindex = minindex[:-1]
                maxindex = maxindex[:-1]
    except (DVIDException, ErrMsg) as ex:
        error_msg = str(ex)
        log_exception(logger)
    else:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    if error_msg:
        # On error: disable OK, show the problem, and clear the widget.
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        QMessageBox.critical(self, "DVID Error", error_msg)
        self._subvol_widget.initWithExtents("", (), (), ())
        return

    self._subvol_widget.initWithExtents(axiskeys, maxindex, minindex,
                                        maxindex)
def reopen_libdvid_voxelsaccessor_dataset(dataset):
    """
    Yield a shallow copy of `dataset` in which every VoxelsAccessor value
    has been replaced by a freshly-constructed accessor for the same
    hostname/uuid/data name.
    """
    opened_dataset = dict(dataset)
    for key, value in dataset.items():
        # Exact type check (not isinstance), matching the original intent.
        if type(value) is VoxelsAccessor:
            hostname = value.hostname
            uuid = value.uuid
            data_name = value.data_name
            opened_dataset[key] = VoxelsAccessor(hostname, uuid, data_name)
            logger.debug('opened {} at {} from {}'.format(
                data_name, uuid, hostname))
    yield opened_dataset
def test_post_ndarray(self):
    """
    Modify a remote subvolume and verify that the server wrote it.
    """
    # Cutout dims
    roi_start = (64, 32, 0, 0)
    roi_stop = (96, 64, 32, 1)
    cutout_shape = numpy.subtract(roi_stop, roi_start)

    # Generate test data
    replacement = numpy.random.randint(0, 1000, cutout_shape).astype(numpy.uint8)

    # Send to server
    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    accessor.post_ndarray(roi_start, roi_stop, replacement)

    # Now read it back
    round_trip = accessor.get_ndarray(roi_start, roi_stop)
    assert (round_trip == replacement).all()

    # Modify our master copy so other tests don't get messed up.
    self.original_data[roi_to_slice(roi_start, roi_stop)] = replacement
def test_zz_quickstart_usage(self):
    """
    End-to-end smoke test that mirrors the library's quickstart docs:
    connect, inspect repos, create a volume, write data, read it back.
    """
    import json
    import numpy
    from libdvid import DVIDConnection, ConnectionMethod
    from libdvid.voxels import VoxelsAccessor, VoxelsMetadata

    # Open a connection to DVID
    connection = DVIDConnection( "127.0.0.1:8000" )

    # Get detailed dataset info: /api/repos/info (note: /api is prepended automatically)
    status, body, _error_message = connection.make_request( "/repos/info", ConnectionMethod.GET)
    dataset_details = json.loads(body)
    # print(json.dumps( dataset_details, indent=4 ))

    # Create a new remote volume (assuming you already know the uuid of the node)
    # NOTE(review): UUID is assumed to be a module-level constant defined
    # elsewhere in this test module — confirm.
    uuid = UUID
    voxels_metadata = VoxelsMetadata.create_default_metadata( (0,0,0,1), numpy.uint8, 'zyxc', 1.0, "" )
    VoxelsAccessor.create_new( "127.0.0.1:8000", uuid, "my_volume", voxels_metadata )

    # Use the VoxelsAccessor convenience class to manipulate a particular data volume
    accessor = VoxelsAccessor( "127.0.0.1:8000", uuid, "my_volume" )
    # print(dvid_volume.axiskeys, dvid_volume.dtype, dvid_volume.minindex, dvid_volume.shape)

    # Add some data (must be block-aligned)
    # Must include all channels.
    updated_data = numpy.ones( (256,192,128,1), dtype=numpy.uint8)
    accessor[256:512, 32:224, 0:128, 0] = updated_data
    # OR:
    #accessor.post_ndarray( (0,10,20,30), (1,110,120,130), updated_data )

    # Read from it (First axis is channel.)
    cutout_array = accessor[300:330, 40:120, 10:110, 0]
    # OR:
    cutout_array = accessor.get_ndarray( (300,40,10,0), (330,120,110,1) )
    assert isinstance(cutout_array, numpy.ndarray)
    assert cutout_array.shape == (30,80,100,1)
def test_get_channel_slicing(self):
    """
    Test that slicing in the channel dimension works.
    This is a special case because the entire volume needs to be
    requested from DVID, but only the requested subset of channels
    will be returned.

    FIXME: libdvid only supports single-channel dtypes right now anyway,
           so this test doesn't do anything...
    """
    # Retrieve from server
    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    fetched = accessor[50:150, 5:20, 9:10, 0:1]

    # Compare
    expected = self.original_data[50:150, 5:20, 9:10, 0:1]
    assert (fetched == expected).all()
def test_get_stepped_slicing(self):
    """
    Fetch a subvolume using stepped (strided) slicing syntax and verify
    its shape, dtype, and contents against the local reference data.
    """
    # Retrieve from server
    dvid_vol = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    subvolume = dvid_vol[50:150:10, 5:20:5, 1:10:3, 0:1]

    # Compare against the same stepped slicing of the local copy.
    # (Removed unused full_start/full_stop locals.)
    stored_stepped_volume = self.original_data[50:150:10, 5:20:5, 1:10:3, 0:1]

    assert subvolume.shape == stored_stepped_volume.shape
    assert subvolume.dtype == stored_stepped_volume.dtype
    assert (subvolume == stored_stepped_volume).all()
def _update_subvol_widget(self, node_uuid, dataname, typename):
    """
    Update the subvolume widget with the min/max extents of the given node and dataname.
    Note: The node and dataname do not necessarily have to match the currently
          selected node and dataname.
          This enables the right-click behavior, which can be used to
          limit your data volume to the size of a different data volume.
    """
    error_msg = None
    try:
        if typename == "roi":
            # ROI extents come from the block coordinates of the ROI itself.
            node_service = DVIDNodeService(self._hostname, str(node_uuid))
            roi_blocks_xyz = numpy.array(node_service.get_roi(str(dataname)))
            maxindex = tuple(DVID_BLOCK_WIDTH * (1 + numpy.max(roi_blocks_xyz, axis=0)))
            minindex = (0, 0, 0)  # Rois are always 3D
            axiskeys = "xyz"
            # If the current selection is a dataset, then include a channel dimension
            if self.get_selection().typename != "roi":
                axiskeys = "cxyz"
                minindex = (0,) + minindex
                maxindex = (1,) + maxindex  # FIXME: This assumes that the selected data has only 1 channel...
        else:
            # Query the server
            raw_metadata = VoxelsAccessor.get_metadata(self._hostname, node_uuid, dataname)
            voxels_metadata = VoxelsMetadata(raw_metadata)
            maxindex = voxels_metadata.shape
            minindex = voxels_metadata.minindex
            axiskeys = voxels_metadata.axiskeys
            # If the current selection is a roi, then remove the channel dimension
            if self.get_selection().typename == "roi":
                axiskeys = "xyz"
                minindex = minindex[1:]
                maxindex = maxindex[1:]
    except (DVIDException, ErrMsg) as ex:
        error_msg = str(ex)
        log_exception(logger)
    else:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    if error_msg:
        # On error: disable OK, show the problem, and clear the widget.
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        QMessageBox.critical(self, "DVID Error", error_msg)
        self._subvol_widget.initWithExtents("", (), (), ())
        return

    self._subvol_widget.initWithExtents(axiskeys, maxindex, minindex, maxindex)
def test_export_with_offset(self):
    """
    For now, the offset and data must both be block-aligned for DVID.

    Round-trip test: export a random zyxc volume with an OffsetCoord,
    read it back, and verify both the enlarged extents and the contents
    at the offset location.
    """
    data = numpy.random.randint(0, 255, (32, 128, 256, 1)).astype(numpy.uint8)
    data = numpy.asfortranarray(data, numpy.uint8)
    assert data.shape == (32, 128, 256, 1)
    data = vigra.taggedView(data, vigra.defaultAxistags("zyxc"))

    # Feed the test volume into a trivial operator graph.
    graph = Graph()
    opPiper = OpArrayPiper(graph=graph)
    opPiper.Input.setValue(data)

    opExport = OpExportDvidVolume(transpose_axes=True, graph=graph)  # Reverse data order for dvid export
    opExport.Input.connect(opPiper.Output)
    opExport.NodeDataUrl.setValue(
        "http://localhost:8000/api/node/{uuid}/{dataname}".format(
            uuid=self.data_uuid, dataname=self.data_name))

    offset = (32, 64, 128, 0)
    opExport.OffsetCoord.setValue(offset)

    # Export!
    opExport.run_export()

    # Read back. (transposed, because of transposed_axes, above)
    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    read_data = accessor[:]

    # The offset should have caused larger extents in the saved data.
    # BUGFIX: the failure message previously referenced an undefined name
    # `exported_data`, which raised NameError instead of the AssertionError.
    assert (read_data.transpose().shape == numpy.add(
        data.shape, offset)).all(), "Wrong shape: {}".format(
            read_data.transpose().shape)

    # Compare the data at the offset location.
    offset_slicing = tuple(slice(s, None) for s in offset)
    assert (data.view(
        numpy.ndarray) == read_data.transpose()[offset_slicing]
            ).all(), "Exported data is not correct"
def _update_display(self):
    """
    Refresh the ROI group-box and OK-button state for the current
    selection, querying the server for the selected volume's metadata
    and populating the ROI widget with its extents.
    """
    super(DvidDataSelectionBrowser, self)._update_display()
    hostname, dset_uuid, dataname, node_uuid = self.get_selection()

    enable_contents = self._repos_info is not None and dataname != "" and node_uuid != ""
    self._roi_groupbox.setEnabled(enable_contents)

    # Nothing selected yet: clear the widget and bail out.
    if not dataname or not node_uuid:
        self._roi_widget.initWithExtents("", (), (), ())
        return

    error_msg = None
    try:
        # Query the server
        raw_metadata = VoxelsAccessor.get_metadata(hostname, node_uuid, dataname)
        voxels_metadata = VoxelsMetadata(raw_metadata)
    except DVIDException as ex:
        error_msg = ex.message
    except ErrMsg as ex:
        # BUGFIX: was `str(ErrMsg)`, which stringified the exception *class*
        # instead of the caught exception instance.
        error_msg = str(ex)
    except VoxelsAccessor.BadRequestError as ex:
        # DVID will return an error if the selected dataset
        # isn't a 'voxels' dataset and thus has no voxels metadata
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        return
    else:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    if error_msg:
        QMessageBox.critical(self, "DVID Error", error_msg)
        self._roi_widget.initWithExtents("", (), (), ())
        return

    self._roi_widget.initWithExtents(voxels_metadata.axiskeys,
                                     voxels_metadata.shape,
                                     voxels_metadata.minindex,
                                     voxels_metadata.shape)
def test_get_reduced_dim_slicing(self):
    """
    Fetch a subvolume using slicing that drops an axis (an integer index
    instead of a slice) and verify both the reduced shape and contents.
    """
    # Retrieve from server
    dvid_vol = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)

    assert self.original_data.shape == (512, 256, 128, 1), "Update this unit test."
    # Expected roi of the *reduced* result (one axis dropped).
    # (Removed unused full_roi/subvol_roi locals.)
    reduced_subvol_roi = ((0, 0, 0), (512, 128, 1))
    reduced_dim_slicing = numpy.s_[
        0:512, 10, 0:128, 0:1]  # Notice that the second dim is dropped

    # request
    subvolume = dvid_vol[reduced_dim_slicing]

    # Check dimensionality/shape of returned volume
    reduced_shape = numpy.subtract(reduced_subvol_roi[1], reduced_subvol_roi[0])
    assert subvolume.shape == tuple(reduced_shape)

    # Compare against the same reduced-dim slicing of the local copy.
    assert (subvolume == self.original_data[reduced_dim_slicing]).all()
def _update_display(self):
    """
    Refresh the ROI group-box and OK-button state for the current
    selection, querying the server for the selected volume's metadata
    and populating the ROI widget with its extents.
    """
    super(DvidDataSelectionBrowser, self)._update_display()
    hostname, dset_uuid, dataname, node_uuid = self.get_selection()

    enable_contents = self._repos_info is not None and dataname != "" and node_uuid != ""
    self._roi_groupbox.setEnabled(enable_contents)

    # Nothing selected yet: clear the widget and bail out.
    if not dataname or not node_uuid:
        self._roi_widget.initWithExtents("", (), (), ())
        return

    error_msg = None
    try:
        # Query the server
        raw_metadata = VoxelsAccessor.get_metadata(hostname, node_uuid, dataname)
        voxels_metadata = VoxelsMetadata(raw_metadata)
    except DVIDException as ex:
        error_msg = ex.message
    except ErrMsg as ex:
        # BUGFIX: was `str(ErrMsg)`, which stringified the exception *class*
        # instead of the caught exception instance.
        error_msg = str(ex)
    except VoxelsAccessor.BadRequestError as ex:
        # DVID will return an error if the selected dataset
        # isn't a 'voxels' dataset and thus has no voxels metadata
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        return
    else:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    if error_msg:
        QMessageBox.critical(self, "DVID Error", error_msg)
        self._roi_widget.initWithExtents("", (), (), ())
        return

    self._roi_widget.initWithExtents(
        voxels_metadata.axiskeys,
        voxels_metadata.shape,
        voxels_metadata.minindex,
        voxels_metadata.shape
    )
def download_to_h5(hostname,
                   uuid,
                   instance,
                   roi,
                   output_filepath,
                   dset_name=None,
                   compression='lzf',
                   overlap_px=0):
    """
    Download the voxels belonging to a DVID ROI into an HDF5 dataset.

    Args:
        hostname, uuid, instance: Identify the DVID voxels instance to read.
        roi: Name of a server-side ROI used to partition the download
             into substacks.  Required.
        output_filepath: Path of the HDF5 file to write (opened in 'a' mode;
             any existing dataset of the same name is deleted first).
        dset_name: Name of the output dataset; defaults to `instance`.
        compression: Passed to h5py's create_dataset (default 'lzf').
        overlap_px: Extra pixels fetched around each substack.

    NOTE(review): the overlap is applied as `substacks_zyx[:, 0] -= overlap_px`
    and `substacks_zyx[:, 1] += overlap_px`, i.e. to the z and y *columns* of
    the substack start coordinates, not to a start/stop pair — confirm this
    is intended.
    """
    ns = DVIDNodeService(hostname, uuid)
    va = VoxelsAccessor(hostname, uuid, instance, throttle=True)

    dset_name = dset_name or instance

    assert roi, "Must provide a ROI"
    logger.info(
        "Downloading {hostname}/api/node/{uuid}/{instance}?roi={roi} to {output_filepath}/{dset_name}"
        .format(**locals()))

    # Ask DVID to partition the ROI into substacks.
    # NOTE(review): `SUBSTACK_SIZE / DVID_BLOCK_SIZE` is float division on
    # Python 3 — confirm get_roi_partition accepts that (or needs `//`).
    substacks, _packing_factor = ns.get_roi_partition(
        roi, SUBSTACK_SIZE / DVID_BLOCK_SIZE)

    # Substack tuples are (size, z, y, x)
    substacks_zyx = np.array(substacks)[:, 1:]

    # If the user specified an 'overlap', we add it to all substacks.
    # Technically, this isn't very efficient, because a lot of overlapping
    # pixels on the interior of the ROI will be fetched twice.
    substacks_zyx[:, 0] -= overlap_px
    substacks_zyx[:, 1] += overlap_px

    # Overall bounding box of everything we'll write.
    roi_bb = (np.min(substacks_zyx, axis=0),
              np.max(substacks_zyx, axis=0) + SUBSTACK_SIZE)

    with h5py.File(output_filepath, 'a') as output_file:
        # Replace any pre-existing dataset of the same name.
        try:
            del output_file[dset_name]
        except KeyError:
            pass

        dset = output_file.create_dataset(dset_name,
                                          shape=roi_bb[1],
                                          dtype=va.dtype,
                                          chunks=True,
                                          compression=compression)

        for i, substack_zyx in enumerate(substacks_zyx):
            logger.info("Substack {}/{} {}: Downloading...".format(
                i, len(substacks_zyx), list(substack_zyx)))

            # Append a singleton channel axis
            substack_bb = np.array(
                (tuple(substack_zyx) + (0, ),
                 tuple(substack_zyx + SUBSTACK_SIZE) + (1, )))

            # Includes singleton channel
            substack_data = va.get_ndarray(*substack_bb)

            logger.info("Substack {}/{} {}: Writing...".format(
                i, len(substacks_zyx), list(substack_zyx)))
            # Drop the channel axis when writing into the 3D output dataset.
            dset[bb_to_slicing(*substack_bb[:, :-1])] = substack_data[..., 0]

    logger.info(
        "DONE Downloading {hostname}/api/node/{uuid}/{instance}?roi={roi} to {output_filepath}/{dset_name}"
        .format(**locals()))
def main():
    """
    Command-line utility: upload a 3D (zyx) or 4D (zyxc) hdf5 volume to a
    DVID server, creating the repo and/or voxels instance if necessary.
    """
    # Read cmd-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("--hostname", default="localhost:8000")
    parser.add_argument(
        "--uuid",
        required=False,
        help="The node to upload to. If not provided, a new repo will be created (see --new-repo-alias).")
    parser.add_argument(
        "--data-name",
        required=False,
        help="The name of the data instance to modify. If it doesn't exist, it will be created first.")
    parser.add_argument(
        "--new-repo-alias",
        required=False,
        help="If no uuid is provided, a new repo is created, with this name.")
    parser.add_argument("input_file", help="For example: /tmp/myfile.h5/dataset")
    args = parser.parse_args()

    if '.h5' not in args.input_file:
        sys.stderr.write("File name does not indicate hdf5.\n")
        sys.exit(1)

    filepath, dset_name = args.input_file.split('.h5')
    filepath += '.h5'
    if not dset_name:
        sys.stderr.write("You must provide a dataset name, e.g. myfile.h5/mydataset\n")
        sys.exit(1)

    if not os.path.exists(filepath):
        sys.stderr.write("File doesn't exist: {}\n".format(filepath))
        sys.exit(1)

    # If no uuid given, create a new repo on the server
    uuid = args.uuid
    if uuid is None:
        alias = args.new_repo_alias or "testrepo"
        server = DVIDServerService(args.hostname)
        # BUGFIX: the original format string had no '{}' placeholder,
        # so the input filename was silently dropped from the description.
        uuid = server.create_new_repo(
            alias, "This is a test repo loaded with data from {}".format(args.input_file))
        uuid = str(uuid)

    # Read the input data from the file (read-only; existence checked above)
    print("Reading {}{}".format(filepath, dset_name))
    with h5py.File(filepath, 'r') as f_in:
        data = f_in[dset_name][:]

    # We assume data is 3D or 4D, in C-order
    # We adjust it to 4D, fortran-order
    if data.ndim == 3:
        data = data[..., None]
    data = data.transpose()
    assert data.flags['F_CONTIGUOUS'], "Data is not contiguous!"
    assert data.ndim == 4, "Data must be 3D with axes zyx or 4D with axes zyxc (C-order)"
    assert data.shape[0] == 1, "Data must have exactly 1 channel, not {}".format(data.shape[0])

    # Choose a default data instance name if necessary
    if data.dtype == numpy.uint8:
        data_name = args.data_name or "grayscale"
    elif data.dtype == numpy.uint64:
        data_name = args.data_name or "segmentation"
    else:
        sys.stderr.write("Unsupported dtype: {}\n".format(data.dtype))
        sys.exit(1)

    # Create the new data instance if it doesn't exist already
    try:
        metadata = VoxelsAccessor.get_metadata(args.hostname, uuid, data_name)
        print("Data instance '{}' already exists. Will update.".format(data_name))
    except DVIDException:
        print("Creating new data instance: {}".format(data_name))
        metadata = VoxelsMetadata.create_default_metadata(
            data.shape, data.dtype, 'cxyz', 1.0, 'nanometers')
        VoxelsAccessor.create_new(args.hostname, uuid, data_name, metadata)

    # Finally, push the data to the server
    print("Pushing data to {}".format('{}/api/node/{}/{}'.format(args.hostname, uuid, data_name)))
    accessor = VoxelsAccessor(args.hostname, uuid, data_name)
    accessor.post_ndarray((0, 0, 0, 0), data.shape, data)
    print("DONE.")
def main():
    """
    Command-line utility: upload a 3D (zyx) or 4D (zyxc) hdf5 volume to a
    DVID server, creating the repo and/or voxels instance if necessary.
    """
    # Read cmd-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("--hostname", default="localhost:8000")
    parser.add_argument("--uuid", required=False,
                        help="The node to upload to. If not provided, a new repo will be created (see --new-repo-alias).")
    parser.add_argument("--data-name", required=False,
                        help="The name of the data instance to modify. If it doesn't exist, it will be created first.")
    parser.add_argument("--new-repo-alias", required=False,
                        help="If no uuid is provided, a new repo is created, with this name.")
    parser.add_argument("input_file", help="For example: /tmp/myfile.h5/dataset")
    args = parser.parse_args()

    if '.h5' not in args.input_file:
        sys.stderr.write("File name does not indicate hdf5.\n")
        sys.exit(1)

    filepath, dset_name = args.input_file.split('.h5')
    filepath += '.h5'
    if not dset_name:
        sys.stderr.write("You must provide a dataset name, e.g. myfile.h5/mydataset\n")
        sys.exit(1)

    if not os.path.exists(filepath):
        sys.stderr.write("File doesn't exist: {}\n".format(filepath))
        sys.exit(1)

    # If no uuid given, create a new repo on the server
    uuid = args.uuid
    if uuid is None:
        alias = args.new_repo_alias or "testrepo"
        server = DVIDServerService(args.hostname)
        # BUGFIX: the original format string had no '{}' placeholder,
        # so the input filename was silently dropped from the description.
        uuid = server.create_new_repo(alias, "This is a test repo loaded with data from {}".format(args.input_file))
        uuid = str(uuid)

    # Read the input data from the file (read-only; existence checked above)
    print("Reading {}{}".format(filepath, dset_name))
    with h5py.File(filepath, 'r') as f_in:
        data = f_in[dset_name][:]

    # We assume data is 3D or 4D, in C-order
    # We adjust it to 4D, fortran-order
    if data.ndim == 3:
        data = data[..., None]
    data = data.transpose()
    assert data.flags['F_CONTIGUOUS'], "Data is not contiguous!"
    assert data.ndim == 4, "Data must be 3D with axes zyx or 4D with axes zyxc (C-order)"
    assert data.shape[0] == 1, "Data must have exactly 1 channel, not {}".format(data.shape[0])

    # Choose a default data instance name if necessary
    if data.dtype == numpy.uint8:
        data_name = args.data_name or "grayscale"
    elif data.dtype == numpy.uint64:
        data_name = args.data_name or "segmentation"
    else:
        sys.stderr.write("Unsupported dtype: {}\n".format(data.dtype))
        sys.exit(1)

    # Create the new data instance if it doesn't exist already
    try:
        metadata = VoxelsAccessor.get_metadata(args.hostname, uuid, data_name)
        print("Data instance '{}' already exists. Will update.".format(data_name))
    except DVIDException:
        print("Creating new data instance: {}".format(data_name))
        metadata = VoxelsMetadata.create_default_metadata(data.shape, data.dtype, 'cxyz', 1.0, 'nanometers')
        VoxelsAccessor.create_new(args.hostname, uuid, data_name, metadata)

    # Finally, push the data to the server
    print("Pushing data to {}".format('{}/api/node/{}/{}'.format(args.hostname, uuid, data_name)))
    accessor = VoxelsAccessor(args.hostname, uuid, data_name)
    accessor.post_ndarray((0, 0, 0, 0), data.shape, data)
    print("DONE.")
def copy_voxels(source_details,
                destination_details,
                transfer_cube_width_px=512,
                roi=None,
                subvol_bounds_zyx=None):
    """
    Transfer voxels data from one DVID server to another.

    source_details:
        Either a tuple of (hostname, uuid, instance),
        or a url of the form http://hostname/api/node/uuid/instance

    destination_details:
        Same format as source_details, or just an instance name
        (in which case the destination is presumed to be in the same host/node as the source).

    transfer_cube_width_px:
        The data will be transferred one 'substack' at a time, with the given substack width.

    NOTE: Exactly ONE of the following parameters should be provided.

    roi:
        Same format as destination_details, but should point to a ROI instance.

    subvol_bounds_zyx:
        A tuple (start_zyx, stop_zyx) indicating a rectangular region to copy (instead of a ROI).
        Specified in pixel coordinates. Must be aligned to DVID block boundaries.
        For example: ((0,0,0), (1024, 1024, 512))
    """
    if isinstance(source_details, str):
        source_details = parse_instance_url(source_details)
    else:
        source_details = InstanceDetails(*source_details)
    src_accessor = VoxelsAccessor(*source_details)

    if isinstance(destination_details, str):
        destination_details = str_to_details(destination_details, default=source_details)
    else:
        destination_details = InstanceDetails(*destination_details)
    dest_accessor = VoxelsAccessor(*destination_details)

    assert (roi is not None) ^ (subvol_bounds_zyx is not None), \
        "You must provide roi OR subvol_bounds-zyx (but not both)."

    # Figure out what blocks ('substacks') we're copying
    if subvol_bounds_zyx:
        assert False, "User beware: The subvol_bounds_zyx option hasn't been tested yet. " \
                      "Now that you've been warned, comment out this assertion and give it a try. " \
                      "(It *should* work...)"

        assert len(subvol_bounds_zyx) == 2, "Invalid value for subvol_bounds_zyx"
        assert list(map(len, subvol_bounds_zyx)) == [3, 3], "Invalid value for subvol_bounds_zyx"

        subvol_bounds_zyx = np.array(subvol_bounds_zyx)
        subvol_shape = subvol_bounds_zyx[1] - subvol_bounds_zyx[0]
        # BUGFIX: the original asserted (subvol_shape % width).all(), i.e. that
        # every axis had a NONZERO remainder -- the exact opposite of
        # "divisible".  (A dead no-op division statement was also removed.)
        assert not (subvol_shape % transfer_cube_width_px).any(), \
            "subvolume must be divisible by the transfer_cube_width_px"

        blocks_zyx = []
        # BUGFIX: use floor division so np.ndindex gets integers, and convert
        # the index tuple to an array before scaling (tuple * int would
        # *repeat* the tuple rather than multiply its elements).
        transfer_block_indexes = np.ndindex(*(subvol_shape // transfer_cube_width_px))
        for tbi in transfer_block_indexes:
            start_zyx = np.array(tbi) * transfer_cube_width_px + subvol_bounds_zyx[0]
            blocks_zyx.append(SubstackZYX(transfer_cube_width_px, *start_zyx))
    elif roi is not None:
        if isinstance(roi, str):
            roi_details = str_to_details(roi, default=source_details)
        else:
            roi_details = InstanceDetails(*roi)
        roi_node = DVIDNodeService(roi_details.host, roi_details.uuid)
        # BUGFIX: floor division -- '/' yields a float under Python 3, but the
        # partition size is an integer multiple of DVID blocks.
        blocks_zyx = roi_node.get_roi_partition(roi_details.instance,
                                                transfer_cube_width_px // DVID_BLOCK_WIDTH)[0]
    else:
        assert False

    # Fetch/write the blocks one at a time
    # TODO: We could speed this up if we used a threadpool...
    logger.debug("Beginning Transfer of {} blocks ({} px each)".format(len(blocks_zyx), transfer_cube_width_px))

    for block_index, block_zyx in enumerate(blocks_zyx, start=1):
        # skip item 0 ('size'), append channel
        start_zyxc = np.array(tuple(block_zyx[1:]) + (0,))
        stop_zyxc = start_zyxc + transfer_cube_width_px
        stop_zyxc[-1] = 1

        logger.debug("Fetching block: {} ({}/{})".format(start_zyxc[:-1], block_index, len(blocks_zyx)))
        src_block_data = src_accessor.get_ndarray(start_zyxc, stop_zyxc)

        logger.debug("Writing block: {} ({}/{})".format(start_zyxc[:-1], block_index, len(blocks_zyx)))
        dest_accessor.post_ndarray(start_zyxc, stop_zyxc, new_data=src_block_data)
    logger.debug("DONE.")
def test_get_full_slicing(self):
    """Fetch the entire volume via slicing syntax and compare it against the reference data."""
    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    expected = self.original_data
    whole_volume_slicing = roi_to_slice((0, 0, 0, 0), expected.shape)
    fetched = accessor[whole_volume_slicing]
    assert (fetched == expected).all()
def open_array(self, mode="r"):
    """
    Generator (context-manager style): yield a VoxelsAccessor for this
    dataset, built from the host/port/uuid/data_name stored on self.
    """
    from libdvid.voxels import VoxelsAccessor
    endpoint = ":".join(str(part) for part in (self.host, self.port))
    accessor = VoxelsAccessor(endpoint, self.uuid, self.data_name)
    yield accessor