def test_zy_post_negative_coordinates(self):
    """
    Sanity check: posting a subvolume whose start coordinates are negative,
    then reading it back, should not blow up.
    """
    # Cutout dims (must be block-aligned for the POST)
    roi_start, roi_stop = (-64, 0, -32, 0), (128, 32, 32, 1)
    roi_shape = numpy.subtract(roi_stop, roi_start)

    # Generate random test data to write
    payload = numpy.random.randint(0, 1000, roi_shape).astype(numpy.uint8)

    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)

    # Send to server
    accessor.post_ndarray(roi_start, roi_stop, payload)

    # Now try to 'get' data from negative coords and verify the round-trip
    roundtrip = accessor.get_ndarray(roi_start, roi_stop)
    assert (roundtrip == payload).all()
def test_zy_post_negative_coordinates(self):
    """
    Ensure that a POST to (and subsequent GET from) negative coordinates
    completes without error and returns the data that was written.
    """
    # Cutout dims (must be block-aligned for the POST)
    bounds_min = (-64, 0, -32, 0)
    bounds_max = (128, 32, 32, 1)
    vol_shape = numpy.subtract(bounds_max, bounds_min)

    # Random test payload
    written = numpy.random.randint(0, 1000, vol_shape).astype(numpy.uint8)

    vol = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)

    # Write, then read the same region back
    vol.post_ndarray(bounds_min, bounds_max, written)
    fetched = vol.get_ndarray(bounds_min, bounds_max)
    assert (fetched == written).all()
def test_post_ndarray(self):
    """
    Modify a remote subvolume and verify that the server wrote it.
    """
    # Cutout dims
    roi_start, roi_stop = (64, 32, 0, 0), (96, 64, 32, 1)
    roi_shape = numpy.subtract(roi_stop, roi_start)

    # Generate random replacement data
    replacement = numpy.random.randint(0, 1000, roi_shape).astype(numpy.uint8)

    # Send to server
    accessor = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    accessor.post_ndarray(roi_start, roi_stop, replacement)

    # Read it back and confirm the write took effect
    fetched = accessor.get_ndarray(roi_start, roi_stop)
    assert (fetched == replacement).all()

    # Modify our master copy so other tests don't get messed up.
    self.original_data[roi_to_slice(roi_start, roi_stop)] = replacement
def test_post_ndarray(self):
    """
    Overwrite a remote subvolume, then read it back to verify the server
    stored exactly what we sent.
    """
    # Cutout dims
    lower, upper = (64, 32, 0, 0), (96, 64, 32, 1)
    dims = numpy.subtract(upper, lower)

    # Random data to push
    pushed = numpy.random.randint(0, 1000, dims).astype(numpy.uint8)

    # Send to server
    vol = VoxelsAccessor(TEST_DVID_SERVER, self.data_uuid, self.data_name)
    vol.post_ndarray(lower, upper, pushed)

    # Now read it back
    pulled = vol.get_ndarray(lower, upper)
    assert (pulled == pushed).all()

    # Modify our master copy so other tests don't get messed up.
    self.original_data[roi_to_slice(lower, upper)] = pushed
def copy_voxels( source_details,
                 destination_details,
                 transfer_cube_width_px=512,
                 roi=None,
                 subvol_bounds_zyx=None ):
    """
    Transfer voxels data from one DVID server to another.

    source_details:
        Either a tuple of (hostname, uuid, instance),
        or a url of the form http://hostname/api/node/uuid/instance

    destination_details:
        Same format as source_details, or just an instance name
        (in which case the destination is presumed to be in the same host/node as the source).

    transfer_cube_width_px:
        The data will be transferred one 'substack' at a time, with the given substack width.

    NOTE: Exactly ONE of the following parameters should be provided.

    roi:
        Same format as destination_details, but should point to a ROI instance.

    subvol_bounds_zyx:
        A tuple (start_zyx, stop_zyx) indicating a rectangular region to copy (instead of a ROI).
        Specified in pixel coordinates. Must be aligned to DVID block boundaries.
        For example: ((0,0,0), (1024, 1024, 512))
    """
    if isinstance(source_details, str):
        source_details = parse_instance_url( source_details )
    else:
        source_details = InstanceDetails(*source_details)
    src_accessor = VoxelsAccessor( *source_details )

    if isinstance(destination_details, str):
        destination_details = str_to_details( destination_details, default=source_details )
    else:
        destination_details = InstanceDetails(*destination_details)
    dest_accessor = VoxelsAccessor( *destination_details )

    assert (roi is not None) ^ (subvol_bounds_zyx is not None), \
        "You must provide roi OR subvol_bounds-zyx (but not both)."

    # Figure out what blocks ('substacks') we're copying
    if subvol_bounds_zyx:
        assert False, "User beware: The subvol_bounds_zyx option hasn't been tested yet. "\
                      "Now that you've been warned, comment out this assertion and give it a try. "\
                      "(It *should* work...)"

        assert len(subvol_bounds_zyx) == 2, "Invalid value for subvol_bounds_zyx"
        assert list(map(len, subvol_bounds_zyx)) == [3,3], "Invalid value for subvol_bounds_zyx"

        subvol_bounds_zyx = np.array(subvol_bounds_zyx)
        subvol_shape = subvol_bounds_zyx[1] - subvol_bounds_zyx[0]

        # BUGFIX: The original asserted `(subvol_shape % w).all()`, which is True
        # only when every axis has a NONZERO remainder -- i.e. it failed exactly
        # when the subvolume *was* divisible.  Divisibility means remainder == 0.
        assert not (subvol_shape % transfer_cube_width_px).any(), \
            "subvolume must be divisible by the transfer_cube_width_px"

        blocks_zyx = []
        # BUGFIX: use floor division; np.ndindex requires integer dimensions
        # (true division yields floats in Python 3).
        transfer_block_indexes = np.ndindex( *(subvol_shape // transfer_cube_width_px) )
        for tbi in transfer_block_indexes:
            # BUGFIX: tbi is a tuple; `tuple * int` is repetition, not scaling.
            # Convert to an array for elementwise arithmetic.
            start_zyx = np.array(tbi)*transfer_cube_width_px + subvol_bounds_zyx[0]
            blocks_zyx.append( SubstackZYX(transfer_cube_width_px, *start_zyx) )
    elif roi is not None:
        if isinstance(roi, str):
            roi_details = str_to_details( roi, default=source_details )
        else:
            roi_details = InstanceDetails(*roi)
        roi_node = DVIDNodeService(roi_details.host, roi_details.uuid)
        # BUGFIX: floor division -- the partition block width must be an int.
        blocks_zyx = roi_node.get_roi_partition(roi_details.instance,
                                                transfer_cube_width_px//DVID_BLOCK_WIDTH)[0]
    else:
        assert False

    # Fetch/write the blocks one at a time
    # TODO: We could speed this up if we used a threadpool...
    logger.debug( "Beginning Transfer of {} blocks ({} px each)".format( len(blocks_zyx), transfer_cube_width_px ) )
    for block_index, block_zyx in enumerate(blocks_zyx, start=1):
        # skip item 0 ('size'), append channel
        start_zyxc = np.array(tuple(block_zyx[1:]) + (0,))
        stop_zyxc = start_zyxc + transfer_cube_width_px
        stop_zyxc[-1] = 1

        logger.debug("Fetching block: {} ({}/{})".format(start_zyxc[:-1], block_index, len(blocks_zyx)) )
        src_block_data = src_accessor.get_ndarray( start_zyxc, stop_zyxc )

        logger.debug("Writing block:  {} ({}/{})".format(start_zyxc[:-1], block_index, len(blocks_zyx)) )
        dest_accessor.post_ndarray( start_zyxc, stop_zyxc, new_data=src_block_data )
    logger.debug("DONE.")
def main():
    """
    Command-line entry point: read a 3D/4D dataset from an hdf5 file and push
    it to a DVID server, creating the repo and/or data instance if needed.
    """
    # Read cmd-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("--hostname", default="localhost:8000")
    parser.add_argument("--uuid", required=False, help="The node to upload to.  If not provided, a new repo will be created (see --new-repo-alias).")
    parser.add_argument("--data-name", required=False, help="The name of the data instance to modify. If it doesn't exist, it will be created first.")
    parser.add_argument("--new-repo-alias", required=False, help="If no uuid is provided, a new repo is created, with this name.")
    parser.add_argument("input_file", help="For example: /tmp/myfile.h5/dataset")
    args = parser.parse_args()

    # The input path embeds both the file and the internal dataset name.
    if '.h5' not in args.input_file:
        sys.stderr.write("File name does not indicate hdf5.\n")
        sys.exit(1)

    filepath, dset_name = args.input_file.split('.h5')
    filepath += '.h5'
    if not dset_name:
        sys.stderr.write("You must provide a dataset name, e.g. myfile.h5/mydataset\n")
        sys.exit(1)

    if not os.path.exists(filepath):
        sys.stderr.write("File doesn't exist: {}\n".format(filepath))
        sys.exit(1)

    # If no uuid given, create a new repo on the server
    uuid = args.uuid
    if uuid is None:
        alias = args.new_repo_alias or "testrepo"
        server = DVIDServerService(args.hostname)
        # BUGFIX: the original format string had no '{}' placeholder,
        # so the input filename was silently omitted from the description.
        uuid = server.create_new_repo(alias, "This is a test repo loaded with data from {}".format(args.input_file))
        uuid = str(uuid)

    # Read the input data from the file (read-only; we never write to it)
    print("Reading {}{}".format( filepath, dset_name ))
    with h5py.File(filepath, 'r') as f_in:
        data = f_in[dset_name][:]

    # We assume data is 3D or 4D, in C-order
    # We adjust it to 4D, fortran-order
    if data.ndim == 3:
        data = data[...,None]
    data = data.transpose()
    assert data.flags['F_CONTIGUOUS'], "Data is not contiguous!"
    assert data.ndim == 4, "Data must be 3D with axes zyx or 4D with axes zyxc (C-order)"
    assert data.shape[0] == 1, "Data must have exactly 1 channel, not {}".format( data.shape[0] )

    # Choose a default data instance name if necessary
    if data.dtype == numpy.uint8:
        data_name = args.data_name or "grayscale"
    elif data.dtype == numpy.uint64:
        data_name = args.data_name or "segmentation"
    else:
        sys.stderr.write("Unsupported dtype: {}\n".format(data.dtype))
        sys.exit(1)

    # Create the new data instance if it doesn't exist already
    try:
        metadata = VoxelsAccessor.get_metadata(args.hostname, uuid, data_name)
        print("Data instance '{}' already exists.  Will update.".format( data_name ))
    except DVIDException:
        print("Creating new data instance: {}".format( data_name ))
        metadata = VoxelsMetadata.create_default_metadata(data.shape, data.dtype, 'cxyz', 1.0, 'nanometers')
        VoxelsAccessor.create_new(args.hostname, uuid, data_name, metadata)

    # Finally, push the data to the server
    print("Pushing data to {}".format( '{}/api/node/{}/{}'.format( args.hostname, uuid, data_name ) ))
    accessor = VoxelsAccessor(args.hostname, uuid, data_name)
    accessor.post_ndarray((0,0,0,0), data.shape, data)
    print("DONE.")
def main():
    """
    Command-line entry point: read a 3D/4D dataset from an hdf5 file and push
    it to a DVID server, creating the repo and/or data instance if needed.
    """
    # Read cmd-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("--hostname", default="localhost:8000")
    parser.add_argument(
        "--uuid",
        required=False,
        help=
        "The node to upload to. If not provided, a new repo will be created (see --new-repo-alias)."
    )
    parser.add_argument(
        "--data-name",
        required=False,
        help=
        "The name of the data instance to modify. If it doesn't exist, it will be created first."
    )
    parser.add_argument(
        "--new-repo-alias",
        required=False,
        help="If no uuid is provided, a new repo is created, with this name.")
    parser.add_argument("input_file", help="For example: /tmp/myfile.h5/dataset")
    args = parser.parse_args()

    # The input path embeds both the file and the internal dataset name.
    if '.h5' not in args.input_file:
        sys.stderr.write("File name does not indicate hdf5.\n")
        sys.exit(1)

    filepath, dset_name = args.input_file.split('.h5')
    filepath += '.h5'
    if not dset_name:
        sys.stderr.write(
            "You must provide a dataset name, e.g. myfile.h5/mydataset\n")
        sys.exit(1)

    if not os.path.exists(filepath):
        sys.stderr.write("File doesn't exist: {}\n".format(filepath))
        sys.exit(1)

    # If no uuid given, create a new repo on the server
    uuid = args.uuid
    if uuid is None:
        alias = args.new_repo_alias or "testrepo"
        server = DVIDServerService(args.hostname)
        # BUGFIX: the original format string had no '{}' placeholder,
        # so the input filename was silently omitted from the description.
        uuid = server.create_new_repo(
            alias, "This is a test repo loaded with data from {}".format(
                args.input_file))
        uuid = str(uuid)

    # Read the input data from the file (read-only; we never write to it)
    print("Reading {}{}".format(filepath, dset_name))
    with h5py.File(filepath, 'r') as f_in:
        data = f_in[dset_name][:]

    # We assume data is 3D or 4D, in C-order
    # We adjust it to 4D, fortran-order
    if data.ndim == 3:
        data = data[..., None]
    data = data.transpose()
    assert data.flags['F_CONTIGUOUS'], "Data is not contiguous!"
    assert data.ndim == 4, "Data must be 3D with axes zyx or 4D with axes zyxc (C-order)"
    assert data.shape[
        0] == 1, "Data must have exactly 1 channel, not {}".format(
            data.shape[0])

    # Choose a default data instance name if necessary
    if data.dtype == numpy.uint8:
        data_name = args.data_name or "grayscale"
    elif data.dtype == numpy.uint64:
        data_name = args.data_name or "segmentation"
    else:
        sys.stderr.write("Unsupported dtype: {}\n".format(data.dtype))
        sys.exit(1)

    # Create the new data instance if it doesn't exist already
    try:
        metadata = VoxelsAccessor.get_metadata(args.hostname, uuid, data_name)
        print("Data instance '{}' already exists. Will update.".format(
            data_name))
    except DVIDException:
        print("Creating new data instance: {}".format(data_name))
        metadata = VoxelsMetadata.create_default_metadata(
            data.shape, data.dtype, 'cxyz', 1.0, 'nanometers')
        VoxelsAccessor.create_new(args.hostname, uuid, data_name, metadata)

    # Finally, push the data to the server
    print("Pushing data to {}".format('{}/api/node/{}/{}'.format(
        args.hostname, uuid, data_name)))
    accessor = VoxelsAccessor(args.hostname, uuid, data_name)
    accessor.post_ndarray((0, 0, 0, 0), data.shape, data)
    print("DONE.")