def test_DataInstance(self):
    """Tests DataInstance class.

    Verifies that constructing a DataInstance for a nonexistent data
    name raises ValueError, and that the is_array()/is_labels()
    predicates distinguish labelblk, grayscale8, and keyvalue instances.
    """
    service = DVIDServerService(dvidserver)
    uuid = service.create_new_repo("foo", "bar")

    ns = DVIDNodeService(dvidserver, uuid)
    ns.create_labelblk("labels")
    ns.create_grayscale8("gray")
    ns.create_keyvalue("kv")

    # The original try/except with assertTrue(True) would silently pass
    # even when no error was raised; assertRaises fails the test if the
    # expected ValueError never occurs.
    with self.assertRaises(ValueError):
        DataInstance(dvidserver, uuid, "blah")

    labels = DataInstance(dvidserver, uuid, "labels")
    gray = DataInstance(dvidserver, uuid, "gray")
    kv = DataInstance(dvidserver, uuid, "kv")

    self.assertTrue(labels.is_array())
    self.assertTrue(labels.is_labels())
    self.assertTrue(gray.is_array())
    self.assertFalse(gray.is_labels())
    self.assertFalse(kv.is_array())
    self.assertFalse(kv.is_labels())
def test_isdvidversion(self):
    """Verify is_dvidversion accepts a real UUID and rejects a bogus one."""
    server = DVIDServerService(dvidserver)
    repo_uuid = server.create_new_repo("foo", "bar")
    self.assertTrue(is_dvidversion(dvidserver, repo_uuid))
    self.assertFalse(is_dvidversion(dvidserver, repo_uuid + "JUNK"))
def createrepo():
    """Create a repo, which results in a new gbucket.

    Returns:
        tuple: (True, uuid) on success, (False, error message) on failure.
    """
    try:
        server = DVIDServerService(DVIDLOCATION)
        repo_uuid = server.create_new_repo("test", "test description")
    except Exception as err:
        # best-effort: report any failure back to the caller as a message
        return False, str(err)
    return True, repo_uuid
def test_create_label_instance(self):
    """Create a labelmap instance and verify its default block size."""
    server = DVIDServerService(dvidserver)
    repo_uuid = server.create_new_repo("foo", "bar")
    create_label_instance(dvidserver, repo_uuid, "labels", typename='labelmap')
    self.assertEqual(get_blocksize(dvidserver, repo_uuid, "labels"),
                     (64, 64, 64))
def test_create_rawarray8(self):
    """Create a rawarray with a custom block size and verify the fetch."""
    server = DVIDServerService(dvidserver)
    repo_uuid = server.create_new_repo("foo", "bar")
    create_rawarray8(dvidserver, repo_uuid, "gray", (32, 16, 14),
                     Compression.JPEG)
    self.assertEqual(get_blocksize(dvidserver, repo_uuid, "gray"),
                     (32, 16, 14))
def test_isdatainstance(self):
    """Verify is_datainstance for an existing and a missing instance."""
    server = DVIDServerService(dvidserver)
    repo_uuid = server.create_new_repo("foo", "bar")
    node = DVIDNodeService(dvidserver, repo_uuid)
    node.create_labelblk("labels")
    self.assertTrue(is_datainstance(dvidserver, repo_uuid, "labels"))
    self.assertFalse(is_datainstance(dvidserver, repo_uuid, "labels2"))
def setUpClass(cls):
    """Create a test repo and load a label volume with two sparse lines."""
    server_service = DVIDServerService(TEST_DVID_SERVER)
    cls.uuid = server_service.create_new_repo("foo", "bar")
    cls.instance = 'labels_test_mask_utils'

    # two thin label segments inside an otherwise empty 256^3 volume
    volume = np.zeros((256, 256, 256), np.uint64)
    volume[50, 50, 40:140] = 1
    volume[50, 60:200, 30] = 2
    cls.labels = volume

    node = DVIDNodeService(TEST_DVID_SERVER, cls.uuid)
    node.create_label_instance(cls.instance, 64)
    node.put_labels3D(cls.instance, cls.labels, (0, 0, 0))
def get_testrepo_root_uuid():
    """Return the uuid of the 'testrepo' repo, creating it if absent."""
    connection = DVIDConnection(TEST_DVID_SERVER)
    status, body, error_message = connection.make_request(
        "/repos/info", ConnectionMethod.GET)
    assert status == http.client.OK, \
        "Request for /repos/info returned status {}".format(status)
    assert error_message == ""

    repos_info = json.loads(body)
    matches = [(repo_uuid, info) for repo_uuid, info in repos_info.items()
               if info and info['Alias'] == 'testrepo']
    if matches:
        return str(matches[0][0])

    from libdvid import DVIDServerService
    server = DVIDServerService(TEST_DVID_SERVER)
    return str(server.create_new_repo(
        "testrepo", "This repo is for unit tests to use and abuse."))
def get_testrepo_root_uuid():
    """Return the uuid of the 'testrepo' repo, creating it if absent.

    Queries /repos/info for a repo whose Alias is 'testrepo'; if none
    exists, creates one and returns its uuid as a string.
    """
    connection = DVIDConnection(TEST_DVID_SERVER)
    status, body, error_message = connection.make_request(
        "/repos/info", ConnectionMethod.GET)
    assert status == httplib.OK, "Request for /repos/info returned status {}".format(status)
    assert error_message == ""

    repos_info = json.loads(body)
    # BUGFIX: the original used filter() with a tuple-parameter lambda
    # (`lambda (uuid, repo_info): ...`), which is a SyntaxError on Python 3.
    # A list comprehension behaves identically on Python 2 (where filter()
    # returns a list) and also parses/works under Python 3.
    test_repos = [(repo_uuid, repo_info)
                  for repo_uuid, repo_info in repos_info.items()
                  if repo_info and repo_info['Alias'] == 'testrepo']
    if test_repos:
        uuid = test_repos[0][0]
        return str(uuid)
    else:
        from libdvid import DVIDServerService
        server = DVIDServerService(TEST_DVID_SERVER)
        uuid = server.create_new_repo(
            "testrepo", "This repo is for unit tests to use and abuse.")
        return str(uuid)
def get_testrepo_root_uuid():
    """Return the 'testrepo' root uuid, creating the repo if it is missing."""
    connection = DVIDConnection(TEST_DVID_SERVER, "*****@*****.**", "myapp")
    status, body, _error_message = connection.make_request(
        "/repos/info", ConnectionMethod.GET)

    repos_info = json.loads(body)
    # return the first repo whose alias matches
    for repo_uuid, repo_info in repos_info.items():
        if repo_info and repo_info['Alias'] == 'testrepo':
            return str(repo_uuid)

    from libdvid import DVIDServerService
    server = DVIDServerService(TEST_DVID_SERVER)
    return str(server.create_new_repo(
        "testrepo", "This repo is for unit tests to use and abuse."))
def get_testrepo_root_uuid():
    """Fetch (or create) the root uuid of the shared 'testrepo' repository."""
    connection = DVIDConnection(TEST_DVID_SERVER)
    status, body, error_message = connection.make_request(
        "/repos/info", ConnectionMethod.GET)
    assert status == http.client.OK, \
        "Request for /repos/info returned status {}".format(status)
    assert error_message == ""

    repos_info = json.loads(body)
    for repo_uuid, repo_info in repos_info.items():
        if repo_info and repo_info["Alias"] == "testrepo":
            return str(repo_uuid)

    from libdvid import DVIDServerService
    server = DVIDServerService(TEST_DVID_SERVER)
    return str(server.create_new_repo(
        "testrepo", "This repo is for unit tests to use and abuse."))
def test_dvidpadlabels(self):
    """Check padding data with DVID labels.

    Writes a 58^3 random label volume (embedded in a 64^3 labelblk
    instance at offset 0), then re-reads it through a partition schema
    with an 8-voxel pad and masking enabled at offset (6,6,6), and
    verifies the fetched partitions match a shifted reference copy.
    """
    service = DVIDServerService(dvidserver)
    uuid = service.create_new_repo("foo", "bar")
    ns = DVIDNodeService(dvidserver, uuid)
    ns.create_labelblk("labels")

    arr = np.random.randint(12442, size=(58, 58, 58)).astype(np.uint64)
    arr2 = np.zeros((64, 64, 64), np.uint64)
    arr2[0:58, 0:58, 0:58] = arr

    # load gray data
    ns.put_labels3D("labels", arr2, (0, 0, 0))

    # load shifted data for comparison: arr2 now holds the same random
    # block displaced by 6 voxels on every axis
    arr2[6:64, 6:64, 6:64] = arr

    # read and pad data
    schema = partitionSchema(PartitionDims(32, 64, 64), enablemask=True,
                             padding=8, blank_delimiter=99999)
    volpart = volumePartition(0, VolumeOffset(6, 6, 6))
    partitions = schema.partition_data([(volpart, arr)])

    # fetch with mask
    dvidreader = dvidSrc(dvidserver, uuid, "labels", partitions)
    newparts = dvidreader.extract_volume()
    self.assertEqual(len(newparts), 2)

    # each fetched 32-deep slab must equal the corresponding slab of the
    # shifted reference volume
    for (part, vol) in newparts:
        if part.get_offset().z == 0:
            match = np.array_equal(arr2[0:32, :, :], vol)
            self.assertTrue(match)
        else:
            match = np.array_equal(arr2[32:64, :, :], vol)
            self.assertTrue(match)
def test_sycns(self):
    """Test sync check and setting a sync."""
    server = DVIDServerService(dvidserver)
    repo_uuid = server.create_new_repo("foo", "bar")
    create_label_instance(dvidserver, repo_uuid, "labels")

    # "labels" should not yet be synced to "bodies"
    self.assertFalse(has_sync(dvidserver, repo_uuid, "labels", "bodies"))

    # create a labelvol instance to act as the sync target
    conn = DVIDConnection(dvidserver)
    endpoint = "/repo/" + repo_uuid + "/instance"
    payload = {"typename": "labelvol", "dataname": "bodies"}
    conn.make_request(endpoint, ConnectionMethod.POST,
                      json.dumps(payload).encode())

    set_sync(dvidserver, repo_uuid, "labels", "bodies")
    self.assertTrue(has_sync(dvidserver, repo_uuid, "labels", "bodies"))
def create_repo(self, name, description=""):
    """Create repo.

    Note: DVID does not require unique names but unique names will be
    enforced through this interface.  This will simplify access for most
    common use cases.  In general, users should use the web console and
    specific version ids to ensure access to the desired data.

    Args:
        name (str): name of DVID repository (must be unique)
        description (str): description of repository

    Raises:
        DicedException: if the name already exists or repo creation fails.
    """
    try:
        # enforce name uniqueness before asking DVID to create the repo
        for reponame, _uuid in self.list_repos():
            if reponame == name:
                raise DicedException("Repo name already exists")
        service = DVIDServerService(self._server)
        # returned uuid was assigned but never used in the original;
        # the call is made purely for its server-side effect
        service.create_new_repo(name, description)
    except DVIDException:
        # translate the low-level DVID error into the package exception
        raise DicedException("Failed to create repo")
# Command-line interface: migrate an nrrd file into a DVID node, either an
# existing one (--uuid) or a freshly created one (--alias/--description).
parser = argparse.ArgumentParser(
    description="Batch mode nrrd file to dvid migration script")
existingNode = parser.add_argument_group(
    'existing node',
    'for working with a node that already exists on the dvid server')
parser.add_argument('address', metavar='address',
                    help='address to a valid dvid server in the form x.x.x.x:yyyy')
parser.add_argument('file', metavar='nrrdfile',
                    help='filepath for uploading to dvid')
existingNode.add_argument('--uuid', '-u', metavar='uuid',
                          help='minimal uid of the node to access on the dvid server')
newNode = parser.add_argument_group(
    'new node', 'for creating a new node before migrating the nrrd files')
newNode.add_argument('--alias', '-a', metavar='alias',
                     help='alias for a new node to create')
newNode.add_argument('--description', '-d', metavar='description',
                     help='description for new node')
newNode.add_argument('--segmentation', '-s', action='store_true',
                     help='flags data as a segmentation block in the case that there was no indication in the header')
args = parser.parse_args()
addr = args.address

# either create a new node (when --alias is given) or use the supplied --uuid
if args.alias:
    service = DVIDServerService(addr)
    uid = service.create_new_repo(args.alias, args.description)
else:
    uid = args.uuid


def push_to_dvid(method, handle, data, preoffset=(0, 0, 0), throttle=False,
                 compress=True, chunkDepth=512):
    """Function for pushing to DVID."""
    # split along axis 0 into chunkDepth-deep slabs
    zsize = data.shape[0]
    # NOTE(review): uses the literal 512 instead of chunkDepth, and true
    # division yields a float on Python 3 (xrange below implies Python 2,
    # where int/int floors) -- confirm intent.
    numsplits = zsize / 512
    offset = 0
    # NOTE(review): leftover debugger breakpoint -- should be removed
    # before running unattended.
    pdb.set_trace()
    for i in xrange(numsplits):
        seg = data[i * chunkDepth:(i + 1) * chunkDepth, :, :]
        offsetTuple = (preoffset[0] + i * chunkDepth, preoffset[1],
                       preoffset[2])
parser.add_argument('--verbose', type=bool, dest='verbose', default=False,
                    help='verbose logs')
# NOTE(review): argparse's type=bool is misleading -- any non-empty string
# (including "False") parses as True; action='store_true' is the usual fix.
args = parser.parse_args()

# configure log level from the --verbose flag
if args.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)

# initialize plugin manager
plugin_manager = TrackingPluginManager(verbose=False)
image_provider = plugin_manager.getImageProvider()

# create dataset on server and get uuid
server_address = args.dvidAddress
server_service = DVIDServerService(server_address)
uuid = server_service.create_new_repo(args.datasetName, "description")
logging.info('UUID:\n{}'.format(uuid))

# get node service
node_service = DVIDNodeService(server_address, uuid)

# get dataset size and store in dvid
shape = image_provider.getImageShape(args.ilpFilename, args.labelImagePath)
time_range = image_provider.getTimeRange(args.ilpFilename,
                                         args.labelImagePath)
# optionally clamp the dataset's time range to the user-supplied bounds
if args.timeRange is not None:
    time_range = (max(time_range[0], args.timeRange[0]),
                  min(time_range[1], args.timeRange[1]))
logging.info('Uploading time range {} to {}'.format(time_range,
                                                    server_address))

# store upload settings in a DVID key-value instance named "config"
keyvalue_store = "config"
node_service.create_keyvalue(keyvalue_store)
settings = {"shape": shape, "time_range": time_range}
def main():
    """Upload a 3D/4D hdf5 dataset to a DVID voxels instance.

    Parses command-line args, creates a new repo when no --uuid is given,
    creates the data instance when it does not exist yet, and posts the
    whole volume to the server.  Exits with status 1 on bad input.
    """
    # Read cmd-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("--hostname", default="localhost:8000")
    parser.add_argument(
        "--uuid",
        required=False,
        help=
        "The node to upload to. If not provided, a new repo will be created (see --new-repo-alias)."
    )
    parser.add_argument(
        "--data-name",
        required=False,
        help=
        "The name of the data instance to modify. If it doesn't exist, it will be created first."
    )
    parser.add_argument(
        "--new-repo-alias",
        required=False,
        help="If no uuid is provided, a new repo is created, with this name.")
    parser.add_argument("input_file", help="For example: /tmp/myfile.h5/dataset")
    args = parser.parse_args()

    if '.h5' not in args.input_file:
        sys.stderr.write("File name does not indicate hdf5.\n")
        sys.exit(1)

    filepath, dset_name = args.input_file.split('.h5')
    filepath += '.h5'
    if not dset_name:
        sys.stderr.write(
            "You must provide a dataset name, e.g. myfile.h5/mydataset\n")
        sys.exit(1)

    if not os.path.exists(filepath):
        sys.stderr.write("File doesn't exist: {}\n".format(filepath))
        sys.exit(1)

    # If no uuid given, create a new repo on the server
    uuid = args.uuid
    if uuid is None:
        alias = args.new_repo_alias or "testrepo"
        server = DVIDServerService(args.hostname)
        # BUGFIX: the original format string had no '{}' placeholder, so
        # the input file name was silently dropped from the description.
        uuid = server.create_new_repo(
            alias,
            "This is a test repo loaded with data from {}".format(
                args.input_file))
        uuid = str(uuid)

    # Read the input data from the file
    print("Reading {}{}".format(filepath, dset_name))
    # explicit read-only mode; relying on the default is deprecated in h5py
    with h5py.File(filepath, 'r') as f_in:
        data = f_in[dset_name][:]

    # We assume data is 3D or 4D, in C-order
    # We adjust it to 4D, fortran-order
    if data.ndim == 3:
        data = data[..., None]
    data = data.transpose()
    assert data.flags['F_CONTIGUOUS'], "Data is not contiguous!"
    assert data.ndim == 4, "Data must be 3D with axes zyx or 4D with axes zyxc (C-order)"
    assert data.shape[
        0] == 1, "Data must have exactly 1 channel, not {}".format(
            data.shape[0])

    # Choose a default data instance name if necessary
    if data.dtype == numpy.uint8:
        data_name = args.data_name or "grayscale"
    elif data.dtype == numpy.uint64:
        data_name = args.data_name or "segmentation"
    else:
        sys.stderr.write("Unsupported dtype: {}\n".format(data.dtype))
        sys.exit(1)

    # Create the new data instance if it doesn't exist already
    try:
        metadata = VoxelsAccessor.get_metadata(args.hostname, uuid, data_name)
        print("Data instance '{}' already exists. Will update.".format(
            data_name))
    except DVIDException:
        print("Creating new data instance: {}".format(data_name))
        metadata = VoxelsMetadata.create_default_metadata(
            data.shape, data.dtype, 'cxyz', 1.0, 'nanometers')
        VoxelsAccessor.create_new(args.hostname, uuid, data_name, metadata)

    # Finally, push the data to the server
    print("Pushing data to {}".format('{}/api/node/{}/{}'.format(
        args.hostname, uuid, data_name)))
    accessor = VoxelsAccessor(args.hostname, uuid, data_name)
    accessor.post_ndarray((0, 0, 0, 0), data.shape, data)
    print("DONE.")
def get_testrepo_root_uuid():
    """Create a fresh 'testrepo' repository and return its root uuid."""
    from libdvid import DVIDServerService
    server = DVIDServerService(TEST_DVID_SERVER)
    new_uuid = server.create_new_repo(
        "testrepo", "This repo is for unit tests to use and abuse.")
    return str(new_uuid)
def get_testrepo_root_uuid():
    """Create the shared unit-test repo and return its uuid as a string."""
    from libdvid import DVIDServerService
    service = DVIDServerService(TEST_DVID_SERVER)
    repo_uuid = service.create_new_repo(
        "testrepo", "This repo is for unit tests to use and abuse.")
    return str(repo_uuid)
def main():
    """Upload a 3D/4D hdf5 dataset to a DVID voxels instance.

    Parses command-line args, creates a new repo when no --uuid is given,
    creates the data instance when it does not exist yet, and posts the
    whole volume to the server.  Exits with status 1 on bad input.
    """
    # Read cmd-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("--hostname", default="localhost:8000")
    parser.add_argument("--uuid", required=False, help="The node to upload to. If not provided, a new repo will be created (see --new-repo-alias).")
    parser.add_argument("--data-name", required=False, help="The name of the data instance to modify. If it doesn't exist, it will be created first.")
    parser.add_argument("--new-repo-alias", required=False, help="If no uuid is provided, a new repo is created, with this name.")
    parser.add_argument("input_file", help="For example: /tmp/myfile.h5/dataset")
    args = parser.parse_args()

    if '.h5' not in args.input_file:
        sys.stderr.write("File name does not indicate hdf5.\n")
        sys.exit(1)

    filepath, dset_name = args.input_file.split('.h5')
    filepath += '.h5'
    if not dset_name:
        sys.stderr.write("You must provide a dataset name, e.g. myfile.h5/mydataset\n")
        sys.exit(1)

    if not os.path.exists(filepath):
        sys.stderr.write("File doesn't exist: {}\n".format(filepath))
        sys.exit(1)

    # If no uuid given, create a new repo on the server
    uuid = args.uuid
    if uuid is None:
        alias = args.new_repo_alias or "testrepo"
        server = DVIDServerService(args.hostname)
        # BUGFIX: the original format string had no '{}' placeholder, so
        # the input file name was silently dropped from the description.
        uuid = server.create_new_repo(alias, "This is a test repo loaded with data from {}".format(args.input_file))
        uuid = str(uuid)

    # Read the input data from the file
    print("Reading {}{}".format( filepath, dset_name ))
    # explicit read-only mode; relying on the default is deprecated in h5py
    with h5py.File(filepath, 'r') as f_in:
        data = f_in[dset_name][:]

    # We assume data is 3D or 4D, in C-order
    # We adjust it to 4D, fortran-order
    if data.ndim == 3:
        data = data[...,None]
    data = data.transpose()
    assert data.flags['F_CONTIGUOUS'], "Data is not contiguous!"
    assert data.ndim == 4, "Data must be 3D with axes zyx or 4D with axes zyxc (C-order)"
    assert data.shape[0] == 1, "Data must have exactly 1 channel, not {}".format( data.shape[0] )

    # Choose a default data instance name if necessary
    if data.dtype == numpy.uint8:
        data_name = args.data_name or "grayscale"
    elif data.dtype == numpy.uint64:
        data_name = args.data_name or "segmentation"
    else:
        sys.stderr.write("Unsupported dtype: {}\n".format(data.dtype))
        sys.exit(1)

    # Create the new data instance if it doesn't exist already
    try:
        metadata = VoxelsAccessor.get_metadata(args.hostname, uuid, data_name)
        print("Data instance '{}' already exists. Will update.".format( data_name ))
    except DVIDException:
        print("Creating new data instance: {}".format( data_name ))
        metadata = VoxelsMetadata.create_default_metadata(data.shape, data.dtype, 'cxyz', 1.0, 'nanometers')
        VoxelsAccessor.create_new(args.hostname, uuid, data_name, metadata)

    # Finally, push the data to the server
    print("Pushing data to {}".format( '{}/api/node/{}/{}'.format( args.hostname, uuid, data_name ) ))
    accessor = VoxelsAccessor(args.hostname, uuid, data_name)
    accessor.post_ndarray((0,0,0,0), data.shape, data)
    print("DONE.")
def test_dvidfetchgray(self):
    """Check reading grayscale from DVID from partitions.

    Also exercises basic iteration and overwrite of previous data.
    """
    server = DVIDServerService(dvidserver)
    repo_uuid = server.create_new_repo("foo", "bar")
    node = DVIDNodeService(dvidserver, repo_uuid)
    node.create_grayscale8("gray")

    arr = np.random.randint(255, size=(64, 64, 64)).astype(np.uint8)
    # load gray data
    node.put_gray3D("gray", arr, (0, 0, 0))

    # build partitions seeded with throwaway data that the fetch must replace
    schema = partitionSchema(PartitionDims(32, 64, 64))
    volpart = volumePartition(0, VolumeOffset(0, 0, 0))
    overwrite = np.random.randint(255, size=(64, 64, 64)).astype(np.uint8)
    partitions = schema.partition_data([(volpart, overwrite)])

    def check_slabs(parts):
        # each 32-deep slab must match the corresponding slab of arr
        for part, vol in parts:
            if part.get_offset().z == 0:
                self.assertTrue(np.array_equal(arr[0:32, :, :], vol))
            else:
                self.assertTrue(np.array_equal(arr[32:64, :, :], vol))

    # bulk extraction
    reader = dvidSrc(dvidserver, repo_uuid, "gray", partitions,
                     maskonly=False)
    fetched = reader.extract_volume()
    self.assertEqual(len(fetched), 2)
    check_slabs(fetched)

    # test iteration
    reader2 = dvidSrc(dvidserver, repo_uuid, "gray", partitions,
                      maskonly=False)
    fetched2 = []
    for batch in reader2:
        self.assertEqual(len(batch), 1)
        fetched2.extend(batch)
    self.assertEqual(len(fetched2), 2)
    check_slabs(fetched2)
default=False, help='verbose logs')
args = parser.parse_args()

# configure logging verbosity from the --verbose flag
if args.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)

# initialize plugin manager
plugin_manager = TrackingPluginManager(verbose=False)
image_provider = plugin_manager.getImageProvider()

# create dataset on server and get uuid
server_address = args.dvidAddress
server_service = DVIDServerService(server_address)
uuid = server_service.create_new_repo(args.datasetName, "description")
logging.info('UUID:\n{}'.format(uuid))

# get node service
node_service = DVIDNodeService(server_address, uuid)

# get dataset size and store in dvid
shape = image_provider.getImageShape(args.ilpFilename, args.labelImagePath)
time_range = image_provider.getTimeRange(args.ilpFilename,
                                         args.labelImagePath)
# optionally clamp the dataset's time range to the user-supplied bounds
if args.timeRange is not None:
    time_range = (max(time_range[0], args.timeRange[0]),
                  min(time_range[1], args.timeRange[1]))
logging.info('Uploading time range {} to {}'.format(
    time_range, server_address))
def __init__(self, location, port=8000, rpcport=8001, permissionfile=None,
             appdir=None):
    """Init.

    The user can start the DVIDStore in three different ways.  To store
    data in Google Storage, DVIDStore location should be formatted as
    'gs://<bucketname>', where the bucket should already exist.  This will
    automatically launch a server on the specified local port that will
    communicate with this storage.  A local path can also be specified,
    instead of a google bucket, if very large-scale storage is
    unnecessary.  If the user wants to point to a pre-existing DVID server
    that could have any backend storage, the location should be formatted
    as "dvid://<servername>".

    Note: If permissions are needed to access the google bucket, a
    configuration JSON should be pointed to by the
    GOOGLE_APPLICATION_CREDENTIALS environment variable or should be
    passed to this function.  'dvid' needs to be in the executable path.
    This will be setup by default if using a conda installation.  Default
    DVID ports or specified ports must be available to the program.

    Args:
        location (str): location of DVID server or DB
        port (int): port that DVID will take http requests
        rpcport (int): port that DVID will take rpc requests
        permissionfile (str): permission json file location for gbucket
        appdir (str): directory that contains diced logs and other data

    Exceptions:
        Will raise DicedException if the DVID server cannot be created or
        if the provided address cannot be found.
    """
    self._dvidproc = None  # handle to the locally-launched dvid process, if any
    self._server = None
    self.rpcport = rpcport
    self.port = port

    # if gs or local, launch DVID ourselves; otherwise connect to existing
    gbucket = location.startswith("gs://")
    fileloc = not location.startswith("dvid://") and not gbucket

    if gbucket or fileloc:
        # appdir is '~/.dicedstore' by default
        if appdir is None:
            appdir = '~/.dicedstore'
        appdir = os.path.expanduser(appdir)
        if not os.path.exists(appdir):
            os.makedirs(appdir)
        if not os.path.exists(appdir + "/dvid"):
            os.makedirs(appdir + "/dvid")
        self._server = "127.0.0.1:" + str(port)

        # find pre-built console in resources
        import pkg_resources
        consolepath = pkg_resources.resource_filename(
            'diced', 'dvid-console')

        # create dvidlog (persistent; delete=False keeps it after close)
        logfile = tempfile.NamedTemporaryFile(dir=(appdir + "/dvid"),
                                              suffix='.log',
                                              delete=False)
        logname = logfile.name

        # fill in the TOML template placeholders for this configuration
        tomldata = None
        if gbucket:
            tomldata = self.GBUCKET_TOML
            tomldata = tomldata.replace(self.DBPATH,
                                        location.split("gs://")[1])
        else:
            tomldata = self.LEVELDB_TOML
            tomldata = tomldata.replace(self.DBPATH, location)
        tomldata = tomldata.replace(self.WEBCLIENT,
                                    consolepath + "/lite-dist/")
        tomldata = tomldata.replace(self.LOGNAME, logname)
        tomldata = tomldata.replace(self.RPCPORT, str(rpcport))
        tomldata = tomldata.replace(self.PORT, str(port))

        # write toml to temporary file
        tomlfile = tempfile.NamedTemporaryFile(dir=appdir + "/dvid",
                                               suffix='.toml',
                                               delete=False)
        tomllocation = tomlfile.name
        tomlfile.write(tomldata.encode('utf-8'))
        tomlfile.close()

        # copy environment and set new variable if permissionfile
        local_env = os.environ.copy()
        if permissionfile is not None:
            local_env["GOOGLE_APPLICATION_CREDENTIALS"] = permissionfile

        # check dvid does not already exist on the target port
        dvidexists = False
        try:
            DVIDServerService(self._server)
            dvidexists = True
        except:
            # NOTE(review): bare except deliberately treats any connection
            # failure as "no server running"; consider narrowing.
            pass
        if dvidexists:
            raise DicedException("DVID already exists")

        # launch dvid with its stdout silenced
        with open(os.devnull, 'w') as devnull:
            self._dvidproc = subprocess.Popen(
                ['dvid', 'serve', tomllocation],
                env=local_env,
                stdout=devnull)
    else:
        # pre-existing server: just record the address
        self._server = location.split("dvid://")[1] + ":" + str(port)

    # allow a few seconds for DVID to launch
    if self._dvidproc is not None:
        import time
        # poll server every second until it answers or the process dies
        while True:
            try:
                retval = self._dvidproc.poll()
                # early termination (checking DVID later will fail)
                if retval is not None:
                    self._dvidproc = None
                    break
                DVIDServerService(self._server)
                break
            except:
                # NOTE(review): bare except assumes any error means "not
                # up yet"; a KeyboardInterrupt here would also be swallowed.
                time.sleep(1)  # wait for connection

    # check that dvid server is accepting connections
    try:
        DVIDServerService(self._server)
    except DVIDException as err:
        raise DicedException("DVID connection failed")