    def setUpClass(cls):
        """
        Override.  Called by nosetests.
        """
        # Choose names
        cls.dvid_repo = "datasetA"
        cls.data_name = "random_data"
        cls.volume_location = "/repos/{dvid_repo}/volumes/{data_name}".format(
            **cls.__dict__)

        cls.data_uuid = get_testrepo_root_uuid()
        cls.node_location = "/repos/{dvid_repo}/nodes/{data_uuid}".format(
            **cls.__dict__)

        # Generate some test data
        #data = numpy.random.randint(0, 255, (128, 256, 512, 1)).astype( numpy.uint8 )

        data = numpy.zeros((128, 256, 512, 1), dtype=numpy.uint8)
        data.flat[:] = numpy.arange(numpy.prod((128, 256, 512, 1)))
        cls.original_data = data
        cls.voxels_metadata = VoxelsMetadata.create_default_metadata(
            data.shape, data.dtype, "zyxc", 1.0, "")

        # Write it to a new data instance
        node_service = DVIDNodeService(TEST_DVID_SERVER, cls.data_uuid)

        node_service.create_grayscale8(cls.data_name)
        node_service.put_gray3D(cls.data_name, data[..., 0], (0, 0, 0))
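        # Optional read-back sanity check (a sketch, not part of the original test;
        # it assumes DVIDNodeService.get_gray3D takes (name, zyx_shape, zyx_offset),
        # mirroring the put_gray3D call above):
        roundtrip = node_service.get_gray3D(cls.data_name, data[..., 0].shape, (0, 0, 0))
        assert (roundtrip == data[..., 0]).all()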
Example #2
    def setUpClass(cls):
        """
        Override.  Called by nosetests.
        """
        # Choose names
        cls.dvid_repo = "datasetA"
        cls.data_name = "random_data"
        cls.volume_location = "/repos/{dvid_repo}/volumes/{data_name}".format( **cls.__dict__ )

        cls.data_uuid = get_testrepo_root_uuid()
        cls.node_location = "/repos/{dvid_repo}/nodes/{data_uuid}".format( **cls.__dict__ )

        # Generate some test data
        #data = numpy.random.randint(0, 255, (128, 256, 512, 1)).astype( numpy.uint8 )
        
        data = numpy.zeros((128, 256, 512, 1), dtype=numpy.uint8)
        data.flat[:] = numpy.arange( numpy.prod((128, 256, 512, 1)) )
        cls.original_data = data
        cls.voxels_metadata = VoxelsMetadata.create_default_metadata(data.shape, data.dtype, "zyxc", 1.0, "")

        # Write it to a new data instance
        node_service = DVIDNodeService(TEST_DVID_SERVER, cls.data_uuid)

        node_service.create_grayscale8(cls.data_name)
        node_service.put_gray3D( cls.data_name, data[...,0], (0,0,0) )
Example #3
    def run_export(self):
        self.progressSignal(0)

        url = self.NodeDataUrl.value
        url_path = url.split('://')[1]
        hostname, api, node, uuid, dataname = url_path.split('/')
        assert api == 'api'
        assert node == 'node'
        
        axiskeys = self.Input.meta.getAxisKeys()
        shape = self.Input.meta.shape
        
        if self._transpose_axes:
            axiskeys = reversed(axiskeys)
            shape = tuple(reversed(shape))
        
        axiskeys = "".join( axiskeys )

        if self.OffsetCoord.ready():
            offset_start = self.OffsetCoord.value
        else:
            offset_start = (0,) * len( self.Input.meta.shape )

        self.progressSignal(5)
        
        # Get the dataset details
        try:
            metadata = VoxelsAccessor.get_metadata(hostname, uuid, dataname)
        except VoxelsAccessor.BadRequestError as ex:
            # Dataset doesn't exist yet.  Let's create it.
            metadata = VoxelsMetadata.create_default_metadata( shape, 
                                                               self.Input.meta.dtype, 
                                                               axiskeys, 
                                                               0.0, 
                                                               "" )
            VoxelsAccessor.create_new(hostname, uuid, dataname, metadata)

        # Since this class is generally used to push large blocks of data,
        #  we'll be nice and set throttle=True
        client = VoxelsAccessor( hostname, uuid, dataname, throttle=True )
        
        def handle_block_result(roi, data):
            # Send it to dvid
            roi = numpy.asarray(roi)
            roi += offset_start
            start, stop = roi
            if self._transpose_axes:
                data = data.transpose()
                start = tuple(reversed(start))
                stop = tuple(reversed(stop))
            client.post_ndarray( start, stop, data )
        requester = BigRequestStreamer( self.Input, roiFromShape( self.Input.meta.shape ) )
        requester.resultSignal.subscribe( handle_block_result )
        requester.progressSignal.subscribe( self.progressSignal )
        requester.execute()
        
        self.progressSignal(100)
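
        # For reference, the URL parsing at the top of this method expects a
        # NodeDataUrl shaped like the following (hostname, uuid, and data name
        # are placeholders, not real values):
        #
        #   http://localhost:8000/api/node/a1b2c3d4/grayscale
        #
        # i.e. splitting on '://' and then '/' yields exactly
        # (hostname, 'api', 'node', uuid, dataname).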
    
        
Example #4
    def run_export(self):
        self.progressSignal(0)

        url = self.NodeDataUrl.value
        url_path = url.split('://')[1]
        hostname, api, node, uuid, dataname = url_path.split('/')
        assert api == 'api'
        assert node == 'node'

        axiskeys = self.Input.meta.getAxisKeys()
        shape = self.Input.meta.shape

        if self._transpose_axes:
            axiskeys = reversed(axiskeys)
            shape = tuple(reversed(shape))

        axiskeys = "".join(axiskeys)

        if self.OffsetCoord.ready():
            offset_start = self.OffsetCoord.value
        else:
            offset_start = (0, ) * len(self.Input.meta.shape)

        self.progressSignal(5)

        # Get the dataset details
        try:
            metadata = VoxelsAccessor.get_metadata(hostname, uuid, dataname)
        except DVIDException as ex:
            if ex.status != 404:
                raise
            # Dataset doesn't exist yet.  Let's create it.
            metadata = VoxelsMetadata.create_default_metadata(
                shape, self.Input.meta.dtype, axiskeys, 0.0, "")
            VoxelsAccessor.create_new(hostname, uuid, dataname, metadata)

        # Since this class is generally used to push large blocks of data,
        #  we'll be nice and set throttle=True
        client = VoxelsAccessor(hostname, uuid, dataname, throttle=True)

        def handle_block_result(roi, data):
            # Send it to dvid
            roi = numpy.asarray(roi)
            roi += offset_start
            start, stop = roi
            if self._transpose_axes:
                data = data.transpose()
                start = tuple(reversed(start))
                stop = tuple(reversed(stop))
            client.post_ndarray(start, stop, data)

        requester = BigRequestStreamer(self.Input,
                                       roiFromShape(self.Input.meta.shape))
        requester.resultSignal.subscribe(handle_block_result)
        requester.progressSignal.subscribe(self.progressSignal)
        requester.execute()

        self.progressSignal(100)
Example #5
    def _get_format_selection_error_msg(self, *args):
        """
        If the currently selected format does not support the input image format,
        return an error message stating why. Otherwise, return an empty string.
        """
        if not self.Input.ready():
            return "Input not ready"
        output_format = self.OutputFormat.value

        # These cases support all combinations
        if output_format in ("hdf5", "compressed hdf5", "n5", "compressed n5",
                             "npy", "blockwise hdf5"):
            return ""

        tagged_shape = self.Input.meta.getTaggedShape()
        axes = OpStackWriter.get_nonsingleton_axes_for_tagged_shape(
            tagged_shape)
        output_dtype = self.Input.meta.dtype

        if output_format == "dvid":
            # dvid requires a channel axis, which must come last.
            # Internally, we transpose it before sending it over the wire
            if list(tagged_shape.keys())[-1] != "c":
                return "DVID requires the last axis to be channel."

            # Make sure DVID supports this dtype/channel combo.
            from libdvid.voxels import VoxelsMetadata

            axiskeys = self.Input.meta.getAxisKeys()
            # We reverse the axiskeys because the export operator (see below) uses transpose_axes=True
            reverse_axiskeys = "".join(reversed(axiskeys))
            reverse_shape = tuple(reversed(self.Input.meta.shape))
            metainfo = VoxelsMetadata.create_default_metadata(
                reverse_shape, output_dtype, reverse_axiskeys, 0.0,
                "nanometers")
            try:
                metainfo.determine_dvid_typename()
            except Exception as ex:
                return str(ex)
            else:
                return ""

        return FormatValidity.check(self.Input.meta.getTaggedShape(),
                                    self.Input.meta.dtype, output_format)
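
        # Illustration of the dtype/channel check above (a sketch; it assumes
        # determine_dvid_typename() raises for unsupported dtype/channel combos,
        # which is what the try/except relies on):
        #
        #   meta = VoxelsMetadata.create_default_metadata(
        #       (1, 64, 64, 64), numpy.uint8, "czyx", 0.0, "nanometers")
        #   meta.determine_dvid_typename()  # e.g. "grayscale8" for 1-channel uint8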
Example #6
    def _get_format_selection_error_msg(self, *args):
        """
        If the currently selected format does not support the input image format, 
        return an error message stating why. Otherwise, return an empty string.
        """
        if not self.Input.ready():
            return "Input not ready"
        output_format = self.OutputFormat.value

        # These cases support all combinations
        if output_format in ('hdf5', 'npy', 'blockwise hdf5'):
            return ""
        
        tagged_shape = self.Input.meta.getTaggedShape()
        axes = OpStackWriter.get_nonsingleton_axes_for_tagged_shape( tagged_shape )
        output_dtype = self.Input.meta.dtype

        if output_format == 'dvid':
            # dvid requires a channel axis, which must come last.
            # Internally, we transpose it before sending it over the wire
            if list(tagged_shape.keys())[-1] != 'c':
                return "DVID requires the last axis to be channel."

            # Make sure DVID supports this dtype/channel combo.
            from libdvid.voxels import VoxelsMetadata
            axiskeys = self.Input.meta.getAxisKeys()
            # We reverse the axiskeys because the export operator (see below) uses transpose_axes=True
            reverse_axiskeys = "".join(reversed( axiskeys ))
            reverse_shape = tuple(reversed(self.Input.meta.shape))
            metainfo = VoxelsMetadata.create_default_metadata( reverse_shape,
                                                               output_dtype,
                                                               reverse_axiskeys,
                                                               0.0,
                                                               'nanometers' )
            try:
                metainfo.determine_dvid_typename()
            except Exception as ex:
                return str(ex)
            else:
                return ""

        return FormatValidity.check(self.Input.meta.getTaggedShape(),
                                    self.Input.meta.dtype,
                                    output_format)
Example #7
    def test_zz_quickstart_usage(self):
        import json
        import numpy
        from libdvid import DVIDConnection, ConnectionMethod
        from libdvid.voxels import VoxelsAccessor, VoxelsMetadata

        # Open a connection to DVID
        connection = DVIDConnection("127.0.0.1:8000")

        # Get detailed dataset info: /api/repos/info (note: /api is prepended automatically)
        status, body, _error_message = connection.make_request(
            "/repos/info", ConnectionMethod.GET)
        dataset_details = json.loads(body)
        # print(json.dumps( dataset_details, indent=4 ))

        # Create a new remote volume (assuming you already know the uuid of the node)
        uuid = UUID
        voxels_metadata = VoxelsMetadata.create_default_metadata(
            (0, 0, 0, 1), numpy.uint8, 'zyxc', 1.0, "")
        VoxelsAccessor.create_new("127.0.0.1:8000", uuid, "my_volume",
                                  voxels_metadata)

        # Use the VoxelsAccessor convenience class to manipulate a particular data volume
        accessor = VoxelsAccessor("127.0.0.1:8000", uuid, "my_volume")
        # print(dvid_volume.axiskeys, dvid_volume.dtype, dvid_volume.minindex, dvid_volume.shape)

        # Add some data (must be block-aligned)
        # Must include all channels.
        updated_data = numpy.ones((256, 192, 128, 1), dtype=numpy.uint8)
        accessor[256:512, 32:224, 0:128, 0] = updated_data
        # OR:
        #accessor.post_ndarray( (0,10,20,30), (1,110,120,130), updated_data )

        # Read from it (Last axis is channel.)
        cutout_array = accessor[300:330, 40:120, 10:110, 0]
        # OR:
        cutout_array = accessor.get_ndarray((300, 40, 10, 0),
                                            (330, 120, 110, 1))

        assert isinstance(cutout_array, numpy.ndarray)
        assert cutout_array.shape == (30, 80, 100, 1)
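        # Note on "block-aligned" above: DVID stores voxels in fixed-size blocks
        # (32 voxels per dimension by default; that is a server-side setting, assumed here),
        # so the write bounds used above (256:512, 32:224, 0:128) are all multiples of 32.
        #   assert all(b % 32 == 0 for b in (256, 512, 32, 224, 0, 128))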
Example #8
    def test_zz_quickstart_usage(self):
        import json
        import numpy
        from libdvid import DVIDConnection, ConnectionMethod
        from libdvid.voxels import VoxelsAccessor, VoxelsMetadata

        # Open a connection to DVID
        connection = DVIDConnection( "localhost:8000" )

        # Get detailed dataset info: /api/repos/info (note: /api is prepended automatically)
        status, body, error_message = connection.make_request( "/repos/info", ConnectionMethod.GET)
        dataset_details = json.loads(body)
        # print json.dumps( dataset_details, indent=4 )

        # Create a new remote volume (assuming you already know the uuid of the node)
        uuid = UUID
        voxels_metadata = VoxelsMetadata.create_default_metadata( (1,0,0,0), numpy.uint8, 'cxyz', 1.0, "" )
        VoxelsAccessor.create_new( "localhost:8000", uuid, "my_volume", voxels_metadata )

        # Use the VoxelsAccessor convenience class to manipulate a particular data volume
        accessor = VoxelsAccessor( "localhost:8000", uuid, "my_volume" )
        # print dvid_volume.axiskeys, dvid_volume.dtype, dvid_volume.minindex, dvid_volume.shape

        # Add some data (must be block-aligned)
        # Must include all channels.
        # Must be FORTRAN array, using FORTRAN indexing order conventions
        # (Use order='F', and make sure you're indexing it as cxyz)
        updated_data = numpy.ones( (1,128,192,256), dtype=numpy.uint8, order='F' )
        updated_data = numpy.asfortranarray(updated_data)
        accessor[:, 0:128, 32:224, 256:512] = updated_data
        # OR:
        #accessor.post_ndarray( (0,10,20,30), (1,110,120,130), updated_data )

        # Read from it (First axis is channel.)
        cutout_array = accessor[:, 10:110, 40:120, 300:330]
        # OR:
        cutout_array = accessor.get_ndarray( (0,10,40,300), (1,110,120,330) )

        assert isinstance(cutout_array, numpy.ndarray)
        assert cutout_array.shape == (1,100,80,30)
Example #9
    def setUpClass(cls):
        """
        Override.  Called by nosetests.
        """
        # Choose names
        cls.dvid_repo = "datasetA"
        cls.data_name = "indices_data"
        cls.volume_location = "/repos/{dvid_repo}/volumes/{data_name}".format( **cls.__dict__ )

        cls.data_uuid = get_testrepo_root_uuid()
        cls.node_location = "/repos/{dvid_repo}/nodes/{data_uuid}".format( **cls.__dict__ )

        # Generate some test data
        data = numpy.random.randint(0, 255, (1, 128, 256, 512))
        data = numpy.asfortranarray(data, numpy.uint8)
        cls.original_data = data
        cls.voxels_metadata = VoxelsMetadata.create_default_metadata(data.shape, data.dtype, "cxyz", 1.0, "")

        # Write it to a new data instance
        node_service = DVIDNodeService(TEST_DVID_SERVER, cls.data_uuid)
        node_service.create_grayscale8(cls.data_name)
        node_service.put_gray3D( cls.data_name, data[0,...], (0,0,0) )
Example #10
    def test_zz_quickstart_usage(self):
        import json
        import numpy
        from libdvid import DVIDConnection, ConnectionMethod
        from libdvid.voxels import VoxelsAccessor, VoxelsMetadata

        # Open a connection to DVID
        connection = DVIDConnection( "127.0.0.1:8000" )

        # Get detailed dataset info: /api/repos/info (note: /api is prepended automatically)
        status, body, _error_message = connection.make_request( "/repos/info", ConnectionMethod.GET)
        dataset_details = json.loads(body)
        # print(json.dumps( dataset_details, indent=4 ))

        # Create a new remote volume (assuming you already know the uuid of the node)
        uuid = UUID
        voxels_metadata = VoxelsMetadata.create_default_metadata( (0,0,0,1), numpy.uint8, 'zyxc', 1.0, "" )
        VoxelsAccessor.create_new( "127.0.0.1:8000", uuid, "my_volume", voxels_metadata )

        # Use the VoxelsAccessor convenience class to manipulate a particular data volume
        accessor = VoxelsAccessor( "127.0.0.1:8000", uuid, "my_volume" )
        # print(dvid_volume.axiskeys, dvid_volume.dtype, dvid_volume.minindex, dvid_volume.shape)

        # Add some data (must be block-aligned)
        # Must include all channels.
        updated_data = numpy.ones( (256,192,128,1), dtype=numpy.uint8)
        accessor[256:512, 32:224, 0:128, 0] = updated_data
        # OR:
        #accessor.post_ndarray( (0,10,20,30), (1,110,120,130), updated_data )

        # Read from it (Last axis is channel.)
        cutout_array = accessor[300:330, 40:120, 10:110, 0]
        # OR:
        cutout_array = accessor.get_ndarray( (300,40,10,0), (330,120,110,1) )

        assert isinstance(cutout_array, numpy.ndarray)
        assert cutout_array.shape == (30,80,100,1)
Example #11
def main():
    # Read cmd-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("--hostname", default="localhost:8000")
    parser.add_argument("--uuid", required=False, help="The node to upload to.  If not provided, a new repo will be created (see --new-repo-alias).")
    parser.add_argument("--data-name", required=False, help="The name of the data instance to modify. If it doesn't exist, it will be created first.")
    parser.add_argument("--new-repo-alias", required=False, help="If no uuid is provided, a new repo is created, with this name.")
    parser.add_argument("input_file", help="For example: /tmp/myfile.h5/dataset")
    args = parser.parse_args()

    if '.h5' not in args.input_file:
        sys.stderr.write("File name does not indicate hdf5.\n")
        sys.exit(1)

    filepath, dset_name = args.input_file.split('.h5')
    filepath += '.h5'
    if not dset_name:
        sys.stderr.write("You must provide a dataset name, e.g. myfile.h5/mydataset\n")
        sys.exit(1)

    if not os.path.exists(filepath):
        sys.stderr.write("File doesn't exist: {}\n".format(filepath))
        sys.exit(1)

    # If no uuid given, create a new repo on the server
    uuid = args.uuid
    if uuid is None:
        alias = args.new_repo_alias or "testrepo"
        server = DVIDServerService(args.hostname)
        uuid = server.create_new_repo(alias, "This is a test repo loaded with data from {}".format(args.input_file))
        uuid = str(uuid)

    # Read the input data from the file
    print("Reading {}{}".format( filepath, dset_name ))
    with h5py.File(filepath, 'r') as f_in:
        data = f_in[dset_name][:]

    # We assume data is 3D or 4D, in C-order
    # We adjust it to 4D, fortran-order
    if data.ndim == 3:
        data = data[...,None]
    data = data.transpose()
    assert data.flags['F_CONTIGUOUS'], "Data is not contiguous!"
    assert data.ndim == 4, "Data must be 3D with axes zyx or 4D with axes zyxc (C-order)"
    assert data.shape[0] == 1, "Data must have exactly 1 channel, not {}".format( data.shape[0] )

    # Choose a default data instance name if necessary
    if data.dtype == numpy.uint8:
        data_name = args.data_name or "grayscale"
    elif data.dtype == numpy.uint64:
        data_name = args.data_name or "segmentation"
    else:
        sys.stderr.write("Unsupported dtype: {}\n".format(data.dtype))
        sys.exit(1)

    # Create the new data instance if it doesn't exist already        
    try:
        metadata = VoxelsAccessor.get_metadata(args.hostname, uuid, data_name)
        print("Data instance '{}' already exists.  Will update.".format( data_name ))
    except DVIDException:
        print("Creating new data instance: {}".format( data_name ))
        metadata = VoxelsMetadata.create_default_metadata(data.shape, data.dtype, 'cxyz', 1.0, 'nanometers')
        VoxelsAccessor.create_new(args.hostname, uuid, data_name, metadata)

    # Finally, push the data to the server
    print("Pushing data to {}".format( '{}/api/node/{}/{}'.format( args.hostname, uuid, data_name ) ))
    accessor = VoxelsAccessor(args.hostname, uuid, data_name)
    accessor.post_ndarray((0,0,0,0), data.shape, data)
    print("DONE.")
Example #12
def main():
    # Read cmd-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("--hostname", default="localhost:8000")
    parser.add_argument(
        "--uuid",
        required=False,
        help=
        "The node to upload to.  If not provided, a new repo will be created (see --new-repo-alias)."
    )
    parser.add_argument(
        "--data-name",
        required=False,
        help=
        "The name of the data instance to modify. If it doesn't exist, it will be created first."
    )
    parser.add_argument(
        "--new-repo-alias",
        required=False,
        help="If no uuid is provided, a new repo is created, with this name.")
    parser.add_argument("input_file",
                        help="For example: /tmp/myfile.h5/dataset")
    args = parser.parse_args()

    if '.h5' not in args.input_file:
        sys.stderr.write("File name does not indicate hdf5.\n")
        sys.exit(1)

    filepath, dset_name = args.input_file.split('.h5')
    filepath += '.h5'
    if not dset_name:
        sys.stderr.write(
            "You must provide a dataset name, e.g. myfile.h5/mydataset\n")
        sys.exit(1)

    if not os.path.exists(filepath):
        sys.stderr.write("File doesn't exist: {}\n".format(filepath))
        sys.exit(1)

    # If no uuid given, create a new repo on the server
    uuid = args.uuid
    if uuid is None:
        alias = args.new_repo_alias or "testrepo"
        server = DVIDServerService(args.hostname)
        uuid = server.create_new_repo(
            alias, "This is a test repo loaded with data from ".format(
                args.input_file))
        uuid = str(uuid)

    # Read the input data from the file
    print("Reading {}{}".format(filepath, dset_name))
    with h5py.File(filepath, 'r') as f_in:
        data = f_in[dset_name][:]

    # We assume data is 3D or 4D, in C-order
    # We adjust it to 4D, fortran-order
    if data.ndim == 3:
        data = data[..., None]
    data = data.transpose()
    assert data.flags['F_CONTIGUOUS'], "Data is not contiguous!"
    assert data.ndim == 4, "Data must be 3D with axes zyx or 4D with axes zyxc (C-order)"
    assert data.shape[
        0] == 1, "Data must have exactly 1 channel, not {}".format(
            data.shape[0])

    # Choose a default data instance name if necessary
    if data.dtype == numpy.uint8:
        data_name = args.data_name or "grayscale"
    elif data.dtype == numpy.uint64:
        data_name = args.data_name or "segmentation"
    else:
        sys.stderr.write("Unsupported dtype: {}\n".format(data.dtype))
        sys.exit(1)

    # Create the new data instance if it doesn't exist already
    try:
        metadata = VoxelsAccessor.get_metadata(args.hostname, uuid, data_name)
        print("Data instance '{}' already exists.  Will update.".format(
            data_name))
    except DVIDException:
        print("Creating new data instance: {}".format(data_name))
        metadata = VoxelsMetadata.create_default_metadata(
            data.shape, data.dtype, 'cxyz', 1.0, 'nanometers')
        VoxelsAccessor.create_new(args.hostname, uuid, data_name, metadata)

    # Finally, push the data to the server
    print("Pushing data to {}".format('{}/api/node/{}/{}'.format(
        args.hostname, uuid, data_name)))
    accessor = VoxelsAccessor(args.hostname, uuid, data_name)
    accessor.post_ndarray((0, 0, 0, 0), data.shape, data)
    print("DONE.")