Example #1
0
    def transform_scalars(self, dataset):
        """Replace the dataset scalars with their element-wise square root.

        Works on a float32 copy, chunk by chunk, so progress is reported
        and the user has a chance to cancel. Refuses to run (with a
        console warning) when any scalar is negative.
        """

        from tomviz import utils
        import numpy as np
        self.progress.maximum = NUMBER_OF_CHUNKS

        scalars = utils.get_scalars(dataset)
        if scalars is None:
            raise RuntimeError("No scalars found!")

        if scalars.min() < 0:
            print("WARNING: Square root of negative values results in NaN!")
            return

        # Float copy so the square root of integer data is representable.
        result = np.float32(scalars)
        for step, chunk in enumerate(np.array_split(result, NUMBER_OF_CHUNKS),
                                     start=1):
            if self.canceled:
                return
            np.sqrt(chunk, out=chunk)
            self.progress.value = step

        # Set the result as the new scalars.
        utils.set_scalars(dataset, result)
Example #2
0
def transform_scalars(dataset, constant=0):
    """Add a constant to the data set.

    Tries to pick the smallest output type that can represent the shifted
    range, keeping integral data integral where possible; otherwise numpy
    promotes the result type so the addition cannot overflow.
    """

    from tomviz import utils
    import numpy as np

    scalars = utils.get_scalars(dataset)
    if scalars is None:
        raise RuntimeError("No scalars found!")

    # Try to be a little smart so that we don't always just produce a
    # double-precision output
    newMin = np.min(scalars) + constant
    newMax = np.max(scalars) + constant
    # Coerce through float() before calling is_integer(): plain ints (e.g.
    # the default constant=0) and numpy integer scalars have no is_integer()
    # method, so the original expression raised AttributeError.
    if float(constant).is_integer() and float(newMin).is_integer() \
            and float(newMax).is_integer():
        # Let ints be ints!
        constant = int(constant)
        newMin = int(newMin)
        newMax = int(newMax)
    # NOTE(review): numpy >= 2.0 rejects builtin Python scalars in
    # np.can_cast; if this must run against numpy 2, switch to
    # np.min_scalar_type — confirm the target numpy version.
    for dtype in [np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32,
                  np.uint64, np.int64, np.float32, np.float64]:
        if np.can_cast(newMin, dtype) and np.can_cast(newMax, dtype):
            constant = np.array([constant], dtype=dtype)
            break

    # numpy should cast to an appropriate output type to avoid overflow
    result = scalars + constant

    utils.set_scalars(dataset, result)
Example #3
0
def transform_scalars(dataset):
    """Reinterpret a signed integral array type as its unsigned counterpart.
    This can be used when the bytes of a data array have been interpreted as a
    signed array when it should have been interpreted as an unsigned array."""

    from tomviz import utils
    import numpy as np
    scalars = utils.get_scalars(dataset)
    if scalars is None:
        raise RuntimeError("No scalars found!")

    # Signed type -> (unsigned counterpart, offset of 2 ** (bits - 1)).
    conversions = {
        np.int8: (np.uint8, 128),
        np.int16: (np.uint16, 32768),
        np.int32: (np.uint32, 2147483648),
    }

    scalar_type = scalars.dtype.type
    if scalar_type not in conversions:
        raise RuntimeError("Scalars are not int8, int16, or int32")

    unsigned_type, offset = conversions[scalar_type]
    utils.set_scalars(dataset, scalars.astype(dtype=unsigned_type) + offset)
def transform_scalars(dataset):
    """Reinterpret a signed integral array type as its unsigned counterpart.
    This can be used when the bytes of a data array have been interpreted as a
    signed array when it should have been interpreted as an unsigned array."""

    from tomviz import utils
    import numpy as np
    scalars = utils.get_scalars(dataset)
    if scalars is None:
        raise RuntimeError("No scalars found!")

    signed_to_unsigned = {
        np.int8: np.uint8,
        np.int16: np.uint16,
        np.int32: np.uint32,
    }
    # Offset that shifts the signed range onto the unsigned range
    # (2 ** (bits - 1) for each bit width).
    half_range = {
        np.int8: 128,
        np.int16: 32768,
        np.int32: 2147483648,
    }

    scalar_type = scalars.dtype.type
    if scalar_type not in signed_to_unsigned:
        raise RuntimeError("Scalars are not int8, int16, or int32")

    converted = scalars.astype(dtype=signed_to_unsigned[scalar_type])
    utils.set_scalars(dataset, converted + half_range[scalar_type])
Example #5
0
    def transform_scalars(self, dataset):
        """Take the element-wise square root of the dataset scalars,
        processing in chunks so the operation reports progress and can
        be canceled. Does nothing (beyond a warning) on negative data,
        whose square roots would be NaN."""

        from tomviz import utils
        import numpy as np
        self.progress.maximum = NUMBER_OF_CHUNKS

        scalars = utils.get_scalars(dataset)
        if scalars is None:
            raise RuntimeError("No scalars found!")

        if scalars.min() < 0:
            print("WARNING: Square root of negative values results in NaN!")
        else:
            # Work on a float32 copy so integer inputs survive the sqrt.
            buffer = np.float32(scalars)
            pieces = np.array_split(buffer, NUMBER_OF_CHUNKS)
            completed = 0
            for piece in pieces:
                if self.canceled:
                    return
                np.sqrt(piece, out=piece)
                completed += 1
                self.progress.value = completed

            # Write the transformed copy back as the new scalars.
            utils.set_scalars(dataset, buffer)
Example #6
0
def transform_scalars(dataset):
    """Invert the dataset scalars: new = max(scalars) - scalars."""
    from tomviz import utils
    import numpy as np

    scalars = utils.get_scalars(dataset)
    if scalars is None:
        raise RuntimeError("No scalars found!")

    peak = np.amax(scalars)
    utils.set_scalars(dataset, peak - scalars)
Example #7
0
def transform_scalars(dataset):
    """Invert the dataset scalars as float32: new = max(scalars) - scalars."""
    from tomviz import utils
    import numpy as np

    scalars = utils.get_scalars(dataset)
    if scalars is None:
        raise RuntimeError("No scalars found!")

    peak = np.amax(scalars)
    # Subtract from a float32 copy so the output is floating point.
    inverted = peak - np.float32(scalars)
    utils.set_scalars(dataset, inverted)
Example #8
0
def transform_scalars(dataset):
    """Replace the dataset scalars with their element-wise square root."""

    from tomviz import utils
    import numpy as np

    scalars = utils.get_scalars(dataset)
    if scalars is None:
        raise RuntimeError("No scalars found!")

    # np.sqrt allocates a new array; the original scalars are untouched.
    utils.set_scalars(dataset, np.sqrt(scalars))
Example #9
0
def transform_scalars(dataset):
    """Square-root transform: write sqrt(scalars) back to the dataset."""

    from tomviz import utils
    import numpy as np

    values = utils.get_scalars(dataset)
    if values is None:
        raise RuntimeError("No scalars found!")

    transformed = np.sqrt(values)
    utils.set_scalars(dataset, transformed)
Example #10
0
def transform_scalars(dataset):
    """Take the square root of the dataset scalars.

    Refuses to transform (printing a warning instead) when any value is
    negative, since its square root would be NaN.
    """

    from tomviz import utils
    import numpy as np

    scalars = utils.get_scalars(dataset)
    if scalars is None:
        raise RuntimeError("No scalars found!")

    if scalars.min() < 0:
        print("WARNING: Square root of negative values results in NaN!")
        return

    utils.set_scalars(dataset, np.sqrt(scalars))
Example #11
0
def transform_scalars(dataset):
    """Apply an element-wise square root to the dataset scalars.

    Skips the transform (with a console warning) when negative values are
    present, since their square roots would be NaN.
    """

    from tomviz import utils
    import numpy as np

    data = utils.get_scalars(dataset)
    if data is None:
        raise RuntimeError("No scalars found!")

    if data.min() < 0:
        print("WARNING: Square root of negative values results in NaN!")
    else:
        utils.set_scalars(dataset, np.sqrt(data))
Example #12
0
    def transform_scalars(self, dataset):
        """Invert the dataset scalars (new = max - old).

        Processes a float32 copy in chunks so progress is reported and the
        user can cancel mid-way; the output is float32 regardless of the
        input type.
        """
        from tomviz import utils
        import numpy as np
        self.progress.maximum = NUMBER_OF_CHUNKS

        scalars = utils.get_scalars(dataset)
        if scalars is None:
            raise RuntimeError("No scalars found!")

        result = np.float32(scalars)
        # Renamed from "max" so the builtin is not shadowed.
        max_value = np.amax(scalars)
        step = 0
        for chunk in np.array_split(result, NUMBER_OF_CHUNKS):
            if self.canceled:
                return
            chunk[:] = max_value - chunk
            step += 1
            self.progress.value = step

        utils.set_scalars(dataset, result)
Example #13
0
    def transform_scalars(self, dataset):
        """Invert the dataset scalars (new = max - old), chunk by chunk.

        Works on a float32 copy, so the result is float32 regardless of
        the input type; reports per-chunk progress and honors cancellation.
        """
        from tomviz import utils
        import numpy as np
        self.progress.maximum = NUMBER_OF_CHUNKS

        scalars = utils.get_scalars(dataset)
        if scalars is None:
            raise RuntimeError("No scalars found!")

        result = np.float32(scalars)
        # Renamed from "max" so the builtin is not shadowed.
        max_value = np.amax(scalars)
        step = 0
        for chunk in np.array_split(result, NUMBER_OF_CHUNKS):
            if self.canceled:
                return
            chunk[:] = max_value - chunk
            step += 1
            self.progress.value = step

        utils.set_scalars(dataset, result)
    def transform_scalars(self, dataset, label_value=1, principal_axis=0):
        """Computes the distance from the centroid of each connected component
        in the label object with the given label_value to the given principal
        axis and store that distance in each voxel of the label object connected
        component. A principal_axis of 0 is first principal axis, 1 is the
        second, and 2 is third.

        Requires two field data arrays on the dataset: 'PrincipalAxes'
        (3 tuples x 3 components) and 'Center' (1 tuple x 3 components),
        presumably produced by an upstream principal-axes operator — the
        asserts below enforce exactly those shapes.

        Side effects: overwrites the dataset scalars with connected-component
        labels, then replaces the point scalars with a 'Distance' array.
        """

        import numpy as np
        from tomviz import itkutils
        from tomviz import utils

        self.progress.maximum = 100
        self.progress.value = 0

        # Progress percentage reached at the end of each of the four stages:
        # masking, connected components, shape attributes, distance fill.
        STEP_PCT = [20, 60, 80, 100]

        fd = dataset.GetFieldData()
        axis_array = fd.GetArray('PrincipalAxes')
        # NOTE(review): assert-based validation is stripped under "python -O";
        # raise explicitly if that mode is ever used.
        assert axis_array is not None, \
            "Dataset does not have a PrincipalAxes field data array"
        assert axis_array.GetNumberOfTuples() == 3, \
            "PrincipalAxes array requires 3 tuples"
        assert axis_array.GetNumberOfComponents() == 3, \
            "PrincipalAxes array requires 3 components"
        assert principal_axis >= 0 and principal_axis <= 2, \
            "Invalid principal axis. Must be in range [0, 2]."

        # The requested principal axis as a length-3 direction vector.
        axis = np.array(axis_array.GetTuple(principal_axis))

        center_array = fd.GetArray('Center')
        assert center_array is not None, \
            "Dataset does not have a Center field data array"
        assert center_array.GetNumberOfTuples() == 1, \
            "Center array requires 1 tuple"
        assert center_array.GetNumberOfComponents() == 3, \
            "Center array requires 3 components"

        center = np.array(center_array.GetTuple(0))

        # Blank out the undesired label values
        scalars = utils.get_scalars(dataset)
        scalars[scalars != label_value] = 0
        utils.set_scalars(dataset, scalars)
        self.progress.value = STEP_PCT[0]

        # Get connected components of voxels labeled by label value
        def connected_progress_func(fraction):
            # Map the helper's [0, 1] fraction into this stage's progress
            # band; returning self.canceled lets the helper abort early.
            self.progress.value = \
                int(fraction * (STEP_PCT[1] - STEP_PCT[0]) + STEP_PCT[0])
            return self.canceled

        utils.connected_components(dataset, 0, connected_progress_func)

        # Get shape attributes
        def label_progress_func(fraction):
            self.progress.value = \
                int(fraction * (STEP_PCT[2] - STEP_PCT[1]) + STEP_PCT[1])
            return self.canceled

        shape_label_map = \
            itkutils.get_label_object_attributes(dataset, label_progress_func)
        num_label_objects = shape_label_map.GetNumberOfLabelObjects()

        # Map from label value to distance from principal axis. Used later to
        # fill in distance array.
        labels = utils.get_scalars(dataset)
        max_label = np.max(labels)
        # Lookup table indexed by label value; unassigned labels keep 0.
        label_value_to_distance = [0 for i in range(max_label + 1)]
        for i in range(0, num_label_objects):
            label_object = shape_label_map.GetNthLabelObject(i)
            # Flip the centroid. I have verified that the x and z coordinates
            # of the centroid coming out of the shape label objects are swapped,
            # so I reverse it here.
            centroid = np.flipud(np.array(label_object.GetCentroid()))
            v = center - centroid
            # Perpendicular distance: subtract the component of v along the
            # axis, then take the norm of what remains.
            dot = np.dot(v, axis)
            d = np.linalg.norm(v - dot*axis)
            label_value_to_distance[label_object.GetLabel()] = d

        # NOTE(review): assumes "labels" is a flat array aligned with VTK
        # point IDs — confirm utils.get_scalars ordering.
        distance = np.zeros(dataset.GetNumberOfPoints())
        for i in range(len(labels)):
            distance[i] = label_value_to_distance[labels[i]]

        self.progress.value = STEP_PCT[3]

        import vtk.util.numpy_support as np_s
        # deep=1 copies the numpy buffer so VTK owns its own memory.
        distance_array = np_s.numpy_to_vtk(distance, deep=1)
        distance_array.SetName('Distance')
        dataset.GetPointData().SetScalars(distance_array)
    def transform_scalars(self, dataset, label_value=1, principal_axis=0):
        """Computes the distance from the centroid of each connected component
        in the label object with the given label_value to the given principal
        axis and store that distance in each voxel of the label object connected
        component. A principal_axis of 0 is first principal axis, 1 is the
        second, and 2 is third.

        Requires two field data arrays on the dataset: 'PrincipalAxes'
        (3 tuples x 3 components) and 'Center' (1 tuple x 3 components),
        presumably produced by an upstream principal-axes operator — the
        asserts below enforce exactly those shapes.

        Side effects: overwrites the dataset scalars with connected-component
        labels, then replaces the point scalars with a 'Distance' array.
        """

        import numpy as np
        from tomviz import itkutils
        from tomviz import utils

        self.progress.maximum = 100
        self.progress.value = 0

        # Progress percentage reached at the end of each of the four stages:
        # masking, connected components, shape attributes, distance fill.
        STEP_PCT = [20, 60, 80, 100]

        fd = dataset.GetFieldData()
        axis_array = fd.GetArray('PrincipalAxes')
        # NOTE(review): assert-based validation is stripped under "python -O";
        # raise explicitly if that mode is ever used.
        assert axis_array is not None, \
            "Dataset does not have a PrincipalAxes field data array"
        assert axis_array.GetNumberOfTuples() == 3, \
            "PrincipalAxes array requires 3 tuples"
        assert axis_array.GetNumberOfComponents() == 3, \
            "PrincipalAxes array requires 3 components"
        assert principal_axis >= 0 and principal_axis <= 2, \
            "Invalid principal axis. Must be in range [0, 2]."

        # The requested principal axis as a length-3 direction vector.
        axis = np.array(axis_array.GetTuple(principal_axis))

        center_array = fd.GetArray('Center')
        assert center_array is not None, \
            "Dataset does not have a Center field data array"
        assert center_array.GetNumberOfTuples() == 1, \
            "Center array requires 1 tuple"
        assert center_array.GetNumberOfComponents() == 3, \
            "Center array requires 3 components"

        center = np.array(center_array.GetTuple(0))

        # Blank out the undesired label values
        scalars = utils.get_scalars(dataset)
        scalars[scalars != label_value] = 0
        utils.set_scalars(dataset, scalars)
        self.progress.value = STEP_PCT[0]

        # Get connected components of voxels labeled by label value
        def connected_progress_func(fraction):
            # Map the helper's [0, 1] fraction into this stage's progress
            # band; returning self.canceled lets the helper abort early.
            self.progress.value = \
                int(fraction * (STEP_PCT[1] - STEP_PCT[0]) + STEP_PCT[0])
            return self.canceled

        utils.connected_components(dataset, 0, connected_progress_func)

        # Get shape attributes
        def label_progress_func(fraction):
            self.progress.value = \
                int(fraction * (STEP_PCT[2] - STEP_PCT[1]) + STEP_PCT[1])
            return self.canceled

        shape_label_map = \
            itkutils.get_label_object_attributes(dataset, label_progress_func)
        num_label_objects = shape_label_map.GetNumberOfLabelObjects()

        # Map from label value to distance from principal axis. Used later to
        # fill in distance array.
        labels = utils.get_scalars(dataset)
        max_label = np.max(labels)
        # Lookup table indexed by label value; unassigned labels keep 0.
        label_value_to_distance = [0 for i in range(max_label + 1)]
        for i in range(0, num_label_objects):
            label_object = shape_label_map.GetNthLabelObject(i)
            # Flip the centroid. I have verified that the x and z coordinates
            # of the centroid coming out of the shape label objects are swapped,
            # so I reverse it here.
            centroid = np.flipud(np.array(label_object.GetCentroid()))
            v = center - centroid
            # Perpendicular distance: subtract the component of v along the
            # axis, then take the norm of what remains.
            dot = np.dot(v, axis)
            d = np.linalg.norm(v - dot * axis)
            label_value_to_distance[label_object.GetLabel()] = d

        # NOTE(review): assumes "labels" is a flat array aligned with VTK
        # point IDs — confirm utils.get_scalars ordering.
        distance = np.zeros(dataset.GetNumberOfPoints())
        for i in range(len(labels)):
            distance[i] = label_value_to_distance[labels[i]]

        self.progress.value = STEP_PCT[3]

        import vtk.util.numpy_support as np_s
        # deep=1 copies the numpy buffer so VTK owns its own memory.
        distance_array = np_s.numpy_to_vtk(distance, deep=1)
        distance_array.SetName('Distance')
        dataset.GetPointData().SetScalars(distance_array)