Example No. 1
    def test(self):

        p = dm.vtkPartitionedDataSet()

        s = ic.vtkRTAnalyticSource()
        s.SetWholeExtent(0, 10, 0, 10, 0, 5)
        s.Update()

        p1 = dm.vtkImageData()
        p1.ShallowCopy(s.GetOutput())

        s.SetWholeExtent(0, 10, 0, 10, 5, 10)
        s.Update()

        p2 = dm.vtkImageData()
        p2.ShallowCopy(s.GetOutput())

        p.SetPartition(0, p1)
        p.SetPartition(1, p2)

        p2 = dm.vtkPartitionedDataSet()
        p2.ShallowCopy(p)

        c = dm.vtkPartitionedDataSetCollection()
        c.SetPartitionedDataSet(0, p)
        c.SetPartitionedDataSet(1, p2)

        s = SimpleFilter()
        s.SetInputDataObject(c)
        s.Update()
        for i in (0, 1):
            self.assertEqual(
                s.GetOutputDataObject(0).GetPartitionedDataSet(
                    i).GetFieldData().GetArray("counter").GetValue(0), i)
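The tests in this example (and the later VTK test snippets) refer to module aliases such as dm, ic, il, and ixml, plus helpers like SimpleFilter, without showing their imports. A minimal sketch of the presumed setup, assuming VTK's usual Python test layout (the alias names and the helper-filter description are assumptions, not shown in the excerpt):

import os
# Assumed aliases for the vtkmodules packages used by these tests.
import vtkmodules.vtkCommonDataModel as dm   # vtkPartitionedDataSet, vtkImageData, ...
import vtkmodules.vtkImagingCore as ic       # vtkRTAnalyticSource
import vtkmodules.vtkIOLegacy as il          # vtkCompositeDataWriter / Reader
import vtkmodules.vtkIOXML as ixml           # vtkXMLPartitionedDataSetWriter / Reader
from vtkmodules.util.misc import vtkGetTempDir

# SimpleFilter, PartitionAwareFilter, PartitionCollectionAwareFilter and
# CompositeAwareFilter are assumed to be small VTKPythonAlgorithmBase
# subclasses defined elsewhere in the same test module; they stamp a
# "counter" field-data array on the data objects they visit.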
    def test(self):
        p = dm.vtkPartitionedDataSet()

        s = ic.vtkRTAnalyticSource()
        s.SetWholeExtent(0, 10, 0, 10, 0, 5)
        s.Update()

        p1 = dm.vtkImageData()
        p1.ShallowCopy(s.GetOutput())

        s.SetWholeExtent(0, 10, 0, 10, 5, 10)
        s.Update()

        p2 = dm.vtkImageData()
        p2.ShallowCopy(s.GetOutput())

        p.SetPartition(0, p1)
        p.SetPartition(1, p2)

        p2 = dm.vtkPartitionedDataSet()
        p2.ShallowCopy(p)

        c = dm.vtkPartitionedDataSetCollection()
        c.SetPartitionedDataSet(0, p)
        c.SetPartitionedDataSet(1, p2)

        tmpdir = vtkGetTempDir()
        fname = tmpdir + "/testcompowriread.vtk"
        w = il.vtkCompositeDataWriter()
        w.SetInputData(c)
        w.SetFileName(fname)
        w.Write()

        r = il.vtkCompositeDataReader()
        r.SetFileName(fname)
        r.Update()
        o = r.GetOutputDataObject(0)

        self.assertTrue(o.IsA("vtkPartitionedDataSetCollection"))
        nd = o.GetNumberOfPartitionedDataSets()
        self.assertEqual(nd, 2)

        for i in range(nd):
            p = o.GetPartitionedDataSet(i)
            p2 = c.GetPartitionedDataSet(i)
            self.assertTrue(p.IsA("vtkPartitionedDataSet"))
            self.assertEqual(p.GetNumberOfPartitions(), 2)
            # Compare the read-back partition against the matching one in the
            # original collection.
            self.assertEqual(
                p.GetPartition(0).GetNumberOfCells(),
                p2.GetPartition(0).GetNumberOfCells())
        del r
        import gc
        gc.collect()
        os.remove(fname)
Example No. 5
def to_vtk_mask(n_array, spacing=(1.0, 1.0, 1.0), origin=(0.0, 0.0, 0.0)):
    dz, dy, dx = n_array.shape
    ox, oy, oz = origin
    sx, sy, sz = spacing

    ox -= sx
    oy -= sy
    oz -= sz

    v_image = numpy_support.numpy_to_vtk(n_array.flat)
    extent = (0, dx - 1, 0, dy - 1, 0, dz - 1)

    # Generating the vtkImageData
    image = vtkImageData()
    image.SetOrigin(ox, oy, oz)
    image.SetSpacing(sx, sy, sz)
    image.SetDimensions(dx - 1, dy - 1, dz - 1)
    # SetNumberOfScalarComponents and SetScalarType were replaced by
    # AllocateScalars
    #  image.SetNumberOfScalarComponents(1)
    #  image.SetScalarType(numpy_support.get_vtk_array_type(n_array.dtype))
    image.AllocateScalars(numpy_support.get_vtk_array_type(n_array.dtype), 1)
    image.SetExtent(extent)
    image.GetPointData().SetScalars(v_image)

    #  image_copy = vtkImageData()
    #  image_copy.DeepCopy(image)

    return image
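A hypothetical call, assuming the numpy and numpy_support imports that the function itself relies on (the array sizes and values below are made up for illustration):

import numpy as np
from vtkmodules.util import numpy_support          # assumed module used above
from vtkmodules.vtkCommonDataModel import vtkImageData

# A small binary mask: 255 inside a box, 0 elsewhere.
mask = np.zeros((10, 20, 30), dtype=np.uint8)
mask[3:7, 5:15, 10:25] = 255

vtk_mask = to_vtk_mask(mask, spacing=(1.0, 1.0, 1.0), origin=(0.0, 0.0, 0.0))
print(vtk_mask.GetDimensions(), vtk_mask.GetScalarTypeAsString())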
Example No. 6
    def RequestData(self, request, inInfo, outInfo):
        from vtkmodules.vtkCommonDataModel import vtkMultiBlockDataSet
        from vtkmodules.vtkCommonDataModel import vtkImageData
        from vtkmodules.vtkCommonDataModel import vtkFieldData
        from vtkmodules.vtkCommonCore import vtkDoubleArray
        output = vtkMultiBlockDataSet.GetData(outInfo, 0)
        img = vtkImageData()
        img.SetDimensions(16, 16, 16)
        img.AllocateScalars(11, 1)  # 11 == VTK_DOUBLE
        output.SetBlock(0, img)

        dataTable = []

        for k in range(0, self._numOfArrays):
            dataTable.append(vtkDoubleArray())
            dataTable[k].SetNumberOfComponents(1)
            dataTable[k].SetNumberOfTuples(self._numOfValues)
            dataTable[k].SetName("GlobalArray_%d" % k)

            import random
            curvePower = random.randint(1, 2)
            scale = random.randint(1, 50) / 100.0
            for i in range(0, self._numOfValues):
                dataTable[k].SetComponent(
                    i, 0, self._valueOffset + scale * pow(i, curvePower))
                #dataTable[1].SetComponent(i, 0, 0.5*i + self._valueOffset)

        field = vtkFieldData()
        for d in dataTable:
            field.AddArray(d)

        img.SetFieldData(field)
        #output.SetFieldData(field)

        return 1
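This RequestData reads instance attributes such as self._numOfArrays, so it presumably belongs to a VTKPythonAlgorithmBase subclass. A minimal sketch of what that enclosing class could look like (the class name and default values are hypothetical):

from vtkmodules.util.vtkAlgorithm import VTKPythonAlgorithmBase

class GlobalArraySource(VTKPythonAlgorithmBase):  # hypothetical name
    def __init__(self, numOfArrays=2, numOfValues=10, valueOffset=0.0):
        VTKPythonAlgorithmBase.__init__(
            self, nInputPorts=0, nOutputPorts=1,
            outputType='vtkMultiBlockDataSet')
        self._numOfArrays = numOfArrays
        self._numOfValues = numOfValues
        self._valueOffset = valueOffset

    # RequestData as shown above would be defined here.

A source built this way can then be driven with src = GlobalArraySource(); src.Update(); mb = src.GetOutputDataObject(0).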
Example No. 8
    def merge_image(self, build_dim: np.ndarray, spacing: np.ndarray,
                    ls_obj: List['PrintObject']) -> Tuple[vtkImageData, int]:

        bg_im = vtkImageData()
        bg_im.SetSpacing(spacing)
        bg_im.SetDimensions(build_dim)
        bg_im.SetOrigin(0, 0, 0)
        bg_im.AllocateScalars(VTK_UNSIGNED_CHAR, 1)
        bg_im.GetPointData().GetScalars().Fill(255)

        w_bg = dsa.WrapDataObject(bg_im)
        data_bg = np.reshape(w_bg.PointData['ImageScalars'],
                             build_dim,
                             order='F')

        top_layer = 0

        for obj in ls_obj:
            vtk_im = dsa.WrapDataObject(obj.sliced_object)
            origin = np.ceil(np.array(vtk_im.GetOrigin()) /
                             spacing).astype(int)
            obj_dim = np.array(vtk_im.GetDimensions()).astype(int)
            top_z = origin[2] + obj_dim[2]
            obj_data = np.reshape(vtk_im.PointData['ImageScalars'],
                                  obj_dim,
                                  order='F')
            data_bg[origin[0]:origin[0] + obj_dim[0],
                    origin[1]:origin[1] + obj_dim[1],
                    origin[2]:top_z] = obj_data

            if (top_z > top_layer):
                top_layer = top_z

        return bg_im, top_layer
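The Fortran-order reshape above works because vtkImageData stores point scalars with x varying fastest. A tiny sketch of viewing (and mutating) image scalars as a 3D numpy array the same way (the sizes are illustrative; reshaping a 1D array returns a view, so writes go through to the VTK buffer):

import numpy as np
from vtkmodules.numpy_interface import dataset_adapter as dsa
from vtkmodules.vtkCommonCore import VTK_UNSIGNED_CHAR
from vtkmodules.vtkCommonDataModel import vtkImageData

img = vtkImageData()
img.SetDimensions(4, 3, 2)
img.AllocateScalars(VTK_UNSIGNED_CHAR, 1)
img.GetPointData().GetScalars().Fill(0)

wrapped = dsa.WrapDataObject(img)
view = np.reshape(wrapped.PointData['ImageScalars'], (4, 3, 2), order='F')
view[0, 0, 0] = 255  # writes through to the underlying VTK scalar array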
Example No. 9
    def getLookupTableForArrayName(self, name, numSamples=255):
        lutProxy = simple.GetColorTransferFunction(name)
        lut = lutProxy.GetClientSideObject()
        dataRange = lut.GetRange()
        delta = (dataRange[1] - dataRange[0]) / float(numSamples)

        colorArray = vtkUnsignedCharArray()
        colorArray.SetNumberOfComponents(3)
        colorArray.SetNumberOfTuples(numSamples)

        rgb = [0, 0, 0]
        for i in range(numSamples):
            lut.GetColor(dataRange[0] + float(i) * delta, rgb)
            r = int(round(rgb[0] * 255))
            g = int(round(rgb[1] * 255))
            b = int(round(rgb[2] * 255))
            colorArray.SetTuple3(i, r, g, b)

        # Add the color array to an image data
        imgData = vtkImageData()
        imgData.SetDimensions(numSamples, 1, 1)
        aIdx = imgData.GetPointData().SetScalars(colorArray)

        # Use the VTK data encoder to base64-encode the image as a JPEG at
        # quality 100.
        encoder = vtkDataEncoder()
        # Two calls in a row crash on Windows - crude timing hack to avoid
        # the crash.
        time.sleep(0.01)
        b64Str = encoder.EncodeAsBase64Jpg(imgData, 100)

        return {
            "image": "data:image/jpg;base64," + b64Str,
            "range": dataRange,
            "name": name,
        }
Example No. 10
    def populate_volume_data(self):
        """
        Populate volume data
        """

        # Convert pixel_values in patient_dict_container into a 3D numpy array
        self.convert_pixel_values_to_vtk_3d_array()

        # Convert 3d pixel array into vtkImageData to display as vtkVolume
        self.imdata = vtkImageData()
        self.imdata.SetDimensions(self.shape)
        self.imdata.GetPointData().SetScalars(self.depth_array)

        self.volume_mapper = vtkFixedPointVolumeRayCastMapper()
        self.volume_mapper.SetBlendModeToComposite()
        self.volume_mapper.SetInputData(self.imdata)

        # The vtkVolume prop controls the position and orientation
        # of the volume in world coordinates.
        self.volume = vtkVolume()
        self.volume.SetMapper(self.volume_mapper)
        self.volume.SetProperty(self.volume_property)
        self.volume.SetScale(
            self.patient_dict_container.get("pixmap_aspect")["axial"],
            self.patient_dict_container.get("pixmap_aspect")["sagittal"],
            self.patient_dict_container.get("pixmap_aspect")["sagittal"])

        # Add the volume to the renderer
        self.renderer.ResetCamera()
        self.renderer.RemoveVolume(self.volume)
        self.renderer.AddVolume(self.volume)
Example No. 11
    def slice_object(self, input_src: vtkPolyData) -> vtkImageData:
        bounds = np.array(input_src.GetBounds())
        spacing = compute_spacing(self._layer_thickness, self._resolution)
        img_dim = compute_dim(bounds, spacing)
        origin = bounds[0::2]

        background_img = vtkImageData()
        background_img.SetSpacing(spacing)
        background_img.SetDimensions(img_dim)
        background_img.SetOrigin(origin)
        background_img.AllocateScalars(VTK_UNSIGNED_CHAR, 1)
        background_img.GetPointData().GetScalars().Fill(0)

        poly_sten = vtkPolyDataToImageStencil()
        poly_sten.SetInputData(input_src)
        poly_sten.SetOutputOrigin(origin)
        poly_sten.SetOutputSpacing(spacing)
        poly_sten.SetOutputWholeExtent(background_img.GetExtent())
        poly_sten.Update()

        stencil = vtkImageStencil()
        stencil.SetInputData(background_img)
        stencil.SetStencilConnection(poly_sten.GetOutputPort())
        stencil.SetBackgroundValue(255)
        stencil.ReverseStencilOff()
        stencil.Update()
        return stencil.GetOutput()
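The same polydata-to-voxels pattern can be exercised standalone; here is a sketch that voxelizes a sphere instead of a sliced print object (all sizes, spacings, and the 255 background value are illustrative assumptions):

from vtkmodules.vtkCommonCore import VTK_UNSIGNED_CHAR
from vtkmodules.vtkCommonDataModel import vtkImageData
from vtkmodules.vtkFiltersSources import vtkSphereSource
from vtkmodules.vtkImagingStencil import vtkImageStencil, vtkPolyDataToImageStencil

sphere = vtkSphereSource()
sphere.SetRadius(5.0)
sphere.Update()

# Blank image covering the sphere's bounds, filled with 0.
background = vtkImageData()
background.SetSpacing(0.5, 0.5, 0.5)
background.SetDimensions(40, 40, 40)
background.SetOrigin(-10.0, -10.0, -10.0)
background.AllocateScalars(VTK_UNSIGNED_CHAR, 1)
background.GetPointData().GetScalars().Fill(0)

# Rasterize the polydata into a stencil aligned with the image.
to_stencil = vtkPolyDataToImageStencil()
to_stencil.SetInputConnection(sphere.GetOutputPort())
to_stencil.SetOutputOrigin(background.GetOrigin())
to_stencil.SetOutputSpacing(background.GetSpacing())
to_stencil.SetOutputWholeExtent(background.GetExtent())

# Keep the input value (0) inside the surface, write 255 outside it.
stencil = vtkImageStencil()
stencil.SetInputData(background)
stencil.SetStencilConnection(to_stencil.GetOutputPort())
stencil.SetBackgroundValue(255)
stencil.ReverseStencilOff()
stencil.Update()

voxelized = stencil.GetOutput()  # vtkImageData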
Example No. 12
    def writeOrderSprite(self, path):
        ds = vtkImageData()
        ds.SetDimensions(self.width, self.height, self.nbLayers)
        ds.GetPointData().AddArray(self.getSortedOrderArray())

        writer = vtkDataSetWriter()
        writer.SetInputData(ds)
        writer.SetFileName(path)
        writer.Update()
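To check what the sprite file contains, it can be read back with the matching legacy reader; a small sketch (the file name is hypothetical):

from vtkmodules.vtkIOLegacy import vtkDataSetReader

reader = vtkDataSetReader()
reader.SetFileName("order_sprite.vtk")  # hypothetical path
reader.ReadAllScalarsOn()               # load all named point-data arrays
reader.Update()

sprite = reader.GetOutput()
print(sprite.GetDimensions())
print(sprite.GetPointData().GetArrayName(0))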
Example No. 13
    def test(self):

        p = dm.vtkPartitionedDataSet()

        s = ic.vtkRTAnalyticSource()
        s.SetWholeExtent(0, 10, 0, 10, 0, 5)
        s.Update()

        p1 = dm.vtkImageData()
        p1.ShallowCopy(s.GetOutput())

        s.SetWholeExtent(0, 10, 0, 10, 5, 10)
        s.Update()

        p2 = dm.vtkImageData()
        p2.ShallowCopy(s.GetOutput())

        p.SetPartition(0, p1)
        p.SetPartition(1, p2)

        tmpdir = vtkGetTempDir()
        fname = tmpdir+"/testxmlpartds.vtpd"
        w = ixml.vtkXMLPartitionedDataSetWriter()
        w.SetInputData(p)
        w.SetFileName(fname)
        w.Write()

        r = ixml.vtkXMLPartitionedDataSetReader()
        r.SetFileName(fname)
        r.Update()
        o = r.GetOutputDataObject(0)

        print(o.IsA("vtkPartitionedDataSet"))
        np = o.GetNumberOfPartitions()
        self.assertEqual(np, 2)

        for i in range(np):
            d = o.GetPartition(i)
            d2 = p.GetPartition(i)
            self.assertTrue(d.IsA("vtkImageData"))
            self.assertEqual(d.GetNumberOfCells(), d2.GetNumberOfCells())
        os.remove(fname)
Example No. 15
def to_vtk(n_array, spacing, slice_number, orientation):
    """
    It transforms a numpy array into a vtkImageData.
    """
    # TODO Merge this function with imagedata_utils.to_vtk to eliminate
    # duplicated code
    try:
        dz, dy, dx = n_array.shape
    except ValueError:
        dy, dx = n_array.shape
        dz = 1

    v_image = numpy_support.numpy_to_vtk(n_array.flat)

    if orientation == 'AXIAL':
        extent = (0, dx - 1, 0, dy - 1, slice_number, slice_number + dz - 1)
    elif orientation == 'SAGITAL':
        extent = (slice_number, slice_number + dx - 1, 0, dy - 1, 0, dz - 1)
    elif orientation == 'CORONAL':
        extent = (0, dx - 1, slice_number, slice_number + dy - 1, 0, dz - 1)

    image = vtkImageData()
    image.SetOrigin(0, 0, 0)
    image.SetSpacing(spacing)
    image.SetDimensions(dx, dy, dz)
    image.SetExtent(extent)
    #  image.SetNumberOfScalarComponents(1)
    #  image.SetScalarType(numpy_support.get_vtk_array_type(n_array.dtype))
    image.AllocateScalars(numpy_support.get_vtk_array_type(n_array.dtype), 1)
    #  image.Update()
    image.GetCellData().SetScalars(v_image)
    image.GetPointData().SetScalars(v_image)
    #  image.Update()

    image_copy = vtkImageData()
    image_copy.DeepCopy(image)
    #  image_copy.Update()

    return image_copy
Example No. 16
def VtkRead(filepath, t):
    if not const.VTK_WARNING:
        log_path = os.path.join(inv_paths.USER_LOG_DIR, "vtkoutput.txt")
        fow = vtkFileOutputWindow()
        fow.SetFileName(log_path.encode(const.FS_ENCODE))
        ow = vtkOutputWindow()
        ow.SetInstance(fow)

    global no_error

    if t == "bmp":
        reader = vtkBMPReader()

    elif t == "tiff" or t == "tif":
        reader = vtkTIFFReader()

    elif t == "png":
        reader = vtkPNGReader()

    elif t == "jpeg" or t == "jpg":
        reader = vtkJPEGReader()

    else:
        return False

    reader.AddObserver("ErrorEvent", VtkErrorToPy)
    reader.SetFileName(filepath)
    reader.Update()

    if no_error:
        image = reader.GetOutput()
        dim = image.GetDimensions()

        if reader.GetNumberOfScalarComponents() > 1:
            luminanceFilter = vtkImageLuminance()
            luminanceFilter.SetInputData(image)
            luminanceFilter.Update()

            image = vtkImageData()
            image.DeepCopy(luminanceFilter.GetOutput())

        img_array = numpy_support.vtk_to_numpy(
            image.GetPointData().GetScalars())
        img_array.shape = (dim[1], dim[0])

        return img_array
    else:
        no_error = True
        return False
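A hypothetical call, assuming the InVesalius constants and reader modules imported at the top of the original file (the path is made up):

# Returns a 2D numpy array on success, False on an unsupported format or read error.
img_array = VtkRead("/tmp/slice_0001.png", "png")
if img_array is not False:
    print(img_array.shape, img_array.dtype)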
Example No. 17
    def __init__(self,
                 location,
                 imageMimeType,
                 cameraInfo,
                 metadata={},
                 sections={}):
        DataSetBuilder.__init__(self, location, cameraInfo, metadata, sections)

        self.dataHandler.addTypes("volume-composite", "rgba+depth")

        self.imageMimeType = imageMimeType
        self.imageExtenstion = "." + imageMimeType.split("/")[1]

        if imageMimeType == "image/png":
            self.imageWriter = vtkPNGWriter()
        if imageMimeType == "image/jpg":
            self.imageWriter = vtkJPEGWriter()

        self.imageDataColor = vtkImageData()
        self.imageWriter.SetInputData(self.imageDataColor)

        self.imageDataDepth = vtkImageData()
        self.depthToWrite = None

        self.layerInfo = {}
        self.colorByMapping = {}
        self.compositePipeline = {
            "layers": [],
            "dimensions": [],
            "fields": {},
            "layer_fields": {},
            "pipeline": [],
        }
        self.activeDepthKey = ""
        self.activeRGBKey = ""
        self.nodeWithChildren = {}
Example No. 18
def _matrix_math_filter(narray, operation):
    if operation not in ['Determinant', 'Inverse', 'Eigenvalue', 'Eigenvector']:
        raise RuntimeError('Unknown quality measure [' + operation + ']' +
                           ' Supported are [Determinant, Inverse, Eigenvalue, Eigenvector]')

    if narray.ndim != 3:
        raise RuntimeError(operation + ' only works for an array of matrices (3D array).' +
                           ' Input shape ' + str(narray.shape))
    elif narray.shape[1] != narray.shape[2]:
        raise RuntimeError(operation + ' requires an array of 2D square matrices.' +
                           ' Input shape ' + str(narray.shape))

    # numpy_to_vtk converts only contiguous arrays
    if not narray.flags.contiguous:
        narray = narray.copy()

    # Reshape is necessary because numpy_support.numpy_to_vtk only works with 2D or
    # less arrays.
    nrows = narray.shape[0]
    ncols = narray.shape[1] * narray.shape[2]
    narray = narray.reshape(nrows, ncols)

    ds = vtkImageData()
    ds.SetDimensions(nrows, 1, 1)

    varray = numpy_support.numpy_to_vtk(narray)
    varray.SetName('tensors')
    ds.GetPointData().SetTensors(varray)

    filter = vtkMatrixMathFilter()

    if operation == 'Determinant':
        filter.SetOperationToDeterminant()
    elif operation == 'Inverse':
        filter.SetOperationToInverse()
    elif operation == 'Eigenvalue':
        filter.SetOperationToEigenvalue()
    elif operation == 'Eigenvector':
        filter.SetOperationToEigenvector()

    filter.SetInputData(ds)
    filter.Update()

    varray = filter.GetOutput().GetPointData().GetArray(operation)

    ans = dsa.vtkDataArrayToVTKArray(varray)

    # The association information has been lost over the vtk filter
    # we must reconstruct it otherwise lower pipeline will be broken.
    ans.Association = narray.Association
    ans.DataSet = narray.DataSet

    return ans
Example No. 19
def np_rgba_to_vtk(n_array, spacing=(1.0, 1.0, 1.0)):
    dy, dx, dc = n_array.shape
    v_image = numpy_support.numpy_to_vtk(n_array.reshape(dy * dx, dc))

    extent = (0, dx - 1, 0, dy - 1, 0, 0)

    # Generating the vtkImageData
    image = vtkImageData()
    image.SetOrigin(0, 0, 0)
    image.SetSpacing(spacing)
    image.SetDimensions(dx, dy, 1)
    # SetNumberOfScalarComponents and SetScalarType were replaced by
    # AllocateScalars
    #  image.SetNumberOfScalarComponents(1)
    #  image.SetScalarType(numpy_support.get_vtk_array_type(n_array.dtype))
    image.AllocateScalars(numpy_support.get_vtk_array_type(n_array.dtype), dc)
    image.SetExtent(extent)
    image.GetPointData().SetScalars(v_image)

    return image
Example No. 20
def numpy_to_image(numpy_array):
    """Convert a numpy 2D or 3D array to a vtkImageData object.

  numpy_array
    2D or 3D numpy array containing image data

  return
    vtkImageData with the numpy_array content
  """
    try:
        import numpy
    except:
        paraview.print_error("Error: Cannot import numpy")

    shape = numpy_array.shape
    if len(shape) < 2:
        raise Exception('numpy array must have dimensionality of at least 2')

    h, w = shape[0], shape[1]
    c = 1
    if len(shape) == 3:
        c = shape[2]

    # Reshape 2D image to 1D array suitable for conversion to a
    # vtkArray with numpy_support.numpy_to_vtk()
    linear_array = numpy.reshape(numpy_array, (w * h, c))

    try:
        from vtkmodules.util import numpy_support
    except:
        paraview.print_error(
            "Error: Cannot import vtkmodules.util.numpy_support")

    vtk_array = numpy_support.numpy_to_vtk(linear_array)

    image = vtkImageData()
    image.SetDimensions(w, h, 1)
    image.AllocateScalars(vtk_array.GetDataType(), 4)
    image.GetPointData().GetScalars().DeepCopy(vtk_array)

    return image
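A hypothetical round trip: convert a random RGBA numpy image and save it with VTK's PNG writer (the writer import and file name are assumptions; note that the function always allocates 4 components, so 4-channel input maps through directly):

import numpy as np
from vtkmodules.vtkIOImage import vtkPNGWriter

rgba = (np.random.rand(64, 128, 4) * 255).astype(np.uint8)  # h=64, w=128, RGBA
img = numpy_to_image(rgba)

writer = vtkPNGWriter()
writer.SetFileName("numpy_image.png")  # hypothetical output path
writer.SetInputData(img)
writer.Write()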
Example No. 22
    def RequestData(self, request, inInfo, outInfoVec):
        from vtkmodules.vtkCommonDataModel import vtkMultiBlockDataSet
        from vtkmodules.vtkCommonDataModel import vtkImageData
        from vtkmodules.vtkCommonDataModel import vtkFieldData
        from vtkmodules.vtkCommonCore import vtkDoubleArray
        output = vtkMultiBlockDataSet.GetData(outInfoVec, 0)

        if not self._dataTable:
            timesteps = self._get_timesteps()
            for k in range(0, self._numOfArrays):
                self._dataTable.append(vtkDoubleArray())
                self._dataTable[k].SetNumberOfComponents(1)
                self._dataTable[k].SetNumberOfTuples(self._numOfValues)
                self._dataTable[k].SetName("GlobalArray_%d" % k)

                import random
                curvePower = random.randint(1, 2)
                scale = random.randint(1, 50) / 100.0
                for i in timesteps:
                    self._dataTable[k].SetComponent(
                        i, 0, self._valueOffset + scale * pow(i, curvePower))

        field = vtkFieldData()
        for d in self._dataTable:
            field.AddArray(d)

        img = vtkImageData()
        img.SetDimensions(16, 16, 16)
        img.AllocateScalars(11, 1)  # 11 == VTK_DOUBLE
        img.SetFieldData(field)
        output.SetBlock(0, img)

        data_time = self._get_update_time(outInfoVec.GetInformationObject(0))
        if data_time is not None:
            output.GetInformation().Set(output.DATA_TIME_STEP(), data_time)

        return 1
Example No. 24
    def RequestData(self, request, inInfoVec, outInfoVec):
        global _has_openpmd
        if not _has_openpmd:
            print_error("Required Python module 'openpmd_api' missing!")
            return 0

        from vtkmodules.vtkCommonDataModel import vtkImageData, vtkUnstructuredGrid
        from vtkmodules.vtkCommonDataModel import vtkPartitionedDataSet, vtkPartitionedDataSetCollection
        from vtkmodules.vtkCommonExecutionModel import vtkExtentTranslator, vtkStreamingDemandDrivenPipeline
        from vtkmodules.numpy_interface import dataset_adapter as dsa

        executive = vtkStreamingDemandDrivenPipeline
        output = vtkPartitionedDataSet.GetData(outInfoVec, 0)
        poutput = vtkPartitionedDataSetCollection.GetData(outInfoVec, 1)
        outInfo = outInfoVec.GetInformationObject(0)
        piece = outInfo.Get(executive.UPDATE_PIECE_NUMBER())
        npieces = outInfo.Get(executive.UPDATE_NUMBER_OF_PIECES())
        nghosts = outInfo.Get(executive.UPDATE_NUMBER_OF_GHOST_LEVELS())
        et = vtkExtentTranslator()

        data_time = self._get_update_time(outInfo)
        idx = self._timemap[data_time]

        itr = self._series.iterations[idx]
        arrays = []
        narrays = self._arrayselection.GetNumberOfArrays()
        for i in range(narrays):
            if self._arrayselection.GetArraySetting(i):
                name = self._arrayselection.GetArrayName(i)
                arrays.append((name, self._find_array(itr, name)))
        shp = None
        spacing = None
        theta_modes = None
        grid_offset = None
        for _, ary in arrays:
            var = ary[0]
            for name, scalar in var.items():
                shape = scalar.shape
                break
            spc = list(ary[1])
            if not spacing:
                spacing = spc
            elif spacing != spc:  # all meshes need to have the same spacing
                return 0
            offset = list(ary[2])
            if not grid_offset:
                grid_offset = offset
            elif grid_offset != offset:  # all meshes need to have the same grid offset
                return 0
            if not shp:
                shp = shape
            elif shape != shp:  # all arrays need to have the same shape
                return 0
            if not theta_modes:
                theta_modes = ary[3]

        if theta_modes:
            et.SetWholeExtent(0, shp[0] - 1, 0, shp[1] - 1, 0, shp[2] - 1)
            et.SetSplitModeToZSlab()  # note: Y and Z are both fine
            et.SetPiece(piece)
            et.SetNumberOfPieces(npieces)
            # et.SetGhostLevel(nghosts)
            et.PieceToExtentByPoints()
            ext = et.GetExtent()

            chunk_offset = [ext[0], ext[2], ext[4]]
            chunk_extent = [
                ext[1] - ext[0] + 1, ext[3] - ext[2] + 1, ext[5] - ext[4] + 1
            ]

            data = []
            nthetas = 100  # user parameter
            thetas = np.linspace(0., 2. * np.pi, nthetas)
            chunk_cyl_shape = (chunk_extent[1], chunk_extent[2], nthetas)  # z, r, theta
            for name, var in arrays:
                cyl_values = np.zeros(chunk_cyl_shape)
                values = self._load_array(var[0], chunk_offset, chunk_extent)
                self._series.flush()

                print(chunk_cyl_shape)
                print(values.shape)
                print("+++++++++++")
                for ntheta in range(nthetas):
                    cyl_values[:, :, ntheta] += values[0, :, :]
                data.append((name, cyl_values))
                # add all other modes via loop
                # for m in range(theta_modes):

            cyl_spacing = [spacing[0], spacing[1], thetas[1] - thetas[0]]

            z_coord = np.linspace(0., cyl_spacing[0] * chunk_cyl_shape[0],
                                  chunk_cyl_shape[0])
            r_coord = np.linspace(0., cyl_spacing[1] * chunk_cyl_shape[1],
                                  chunk_cyl_shape[1])
            t_coord = thetas

            # to cartesian
            print(z_coord.shape, r_coord.shape, t_coord.shape)
            cyl_coords = np.meshgrid(r_coord, z_coord, t_coord)
            rs = cyl_coords[1]
            zs = cyl_coords[0]
            thetas = cyl_coords[2]

            y_coord = rs * np.sin(thetas)
            x_coord = rs * np.cos(thetas)
            z_coord = zs
            # mesh_pts = np.zeros((chunk_cyl_shape[0], chunk_cyl_shape[1], chunk_cyl_shape[2], 3))
            # mesh_pts[:, :, :, 0] = z_coord

            img = vtkImageData()
            img.SetExtent(chunk_offset[1],
                          chunk_offset[1] + chunk_cyl_shape[0] - 1,
                          chunk_offset[2],
                          chunk_offset[2] + chunk_cyl_shape[1] - 1, 0,
                          nthetas - 1)
            img.SetSpacing(cyl_spacing)

            imgw = dsa.WrapDataObject(img)
            output.SetPartition(0, img)
            for name, array in data:
                # print(array.shape)
                # print(array.transpose(2,1,0).flatten(order='C').shape)
                imgw.PointData.append(
                    array.transpose(2, 1, 0).flatten(order='C'), name)

            # data = []
            # for name, var in arrays:
            #     unit_SI = var[0].unit_SI
            #     data.append((name, unit_SI * var[0].load_chunk(chunk_offset, chunk_extent)))
            # self._series.flush()

        else:
            et.SetWholeExtent(0, shp[0] - 1, 0, shp[1] - 1, 0, shp[2] - 1)
            et.SetPiece(piece)
            et.SetNumberOfPieces(npieces)
            et.SetGhostLevel(nghosts)
            et.PieceToExtent()
            ext = et.GetExtent()

            chunk_offset = [ext[0], ext[2], ext[4]]
            chunk_extent = [
                ext[1] - ext[0] + 1, ext[3] - ext[2] + 1, ext[5] - ext[4] + 1
            ]

            data = []
            for name, var in arrays:
                values = self._load_array(var[0], chunk_offset, chunk_extent)
                self._series.flush()
                data.append((name, values))

            img = vtkImageData()
            img.SetExtent(ext[0], ext[1], ext[2], ext[3], ext[4], ext[5])
            img.SetSpacing(spacing)
            img.SetOrigin(grid_offset)

            et.SetGhostLevel(0)
            et.PieceToExtent()
            ext = et.GetExtent()
            ext = [ext[0], ext[1], ext[2], ext[3], ext[4], ext[5]]
            img.GenerateGhostArray(ext)
            imgw = dsa.WrapDataObject(img)
            output.SetPartition(0, img)
            for name, array in data:
                imgw.PointData.append(array, name)

        itr = self._series.iterations[idx]
        array_by_species = {}
        narrays = self._particlearrayselection.GetNumberOfArrays()
        for i in range(narrays):
            if self._particlearrayselection.GetArraySetting(i):
                name = self._particlearrayselection.GetArrayName(i)
                names = self._get_particle_array_and_component(itr, name)
                if names[0] and self._speciesselection.ArrayIsEnabled(
                        names[0]):
                    if not names[0] in array_by_species:
                        array_by_species[names[0]] = []
                    array_by_species[names[0]].append(names)
        ids = 0
        for species, arrays in array_by_species.items():
            pds = vtkPartitionedDataSet()
            ugrid = vtkUnstructuredGrid()
            pds.SetPartition(0, ugrid)
            poutput.SetPartitionedDataSet(ids, pds)
            ids += 1
            self._load_species(itr, species, arrays, piece, npieces,
                               dsa.WrapDataObject(ugrid))

        return 1
Example No. 25
    def transform_scalars(self, dataset, Niter=None, Niter_update_support=None,
                          supportSigma=None, supportThreshold=None):
        """
        3D Reconstruct from a tilt series using constraint-based Direct Fourier
        Method
        """
        self.progress.maximum = 1

        from tomviz import utils
        import numpy as np

        supportThreshold = supportThreshold / 100.0

        nonnegativeVoxels = True
        tiltAngles = utils.get_tilt_angles(dataset) #Get Tilt angles

        tiltSeries = utils.get_array(dataset)
        if tiltSeries is None:
            raise RuntimeError("No scalars found!")

        self.progress.message = 'Initialization'

        #Direct Fourier recon without constraints
        (recon, recon_F) \
            = dfm3(tiltSeries, tiltAngles, np.size(tiltSeries, 1) * 2)

        kr_cutoffs = np.linspace(0.05, 0.5, 10)
        #average Fourier magnitude of tilt series as a function of kr
        I_data = radial_average(tiltSeries, kr_cutoffs)

        (Nx, Ny, Nz) = recon_F.shape
        #Note: Nz = np.int(Ny/2+1)
        Ntot = Nx * Ny * Ny
        f = pyfftw.n_byte_align_empty((Nx, Ny, Nz), 16, dtype='complex128')
        r = pyfftw.n_byte_align_empty((Nx, Ny, Ny), 16, dtype='float64')
        fft_forward = pyfftw.FFTW(r, f, axes=(0, 1, 2))
        fft_inverse = pyfftw.FFTW(
            f, r, direction='FFTW_BACKWARD', axes=(0, 1, 2))

        kx = np.fft.fftfreq(Nx)
        ky = np.fft.fftfreq(Ny)
        kz = ky[0:Nz]

        kX, kY, kZ = np.meshgrid(ky, kx, kz)
        kR = np.sqrt(kY**2 + kX**2 + kZ**2)

        sigma = 0.5 * supportSigma
        G = np.exp(-kR**2 / (2 * sigma**2))

        #create initial support using sw
        f = recon_F * G
        fft_inverse.update_arrays(f, r)
        fft_inverse.execute()
        cutoff = np.amax(r) * supportThreshold
        support = r >= cutoff

        recon_F[kR > kr_cutoffs[-1]] = 0

        x = np.random.rand(Nx, Ny, Ny) #initial solution

        self.progress.maximum = Niter
        step = 0

        t0 = time.time()
        counter = 1
        etcMessage = 'Estimated time to complete: n/a'

        for i in range(Niter):
            if self.canceled:
                return
            self.progress.message = 'Iteration No.%d/%d. ' % (
                i + 1, Niter) + etcMessage

            #image space projection
            y1 = x.copy()

            if nonnegativeVoxels:
                y1[y1 < 0] = 0  #non-negative constraint

            y1[np.logical_not(support)] = 0 #support constraint

            #Fourier space projection
            y2 = 2 * y1 - x

            r = y2.copy()
            fft_forward.update_arrays(r, f)
            fft_forward.execute()

            f[kR > kr_cutoffs[-1]] = 0 #apply low pass filter
            f[recon_F != 0] = recon_F[recon_F != 0] #data constraint

            #Fourier magnitude constraint
            #leave the inner shell unchanged
            for j in range(1, kr_cutoffs.size):
                shell = np.logical_and(
                    kR > kr_cutoffs[j - 1], kR <= kr_cutoffs[j])
                shell[recon_F != 0] = False
                I = np.sum(np.absolute(f[shell]))
                if I != 0:
                    I = I / np.sum(shell)
                    # lower magnitude for high frequency information to reduce
                    # artifacts
                    f[shell] = f[shell] / I * I_data[j] * 0.5

            fft_inverse.update_arrays(f, r)
            fft_inverse.execute()
            y2 = r.copy() / Ntot

            #update
            x = x + y2 - y1

            #update support
            if (i < Niter and np.mod(i, Niter_update_support) == 0):
                recon[:] = (y2 + y1) / 2
                r = recon.copy()
                fft_forward.update_arrays(r, f)
                fft_forward.execute()
                f = f * G
                fft_inverse.update_arrays(f, r)
                fft_inverse.execute()
                cutoff = np.amax(r) * supportThreshold
                support = r >= cutoff
            step += 1
            self.progress.value = step
            timeLeft = (time.time() - t0) / counter * (Niter - counter)
            counter += 1
            timeLeftMin, timeLeftSec = divmod(timeLeft, 60)
            timeLeftHour, timeLeftMin = divmod(timeLeftMin, 60)
            etcMessage = 'Estimated time to complete: %02d:%02d:%02d' % (
                timeLeftHour, timeLeftMin, timeLeftSec)

        recon[:] = (y2 + y1) / 2
        recon[:] = np.fft.fftshift(recon)

        from vtkmodules.vtkCommonDataModel import vtkImageData
        recon_dataset = vtkImageData()
        recon_dataset.CopyStructure(dataset)
        utils.set_array(recon_dataset, recon)
        utils.mark_as_volume(recon_dataset)

        returnValues = {}
        returnValues["reconstruction"] = recon_dataset
        return returnValues
Example No. 26
    def transform_scalars(self, dataset, Niter=1):
        """
        3D Reconstruction using Algebraic Reconstruction Technique (ART)
        """
        self.progress.maximum = 1

        # Get Tilt angles
        tiltAngles = utils.get_tilt_angles(dataset)

        # Get Tilt Series
        tiltSeries = utils.get_array(dataset)
        if tiltSeries is None:
            raise RuntimeError("No scalars found!")

        (Nslice, Nray, Nproj) = tiltSeries.shape

        # Generate measurement matrix
        self.progress.message = 'Generating measurement matrix'
        A = parallelRay(Nray, 1.0, tiltAngles, Nray, 1.0) #A is a sparse matrix
        recon = np.empty([Nslice, Nray, Nray], dtype=float, order='F')

        A = A.todense()
        (Nslice, Nray, Nproj) = tiltSeries.shape
        (Nrow, Ncol) = A.shape
        rowInnerProduct = np.zeros(Nrow)
        row = np.zeros(Ncol)
        f = np.zeros(Ncol) # Placeholder for 2d image
        beta = 1.0

        # Calculate row inner product
        for j in range(Nrow):
            row[:] = A[j, ].copy()
            rowInnerProduct[j] = np.dot(row, row)

        self.progress.maximum = Nslice
        step = 0
        t0 = time.time()
        etcMessage = 'Estimated time to complete: n/a'

        counter = 1
        for s in range(Nslice):
            if self.canceled:
                return
            f[:] = 0
            b = tiltSeries[s, :, :].transpose().flatten()
            for i in range(Niter):
                self.progress.message = 'Slice No.%d/%d, iteration No.%d/%d. ' \
                    % (s + 1, Nslice, i + 1, Niter) + etcMessage
                for j in range(Nrow):
                    row[:] = A[j, ].copy()
                    row_f_product = np.dot(row, f)
                    a = (b[j] - row_f_product) / rowInnerProduct[j]
                    f = f + row * a * beta

                timeLeft = (time.time() - t0) / counter * \
                    (Nslice * Niter - counter)
                counter += 1
                timeLeftMin, timeLeftSec = divmod(timeLeft, 60)
                timeLeftHour, timeLeftMin = divmod(timeLeftMin, 60)
                etcMessage = 'Estimated time to complete: %02d:%02d:%02d' % (
                    timeLeftHour, timeLeftMin, timeLeftSec)

            recon[s, :, :] = f.reshape((Nray, Nray))

            step += 1
            self.progress.value = step

        from vtkmodules.vtkCommonDataModel import vtkImageData
        # Set up the output dataset
        recon_dataset = vtkImageData()
        recon_dataset.CopyStructure(dataset)
        utils.set_array(recon_dataset, recon)
        utils.mark_as_volume(recon_dataset)

        returnValues = {}
        returnValues["reconstruction"] = recon_dataset
        return returnValues
Example No. 27
    def transform_scalars(self, dataset):
        """3D Reconstruct from a tilt series using Direct Fourier Method"""

        self.progress.maximum = 1

        # Get Tilt angles
        tiltAngles = utils.get_tilt_angles(dataset)

        tiltSeries = utils.get_array(dataset)
        if tiltSeries is None:
            raise RuntimeError("No scalars found!")

        tiltSeries = np.double(tiltSeries)
        (Nx, Ny, Nproj) = tiltSeries.shape
        Npad = Ny * 2

        tiltAngles = np.double(tiltAngles)
        pad_pre = int(np.ceil((Npad - Ny) / 2.0))
        pad_post = int(np.floor((Npad - Ny) / 2.0))

        # Initialization
        self.progress.message = 'Initialization'
        Nz = Ny
        w = np.zeros((Nx, Ny, Nz // 2 + 1)) #store weighting factors
        v = pyfftw.empty_aligned(
            (Nx, Ny, Nz // 2 + 1), dtype='complex64', n=16)

        p = pyfftw.empty_aligned((Nx, Npad), dtype='float32', n=16)
        pF = pyfftw.empty_aligned(
            (Nx, Npad // 2 + 1), dtype='complex64', n=16)
        p_fftw_object = pyfftw.FFTW(p, pF, axes=(0, 1))

        dk = np.double(Ny) / np.double(Npad)

        self.progress.maximum = Nproj + 1
        step = 0

        t0 = time.time()
        etcMessage = 'Estimated time to complete: n/a'
        counter = 1
        for a in range(Nproj):
            if self.canceled:
                return
            self.progress.message = 'Tilt image No.%d/%d. ' % (
                a + 1, Nproj) + etcMessage

            ang = tiltAngles[a] * np.pi / 180
            projection = tiltSeries[:, :, a] #2D projection image
            p = np.lib.pad(projection, ((0, 0), (pad_pre, pad_post)),
                           'constant', constant_values=(0, 0)) #pad zeros
            p = np.float32(np.fft.ifftshift(p))
            p_fftw_object.update_arrays(p, pF)
            p_fftw_object()
            p = None #Garbage collector (gc)

            if ang < 0:
                pF = np.conj(pF)
                pF[1:, :] = np.flipud(pF[1:, :])
                ang = np.pi + ang

            # Bilinear extrapolation
            for i in range(0, int(np.ceil(Npad / 2)) + 1):
                ky = i * dk
                #kz = 0
                ky_new = np.cos(ang) * ky #new coord. after rotation
                kz_new = np.sin(ang) * ky
                sy = abs(np.floor(ky_new) - ky_new) #calculate weights
                sz = abs(np.floor(kz_new) - kz_new)
                for b in range(1, 5): #bilinear extrapolation
                    pz, py, weight = bilinear(kz_new, ky_new, sz, sy, Ny, b)
                    if (py >= 0 and py < Ny and pz >= 0 and pz < np.floor(Nz / 2 + 1)):
                        w[:, py, pz] = w[:, py, pz] + weight
                        v[:, py, pz] = v[:, py, pz] + \
                            weight * pF[:, i]
            step += 1
            self.progress.value = step
            timeLeft = (time.time() - t0) / counter * (Nproj - counter)
            counter += 1
            timeLeftMin, timeLeftSec = divmod(timeLeft, 60)
            timeLeftHour, timeLeftMin = divmod(timeLeftMin, 60)
            etcMessage = 'Estimated time to complete: %02d:%02d:%02d' % (
                timeLeftHour, timeLeftMin, timeLeftSec)

        p = pF = None #gc

        self.progress.message = 'Inverse Fourier transform'
        v_temp = v.copy()
        recon = pyfftw.empty_aligned(
            (Nx, Ny, Nz), dtype='float32', order='F', n=16)
        recon_fftw_object = pyfftw.FFTW(
            v_temp, recon, direction='FFTW_BACKWARD', axes=(0, 1, 2))
        v[w != 0] = v[w != 0] / w[w != 0]
        recon_fftw_object.update_arrays(v, recon)
        v = v_temp = []    #gc
        recon_fftw_object()
        recon[:] = np.fft.fftshift(recon)

        step += 1
        self.progress.value = step

        self.progress.message = 'Passing data to Tomviz'
        from vtkmodules.vtkCommonDataModel import vtkImageData
        recon_dataset = vtkImageData()
        recon_dataset.CopyStructure(dataset)
        utils.set_array(recon_dataset, recon)
        utils.mark_as_volume(recon_dataset)

        recon = None #gc

        returnValues = {}
        returnValues["reconstruction"] = recon_dataset
        return returnValues
Example No. 28
    def test(self):

        p = dm.vtkPartitionedDataSet()

        s = ic.vtkRTAnalyticSource()
        s.SetWholeExtent(0, 10, 0, 10, 0, 5)
        s.Update()

        p1 = dm.vtkImageData()
        p1.ShallowCopy(s.GetOutput())

        s.SetWholeExtent(0, 10, 0, 10, 5, 10)
        s.Update()

        p2 = dm.vtkImageData()
        p2.ShallowCopy(s.GetOutput())

        p.SetPartition(0, p1)
        p.SetPartition(1, p2)

        p2 = dm.vtkPartitionedDataSet()
        p2.ShallowCopy(p)

        c = dm.vtkPartitionedDataSetCollection()
        c.SetPartitionedDataSet(0, p)
        c.SetPartitionedDataSet(1, p2)

        # SimpleFilter:
        sf = SimpleFilter()
        sf.SetInputDataObject(c)
        sf.Update()
        self.assertEqual(sf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 2)
        for i in (0, 1):
            pdsc = sf.GetOutputDataObject(0)
            self.assertEqual(pdsc.GetClassName(), "vtkPartitionedDataSetCollection")
            pds = pdsc.GetPartitionedDataSet(i)
            self.assertEqual(pds.GetClassName(), "vtkPartitionedDataSet")
            self.assertEqual(pds.GetNumberOfPartitions(), 2)
            for j in (0, 1):
                part = pds.GetPartition(j)
                countArray = part.GetFieldData().GetArray("counter")
                info = countArray.GetInformation()
                self.assertEqual(countArray.GetValue(0), i * 2 + j)
                self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()), "vtkImageData")

        # PartitionAwareFilter
        pf = PartitionAwareFilter()
        pf.SetInputDataObject(c)
        pf.Update()
        self.assertEqual(pf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 2)
        for i in (0, 1):
            pdsc = pf.GetOutputDataObject(0)
            self.assertEqual(pdsc.GetClassName(), "vtkPartitionedDataSetCollection")
            pds = pdsc.GetPartitionedDataSet(i)
            self.assertEqual(pds.GetClassName(), "vtkPartitionedDataSet")
            self.assertEqual(pds.GetNumberOfPartitions(), 0)
            countArray = pds.GetFieldData().GetArray("counter")
            info = countArray.GetInformation()
            self.assertEqual(countArray.GetValue(0), i)
            self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()), "vtkPartitionedDataSet")

        # PartitionCollectionAwareFilter
        pcf = PartitionCollectionAwareFilter()
        pcf.SetInputDataObject(c)
        pcf.Update()
        self.assertEqual(pcf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 0)
        pdsc = pcf.GetOutputDataObject(0)
        self.assertEqual(pdsc.GetClassName(), "vtkPartitionedDataSetCollection")
        countArray = pdsc.GetFieldData().GetArray("counter")
        info = countArray.GetInformation()
        self.assertEqual(countArray.GetValue(0), 0)
        self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()), "vtkPartitionedDataSetCollection")

        # CompositeAwareFilter
        cf = CompositeAwareFilter()
        cf.SetInputDataObject(c)
        cf.Update()
        self.assertEqual(cf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 0)
        pdsc = cf.GetOutputDataObject(0)
        self.assertEqual(pdsc.GetClassName(), "vtkPartitionedDataSetCollection")
        countArray = pdsc.GetFieldData().GetArray("counter")
        info = countArray.GetInformation()
        self.assertEqual(countArray.GetValue(0), 0)
        self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()), "vtkPartitionedDataSetCollection")
Example No. 29
    def transform_scalars(self, dataset, minimum_radius=4):
        """Segment spherical particles from a homogeneous, dark background.
        Even if the particles have pores, they are segmented as solid
        structures.
        """

        # Initial progress
        self.progress.value = 0
        self.progress.maximum = 100

        # Approximate percentage of work completed after each step in the
        # transform
        step_pct = iter([10, 10, 10, 10, 10, 10, 10, 10, 10, 10])

        try:
            import itk
            from vtkmodules.vtkCommonDataModel import vtkImageData
            from tomviz import itkutils
            from tomviz import utils
        except Exception as exc:
            print("Could not import necessary module(s)")
            raise exc

        # Return values
        returnValues = None

        # Add a try/except around the ITK portion. ITK exceptions are
        # passed up to the Python layer, so we can at least report what
        # went wrong with the script, e.g., unsupported image type.
        try:
            self.progress.message = "Converting data to ITK image"

            # Get the ITK image
            itk_input_image = itkutils.convert_vtk_to_itk_image(dataset)
            self.progress.value = next(step_pct)

            smoothed = median_filter(self, step_pct, itk_input_image)

            Dimension = 3
            StructuringElementType = itk.FlatStructuringElement[Dimension]
            structuring_element = StructuringElementType.Ball(minimum_radius)

            # Reduces reconstruction streak artifact effects and artifacts far
            # from the center of the image.
            opened = opening_by_reconstruction(self, step_pct, smoothed,
                                               structuring_element)

            thresholded = threshold(self, step_pct, opened)

            # Removes structures smaller than the structuring element while
            # retaining particle shape
            # Grayscale implementation is faster than binary
            cleaned = opening_by_reconstruction(self, step_pct, thresholded,
                                                structuring_element)

            # Fill in pores
            # Grayscale implementation is faster than binary
            closed = morphological_closing(self, step_pct, cleaned,
                                           structuring_element)

            # Fill in pores
            # Grayscale implementation is faster than binary
            filled = fill_holes(self, step_pct, closed)

            # Disconnect separate particles and reduce reconstruction
            opening = morphological_opening(self, step_pct, filled,
                                            structuring_element)

            self.progress.message = "Saving results"

            label_buffer = itk.PyBuffer[type(itk_input_image)] \
                .GetArrayFromImage(opening)

            label_map_dataset = vtkImageData()
            label_map_dataset.CopyStructure(dataset)
            utils.set_array(label_map_dataset, label_buffer, isFortran=False)

            # Set up dictionary to return operator results
            returnValues = {}
            returnValues["label_map"] = label_map_dataset

        except Exception as exc:
            print("Problem encountered while running %s" %
                  self.__class__.__name__)
            raise exc

        return returnValues
Example No. 30
    def transform_scalars(self,
                          dataset,
                          minimum_radius=0.5,
                          maximum_radius=6.):
        """Segment pores. The pore size must be greater than the minimum radius
        and less than the maximum radius.  Pores will be separated according to
        the minimum radius."""

        # Initial progress
        self.progress.value = 0
        self.progress.maximum = 100

        # Approximate percentage of work completed after each step in the
        # transform
        step_pct = iter([5, 5, 5, 5, 5, 5, 5, 5, 5, 30, 10, 5, 5, 5])

        try:
            import itk
            from vtkmodules.vtkCommonDataModel import vtkImageData
            from tomviz import itkutils
            from tomviz import utils
            import numpy as np
        except Exception as exc:
            print("Could not import necessary module(s)")
            raise exc

        # Return values
        returnValues = None

        # Add a try/except around the ITK portion. ITK exceptions are
        # passed up to the Python layer, so we can at least report what
        # went wrong with the script, e.g., unsupported image type.
        try:
            self.progress.message = "Converting data to ITK image"

            # Get the ITK image
            itk_input_image = itkutils.convert_vtk_to_itk_image(dataset)
            self.progress.value = next(step_pct)

            # Reduce noise
            smoothed = median_filter(self, step_pct, itk_input_image)

            # Enhance pore contrast
            enhanced = unsharp_mask(self, step_pct, smoothed)

            thresholded = threshold(self, step_pct, enhanced)

            dimension = itk_input_image.GetImageDimension()
            spacing = itk_input_image.GetSpacing()
            closing_radius = itk.Size[dimension]()
            closing_radius.Fill(1)
            for dim in range(dimension):
                radius = int(np.round(maximum_radius / spacing[dim]))
                if radius > closing_radius[dim]:
                    closing_radius[dim] = radius
            StructuringElementType = itk.FlatStructuringElement[dimension]
            structuring_element = \
                StructuringElementType.Ball(closing_radius)
            particle_mask = morphological_closing(self, step_pct, thresholded,
                                                  structuring_element)

            encapsulated = encapsulate(self, step_pct, thresholded,
                                       particle_mask, structuring_element)

            distance = get_distance(self, step_pct, encapsulated)

            segmented = watershed(self, step_pct, distance, minimum_radius)

            inverted = invert(self, step_pct, thresholded)

            segmented.DisconnectPipeline()
            inverted.DisconnectPipeline()
            separated = apply_mask(self, step_pct, segmented, inverted)

            separated.DisconnectPipeline()
            particle_mask.DisconnectPipeline()
            in_particles = apply_mask(self, step_pct, separated, particle_mask)

            opening_radius = itk.Size[dimension]()
            opening_radius.Fill(1)
            for dim in range(dimension):
                radius = int(np.round(minimum_radius / spacing[dim]))
                if radius > opening_radius[dim]:
                    opening_radius[dim] = radius
            structuring_element = \
                StructuringElementType.Ball(opening_radius)
            opened = opening_by_reconstruction(self, step_pct, in_particles,
                                               structuring_element)

            self.progress.message = "Saving results"

            label_buffer = itk.PyBuffer[type(opened)] \
                .GetArrayFromImage(opened)

            # temp
            label_buffer = label_buffer.copy()
            label_map_dataset = vtkImageData()
            label_map_dataset.CopyStructure(dataset)
            utils.set_array(label_map_dataset, label_buffer, isFortran=False)

            # Set up dictionary to return operator results
            returnValues = {}
            returnValues["label_map"] = label_map_dataset

        except Exception as exc:
            print("Problem encountered while running %s" %
                  self.__class__.__name__)
            raise exc

        return returnValues
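# The helper functions called above (median_filter, unsharp_mask, threshold,
# morphological_closing, encapsulate, get_distance, watershed, invert,
# apply_mask and opening_by_reconstruction) live elsewhere in the operator
# module and are not part of this snippet. A minimal sketch of the convention
# they appear to follow -- run one ITK filter, then advance the shared step_pct
# progress iterator -- might look like the hypothetical helper below; it is an
# illustration, not the tomviz implementation.
def median_filter(operator, step_pct, itk_image, radius=1):
    import itk
    # Denoise with ITK's median filter, templated on the input image type.
    ImageType = type(itk_image)
    median = itk.MedianImageFilter[ImageType, ImageType].New()
    median.SetRadius(radius)
    median.SetInput(itk_image)
    median.Update()
    # Each helper reports the approximate share of the total work it finished.
    operator.progress.value = next(step_pct)
    return median.GetOutput()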
Exemplo n.º 31
0
    def transform_scalars(self, dataset, Niter=None, Niter_update_support=None,
                          supportSigma=None, supportThreshold=None):
        """
        3D Reconstruct from a tilt series using constraint-based Direct Fourier
        Method
        """
        self.progress.maximum = 1

        from tomviz import utils
        import numpy as np
        # Also used below, but not imported in the original snippet:
        import time
        import pyfftw

        supportThreshold = supportThreshold / 100.0

        nonnegativeVoxels = True
        tiltAngles = utils.get_tilt_angles(dataset) #Get Tilt angles

        tiltSeries = utils.get_array(dataset)
        if tiltSeries is None:
            raise RuntimeError("No scalars found!")

        self.progress.message = 'Initialization'

        #Direct Fourier recon without constraints
        (recon, recon_F) \
            = dfm3(tiltSeries, tiltAngles, np.size(tiltSeries, 1) * 2)

        kr_cutoffs = np.linspace(0.05, 0.5, 10)
        #average Fourier magnitude of tilt series as a function of kr
        I_data = radial_average(tiltSeries, kr_cutoffs)

        (Nx, Ny, Nz) = recon_F.shape
        #Note: Nz = np.int(Ny/2+1)
        Ntot = Nx * Ny * Ny
        f = pyfftw.n_byte_align_empty((Nx, Ny, Nz), 16, dtype=np.complex64)
        r = pyfftw.n_byte_align_empty((Nx, Ny, Ny), 16, dtype=np.float32)
        fft_forward = pyfftw.FFTW(r, f, axes=(0, 1, 2))
        fft_inverse = pyfftw.FFTW(
            f, r, direction='FFTW_BACKWARD', axes=(0, 1, 2))

        kx = np.fft.fftfreq(Nx)
        ky = np.fft.fftfreq(Ny)
        kz = ky[0:Nz]

        kX, kY, kZ = np.meshgrid(ky, kx, kz)
        kR = np.sqrt(kY**2 + kX**2 + kZ**2)

        sigma = 0.5 * supportSigma
        G = np.exp(-kR**2 / (2 * sigma**2))

        #create initial support using sw
        f = (recon_F * G).astype(np.complex64)
        fft_inverse.update_arrays(f, r)
        fft_inverse.execute()
        cutoff = np.amax(r) * supportThreshold
        support = r >= cutoff

        recon_F[kR > kr_cutoffs[-1]] = 0

        x = np.random.rand(Nx, Ny, Ny).astype(np.float32) #initial solution

        self.progress.maximum = Niter
        step = 0

        t0 = time.time()
        counter = 1
        etcMessage = 'Estimated time to complete: n/a'

        for i in range(Niter):
            if self.canceled:
                return
            self.progress.message = 'Iteration No.%d/%d. ' % (
                i + 1, Niter) + etcMessage

            #image space projection
            y1 = x.copy()

            if nonnegativeVoxels:
                y1[y1 < 0] = 0  #non-negative constraint

            y1[np.logical_not(support)] = 0 #support constraint

            #Fourier space projection
            y2 = 2 * y1 - x
            r = y2.copy().astype(np.float32)
            fft_forward.update_arrays(r, f)
            fft_forward.execute()

            f[kR > kr_cutoffs[-1]] = 0 #apply low pass filter
            f[recon_F != 0] = recon_F[recon_F != 0] #data constraint

            #Fourier magnitude constraint
            #leave the inner shell unchanged
            for j in range(1, kr_cutoffs.size):
                shell = np.logical_and(
                    kR > kr_cutoffs[j - 1], kR <= kr_cutoffs[j])
                shell[recon_F != 0] = False
                I = np.sum(np.absolute(f[shell]))
                if I != 0:
                    I = I / np.sum(shell)
                    # lower magnitude for high frequency information to reduce
                    # artifacts
                    f[shell] = f[shell] / I * I_data[j] * 0.5

            fft_inverse.update_arrays(f, r)
            fft_inverse.execute()
            y2 = r.copy() / Ntot

            #update
            x = x + y2 - y1

            #update support
            if (i < Niter and np.mod(i, Niter_update_support) == 0):
                recon[:] = (y2 + y1) / 2
                r = recon.copy()
                fft_forward.update_arrays(r, f)
                fft_forward.execute()
                f = (f * G).astype(np.complex64)
                fft_inverse.update_arrays(f, r)
                fft_inverse.execute()
                cutoff = np.amax(r) * supportThreshold
                support = r >= cutoff
            step += 1
            self.progress.value = step
            timeLeft = (time.time() - t0) / counter * (Niter - counter)
            counter += 1
            timeLeftMin, timeLeftSec = divmod(timeLeft, 60)
            timeLeftHour, timeLeftMin = divmod(timeLeftMin, 60)
            etcMessage = 'Estimated time to complete: %02d:%02d:%02d' % (
                timeLeftHour, timeLeftMin, timeLeftSec)

        recon[:] = (y2 + y1) / 2
        recon[:] = np.fft.fftshift(recon)

        from vtkmodules.vtkCommonDataModel import vtkImageData
        recon_dataset = vtkImageData()
        recon_dataset.CopyStructure(dataset)
        utils.set_array(recon_dataset, recon)
        utils.mark_as_volume(recon_dataset)

        returnValues = {}
        returnValues["reconstruction"] = recon_dataset
        return returnValues
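# dfm3() and radial_average() above are helpers defined elsewhere in the
# operator module. As an illustration of the contract assumed here (a sketch,
# not the tomviz code), radial_average() can be read as the mean Fourier
# magnitude of the tilt series inside each radial-frequency shell bounded by
# kr_cutoffs:
def radial_average(tiltSeries, kr_cutoffs):
    import numpy as np
    Nx, Ny = tiltSeries.shape[0], tiltSeries.shape[1]
    kx = np.fft.fftfreq(Nx)
    ky = np.fft.fftfreq(Ny)
    kX, kY = np.meshgrid(ky, kx)
    kR = np.sqrt(kX ** 2 + kY ** 2)
    # Magnitude spectrum of each projection, averaged over the tilt axis.
    F = np.abs(np.fft.fft2(tiltSeries, axes=(0, 1))).mean(axis=2)
    I_data = np.zeros(kr_cutoffs.size)
    lower = 0.0
    for j, upper in enumerate(kr_cutoffs):
        shell = np.logical_and(kR > lower, kR <= upper)
        if shell.any():
            I_data[j] = F[shell].mean()
        lower = upper
    return I_data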
Exemplo n.º 32
0
def main():
    VTK_DATA_ROOT = vtkGetDataRoot()
    folder = "/Users/nandana/Downloads/image_ex"

    #read dicom files from specified directory
    reader = vtkDICOMImageReader()
    reader.SetDirectoryName(folder)
    reader.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")

    reader.SetDataExtent(0, 63, 0, 63, 1, 93)
    reader.SetDataSpacing(3.2, 3.2, 1.5)
    reader.SetDataOrigin(-150.0, 150.0, 3.0)
    reader.SetDataScalarTypeToUnsignedShort()
    reader.UpdateWholeExtent()

    # Calculate the center of the volume
    reader.Update()

    (xMin, xMax, yMin, yMax, zMin,
     zMax) = reader.GetExecutive().GetWholeExtent(
         reader.GetOutputInformation(0))
    (xSpacing, ySpacing, zSpacing) = reader.GetOutput().GetSpacing()
    (x0, y0, z0) = reader.GetOutput().GetOrigin()

    center = [
        x0 + xSpacing * 0.5 * (xMin + xMax),
        y0 + ySpacing * 0.5 * (yMin + yMax),
        z0 + zSpacing * 0.5 * (zMin + zMax)
    ]
    yd = ((yMax - yMin) + 1) * ySpacing
    """
    # Matrices for axial, coronal, sagittal, oblique view orientations
    axial = vtkMatrix4x4()
    axial.DeepCopy((1, 0, 0, center[0],
                    0, 1, 0, center[1],
                    0, 0, 1, center[2],
                    0, 0, 0, 1))

    coronal = vtkMatrix4x4()
    coronal.DeepCopy((1, 0, 0, center[0],
                      0, 0, 1, center[1],
                      0,-1, 0, center[2],
                      0, 0, 0, 1))

    sagittal = vtkMatrix4x4()
    sagittal.DeepCopy((0, 0,-1, center[0],
                       1, 0, 0, center[1],
                       0,-1, 0, center[2],
                       0, 0, 0, 1))

    oblique = vtkMatrix4x4()
    oblique.DeepCopy((1, 0, 0, center[0],
                      0, 0.866025, -0.5, center[1],
                      0, 0.5, 0.866025, center[2],
                      0, 0, 0, 1))
    
    reslice = vtkImageReslice()
    outputPort = reader.GetOutputPort()
    #reslice.SetInputConnection(reader.GetOutputPort())
    reslice.SetInputConnection(0, reader.GetOutputPort())
    print(reader.GetOutput().GetExtent())
    reslice.SetOutputExtent(reader.GetOutput().GetExtent())
    reslice.SetOutputDimensionality(2)
    reslice.SetResliceAxes(coronal)
    reslice.SetInterpolationModeToLinear()
    
    """
    # Visualize
    imageViewer = vtkResliceImageViewer()
    imageViewer.SetSliceOrientationToXY()
    #imageViewer.SetSlice(9)

    imageViewer.SetResliceModeToAxisAligned()
    imageViewer.SliceScrollOnMouseWheelOff()
    imageViewer.SetInputData(reader.GetOutput())

    #imageViewer.Render()
    camera = imageViewer.GetRenderer().GetActiveCamera()

    print(camera.GetOrientationWXYZ())

    # slice status message
    sliceTextProp = vtkTextProperty()
    sliceTextProp.SetFontFamilyToCourier()
    sliceTextProp.SetFontSize(20)
    sliceTextProp.SetVerticalJustificationToBottom()
    sliceTextProp.SetJustificationToLeft()
    sliceTextMapper = vtkTextMapper()
    msg = "Slice {} out of {}".format(imageViewer.GetSlice() + 1, \
                                     imageViewer.GetSliceMax() + 1)
    sliceTextMapper.SetInput(msg)
    sliceTextMapper.SetTextProperty(sliceTextProp)

    sliceTextActor = vtkActor2D()
    sliceTextActor.SetMapper(sliceTextMapper)
    sliceTextActor.SetPosition(100, 10)

    # coordinate display
    coordTextProp = vtkTextProperty()
    coordTextProp.SetFontFamilyToCourier()
    coordTextProp.SetFontSize(20)
    coordTextProp.SetVerticalJustificationToBottom()
    coordTextProp.SetJustificationToLeft()

    coordTextMapper = vtkTextMapper()
    coordTextMapper.SetInput("Pixel Coordinates: (--, --)")
    coordTextMapper.SetTextProperty(coordTextProp)

    coordTextActor = vtkActor2D()
    coordTextActor.SetMapper(coordTextMapper)
    coordTextActor.SetPosition(500, 10)

    worldCoordTextProp = vtkTextProperty()
    worldCoordTextProp.SetFontFamilyToCourier()
    worldCoordTextProp.SetFontSize(20)
    worldCoordTextProp.SetVerticalJustificationToBottom()
    worldCoordTextProp.SetJustificationToLeft()

    worldCoordTextMapper = vtkTextMapper()
    worldCoordTextMapper.SetInput("World Coordinates: (--, --)")
    worldCoordTextMapper.SetTextProperty(worldCoordTextProp)

    worldCoordTextActor = vtkActor2D()
    worldCoordTextActor.SetMapper(worldCoordTextMapper)
    worldCoordTextActor.SetPosition(500, 30)

    # usage hint message
    usageTextProp = vtkTextProperty()
    usageTextProp.SetFontFamilyToCourier()
    usageTextProp.SetFontSize(14)
    usageTextProp.SetVerticalJustificationToTop()
    usageTextProp.SetJustificationToLeft()

    usageTextMapper = vtkTextMapper()
    usageTextMapper.SetInput(
        "- Slice with mouse wheel\n- Zoom with pressed right\n  mouse button while dragging\n- Press i to toggle cursor line on/off"
    )
    usageTextMapper.SetTextProperty(usageTextProp)

    usageTextActor = vtkActor2D()
    usageTextActor.SetMapper(usageTextMapper)
    usageTextActor.GetPositionCoordinate(
    ).SetCoordinateSystemToNormalizedDisplay()
    usageTextActor.GetPositionCoordinate().SetValue(0.05, 0.95)

    actor = imageViewer.GetImageActor()
    #image = vtkImageActor()
    #actor.GetMapper().SetInputData(reader.GetOutput())

    image = imageViewer.GetInput()

    roiData = vtkImageData()
    roiImage = vtkImageActor()

    roiData.DeepCopy(image)
    extent = roiData.GetExtent()

    for i in range(extent[0], extent[1]):
        for j in range(extent[2], extent[3]):
            for k in range(extent[4], extent[5]):
                if image.GetScalarComponentAsDouble(i, j, k, 0) > -100:
                    roiData.SetScalarComponentFromDouble(i, j, k, 0, 1)
                    #roiData.SetScalarComponentFromDouble(0, i, j, k, 1)

                else:  #just in case
                    roiData.SetScalarComponentFromDouble(i, j, k, 0, 0.0)
                    #roiData.SetScalarComponentFromDouble(0, i, j, k, 0.0)
    """            
    for i in range(extent[0], extent[1]):
        for j in range(extent[2], extent[3]):
            #for k in range(extent[4], extent[5]):
            #k = 0
            roiData.SetScalarComponentFromDouble(i, j, k, 0, 0.0)
            #roiData.SetScalarComponentFromDouble(0, i, j, k, 0.0)
    """

    print(extent)

    table = vtkLookupTable()
    table.SetNumberOfTableValues(2)
    table.SetRange(0.0, 1.0)
    table.SetTableValue(0, 0.0, 0.0, 0.0, 0.0)
    table.SetTableValue(1, 0.0, 1.0, 0.0, 1.0)
    table.Build()

    mapToColor = vtkImageMapToColors()
    mapToColor.SetLookupTable(table)
    mapToColor.PassAlphaToOutputOn()

    mapToColor.SetInputData(roiData)

    #actor.GetMapper().SetInputConnection(mapToColor.GetOutputPort())
    roiImage.GetMapper().SetInputConnection(mapToColor.GetOutputPort())

    imageViewer.SetInputData(image)

    interactorStyle = vtkInteractorStyleImage()
    interactor = vtkRenderWindowInteractor()

    imageViewer.SetupInteractor(interactor)
    interactor.SetInteractorStyle(interactorStyle)

    # add slice status message and usage hint message to the renderer
    imageViewer.GetRenderer().AddActor2D(coordTextActor)
    imageViewer.GetRenderer().AddActor2D(sliceTextActor)
    imageViewer.GetRenderer().AddActor2D(usageTextActor)
    imageViewer.GetRenderer().AddActor2D(worldCoordTextActor)

    imageViewer.GetRenderer().AddActor(roiImage)
    #imageViewer.GetRenderer().AddViewProp(stack)

    # initialize rendering and interaction

    #imageViewer.SetSlice(35)

    imageViewer.GetRenderWindow().SetSize(1000, 1000)
    imageViewer.GetRenderer().SetBackground(0.2, 0.3, 0.4)

    imageViewer.GetWindowLevel().SetWindow(1000)
    imageViewer.GetWindowLevel().SetLevel(-1000)

    imageViewer.Render()

    yd = (yMax - yMin + 1) * ySpacing
    xd = (xMax - xMin + 1) * xSpacing

    d = camera.GetDistance()
    camera.SetParallelScale(0.5 * xd)
    camera.SetFocalPoint(center[0], center[1], 0)
    camera.SetPosition(center[0], center[1], +d)

    actions = {}
    actions["Dolly"] = -1
    actions["Cursor"] = 0

    def middlePressCallback(obj, event):
        # if middle + ctrl pressed, zoom in/out
        # otherwise slice through image (handled by mouseMoveCallback)

        if (interactor.GetControlKey()):
            actions["Dolly"] = 0
            interactorStyle.OnRightButtonDown()
        else:
            actions["Dolly"] = 1

    def middleReleaseCallback(obj, event):
        if (actions["Dolly"] == 0):
            interactorStyle.OnRightButtonUp()
        elif (actions["Dolly"] == 1):
            actions["Dolly"] = 0

    def mouseMoveCallback(obj, event):
        # if the middle button is pressed + mouse is moved, slice through image
        # otherwise, update world/pixel coords as mouse is moved

        if (actions["Dolly"] == 1):
            (lastX, lastY) = interactor.GetLastEventPosition()
            (curX, curY) = interactor.GetEventPosition()
            deltaY = curY - lastY

            if (deltaY > 0):
                imageViewer.IncrementSlice(1)
            elif (deltaY < 0):
                imageViewer.IncrementSlice(-1)

            msg = "Slice {} out of {}".format(imageViewer.GetSlice() + 1, \
                                     imageViewer.GetSliceMax() + 1)
            sliceTextMapper.SetInput(msg)
            imageViewer.Render()

        else:

            (mouseX, mouseY) = interactor.GetEventPosition()
            bounds = actor.GetMapper().GetInput().GetBounds()

            testCoord = vtkCoordinate()
            testCoord.SetCoordinateSystemToDisplay()
            testCoord.SetValue(mouseX, mouseY, 0)

            (posX, posY,
             posZ) = testCoord.GetComputedWorldValue(imageViewer.GetRenderer())

            inBounds = True
            if (posX < bounds[0] or posX > bounds[1] or
                    posY < bounds[2] or posY > bounds[3]):
                inBounds = False

            if inBounds:
                wMousePos = "World Coordinates: (" + "{:.2f}".format(
                    posX) + ", " + "{:.2f}".format(
                        posY) + ", " + "{:.2f}".format(posZ) + ")"
                pMousePos = "Pixel Coordinates: (" + "{:.2f}".format(
                    mouseX) + ", " + "{:.2f}".format(mouseY) + ")"
                worldCoordTextMapper.SetInput(wMousePos)
                coordTextMapper.SetInput(pMousePos)

                imageViewer.Render()

            interactorStyle.OnMouseMove()

    def scrollForwardCallback(obj, event):
        # slice through image on scroll, update slice text

        imageViewer.IncrementSlice(1)

        msg = "Slice {} out of {}".format(imageViewer.GetSlice() + 1, \
                                     imageViewer.GetSliceMax() + 1)
        sliceTextMapper.SetInput(msg)
        imageViewer.Render()

    def scrollBackwardCallback(obj, event):
        imageViewer.IncrementSlice(-1)

        msg = "Slice {} out of {}".format(imageViewer.GetSlice() + 1, \
                                     imageViewer.GetSliceMax() + 1)
        sliceTextMapper.SetInput(msg)
        imageViewer.Render()

    def windowModifiedCallback(obj, event):
        # track render window width so coordinate text aligns itself
        # to the right side of the screen

        width = imageViewer.GetRenderWindow().GetSize()[0]
        coordTextActor.SetPosition(width - 550, 10)
        worldCoordTextActor.SetPosition(width - 550, 30)

        imageViewer.Render()

    def keyPressCallback(obj, event):
        # toggle cursor on/off when t key is pressed

        key = interactor.GetKeySym()
        if (key == "t"):
            if (actions["Cursor"] == 0):
                imageViewer.GetRenderWindow().HideCursor()
                actions["Cursor"] = 1
            elif (actions["Cursor"] == 1):
                imageViewer.GetRenderWindow().ShowCursor()
                actions["Cursor"] = 0

    interactorStyle.AddObserver("MiddleButtonPressEvent", middlePressCallback)
    interactorStyle.AddObserver("MiddleButtonReleaseEvent",
                                middleReleaseCallback)
    interactorStyle.AddObserver("MouseMoveEvent", mouseMoveCallback)
    interactorStyle.AddObserver("MouseWheelForwardEvent",
                                scrollForwardCallback)
    interactorStyle.AddObserver("MouseWheelBackwardEvent",
                                scrollBackwardCallback)
    interactorStyle.AddObserver("KeyPressEvent", keyPressCallback)
    imageViewer.GetRenderWindow().AddObserver("ModifiedEvent",
                                              windowModifiedCallback)

    interactor.Start()
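# The listing above relies on module-level VTK imports that are not shown.
# Assuming the usual script layout, the entry point would simply be guarded
# with:
if __name__ == "__main__":
    main()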
Exemplo n.º 33
0
def to_vtk(
        n_array,
        spacing=(1.0, 1.0, 1.0),
        slice_number=0,
        orientation="AXIAL",
        origin=(0, 0, 0),
        padding=(0, 0, 0),
):
    if orientation == "SAGITTAL":
        orientation = "SAGITAL"

    try:
        dz, dy, dx = n_array.shape
    except ValueError:
        dy, dx = n_array.shape
        dz = 1

    px, py, pz = padding

    v_image = numpy_support.numpy_to_vtk(n_array.flat)

    if orientation == "AXIAL":
        extent = (
            0 - px,
            dx - 1 - px,
            0 - py,
            dy - 1 - py,
            slice_number - pz,
            slice_number + dz - 1 - pz,
        )
    elif orientation == "SAGITAL":
        dx, dy, dz = dz, dx, dy
        extent = (
            slice_number - px,
            slice_number + dx - 1 - px,
            0 - py,
            dy - 1 - py,
            0 - pz,
            dz - 1 - pz,
        )
    elif orientation == "CORONAL":
        dx, dy, dz = dx, dz, dy
        extent = (
            0 - px,
            dx - 1 - px,
            slice_number - py,
            slice_number + dy - 1 - py,
            0 - pz,
            dz - 1 - pz,
        )

    # Generating the vtkImageData
    image = vtkImageData()
    image.SetOrigin(origin)
    image.SetSpacing(spacing)
    image.SetDimensions(dx, dy, dz)
    # SetNumberOfScalarComponents and SetScalarType were replaced by
    # AllocateScalars
    #  image.SetNumberOfScalarComponents(1)
    #  image.SetScalarType(numpy_support.get_vtk_array_type(n_array.dtype))
    image.AllocateScalars(numpy_support.get_vtk_array_type(n_array.dtype), 1)
    image.SetExtent(extent)
    image.GetPointData().SetScalars(v_image)

    image_copy = vtkImageData()
    image_copy.DeepCopy(image)

    return image_copy
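# Usage sketch for to_vtk() above. The original snippet omits its module-level
# imports, so the two VTK imports below are assumptions about what the function
# expects to find at module scope:
import numpy as np
from vtkmodules.util import numpy_support
from vtkmodules.vtkCommonDataModel import vtkImageData

slice_2d = np.random.randint(0, 255, size=(512, 512)).astype(np.int16)
axial_image = to_vtk(slice_2d, spacing=(0.5, 0.5, 1.0), slice_number=0,
                     orientation="AXIAL")
print(axial_image.GetDimensions())  # expected: (512, 512, 1)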
Exemplo n.º 34
0
    def transform_scalars(self, dataset, minimum_radius=0.5, maximum_radius=6.):
        """Segment pores. The pore size must be greater than the minimum radius
        and less than the maximum radius.  Pores will be separated according to
        the minimum radius."""

        # Initial progress
        self.progress.value = 0
        self.progress.maximum = 100

        # Approximate percentage of work completed after each step in the
        # transform
        step_pct = iter([5, 5, 5, 5, 5, 5, 5, 5, 5, 30, 10, 5, 5, 5])

        try:
            import itk
            from vtkmodules.vtkCommonDataModel import vtkImageData
            from tomviz import itkutils
            from tomviz import utils
            import numpy as np
        except Exception as exc:
            print("Could not import necessary module(s)")
            raise exc

        # Return values
        returnValues = None

        # Add a try/except around the ITK portion. ITK exceptions are
        # passed up to the Python layer, so we can at least report what
        # went wrong with the script, e.g., unsupported image type.
        try:
            self.progress.message = "Converting data to ITK image"

            # Get the ITK image
            itk_input_image = itkutils.convert_vtk_to_itk_image(dataset)
            self.progress.value = next(step_pct)

            # Reduce noise
            smoothed = median_filter(self, step_pct, itk_input_image)

            # Enhance pore contrast
            enhanced = unsharp_mask(self, step_pct, smoothed)

            thresholded = threshold(self, step_pct, enhanced)

            dimension = itk_input_image.GetImageDimension()
            spacing = itk_input_image.GetSpacing()
            closing_radius = itk.Size[dimension]()
            closing_radius.Fill(1)
            for dim in range(dimension):
                radius = int(np.round(maximum_radius / spacing[dim]))
                if radius > closing_radius[dim]:
                    closing_radius[dim] = radius
            StructuringElementType = itk.FlatStructuringElement[dimension]
            structuring_element = \
                StructuringElementType.Ball(closing_radius)
            particle_mask = morphological_closing(self, step_pct, thresholded,
                                                  structuring_element)

            encapsulated = encapsulate(self, step_pct, thresholded,
                                       particle_mask, structuring_element)

            distance = get_distance(self, step_pct, encapsulated)

            segmented = watershed(self, step_pct, distance, minimum_radius)

            inverted = invert(self, step_pct, thresholded)

            segmented.DisconnectPipeline()
            inverted.DisconnectPipeline()
            separated = apply_mask(self, step_pct, segmented, inverted)

            separated.DisconnectPipeline()
            particle_mask.DisconnectPipeline()
            in_particles = apply_mask(self, step_pct, separated, particle_mask)

            opening_radius = itk.Size[dimension]()
            opening_radius.Fill(1)
            for dim in range(dimension):
                radius = int(np.round(minimum_radius / spacing[dim]))
                if radius > opening_radius[dim]:
                    opening_radius[dim] = radius
            structuring_element = \
                StructuringElementType.Ball(opening_radius)
            opened = opening_by_reconstruction(self, step_pct, in_particles,
                                               structuring_element)

            self.progress.message = "Saving results"

            label_buffer = itk.PyBuffer[type(opened)] \
                .GetArrayFromImage(opened)

            # temp
            label_buffer = label_buffer.copy()
            label_map_dataset = vtkImageData()
            label_map_dataset.CopyStructure(dataset)
            utils.set_array(label_map_dataset, label_buffer, isFortran=False)

            # Set up dictionary to return operator results
            returnValues = {}
            returnValues["label_map"] = label_map_dataset

        except Exception as exc:
            print("Problem encountered while running %s" %
                  self.__class__.__name__)
            raise exc

        return returnValues
Exemplo n.º 35
0
    def transform_scalars(self,
                          dataset,
                          lower_threshold=40.0,
                          upper_threshold=255.0):
        """This filter computes a binary threshold on the data set and
        stores the result in a child data set. It does not modify the dataset
        passed in."""

        # Initial progress
        self.progress.value = 0
        self.progress.maximum = 100

        # Approximate percentage of work completed after each step in the
        # transform
        STEP_PCT = [20, 40, 75, 90, 100]

        # Set up return value
        returnValue = None

        # Try imports to make sure we have everything that is needed
        try:
            self.progress.message = "Loading modules"
            import itk
            from vtkmodules.vtkCommonDataModel import vtkImageData
            from tomviz import itkutils
        except Exception as exc:
            print("Could not import necessary module(s)")
            raise exc

        # Add a try/except around the ITK portion. ITK exceptions are
        # passed up to the Python layer, so we can at least report what
        # went wrong with the script, e.g., unsupported image type.
        try:
            self.progress.value = STEP_PCT[0]
            self.progress.message = "Converting data to ITK image"

            # Get the ITK image
            itk_image = itkutils.convert_vtk_to_itk_image(dataset)
            itk_input_image_type = type(itk_image)
            self.progress.value = STEP_PCT[1]
            self.progress.message = "Running filter"

            # We change the output type to unsigned char 3D
            # (itk.Image.UC3) to save memory in the output label map
            # representation.
            itk_output_image_type = itk.Image.UC3

            # ITK's BinaryThresholdImageFilter does the hard work
            threshold_filter = itk.BinaryThresholdImageFilter[
                itk_input_image_type, itk_output_image_type].New()
            python_cast = itkutils.get_python_voxel_type(itk_image)
            threshold_filter.SetLowerThreshold(python_cast(lower_threshold))
            threshold_filter.SetUpperThreshold(python_cast(upper_threshold))
            threshold_filter.SetInsideValue(1)
            threshold_filter.SetOutsideValue(0)
            threshold_filter.SetInput(itk_image)
            itkutils.observe_filter_progress(self, threshold_filter,
                                             STEP_PCT[2], STEP_PCT[3])

            try:
                threshold_filter.Update()
            except RuntimeError:
                return returnValue

            self.progress.message = "Creating child data set"

            # Set the output as a new child data object of the current data set
            label_map_dataset = vtkImageData()
            label_map_dataset.CopyStructure(dataset)

            itkutils.set_array_from_itk_image(label_map_dataset,
                                              threshold_filter.GetOutput())
            self.progress.value = STEP_PCT[4]

            returnValue = {"thresholded_segmentation": label_map_dataset}

        except Exception as exc:
            print("Problem encountered while running %s" %
                  self.__class__.__name__)
            raise exc

        return returnValue
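# Side note (an illustration, not part of the tomviz operator): with ITK >= 5
# the same binary threshold can usually be written with ITK's snake_case
# functional API, which hides the explicit template selection used above.
import itk
import numpy as np

volume = itk.image_from_array(np.random.rand(16, 16, 16).astype(np.float32))
labels = itk.binary_threshold_image_filter(
    volume,
    lower_threshold=0.4,
    upper_threshold=1.0,
    inside_value=1,
    outside_value=0,
)
# Count the voxels that fell inside the [0.4, 1.0] band.
print(itk.array_from_image(labels).sum())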
Exemplo n.º 36
0
    def transform_scalars(self, dataset, number_of_thresholds=1,
                          enable_valley_emphasis=False):
        """This filter performs semi-automatic multithresholding of a data set.
        Voxels are automatically classified into a chosen number of classes such
        that inter-class variance of the voxel values is minimized. The output
        is a label map with one label per voxel class.
        """

        # Initial progress
        self.progress.value = 0
        self.progress.maximum = 100

        # Approximate percentage of work completed after each step in the
        # transform
        STEP_PCT = [10, 20, 70, 90, 100]

        try:
            import itk
            import itkExtras
            import itkTypes
            from vtkmodules.vtkCommonDataModel import vtkImageData
            from tomviz import itkutils
            from tomviz import utils
        except Exception as exc:
            print("Could not import necessary module(s)")
            raise exc

        # Return values
        returnValues = None

        # Add a try/except around the ITK portion. ITK exceptions are
        # passed up to the Python layer, so we can at least report what
        # went wrong with the script, e.g., unsupported image type.
        try:
            self.progress.value = STEP_PCT[0]
            self.progress.message = "Converting data to ITK image"

            # Get the ITK image
            itk_image = itkutils.convert_vtk_to_itk_image(dataset)
            itk_input_image_type = type(itk_image)

            # OtsuMultipleThresholdsImageFilter's wrapping requires that the
            # input and output image types be the same.
            itk_threshold_image_type = itk_input_image_type

            # Otsu multiple threshold filter
            otsu_filter = itk.OtsuMultipleThresholdsImageFilter[
                itk_input_image_type, itk_threshold_image_type].New()
            otsu_filter.SetNumberOfThresholds(number_of_thresholds)
            otsu_filter.SetValleyEmphasis(enable_valley_emphasis)
            otsu_filter.SetInput(itk_image)
            itkutils.observe_filter_progress(self, otsu_filter,
                                             STEP_PCT[1], STEP_PCT[2])

            try:
                otsu_filter.Update()
            except RuntimeError:
                return

            print("Otsu threshold(s): %s" % (otsu_filter.GetThresholds(),))

            itk_image_data = otsu_filter.GetOutput()

            # Cast threshold output to an integral type if needed.
            py_buffer_type = itk_threshold_image_type
            voxel_type = itkExtras.template(itk_threshold_image_type)[1][0]
            if voxel_type is itkTypes.F or voxel_type is itkTypes.D:
                self.progress.message = "Casting output to integral type"

                # Unsigned char supports 256 labels, or 255 threshold levels.
                # This should be sufficient for all but the most unusual use
                # cases.
                py_buffer_type = itk.Image.UC3
                caster = itk.CastImageFilter[itk_threshold_image_type,
                                             py_buffer_type].New()
                caster.SetInput(itk_image_data)
                itkutils.observe_filter_progress(self, caster,
                                                 STEP_PCT[2], STEP_PCT[3])

                try:
                    caster.Update()
                except RuntimeError:
                    return

                itk_image_data = caster.GetOutput()

            self.progress.value = STEP_PCT[3]
            self.progress.message = "Saving results"

            label_buffer = itk.PyBuffer[py_buffer_type] \
                .GetArrayFromImage(itk_image_data)

            label_map_dataset = vtkImageData()
            label_map_dataset.CopyStructure(dataset)
            utils.set_array(label_map_dataset, label_buffer, isFortran=False)

            self.progress.value = STEP_PCT[4]

            # Set up dictionary to return operator results
            returnValues = {}
            returnValues["label_map"] = label_map_dataset

        except Exception as exc:
            print("Problem encountered while running %s" %
                  self.__class__.__name__)
            raise exc

        return returnValues
Exemplo n.º 37
0
    def transform_scalars(self,
                          dataset,
                          number_of_thresholds=1,
                          enable_valley_emphasis=False):
        """This filter performs semi-automatic multithresholding of a data set.
        Voxels are automatically classified into a chosen number of classes such
        that inter-class variance of the voxel values is minimized. The output
        is a label map with one label per voxel class.
        """

        # Initial progress
        self.progress.value = 0
        self.progress.maximum = 100

        # Approximate percentage of work completed after each step in the
        # transform
        STEP_PCT = [10, 20, 70, 90, 100]

        try:
            import itk
            import itkExtras
            import itkTypes
            from vtkmodules.vtkCommonDataModel import vtkImageData
            from tomviz import itkutils
            from tomviz import utils
        except Exception as exc:
            print("Could not import necessary module(s)")
            raise exc

        # Return values
        returnValues = None

        # Add a try/except around the ITK portion. ITK exceptions are
        # passed up to the Python layer, so we can at least report what
        # went wrong with the script, e.g., unsupported image type.
        try:
            self.progress.value = STEP_PCT[0]
            self.progress.message = "Converting data to ITK image"

            # Get the ITK image
            itk_image = itkutils.convert_vtk_to_itk_image(dataset)
            itk_input_image_type = type(itk_image)

            # OtsuMultipleThresholdsImageFilter's wrapping requires that the
            # input and output image types be the same.
            itk_threshold_image_type = itk_input_image_type

            # Otsu multiple threshold filter
            otsu_filter = itk.OtsuMultipleThresholdsImageFilter[
                itk_input_image_type, itk_threshold_image_type].New()
            otsu_filter.SetNumberOfThresholds(number_of_thresholds)
            otsu_filter.SetValleyEmphasis(enable_valley_emphasis)
            otsu_filter.SetInput(itk_image)
            itkutils.observe_filter_progress(self, otsu_filter, STEP_PCT[1],
                                             STEP_PCT[2])

            try:
                otsu_filter.Update()
            except RuntimeError:
                return

            print("Otsu threshold(s): %s" % (otsu_filter.GetThresholds(), ))

            itk_image_data = otsu_filter.GetOutput()

            # Cast threshold output to an integral type if needed.
            py_buffer_type = itk_threshold_image_type
            voxel_type = itkExtras.template(itk_threshold_image_type)[1][0]
            if voxel_type is itkTypes.F or voxel_type is itkTypes.D:
                self.progress.message = "Casting output to integral type"

                # Unsigned char supports 256 labels, or 255 threshold levels.
                # This should be sufficient for all but the most unusual use
                # cases.
                py_buffer_type = itk.Image.UC3
                caster = itk.CastImageFilter[itk_threshold_image_type,
                                             py_buffer_type].New()
                caster.SetInput(itk_image_data)
                itkutils.observe_filter_progress(self, caster, STEP_PCT[2],
                                                 STEP_PCT[3])

                try:
                    caster.Update()
                except RuntimeError:
                    return

                itk_image_data = caster.GetOutput()

            self.progress.value = STEP_PCT[3]
            self.progress.message = "Saving results"

            label_buffer = itk.PyBuffer[py_buffer_type] \
                .GetArrayFromImage(itk_image_data)

            label_map_dataset = vtkImageData()
            label_map_dataset.CopyStructure(dataset)
            utils.set_array(label_map_dataset, label_buffer, isFortran=False)

            self.progress.value = STEP_PCT[4]

            # Set up dictionary to return operator results
            returnValues = {}
            returnValues["label_map"] = label_map_dataset

        except Exception as exc:
            print("Problem encountered while running %s" %
                  self.__class__.__name__)
            raise exc

        return returnValues
Exemplo n.º 38
0
    def transform_scalars(self, dataset, lower_threshold=40.0,
                          upper_threshold=255.0):
        """This filter computes a binary threshold on the data set and
        stores the result in a child data set. It does not modify the dataset
        passed in."""

        # Initial progress
        self.progress.value = 0
        self.progress.maximum = 100

        # Approximate percentage of work completed after each step in the
        # transform
        STEP_PCT = [20, 40, 75, 90, 100]

        # Set up return value
        returnValue = None

        # Try imports to make sure we have everything that is needed
        try:
            self.progress.message = "Loading modules"
            import itk
            from vtkmodules.vtkCommonDataModel import vtkImageData
            from tomviz import itkutils
        except Exception as exc:
            print("Could not import necessary module(s)")
            raise exc

        # Add a try/except around the ITK portion. ITK exceptions are
        # passed up to the Python layer, so we can at least report what
        # went wrong with the script, e.g., unsupported image type.
        try:
            self.progress.value = STEP_PCT[0]
            self.progress.message = "Converting data to ITK image"

            # Get the ITK image
            itk_image = itkutils.convert_vtk_to_itk_image(dataset)
            itk_input_image_type = type(itk_image)
            self.progress.value = STEP_PCT[1]
            self.progress.message = "Running filter"

            # We change the output type to unsigned char 3D
            # (itk.Image.UC3) to save memory in the output label map
            # representation.
            itk_output_image_type = itk.Image.UC3

            # ITK's BinaryThresholdImageFilter does the hard work
            threshold_filter = itk.BinaryThresholdImageFilter[
                itk_input_image_type, itk_output_image_type].New()
            python_cast = itkutils.get_python_voxel_type(itk_image)
            threshold_filter.SetLowerThreshold(python_cast(lower_threshold))
            threshold_filter.SetUpperThreshold(python_cast(upper_threshold))
            threshold_filter.SetInsideValue(1)
            threshold_filter.SetOutsideValue(0)
            threshold_filter.SetInput(itk_image)
            itkutils.observe_filter_progress(self, threshold_filter,
                                             STEP_PCT[2], STEP_PCT[3])

            try:
                threshold_filter.Update()
            except RuntimeError:
                return returnValue

            self.progress.message = "Creating child data set"

            # Set the output as a new child data object of the current data set
            label_map_dataset = vtkImageData()
            label_map_dataset.CopyStructure(dataset)

            itkutils.set_array_from_itk_image(label_map_dataset,
                                              threshold_filter.GetOutput())
            self.progress.value = STEP_PCT[4]

            returnValue = {
                "thresholded_segmentation": label_map_dataset
            }

        except Exception as exc:
            print("Problem encountered while running %s" %
                  self.__class__.__name__)
            raise exc

        return returnValue
Exemplo n.º 39
0
def ascent_to_vtk(node, topology=None, extent=None):
    '''
    Read from Ascent node ("topologies/" + topology) into VTK data.
    topology is one of the names returned by topology_names(node) or
    topology_names(node)[0] if the parameter is None
    '''
    global _keep_around
    # we use the same Python interpreter between time steps
    _keep_around = []
    if topology is None:
        topology = topology_names(node)[0]
    data = None
    coords = node["topologies/" + topology + "/coordset"]
    if (node["topologies/" + topology + "/type"] == "uniform"):
        # tested with noise
        data = vtkImageData()
        origin = np.array([
            float(o) for o in [
                node["coordsets/" + coords + "/origin/x"],
                node["coordsets/" + coords + "/origin/y"],
                node["coordsets/" + coords + "/origin/z"],
            ]
        ])
        spacing = np.array([
            float(s) for s in [
                node["coordsets/" + coords + "/spacing/dx"],
                node["coordsets/" + coords + "/spacing/dy"],
                node["coordsets/" + coords + "/spacing/dz"],
            ]
        ])
        if extent is None:
            data.SetDimensions(node["coordsets/" + coords + "/dims/i"],
                               node["coordsets/" + coords + "/dims/j"],
                               node["coordsets/" + coords + "/dims/k"])
            data.SetOrigin(origin)
        else:
            data.SetExtent(extent)
            origin = origin - np.array(
                [extent[0], extent[2], extent[4]]) * spacing
            data.SetOrigin(origin)
        data.SetSpacing(spacing)
    elif (node["topologies/" + topology + "/type"] == "rectilinear"):
        # tested on cloverleaf3d and kripke
        data = vtkRectilinearGrid()
        xn = node["coordsets/" + coords + "/values/x"]
        xa = numpy_support.numpy_to_vtk(xn)
        data.SetXCoordinates(xa)

        yn = node["coordsets/" + coords + "/values/y"]
        ya = numpy_support.numpy_to_vtk(yn)
        data.SetYCoordinates(ya)

        zn = node["coordsets/" + coords + "/values/z"]
        za = numpy_support.numpy_to_vtk(zn)
        data.SetZCoordinates(za)
        if (extent is None):
            data.SetDimensions(xa.GetNumberOfTuples(), ya.GetNumberOfTuples(),
                               za.GetNumberOfTuples())
        else:
            data.SetExtent(extent)
    elif (node["coordsets/" + coords + "/type"] == "explicit"):
        xn = node["coordsets/" + coords + "/values/x"]
        yn = node["coordsets/" + coords + "/values/y"]
        zn = node["coordsets/" + coords + "/values/z"]
        xyzn = np.stack((xn, yn, zn), axis=1)
        _keep_around.append(xyzn)
        xyza = numpy_support.numpy_to_vtk(xyzn)
        points = vtkPoints()
        points.SetData(xyza)
        if (node["topologies/" + topology + "/type"] == "structured"):
            # tested on lulesh
            data = vtkStructuredGrid()
            data.SetPoints(points)
            # elements are one less than points
            nx = node["topologies/" + topology + "/elements/dims/i"] + 1
            ny = node["topologies/" + topology + "/elements/dims/j"] + 1
            nz = node["topologies/" + topology + "/elements/dims/k"] + 1
            data.SetDimensions(nx, ny, nz)
        elif (node["topologies/" + topology + "/type"] == "unstructured"):
            # tested with laghos
            data = vtkUnstructuredGrid()
            data.SetPoints(points)
            shape = node["topologies/" + topology + "/elements/shape"]
            c = node["topologies/" + topology + "/elements/connectivity"]
            # vtkIdType is int64
            c = c.astype(np.int64)
            if (shape == "hex"):
                npoints = 8
                cellType = vtkConstants.VTK_HEXAHEDRON
            elif (shape == "quad"):
                npoints = 4
                cellType = vtkConstants.VTK_QUAD
            else:
                print("Error: Shape not implemented")
                return None
            c = c.reshape(c.shape[0] // npoints, npoints)
            # insert the number of points before point references
            c = np.insert(c, 0, npoints, axis=1)
            ncells = c.shape[0]
            c = c.flatten()
            _keep_around.append(c)
            ita = numpy_support.numpy_to_vtkIdTypeArray(c)
            cells = vtkCellArray()
            cells.SetCells(ncells, ita)
            data.SetCells(cellType, cells)
    read_fields(node, topology, data)
    return data
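# Usage sketch (hypothetical): convert one published Conduit domain and dump it
# with a legacy VTK writer for inspection. `node` is assumed to be the Conduit
# node Ascent hands to a Python extract for the current domain; the writer
# choice is arbitrary and handles every dataset type returned above.
from vtkmodules.vtkIOLegacy import vtkDataSetWriter

def dump_domain(node, filename="domain.vtk"):
    data = ascent_to_vtk(node)  # uses the first topology by default
    writer = vtkDataSetWriter()
    writer.SetFileName(filename)
    writer.SetInputData(data)
    writer.Write()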
Exemplo n.º 40
0
    def transform_scalars(self, dataset):
        """3D Reconstruct from a tilt series using Direct Fourier Method"""

        self.progress.maximum = 1

        # Get Tilt angles
        tiltAngles = utils.get_tilt_angles(dataset)

        tiltSeries = utils.get_array(dataset)
        if tiltSeries is None:
            raise RuntimeError("No scalars found!")

        tiltSeries = np.double(tiltSeries)
        (Nx, Ny, Nproj) = tiltSeries.shape
        Npad = Ny * 2

        tiltAngles = np.double(tiltAngles)
        pad_pre = int(np.ceil((Npad - Ny) / 2.0))
        pad_post = int(np.floor((Npad - Ny) / 2.0))

        # Initialization
        self.progress.message = 'Initialization'
        Nz = Ny
        w = np.zeros((Nx, Ny, Nz // 2 + 1)) #store weighting factors
        v = pyfftw.empty_aligned(
            (Nx, Ny, Nz // 2 + 1), dtype='complex64', n=16)

        p = pyfftw.empty_aligned((Nx, Npad), dtype='float32', n=16)
        pF = pyfftw.empty_aligned(
            (Nx, Npad // 2 + 1), dtype='complex64', n=16)
        p_fftw_object = pyfftw.FFTW(p, pF, axes=(0, 1))

        dk = np.double(Ny) / np.double(Npad)

        self.progress.maximum = Nproj + 1
        step = 0

        t0 = time.time()
        etcMessage = 'Estimated time to complete: n/a'
        counter = 1
        for a in range(Nproj):
            if self.canceled:
                return
            self.progress.message = 'Tilt image No.%d/%d. ' % (
                a + 1, Nproj) + etcMessage

            ang = tiltAngles[a] * np.pi / 180
            projection = tiltSeries[:, :, a] #2D projection image
            p = np.lib.pad(projection, ((0, 0), (pad_pre, pad_post)),
                           'constant', constant_values=(0, 0)) #pad zeros
            p = np.float32(np.fft.ifftshift(p))
            p_fftw_object.update_arrays(p, pF)
            p_fftw_object()
            p = None #Garbage collector (gc)

            if ang < 0:
                pF = np.conj(pF)
                pF[1:, :] = np.flipud(pF[1:, :])
                ang = np.pi + ang

            # Bilinear extrapolation
            for i in range(0, int(np.ceil(Npad / 2)) + 1):
                ky = i * dk
                #kz = 0
                ky_new = np.cos(ang) * ky #new coord. after rotation
                kz_new = np.sin(ang) * ky
                sy = abs(np.floor(ky_new) - ky_new) #calculate weights
                sz = abs(np.floor(kz_new) - kz_new)
                for b in range(1, 5): #bilinear extrapolation
                    pz, py, weight = bilinear(kz_new, ky_new, sz, sy, Ny, b)
                    if (py >= 0 and py < Ny and pz >= 0 and pz < Nz / 2 + 1):
                        w[:, py, pz] = w[:, py, pz] + weight
                        v[:, py, pz] = v[:, py, pz] + \
                            weight * pF[:, i]
            step += 1
            self.progress.value = step
            timeLeft = (time.time() - t0) / counter * (Nproj - counter)
            counter += 1
            timeLeftMin, timeLeftSec = divmod(timeLeft, 60)
            timeLeftHour, timeLeftMin = divmod(timeLeftMin, 60)
            etcMessage = 'Estimated time to complete: %02d:%02d:%02d' % (
                timeLeftHour, timeLeftMin, timeLeftSec)

        p = pF = None #gc

        self.progress.message = 'Inverse Fourier transform'
        v_temp = v.copy()
        recon = pyfftw.empty_aligned(
            (Nx, Ny, Nz), dtype='float32', order='F', n=16)
        recon_fftw_object = pyfftw.FFTW(
            v_temp, recon, direction='FFTW_BACKWARD', axes=(0, 1, 2))
        v[w != 0] = v[w != 0] / w[w != 0]
        recon_fftw_object.update_arrays(v, recon)
        v = v_temp = []    #gc
        recon_fftw_object()
        recon[:] = np.fft.fftshift(recon)

        step += 1
        self.progress.value = step

        self.progress.message = 'Passing data to Tomviz'
        from vtkmodules.vtkCommonDataModel import vtkImageData
        recon_dataset = vtkImageData()
        recon_dataset.CopyStructure(dataset)
        utils.set_array(recon_dataset, recon)
        utils.mark_as_volume(recon_dataset)

        recon = None #gc

        returnValues = {}
        returnValues["reconstruction"] = recon_dataset
        return returnValues
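# bilinear() is another helper defined elsewhere in the operator module. One
# plausible reading of its contract (a hypothetical sketch, not the tomviz
# code): for b = 1..4 return one of the four Fourier-grid points surrounding
# the rotated coordinate (ky_new, kz_new) together with its bilinear weight,
# leaving the caller's bounds check to discard out-of-range points. N (= Ny)
# is accepted only for signature compatibility here.
def bilinear(kz_new, ky_new, sz, sy, N, b):
    import numpy as np
    if b == 1:
        pz, py, weight = np.floor(kz_new), np.floor(ky_new), (1 - sz) * (1 - sy)
    elif b == 2:
        pz, py, weight = np.ceil(kz_new), np.floor(ky_new), sz * (1 - sy)
    elif b == 3:
        pz, py, weight = np.floor(kz_new), np.ceil(ky_new), (1 - sz) * sy
    else:
        pz, py, weight = np.ceil(kz_new), np.ceil(ky_new), sz * sy
    return int(pz), int(py), weight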
Exemplo n.º 41
0
    def test(self):

        p = dm.vtkPartitionedDataSet()

        s = ic.vtkRTAnalyticSource()
        s.SetWholeExtent(0, 10, 0, 10, 0, 5)
        s.Update()

        p1 = dm.vtkImageData()
        p1.ShallowCopy(s.GetOutput())

        s.SetWholeExtent(0, 10, 0, 10, 5, 10)
        s.Update()

        p2 = dm.vtkImageData()
        p2.ShallowCopy(s.GetOutput())

        p.SetPartition(0, p1)
        p.SetPartition(1, p2)

        p2 = dm.vtkPartitionedDataSet()
        p2.ShallowCopy(p)

        c = dm.vtkPartitionedDataSetCollection()
        c.SetPartitionedDataSet(0, p)
        c.SetPartitionedDataSet(1, p2)

        # SimpleFilter:
        sf = SimpleFilter()
        sf.SetInputDataObject(c)
        sf.Update()
        self.assertEqual(
            sf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 2)
        for i in (0, 1):
            pdsc = sf.GetOutputDataObject(0)
            self.assertEqual(pdsc.GetClassName(),
                             "vtkPartitionedDataSetCollection")
            pds = pdsc.GetPartitionedDataSet(i)
            self.assertEqual(pds.GetClassName(), "vtkPartitionedDataSet")
            self.assertEqual(pds.GetNumberOfPartitions(), 2)
            for j in (0, 1):
                part = pds.GetPartition(j)
                countArray = part.GetFieldData().GetArray("counter")
                info = countArray.GetInformation()
                self.assertEqual(countArray.GetValue(0), i * 2 + j)
                self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                                 "vtkImageData")

        # PartitionAwareFilter
        pf = PartitionAwareFilter()
        pf.SetInputDataObject(c)
        pf.Update()
        self.assertEqual(
            pf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 2)
        for i in (0, 1):
            pdsc = pf.GetOutputDataObject(0)
            self.assertEqual(pdsc.GetClassName(),
                             "vtkPartitionedDataSetCollection")
            pds = pdsc.GetPartitionedDataSet(i)
            self.assertEqual(pds.GetClassName(), "vtkPartitionedDataSet")
            self.assertEqual(pds.GetNumberOfPartitions(), 0)
            countArray = pds.GetFieldData().GetArray("counter")
            info = countArray.GetInformation()
            self.assertEqual(countArray.GetValue(0), i)
            self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                             "vtkPartitionedDataSet")

        # PartitionCollectionAwareFilter
        pcf = PartitionCollectionAwareFilter()
        pcf.SetInputDataObject(c)
        pcf.Update()
        self.assertEqual(
            pcf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 0)
        pdsc = pcf.GetOutputDataObject(0)
        self.assertEqual(pdsc.GetClassName(),
                         "vtkPartitionedDataSetCollection")
        countArray = pdsc.GetFieldData().GetArray("counter")
        info = countArray.GetInformation()
        self.assertEqual(countArray.GetValue(0), 0)
        self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                         "vtkPartitionedDataSetCollection")

        # CompositeAwareFilter
        cf = CompositeAwareFilter()
        cf.SetInputDataObject(c)
        cf.Update()
        self.assertEqual(
            cf.GetOutputDataObject(0).GetNumberOfPartitionedDataSets(), 0)
        pdsc = cf.GetOutputDataObject(0)
        self.assertEqual(pdsc.GetClassName(),
                         "vtkPartitionedDataSetCollection")
        countArray = pdsc.GetFieldData().GetArray("counter")
        info = countArray.GetInformation()
        self.assertEqual(countArray.GetValue(0), 0)
        self.assertEqual(info.Get(dm.vtkDataObject.DATA_TYPE_NAME()),
                         "vtkPartitionedDataSetCollection")
Exemplo n.º 42
0
    def run(self):
        global vtk_error

        # ----- verify extension ------------------
        extension = VerifyDataType(self.filepath)

        file_name = self.filepath.split(os.path.sep)[-1]

        n_array = ReadBitmap(self.filepath)

        if not (isinstance(n_array, numpy.ndarray)):
            return False

        image = converters.to_vtk(n_array,
                                  spacing=(1, 1, 1),
                                  slice_number=1,
                                  orientation="AXIAL")

        dim = image.GetDimensions()
        x = dim[0]
        y = dim[1]

        img = vtkImageResample()
        img.SetInputData(image)
        img.SetAxisMagnificationFactor(0, 0.25)
        img.SetAxisMagnificationFactor(1, 0.25)
        img.SetAxisMagnificationFactor(2, 1)
        img.Update()

        tp = img.GetOutput().GetScalarTypeAsString()

        image_copy = vtkImageData()
        image_copy.DeepCopy(img.GetOutput())

        thumbnail_path = tempfile.mktemp()

        write_png = vtkPNGWriter()
        write_png.SetInputConnection(img.GetOutputPort())
        write_png.AddObserver("WarningEvent", VtkErrorPNGWriter)
        write_png.SetFileName(thumbnail_path)
        write_png.Write()

        if vtk_error:
            img = vtkImageCast()
            img.SetInputData(image_copy)
            img.SetOutputScalarTypeToUnsignedShort()
            # img.SetClampOverflow(1)
            img.Update()

            write_png = vtkPNGWriter()
            write_png.SetInputConnection(img.GetOutputPort())
            write_png.SetFileName(thumbnail_path)
            write_png.Write()

            vtk_error = False

        id = wx.NewId()

        bmp_item = [
            self.filepath,
            thumbnail_path,
            extension,
            x,
            y,
            str(x) + " x " + str(y),
            file_name,
            id,
        ]
        self.bmp_file.Add(bmp_item)
Exemplo n.º 43
0
reslice = vtkImageReslice()
reslice.SetInputConnection(0, reader.GetOutputPort())
reslice.SetOutputDimensionality(2)
#reslice.SetMagnificationFactors(2, 0, 0)
reslice.SetResliceAxes(axial)

reslice2 = vtkImageReslice()
reslice2.SetInputConnection(0, reader.GetOutputPort())
reslice2.SetOutputDimensionality(2)
reslice2.SetResliceAxes(reslice.GetResliceAxes())

reslice.SetInterpolationModeToLinear()
reslice2.SetInterpolationModeToLinear()

image = reader.GetOutput()
roiData = vtkImageData()
roiData.DeepCopy(image)
extent = roiData.GetExtent()

print("generating ROI...")
"""
for i in range(extent[0], extent[1]):
    for j in range(extent[2], extent[3]):
        for k in range(extent[4], extent[5]):
            if image.GetScalarComponentAsDouble(i, j, k, 0) > -100:
                roiData.SetScalarComponentFromDouble(i, j, k, 0, 1.0)
                #roiData.SetScalarComponentFromDouble(0, i, j, k, 1)
                
            else:   #just in case
                roiData.SetScalarComponentFromDouble(i, j, k, 0, 0.0)
                #roiData.SetScalarComponentFromDouble(0, i, j, k, 0.0)