Example #1
 def __init__(self, module_manager):
     SimpleVTKClassModuleBase.__init__(
         self, module_manager,
         vtk.vtkQuadricClustering(), 'Processing.',
         ('vtkPolyData',), ('vtkPolyData',),
         replaceDoc=True,
         inputFunctions=None, outputFunctions=None)
Example #2
def vtk_quadric_clustering(inputdata,
                           divisions=[50, 50, 50],
                           division_spacing=None,
                           auto_adjust_divisions=True,
                           use_input_points=False,
                           verbose=False):
    """Wrapper for VTK QuadricClustering

    Based on Lindstrom (2000)
    See documentation: https://www.vtk.org/doc/nightly/html/classvtkQuadricClustering.html#details

    Arguments:
        inputdata (vtk.vtkPolyData): vtk object containing vertices and simplices
        divisions ([int,int,int]): number of subdivisions in the x, y and z directions (default [50, 50, 50]).
        division_spacing ([double, double, double]): alternative way to set up the bins. If you are trying to match boundaries between pieces, use this rather than the number of divisions; it specifies the spacing of the spatial binning (default None; the filter's own default is [1., 1., 1.]).
        auto_adjust_divisions (bool): Enable automatic adjustment of number of divisions. If off, the number of divisions specified by the user is always used (as long as it is valid) (default True).
        use_input_points (bool): Normally the point that minimizes the quadric error function is used as the output of the bin. When this flag is on, the bin point is forced to be one of the points from the input (the one with the smallest error) (default False).
        verbose (bool): Print out steps and basic statistics (default False).

    Returns:
        decimatedPoly: vtkPolyData object
    """

    title_string = 'VTK Quadric Clustering'

    if not isinstance(inputdata, vtkPolyData):
        raise TypeError(
            'Unknown dtype for inputdata! vtk.vtkPolyData expected.')

    inputPoly = vtkPolyData()
    inputPoly.ShallowCopy(inputdata)

    decimate = vtkQuadricClustering()
    decimate.SetInputData(inputPoly)

    # set parameters
    # apparently, AutoAdjustNumberOfDivisions does not affect DivisionSpacing
    if division_spacing is None:
        decimate.SetNumberOfDivisions(divisions)
        decimate.SetAutoAdjustNumberOfDivisions(auto_adjust_divisions)
    else:
        decimate.SetDivisionSpacing(division_spacing)

    decimate.SetUseInputPoints(use_input_points)

    # execute
    decimate.Update()

    decimatedPoly = vtkPolyData()
    decimatedPoly.ShallowCopy(decimate.GetOutput())

    if verbose:
        print_decimation_result(title_string, inputPoly, decimatedPoly,
                                ('(auto-adjusted) divisions:' +
                                 str(decimate.GetNumberOfDivisions())))

    return decimatedPoly
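A minimal usage sketch for the wrapper above (my own addition, assuming the function and the vtk module are importable in the same session; the sphere source is just a stand-in for real data). It shows the two binning modes: passing divisions, or passing division_spacing instead.

import vtk

# Hypothetical input: a finely tessellated sphere standing in for real data.
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(64)
sphere.SetPhiResolution(64)
sphere.Update()

# Bin the mesh into at most 20 x 20 x 20 clusters...
reduced = vtk_quadric_clustering(sphere.GetOutput(), divisions=[20, 20, 20])

# ...or specify the spatial size of each bin instead (divisions is then ignored).
reduced_spaced = vtk_quadric_clustering(sphere.GetOutput(),
                                        division_spacing=[0.1, 0.1, 0.1])

print(reduced.GetNumberOfPoints(), reduced_spaced.GetNumberOfPoints())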
Example #3
 def Cluster(self, i):
     self.inputPolyData.GetBounds(self.bounds)
     self.xRange = (self.bounds[1]-self.bounds[0])
     self.yRange = (self.bounds[3]-self.bounds[2])
     self.zRange = (self.bounds[5]-self.bounds[4])
     #Decimation Starts Here
     self.decimate = vtk.vtkQuadricClustering()
     self.decimate.AutoAdjustNumberOfDivisionsOff()
     self.decimate.UseInputPointsOn()
     self.decimate.SetNumberOfDivisions(int(self.xRange/(0.001*i)), int(self.yRange/(0.001*i)), int(self.zRange/(0.001*i)))
     self.decimate.SetInputData(self.inputPolyData)        
     self.decimate.Update()
     self.NumberOfDivisions = [0, 0, 0]  # mutable list, filled in place by GetNumberOfDivisions
     self.decimate.GetNumberOfDivisions(self.NumberOfDivisions)
     self.XBinSize = self.xRange/self.NumberOfDivisions[0]
     self.YBinSize = self.yRange/self.NumberOfDivisions[1]
     self.ZBinSize = self.zRange/self.NumberOfDivisions[2]
     self.outputPolyData = self.decimate.GetOutput()
Example #4
 def get(self):
     lineData = self.reader.get("fibers", self.subj, space='World')
     
     decimate = vtk.vtkQuadricClustering()
     decimate.SetInputData(lineData)
     #decimate.SetTargetReduction(float(self.get_argument("res", 0)))
     decimate.Update()                
     lineData2 = decimate.GetOutput()        
     
     resultado = {}
     resultado['subject'] = self.subj
     resultado['type'] = 'lines'
     resultado['bounds'] = lineData2.GetBounds()
     resultado['points'] = numpy_support.vtk_to_numpy(lineData2.GetPoints().GetData()).flatten().tolist() #[:150000]
     resultado['lines'] = numpy_support.vtk_to_numpy(lineData2.GetLines().GetData()).tolist() #[:45000]
     
     self.set_header("Content-Type","application/json")
     self.write(json.dumps(resultado, separators=(',',':')))       
Example #5
def decimate(poly_data, reduction, data_is_big):
    decimator = None
    #decimator = vtk.vtkDecimatePro()
    #decimator.PreserveTopologyOn()
    #decimator.SplittingOff()
    #decimator.BoundaryVertexDeletionOff()
    #decimator.SetMaximumError(vtk.VTK_DOUBLE_MAX)
    if data_is_big:
        decimator = vtk.vtkQuadricClustering()
        decimator.SetNumberOfXDivisions(1024)
        decimator.SetNumberOfYDivisions(1024)
        decimator.SetNumberOfZDivisions(1024)
    else:
        decimator = vtk.vtkQuadricDecimation()
        decimator.SetTargetReduction(reduction)
    decimator.SetInputData(poly_data)
    decimator.Update()

    decimated = vtk.vtkPolyData()
    decimated.ShallowCopy(decimator.GetOutput())
    print("Decimated:", decimated.GetNumberOfPoints(), "points",
          decimated.GetNumberOfPolys(), "polygons, by ", type(decimator))
    # draw(decimated, color_scale='Rainbow', write_file=True, file_name='after.png')
    return decimated
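A hedged usage sketch for the decimate() helper above (not part of the original snippet): large inputs go through vtkQuadricClustering with a fixed 1024^3 binning and the reduction argument is ignored, while small inputs go through vtkQuadricDecimation with the requested target reduction.

import vtk

# Hypothetical input mesh; any vtkPolyData will do.
source = vtk.vtkSphereSource()
source.SetThetaResolution(200)
source.SetPhiResolution(200)
source.Update()

# Big-data path: quadric clustering with 1024 divisions per axis, `reduction` unused.
coarse = decimate(source.GetOutput(), reduction=0.9, data_is_big=True)

# Small-data path: quadric decimation aiming to remove ~90% of the triangles.
fine = decimate(source.GetOutput(), reduction=0.9, data_is_big=False)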
Example #6
tris.InsertCellPoint(2)
tris.InsertCellPoint(3)
tris.InsertNextCell(3)
tris.InsertCellPoint(0)
tris.InsertCellPoint(3)
tris.InsertCellPoint(1)
tris.InsertNextCell(3)
tris.InsertCellPoint(1)
tris.InsertCellPoint(2)
tris.InsertCellPoint(3)

polys = vtk.vtkPolyData()
polys.SetPoints(pts)
polys.SetPolys(tris)

mesh = vtk.vtkQuadricClustering()
mesh.SetInputConnection(sphere.GetOutputPort())
mesh.SetNumberOfXDivisions(10)
mesh.SetNumberOfYDivisions(10)
mesh.SetNumberOfZDivisions(10)

mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(mesh.GetOutputPort())

actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetDiffuseColor(GetRGBColor('tomato'))
actor.GetProperty().SetDiffuse(.8)
actor.GetProperty().SetSpecular(.4)
actor.GetProperty().SetSpecularPower(30)
Example #7
def image_to_vtk_cell_polydata(img,considered_cells=None,mesh_center=None,coef=1.0,mesh_fineness=1.0,smooth_factor=1.0):

    start_time = time()
    print "--> Generating vtk mesh from image"

    vtk_mesh = vtk.vtkPolyData()
    vtk_points = vtk.vtkPoints()
    vtk_triangles = vtk.vtkCellArray()
    vtk_cells = vtk.vtkLongArray()
    
    nx, ny, nz = img.shape
    data_string = img.tostring('F')

    reader = vtk.vtkImageImport()
    reader.CopyImportVoidPointer(data_string, len(data_string))
    if img.dtype == np.uint8:
        reader.SetDataScalarTypeToUnsignedChar()
    else:
        reader.SetDataScalarTypeToUnsignedShort()
    reader.SetNumberOfScalarComponents(1)
    reader.SetDataExtent(0, nx - 1, 0, ny - 1, 0, nz - 1)
    reader.SetWholeExtent(0, nx - 1, 0, ny - 1, 0, nz - 1)
    reader.SetDataSpacing(*img.resolution)
    reader.Update()

    if considered_cells is None:
        considered_cells = np.unique(img)[1:]

    if mesh_center is None:
        #mesh_center = np.array(img.resolution)*np.array(img.shape)/2.
        mesh_center = np.array([0,0,0])

    for label in considered_cells:

        cell_start_time = time()

        cell_volume = (img==label).sum()*np.array(img.resolution).prod()

        # mask_data = vtk.vtkImageThreshold()
        # mask_data.SetInputConnection(reader.GetOutputPort())
        # mask_data.ThresholdBetween(label, label)
        # mask_data.ReplaceInOn()
        # mask_data.SetInValue(label)
        # mask_data.SetOutValue(0)
        contour = vtk.vtkDiscreteMarchingCubes()
        # contour.SetInput(mask_data.GetOutput())
        SetInput(contour,reader.GetOutput())
        contour.ComputeNormalsOn()
        contour.ComputeGradientsOn()
        contour.SetValue(0,label)
        contour.Update()

        # print "    --> Marching Cubes : ",contour.GetOutput().GetPoints().GetNumberOfPoints()," Points,",contour.GetOutput().GetNumberOfCells()," Triangles,  1 Cell"

        # decimate = vtk.vtkDecimatePro()
        # decimate.SetInputConnection(contour.GetOutputPort())
        # # decimate.SetTargetReduction(0.75)
        # decimate.SetTargetReduction(0.66)
        # # decimate.SetTargetReduction(0.5)
        # # decimate.SetMaximumError(2*np.sqrt(3))
        # decimate.Update()

        smooth_iterations = int(np.ceil(smooth_factor*8.))

        smoother = vtk.vtkWindowedSincPolyDataFilter()
        SetInput(smoother,contour.GetOutput())
        smoother.BoundarySmoothingOn()
        # smoother.BoundarySmoothingOff()
        smoother.FeatureEdgeSmoothingOn()
        # smoother.FeatureEdgeSmoothingOff()
        smoother.SetFeatureAngle(120.0)
        # smoother.SetPassBand(1)
        smoother.SetPassBand(0.01)
        smoother.SetNumberOfIterations(smooth_iterations)
        smoother.NonManifoldSmoothingOn()
        smoother.NormalizeCoordinatesOn()
        smoother.Update()

        divisions = int(np.ceil(np.power(cell_volume,1/3.)*mesh_fineness))

        decimate = vtk.vtkQuadricClustering()
        # decimate = vtk.vtkQuadricDecimation()
        # decimate = vtk.vtkDecimatePro()
        # decimate.SetInput(contour.GetOutput())
        SetInput(decimate,smoother.GetOutput())
        # decimate.SetTargetReduction(0.95)
        # decimate.AutoAdjustNumberOfDivisionsOff()
        decimate.SetNumberOfDivisions(divisions,divisions,divisions)
        decimate.SetFeaturePointsAngle(120.0)
        # decimate.AttributeErrorMetricOn()
        # decimate.ScalarsAttributeOn()
        # decimate.PreserveTopologyOn()
        # decimate.CopyCellDataOn()
        # decimate.SetMaximumCost(1.0)
        # decimate.SetMaximumCollapsedEdges(10000.0)
        decimate.Update()

        # print "    --> Decimation     : ",decimate.GetOutput().GetPoints().GetNumberOfPoints()," Points,",decimate.GetOutput().GetNumberOfCells()," Triangles,  1 Cell"

        cell_polydata = decimate.GetOutput()
        # cell_polydata = smoother.GetOutput()

        polydata_points = np.array([cell_polydata.GetPoints().GetPoint(p) for p in xrange(cell_polydata.GetPoints().GetNumberOfPoints())])
        polydata_center = polydata_points.mean(axis=0)
        polydata_points = polydata_center + coef*(polydata_points-polydata_center) - mesh_center

        cell_points = []
        for p in xrange(cell_polydata.GetPoints().GetNumberOfPoints()):
            pid = vtk_points.InsertNextPoint(polydata_points[p])
            cell_points.append(pid)
        cell_points = array_dict(cell_points,np.arange(cell_polydata.GetPoints().GetNumberOfPoints()))

        for t in xrange(cell_polydata.GetNumberOfCells()):
            poly = vtk_triangles.InsertNextCell(3)
            for i in xrange(3):
                pid = cell_polydata.GetCell(t).GetPointIds().GetId(i)
                vtk_triangles.InsertCellPoint(cell_points[pid])
                vtk_cells.InsertValue(poly,label)

        cell_end_time = time()
        print "  --> Cell",label,":",decimate.GetOutput().GetNumberOfCells(),"triangles (",cell_volume," microm3 ) [",cell_end_time-cell_start_time,"s]"

    vtk_mesh.SetPoints(vtk_points)
    vtk_mesh.SetPolys(vtk_triangles)
    vtk_mesh.GetCellData().SetScalars(vtk_cells)

    print "  <-- Cell Mesh      : ",vtk_mesh.GetPoints().GetNumberOfPoints()," Points,",vtk_mesh.GetNumberOfCells()," Triangles, ",len(considered_cells)," Cells"

    end_time = time()
    print "<-- Generating vtk mesh from image      [",end_time-start_time,"s]"

    return vtk_mesh
Example #8
def image_to_vtk_polydata(img,considered_cells=None,mesh_center=None,coef=1.0,mesh_fineness=1.0):
    start_time = time()
    print "--> Generating vtk mesh from image"

    vtk_mesh = vtk.vtkPolyData()
    vtk_points = vtk.vtkPoints()
    vtk_triangles = vtk.vtkCellArray()
    vtk_cells = vtk.vtkLongArray()
    
    nx, ny, nz = img.shape
    data_string = img.tostring('F')

    reader = vtk.vtkImageImport()
    reader.CopyImportVoidPointer(data_string, len(data_string))
    if img.dtype == np.uint8:
        reader.SetDataScalarTypeToUnsignedChar()
    else:
        reader.SetDataScalarTypeToUnsignedShort()
    reader.SetNumberOfScalarComponents(1)
    reader.SetDataExtent(0, nx - 1, 0, ny - 1, 0, nz - 1)
    reader.SetWholeExtent(0, nx - 1, 0, ny - 1, 0, nz - 1)
    reader.SetDataSpacing(*img.resolution)

    if considered_cells is None:
        considered_cells = np.unique(img)[1:]

    if mesh_center is None:
        mesh_center = np.array(img.resolution)*np.array(img.shape)/2.

    marching_cube_start_time = time()
    print "  --> Marching Cubes"
    contour = vtk.vtkDiscreteMarchingCubes()
    SetInput(contour,reader.GetOutput())
    contour.ComputeNormalsOn()
    contour.ComputeGradientsOn()
    contour.ComputeScalarsOn()
    for i,label in enumerate(considered_cells):
        contour.SetValue(i,label)
    contour.Update()
    marching_cube_end_time = time()
    print "  <-- Marching Cubes : ",contour.GetOutput().GetPoints().GetNumberOfPoints()," Points,",contour.GetOutput().GetNumberOfCells()," Triangles, ",len(np.unique(img)[1:])," Cells [",marching_cube_end_time - marching_cube_start_time,"s]"

    marching_cubes = contour.GetOutput()

    marching_cubes_cell_data = marching_cubes.GetCellData().GetArray(0)

    triangle_cell_start_time = time()
    print "    --> Listing triangles"
    print "      - ",marching_cubes.GetNumberOfCells()," triangles"
    marching_cubes_triangles = np.sort([[marching_cubes.GetCell(t).GetPointIds().GetId(i) for i in xrange(3)] for t in xrange(marching_cubes.GetNumberOfCells())])   
    triangle_cell_end_time = time()
    print "    <-- Listing triangles            [",triangle_cell_end_time - triangle_cell_start_time,"s]"

    triangle_cell_start_time = time()
    print "    --> Listing triangle cells"
    triangle_cell = np.array([marching_cubes_cell_data.GetTuple(t)[0] for t in xrange(marching_cubes.GetNumberOfCells())],np.uint16)
    triangle_cell_end_time = time()
    print "    <-- Listing triangle cells     [",triangle_cell_end_time - triangle_cell_start_time,"s]"

    triangle_cell_start_time = time()
    print "    --> Updating marching cubes mesh"
    vtk_mesh = vtk.vtkPolyData()
    vtk_points = vtk.vtkPoints()
    vtk_triangles = vtk.vtkCellArray()
    vtk_cells = vtk.vtkLongArray()

    for label in considered_cells:

        # cell_start_time = time()

        cell_marching_cubes_triangles = marching_cubes_triangles[np.where(triangle_cell == label)]

        marching_cubes_point_ids = np.unique(cell_marching_cubes_triangles)

        marching_cubes_points = np.array([marching_cubes.GetPoints().GetPoint(p) for p in marching_cubes_point_ids])
        marching_cubes_center = marching_cubes_points.mean(axis=0)
        marching_cubes_points = marching_cubes_center + coef*(marching_cubes_points-marching_cubes_center) - mesh_center

        cell_points = []
        for p in xrange(marching_cubes_points.shape[0]):
            pid = vtk_points.InsertNextPoint(marching_cubes_points[p])
            cell_points.append(pid)
        cell_points = array_dict(cell_points,marching_cubes_point_ids)

        for t in xrange(cell_marching_cubes_triangles.shape[0]):
            poly = vtk_triangles.InsertNextCell(3)
            for i in xrange(3):
                pid = cell_marching_cubes_triangles[t][i]
                vtk_triangles.InsertCellPoint(cell_points[pid])
            vtk_cells.InsertValue(poly,label)

        # cell_end_time = time()
        # print "  --> Cell",label,":",cell_marching_cubes_triangles.shape[0],"triangles [",cell_end_time-cell_start_time,"s]"

    vtk_mesh.SetPoints(vtk_points)
    vtk_mesh.SetPolys(vtk_triangles)
    vtk_mesh.GetCellData().SetScalars(vtk_cells)

    triangle_cell_end_time = time()
    print "    <-- Updating marching cubes mesh [",triangle_cell_end_time - triangle_cell_start_time,"s]"

    decimation_start_time = time()
    print "  --> Decimation"
    smoother = vtk.vtkWindowedSincPolyDataFilter()
    SetInput(smoother,vtk_mesh)
    smoother.SetFeatureAngle(30.0)
    smoother.SetPassBand(0.05)
    smoother.SetNumberOfIterations(25)
    smoother.NonManifoldSmoothingOn()
    smoother.NormalizeCoordinatesOn()
    smoother.Update()

    decimate = vtk.vtkQuadricClustering()
    SetInput(decimate,smoother.GetOutput())
    decimate.SetNumberOfDivisions(*tuple(mesh_fineness*np.array(np.array(img.shape)*np.array(img.resolution)/2.,np.uint16)))
    decimate.SetFeaturePointsAngle(30.0)
    decimate.CopyCellDataOn()
    decimate.Update()

    decimation_end_time = time()
    print "  <-- Decimation     : ",decimate.GetOutput().GetPoints().GetNumberOfPoints()," Points,",decimate.GetOutput().GetNumberOfCells()," Triangles, ",len(considered_cells)," Cells [",decimation_end_time - decimation_start_time,"s]"

    end_time = time()
    print "<-- Generating vtk mesh from image      [",end_time-start_time,"s]"

    return decimate.GetOutput()
Example #9
    piece.CreateGhostCellsOff()
    # purposely put seams in here.

    pdn = vtk.vtkPolyDataNormals()
    pdn.SetInputConnection(piece.GetOutputPort())

# Just playing with an alternative that is not currently used.
deci = vtk.vtkDecimatePro()
deci.SetInputConnection(sphere.GetOutputPort())
# this did not remove seams as I thought it would
deci.BoundaryVertexDeletionOff()
# deci.PreserveTopologyOn()

# Since quadric Clustering does not handle borders properly yet,
# the pieces will have dramatic "seams"
q = vtk.vtkQuadricClustering()
q.SetInputConnection(sphere.GetOutputPort())
q.SetNumberOfXDivisions(5)
q.SetNumberOfYDivisions(5)
q.SetNumberOfZDivisions(10)
q.UseInputPointsOn()

streamer = vtk.vtkPolyDataStreamer()
# streamer.SetInputConnection(deci.GetOutputPort())
streamer.SetInputConnection(q.GetOutputPort())
# streamer.SetInputConnection(pdn.GetOutputPort())
streamer.SetNumberOfStreamDivisions(NUMBER_OF_PIECES)

mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(streamer.GetOutputPort())
mapper.ScalarVisibilityOff()
Example #10
def run(args):
    try:
        # inspired by /my/github/3dbar/lib/pymodules/python2.6/bar/rec/default_pipeline.xml

        if op.exists(args.out):
            if not args.replace:
                print('Skipping creation of image "{}", it already exists.'.
                      format(args.out))
                result = {'Status': 'Skipped'}
                report.success(result)
                return

        nii = nibabel.load(args.inp)

        # Nifti data is supposed to be in RAS orientation.
        # For Nifti files that violate the standard, the reorient string can be used to correct the orientation.
        if isinstance(args.reorient, str):
            nii = nit.reorient(nii, args.reorient)

        nii = nibabel.as_closest_canonical(nii)
        # numpy.array() will copy image to 'clean' data buffer
        img = numpy.squeeze(nii.get_data())

        if (args.bg_color == "auto"):
            bgColor = img[0, 0, 0]
        else:
            bgColor = int(args.bg_color)

        mask = nit.imageMask(img, [bgColor])
        img[mask] = 8
        if img.dtype != numpy.uint8:
            img = img.astype(numpy.uint8)

        #if numpy.prod(img.shape) > 512*512*512:
        hdr = nii.get_header()
        q = hdr.get_best_affine()
        scalef = [1 + v / 512 for v in img.shape]
        if any(numpy.array(scalef) > 1):
            print('Downsampling by a factor {} to reduce memory load'.format(
                scalef))
            print 'Best affine before downsampling: {}'.format(q)
            (img, q) = nit.downsample3d(img, q.tolist(), scalef)
            print 'Best affine after downsampling: {}'.format(q)

        # image zero-padding
        bb = nit.get_boundingbox(img, 2)
        w = 2
        padded = numpy.zeros((bb[3] + 2 * w, bb[4] + 2 * w, bb[5] + 2 * w),
                             numpy.uint8,
                             order='F')
        padded[w:-w, w:-w,
               w:-w] = img[bb[0]:bb[0] + bb[3], bb[1]:bb[1] + bb[4],
                           bb[2]:bb[2] + bb[5]]
        img = padded
        dims = img.shape
        spacing = q.diagonal()[0:3]
        print('Origin before {}'.format(q[0:3, 3]))
        # origin adjusted for zero-padding
        q[0:3, 3] = apply_affine(q, [bb[0] - w, bb[1] - w, bb[2] - w])
        origin = q[0:3, 3]
        print('Origin after {}'.format(origin))

        doRender = args.render

        # create a rendering window and renderer
        ren = vtk.vtkRenderer()
        if doRender:
            renWin = vtk.vtkRenderWindow()
            iren = vtk.vtkRenderWindowInteractor()
            iren.SetRenderWindow(renWin)
        else:
            renWin = vtk.vtkRenderWindow()
            renWin.SetOffScreenRendering(1)

        renWin.AddRenderer(ren)
        WIDTH = 800
        HEIGHT = 600
        renWin.SetSize(WIDTH, HEIGHT)

        # import data
        dataImporter = vtk.vtkImageImport()
        dataImporter.SetImportVoidPointer(img)
        dataImporter.SetDataScalarTypeToUnsignedChar()
        dataImporter.SetNumberOfScalarComponents(1)
        dataImporter.SetDataExtent(0, dims[0] - 1, 0, dims[1] - 1, 0,
                                   dims[2] - 1)
        dataImporter.SetWholeExtent(0, dims[0] - 1, 0, dims[1] - 1, 0,
                                    dims[2] - 1)
        # new since October 2015
        dataImporter.SetDataSpacing(spacing)
        dataImporter.SetDataOrigin(origin)
        print 'ORIGIN: {}'.format(dataImporter.GetDataOrigin())

        print 'Dimensions {}'.format(dims)

        # create iso surface
        iso = vtk.vtkMarchingCubes()
        iso.SetInputConnection(dataImporter.GetOutputPort())
        iso.ComputeNormalsOff()
        iso.SetValue(0, 3)

        iso.Update()

        #DEBUG
        #from vtk.util.numpy_support import vtk_to_numpy
        #iso.Update()
        #output = iso.GetOutput()
        #A = vtk_to_numpy(output.GetPoints().GetData())
        #print 'iso Output {}'.format(A)

        tf = vtk.vtkTriangleFilter()
        tf.SetInput(iso.GetOutput())

        # apply smoothing
        smth = vtk.vtkSmoothPolyDataFilter()
        smth.SetRelaxationFactor(0.5)
        smth.SetInput(tf.GetOutput())

        # reduce triangles
        qc = vtk.vtkQuadricClustering()
        qc.SetNumberOfXDivisions(90)
        qc.SetNumberOfYDivisions(90)
        qc.SetNumberOfZDivisions(90)
        qc.SetInput(smth.GetOutput())

        # map data
        volumeMapper = vtk.vtkPolyDataMapper()
        volumeMapper.SetInput(qc.GetOutput())
        volumeMapper.ScalarVisibilityOff()

        # actor
        act = vtk.vtkActor()
        act.SetMapper(volumeMapper)

        # assign actor to the renderer
        ren.AddActor(act)
        bgColor = [0, 0, 0]
        ren.SetBackground(bgColor)

        camera = ren.GetActiveCamera()
        camera.SetViewUp(0, 0, 1)
        camera.SetFocalPoint(dims[0] / 2, dims[1] / 2, dims[2] / 2)
        camera.SetPosition(dims[0], dims[1] / 2, dims[2] / 2)
        camera.ParallelProjectionOn()
        camera.Yaw(180)
        ren.SetActiveCamera(camera)
        ren.ResetCamera()
        ren.ResetCameraClippingRange()
        ren.GetActiveCamera().Zoom(1.5)

        if args.out:
            take_screenshot(args.out, ren)
            im = Image.open(args.out)
            A = numpy.array(im)
            mask = nit.imageMask(A, [bgColor])
            bgColor = [238, 221, 170]
            A[numpy.invert(mask)] = bgColor
            nonzero = numpy.argwhere(mask)
            if nonzero.size > 0:
                lefttop = [v if v > 0 else 0 for v in (nonzero.min(0) - 4)]
                rightbottom = [
                    v if v < A.shape[i] else A.shape[i]
                    for i, v in enumerate(nonzero.max(0) + 4)
                ]
                A = A[lefttop[0]:rightbottom[0] + 1,
                      lefttop[1]:rightbottom[1] + 1]
            im = Image.fromarray(A)
            sz = numpy.array(im.size, 'double')
            sz = numpy.ceil((128.0 / sz[1]) * sz)
            print 'sz {}'.format(sz)
            im.thumbnail(sz.astype(int), Image.ANTIALIAS)
            im = im.convert('P', palette=Image.ADAPTIVE, colors=256)
            palette = numpy.array(im.getpalette()).reshape(256, 3)
            match = numpy.where(numpy.all(palette == bgColor, axis=1))
            im.save(args.out, transparency=match[0])
            print('Created image "{}"'.format(args.out))

        if args.x3d:
            vtkX3DExporter = vtk.vtkX3DExporter()
            vtkX3DExporter.SetInput(renWin)
            vtkX3DExporter.SetFileName(args.x3d)
            vtkX3DExporter.Write()

        if args.stl:
            stlWriter = vtk.vtkSTLWriter()
            stlWriter.SetInputConnection(qc.GetOutputPort())
            stlWriter.SetFileTypeToBinary()
            stlWriter.SetFileName(args.stl)
            stlWriter.Write()

        # enable user interface interactor
        if doRender:
            iren.Initialize()
            renWin.Render()
            pos = camera.GetPosition()
            ornt = camera.GetOrientation()

            print 'Camera position; orientation {};{}'.format(pos, ornt)
            iren.Start()

        result = {'Status': 'Done'}
        report.success(result)
    except:
        report.fail(__file__)
Example #11
    def procData(self):
        # double negative so that the name doesn't collide with the same option that is off by default in dpLoadh5
        #if not self.no_legacy_transpose: cube = self.data_cube.transpose((1,0,2))
        # changed this to off by default and use same flag from dpLoadh5
        if self.legacy_transpose: cube = self.data_cube.transpose((1,0,2))
        else: cube = self.data_cube

        # for easy saving of scale as attribute in hdf5 output
        self.scale = self.data_attrs['scale']

        # Pad data with zeros so that meshes are closed on the edges
        sizes = np.array(cube.shape); r = self.RAD; sz = sizes + 2*r;
        dataPad = np.zeros(sz, dtype=self.data_type); dataPad[r:sz[0]-r, r:sz[1]-r, r:sz[2]-r] = cube

        # old method
        #        # get all unique seeds in the cube
        #        self.seeds = np.unique(cube)
        #        # remove the background label (label 0)
        #        if self.seeds.size > 0 and self.seeds[0] == 0: self.seeds = self.seeds[1:]

        # get sizes first with hist (prevents sums in meshing loop)
        #self.nVoxels = emLabels.getSizes(cube)[1:]
        self.nVoxels = emLabels.getSizesMax(cube, sum(self.data_attrs['types_nlabels']))[1:]
        self.seeds = np.arange(1, self.nVoxels.size+1, dtype=np.int64); self.seeds = self.seeds[self.nVoxels>0]
        #print(np.argmax(self.nVoxels))

        assert( self.seeds.size > 0 )   # error, no labels
        n = self.seeds.size; #self.nVoxels = np.zeros((n,), dtype=np.int64)
        assert( n == self.seeds[-1] or not self.mesh_outfile_stl )   # for consistency with stl file, no empty labels

        # intended for debug, only process a subset of the seeds
        if self.seed_range[0] < 1 or self.seed_range[0] > n: self.seed_range[0] = 0
        if self.seed_range[1] < 1 or self.seed_range[1] < 0: self.seed_range[1] = n

        # threw me off in debug twice, if the supervoxels are contiguous then have the seed_range mean actual seed
        if n == self.seeds[-1] and self.seed_range[0] > 0: self.seed_range[0] -= 1

        # other inits
        if self.do_smooth: W = np.ones(self.smooth, dtype=self.PDTYPE) / self.smooth.prod()

        # allocate outputs
        self.faces = n * [None]; self.vertices = n * [None]; self.mins = n * [None]; self.rngs = n * [None]
        self.bounds_beg = n * [None]; self.bounds_end = n * [None]
        self.nFaces = np.zeros((n,), dtype=np.uint64); self.nVertices = np.zeros((n,), dtype=np.uint64);
        if self.doplots or self.mesh_outfile_stl: self.allPolyData = vtk.vtkAppendPolyData()

        # get bounding boxes for each supervoxel
        svox_bnd = nd.measurements.find_objects(dataPad, n)

        if self.dpLabelMesher_verbose:
            tloop = time.time(); t = time.time()
        for i in range(self.seed_range[0], self.seed_range[1]):
            if self.dpLabelMesher_verbose and i % self.print_every == 0:
                print('seed : %d is %d / %d' % (self.seeds[i],i+1,self.seed_range[1]))

            # old method
            #            # select the labels
            #            #bwdpls = (dataPad == self.seeds[i]);
            #            #self.nVoxels[i] = bwdpls.sum();
            #            if self.dpLabelMesher_verbose: print('\tnVoxels = %d' % self.nVoxels[i])
            #
            #            # get the voxel coordinates relative to padded and non-padded cube
            #            idpls = np.argwhere(bwdpls)
            #            # bounding box within zero padded cube
            #            imin = idpls.min(axis=0); imax = idpls.max(axis=0)

            cur_bnd = svox_bnd[self.seeds[i]-1]
            imin = np.array([x.start for x in cur_bnd]); imax = np.array([x.stop-1 for x in cur_bnd])
            # min and max coordinates of this seed within zero padded cube
            pmin = imin - r; pmax = imax + r;
            # min coordinates of this seed relative to original (non-padded cube)
            self.mins[i] = pmin - r; self.rngs[i] = pmax - pmin + 1

            # old method
            # crop out the bounding box plus the padding, then optionally smooth
            #crpdpls = bwdpls[pmin[0]:pmax[0]+1,pmin[1]:pmax[1]+1,pmin[2]:pmax[2]+1].astype(self.PDTYPE)
            # crop out the bounding box then binarize this seed within bounding box
            crpdpls = (dataPad[pmin[0]:pmax[0]+1,pmin[1]:pmax[1]+1,
                               pmin[2]:pmax[2]+1] == self.seeds[i]).astype(self.PDTYPE)
            if self.do_smooth:
                crpdplsSm = filters.convolve(crpdpls, W, mode='reflect', cval=0.0, origin=0)
                # if smoothing results in nothing above contour level, use original without smoothing
                if (crpdplsSm > self.contour_lvl).any():
                    del crpdpls; crpdpls = crpdplsSm; del crpdplsSm
            if self.doplots: showImgData(np.squeeze(crpdpls[:,:,crpdpls.shape[2]/2]),'slice')

            # vtkImageImport is used to create image data from memory in vtk
            # http://wiki.scipy.org/Cookbook/vtkVolumeRendering
            dataImporter = vtk.vtkImageImport()
            # The previously created array is converted to a byte string (not a regular string, see np docs) and imported.
            data_string = crpdpls.transpose((2,1,0)).tostring();
            dataImporter.CopyImportVoidPointer(data_string, len(data_string))
            # Set the type of the newly imported data
            #dataImporter.SetDataScalarTypeToUnsignedChar()
            #dataImporter.SetDataScalarTypeToUnsignedShort()
            dataImporter.SetDataScalarTypeToDouble()
            # Because the data that is imported only contains an intensity value (i.e. not RGB), the importer
            # must be told this is the case.
            dataImporter.SetNumberOfScalarComponents(1)
            if self.set_voxel_scale:
                # Have to set the voxel anisotropy here, as there does not seem an easy way once the poly is created.
                dataImporter.SetDataSpacing(self.data_attrs['scale'])
            # Data extent is the extent of the actual buffer, whole extent is ???
            # Use extents that are relative to non-padded cube
            beg = self.mins[i]; end = self.mins[i] + self.rngs[i] - 1
            dataImporter.SetDataExtent(beg[0], end[0], beg[1], end[1], beg[2], end[2])
            dataImporter.SetWholeExtent(beg[0], end[0], beg[1], end[1], beg[2], end[2])

            # save bounds relative to entire dataset
            self.bounds_beg[i] = beg + self.dataset_index; self.bounds_end[i] = end + self.dataset_index;

            # use vtk for isosurface contours and surface mesh reduction
            iso = vtk.vtkContourFilter()
            iso.SetInputConnection(dataImporter.GetOutputPort())
            iso.SetComputeNormals(0)
            iso.SetValue(0, self.contour_lvl)
            if self.decimatePro:
                deci = vtk.vtkDecimatePro()
                rf = 1-self.reduce_frac; deci.SetTargetReduction(rf); df = 0.01
                deci.SplittingOn(); deci.PreserveTopologyOff(); deci.BoundaryVertexDeletionOn()
                if self.min_faces > 0: updates = range(100)
                else: updates = ['deci.BoundaryVertexDeletionOff()','deci.PreserveTopologyOn()','0']
            else:
                deci = vtk.vtkQuadricClustering()
                #deci.SetDivisionOrigin(0.0,0.0,0.0); deci.SetDivisionSpacing(self.reduce_spacing)
                nb = self.reduce_nbins; deci.SetNumberOfDivisions(nb,nb,nb); deci.AutoAdjustNumberOfDivisionsOff()
                updates = ['deci.AutoAdjustNumberOfDivisionsOn()','0']

            # thought of adding checking for closed surfaces, http://comments.gmane.org/gmane.comp.lib.vtk.user/47957
            # this did not work, for low reduce_frac, many open edges remain even for large objects

            # not clear that triangle filter does anything, contour filter already makes triangulated meshes?
            # send polygonal mesh from isosurface to triangle filter to convert to triangular mesh
            #tri = vtk.vtkTriangleFilter(); tri.SetInputConnection(iso.GetOutputPort());
            #deci.SetInputConnection(tri.GetOutputPort())

            deci.SetInputConnection(iso.GetOutputPort())
            # xxx - this is kind of a cheap trick: if we reduce down "too much", then rerun to preserve more
            for update in updates:
                deci.Update()

                # http://forrestbao.blogspot.com/2012/06/vtk-polygons-and-other-cells-as.html
                # http://stackoverflow.com/questions/6684306/how-can-i-read-a-vtk-file-into-a-python-datastructure
                dOut = deci.GetOutput()
                # xxx - points seem to be single instead of inputted type, probably depends on vtk version:
                #   http://public.kitware.com/pipermail/vtkusers/2010-April/059413.html
                self.vertices[i] = nps.vtk_to_numpy(dOut.GetPoints().GetData())
                if self.center_origin:
                    self.vertices[i][:,0] -= sizes[0]/2; self.vertices[i][:,1] -= sizes[1]/2
                    self.vertices[i][:,2] = sizes[2]/2 - self.vertices[i][:,2]
                self.faces[i] = nps.vtk_to_numpy(dOut.GetPolys().GetData()).reshape((-1,4))[:,1:]
                if self.flip_faces: self.faces[i] = self.faces[i][:,::-1]
                self.nVertices[i] = self.vertices[i].shape[0]
                self.nFaces[i] = self.faces[i].shape[0]
                if self.dpLabelMesher_verbose and i % self.print_every == 0:
                    print('\t%d vertices, %d faces' % (self.nVertices[i], self.nFaces[i]))
                if self.min_faces > 0:
                    if self.nFaces[i] >= self.min_faces: break
                    rf -= df; deci.SetTargetReduction(rf)
                else:
                    if self.nVertices[i] > 2 and self.nFaces[i] > 0: break
                    eval(update)
            assert( self.nVertices[i] > 2 and self.nFaces[i] > 0 )  # there has to be at least one face

            if self.doplots:
                mapper = vtk.vtkPolyDataMapper()
                mapper.SetInputConnection(deci.GetOutputPort())
                dpLabelMesher.vtkShow(mapper)

            # append the current surface to vtk object with all the surfaces
            if self.doplots or self.mesh_outfile_stl:
                self.allPolyData.AddInputConnection(deci.GetOutputPort())
            if self.doplots:
                connectivityFilter = vtk.vtkPolyDataConnectivityFilter()
                connectivityFilter.SetInputConnection(self.allPolyData.GetOutputPort())
                connectivityFilter.SetExtractionModeToAllRegions()
                connectivityFilter.ColorRegionsOn()
                connectivityFilter.Update()
                mapper = vtk.vtkPolyDataMapper()
                mapper.SetInputConnection(connectivityFilter.GetOutputPort())
                mapper.SetScalarRange(connectivityFilter.GetOutput().GetPointData().GetArray("RegionId").GetRange())
                dpLabelMesher.vtkShow(mapper)

            if self.dpLabelMesher_verbose and i % self.print_every == 0:
                print('\tdone in %.3f s' % (time.time() - t,)); t = time.time()
        if self.dpLabelMesher_verbose: print('Total elapsed time meshing %.3f s' % (time.time() - tloop,))
Example #12
def quadric_clustering(data, ratio):
    sim = vtk.vtkQuadricClustering()
    sim.SetInputData(data)
    sim.Update()
    return sim.GetOutput()
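A short usage sketch for the snippet above (my own addition, not from the original source). Note that the ratio argument is currently unused, so vtkQuadricClustering simply runs with its default number of divisions.

import vtk

# Hypothetical STL input; any reader that produces vtkPolyData works the same way.
reader = vtk.vtkSTLReader()
reader.SetFileName("input.stl")  # placeholder file name
reader.Update()

simplified = quadric_clustering(reader.GetOutput(), ratio=0.5)
print(simplified.GetNumberOfPoints(), "points,",
      simplified.GetNumberOfPolys(), "polygons")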
Example #13
def run(args):
    try:
        nii = nibabel.load(args.inp)
        nii = nibabel.as_closest_canonical(nii)
        img = numpy.squeeze(nii.get_data())

        bgColor = img[0, 0, 0]
        mask = (img != bgColor)
        #img = numpy.zeros(mask.shape,numpy.uint8)
        img[mask] = 16

        if numpy.prod(img.shape) > 256 * 256 * 256:
            print('Downsampling by a factor 2 to reduce memory load')
            hdr = nii.get_header()
            q = hdr.get_best_affine()
            (img, q) = nit.downsample(img, q, 2)

        dims = img.shape

        doRender = args.render

        # create a rendering window and renderer
        ren = vtk.vtkRenderer()
        if doRender:
            renWin = vtk.vtkRenderWindow()
            iren = vtk.vtkRenderWindowInteractor()
            iren.SetRenderWindow(renWin)
        else:
            renWin = vtk.vtkXOpenGLRenderWindow()
        renWin.AddRenderer(ren)
        WIDTH = 250
        HEIGHT = 160
        renWin.SetSize(WIDTH, HEIGHT)

        # import data
        dataImporter = vtk.vtkImageImport()
        dataImporter.SetImportVoidPointer(img)
        dataImporter.SetDataScalarTypeToUnsignedChar()
        dataImporter.SetNumberOfScalarComponents(1)
        dataImporter.SetDataExtent(0, dims[0] - 1, 0, dims[1] - 1, 0,
                                   dims[2] - 1)
        dataImporter.SetWholeExtent(0, dims[0] - 1, 0, dims[1] - 1, 0,
                                    dims[2] - 1)

        print 'Dimensions {}'.format(dims)

        # create iso surface
        iso = vtk.vtkMarchingCubes()
        iso.SetInputConnection(dataImporter.GetOutputPort())
        iso.ComputeNormalsOff()
        iso.SetValue(0, 5)
        ## necessary?? See /my/github/3dbar/lib/pymodules/python2.6/bar/rec/default_pipeline.xml

        tf = vtk.vtkTriangleFilter()
        tf.SetInput(iso.GetOutput())

        # apply smoothing
        smth = vtk.vtkSmoothPolyDataFilter()
        smth.SetRelaxationFactor(0.5)
        smth.SetInput(tf.GetOutput())

        # reduce triangles
        qc = vtk.vtkQuadricClustering()
        qc.SetNumberOfXDivisions(90)
        qc.SetNumberOfYDivisions(90)
        qc.SetNumberOfZDivisions(90)
        qc.SetInput(smth.GetOutput())

        # map data
        volumeMapper = vtk.vtkPolyDataMapper()
        volumeMapper.SetInput(qc.GetOutput())
        volumeMapper.ScalarVisibilityOff()

        # actor
        act = vtk.vtkActor()
        act.SetMapper(volumeMapper)

        # assign actor to the renderer
        ren.AddActor(act)

        camera = ren.GetActiveCamera()
        camera.SetViewUp(0, 0, 1)
        camera.SetFocalPoint(dims[0] / 2, dims[1] / 2, dims[2] / 2)
        camera.SetPosition(dims[0], dims[1] / 2, dims[2] / 2)
        camera.ParallelProjectionOn()
        camera.Yaw(180)
        ren.SetActiveCamera(camera)
        ren.ResetCamera()
        ren.ResetCameraClippingRange()
        ren.GetActiveCamera().Zoom(1.5)
        if args.out:
            take_screenshot(args.out, ren)

        images = []
        for i in range(0, 360, 15):
            az = 3 * numpy.sin(2 * numpy.pi * i / 360)
            el = 3 * numpy.cos(2 * numpy.pi * i / 360)
            ren.GetActiveCamera().Azimuth(az)
            ren.GetActiveCamera().Elevation(el)
            renWin.Render()
            pngfile = "/tmp/test{}.png".format(i)
            take_screenshot(pngfile, ren)
            im = Image.open(pngfile)
            images.append(im)
            ren.GetActiveCamera().Elevation(-el)
            ren.GetActiveCamera().Azimuth(-az)

        images2gif.writeGif('/tmp/test.gif', images, 0.2, dither=0)

        # enable user interface interactor
        if doRender:
            iren.Initialize()
            renWin.Render()
            pos = camera.GetPosition()
            ornt = camera.GetOrientation()

            print 'Camera position; orientation {};{}'.format(pos, ornt)
            iren.Start()

        result = {'Status': 'Done'}
        report.success(result)
    except:
        report.fail(__file__)
Example #14
sphere = vtk.vtkSphereSource()
# #dome_sphere.SetCenter(meristem_model.shape_model['dome_center'])
sphere.SetRadius(1)
sphere.SetThetaResolution(16)
sphere.SetPhiResolution(16)
sphere.Update()

#subdivide = vtk.vtkLoopSubdivisionFilter()
#subdivide = vtk.vtkButterflySubdivisionFilter()
subdivide = vtk.vtkLinearSubdivisionFilter()
subdivide.SetNumberOfSubdivisions(2)
subdivide.SetInputConnection(sphere.GetOutputPort())
#subdivide.SetInputConnection(ico.GetOutputPort())
subdivide.Update()

decimate = vtk.vtkQuadricClustering()
decimate.SetInput(subdivide.GetOutput())
decimate.SetNumberOfDivisions(100,100,100)
decimate.SetFeaturePointsAngle(30.0)
decimate.CopyCellDataOn()
decimate.Update()

scale_transform = vtk.vtkTransform()
scale_factor = dome_radius/(np.sqrt(2)/2.)
scale_transform.Scale(scale_factor,scale_factor,scale_factor)

dome_sphere = vtk.vtkTransformPolyDataFilter()
#dome_sphere.SetInput(subdivide.GetOutput())
dome_sphere.SetInput(sphere.GetOutput())
#dome_sphere.SetInput(decimate.GetOutput())
dome_sphere.SetTransform(scale_transform)
Example #15
def makeTriangulatedMesh(coordination_dictionary, division_spacing, sigma,
                         num_threads):

    max_X = -1e10
    min_X = 1e10
    max_Y = -1e10
    min_Y = 1e10

    for key in sorted(coordination_dictionary.keys()):

        plane = coordination_dictionary[key]

        for a_contour in plane:

            for point in a_contour:

                if point[0] > max_X:
                    max_X = point[0]
                if point[0] < min_X:
                    min_X = point[0]
                if point[1] > max_Y:
                    max_Y = point[1]
                if point[1] < min_Y:
                    min_Y = point[1]

    max_X = int(max_X) + 1
    min_X = int(min_X) - 1
    max_Y = int(max_Y) + 1
    min_Y = int(min_Y) - 1

    image_X = max_X - min_X + 10
    image_Y = max_Y - min_Y + 10

    args = []
    min_Z = min(coordination_dictionary.keys())

    for key in sorted(coordination_dictionary.keys()):

        plane = coordination_dictionary[key]

        for a_contour in plane:

            image = np.zeros((image_X, image_Y), dtype=float)

            arg = (a_contour, image, min_X, min_Y, key)

            args.append(arg)

    p = Pool(int(num_threads))

    imagesWithKey = p.map(makeBinaryImageWighTag, [a for a in args])

    imageDict = {}

    for imageWithKey in imagesWithKey:

        if not imageWithKey[1] in imageDict.keys():

            imageDict[imageWithKey[1]] = imageWithKey[0]

        else:

            imageDict[imageWithKey[1]] += imageWithKey[0]

    spacer_image = np.zeros((image_X, image_Y, 5), dtype=float)
    binary_image = spacer_image

    keyList = sorted(imageDict.keys())

    for i, key in enumerate(keyList):

        a_image = imageDict[key]

        try:

            nextKey = keyList[i + 1]

            for j in range(int(float(nextKey) - float(key))):

                binary_image = np.dstack((binary_image, a_image))

        except:

            binary_image = np.dstack((binary_image, a_image))

    binary_image = np.dstack((binary_image, spacer_image))

    itkImage = numpyToITK3D(binary_image)

    CharPixelType = itk.UC
    RealPixelType = itk.F
    Dimension = 3

    CharImageType = itk.Image[CharPixelType, Dimension]
    RealImageType = itk.Image[RealPixelType, Dimension]

    smoothFilter = itk.SmoothingRecursiveGaussianImageFilter[RealImageType,
                                                             RealImageType]
    smoothFilter = smoothFilter.New()
    smoothFilter.SetInput(itkImage)
    smoothFilter.SetSigma(float(sigma))
    smoothFilter.Update()

    rescaleFilter = itk.RescaleIntensityImageFilter[RealImageType,
                                                    CharImageType]
    rescale = rescaleFilter.New()
    rescale.SetOutputMinimum(0)
    rescale.SetOutputMaximum(255)
    rescale.SetInput(smoothFilter.GetOutput())
    rescale.Update()

    converter = itk.ImageToVTKImageFilter[CharImageType].New()
    converter.SetInput(rescale.GetOutput())
    converter.Update()

    surface = vtk.vtkMarchingCubes()
    surface.SetInput(converter.GetOutput())
    surface.ComputeNormalsOn()
    surface.SetValue(0, 80.0)

    transform = vtk.vtkTransform()
    transform.Translate(min_X - 5, min_Y - 5, min_Z - 5)

    tf = vtk.vtkTransformPolyDataFilter()
    tf.SetInputConnection(surface.GetOutputPort())
    tf.SetTransform(transform)
    tf.Update()

    triangleFilter = vtk.vtkTriangleFilter()
    triangleFilter.SetInputConnection(tf.GetOutputPort())
    triangleFilter.Update()

    decimate = vtk.vtkQuadricClustering()
    decimate.SetDivisionSpacing(float(division_spacing),
                                float(division_spacing),
                                float(division_spacing))
    decimate.SetInput(triangleFilter.GetOutput())
    decimate.Update()

    return decimate.GetOutput()