Example #1
    def __init__(self, module_manager):
        ModuleBase.__init__(self, module_manager)

        # this has no progress feedback
        self._icp = vtk.vtkIterativeClosestPointTransform()

        self._config.max_iterations = 50
        self._config.mode = 'RigidBody'
        self._config.align_centroids = True

        config_list = [
            ('Transformation mode:', 'mode', 'base:str', 'choice',
             'Rigid: rotation + translation;\n'
             'Similarity: rigid + isotropic scaling\n'
             'Affine: rigid + scaling + shear', ('RigidBody', 'Similarity',
                                                 'Affine')),
            ('Maximum number of iterations:', 'max_iterations', 'base:int',
             'text', 'Maximum number of iterations for ICP.'),
            ('Align centroids before start', 'align_centroids', 'base:bool',
             'checkbox',
             'Align centroids before iteratively optimizing closest points? (required for large relative translations)'
             )
        ]

        ScriptedConfigModuleMixin.__init__(
            self, config_list, {
                'Module (self)': self,
                'vtkIterativeClosestPointTransform': self._icp
            })

        self.sync_module_logic_with_config()
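
The config options above (mode, max_iterations, align_centroids) are ultimately pushed onto the wrapped vtkIterativeClosestPointTransform. A minimal sketch of that mapping outside the module framework (the helper name is hypothetical):

import vtk

def apply_icp_config(icp, mode='RigidBody', max_iterations=50, align_centroids=True):
    # Map the 'mode' config string onto the underlying vtkLandmarkTransform.
    if mode == 'RigidBody':
        icp.GetLandmarkTransform().SetModeToRigidBody()
    elif mode == 'Similarity':
        icp.GetLandmarkTransform().SetModeToSimilarity()
    else:  # 'Affine'
        icp.GetLandmarkTransform().SetModeToAffine()
    icp.SetMaximumNumberOfIterations(max_iterations)
    # 'align_centroids' corresponds to StartByMatchingCentroids.
    icp.SetStartByMatchingCentroids(align_centroids)

icp = vtk.vtkIterativeClosestPointTransform()
apply_icp_config(icp, mode='Similarity', max_iterations=50, align_centroids=True)
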
Example #2
    def __init__(self):
        VTKPythonAlgorithmBase.__init__(self,
                                        nInputPorts=2,
                                        nOutputPorts=1,
                                        outputType='vtkPolyData')

        self.icp = vtk.vtkIterativeClosestPointTransform()
Example #3
    def _compute(self):
        from vtk import vtkIterativeClosestPointTransform

        source_polydata = self.as_polydata(self.source)
        target_polydata = self.as_polydata(self.target)

        icp_transform = vtkIterativeClosestPointTransform()
        icp_transform.SetSource(source_polydata)
        icp_transform.SetTarget(target_polydata)
        icp_transform.SetMaximumNumberOfIterations(self.max_iterations)
        if self.match_centroids:
            icp_transform.StartByMatchingCentroidsOn()
        else:
            icp_transform.StartByMatchingCentroidsOff()
        icp_transform.SetMaximumNumberOfLandmarks(
            source_polydata.GetNumberOfPoints())
        icp_transform.SetMaximumMeanDistance(self.tolerance)
        icp_transform.Update()

        matrix = icp_transform.GetMatrix()

        self.scale_and_rotation = numpy.zeros((3,3))
        self.translation = numpy.zeros(3)

        for i in range(3):
            for j in range(3):
                self.scale_and_rotation[i, j] = matrix.GetElement(i, j)

        self.translation[0] = matrix.GetElement(0, 3)
        self.translation[1] = matrix.GetElement(1, 3)
        self.translation[2] = matrix.GetElement(2, 3)

        print('Number of iterations:', icp_transform.GetNumberOfIterations())
        print('Scale and Rotation:\n', self.scale_and_rotation)
        print('Translation:', self.translation)
Example #4
def align(source, target, iters=100, rigid=False, legend=None):
    '''
    Return a copy of the source actor aligned to the
    target actor through the vtkIterativeClosestPointTransform class.

    The core of the algorithm is to match each vertex in one surface with
    the closest surface point on the other, then apply the transformation
    that modifies one surface to best match the other (in the least-squares sense).

    [**Example1**](https://github.com/marcomusy/vtkplotter/blob/master/examples/basic/align1.py)
    [**Example2**](https://github.com/marcomusy/vtkplotter/blob/master/examples/basic/align2.py)
    '''
    if isinstance(source, Actor): source = source.polydata()
    if isinstance(target, Actor): target = target.polydata()
    
    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetSource(source)
    icp.SetTarget(target)
    icp.SetMaximumNumberOfIterations(iters)
    if rigid:
        icp.GetLandmarkTransform().SetModeToRigidBody()
    icp.StartByMatchingCentroidsOn()
    icp.Update()
    icpTransformFilter = vtk.vtkTransformPolyDataFilter()
    icpTransformFilter.SetInputData(source)
    icpTransformFilter.SetTransform(icp)
    icpTransformFilter.Update()
    poly = icpTransformFilter.GetOutput()
    actor = Actor(poly, legend=legend)
    actor.info['transform'] = icp.GetLandmarkTransform()
    return actor
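
The returned actor stores the fitted landmark transform in actor.info['transform']. A small sketch (the helper is not part of vtkplotter) for pulling that out as a 4x4 numpy array:

import numpy as np

def transform_to_numpy(vtk_transform):
    # Read the 4x4 matrix of a vtkLinearTransform (e.g. a vtkLandmarkTransform) into numpy.
    m = vtk_transform.GetMatrix()
    return np.array([[m.GetElement(i, j) for j in range(4)] for i in range(4)])

# e.g. after: aligned = align(source_actor, target_actor)
# T = transform_to_numpy(aligned.info['transform'])
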
Example #7
    def align(self):
        '''
        Uses the built-in ICP landmark transformation provided by VTK to align the outline actors, then updates the renderer and the stored homogeneous transformation matrix transM.
        '''
        self.unsaved_changes = True

        if self.averaged == True:  #then set it false and change the button
            self.averaged = False
            self.ui.averageButton.setStyleSheet("background-color : None ")

        if self.aligned == True:  #then set it false and change the button
            self.aligned = False
            self.ui.alignButton.setStyleSheet("background-color : None ")

        self.ui.statLabel.setText("Starting alignment . . .")
        QtWidgets.QApplication.processEvents()

        if self.ui.useVTKalignButton.isChecked():
            icp_trans = vtk.vtkIterativeClosestPointTransform()
            icp_trans.SetSource(self.fOPC)
            icp_trans.SetTarget(self.rOPC)

            icp_trans.StartByMatchingCentroidsOn()
            icp_trans.GetLandmarkTransform().SetModeToRigidBody()
            icp_trans.SetMeanDistanceModeToRMS()
            icp_trans.CheckMeanDistanceOn()
            icp_trans.SetMeanDistanceModeToAbsoluteValue()
            # icp_trans.SetMaximumNumberOfLandmarks(200)
            icp_trans.DebugOn()
            icp_trans.Modified()
            icp_trans.Update()
            icp_trans.Inverse()

            T = np.ones(shape=(4, 4))
            for i in range(4):
                for j in range(4):
                    T[i, j] = icp_trans.GetMatrix().GetElement(i, j)
            T = np.linalg.inv(T)

        if self.ui.useICPalignButton.isChecked():
            self.reduce_outline()
            T, _, _ = icp(self.fO_local, self.rO_local)

        #apply operation
        self.flp = apply_trans(self.flp, T)
        self.fO_local = apply_trans(self.fO_local, T)
        self.floatTrans.append(T)

        self.update_float()
        self.update_limits()

        if self.mirrored == False:
            self.ui.statLabel.setText(
                "WARNING alignment proceeded without a mirror operation. Alignment complete."
            )
        else:
            self.ui.statLabel.setText("Alignment complete.")
Example #8
 def register(self, src, target, iter=100):
     icp = vtk.vtkIterativeClosestPointTransform()
     icp.SetSource(src)
     icp.SetTarget(target)
     icp.GetLandmarkTransform().SetModeToRigidBody()
     icp.SetMaximumNumberOfIterations(iter)
     #icp.StartByMatchingCentroidsOn()
     icp.Modified()
     icp.Update()
     return icp.GetMatrix()
def ICPSimilitudRegistration(fix, mov, fix_ldm=None, mov_ldm=None, do_rigid=False):
    """
    IterativeClosestPoint registration between two vtkPolyData.
    Returns the vtkTransform.

    Can be initialized using landmarks
    (then returns a composed vtkTransform instead of a vtkIterativeClosestPointTransform).
    """
    transform = vtk.vtkIterativeClosestPointTransform()
    if do_rigid:
        transform.GetLandmarkTransform().SetModeToRigidBody()
    else:
        transform.GetLandmarkTransform().SetModeToSimilarity()
    transform.SetTarget(fix)

    #transform.SetMaximumNumberOfIterations(10)
    #transform.SetMaximumNumberOfLandmarks(100)

    # initialize using landmarks
    if fix_ldm is not None and mov_ldm is not None:
        ldm_transform = vtk.vtkLandmarkTransform()
        ldm_transform.SetSourceLandmarks(mov_ldm)
        ldm_transform.SetTargetLandmarks(fix_ldm)

        if do_rigid:
            ldm_transform.SetModeToRigidBody()
        else:
            ldm_transform.SetModeToSimilarity()
        ldm_transform.Update()

        warper = vtk.vtkTransformPolyDataFilter()
        warper.SetTransform(ldm_transform)
        warper.SetInputData(mov)
        warper.Update()

        # ICP registration
        transform.SetSource(warper.GetOutput())
        transform.Update()

        composed = vtk.vtkTransform()
        composed.SetInput(transform)
        composed.PreMultiply()
        composed.Concatenate(ldm_transform)
        return composed

    else:
        transform.SetStartByMatchingCentroids(True)

        # ICP registration
        transform.SetSource(mov)
        transform.Update()
        return transform
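
Whichever branch is taken, the caller gets back a linear transform; a minimal usage sketch (helper name hypothetical) for applying it to the moving surface with a vtkTransformPolyDataFilter:

import vtk

def apply_transform(polydata, transform):
    # Warp a vtkPolyData with the transform returned by ICPSimilitudRegistration.
    warper = vtk.vtkTransformPolyDataFilter()
    warper.SetInputData(polydata)
    warper.SetTransform(transform)
    warper.Update()
    return warper.GetOutput()

# e.g.: registered = apply_transform(mov, ICPSimilitudRegistration(fix, mov, do_rigid=True))
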
Example #10
 def do_icp(src, tgt):
     icp = vtk.vtkIterativeClosestPointTransform()
     icp.SetSource(src)
     icp.SetTarget(tgt)
     icp.GetLandmarkTransform().SetModeToRigidBody()
     icp.SetMaximumNumberOfIterations(maxiters)
     icp.StartByMatchingCentroidsOn()
     icp.Modified()
     icp.Update()
     M = icp.GetMatrix()
     return np.array([[M.GetElement(i, j) for j in range(4)]
                      for i in range(4)],
                     dtype=np.float64)
 def __init__(self):
     self.source = None
     self.target = None
     self.transformed = None
     self.iteration = 200
     self.icp = vtk.vtkIterativeClosestPointTransform()
     self.icptf = vtk.vtkTransformPolyDataFilter()
     self.mappers = vtk.vtkPolyDataMapper()
     self.mappert = vtk.vtkPolyDataMapper()
     self.mappertf = vtk.vtkPolyDataMapper()
     self.actors = vtk.vtkActor()
     self.actort = vtk.vtkActor()
     self.actortf = vtk.vtkActor()
     self.centroidon = False
Example #13
def IterativeClosestPoints(stlpath1: str, stlpath2: str, type: int):
    # ============ create source points ==============
    reader1 = vtk.vtkSTLReader()
    reader1.SetFileName(stlpath1)
    reader1.Update()

    # ============ clip the model, keeping only the mesh near the registration points ==============
    source = surface_clip(reader1.GetOutput(), type)

    # ============ create target points ==============
    reader2 = vtk.vtkSTLReader()
    reader2.SetFileName(stlpath2)
    reader2.Update()

    # ============ clip the model, keeping only the mesh near the registration points ==============
    target = surface_clip(reader2.GetOutput(), type)

    # ============ run ICP ==============
    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetSource(source)
    icp.SetTarget(target)
    # icp.GetLandmarkTransform().SetModeToRigidBody()
    # icp.GetLandmarkTransform().SetModeToSimilarity()
    icp.GetLandmarkTransform().SetModeToAffine()  # current experiments show affine registration gives the best results
    # icp.DebugOn()
    icp.SetMaximumNumberOfIterations(1000)
    icp.StartByMatchingCentroidsOn()
    icp.Modified()
    icp.Update()

    icpTransformFilter = vtk.vtkTransformPolyDataFilter()
    icpTransformFilter.SetInputData(source)

    icpTransformFilter.SetTransform(icp)
    icpTransformFilter.Update()

    # ============ write the transformed stl ==============

    output_path = stlpath1[0:-4] + "transform3.stl"
    writer = vtk.vtkSTLWriter()
    writer.SetInputConnection(icpTransformFilter.GetOutputPort())
    writer.SetFileName(output_path)
    writer.SetFileTypeToBinary()
    writer.Write()

    matrix = icp.GetMatrix()
    # nparray = arrayFromVTKMatrix(matrix)

    return matrix
Example #14
    def execute(self, contour, alignment, surfaceFilter):
        """
    Contour: Result from US segmentation (world coordinates).
    Alignment: Pseudo transform applied to CT surface for misalignment
    SurfaceFilter: Filter, which output surface with normals
    """
        surface = surfaceFilter.GetOutput()

        # Invert transform, such that the misalignment can be applied to US data
        inverse = vtk.vtkTransform()
        inverse.DeepCopy(alignment)
        inverse.PostMultiply()
        inverse.Inverse()

        # We do not want to move CT data, so we apply the inverse to US
        transformPolyDataFilter0 = vtk.vtkTransformPolyDataFilter()
        transformPolyDataFilter0.SetInputData(contour)
        transformPolyDataFilter0.SetTransform(inverse)
        transformPolyDataFilter0.Update()
        wrongContours = transformPolyDataFilter0.GetOutput()

        icp = vtk.vtkIterativeClosestPointTransform()
        icp.SetSource(wrongContours)  # US contour
        icp.SetTarget(surface)  # CT surface
        icp.GetLandmarkTransform().SetModeToRigidBody()
        icp.DebugOn()
        icp.SetMaximumNumberOfIterations(10)
        icp.StartByMatchingCentroidsOff()  # Must be off
        icp.SetMeanDistanceModeToRMS()
        icp.Modified()
        icp.Update()

        # Transform US to find RMSE
        transformPolyDataFilter1 = vtk.vtkTransformPolyDataFilter()
        transformPolyDataFilter1.SetInputData(wrongContours)
        transformPolyDataFilter1.SetTransform(icp)
        transformPolyDataFilter1.Update()

        # Compute RMSE
        correctedContours = transformPolyDataFilter1.GetOutput()
        rmse = CloudMeanDist(correctedContours, surface)

        # We cannot call Inverse on transform, since it is an ICP and will
        # issue a registration where source and target are interchanged
        mat = vtk.vtkMatrix4x4()
        mat.DeepCopy(icp.GetMatrix())

        # Emit done with output
        self.done.emit(rmse, mat, correctedContours)
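
As the comment above notes, calling Inverse() on a vtkIterativeClosestPointTransform would re-run the registration with source and target swapped, so the matrix is copied instead. If the inverse mapping is actually needed, a sketch of the safe route is to invert the copied vtkMatrix4x4 directly (helper name hypothetical):

import vtk

def invert_icp_matrix(icp_matrix):
    # Invert a copied ICP matrix without touching the ICP transform itself.
    inverse = vtk.vtkMatrix4x4()
    vtk.vtkMatrix4x4.Invert(icp_matrix, inverse)
    return inverse
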
    def applyICP(self, pList, iterNum):
        # Iterative closest point applied for number of iterations

        polyTarget = pList[0]
        polySource = pList[1]

        icp = vtk.vtkIterativeClosestPointTransform()
        icp.SetSource(polySource)
        icp.SetTarget(polyTarget)
        icp.GetLandmarkTransform().SetModeToRigidBody()
        icp.SetMaximumNumberOfIterations(iterNum)
        icp.Update()
        print "icp tranform is : ", icp

        return icp
Example #16
def get_icp(spoints, tpoints, mode='Similarity'):
    #iterative closest point, using VTK.
    sourcePoints = vtk.vtkPoints()
    sourceVertices = vtk.vtkCellArray()

    for i in range(0, spoints.shape[0]):
        row = spoints[i, :]
        id = sourcePoints.InsertNextPoint(row[:])
        sourceVertices.InsertNextCell(1)
        sourceVertices.InsertCellPoint(id)

    source = vtk.vtkPolyData()
    source.SetPoints(sourcePoints)
    source.SetVerts(sourceVertices)
    if vtk.VTK_MAJOR_VERSION <= 5:
        source.Update()

    targetPoints = vtk.vtkPoints()
    targetVertices = vtk.vtkCellArray()

    for i in range(0, tpoints.shape[0]):
        row = tpoints[i, :]
        id = targetPoints.InsertNextPoint(row[:])
        targetVertices.InsertNextCell(1)
        targetVertices.InsertCellPoint(id)
    target = vtk.vtkPolyData()
    target.SetPoints(targetPoints)
    target.SetVerts(targetVertices)
    if vtk.VTK_MAJOR_VERSION <= 5:
        target.Update()
    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetSource(source)
    icp.SetTarget(target)
    if mode == 'Affine':
        icp.GetLandmarkTransform().SetModeToAffine()
    elif mode == 'Rigid':
        icp.GetLandmarkTransform().SetModeToRigidBody()
    icp.SetMaximumNumberOfIterations(1)  # iterations=1 for finding nearest neighbors only
    icp.StartByMatchingCentroidsOn()
    icp.SetMaximumMeanDistance(0.001)
    icp.Modified()
    icp.Update()
    return icp
def vtk_icp(source,
            target,
            locator=None,
            max_iterations=100,
            max_landmarks=1000,
            check_mean_distance=False,
            maximum_mean_distance=0.001):
    """
    An iterative closest point algorithm, delegating to vtk.
    Target is a point set, source is a point cloud
    """

    if source.GetNumberOfCells() == 0:
        raise ValueError("vtk_icp needs a polydata surface",
                         source.GetNumberOfCells())

    vtk_icp_transform = vtkIterativeClosestPointTransform()
    vtk_icp_transform.GetLandmarkTransform().SetModeToRigidBody()

    vtk_icp_transform.SetSource(target)
    vtk_icp_transform.SetTarget(source)
    if locator is None:
        locator = vtkCellLocator()
        locator.SetDataSet(source)
        locator.SetNumberOfCellsPerBucket(1)
        locator.BuildLocator()

    vtk_icp_transform.SetLocator(locator)
    vtk_icp_transform.SetMaximumNumberOfIterations(max_iterations)
    vtk_icp_transform.SetMaximumNumberOfLandmarks(max_landmarks)
    vtk_icp_transform.SetCheckMeanDistance(check_mean_distance)
    vtk_icp_transform.SetMaximumMeanDistance(maximum_mean_distance)

    vtk_icp_transform.Modified()
    vtk_icp_transform.Update()

    result = vtkMatrix4x4()
    result = vtk_icp_transform.GetMatrix()

    inverted = vtkMatrix4x4()

    vtkMatrix4x4.Invert(result, inverted)

    return inverted
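
The function returns the inverted 4x4 matrix rather than the transform object; a short sketch (helper name hypothetical) for pushing individual points through it with a vtkTransform:

import vtk

def transform_points(matrix_4x4, points):
    # Map an iterable of (x, y, z) tuples through the matrix returned by vtk_icp.
    t = vtk.vtkTransform()
    t.SetMatrix(matrix_4x4)
    return [t.TransformPoint(p) for p in points]
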
def register(ref_poly, mv_poly):
    """
    Input:
    ref_poly: reference vertebra, type: vtkPolyData
    mv_poly:  moving vertebra, type: vtkPolyData
    Return:
    vtkMatrix4x4
    """

    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetSource(ref_poly)
    icp.SetTarget(mv_poly)
    icp.GetLandmarkTransform().SetModeToRigidBody()
    icp.SetMaximumNumberOfIterations(20)
    #icp.StartByMatchingCentroidsOn()
    icp.Modified()
    icp.Update()
    mat = icp.GetMatrix()
    return mat
Example #19
 def applyICPTransform(self):
     if (self.in2_pipe != None):
         icp = vtk.vtkIterativeClosestPointTransform()
         icp.SetTarget(self.in1_pipe.getPolyData())
         icp.SetSource(self.in2_pipe.getPolyData())
         icp.SetMaximumNumberOfIterations(10)
         icp.StartByMatchingCentroidsOn()
         icp.GetInverse()
         icp.Update()
         # Concatenate the transform
         mat = icp.GetMatrix()
         self.in2_pipe.setRigidBodyTransformConcatenateMatrix(mat)
         # Clean up
         self.pickerstyle.removePoints("in1_pipeline")
         self.pickerstyle.removePoints("in2_pipeline")
         self.refreshRenderWindow()
         self.updateGUI()
         self.statusBar().clearMessage()
         self.statusBar().showMessage("ICP transform complete", 4000)
Example #20
def Alignement(surf,surf_GT):
	print('Alignement ')
	# CenterSurf = surf.GetCenter()
	# print("center CenterSurf: ", CenterSurf)
	# print(' ')
	# CenterSurf_GT = surf_GT.GetCenter()
	# print("center CenterSurf_GT: ", CenterSurf_GT)
	# print(' ')
	# direction = np.array(list(surf_GT.GetCenter())) - np.array(list(surf.GetCenter()))
	# print('direction = ', direction)
	# print(' ')

	# trnf = vtk.vtkTransform()
	# trnf.Translate(direction)

	# tpd = vtk.vtkTransformPolyDataFilter()
	# tpd.SetTransform(trnf)
	# tpd.SetInputData(surf)
	# tpd.Update()

	# return tpd.GetOutput()


	icp = vtk.vtkIterativeClosestPointTransform()
	icp.StartByMatchingCentroidsOn()
	icp.SetSource(surf)
	icp.SetTarget(surf_GT)
	icp.GetLandmarkTransform().SetModeToRigidBody()
	icp.SetMaximumNumberOfLandmarks(100)
	icp.SetMaximumMeanDistance(.00001)
	icp.SetMaximumNumberOfIterations(500)
	icp.CheckMeanDistanceOn()
	icp.StartByMatchingCentroidsOn()
	icp.Update()

	lmTransform = icp.GetLandmarkTransform()
	transform = vtk.vtkTransformPolyDataFilter()
	transform.SetInputData(surf)
	transform.SetTransform(lmTransform)
	transform.SetTransform(icp)
	transform.Update()

	return transform.GetOutput()
Example #21
def icp(opts,args):
	v1 = nv.readVTK(args[0])
	v2 = nv.readVTK(args[1])
	icp = vtk.vtkIterativeClosestPointTransform()
	icp.SetSource(v2)
	icp.SetTarget(v1)
	icp.StartByMatchingCentroidsOn()
	icp.SetMaximumNumberOfIterations(500)
	icp.SetMaximumNumberOfLandmarks(20000)
	tx = icp.GetLandmarkTransform()
	tx.SetModeToRigidBody()
	icp.Update()
	tx = icp.GetLandmarkTransform()
	txf = vtk.vtkTransformPolyDataFilter()
	txf.SetInput(v2)
	txf.SetTransform(tx)
	txf.Update()
	v3 = txf.GetOutput()
	nv.writeVTK(args[2], v3)
	return
Example #22
def icp_transform(target,
                  source,
                  numberOfIterations=100,
                  number_landmarks=1000,
                  transform_mode='rigid'):
    icp = vtk.vtkIterativeClosestPointTransform()
    if transform_mode == 'rigid':
        icp.GetLandmarkTransform().SetModeToRigidBody()
    elif transform_mode == 'similarity':
        icp.GetLandmarkTransform().SetModeToSimilarity()
    else:
        raise ValueError('Invalid transform mode')
    icp.SetTarget(target)
    icp.SetSource(source)
    icp.SetMaximumNumberOfIterations(numberOfIterations)
    icp.SetMaximumNumberOfLandmarks(number_landmarks)  # set before Update() so it takes effect
    icp.StartByMatchingCentroidsOn()
    icp.Modified()
    icp.Update()
    return icp
Example #24
def get_icp_transform(source,
                      target,
                      max_n_iter=1000,
                      n_landmarks=1000,
                      reg_mode='similarity'):
    """
    reg_mode: 'rigid' = rotation + translation; 'similarity' = rigid + isotropic scaling
    """
    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetSource(source)
    icp.SetTarget(target)
    if reg_mode == 'rigid':
        icp.GetLandmarkTransform().SetModeToRigidBody()
    elif reg_mode == 'similarity':
        icp.GetLandmarkTransform().SetModeToSimilarity()
    icp.SetMaximumNumberOfIterations(max_n_iter)
    icp.SetMaximumNumberOfLandmarks(n_landmarks)  # set before Update() so it takes effect
    icp.StartByMatchingCentroidsOn()
    icp.Modified()
    icp.Update()
    return icp
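
Since 'rigid' and 'similarity' differ only by an isotropic scale, the fitted scale can be read back from the returned transform; a quick check (a sketch, not part of the snippet above):

import numpy as np

def icp_scale_factor(icp):
    # In similarity mode the upper-left 3x3 block is scale * rotation, so the cube
    # root of its determinant recovers the isotropic scale (rigid mode gives ~1).
    m = icp.GetMatrix()
    block = np.array([[m.GetElement(i, j) for j in range(3)] for i in range(3)])
    return abs(np.linalg.det(block)) ** (1.0 / 3.0)
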
    def surfaceRegistration(self, initialModel, inputModel):
        # Registers the input scan to the initial scan where the breast boundaries were defined

        # Uses ICP to create the transform
        icpTransform = vtk.vtkIterativeClosestPointTransform()
        icpTransform.SetSource(initialModel.GetPolyData())
        icpTransform.SetTarget(inputModel.GetPolyData())
        icpTransform.GetLandmarkTransform().SetModeToRigidBody()
        icpTransform.SetMaximumNumberOfIterations(100)
        icpTransform.Inverse()
        icpTransform.Modified()
        icpTransform.Update()

        # Applies the transform
        transformNode = slicer.vtkMRMLLinearTransformNode()
        transformNode.SetAndObserveMatrixTransformToParent(icpTransform.GetMatrix())
        slicer.mrmlScene.AddNode(transformNode)

        inputModel.SetAndObserveTransformNodeID(transformNode.GetID())
        inputModel.HardenTransform()

        return transformNode
Example #26
def align(source, target, iters=100, legend=None):
    '''
    Return a copy of the source actor aligned to the
    target actor through the vtkIterativeClosestPointTransform class.
    '''
    sprop = source.GetProperty()
    source = vu.polydata(source)
    target = vu.polydata(target)
    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetSource(source)
    icp.SetTarget(target)
    icp.SetMaximumNumberOfIterations(iters)
    icp.StartByMatchingCentroidsOn()
    icp.Update()
    icpTransformFilter = vtk.vtkTransformPolyDataFilter()
    vu.setInput(icpTransformFilter, source)
    icpTransformFilter.SetTransform(icp)
    icpTransformFilter.Update()
    poly = icpTransformFilter.GetOutput()
    actor = vu.makeActor(poly, legend=legend)
    actor.SetProperty(sprop)
    setattr(actor, 'transform', icp.GetLandmarkTransform())
    return actor
Example #27
def Alignement(surf,surf_GT):
    copy_surf = vtk.vtkPolyData()
    copy_surf.DeepCopy(surf)

    icp = vtk.vtkIterativeClosestPointTransform()
    icp.StartByMatchingCentroidsOn()
    icp.SetSource(copy_surf)
    icp.SetTarget(surf_GT)
    icp.GetLandmarkTransform().SetModeToRigidBody()
    icp.SetMaximumNumberOfLandmarks(100)
    icp.SetMaximumMeanDistance(.00001)
    icp.SetMaximumNumberOfIterations(500)
    icp.CheckMeanDistanceOn()
    icp.StartByMatchingCentroidsOn()
    icp.Update()

    lmTransform = icp.GetLandmarkTransform()
    transform = vtk.vtkTransformPolyDataFilter()
    transform.SetInputData(copy_surf)
    transform.SetTransform(lmTransform)
    transform.SetTransform(icp)
    transform.Update()

    return surf, transform.GetOutput()
    def _deform(self):
        r"""
        Calculates the affine transform that best maps the reference polygonal
        surface to its corresponding deformed surface. This transform is calculated
        through an iterative closest point optimization that seeks to minimize
        the sum of distances between the reference surface vertices and the current
        affine transformed surface.

        Assuming a uniform deformation, the non-translational elements of this affine
        transform compose the deformation gradient :math:`\mathbf{F}`. The Green-Lagrange
        strain tensor is then defined as

        :math:`\mathbf{E} = \frac{1}{2}(\mathbf{F}^T.\mathbf{F} - \mathbf{1})`,

        where :math:`\mathbf{1}` is the identity.

        Returns
        -------
        cell_strains
        """
        for i in range(len(self.rcentroids)):
            # volumetric strains
            self.vstrains.append(old_div(self.dvols[i], self.rvols[i]) - 1)

            ICP = vtk.vtkIterativeClosestPointTransform()
            rcopy = vtk.vtkPolyData()
            dcopy = vtk.vtkPolyData()
            rcopy.DeepCopy(self.rsurfs[i])
            dcopy.DeepCopy(self.dsurfs[i])
            ICP.SetSource(rcopy)
            ICP.SetTarget(dcopy)
            if self.rigidInitial:
                ICP.GetLandmarkTransform().SetModeToRigidBody()
                ICP.SetMaximumMeanDistance(0.001)
                ICP.SetCheckMeanDistance(1)
                ICP.SetMaximumNumberOfIterations(5000)
                ICP.StartByMatchingCentroidsOn()
                ICP.Update()
                trans = vtk.vtkTransform()
                trans.SetMatrix(ICP.GetMatrix())
                trans.Update()
                self.rigidTransforms.append(trans)
                rot = vtk.vtkTransformPolyDataFilter()
                rot.SetInputData(rcopy)
                rot.SetTransform(trans)
                rot.Update()
                ICP.GetLandmarkTransform().SetModeToAffine()
                ICP.SetSource(rot.GetOutput())
                ICP.Update()
            else:
                ICP.GetLandmarkTransform().SetModeToAffine()
                ICP.SetMaximumMeanDistance(0.001)
                ICP.SetCheckMeanDistance(1)
                ICP.SetMaximumNumberOfIterations(5000)
                ICP.StartByMatchingCentroidsOn()
                ICP.Update()

            F = np.zeros((3, 3), float)
            for j in range(3):
                for k in range(3):
                    F[j, k] = ICP.GetMatrix().GetElement(j, k)
            E = 0.5 * (np.dot(F.T, F) - np.eye(3))
            self.cell_strains.append(E)
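
The strain step in _deform() amounts to a few lines of numpy once the affine ICP matrix is available; a standalone sketch of the same computation:

import numpy as np

def green_lagrange_strain(icp_matrix):
    # Deformation gradient F = upper-left 3x3 block of the affine ICP matrix;
    # Green-Lagrange strain E = 0.5 * (F^T F - I), as in _deform() above.
    F = np.array([[icp_matrix.GetElement(i, j) for j in range(3)] for i in range(3)])
    return 0.5 * (np.dot(F.T, F) - np.eye(3))
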
Example #29
def main():
    colors = vtk.vtkNamedColors()

    src_fn, tgt_fn = get_program_parameters()
    print('Loading source:', src_fn)
    sourcePolyData = ReadPolyData(src_fn)
    # Save the source polydata in case the align does not improve
    # segmentation
    originalSourcePolyData = vtk.vtkPolyData()
    originalSourcePolyData.DeepCopy(sourcePolyData)

    print('Loading target:', tgt_fn)
    targetPolyData = ReadPolyData(tgt_fn)

    # If the target orientation is markedly different,
    # you may need to apply a transform to orient the
    # target with the source.
    # For example, when using Grey_Nurse_Shark.stl as the source and
    # greatWhite.stl as the target, you need to uncomment the following
    # two rotations.
    trnf = vtk.vtkTransform()
    # trnf.RotateX(90)
    # trnf.RotateY(-90)
    tpd = vtk.vtkTransformPolyDataFilter()
    tpd.SetTransform(trnf)
    tpd.SetInputData(targetPolyData)
    tpd.Update()

    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renderWindow)

    distance = vtk.vtkHausdorffDistancePointSetFilter()
    distance.SetInputData(0, tpd.GetOutput())
    distance.SetInputData(1, sourcePolyData)
    distance.Update()

    distanceBeforeAlign = distance.GetOutput(0).GetFieldData().GetArray(
        'HausdorffDistance').GetComponent(0, 0)

    # Get initial alignment using oriented bounding boxes
    AlignBoundingBoxes(sourcePolyData, tpd.GetOutput())

    distance.SetInputData(0, tpd.GetOutput())
    distance.SetInputData(1, sourcePolyData)
    distance.Modified()
    distance.Update()
    distanceAfterAlign = distance.GetOutput(0).GetFieldData().GetArray(
        'HausdorffDistance').GetComponent(0, 0)

    bestDistance = min(distanceBeforeAlign, distanceAfterAlign)

    if distanceAfterAlign > distanceBeforeAlign:
        sourcePolyData.DeepCopy(originalSourcePolyData)

    # Refine the alignment using IterativeClosestPoint
    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetSource(sourcePolyData)
    icp.SetTarget(tpd.GetOutput())
    icp.GetLandmarkTransform().SetModeToRigidBody()
    icp.SetMaximumNumberOfLandmarks(100)
    icp.SetMaximumMeanDistance(.00001)
    icp.SetMaximumNumberOfIterations(500)
    icp.CheckMeanDistanceOn()
    icp.StartByMatchingCentroidsOn()
    icp.Update()

    #  print(icp)

    lmTransform = icp.GetLandmarkTransform()
    transform = vtk.vtkTransformPolyDataFilter()
    transform.SetInputData(sourcePolyData)
    transform.SetTransform(lmTransform)
    transform.SetTransform(icp)
    transform.Update()

    distance.SetInputData(0, tpd.GetOutput())
    distance.SetInputData(1, transform.GetOutput())
    distance.Update()

    distanceAfterICP = distance.GetOutput(0).GetFieldData().GetArray(
        'HausdorffDistance').GetComponent(0, 0)

    if distanceAfterICP < bestDistance:
        bestDistance = distanceAfterICP

    print(
        'Distance before, after align, after ICP, min: {:0.5f}, {:0.5f}, {:0.5f}, {:0.5f}'
        .format(distanceBeforeAlign, distanceAfterAlign, distanceAfterICP,
                bestDistance))
    # Select
    sourceMapper = vtk.vtkDataSetMapper()
    if bestDistance == distanceBeforeAlign:
        sourceMapper.SetInputData(originalSourcePolyData)
        print('Using original alignment')
    elif bestDistance == distanceAfterAlign:
        sourceMapper.SetInputData(sourcePolyData)
        print('Using alignment by OBB')
    else:
        sourceMapper.SetInputConnection(transform.GetOutputPort())
        print('Using alignment by ICP')
    sourceMapper.ScalarVisibilityOff()

    sourceActor = vtk.vtkActor()
    sourceActor.SetMapper(sourceMapper)
    sourceActor.GetProperty().SetOpacity(.6)
    sourceActor.GetProperty().SetDiffuseColor(colors.GetColor3d('White'))
    renderer.AddActor(sourceActor)

    targetMapper = vtk.vtkDataSetMapper()
    targetMapper.SetInputData(tpd.GetOutput())
    targetMapper.ScalarVisibilityOff()

    targetActor = vtk.vtkActor()
    targetActor.SetMapper(targetMapper)
    targetActor.GetProperty().SetDiffuseColor(colors.GetColor3d('Tomato'))
    renderer.AddActor(targetActor)

    renderWindow.AddRenderer(renderer)
    renderer.SetBackground(colors.GetColor3d("sea_green_light"))
    renderer.UseHiddenLineRemovalOn()

    renderWindow.SetSize(640, 480)
    renderWindow.Render()
    renderWindow.SetWindowName('AlignTwoPolyDatas')
    renderWindow.Render()
    interactor.Start()
# don't need this now
trans_target = vtk.vtkTransform()
trans_target.Scale(0.001, 0.001, 0.001)

trans_target_filt = vtk.vtkTransformPolyDataFilter()
trans_target_filt.SetInputConnection(reader2.GetOutputPort())
trans_target_filt.SetTransform(trans_target)
trans_target_filt.Update()

source = vtk.vtkPolyData()
target = vtk.vtkPolyData()
source.ShallowCopy(pass_filt.GetOutput())
target.ShallowCopy(trans_target_filt.GetOutput())

icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(source)
icp.SetTarget(target)
icp.SetMaximumNumberOfLandmarks(source.GetNumberOfPoints())
#icp.SetCheckMeanDistance(1)
#icp.SetMaximumMeanDistance( 0.5)
icp.SetMaximumNumberOfIterations(400)
#icp.SetMaximumNumberOfLandmarks(500)
icp.GetLandmarkTransform().SetModeToRigidBody()

icp.Modified()
icp.Update()
mat = vtk.vtkMatrix4x4()
mat = icp.GetMatrix()
print(mat)
Example #32
target.SetVerts(targetVertices)
target.Update()

# ============ display target points ==============
print "Displaying target points..."
pointCount = 3
index = 0
while index < pointCount:
    point = [0,0,0]
    targetPoints.GetPoint(index, point)
    print "target point[%s]=%s" % (index,point)
    index += 1

print "Running ICP ----------------"
# ============ run ICP ==============
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(source)
icp.SetTarget(target)
icp.GetLandmarkTransform().SetModeToRigidBody()
#icp.DebugOn()
icp.SetMaximumNumberOfIterations(20)
icp.StartByMatchingCentroidsOn()
icp.Modified()
icp.Update()

icpTransformFilter = vtk.vtkTransformPolyDataFilter()
icpTransformFilter.SetInput(source)
icpTransformFilter.SetTransform(icp)
icpTransformFilter.Update()

transformedSource = icpTransformFilter.GetOutput()
 def register(self, fixedData, movingData, discard = False):
     index = self.gui.getDataIndex({'Contour': 0, 'Centerline': 1}, 'Select the object')
     if index is None:
         return None, None, None
     if index == 0:
         fixed_points = fixedData.getPointSet('Contour')
         moving_points = movingData.getPointSet('Contour')
     else:
         fixed_points = fixedData.getPointSet('Centerline')
         moving_points = movingData.getPointSet('Centerline')
     
     fixed_res = fixedData.getResolution().tolist()
     moving_res = movingData.getResolution().tolist()
     fixed_points = fixed_points.copy()[npy.where(fixed_points[:, 0] >= 0)]
     moving_points = moving_points.copy()[npy.where(moving_points[:, 0] >= 0)]
     # Use the bifurcation as the initial position
     fixed_bif = db.getBifurcation(fixed_points)
     moving_bif = db.getBifurcation(moving_points)
     if (fixed_bif < 0) or (moving_bif < 0):
         fixed_min = 0
     else:
         temp = moving_points[:, 2:]
         moving_delta = moving_bif - npy.min(temp[npy.where(npy.round(temp[:, 1]) == 0), 0])
         fixed_min = fixed_bif - moving_delta * moving_res[-1] / fixed_res[-1]
     #print moving_res
     #print fixed_res
     
     # Augmentation of pointset
     fixed = fixed_points[npy.where(fixed_points[:, 2] >= fixed_min)]
     moving = moving_points.copy()
     fixed = util.augmentPointset(fixed, int(fixed_res[-1] / moving_res[-1] + 0.5), moving.shape[0], fixed_bif)
     moving = util.augmentPointset(moving, int(moving_res[-1] / fixed_res[-1] + 0.5), fixed.shape[0], moving_bif)
     
     fixed = fixed[:, :3]
     moving = moving[:, :3]
     fixed[:, :3] *= fixed_res[:3]
     moving[:, :3] *= moving_res[:3]
     if (fixed_bif >= 0) and (moving_bif >= 0):
         fixed[:, 2] -= (fixed_bif * fixed_res[2] - moving_bif * moving_res[2])
     print(fixed.shape[0], moving.shape[0])
     #return None, None, None
     
     sourcePoints = vtk.vtkPoints()
     sourceVertices = vtk.vtkCellArray()
     for x in moving:
         id = sourcePoints.InsertNextPoint(x[0], x[1], x[2])
         sourceVertices.InsertNextCell(1)
         sourceVertices.InsertCellPoint(id)
     source = vtk.vtkPolyData()
     source.SetPoints(sourcePoints)
     source.SetVerts(sourceVertices)
     
     targetPoints = vtk.vtkPoints()
     targetVertices = vtk.vtkCellArray()
     for x in fixed:
         id = targetPoints.InsertNextPoint(x[0], x[1], x[2])
         targetVertices.InsertNextCell(1)
         targetVertices.InsertCellPoint(id)
     target = vtk.vtkPolyData()
     target.SetPoints(targetPoints)
     target.SetVerts(targetVertices)
     
     icp = vtk.vtkIterativeClosestPointTransform()
     icp.SetSource(source)
     icp.SetTarget(target)
     icp.GetLandmarkTransform().SetModeToRigidBody()
     icp.Modified()
     icp.Update()
     
     icp_filter = vtk.vtkTransformPolyDataFilter()
     icp_filter.SetInput(source)
     icp_filter.SetTransform(icp)
     icp_filter.Update()
     
     # Get the result transformation parameters
     matrix = icp.GetMatrix()
     T = ml.mat([matrix.GetElement(0, 3), matrix.GetElement(1, 3), matrix.GetElement(2, 3)]).T;
     R = ml.mat([[matrix.GetElement(0, 0), matrix.GetElement(0, 1), matrix.GetElement(0, 2)], 
                 [matrix.GetElement(1, 0), matrix.GetElement(1, 1), matrix.GetElement(1, 2)], 
                 [matrix.GetElement(2, 0), matrix.GetElement(2, 1), matrix.GetElement(2, 2)]]).I;
     if (fixed_bif >= 0) and (moving_bif >= 0):
         T[2] += (fixed_bif * fixed_res[2] - moving_bif * moving_res[2])
     
     moving_points = movingData.getPointSet('Contour').copy()
     moving_center = movingData.getPointSet('Centerline').copy()                
     new_trans_points, result_center_points = util.resliceTheResultPoints(moving_points, moving_center, 20, moving_res, fixed_res, discard, R, T)
     
     T = -T
     T = R * T
     
     transform = sitk.Transform(3, sitk.sitkAffine)
     para = R.reshape(1, -1).tolist()[0] + T.T.tolist()[0]
     transform.SetParameters(para)
     
     movingImage = movingData.getSimpleITKImage()
     fixedImage = fixedData.getSimpleITKImage()
     resultImage = sitk.Resample(movingImage, fixedImage, transform, sitk.sitkLinear, 0, sitk.sitkFloat32)
     
     return sitk.GetArrayFromImage(resultImage), {'Contour': new_trans_points, 'Centerline': result_center_points}, para + [0, 0, 0]
Example #34
    def testICPTransform(self):

        renWin = vtk.vtkRenderWindow()

        #iren = vtk.vtkRenderWindowInteractor()
        #iren.SetRenderWindow(renWin)

        # Create objects

        sscale = {2: [0.7, 0.7, 0.7], 3: [0.5, 0.5, 0.5]}
        scenter = {2: [-0.25, 0.25, 0.0], 3: [0.4, -0.3, 0.0]}
        scolors = {2: [0.2, 0.6, 0.1], 3: [0.1, 0.2, 0.6]}

        s = dict()  # The super quadric sources

        for sidx in range(1, 4):
            s.update({sidx: vtk.vtkSuperquadricSource()})
            s[sidx].ToroidalOff()
            s[sidx].SetThetaResolution(20)
            s[sidx].SetPhiResolution(20)
            s[sidx].SetPhiRoundness(0.7 + (sidx - 2) * 0.4)
            s[sidx].SetThetaRoundness(0.85 + (sidx - 1) * 0.4)
            if sidx in sscale:
                s[sidx].SetScale(sscale[sidx])
            if sidx in scenter:
                s[sidx].SetCenter(scenter[sidx])

            s[sidx].Update()

        ren = dict()  # Renderers
        sm = dict()  # Mappers for the super quadric source
        sa = dict()  # Actors for the super quadric source
        fe = dict()  # Feature edges
        fem = dict()  # Feature edges mappers
        fea = dict()  # Feature edges actors
        icp = dict()  # Iterated closest point transforms

        # Create the renderers

        for ridx in range(1, 4):
            ren.update({ridx: vtk.vtkRenderer()})
            ren[ridx].SetViewport((ridx - 1) / 3.0, 0.0, ridx / 3.0, 1.0)
            ren[ridx].SetBackground(0.7, 0.8, 1.0)
            cam = ren[ridx].GetActiveCamera()
            cam.SetPosition(1.7, 1.4, 1.7)
            renWin.AddRenderer(ren[ridx])

            # renderer 1 has all 3 objects, render i has object 1 and i (i=2, 3)
            # add actors (corresponding to the objects) to each renderer
            # and ICP transforms from objects i or to 1.
            # object 1 has feature edges too.

            for sidx in range(1, 4):

                if ridx == 1 or sidx == 1 or ridx == sidx:
                    sm.update({ridx: {sidx: vtk.vtkPolyDataMapper()}})
                    sm[ridx][sidx].SetInputConnection(s[sidx].GetOutputPort())

                    sa.update({ridx: {sidx: vtk.vtkActor()}})
                    sa[ridx][sidx].SetMapper(sm[ridx][sidx])

                    prop = sa[ridx][sidx].GetProperty()
                    if sidx in scolors:
                        prop.SetColor(scolors[sidx])

                    if sidx == 1:
                        prop.SetOpacity(0.2)

                        fe.update({ridx: {sidx: vtk.vtkFeatureEdges()}})
                        src = s[sidx]
                        fe[ridx][sidx].SetInputConnection(src.GetOutputPort())
                        fe[ridx][sidx].BoundaryEdgesOn()
                        fe[ridx][sidx].ColoringOff()
                        fe[ridx][sidx].ManifoldEdgesOff()

                        fem.update({ridx: {sidx: vtk.vtkPolyDataMapper()}})
                        fem[ridx][sidx].SetInputConnection(
                            fe[ridx][sidx].GetOutputPort())
                        fem[ridx][sidx].SetResolveCoincidentTopologyToPolygonOffset()

                        fea.update({ridx: {sidx: vtk.vtkActor()}})
                        fea[ridx][sidx].SetMapper(fem[ridx][sidx])

                        ren[ridx].AddActor(fea[ridx][sidx])

                    ren[ridx].AddActor(sa[ridx][sidx])

                if ridx > 1 and ridx == sidx:
                    icp.update({
                        ridx: {
                            sidx: vtk.vtkIterativeClosestPointTransform()
                        }
                    })
                    icp[ridx][sidx].SetSource(s[sidx].GetOutput())
                    icp[ridx][sidx].SetTarget(s[1].GetOutput())
                    icp[ridx][sidx].SetCheckMeanDistance(1)
                    icp[ridx][sidx].SetMaximumMeanDistance(0.001)
                    icp[ridx][sidx].SetMaximumNumberOfIterations(30)
                    icp[ridx][sidx].SetMaximumNumberOfLandmarks(50)
                    sa[ridx][sidx].SetUserTransform(icp[ridx][sidx])

        icp[3][3].StartByMatchingCentroidsOn()

        renWin.SetSize(400, 100)

        # render and interact with data

        iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin)
        renWin.Render()

        img_file = "TestICPTransform.png"
        vtk.test.Testing.compareImage(
            iRen.GetRenderWindow(),
            vtk.test.Testing.getAbsImagePath(img_file),
            threshold=25)
        vtk.test.Testing.interact()
Example #35
def register(target_df, source_df, df_to_transform):
    """Register source on top of target by ICP and transform df in place"""
    # Create vtk data structures for target points
    TargetPoints = vtk.vtkPoints()
    TargetVertices = vtk.vtkCellArray()
    for cell in target_df.index:
        id = TargetPoints.InsertNextPoint(
            target_df['x'][cell],
            target_df['y'][cell],
            target_df['z'][cell])
        TargetVertices.InsertNextCell(1)
        TargetVertices.InsertCellPoint(id)

    TargetPolyData = vtk.vtkPolyData()
    TargetPolyData.SetPoints(TargetPoints)
    TargetPolyData.SetVerts(TargetVertices)
    if vtk.VTK_MAJOR_VERSION <= 5:
        TargetPolyData.Update()

    # Create vtk data structures for source points
    SourcePoints = vtk.vtkPoints()
    SourceVertices = vtk.vtkCellArray()
    for cell in source_df.index:
        id = SourcePoints.InsertNextPoint(
            source_df['x'][cell],
            source_df['y'][cell],
            source_df['z'][cell])
        SourceVertices.InsertNextCell(1)
        SourceVertices.InsertCellPoint(id)

    SourcePolyData = vtk.vtkPolyData()
    SourcePolyData.SetPoints(SourcePoints)
    SourcePolyData.SetVerts(SourceVertices)
    if vtk.VTK_MAJOR_VERSION <= 5:
        SourcePolyData.Update()

    # Create vtk data structures for all points that need to be transformed
    PointsToTransform = vtk.vtkPoints()
    VerticesToTransform = vtk.vtkCellArray()
    for cell in df_to_transform.index:
        id = PointsToTransform.InsertNextPoint(
            df_to_transform['x'][cell],
            df_to_transform['y'][cell],
            df_to_transform['z'][cell])
        VerticesToTransform.InsertNextCell(1)
        VerticesToTransform.InsertCellPoint(id)

    PolyDataToTransform = vtk.vtkPolyData()
    PolyDataToTransform.SetPoints(PointsToTransform)
    PolyDataToTransform.SetVerts(VerticesToTransform)
    if vtk.VTK_MAJOR_VERSION <= 5:
        PolyDataToTransform.Update()

    # Register source df on top of target df
    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetSource(SourcePolyData)
    icp.SetTarget(TargetPolyData)
    icp.GetLandmarkTransform().SetModeToRigidBody()
    icp.SetMaximumNumberOfIterations(20)
    icp.StartByMatchingCentroidsOn()
    icp.Modified()
    icp.Update()

    # Transform all cells
    icpTransformFilter = vtk.vtkTransformPolyDataFilter()
    if vtk.VTK_MAJOR_VERSION <= 5:
        icpTransformFilter.SetInput(PolyDataToTransform)
    else:
        icpTransformFilter.SetInputData(PolyDataToTransform)

    icpTransformFilter.SetTransform(icp)
    icpTransformFilter.Update()

    transformedSource = icpTransformFilter.GetOutput()

    for i, index in enumerate(df_to_transform.index):
        point = [0,0,0]
        transformedSource.GetPoint(i, point)
        df_to_transform.loc[index, 'x'] = point[0]
        df_to_transform.loc[index, 'y'] = point[1]
        df_to_transform.loc[index, 'z'] = point[2]
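
A minimal usage sketch with toy data (the frames only need 'x', 'y' and 'z' columns, as register() expects):

import pandas as pd

target_df = pd.DataFrame({'x': [0.0, 1.0, 0.0], 'y': [0.0, 0.0, 1.0], 'z': [0.0, 0.0, 0.0]})
source_df = target_df + 5.0        # same points, translated copy
cells_df = source_df.copy()        # extra points carried along with the registration
register(target_df, source_df, cells_df)   # cells_df is modified in place
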
Example #37
def ICP(args):
    # This method takes one source surface file and ICP-aligns it to a set of target surfaces.

    targetNameList = args[0]  # List of target names
    sourceSurfaceFile = args[1]  # name and full path of the source surface.
    dirSurface = args[2]  # Path of the surface files.
    dirOutputSurface = args[3]  # Output path the aligned source surfaces.
    dirLandmark = args[4]  #Path of the landmark files
    sourceLandmarkFile = args[5]
    dirOutputLandmarks = args[6]

    sourceName = sourceSurfaceFile.split('\\')[-1][:-4]

    #sourceSurfaceFile = os.path.join(dirSurface,sourceName + '.vtk')
    targetSurfaceFile = os.path.join(dirSurface, targetNameList + '.vtk')
    filename = os.path.join(dirOutputSurface, targetNameList + '.vtk')

    # Reading surfaces
    reader = vtk.vtkPolyDataReader()
    reader.SetFileName(sourceSurfaceFile)
    reader.Update()
    sourceSurface = reader.GetOutput()

    reader = vtk.vtkPolyDataReader()
    reader.SetFileName(targetSurfaceFile)
    reader.Update()
    targetSurface = reader.GetOutput()
    if targetSurface.GetNumberOfPoints() < 1:
        print('Could not read', targetSurfaceFile)
        return

    #------------------------------------------------------------------------------------------------
    # Reading landmarks
    targetLandmarkFile = os.path.join(dirLandmark, targetNameList + '.txt')

    targetLandmarks = ((np.loadtxt(targetLandmarkFile, skiprows=2)))
    if not os.path.isfile(sourceLandmarkFile):
        print('\nSource Surface: %s has no landmarks file' % sourceName)
        return np.float64(), np.float64()
    sourceLandmarks = ((np.loadtxt(sourceLandmarkFile, skiprows=2)))

    numberOfLandmarks = np.size(targetLandmarks, 0)

    # Set points in vtk
    sourcePoints = vtk.vtkPoints()
    for point in sourceLandmarks:
        sourcePoints.InsertNextPoint(point)
    source = vtk.vtkPolyData()
    source.SetPoints(sourcePoints)

    targetPoints = vtk.vtkPoints()
    for point in targetLandmarks:
        targetPoints.InsertNextPoint(point)
    target = vtk.vtkPolyData()
    target.SetPoints(targetPoints)

    #------------------------------------------------------------------------------------------------
    # Find landmark transformation
    landmarkTransform = vtk.vtkLandmarkTransform()
    landmarkTransform.SetSourceLandmarks(sourcePoints)
    landmarkTransform.SetTargetLandmarks(targetPoints)
    landmarkTransform.SetModeToSimilarity()
    landmarkTransform.Update()

    # Apply landmarks transformation to source surface
    TransformFilter = vtk.vtkTransformPolyDataFilter()
    TransformFilter.SetInputData(sourceSurface)
    TransformFilter.SetTransform(landmarkTransform)
    TransformFilter.Update()
    transformedSourceSurface = TransformFilter.GetOutput()

    # Apply landmarks transformation to source landmarks
    TransformFilter = vtk.vtkTransformPolyDataFilter()
    TransformFilter.SetInputData(source)
    TransformFilter.SetTransform(landmarkTransform)
    TransformFilter.Update()
    transformedSourceLandmarks = TransformFilter.GetOutput()

    #------------------------------------------------------------------------------------------------
    # Find icp transformation
    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetSource(transformedSourceSurface)
    icp.SetTarget(targetSurface)
    icp.GetLandmarkTransform().SetModeToRigidBody()
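    # The landmark similarity fit above provides the initialisation; ICP then refines
    # the alignment with a rigid-body transform against the full target surface.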

    icp.Modified()
    icp.Update()
    #------------------------------------------------------------------------------------------------
    # Apply icp transformation to transformed source surface
    TransformFilter = vtk.vtkTransformPolyDataFilter()
    TransformFilter.SetInputData(transformedSourceSurface)
    TransformFilter.SetTransform(icp)
    TransformFilter.Update()
    icptransformedSourceSurface = TransformFilter.GetOutput()

    # Apply icp transformation to transformed source landmarks
    TransformFilter = vtk.vtkTransformPolyDataFilter()
    TransformFilter.SetInputData(transformedSourceLandmarks)
    TransformFilter.SetTransform(icp)
    TransformFilter.Update()
    icptransformedSourceLandmarks = TransformFilter.GetOutput()

    #===========================================================================
    # RMS_transformedSurface = Metrics.RMS(icptransformedSourceSurface,targetSurface,VTKobj=True)
    # RMS_SourceTarget = Metrics.RMS(sourceSurface,targetSurface,VTKobj=True)
    #===========================================================================

    #------------------------------------------------------------------------------------------------
    # Write transformed source surface to .vtk file

    writer = vtk.vtkPolyDataWriter()
    writer.SetInputData(icptransformedSourceSurface)
    #writer.SetInputData(transformedSourceSurface)
    writer.SetFileName(filename)
    writer.Write()

    # Write transformed landmarks to .txt file readable by elastix
    npTranformedLandmarks = np.zeros([numberOfLandmarks, 3])
    for index in range(numberOfLandmarks):
        point = [0, 0, 0]
        icptransformedSourceLandmarks.GetPoint(index, point)
        #transformedSourceLandmarks.GetPoint(index, point)
        npTranformedLandmarks[index, :] = point
    filename = os.path.join(dirOutputLandmarks, targetNameList + '.txt')

    with open(filename, 'w+') as f:
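        # elastix input point files start with the word 'point' (or 'index'), then the
        # number of points, then one "x y z" line per landmark.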
        f.write('point\n')
        f.write(str(numberOfLandmarks))
        for idx in range(numberOfLandmarks):
            f.write('\n' + str(npTranformedLandmarks[idx, 0]) + ' ')
            for Val in npTranformedLandmarks[idx, 1:]:
                f.write(str(Val) + ' ')
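
A minimal call of the helper above might look like the following sketch; the case name and directory layout are made-up placeholders, chosen only to show how the positional arguments map onto the parameters unpacked at the top of ICP().

# Hypothetical usage of ICP(); all paths below are illustrative assumptions.
args = [
    'case01',                            # target name: expects case01.vtk and case01.txt
    r'C:\data\surfaces\template.vtk',    # source surface file
    r'C:\data\surfaces',                 # directory holding the target surfaces
    r'C:\data\aligned\surfaces',         # output directory for aligned surfaces
    r'C:\data\landmarks',                # directory holding the target landmark files
    r'C:\data\landmarks\template.txt',   # source landmark file
    r'C:\data\aligned\landmarks',        # output directory for transformed landmarks
]
ICP(args)
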
Beispiel #38
0
    def register(self, fixedData, movingData, discard=False):
        index = self.gui.getDataIndex({
            'Contour': 0,
            'Centerline': 1
        }, 'Select the object')
        if index is None:
            return None, None, None
        if index == 0:
            fixed_points = fixedData.getPointSet('Contour')
            moving_points = movingData.getPointSet('Contour')
        else:
            fixed_points = fixedData.getPointSet('Centerline')
            moving_points = movingData.getPointSet('Centerline')

        fixed_res = fixedData.getResolution().tolist()
        moving_res = movingData.getResolution().tolist()
        fixed_points = fixed_points.copy()[npy.where(fixed_points[:, 0] >= 0)]
        moving_points = moving_points.copy()[npy.where(moving_points[:, 0] >= 0)]
        # Use the bifurcation as the initial position
        fixed_bif = db.getBifurcation(fixed_points)
        moving_bif = db.getBifurcation(moving_points)
        if (fixed_bif < 0) or (moving_bif < 0):
            fixed_min = 0
        else:
            temp = moving_points[:, 2:]
            moving_delta = moving_bif - npy.min(temp[npy.where(npy.round(temp[:, 1]) == 0), 0])
            fixed_min = fixed_bif - moving_delta * moving_res[-1] / fixed_res[-1]
        #print moving_res
        #print fixed_res

        # Augmentation of pointset
        fixed = fixed_points[npy.where(fixed_points[:, 2] >= fixed_min)]
        moving = moving_points.copy()
        fixed = util.augmentPointset(fixed,
                                     int(fixed_res[-1] / moving_res[-1] + 0.5),
                                     moving.shape[0], fixed_bif)
        moving = util.augmentPointset(
            moving, int(moving_res[-1] / fixed_res[-1] + 0.5), fixed.shape[0],
            moving_bif)

        fixed = fixed[:, :3]
        moving = moving[:, :3]
        fixed[:, :3] *= fixed_res[:3]
        moving[:, :3] *= moving_res[:3]
        if (fixed_bif >= 0) and (moving_bif >= 0):
            fixed[:, 2] -= (fixed_bif * fixed_res[2] - moving_bif * moving_res[2])
        print fixed.shape[0], moving.shape[0]
        #return None, None, None

        sourcePoints = vtk.vtkPoints()
        sourceVertices = vtk.vtkCellArray()
        for x in moving:
            id = sourcePoints.InsertNextPoint(x[0], x[1], x[2])
            sourceVertices.InsertNextCell(1)
            sourceVertices.InsertCellPoint(id)
        source = vtk.vtkPolyData()
        source.SetPoints(sourcePoints)
        source.SetVerts(sourceVertices)

        targetPoints = vtk.vtkPoints()
        targetVertices = vtk.vtkCellArray()
        for x in fixed:
            id = targetPoints.InsertNextPoint(x[0], x[1], x[2])
            targetVertices.InsertNextCell(1)
            targetVertices.InsertCellPoint(id)
        target = vtk.vtkPolyData()
        target.SetPoints(targetPoints)
        target.SetVerts(targetVertices)

        icp = vtk.vtkIterativeClosestPointTransform()
        icp.SetSource(source)
        icp.SetTarget(target)
        icp.GetLandmarkTransform().SetModeToRigidBody()
        icp.Modified()
        icp.Update()

        icp_filter = vtk.vtkTransformPolyDataFilter()
        icp_filter.SetInput(source)
        icp_filter.SetTransform(icp)
        icp_filter.Update()

        # Get the result transformation parameters
        matrix = icp.GetMatrix()
        T = ml.mat([
            matrix.GetElement(0, 3),
            matrix.GetElement(1, 3),
            matrix.GetElement(2, 3)
        ]).T
        R = ml.mat([[matrix.GetElement(0, 0), matrix.GetElement(0, 1), matrix.GetElement(0, 2)],
                    [matrix.GetElement(1, 0), matrix.GetElement(1, 1), matrix.GetElement(1, 2)],
                    [matrix.GetElement(2, 0), matrix.GetElement(2, 1), matrix.GetElement(2, 2)]]).I
        if (fixed_bif >= 0) and (moving_bif >= 0):
            T[2] += (fixed_bif * fixed_res[2] - moving_bif * moving_res[2])

        moving_points = movingData.getPointSet('Contour').copy()
        moving_center = movingData.getPointSet('Centerline').copy()
        new_trans_points, result_center_points = util.resliceTheResultPoints(
            moving_points, moving_center, 20, moving_res, fixed_res, discard,
            R, T)

        T = -T
        T = R * T
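        # SimpleITK's 3-D affine transform takes the 9 matrix entries (row major)
        # followed by the 3 translation components as its parameter vector.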

        transform = sitk.Transform(3, sitk.sitkAffine)
        para = R.reshape(1, -1).tolist()[0] + T.T.tolist()[0]
        transform.SetParameters(para)

        movingImage = movingData.getSimpleITKImage()
        fixedImage = fixedData.getSimpleITKImage()
        resultImage = sitk.Resample(movingImage, fixedImage, transform,
                                    sitk.sitkLinear, 0, sitk.sitkFloat32)

        return sitk.GetArrayFromImage(resultImage), {
            'Contour': new_trans_points,
            'Centerline': result_center_points
        }, para + [0, 0, 0]
Beispiel #39
0
    def _deform(self):
        r"""
        Calculates the affine transform that best maps the reference polygonal
        surface to its corresponding deformed surface. This transform is calculated
        through an iterative closest point optimization that seeks to minimize
        the sum of distances between the reference surface vertices and the current
        affine-transformed surface.

        Assuming a uniform deformation, the non-translational elements of this affine
        transform compose the deformation gradient :math:`\mathbf{F}`. The Green-Lagrange
        strain tensor is then defined as

        :math:`\mathbf{E} = \frac{1}{2}(\mathbf{F}^T \mathbf{F} - \mathbf{1})`,

        where :math:`\mathbf{1}` is the identity.

        Returns
        -------
        cell_strains
        """
        for i in range(len(self.rcentroids)):
            # volumetric strains
            self.vstrains.append(old_div(self.dvols[i], self.rvols[i]) - 1)

            ICP = vtk.vtkIterativeClosestPointTransform()
            rcopy = vtk.vtkPolyData()
            dcopy = vtk.vtkPolyData()
            rcopy.DeepCopy(self.rsurfs[i])
            dcopy.DeepCopy(self.dsurfs[i])
            ICP.SetSource(rcopy)
            ICP.SetTarget(dcopy)
            if self.rigidInitial:
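                # Rigid pre-alignment: remove bulk rotation/translation first, bake it
                # into the reference copy, then fit the affine ICP below so that it
                # captures only the deformation.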
                ICP.GetLandmarkTransform().SetModeToRigidBody()
                ICP.SetMaximumMeanDistance(0.001)
                ICP.SetCheckMeanDistance(1)
                ICP.SetMaximumNumberOfIterations(5000)
                ICP.StartByMatchingCentroidsOn()
                ICP.Update()
                trans = vtk.vtkTransform()
                trans.SetMatrix(ICP.GetMatrix())
                trans.Update()
                self.rigidTransforms.append(trans)
                rot = vtk.vtkTransformPolyDataFilter()
                rot.SetInputData(rcopy)
                rot.SetTransform(trans)
                rot.Update()
                ICP.GetLandmarkTransform().SetModeToAffine()
                ICP.SetSource(rot.GetOutput())
                ICP.Update()
            else:
                ICP.GetLandmarkTransform().SetModeToAffine()
                ICP.SetMaximumMeanDistance(0.001)
                ICP.SetCheckMeanDistance(1)
                ICP.SetMaximumNumberOfIterations(5000)
                ICP.StartByMatchingCentroidsOn()
                ICP.Update()

            F = np.zeros((3, 3), float)
            for j in range(3):
                for k in range(3):
                    F[j, k] = ICP.GetMatrix().GetElement(j, k)
            E = 0.5 * (np.dot(F.T, F) - np.eye(3))
            self.cell_strains.append(E)
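
The strain recipe in the docstring above can be checked in isolation: warp a sphere with a known uniform stretch, run an affine-mode ICP the same way _deform() does, and compare the recovered Green-Lagrange tensor against the analytic value. The sketch below is not part of the original class; the stretch factor and iteration count are arbitrary choices.

import numpy as np
import vtk

# Reference surface: a coarse sphere centred at the origin.
sphere = vtk.vtkSphereSource()
sphere.Update()
ref = sphere.GetOutput()

# Deform it with a known homogeneous stretch F along x.
F_true = np.diag([1.1, 1.0, 1.0])
mat = vtk.vtkMatrix4x4()
for i in range(3):
    for j in range(3):
        mat.SetElement(i, j, F_true[i, j])
stretch = vtk.vtkTransform()
stretch.SetMatrix(mat)
warp = vtk.vtkTransformPolyDataFilter()
warp.SetInputData(ref)
warp.SetTransform(stretch)
warp.Update()
deformed = warp.GetOutput()

# Affine ICP from the reference to the deformed surface, as in _deform().
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(ref)
icp.SetTarget(deformed)
icp.GetLandmarkTransform().SetModeToAffine()
icp.SetMaximumNumberOfIterations(200)
icp.StartByMatchingCentroidsOn()
icp.Update()

# Recovered deformation gradient and Green-Lagrange strain.
F = np.array([[icp.GetMatrix().GetElement(i, j) for j in range(3)] for i in range(3)])
E = 0.5 * (F.T @ F - np.eye(3))
print(E)  # expected to be close to diag(0.105, 0.0, 0.0)
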
Beispiel #40
0
	def align(self):
		self.ui.statusLabel.setText("Starting alignment . . .")
		QtGui.qApp.processEvents()
		
		icp=vtk.vtkIterativeClosestPointTransform()
		if self.ui.alignPointCloudButton.isChecked():
			icp.SetSource(self.fPC)
			icp.SetTarget(self.rPC)
		elif self.ui.alignOutlineButton.isChecked():
			icp.SetSource(self.fOPC)
			icp.SetTarget(self.rOPC)
		
		if hasattr(self,'fActor'): #then remove this actor and the associated outline actor
			self.ren.RemoveActor(self.fActor)
			self.ren.RemoveActor(self.fOutlineActor)
			

		
		icp.SetMaximumNumberOfIterations(200)
		icp.StartByMatchingCentroidsOn()
		icp.Modified()
		icp.Update()
		icp.Inverse()
		
		self.transM=np.zeros(shape=(4,4))
		for i in range(4):
			for j in range(4):
				self.transM[i,j]=icp.GetMatrix().GetElement(i, j)
		self.transM=np.linalg.inv(self.transM)

		#apply operation
		self.flp=np.dot(self.flp,self.transM[0:3,0:3])+self.transM[0:3,-1]
		self.fO=np.dot(self.fO,self.transM[0:3,0:3])+self.transM[0:3,-1]
		
		color=(255, 205, 52)
		self.fPC, self.fActor, _, = gen_point_cloud(self.flp,color,self.PointSize)
		self.ren.AddActor(self.fActor)
		self.fOutlineActor, self.fOPC = gen_outline(self.fO,color,self.PointSize)
		self.ren.AddActor(self.fOutlineActor)
		
		#recalculate extents of interactor
		RefMin=np.amin(np.vstack((self.flp,self.rp)),axis=0)
		RefMax=np.amax(np.vstack((self.flp,self.rp)),axis=0)

		extents=RefMax-RefMin #extents
		rl=0.1*(np.amin(extents)) #linear 'scale' to set up interactor
		self.limits=[RefMin[0]-rl, \
		RefMax[0]+rl, \
		RefMin[1]-rl, \
		RefMax[1]+rl, \
		RefMin[2],RefMax[2]]

		#add axes
		self.add_axis(self.limits,[1,1,1])
		
		s,nl,axs=self.get_scale()

		self.fActor.SetScale(s)
		self.fActor.Modified()

		self.add_axis(nl,axs)
		
		#update
		self.ren.ResetCamera()
		self.ui.vtkWidget.update()
		self.ui.vtkWidget.setFocus()
		if self.mirrored==False:
			self.ui.statusLabel.setText("WARNING alignment proceeding without a mirror operation. Alignment complete. Idle.")
		else:
			self.ui.statusLabel.setText("Alignment complete. Idle.")
		
		self.aligned = True