Example #1
    def run(self):
        coreg_data = self.coreg_data
        view_obj = 1

        trck_init, trck_id = self.tracker.GetTrackerInfo()

        # print('CoordCoreg: event {}'.format(self.event.is_set()))
        while not self.event.is_set():
            try:
                if not self.icp_queue.empty():
                    self.icp, self.m_icp = self.icp_queue.get_nowait()

                if not self.object_at_target_queue.empty():
                    self.target_flag = self.object_at_target_queue.get_nowait()

                # print(f"Set the coordinate")
                coord_raw, markers_flag = self.tracker.TrackerCoordinates.GetCoordinates()
                coord, m_img = corregistrate_object_dynamic(coreg_data, coord_raw, self.ref_mode_id, [self.icp, self.m_icp])

                # XXX: This is not the best place to do the logic related to approaching the target when the
                #      debug tracker is in use. However, the trackers (including the debug trackers) operate in
                #      the tracker space where it is hard to make the tracker approach the target in the image space.
                #      Ideally, the transformation from the tracker space to the image space (the function
                #      corregistrate_object_dynamic above) would be encapsulated in a class together with the
                #      tracker, and then the whole class would be mocked when using the debug tracker. However,
                #      those abstractions do not currently exist and doing them would need a larger refactoring.
                #
                if self.tracker_id == const.DEBUGTRACKAPPROACH and self.target is not None:

                    if self.last_coord is None:
                        self.last_coord = np.array(coord)
                    else:
                        coord = self.last_coord + (self.target - self.last_coord) * 0.05
                        self.last_coord = coord

                    angles = [np.radians(coord[3]), np.radians(coord[4]), np.radians(coord[5])]
                    translate = coord[0:3]
                    m_img = tr.compose_matrix(angles=angles, translate=translate)

                # Work on a copy with the sign of the y translation flipped; the
                # flipped matrix is only used for the tract computation below.
                m_img_flip = m_img.copy()
                m_img_flip[1, -1] = -m_img_flip[1, -1]
                # self.pipeline.set_message(m_img_flip)

                if self.icp:
                    m_img = bases.transform_icp(m_img, self.m_icp)

                self.coord_queue.put_nowait([coord, [coord_raw, markers_flag], m_img, view_obj])
                # print('CoordCoreg: put {}'.format(count))
                # count += 1

                if self.view_tracts:
                    self.coord_tracts_queue.put_nowait(m_img_flip)

                if not self.icp_queue.empty():
                    self.icp_queue.task_done()
                # The sleep has to be in both threads
                sleep(self.sle)
            except queue.Full:
                pass
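
In the debug-tracker branch above, the coordinate is moved 5% of the remaining distance towards the target on every iteration, so the simulated probe approaches the target asymptotically without overshooting. A minimal numpy-only sketch of that update rule, with made-up coordinate values (the real ones come from the tracker and the selected target):

import numpy as np

target = np.array([10.0, 20.0, 30.0, 0.0, 45.0, 0.0])  # x, y, z, rx, ry, rz (hypothetical)
coord = np.zeros(6)                                     # starting coordinate

for _ in range(10):
    # Same update as in the loop above: step 5% of the remaining distance.
    coord = coord + (target - coord) * 0.05

print(coord)  # slowly converging towards the target
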
Example #2
    def OpenOtherFiles(self, group):
        # Retrieving the matrix from the image data
        self.matrix, scalar_range, self.filename = image_utils.img2memmap(
            group)

        hdr = group.header
        # if group.affine.any():
        #     self.affine = group.affine
        #     Publisher.sendMessage('Update affine matrix',
        #                           affine=self.affine, status=True)
        hdr.set_data_dtype('int16')
        dims = hdr.get_zooms()
        dimsf = tuple([float(s) for s in dims])

        wl = float((scalar_range[0] + scalar_range[1]) * 0.5)
        ww = float((scalar_range[1] - scalar_range[0]))

        self.Slice = sl.Slice()
        self.Slice.matrix = self.matrix
        self.Slice.matrix_filename = self.filename

        self.Slice.spacing = dimsf
        self.Slice.window_level = wl
        self.Slice.window_width = ww

        if group.affine.any():
            # TODO: replace the inverse of the affine by the actual affine in the whole code
            # remove scaling factor for non-unitary voxel dimensions
            # self.affine = image_utils.world2invspace(affine=group.affine)
            scale, shear, angs, trans, persp = tr.decompose_matrix(
                group.affine)
            self.affine = np.linalg.inv(
                tr.compose_matrix(scale=None,
                                  shear=shear,
                                  angles=angs,
                                  translate=trans,
                                  perspective=persp))
            # print("repos_img: {}".format(repos_img))
            self.Slice.affine = self.affine
            Publisher.sendMessage('Update affine matrix',
                                  affine=self.affine,
                                  status=True)

        scalar_range = int(scalar_range[0]), int(scalar_range[1])
        Publisher.sendMessage('Update threshold limits list',
                              threshold_range=scalar_range)
        return self.matrix, self.filename
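
The decompose/compose round-trip above rebuilds the image affine with scale=None, which strips the voxel-size scaling while keeping shear, rotation, translation and perspective; the result is then inverted. A minimal sketch with an illustrative affine, assuming `tr` is the transformations module by Christoph Gohlke (published on PyPI as `transformations`):

import numpy as np
import transformations as tr  # assumption: the module the examples import as `tr`

# Hypothetical NIfTI-style affine: 0.5 x 0.5 x 2.0 mm voxels, translated origin.
affine = np.diag([0.5, 0.5, 2.0, 1.0])
affine[:3, 3] = [-90.0, -126.0, -72.0]

scale, shear, angs, trans, persp = tr.decompose_matrix(affine)
print(scale)  # ~[0.5, 0.5, 2.0] -- the voxel dimensions

# Rebuild without the scale component, then invert, as OpenOtherFiles does.
affine_noscale = tr.compose_matrix(scale=None, shear=shear, angles=angs,
                                   translate=trans, perspective=persp)
inv_affine = np.linalg.inv(affine_noscale)
print(inv_affine[:3, 3])  # the translation is undone: ~[90, 126, 72]
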
Example #3
def world2invspace(affine=None):
    """
    Normalize image pixel intensity for int16 gray scale values.

    :param repos: list of translation and rotation [trans_x, trans_y, trans_z, rot_x, rot_y, rot_z] to reposition the
    vtk object prior to applying the affine matrix transformation. Note: rotation given in degrees
    :param user_matrix: affine matrix from image header, prefered QForm matrix
    :return: vtk transform filter for repositioning the polydata and affine matrix to be used as SetUserMatrix in actor
    """

    # remove scaling factor for non-unitary voxel dimensions
    scale, shear, angs, trans, persp = tr.decompose_matrix(affine)
    affine_noscale = tr.compose_matrix(scale=None,
                                       shear=shear,
                                       angles=angs,
                                       translate=trans,
                                       perspective=persp)
    # repos_img = [0.] * 6
    # repos_img[1] = -float(shape[1])
    #
    # repos_mat = np.identity(4)
    # # translation
    # repos_mat[:3, -1] = repos_img[:3]
    # # rotation (in principle for invesalius space no rotation is needed)
    # repos_mat[:3, :3] = tr.euler_matrix(*np.deg2rad(repos_img[3:]), axes='sxyz')[:3, :3]

    # if repos:
    #     transx, transy, transz, rotx, roty, rotz = repos
    #     # create a transform that rotates the stl source
    #     transform = vtk.vtkTransform()
    #     transform.PostMultiply()
    #     transform.RotateX(rotx)
    #     transform.RotateY(roty)
    #     transform.RotateZ(rotz)
    #     transform.Translate(transx, transy, transz)
    #
    #     transform_filt = vtk.vtkTransformPolyDataFilter()
    #     transform_filt.SetTransform(transform)
    #     transform_filt.Update()

    # assuming the vtk default transformation order is PreMultiply, the user matrix is set so that:
    # 1. the object is repositioned -> 2. the object is transformed to the desired position/orientation
    # PreMultiply: M = M*A, where M is the current transformation and A is the applied transformation
    # user_matrix = np.linalg.inv(user_matrix) @ repos_mat

    return np.linalg.inv(affine_noscale)
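
A numpy-only sanity check (with an illustrative, shear-free affine) of the property world2invspace relies on: composing the returned inverse with the original affine cancels the rotation and translation and leaves only the voxel scaling.

import numpy as np

# Hypothetical affine: 90-degree rotation about z, 0.5 x 0.5 x 2.0 mm voxels, translated.
rot = np.eye(4)
rot[:3, :3] = [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
affine = rot @ np.diag([0.5, 0.5, 2.0, 1.0])
affine[:3, 3] = [-90.0, -126.0, -72.0]

# Remove the scaling by normalizing the columns of the rotation block
# (equivalent to the decompose/compose round-trip for a shear-free affine).
affine_noscale = affine.copy()
affine_noscale[:3, :3] /= np.linalg.norm(affine[:3, :3], axis=0)

residual = np.linalg.inv(affine_noscale) @ affine
print(np.round(residual, 3))  # ~diag(0.5, 0.5, 2.0, 1.0): only the scaling remains
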
Example #4
    def OpenOtherFiles(self, group):
        # Retrieving the matrix from the image data
        self.matrix, scalar_range, self.filename = image_utils.img2memmap(
            group)

        hdr = group.header
        hdr.set_data_dtype('int16')

        wl = float((scalar_range[0] + scalar_range[1]) * 0.5)
        ww = float((scalar_range[1] - scalar_range[0]))

        self.Slice = sl.Slice()
        self.Slice.matrix = self.matrix
        self.Slice.matrix_filename = self.filename
        # Even though axes 0 and 2 are swapped when creating self.matrix, the
        # spacing must be kept in its original order, because it is modified
        # later on; changing it here would produce wrong results. The values
        # are also converted to float, since get_zooms() returns numpy.float32,
        # which is not supported by the plist used for saving the project.
        self.Slice.spacing = tuple([float(s) for s in hdr.get_zooms()])
        self.Slice.window_level = wl
        self.Slice.window_width = ww

        if group.affine.any():
            # remove scaling factor for non-unitary voxel dimensions
            scale, shear, angs, trans, persp = tr.decompose_matrix(
                group.affine)
            self.Slice.affine = np.linalg.inv(
                tr.compose_matrix(scale=None,
                                  shear=shear,
                                  angles=angs,
                                  translate=trans,
                                  perspective=persp))
        else:
            self.Slice.affine = None

        scalar_range = int(scalar_range[0]), int(scalar_range[1])

        Publisher.sendMessage('Update threshold limits list',
                              threshold_range=scalar_range)

        return self.matrix, self.filename
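
The float() conversion of the zooms is needed because get_zooms() returns numpy.float32 values, and a plist serializer only accepts plain Python numbers. A small sketch of that failure mode, assuming the project file is written with Python's plistlib (the comment above only says "the plist"):

import plistlib
import numpy as np

zooms = (np.float32(0.5), np.float32(0.5), np.float32(2.0))  # as returned by get_zooms()
spacing = tuple(float(s) for s in zooms)

plistlib.dumps({"spacing": spacing})       # works: plain Python floats
try:
    plistlib.dumps({"spacing": zooms})     # fails: numpy.float32 is not a supported type
except TypeError as err:
    print(err)
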