Code Example #1
    def run(self):
        m_change, obj_ref_mode = self.coreg_data
        trck_init, trck_id, trck_mode = self.trck_info

        while self.nav_id:
            coord_raw = dco.GetCoordinates(trck_init, trck_id, trck_mode)

            psi, theta, phi = radians(coord_raw[obj_ref_mode, 3:])
            r_probe = tr.euler_matrix(psi, theta, phi, 'rzyx')
            t_probe = tr.translation_matrix(coord_raw[obj_ref_mode, :3])
            m_probe = asmatrix(tr.concatenate_matrices(t_probe, r_probe))

            psi_ref, theta_ref, phi_ref = radians(coord_raw[1, 3:])
            r_ref = tr.euler_matrix(psi_ref, theta_ref, phi_ref, 'rzyx')
            t_ref = tr.translation_matrix(coord_raw[1, :3])
            m_ref = asmatrix(tr.concatenate_matrices(t_ref, r_ref))

            m_dyn = m_ref.I * m_probe
            m_dyn[2, -1] = -m_dyn[2, -1]

            m_img = m_change * m_dyn

            scale, shear, angles, trans, persp = tr.decompose_matrix(m_img)

            coord = m_img[0, -1], m_img[1, -1], m_img[2, -1], \
                    degrees(angles[0]), degrees(angles[1]), degrees(angles[2])

            wx.CallAfter(Publisher.sendMessage, 'Co-registered points', arg=m_img, position=coord)

            # TODO: Optimize the value of sleep for each tracking device.
            sleep(0.175)

            if self._pause_:
                return
Code Example #2
    def run(self):
        m_change, obj_ref_mode = self.coreg_data
        trck_init, trck_id, trck_mode = self.trck_info

        while self.nav_id:
            coord_raw = dco.GetCoordinates(trck_init, trck_id, trck_mode)

            psi, theta, phi = radians(coord_raw[obj_ref_mode, 3:])
            r_probe = tr.euler_matrix(psi, theta, phi, 'rzyx')
            t_probe = tr.translation_matrix(coord_raw[obj_ref_mode, :3])
            m_probe = asmatrix(tr.concatenate_matrices(t_probe, r_probe))

            psi_ref, theta_ref, phi_ref = radians(coord_raw[1, 3:])
            r_ref = tr.euler_matrix(psi_ref, theta_ref, phi_ref, 'rzyx')
            t_ref = tr.translation_matrix(coord_raw[1, :3])
            m_ref = asmatrix(tr.concatenate_matrices(t_ref, r_ref))

            m_dyn = m_ref.I * m_probe
            m_dyn[2, -1] = -m_dyn[2, -1]

            m_img = m_change * m_dyn

            scale, shear, angles, trans, persp = tr.decompose_matrix(m_img)

            coord = m_img[0, -1], m_img[1, -1], m_img[2, -1], \
                    degrees(angles[0]), degrees(angles[1]), degrees(angles[2])

            wx.CallAfter(Publisher.sendMessage, 'Co-registered points',
                         (m_img, coord))

            # TODO: Optimize the value of sleep for each tracking device.
            sleep(0.175)

            if self._pause_:
                return
Code Example #3
File: control.py  Project: dhanzhang/invesalius3
    def OpenOtherFiles(self, group):
        # Retrieving matrix from image data
        self.matrix, scalar_range, self.filename = image_utils.img2memmap(
            group)

        hdr = group.header
        # if group.affine.any():
        #     self.affine = group.affine
        #     Publisher.sendMessage('Update affine matrix',
        #                           affine=self.affine, status=True)
        hdr.set_data_dtype('int16')
        dims = hdr.get_zooms()
        dimsf = tuple([float(s) for s in dims])

        wl = float((scalar_range[0] + scalar_range[1]) * 0.5)
        ww = float((scalar_range[1] - scalar_range[0]))

        self.Slice = sl.Slice()
        self.Slice.matrix = self.matrix
        self.Slice.matrix_filename = self.filename

        self.Slice.spacing = dimsf
        self.Slice.window_level = wl
        self.Slice.window_width = ww

        if group.affine.any():
            # TODO: replace the inverse of the affine with the actual affine throughout the code
            # remove scaling factor for non-unitary voxel dimensions
            # self.affine = image_utils.world2invspace(affine=group.affine)
            scale, shear, angs, trans, persp = tr.decompose_matrix(
                group.affine)
            self.affine = np.linalg.inv(
                tr.compose_matrix(scale=None,
                                  shear=shear,
                                  angles=angs,
                                  translate=trans,
                                  perspective=persp))
            # print("repos_img: {}".format(repos_img))
            self.Slice.affine = self.affine
            Publisher.sendMessage('Update affine matrix',
                                  affine=self.affine,
                                  status=True)

        scalar_range = int(scalar_range[0]), int(scalar_range[1])
        Publisher.sendMessage('Update threshold limits list',
                              threshold_range=scalar_range)
        return self.matrix, self.filename
Code Example #4
def world2invspace(affine=None):
    """
    Remove the voxel scaling from an image affine and return its inverse.

    The affine is decomposed, recomposed without the scale component (dropping
    non-unitary voxel dimensions), and the inverse of the resulting matrix is returned.

    :param affine: affine matrix from the image header, preferably the QForm matrix
    :return: inverse of the affine matrix with the scaling factor removed
    """

    # remove scaling factor for non-unitary voxel dimensions
    scale, shear, angs, trans, persp = tr.decompose_matrix(affine)
    affine_noscale = tr.compose_matrix(scale=None,
                                       shear=shear,
                                       angles=angs,
                                       translate=trans,
                                       perspective=persp)
    # repos_img = [0.] * 6
    # repos_img[1] = -float(shape[1])
    #
    # repos_mat = np.identity(4)
    # # translation
    # repos_mat[:3, -1] = repos_img[:3]
    # # rotation (in principle for invesalius space no rotation is needed)
    # repos_mat[:3, :3] = tr.euler_matrix(*np.deg2rad(repos_img[3:]), axes='sxyz')[:3, :3]

    # if repos:
    #     transx, transy, transz, rotx, roty, rotz = repos
    #     # create a transform that rotates the stl source
    #     transform = vtk.vtkTransform()
    #     transform.PostMultiply()
    #     transform.RotateX(rotx)
    #     transform.RotateY(roty)
    #     transform.RotateZ(rotz)
    #     transform.Translate(transx, transy, transz)
    #
    #     transform_filt = vtk.vtkTransformPolyDataFilter()
    #     transform_filt.SetTransform(transform)
    #     transform_filt.Update()

    # Assuming VTK's default transformation order is PreMultiply, the user matrix is set so that:
    # 1. the object is repositioned -> 2. the object is transformed to the desired position/orientation
    # PreMultiply: M = M*A, where M is the current transformation and A is the applied transformation
    # user_matrix = np.linalg.inv(user_matrix) @ repos_mat

    return np.linalg.inv(affine_noscale)
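
As a small usage illustration (not part of the original project), the function above could be applied to a synthetic header affine; the voxel size and origin offset below are assumptions chosen only for the example.

# Minimal usage sketch, assuming world2invspace() defined above is in scope and that the
# header affine has 0.5 mm isotropic voxels plus an origin offset (both hypothetical).
import numpy as np

affine = np.diag([0.5, 0.5, 0.5, 1.0])
affine[:3, 3] = [-90.0, -126.0, -72.0]   # hypothetical origin offset in mm

inv_affine = world2invspace(affine=affine)
# the voxel scaling was discarded before inversion, so only the rigid part is inverted
print(np.round(inv_affine, 3))
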
Code Example #5
File: control.py  Project: rmatsuda/invesalius3
    def OpenOtherFiles(self, group):
        # Retrieving matrix from image data
        self.matrix, scalar_range, self.filename = image_utils.img2memmap(
            group)

        hdr = group.header
        hdr.set_data_dtype('int16')

        wl = float((scalar_range[0] + scalar_range[1]) * 0.5)
        ww = float((scalar_range[1] - scalar_range[0]))

        self.Slice = sl.Slice()
        self.Slice.matrix = self.matrix
        self.Slice.matrix_filename = self.filename
        # Even though axes 0 and 2 are swapped when creating self.matrix,
        # the spacing must be kept in the original order, as it is modified
        # elsewhere later; otherwise the results are wrong.
        # Also convert to float, because get_zooms() returns numpy.float32,
        # which is not supported by the plist used to save the project.
        self.Slice.spacing = tuple([float(s) for s in hdr.get_zooms()])
        self.Slice.window_level = wl
        self.Slice.window_width = ww

        if group.affine.any():
            # remove scaling factor for non-unitary voxel dimensions
            scale, shear, angs, trans, persp = tr.decompose_matrix(
                group.affine)
            self.Slice.affine = np.linalg.inv(
                tr.compose_matrix(scale=None,
                                  shear=shear,
                                  angles=angs,
                                  translate=trans,
                                  perspective=persp))
        else:
            self.Slice.affine = None

        scalar_range = int(scalar_range[0]), int(scalar_range[1])

        Publisher.sendMessage('Update threshold limits list',
                              threshold_range=scalar_range)

        return self.matrix, self.filename
Code Example #6
def corregistrate_object_dynamic(inp, coord_raw, ref_mode_id):

    m_change, obj_ref_mode, t_obj_raw, s0_raw, r_s0_raw, s0_dyn, m_obj_raw, r_obj_img = inp

    # transform raw marker coordinate to object center
    m_probe = object_marker_to_center(coord_raw, obj_ref_mode, t_obj_raw, s0_raw, r_s0_raw)
    # transform object center to reference marker if specified as dynamic reference
    if ref_mode_id:
        m_probe_ref = object_to_reference(coord_raw, m_probe)
    else:
        m_probe_ref = m_probe
    # invert y coordinate
    m_probe_ref[2, -1] = -m_probe_ref[2, -1]
    # coregister from tracker to image space
    m_img = tracker_to_image(m_change, m_probe_ref, r_obj_img, m_obj_raw, s0_dyn)
    # compute rotation angles
    _, _, angles, _, _ = tr.decompose_matrix(m_img)
    # create output coordinate list
    coord = m_img[0, -1], m_img[1, -1], m_img[2, -1], \
            np.degrees(angles[0]), np.degrees(angles[1]), np.degrees(angles[2])

    return coord, m_img
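
The helpers object_marker_to_center, object_to_reference and tracker_to_image are not included in these snippets. Judging from the inline math of Code Examples #8 and #9, object_to_reference presumably builds the reference-marker matrix and left-multiplies by its inverse; the sketch below is a reconstruction under that assumption, using plain numpy arrays instead of numpy.matrix, and the import path of the `tr` module is also an assumption.

# Hedged reconstruction (assumption): object_to_reference, mirroring m_dyn = m_ref.I * m_probe
# from Code Examples #8/#9, written with numpy arrays.
import numpy as np
import transformations as tr  # assumed import path for the `tr` module used above

def object_to_reference(coord_raw, m_probe):
    """Transform the object center to the reference marker (dynamic reference)."""
    a, b, g = np.radians(coord_raw[1, 3:])
    r_ref = tr.euler_matrix(a, b, g, 'rzyx')
    t_ref = tr.translation_matrix(coord_raw[1, :3])
    m_ref = tr.concatenate_matrices(t_ref, r_ref)
    return np.linalg.inv(m_ref) @ m_probe
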
Code Example #7
def corregistrate_dynamic(inp, coord_raw, ref_mode_id):

    m_change, obj_ref_mode = inp

    # transform raw marker coordinate to object center
    m_probe = compute_marker_transformation(coord_raw, obj_ref_mode)
    # transform object center to reference marker if specified as dynamic reference
    if ref_mode_id:
        m_ref = compute_marker_transformation(coord_raw, 1)
        m_probe_ref = np.linalg.inv(m_ref) @ m_probe
    else:
        m_probe_ref = m_probe

    # invert y coordinate
    m_probe_ref[2, -1] = -m_probe_ref[2, -1]
    # coregister from tracker to image space
    m_img = m_change @ m_probe_ref
    # compute rotation angles
    _, _, angles, _, _ = tr.decompose_matrix(m_img)
    # create output coordinate list
    coord = m_img[0, -1], m_img[1, -1], m_img[2, -1],\
            np.degrees(angles[0]), np.degrees(angles[1]), np.degrees(angles[2])

    return coord, m_img
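
compute_marker_transformation is likewise not shown in these snippets. Based on the inline computation in Code Examples #1 and #2 (an 'rzyx' Euler rotation concatenated with the marker translation), a plausible reconstruction is the one below; the signature and the import path are assumptions.

# Hedged reconstruction (assumption): compute_marker_transformation, mirroring the
# m_probe computation of Code Examples #1/#2 with numpy arrays.
import numpy as np
import transformations as tr  # assumed import path for the `tr` module used above

def compute_marker_transformation(coord_raw, obj_ref_mode):
    """Build the 4x4 pose matrix of the marker in the given row of coord_raw."""
    psi, theta, phi = np.radians(coord_raw[obj_ref_mode, 3:])
    r_probe = tr.euler_matrix(psi, theta, phi, 'rzyx')
    t_probe = tr.translation_matrix(coord_raw[obj_ref_mode, :3])
    return tr.concatenate_matrices(t_probe, r_probe)
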
Code Example #8
    def run(self):

        m_change, obj_ref_mode, t_obj_raw, s0_raw, r_s0_raw, s0_dyn, m_obj_raw, r_obj_img = self.coreg_data
        trck_init, trck_id, trck_mode = self.trck_info

        while self.nav_id:
            coord_raw = dco.GetCoordinates(trck_init, trck_id, trck_mode)

            as1, bs1, gs1 = radians(coord_raw[obj_ref_mode, 3:])
            r_probe = asmatrix(tr.euler_matrix(as1, bs1, gs1, 'rzyx'))
            t_probe_raw = asmatrix(tr.translation_matrix(coord_raw[obj_ref_mode, :3]))
            t_offset_aux = r_s0_raw.I * r_probe * t_obj_raw
            t_offset = asmatrix(identity(4))
            t_offset[:, -1] = t_offset_aux[:, -1]
            t_probe = s0_raw * t_offset * s0_raw.I * t_probe_raw
            m_probe = asmatrix(tr.concatenate_matrices(t_probe, r_probe))

            a, b, g = radians(coord_raw[1, 3:])
            r_ref = tr.euler_matrix(a, b, g, 'rzyx')
            t_ref = tr.translation_matrix(coord_raw[1, :3])
            m_ref = asmatrix(tr.concatenate_matrices(t_ref, r_ref))

            m_dyn = m_ref.I * m_probe
            m_dyn[2, -1] = -m_dyn[2, -1]

            m_img = m_change * m_dyn
            r_obj = r_obj_img * m_obj_raw.I * s0_dyn.I * m_dyn * m_obj_raw

            m_img[:3, :3] = r_obj[:3, :3]

            scale, shear, angles, trans, persp = tr.decompose_matrix(m_img)

            coord = m_img[0, -1], m_img[1, -1], m_img[2, -1],\
                    degrees(angles[0]), degrees(angles[1]), degrees(angles[2])

            wx.CallAfter(Publisher.sendMessage, 'Co-registered points', arg=m_img, position=coord)
            wx.CallAfter(Publisher.sendMessage, 'Update object matrix', m_img=m_img, coord=coord)

            # TODO: Optimize the value of sleep for each tracking device.
            sleep(0.175)

            # The debug tracker does not work with 0.175, so it was changed to 0.2.
            # However, 0.2 is too low an update frequency (~5 Hz). Needs urgent optimization.
            # sleep(.3)

            # Partially working for translation and offset, but the offset always
            # stays in the same axis; still has to be fixed for rotation.
            # M_dyn = M_reference.I * T_stylus
            # M_dyn[2, -1] = -M_dyn[2, -1]
            # M_dyn_ch = M_change * M_dyn
            # ddd = M_dyn_ch[0, -1], M_dyn_ch[1, -1], M_dyn_ch[2, -1]
            # M_dyn_ch[:3, -1] = asmatrix(db.flip_x_m(ddd)).reshape([3, 1])
            # M_final = S0 * M_obj_trans_0 * S0.I * M_dyn_ch

            # this works for static reference object rotation
            # R_dyn = M_vtk * M_obj_rot_raw.I * S0_rot_raw.I * R_stylus * M_obj_rot_raw
            # this works for dynamic reference in rotation but not in translation
            # R_dyn = M_vtk * M_obj_rot_raw.I * S0_rot_dyn.I * R_reference.I * R_stylus * M_obj_rot_raw

            if self._pause_:
                return
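
For reference, the object_marker_to_center helper used in Code Example #6 can be reconstructed from the loop above (the object-center offset applied in the s0_raw frame); the version below is such a reconstruction and assumes the calibration matrices are plain 4x4 numpy arrays rather than numpy.matrix.

# Hedged reconstruction (assumption): object_marker_to_center, mirroring the
# t_offset / t_probe computation of this loop with numpy arrays.
import numpy as np
import transformations as tr  # assumed import path for the `tr` module used above

def object_marker_to_center(coord_raw, obj_ref_mode, t_obj_raw, s0_raw, r_s0_raw):
    """Transform the raw marker coordinate to the object center."""
    a, b, g = np.radians(coord_raw[obj_ref_mode, 3:])
    r_probe = tr.euler_matrix(a, b, g, 'rzyx')
    t_probe_raw = tr.translation_matrix(coord_raw[obj_ref_mode, :3])
    t_offset_aux = np.linalg.inv(r_s0_raw) @ r_probe @ t_obj_raw
    t_offset = np.identity(4)
    t_offset[:, -1] = t_offset_aux[:, -1]
    t_probe = s0_raw @ t_offset @ np.linalg.inv(s0_raw) @ t_probe_raw
    return tr.concatenate_matrices(t_probe, r_probe)
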
Code Example #9
File: coregistration.py  Project: xiazt/invesalius3
    def run(self):

        m_change, obj_ref_mode, t_obj_raw, s0_raw, r_s0_raw, s0_dyn, m_obj_raw, r_obj_img = self.coreg_data
        trck_init, trck_id, trck_mode = self.trck_info

        while self.nav_id:
            coord_raw = dco.GetCoordinates(trck_init, trck_id, trck_mode)

            as1, bs1, gs1 = radians(coord_raw[obj_ref_mode, 3:])
            r_probe = asmatrix(tr.euler_matrix(as1, bs1, gs1, 'rzyx'))
            t_probe_raw = asmatrix(
                tr.translation_matrix(coord_raw[obj_ref_mode, :3]))
            t_offset_aux = r_s0_raw.I * r_probe * t_obj_raw
            t_offset = asmatrix(identity(4))
            t_offset[:, -1] = t_offset_aux[:, -1]
            t_probe = s0_raw * t_offset * s0_raw.I * t_probe_raw
            m_probe = asmatrix(tr.concatenate_matrices(t_probe, r_probe))

            a, b, g = radians(coord_raw[1, 3:])
            r_ref = tr.euler_matrix(a, b, g, 'rzyx')
            t_ref = tr.translation_matrix(coord_raw[1, :3])
            m_ref = asmatrix(tr.concatenate_matrices(t_ref, r_ref))

            m_dyn = m_ref.I * m_probe
            m_dyn[2, -1] = -m_dyn[2, -1]

            m_img = m_change * m_dyn
            r_obj = r_obj_img * m_obj_raw.I * s0_dyn.I * m_dyn * m_obj_raw

            m_img[:3, :3] = r_obj[:3, :3]

            scale, shear, angles, trans, persp = tr.decompose_matrix(m_img)

            coord = m_img[0, -1], m_img[1, -1], m_img[2, -1],\
                    degrees(angles[0]), degrees(angles[1]), degrees(angles[2])

            wx.CallAfter(Publisher.sendMessage,
                         'Co-registered points',
                         arg=m_img,
                         position=coord)
            wx.CallAfter(Publisher.sendMessage,
                         'Update object matrix',
                         m_img=m_img,
                         coord=coord)

            # TODO: Optimize the value of sleep for each tracking device.
            sleep(0.175)

            # The debug tracker does not work with 0.175, so it was changed to 0.2.
            # However, 0.2 is too low an update frequency (~5 Hz). Needs urgent optimization.
            # sleep(.3)

            # Partially working for translation and offset, but the offset always
            # stays in the same axis; still has to be fixed for rotation.
            # M_dyn = M_reference.I * T_stylus
            # M_dyn[2, -1] = -M_dyn[2, -1]
            # M_dyn_ch = M_change * M_dyn
            # ddd = M_dyn_ch[0, -1], M_dyn_ch[1, -1], M_dyn_ch[2, -1]
            # M_dyn_ch[:3, -1] = asmatrix(db.flip_x_m(ddd)).reshape([3, 1])
            # M_final = S0 * M_obj_trans_0 * S0.I * M_dyn_ch

            # this works for static reference object rotation
            # R_dyn = M_vtk * M_obj_rot_raw.I * S0_rot_raw.I * R_stylus * M_obj_rot_raw
            # this works for dynamic reference in rotation but not in translation
            # R_dyn = M_vtk * M_obj_rot_raw.I * S0_rot_dyn.I * R_reference.I * R_stylus * M_obj_rot_raw

            if self._pause_:
                return
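
Finally, the tracker_to_image helper referenced in Code Example #6 corresponds to the m_img / r_obj computation in the loop above; the sketch below reconstructs it under the same plain-array assumption.

# Hedged reconstruction (assumption): tracker_to_image, mirroring m_img = m_change * m_dyn
# and the r_obj rotation replacement from Code Examples #8/#9, with numpy arrays.
import numpy as np

def tracker_to_image(m_change, m_probe_ref, r_obj_img, m_obj_raw, s0_dyn):
    """Coregister from tracker space to image space, replacing the rotation block."""
    m_img = m_change @ m_probe_ref
    r_obj = r_obj_img @ np.linalg.inv(m_obj_raw) @ np.linalg.inv(s0_dyn) @ m_probe_ref @ m_obj_raw
    m_img[:3, :3] = r_obj[:3, :3]
    return m_img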