Example #1
    def calculateRayCastSettings(self, viewmat):
        """Calculates a camera direction and ray casting step vector, based
        on the given view matrix.
        """

        d2tmat = self.getTransform('display', 'texture')
        xform = transform.concat(d2tmat, viewmat)
        cdir = np.array([0, 0, 1])
        cdir = transform.transform(cdir, xform, vector=True)
        cdir = transform.normalise(cdir)

        # sqrt(3) so the maximum number
        # of samples is taken along the
        # diagonal of a cube
        rayStep = np.sqrt(3) * cdir / self.numSteps

        return cdir, rayStep
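
A minimal standalone sketch of the sqrt(3) reasoning in the comment above (illustrative values only; it assumes ``numpy`` and the same ``transform`` module used throughout these examples):

import numpy as np

# The diagonal of a unit cube has length sqrt(1^2 + 1^2 + 1^2) = sqrt(3),
# so numSteps samples of length sqrt(3) / numSteps span the whole diagonal.
numSteps = 100
cdir     = transform.normalise(np.array([1.0, 1.0, 1.0]))
rayStep  = np.sqrt(3) * cdir / numSteps

assert np.isclose(numSteps * np.linalg.norm(rayStep), np.sqrt(3))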
Example #2
def planeEquation2(origin, normal):
    """Calculates the equation of a plane equation from a normal vector
    and a single point on the plane.

    Returns a ``numpy`` array containing four values, the coefficients of the
    equation:

    See also :func:`planeEquation`.
    """

    normal = transform.normalise(normal)
    ax, by, cz = np.array(origin) * normal

    eqn = np.zeros(4, dtype=np.float64)
    eqn[:3] = normal
    eqn[3] = -np.sum((ax, by, cz))

    return eqn
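
A short usage sketch for ``planeEquation2`` (hypothetical values; it relies only on ``numpy`` and the function defined above). Any point on the plane, including ``origin`` itself, should satisfy the returned equation:

import numpy as np

origin = [1.0, 2.0, 3.0]
normal = [0.0, 0.0, 2.0]   # need not be unit length

a, b, c, d = planeEquation2(origin, normal)

# the origin lies on the plane, so a*x + b*y + c*z + d should be zero
assert np.isclose(a * origin[0] + b * origin[1] + c * origin[2] + d, 0.0)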
Example #3
    def get3DClipPlane(self, planeIdx):
        """A convenience method which calculates a point-vector description
        of the specified clipping plane. ``planeIdx`` is an index into the
        :attr:`clipPosition`, :attr:`clipAzimuth`, and
        :attr:`clipInclination` properties.

        Returns the clip plane at the given ``planeIdx`` as an origin and
        normal vector, in the display coordinate system.
        """

        pos = self.clipPosition[planeIdx]
        azimuth = self.clipAzimuth[planeIdx]
        incline = self.clipInclination[planeIdx]

        b = self.bounds
        pos = pos / 100.0
        azimuth = azimuth * np.pi / 180.0
        incline = incline * np.pi / 180.0

        xmid = b.xlo + 0.5 * b.xlen
        ymid = b.ylo + 0.5 * b.ylen
        zmid = b.zlo + 0.5 * b.zlen

        centre = [xmid, ymid, zmid]
        normal = [0, 0, -1]

        rot1 = transform.axisAnglesToRotMat(incline, 0, 0)
        rot2 = transform.axisAnglesToRotMat(0, 0, azimuth)
        rotation = transform.concat(rot2, rot1)

        normal = transform.transformNormal(normal, rotation)
        normal = transform.normalise(normal)

        offset = (pos - 0.5) * max((b.xlen, b.ylen, b.zlen))
        origin = centre + normal * offset

        return origin, normal
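
A hedged follow-on sketch: the ``origin``/``normal`` pair returned above can be converted into plane-equation coefficients with ``planeEquation2`` from Example #2, e.g. when a clip plane has to be passed to a shader as four coefficients. Here ``opts`` stands for an instance of the class defining ``get3DClipPlane``, with at least one clip plane configured:

origin, normal = opts.get3DClipPlane(0)
eqn = planeEquation2(origin, normal)   # [a, b, c, d] such that a*x + b*y + c*z + d = 0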
Example #4
    def calculateRayCastSettings(self, view=None, proj=None):
        """Calculates various parameters required for 3D ray-cast rendering
        (see the :class:`.GLVolume` class).


        :arg view: Transformation matrix which transforms from model
                   coordinates to view coordinates (i.e. the GL view matrix).


        :arg proj: Transformation matrix which transforms from view coordinates
                   to normalised device coordinates (i.e. the GL projection
                   matrix).

        Returns a tuple containing:

          - A vector defining the amount by which to move along a ray in a
            single iteration of the ray-casting algorithm. This can be added
            directly to the volume texture coordinates.

          - A transformation matrix which transforms from image texture
            coordinates into the display coordinate system.

        .. note:: This method will raise an error if called on a
                  ``GLImageObject`` which is managing an overlay that is not
                  associated with a :class:`.Volume3DOpts` instance.
        """

        if view is None: view = np.eye(4)
        if proj is None: proj = np.eye(4)

        # In GL, the camera is
        # initially pointing in
        # the -z direction.
        eye = [0, 0, -1]
        target = [0, 0, 1]

        # We take this initial camera
        # configuration, and transform
        # it by the inverse modelview
        # matrix
        t2dmat = self.getTransform('texture', 'display')
        xform = transform.concat(view, t2dmat)
        ixform = transform.invert(xform)

        eye = transform.transform(eye, ixform, vector=True)
        target = transform.transform(target, ixform, vector=True)

        # Direction that the 'camera' is
        # pointing, normalised to unit length
        cdir = transform.normalise(eye - target)

        # Calculate the length of one step
        # along the camera direction in a
        # single iteration of the ray-cast
        # loop. Multiply by sqrt(3) so that
        # the maximum number of steps will
        # be reached across the longest axis
        # of the image texture cube.
        rayStep = np.sqrt(3) * cdir / self.getNumSteps()

        # A transformation matrix which can
        # transform image texture coordinates
        # into the corresponding screen
        # (normalised device) coordinates.
        # This allows the fragment shader to
        # convert an image texture coordinate
        # into a relative depth value.
        #
        # The projection matrix puts depth into
        # [-1, 1], but we want it in [0, 1]
        zscale = transform.scaleOffsetXform([1, 1, 0.5], [0, 0, 0.5])
        xform = transform.concat(zscale, proj, xform)

        return rayStep, xform
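
A small sketch of the final depth re-scaling step, built directly with ``numpy``. It assumes, as the comment above suggests, that ``transform.scaleOffsetXform(scales, offsets)`` produces a 4x4 affine which scales and then offsets each axis:

import numpy as np

# scale z by 0.5, then offset it by 0.5, mapping NDC
# depth from [-1, 1] into [0, 1]; x and y are untouched
zscale = np.eye(4)
zscale[2, 2] = 0.5
zscale[2, 3] = 0.5

assert np.allclose(zscale @ [0, 0, -1, 1], [0, 0, 0, 1])
assert np.allclose(zscale @ [0, 0,  1, 1], [0, 0, 1, 1])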
Example #5
    def _pickModeLeftMouseDown(self, ev, canvas, mousePos, canvasPos):
        """Called on mouse down events in ``pick`` mode.

        Updates the :attr:`DisplayContext.vertexIndex` property if the
        currently selected overlay is a :class:`.Mesh`, otherwise
        updates the :attr:`DisplayContext.location` property.
        """

        from fsl.data.mesh import Mesh

        displayCtx = self.displayCtx
        ovl = displayCtx.getSelectedOverlay()

        if ovl is None:
            return

        # The canvasPos is located on the near clipping
        # plane (see Scene3DCanvas.canvasToWorld).
        # We also need the corresponding point on the
        # far clipping plane.
        farPos = canvas.canvasToWorld(mousePos[0], mousePos[1], near=False)

        # For non-mesh overlays, we select a point which
        # is in between the near/far clipping planes.
        if not isinstance(ovl, Mesh):

            posDir = farPos - canvasPos
            dist = transform.veclength(posDir)
            posDir = transform.normalise(posDir)
            midPos = canvasPos + 0.5 * dist * posDir

            self.displayCtx.location.xyz = midPos

        else:
            opts = self.displayCtx.getOpts(ovl)
            rayOrigin = canvasPos
            rayDir = transform.normalise(farPos - canvasPos)

            # transform location from display into model space
            rayOrigin = opts.transformCoords(rayOrigin, 'display', 'mesh')
            rayDir = opts.transformCoords(rayDir,
                                          'display',
                                          'mesh',
                                          vector=True)
            loc, tri = ovl.rayIntersection([rayOrigin], [rayDir],
                                           vertices=True)

            if len(loc) == 0:
                return

            loc = loc[0]
            tri = ovl.indices[int(tri[0]), :]

            # The rayIntersection method gives us a
            # point on one of the mesh triangles -
            # we want the vertex on that triangle
            # which is nearest to the intersection.
            triVerts = ovl.vertices[tri, :]
            triDists = transform.veclength(loc - triVerts)
            vertIdx = np.argsort(triDists)[0]

            self.displayCtx.vertexIndex = tri[vertIdx]
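
A tiny self-contained sketch of the nearest-vertex step above, using plain ``numpy`` in place of ``transform.veclength`` (illustrative values only):

import numpy as np

# vertices of the intersected triangle, and the intersection point
triVerts = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]])
loc      = np.array([0.1, 0.2, 0.0])

# distance from the intersection point to each vertex -
# the closest vertex is the one that gets selected
triDists = np.linalg.norm(loc - triVerts, axis=1)
vertIdx  = np.argmin(triDists)   # -> 0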
Example #6
def test_normals():

    # vertices of a cube
    verts = np.array([
        [-1, -1, -1],
        [-1, -1, 1],
        [-1, 1, -1],
        [-1, 1, 1],
        [1, -1, -1],
        [1, -1, 1],
        [1, 1, -1],
        [1, 1, 1],
    ])

    # triangles
    # cw  == clockwise, when facing outwards
    #        from the centre of the mesh
    triangles_cw = np.array([
        [0, 4, 6],
        [0, 6, 2],
        [1, 3, 5],
        [3, 7, 5],
        [0, 1, 4],
        [1, 5, 4],
        [2, 6, 7],
        [2, 7, 3],
        [0, 2, 1],
        [1, 2, 3],
        [4, 5, 7],
        [4, 7, 6],
    ])

    # ccw == counter-clockwise
    triangles_ccw = np.array(triangles_cw)
    triangles_ccw[:, [1, 2]] = triangles_ccw[:, [2, 1]]

    # face normals
    fnormals = np.array([
        [0, 0, -1],
        [0, 0, -1],
        [0, 0, 1],
        [0, 0, 1],
        [0, -1, 0],
        [0, -1, 0],
        [0, 1, 0],
        [0, 1, 0],
        [-1, 0, 0],
        [-1, 0, 0],
        [1, 0, 0],
        [1, 0, 0],
    ])

    # vertex normals
    vnormals = np.zeros((8, 3))
    for i in range(8):
        faces = np.where(triangles_cw == i)[0]
        vnormals[i] = fnormals[faces].sum(axis=0)
    vnormals = transform.normalise(vnormals)

    cw_nofix = fslmesh.TriangleMesh(verts, triangles_cw)
    cw_fix = fslmesh.TriangleMesh(verts, triangles_cw, fixWinding=True)
    ccw_nofix = fslmesh.TriangleMesh(verts, triangles_ccw)
    ccw_fix = fslmesh.TriangleMesh(verts, triangles_ccw, fixWinding=True)

    # cw triangles should produce flipped
    # normals unless fixWinding is used;
    # ccw triangles should give correct
    # normals either way
    assert np.all(np.isclose(cw_nofix.normals, -fnormals))
    assert np.all(np.isclose(cw_nofix.vnormals, -vnormals))
    assert np.all(np.isclose(cw_fix.normals, fnormals))
    assert np.all(np.isclose(cw_fix.vnormals, vnormals))
    assert np.all(np.isclose(ccw_nofix.normals, fnormals))
    assert np.all(np.isclose(ccw_nofix.vnormals, vnormals))
    assert np.all(np.isclose(ccw_fix.normals, fnormals))
    assert np.all(np.isclose(ccw_fix.vnormals, vnormals))