def UpdateViewProjForLight(self, stepView, stepProj, light, effectValue):
     """
     Renderstep callback that keeps the rendersteps dealing with the view and projection
     transforms up to date with any changes to the lights.  Works out the values for this
     light and then delegates to the individual MeshData instances.
     """
     eye = light.position
     at = geo2.Add(eye, light.coneDirection)
     up = (0, 0, 1)
     if SkinSpotLightShadows.REUSE_ENGINE_MAPS:
         VP = light.GetViewProjMatrix()
     else:
         if effectValue:
             effectValue.value = (0, 0, 0, light.radius)
         viewmat = geo2.MatrixLookAtRH(eye, at, up)
         self.ComputeProjectionMatrix(stepProj.projection, light, viewmat)
         stepView.view.transform = viewmat
         VP1 = geo2.MatrixMultiply(viewmat, stepProj.projection.transform)
         VP = geo2.MatrixMultiply(VP1, self.uvAdjustMatrix)
     VPT = geo2.MatrixTranspose(VP)
     for meshData in self.meshes.itervalues():
         meshData.updateParams(light, VPT)
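A note on the uvAdjustMatrix multiplied in above: its contents are not part of this snippet. A common matrix for this job remaps clip-space x, y in [-1, 1] to shadow-map UVs in [0, 1] with V flipped; a minimal sketch with plain row-major tuples (matching the row-vector multiply order view * proj * uvAdjust used above) might look like the following. The names here are illustrative, not the engine API.

# Sketch only: the real self.uvAdjustMatrix is not shown in the example above.
# A typical clip-space -> shadow-map UV remap, written as a row-major 4x4
# matrix for the row-vector order (view * proj * uvAdjust) used in the code.
UV_ADJUST = ((0.5, 0.0, 0.0, 0.0),
             (0.0, -0.5, 0.0, 0.0),
             (0.0, 0.0, 1.0, 0.0),
             (0.5, 0.5, 0.0, 1.0))

def mat_mul(a, b):
    # Plain 4x4 row-major multiply, standing in for geo2.MatrixMultiply.
    return tuple(tuple(sum(a[i][k] * b[k][j] for k in range(4))
                       for j in range(4)) for i in range(4))

# viewProj = mat_mul(mat_mul(viewmat, projection), UV_ADJUST)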
Example No. 2
def _GetViewAndProjectionUsingProjectedBoundingBox(
        calculateProjectedBoundingBox,
        scene=None,
        boundingSphereRadius=None,
        boundingSphereCenter=None,
        boundingBoxMin=None,
        boundingBoxMax=None,
        cameraAngle=None):
    """
    Fits an object in frame with view and projection matrices. We first do a rough fit
    using either the bounding sphere or bounding box. We then "zoom in" to the point where
    the projected bounding box fills 90% of the image.
    """
    cameraAngle = cameraAngle or GETPHOTO_ANGLE
    if boundingSphereRadius:
        radius = boundingSphereRadius
        center = boundingSphereCenter if boundingSphereCenter else (0.0, 0.0, 0.0)
    else:
        center = geo2.Vec3Add(boundingBoxMin, boundingBoxMax)
        center = geo2.Vec3Scale(center, 0.5)
        radius = geo2.Vec3Length(
            geo2.Vec3Subtract(boundingBoxMax, boundingBoxMin))
    dist = _SphericalFit(radius)
    viewEyeAtUp = _GetViewMatrixFromAngle(cameraAngle, center, dist)
    projTransform = geo2.MatrixPerspectiveFovRH(*GETPHOTO_PROJECTION)
    viewTransform = geo2.MatrixLookAtRH(*viewEyeAtUp)
    combinedTransform = viewTransform
    combinedTransform = geo2.MatrixMultiply(combinedTransform, projTransform)
    safeMin, safeMax = calculateProjectedBoundingBox(combinedTransform)
    deltaX = safeMax[0] - safeMin[0]
    deltaY = safeMax[1] - safeMin[1]
    scalingFactor = 0.9 * (2.0 / max(deltaX, deltaY))
    try:
        if scene.backgroundEffect is not None:
            params = scene.backgroundEffect.Find(['trinity.Tr2FloatParameter'])
            for param in params:
                if param.name == 'ProjectionScaling':
                    param.value = scalingFactor

    except AttributeError:
        pass

    offsetX = -1 * scalingFactor * (safeMin[0] + safeMax[0]) / 2.0
    offsetY = -1 * scalingFactor * (safeMin[1] + safeMax[1]) / 2.0
    scale = 1.0 / tan(GETPHOTO_FOV / 2.0) * scalingFactor
    zn = 1.0
    zf = dist + radius * 2
    t = zn * (1 - offsetY) / scale
    b = -t * (1 + offsetY) / (1 - offsetY)
    r = zn * (1 - offsetX) / scale
    l = -r * (1 + offsetX) / (1 - offsetX)
    projection = trinity.TriProjection()
    projection.PerspectiveOffCenter(l, r, b, t, zn, zf)
    view = trinity.TriView()
    view.SetLookAtPosition(*viewEyeAtUp)
    return (view, projection)
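GETPHOTO_FOV, GETPHOTO_PROJECTION, GETPHOTO_ANGLE and the helpers _SphericalFit and _GetViewMatrixFromAngle come from outside this snippet, but the zoom arithmetic can be checked in isolation. The sketch below reproduces it with placeholder inputs; the FOV value, projected bounds, distance and radius are assumptions, not values from the example.

from math import radians, tan

GETPHOTO_FOV = radians(60.0)                  # placeholder; real constant not shown
safeMin, safeMax = (-0.4, -0.3), (0.2, 0.5)   # pretend projected bounding box
dist, radius = 10.0, 2.0                      # pretend rough-fit distance and radius

deltaX = safeMax[0] - safeMin[0]
deltaY = safeMax[1] - safeMin[1]
scalingFactor = 0.9 * (2.0 / max(deltaX, deltaY))   # fill 90% of the frame

offsetX = -scalingFactor * (safeMin[0] + safeMax[0]) / 2.0
offsetY = -scalingFactor * (safeMin[1] + safeMax[1]) / 2.0
scale = 1.0 / tan(GETPHOTO_FOV / 2.0) * scalingFactor

zn, zf = 1.0, dist + radius * 2
t = zn * (1 - offsetY) / scale
b = -t * (1 + offsetY) / (1 - offsetY)
r = zn * (1 - offsetX) / scale
l = -r * (1 + offsetX) / (1 - offsetX)
# (l, r, b, t, zn, zf) are the arguments for TriProjection.PerspectiveOffCenter.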
Example No. 3
 def PointToYawPitchDist(self, pos):
     """
     Convert a position into (yaw, pitch, distance) relative to the current
     point of interest (self.poi).
     """
     upVector = (0, 1, 0)
     if trinity.IsRightHanded():
         rotMatrix = geo2.MatrixLookAtRH(pos, self.poi, upVector)
     else:
         rotMatrix = geo2.MatrixLookAtLH(pos, self.poi, upVector)
     rotMatrix = geo2.MatrixTranspose(rotMatrix)
     quat = geo2.QuaternionRotationMatrix(rotMatrix)
     yaw, pitch, roll = geo2.QuaternionRotationGetYawPitchRoll(quat)
     yaw = math.pi / 2 - yaw
     pitch = math.pi / 2 - pitch
     return (yaw, pitch, geo2.Vec3Distance(pos, self.poi))
Example No. 4
 def Focus(self, point, dist = -1.0):
     dev = trinity.device
     pos = self.GetPosition()
     up = (0.0, 1.0, 0.0)
     t = (self.localViewMatrix[1][0], self.localViewMatrix[1][1], self.localViewMatrix[1][2])
     if geo2.Vec3Dot(t, up) <= 0.0:
         up = (0.0, -1.0, 0.0)
     self.pointOfInterest = point
     self.localViewMatrix = geo2.MatrixLookAtRH(pos, point, up)
     if dist > 0.0:
         view = geo2.Vec3Subtract(pos, point)
         view = geo2.Vec3Normalize(view)
         self.SetPosition(geo2.Vec3Add(point, geo2.Vec3Scale(view, dist)))
Example No. 5
    def _GetViewAndProjectionUsingProjectedBoundingBox(self, CalculateProjectedBoundingBox, scene = None, boundingSphereRadius = None, boundingSphereCenter = None, boundingBoxMin = None, boundingBoxMax = None, cameraAngle = None):
        self.LogInfo('TakeSnapShotUsingBoundingBox')
        cameraAngle = cameraAngle or GETPHOTO_ANGLE
        if boundingSphereRadius:
            radius = boundingSphereRadius
            center = boundingSphereCenter if boundingSphereCenter else (0.0, 0.0, 0.0)
        elif boundingBoxMin and boundingBoxMax:
            boundingBoxMin = geo2.Vector(boundingBoxMin.x, boundingBoxMin.y, boundingBoxMin.z)
            boundingBoxMax = geo2.Vector(boundingBoxMax.x, boundingBoxMax.y, boundingBoxMax.z)
            center = (boundingBoxMin + boundingBoxMax) / 2.0
            radius = geo2.Vec3Length(boundingBoxMax - boundingBoxMin)
        else:
            raise RuntimeError('Can not do a rough fit without either a bounding sphere or bounding box.')
        dist = self._SphericalFit(radius)
        viewEyeAtUp = self._GetViewMatrixFromAngle(cameraAngle, center, dist)
        projTransform = geo2.MatrixPerspectiveFovRH(*GETPHOTO_PROJECTION)
        viewTransform = geo2.MatrixLookAtRH(*viewEyeAtUp)
        combinedTransform = viewTransform
        combinedTransform = geo2.MatrixMultiply(combinedTransform, projTransform)
        safeMin, safeMax = CalculateProjectedBoundingBox(combinedTransform)
        deltaX = safeMax[0] - safeMin[0]
        deltaY = safeMax[1] - safeMin[1]
        scalingFactor = 0.9 * (2.0 / max(deltaX, deltaY))
        try:
            if scene.backgroundEffect is not None:
                params = scene.backgroundEffect.Find(['trinity.TriFloatParameter', 'trinity.Tr2FloatParameter'])
                for param in params:
                    if param.name == 'ProjectionScaling':
                        param.value = scalingFactor

        except AttributeError:
            pass

        offsetX = -1 * scalingFactor * (safeMin[0] + safeMax[0]) / 2.0
        offsetY = -1 * scalingFactor * (safeMin[1] + safeMax[1]) / 2.0
        scale = 1.0 / tan(GETPHOTO_FOV / 2.0) * scalingFactor
        zn = 1.0
        zf = dist + radius * 2
        t = zn * (1 - offsetY) / scale
        b = -t * (1 + offsetY) / (1 - offsetY)
        r = zn * (1 - offsetX) / scale
        l = -r * (1 + offsetX) / (1 - offsetX)
        projection = trinity.TriProjection()
        projection.PerspectiveOffCenter(l, r, b, t, zn, zf)
        view = trinity.TriView()
        view.SetLookAtPosition(*viewEyeAtUp)
        return (view, projection)
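TriProjection.PerspectiveOffCenter itself is engine code and not shown anywhere in these examples. Assuming it builds the conventional right-handed, Direct3D-style off-center frustum (row vectors, depth mapped to [0, 1]), an equivalent matrix can be sketched as:

def perspective_off_center_rh(l, r, b, t, zn, zf):
    # Assumed equivalent of TriProjection.PerspectiveOffCenter: the standard
    # right-handed off-center projection in row-major form with depth mapped
    # to [0, 1].  Sketch only; not taken from the engine.
    return ((2.0 * zn / (r - l), 0.0, 0.0, 0.0),
            (0.0, 2.0 * zn / (t - b), 0.0, 0.0),
            ((l + r) / (r - l), (t + b) / (t - b), zf / (zn - zf), -1.0),
            (0.0, 0.0, zn * zf / (zn - zf), 0.0))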
Example No. 6
 def UpdateViewProjForLight(self, stepView, stepProj, light, effectValue):
     eye = light.position
     at = geo2.Add(eye, light.coneDirection)
     up = (0, 0, 1)
     if SkinSpotLightShadows.REUSE_ENGINE_MAPS:
         VP = light.GetViewProjMatrix()
     else:
         if effectValue:
             effectValue.value = (0, 0, 0, light.radius)
         viewmat = geo2.MatrixLookAtRH(eye, at, up)
         self.ComputeProjectionMatrix(stepProj.projection, light, viewmat)
         stepView.view.transform = viewmat
         VP1 = geo2.MatrixMultiply(viewmat, stepProj.projection.transform)
         VP = geo2.MatrixMultiply(VP1, self.uvAdjustMatrix)
     VPT = geo2.MatrixTranspose(VP)
     for meshData in self.meshes.itervalues():
         meshData.updateParams(light, VPT)
Example No. 7
 def Focus(self, point, dist = -1.0):
     """
         Focus the camera on a particular point.
         dist = how far the camera should end up from the focus point; must be a
         positive float, otherwise only the orientation is updated.
     """
     dev = trinity.device
     pos = self.GetPosition()
     up = (0.0, 1.0, 0.0)
     t = (self.localViewMatrix[1][0], self.localViewMatrix[1][1], self.localViewMatrix[1][2])
     if geo2.Vec3Dot(t, up) <= 0.0:
         up = (0.0, -1.0, 0.0)
     self.pointOfInterest = point
     self.localViewMatrix = geo2.MatrixLookAtRH(pos, point, up)
     if dist > 0.0:
         view = geo2.Vec3Subtract(pos, point)
         view = geo2.Vec3Normalize(view)
         self.SetPosition(geo2.Vec3Add(point, geo2.Vec3Scale(view, dist)))
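Typical use, assuming camera is an instance of the (unshown) class these methods belong to; the up-vector check above appears to keep the rebuilt view from rolling 180 degrees when the camera is currently upside down.

# Hypothetical usage; `camera` stands for an instance of the camera class.
camera.Focus((0.0, 0.0, 0.0))                  # re-aim at the origin, keep position
camera.Focus((100.0, 0.0, -50.0), dist=250.0)  # re-aim and move 250 units away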
Example No. 8
    def FollowCamera(self, target, aimTarget=None):
        viewStep, proj = self.GetViewAndProjection()
        if viewStep:
            viewStep.view = trinity.TriView()
        camera = sm.GetService('sceneManager').GetRegisteredCamera(
            None, defaultOnActiveCamera=True)
        globalSceneScale = 1.0
        ballpark = sm.GetService('michelle').GetBallpark()
        ball = ballpark.GetBall(eve.session.shipid)
        self.resetCamera = False
        while not self.resetCamera and target:
            time = blue.os.GetSimTime()
            rot = target.rotationCurve
            if True:
                rotation = rot.GetQuaternionAt(time)
                translation = target.translationCurve.GetVectorAt(time)
                if ball:
                    targetPos = ball.model.worldPosition
                    targetVector = (targetPos[0] - translation.x,
                                    targetPos[1] - translation.y,
                                    targetPos[2] - translation.z)
                    targetVector = geo2.Vec3Normalize(targetVector)
                    dist = 100.0
                    elevation = 0.0
                    translation.x = translation.x - targetVector[0] * dist
                    translation.y = translation.y - targetVector[
                        1] * dist + elevation
                    translation.z = translation.z - targetVector[2] * dist
                    lookat = geo2.MatrixLookAtRH(
                        (translation.x, translation.y, translation.z),
                        targetPos, (0.0, 1.0, 0.0))
                trans = geo2.MatrixTranslation(
                    translation.x * globalSceneScale,
                    translation.y * globalSceneScale,
                    translation.z * globalSceneScale)
                rot = geo2.MatrixRotationQuaternion(
                    (rotation.x, rotation.y, rotation.z, rotation.w))
                if viewStep and viewStep.view:
                    viewStep.view.transform = lookat
            blue.synchro.Yield()

        if viewStep:
            viewStep.view = None
        proj.projection = camera.projectionMatrix
        self.resetCamera = False
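The camera placement in the loop above (starting from the target curve's position, step dist units away from the ship, optionally lifted, then look at the ship) can be sketched without the Blue/geo2 types:

import math

def follow_position(anchor, look_target, dist=100.0, elevation=0.0):
    # Sketch of the placement logic above, with plain tuples standing in for
    # geo2 vectors: move `dist` units from `anchor` away from `look_target`,
    # add an optional vertical lift, and return the camera position.
    d = (look_target[0] - anchor[0],
         look_target[1] - anchor[1],
         look_target[2] - anchor[2])
    length = math.sqrt(d[0] * d[0] + d[1] * d[1] + d[2] * d[2]) or 1.0
    d = (d[0] / length, d[1] / length, d[2] / length)
    return (anchor[0] - d[0] * dist,
            anchor[1] - d[1] * dist + elevation,
            anchor[2] - d[2] * dist)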
Example No. 9
    def _UpdateCameraAnimation(self,
                               alignToParent=False,
                               alignTargets=None,
                               loop=False,
                               clipName=None,
                               parent=None):
        def FindParametersInPostFx():
            blurScaleH = None
            blurScaleV = None
            blurFade = None
            exposure = None
            rj = self.GetRenderJob()
            if rj:
                for step in rj.steps:
                    if step.name == 'RJ_POSTPROCESSING':
                        if step.job:
                            for jobStep in step.job.steps:
                                if jobStep.name == 'PostProcess Blur':
                                    for fx in jobStep.PostProcess.stages:
                                        for param in fx.parameters:
                                            if param.name == 'ScalingFactor':
                                                if fx.name == 'Gaussian Horizontal Blur':
                                                    blurScaleH = param
                                                if fx.name == 'Gaussian Vertical Blur':
                                                    blurScaleV = param
                                                if fx.name == '4x Up Filter and Add':
                                                    blurFade = param

                                if jobStep.name == 'PostProcess Exposure':
                                    for fx in jobStep.PostProcess.stages:
                                        for param in fx.parameters:
                                            if param.name == 'ScalingFactor':
                                                if fx.name == '4x Up Filter and Add':
                                                    exposure = param

            return (blurScaleH, blurScaleV, blurFade, exposure)

        transformTrack = None
        shakeSequencer = None
        duration = 0.0
        curveSetName = 'AnimatedCamera'
        scene = sm.GetService('sceneManager').GetRegisteredScene('default')
        viewStep, proj = self.GetViewAndProjection()
        camera = viewStep.camera
        for cset in scene.curveSets:
            if cset.name == curveSetName:
                transformTrack = cset.curves[0]
                if len(cset.curves) > 1:
                    shakeSequencer = cset.curves[1]

        duration = transformTrack.duration - 1 / 10.0
        oldFov = camera.fieldOfView
        ppJob.AddPostProcess('Blur', 'res:/fisfx/postprocess/blur.red')
        ppJob.AddPostProcess('Exposure', 'res:/fisfx/postprocess/exposure.red')
        blurScaleH, blurScaleV, blurFade, exposure = FindParametersInPostFx()
        ballpark = sm.GetService('michelle').GetBallpark()
        ball = ballpark.GetBall(session.shipid)
        if parent:
            ball = parent.translationCurve
        if alignTargets:
            ball = alignTargets[0]
        if viewStep:
            viewStep.view = trinity.TriView()
        startTime = blue.os.GetSimTime()
        if loop:
            endTime = startTime + 36000000000L
        else:
            endTime = startTime + duration * 10000000
        time = startTime
        globalSceneScale = 4.0 / 30.0 * ball.model.boundingSphereRadius
        lastWorldPos = None
        lastWorldRot = None
        while time < endTime and not self.resetCamera and not self.interrupt:
            time = blue.os.GetSimTime()
            weight1 = 0.0
            weight2 = 0.0
            if self.vectorTracks:
                currentTime = trinity.device.animationTime
                for cvt in self.vectorTracks:
                    if cvt == 'targetWeight1' or cvt == 'targetWeight2':
                        vecTrack = self.vectorTracks[cvt]
                        if cvt == 'targetWeight1':
                            weight1 = vecTrack.value
                        else:
                            weight2 = vecTrack.value

            if viewStep:
                trans = geo2.MatrixTranslation(
                    transformTrack.translation[0] * globalSceneScale,
                    transformTrack.translation[1] * globalSceneScale,
                    transformTrack.translation[2] * globalSceneScale)
                rot = geo2.MatrixRotationQuaternion(transformTrack.rotation)
                comp = geo2.MatrixMultiply(rot, trans)
                if alignToParent:
                    if not ball.model and lastWorldPos:
                        translation = lastWorldPos
                        rotation = lastWorldRot
                    else:
                        rotation = ball.GetQuaternionAt(time)
                        translation = ball.model.worldPosition
                    lastWorldPos = translation
                    lastWorldRot = rotation
                    transOffset = geo2.MatrixTranslation(
                        translation[0], translation[1], translation[2])
                    rotOffset = geo2.MatrixRotationQuaternion(
                        (rotation.x, rotation.y, rotation.z, rotation.w))
                    comp = geo2.MatrixMultiply(comp, rotOffset)
                    comp = geo2.MatrixMultiply(comp, transOffset)
                if alignTargets:
                    t1 = alignTargets[0].model.worldPosition
                    t2 = alignTargets[1].model.worldPosition
                    if True:
                        sphereOffset = alignTargets[
                            1].model.boundingSphereCenter
                        qr = alignTargets[
                            1].model.rotationCurve.GetQuaternionAt(time)
                        quatRotation = (qr.x, qr.y, qr.z, qr.w)
                        correctedOffset = geo2.QuaternionTransformVector(
                            quatRotation, sphereOffset)
                        t2 = geo2.Vec3Add(t2, correctedOffset)
                    rot = geo2.MatrixLookAtRH(t2, t1, (0.0, 1.0, 0.0))
                    rot = geo2.MatrixInverse(rot)
                    rot = (rot[0], rot[1], rot[2], (t1[0], t1[1], t1[2], 1.0))
                    comp = geo2.MatrixMultiply(comp, rot)
                    if weight1 > 0.0001:
                        shake = shakeSequencer.value
                        pos = (comp[3][0], comp[3][1], comp[3][2])
                        targetPos = (t2[0] + shake.x, t2[1] + shake.y,
                                     t2[2] + shake.z)
                        lookat = geo2.MatrixLookAtRH(pos, targetPos,
                                                     (0.0, 1.0, 0.0))
                        lookat = geo2.MatrixInverse(lookat)
                        qlookat = geo2.QuaternionRotationMatrix(lookat)
                        qorig = geo2.QuaternionRotationMatrix(comp)
                        qresult = geo2.Lerp(qorig, qlookat, weight1)
                        mresult = geo2.MatrixRotationQuaternion(qresult)
                        comp = (mresult[0], mresult[1], mresult[2], comp[3])
                if viewStep.view:
                    viewStep.view.transform = geo2.MatrixInverse(comp)
            if self.vectorTracks:
                currentTime = trinity.device.animationTime
                for cvt in self.vectorTracks:
                    if cvt == 'fov':
                        vecTrack = self.vectorTracks['fov']
                        fovValue = vecTrack.value
                        camera.fieldOfView = fovValue
                        proj.projection.PerspectiveFov(
                            fovValue, trinity.device.width /
                            float(trinity.device.height), camera.frontClip,
                            camera.backClip)
                    if cvt == 'blur':
                        vecTrack = self.vectorTracks['blur']
                        blurValue = vecTrack.value
                        if blurScaleH and blurScaleV and blurFade:
                            blurScaleH.value = blurValue
                            blurScaleV.value = blurValue
                            if blurValue > 0.01:
                                blurFade.value = 1.0
                            else:
                                blurFade.value = 0.0
                    if cvt == 'exposure':
                        vecTrack = self.vectorTracks['exposure']
                        exposureValue = vecTrack.value
                        if exposure:
                            exposure.value = exposureValue

                if 'fov' not in self.vectorTracks:
                    camera.fieldOfView = oldFov
                    proj.projection.PerspectiveFov(
                        oldFov,
                        trinity.device.width / float(trinity.device.height),
                        camera.frontClip, camera.backClip)
            blue.synchro.Yield()

        if exposure and blurFade:
            exposure.value = 0.0
            blurFade.value = 0.0
        if viewStep:
            viewStep.view = None
        camera.fieldOfView = oldFov
        if not self.interrupt:
            if camera.fieldOfView != 1.0:
                self.LogWarn('Warning: Camera fov not 1, correcting...')
                camera.fieldOfView = 1.0
            proj.projection = camera.projectionMatrix
        self.playingClip = False
        if self.continuousType and not self.interrupt and not self.resetCamera:
            self.interrupt = False
            self.UpdateContinuous()
        self.resetCamera = False
        self.interrupt = False
        if clipName:
            self.LogInfo('Camera clip done:', clipName)
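The orientation blend above (geo2.Lerp between the animated rotation and a look-at rotation, re-expressed as a matrix) relies on engine quaternion helpers. Assuming the blend is a plain normalized quaternion lerp, an equivalent sketch is:

import math

def quat_nlerp(q0, q1, weight):
    # Assumed behaviour of the geo2.Lerp call in the blend above: a
    # component-wise lerp between two (x, y, z, w) quaternions followed by
    # renormalisation.  Sketch only; the engine may well use a true slerp.
    if sum(a * b for a, b in zip(q0, q1)) < 0.0:
        q1 = tuple(-c for c in q1)       # take the shorter arc
    q = tuple(a + (b - a) * weight for a, b in zip(q0, q1))
    norm = math.sqrt(sum(c * c for c in q)) or 1.0
    return tuple(c / norm for c in q)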