Code example #1
    def GetValue(self, v0=None, v1=None):
        # Fall back to the stored start value when none is supplied.
        if v0 is None:
            v0 = self._v0
        t = self.GetTime()
        if self._IsTimePastDuration(t):
            # The transition has finished: snap to the end value.
            self._v1 = v1
        elif hasattr(v0, '__iter__'):
            # Vector values are interpolated component-wise with geo2.Lerp.
            self._v1 = geo2.Lerp(v0, v1, t / self.duration)
        else:
            # Scalar values are blended against the raw elapsed time.
            self._v1 = v0 + (v1 - v0) * t
        return self._v1
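
For reference, the pattern above is a plain time-based lerp. A minimal stand-alone sketch (illustrative names only, plain tuples in place of geo2 vectors):

    # Minimal sketch of time-based interpolation, analogous to the geo2.Lerp
    # usage above; every name here is illustrative, not from the project.
    def lerp(v0, v1, t):
        """Linearly interpolate between two equal-length tuples."""
        return tuple(a + (b - a) * t for a, b in zip(v0, v1))

    def sample(v0, v1, elapsed, duration):
        """Clamp elapsed time to the duration and return the blended value."""
        t = min(max(elapsed / duration, 0.0), 1.0)
        return lerp(v0, v1, t)

    print(sample((0.0, 0.0, 0.0), (10.0, 0.0, 0.0), 0.5, 2.0))  # (2.5, 0.0, 0.0)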
Code example #2
File: baseCamera.py Project: connoryang/1v1dec
    def RotateUpdateThread(self):
        try:
            while True:
                if self.rotateTarget is None:
                    break
                # Remaining distance between the current offset and the target.
                distLeft = geo2.Vec2Length(
                    geo2.Vec2Subtract(self.rotateTarget, self._rotateOffset))
                if not distLeft:
                    break
                # Fraction of the remaining distance to cover this frame.
                moveProp = self._GetRotateSpeed() / blue.os.fps
                if math.fabs(distLeft) < self.kRotateStopDist:
                    # Speed up the final approach so the rotation settles.
                    moveProp *= self.kRotateStopDist / math.fabs(distLeft)
                moveProp = min(moveProp, 1.0)
                # Step towards the target by lerping the current offset.
                self._rotateOffset = geo2.Lerp(self._rotateOffset,
                                               self.rotateTarget, moveProp)
                blue.synchro.Yield()

        finally:
            self.rotateUpdateThread = None
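
The loop above is a frame-rate-scaled chase: each tick it lerps a fixed fraction of the way to the target. A rough stand-alone sketch of that step (plain Python; the speed and fps values are stand-ins, not blue.os APIs):

    # Sketch of the per-frame chase pattern; constants are illustrative only.
    def step_towards(current, target, speed, fps):
        """Move a scalar offset a speed/fps fraction of the way to the target."""
        move_prop = min(speed / fps, 1.0)
        return current + (target - current) * move_prop

    offset = 0.0
    for _ in range(5):
        offset = step_towards(offset, 1.0, speed=12.0, fps=60.0)
    print(round(offset, 3))  # gets a little closer to 1.0 each frame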
Code example #3
    def _UpdateCameraAnimation(self,
                               alignToParent=False,
                               alignTargets=None,
                               loop=False,
                               clipName=None,
                               parent=None):
        def FindParametersInPostFx():
            # Walk the render job's post-processing steps to find the blur and
            # exposure ScalingFactor parameters driven by the animation below.
            blurScaleH = None
            blurScaleV = None
            blurFade = None
            exposure = None
            rj = self.GetRenderJob()
            if rj:
                for step in rj.steps:
                    if step.name == 'RJ_POSTPROCESSING':
                        if step.job:
                            for jobStep in step.job.steps:
                                if jobStep.name == 'PostProcess Blur':
                                    for fx in jobStep.PostProcess.stages:
                                        for param in fx.parameters:
                                            if param.name == 'ScalingFactor':
                                                if fx.name == 'Gaussian Horizontal Blur':
                                                    blurScaleH = param
                                                if fx.name == 'Gaussianl Vertical Blur':
                                                    blurScaleV = param
                                                if fx.name == '4x Up Filter and Add':
                                                    blurFade = param

                                if jobStep.name == 'PostProcess Exposure':
                                    for fx in jobStep.PostProcess.stages:
                                        for param in fx.parameters:
                                            if param.name == 'ScalingFactor':
                                                if fx.name == '4x Up Filter and Add':
                                                    exposure = param

            return (blurScaleH, blurScaleV, blurFade, exposure)

        transformTrack = None
        shakeSequencer = None
        duration = 0.0
        curveSetName = 'AnimatedCamera'
        scene = sm.GetService('sceneManager').GetRegisteredScene('default')
        viewStep, proj = self.GetViewAndProjection()
        camera = viewStep.camera
        for cset in scene.curveSets:
            if cset.name == curveSetName:
                transformTrack = cset.curves[0]
                if len(cset.curves) > 1:
                    shakeSequencer = cset.curves[1]

        duration = transformTrack.duration - 1 / 10.0
        oldFov = camera.fieldOfView
        # ppJob (the post-processing render job, set up outside this excerpt)
        # gets blur and exposure stages that the animation can drive.
        ppJob.AddPostProcess('Blur', 'res:/fisfx/postprocess/blur.red')
        ppJob.AddPostProcess('Exposure', 'res:/fisfx/postprocess/exposure.red')
        blurScaleH, blurScaleV, blurFade, exposure = FindParametersInPostFx()
        ballpark = sm.GetService('michelle').GetBallpark()
        ball = ballpark.GetBall(session.shipid)
        if parent:
            ball = parent.translationCurve
        if alignTargets:
            ball = alignTargets[0]
        if viewStep:
            viewStep.view = trinity.TriView()
        startTime = blue.os.GetSimTime()
        if loop:
            endTime = startTime + 36000000000L
        else:
            endTime = startTime + duration * 10000000
        time = startTime
        globalSceneScale = 4.0 / 30.0 * ball.model.boundingSphereRadius
        lastWorldPos = None
        lastWorldRot = None
        # Per-frame animation loop: run until the clip ends or the camera is
        # reset or interrupted.
        while time < endTime and not self.resetCamera and not self.interrupt:
            time = blue.os.GetSimTime()
            weight1 = 0.0
            weight2 = 0.0
            if self.vectorTracks:
                currentTime = trinity.device.animationTime
                for cvt in self.vectorTracks:
                    if cvt == 'targetWeight1' or cvt == 'targetWeight2':
                        vecTrack = self.vectorTracks[cvt]
                        if cvt == 'targetWeight1':
                            weight1 = vecTrack.value
                        else:
                            weight2 = vecTrack.value

            if viewStep:
                trans = geo2.MatrixTranslation(
                    transformTrack.translation[0] * globalSceneScale,
                    transformTrack.translation[1] * globalSceneScale,
                    transformTrack.translation[2] * globalSceneScale)
                rot = geo2.MatrixRotationQuaternion(transformTrack.rotation)
                comp = geo2.MatrixMultiply(rot, trans)
                if alignToParent:
                    if not ball.model and lastWorldPos:
                        translation = lastWorldPos
                        rotation = lastWorldRot
                    else:
                        rotation = ball.GetQuaternionAt(time)
                        translation = ball.model.worldPosition
                    lastWorldPos = translation
                    lastWorldRot = rotation
                    transOffset = geo2.MatrixTranslation(
                        translation[0], translation[1], translation[2])
                    rotOffset = geo2.MatrixRotationQuaternion(
                        (rotation.x, rotation.y, rotation.z, rotation.w))
                    comp = geo2.MatrixMultiply(comp, rotOffset)
                    comp = geo2.MatrixMultiply(comp, transOffset)
                if alignTargets:
                    t1 = alignTargets[0].model.worldPosition
                    t2 = alignTargets[1].model.worldPosition
                    # Offset the look-at point by the second target's rotated
                    # bounding-sphere center.
                    sphereOffset = alignTargets[1].model.boundingSphereCenter
                    qr = alignTargets[1].model.rotationCurve.GetQuaternionAt(time)
                    quatRotation = (qr.x, qr.y, qr.z, qr.w)
                    correctedOffset = geo2.QuaternionTransformVector(
                        quatRotation, sphereOffset)
                    t2 = geo2.Vec3Add(t2, correctedOffset)
                    rot = geo2.MatrixLookAtRH(t2, t1, (0.0, 1.0, 0.0))
                    rot = geo2.MatrixInverse(rot)
                    rot = (rot[0], rot[1], rot[2], (t1[0], t1[1], t1[2], 1.0))
                    comp = geo2.MatrixMultiply(comp, rot)
                    if weight1 > 0.0001:
                        # Blend the animated orientation towards a shaken
                        # look-at orientation, weighted by the track value.
                        shake = shakeSequencer.value
                        pos = (comp[3][0], comp[3][1], comp[3][2])
                        targetPos = (t2[0] + shake.x, t2[1] + shake.y,
                                     t2[2] + shake.z)
                        lookat = geo2.MatrixLookAtRH(pos, targetPos,
                                                     (0.0, 1.0, 0.0))
                        lookat = geo2.MatrixInverse(lookat)
                        qlookat = geo2.QuaternionRotationMatrix(lookat)
                        qorig = geo2.QuaternionRotationMatrix(comp)
                        qresult = geo2.Lerp(qorig, qlookat, weight1)
                        mresult = geo2.MatrixRotationQuaternion(qresult)
                        comp = (mresult[0], mresult[1], mresult[2], comp[3])
                if viewStep.view:
                    viewStep.view.transform = geo2.MatrixInverse(comp)
            if self.vectorTracks:
                currentTime = trinity.device.animationTime
                for cvt in self.vectorTracks:
                    if cvt == 'fov':
                        vecTrack = self.vectorTracks['fov']
                        fovValue = vecTrack.value
                        camera.fieldOfView = fovValue
                        proj.projection.PerspectiveFov(
                            fovValue, trinity.device.width /
                            float(trinity.device.height), camera.frontClip,
                            camera.backClip)
                    if cvt == 'blur':
                        vecTrack = self.vectorTracks['blur']
                        blurValue = vecTrack.value
                        if blurScaleH and blurScaleV and blurFade:
                            blurScaleH.value = blurValue
                            blurScaleV.value = blurValue
                            if blurValue > 0.01:
                                blurFade.value = 1.0
                            else:
                                blurFade.value = 0.0
                    if cvt == 'exposure':
                        vecTrack = self.vectorTracks['exposure']
                        exposureValue = vecTrack.value
                        if exposure:
                            exposure.value = exposureValue

                if 'fov' not in self.vectorTracks:
                    camera.fieldOfView = oldFov
                    proj.projection.PerspectiveFov(
                        oldFov,
                        trinity.device.width / float(trinity.device.height),
                        camera.frontClip, camera.backClip)
            blue.synchro.Yield()

        if exposure and blurFade:
            exposure.value = 0.0
            blurFade.value = 0.0
        if viewStep:
            viewStep.view = None
        camera.fieldOfView = oldFov
        if not self.interrupt:
            if camera.fieldOfView != 1.0:
                self.LogWarn('Warning: Camera fov not 1, correcting...')
                camera.fieldOfView = 1.0
            proj.projection = camera.projectionMatrix
        self.playingClip = False
        if self.continuousType and not self.interrupt and not self.resetCamera:
            self.interrupt = False
            self.UpdateContinuous()
        self.resetCamera = False
        self.interrupt = False
        if clipName:
            self.LogInfo('Camera clip done:', clipName)
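
The weighted shake blend in this example interpolates between two orientations with a component-wise quaternion lerp. A stand-alone sketch of that step (plain Python rather than the geo2 API, with a renormalisation step added for robustness):

    # Sketch of a normalised quaternion lerp (nlerp) between two (x, y, z, w)
    # orientations; geo2's quaternion helpers are replaced by plain tuples.
    import math

    def nlerp(q0, q1, t):
        """Blend two quaternions component-wise and renormalise the result."""
        blended = [a + (b - a) * t for a, b in zip(q0, q1)]
        norm = math.sqrt(sum(c * c for c in blended))
        return tuple(c / norm for c in blended)

    identity = (0.0, 0.0, 0.0, 1.0)
    quarter_turn_y = (0.0, math.sin(math.pi / 4), 0.0, math.cos(math.pi / 4))
    print(nlerp(identity, quarter_turn_y, 0.5))  # roughly an eighth turn about Y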
Code example #4
    def PositionShipModel(self, model):
        trinity.WaitForResourceLoads()
        localBB = model.GetLocalBoundingBox()
        boundingCenter = model.boundingSphereCenter[1]
        radius = model.boundingSphereRadius - self.shipPositionMinSize
        # Normalise the ship's size into [0, 1] across the supported range and
        # shape it with a root curve so small ships are not placed too close.
        val = radius / (self.shipPositionMaxSize - self.shipPositionMinSize)
        if val > 1.0:
            val = 1.0
        if val < 0:
            val = 0
        val = pow(val, 1.0 / self.shipPositionCurveRoot)
        shipDirection = (self.sceneTranslation[0], 0, self.sceneTranslation[2])
        shipDirection = geo2.Vec3Normalize(shipDirection)
        # Interpolate camera distance and target height between the settings
        # for the smallest and largest ships.
        distancePosition = geo2.Lerp(
            (self.shipPositionMinDistance, self.shipPositionTargetHeightMin),
            (self.shipPositionMaxDistance, self.shipPositionTargetHeightMax),
            val)
        y = distancePosition[1] - boundingCenter
        y = y + self.sceneTranslation[1]
        if y < -localBB[0][1] + 180:
            y = -localBB[0][1] + 180
        boundingBoxZCenter = localBB[0][2] + localBB[1][2]
        boundingBoxZCenter *= 0.5
        shipPos = geo2.Vec3Scale(shipDirection, -distancePosition[0])
        shipPos = geo2.Vec3Add(shipPos, self.sceneTranslation)
        shipPosition = (shipPos[0], y, shipPos[2])
        model.translationCurve = trinity.TriVectorCurve()
        model.translationCurve.value = shipPosition
        model.rotationCurve = trinity.TriRotationCurve()
        model.rotationCurve.value = geo2.QuaternionRotationSetYawPitchRoll(
            self.shipPositionRotation * math.pi / 180, 0, 0)
        model.modelTranslationCurve = blue.resMan.LoadObject(
            'res:/dx9/scene/hangar/ship_modelTranslationCurve.red')
        model.modelTranslationCurve.ZCurve.offset -= boundingBoxZCenter
        scaleMultiplier = 0.35 + 0.65 * (1 - val)
        capitalShips = [
            const.groupDreadnought, const.groupSupercarrier, const.groupTitan,
            const.groupFreighter, const.groupJumpFreighter, const.groupCarrier,
            const.groupCapitalIndustrialShip, const.groupIndustrialCommandShip
        ]
        dogmaLocation = self.clientDogmaIM.GetDogmaLocation()
        if getattr(dogmaLocation.GetDogmaItem(util.GetActiveShip()), 'groupID',
                   None) in capitalShips:
            scaleMultiplier = 0.35 + 0.25 * (1 - val)
            model.modelRotationCurve = blue.resMan.LoadObject(
                'res:/dx9/scene/hangar/ship_modelRotationCurve.red')
            model.modelRotationCurve.PitchCurve.speed *= scaleMultiplier
            model.modelRotationCurve.RollCurve.speed *= scaleMultiplier
            model.modelRotationCurve.YawCurve.speed *= scaleMultiplier
        else:
            if val > 0.6:
                val = 0.6
            scaleMultiplier = 0.35 + 0.65 * (1 - val / 0.6)
            model.modelRotationCurve = blue.resMan.LoadObject(
                'res:/dx9/scene/hangar/ship_modelRotationCurveSpinning.red')
            model.modelRotationCurve.PitchCurve.speed *= scaleMultiplier
            model.modelRotationCurve.RollCurve.speed *= scaleMultiplier
            model.modelRotationCurve.YawCurve.start = blue.os.GetSimTime()
            model.modelRotationCurve.YawCurve.ScaleTime(6 * val + 1)
        yValues = [(0, model.translationCurve.value[1] - 20.0),
                   (6.0, model.translationCurve.value[1] + 3.0),
                   (9.0, model.translationCurve.value[1])]
        for time, yValue in yValues:
            k = trinity.TriVectorKey()
            k.value = (model.translationCurve.value[0], yValue,
                       model.translationCurve.value[2])
            k.interpolation = trinity.TRIINT_HERMITE
            k.time = time
            model.translationCurve.keys.append(k)

        model.translationCurve.Sort()
        model.translationCurve.extrapolation = trinity.TRIEXT_CONSTANT
        model.translationCurve.start = blue.os.GetWallclockTimeNow()
        self.SetupFakeAudioTransformLocation(shipPosition)
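
The placement logic reduces to lerping a (distance, height) pair by a normalised, curve-shaped size factor. A rough stand-alone sketch with made-up constants (none of these values come from the project):

    # Sketch of size-driven placement: normalise the radius, shape it with a
    # root curve, then lerp between a near/low and a far/high setup.
    def place_for_radius(radius, min_size=20.0, max_size=500.0,
                         near=(150.0, 10.0), far=(900.0, 60.0), curve_root=2.0):
        val = (radius - min_size) / (max_size - min_size)
        val = min(max(val, 0.0), 1.0)
        val = val ** (1.0 / curve_root)
        distance = near[0] + (far[0] - near[0]) * val
        height = near[1] + (far[1] - near[1]) * val
        return distance, height

    print(place_for_radius(250.0))  # roughly (669, 45) with these constants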