Exemple #1
0
 def calcDrift(baseT, trait, fasterDriftIsBetter=False):
     """Scale baseT by a drift factor derived from the trait's percentile.

     The percentile is flipped when higher is not better, then mapped
     piecewise (below/above the median) onto a multiplier via lerp.
     """
     p = trait.percentile
     if not trait.higherIsBetter:
         p = 1.0 - p
     if fasterDriftIsBetter:
         if p < 0.5:
             scale = lerp(2.0, 1.0, p * 2.0)
         else:
             t = (p - 0.5) * 2.0
             # Quadratic ease toward the fast end for top-half percentiles.
             scale = lerp(1.0, 0.1, t * t)
     else:
         if p < 0.5:
             scale = lerp(0.75, 1.0, p * 2.0)
         else:
             t = (p - 0.5) * 2.0
             # Quadratic ease toward the slow end for top-half percentiles.
             scale = lerp(1.0, 28.0, t * t)
     return baseT * scale
Exemple #2
0
    def move(self, task=None):
        """Per-frame pet movement step; reschedules itself via doMethodLater.

        Always returns Task.done -- the next step is scheduled manually
        rather than by returning Task.again.
        """
        if self.isEmpty():
            # Pet object is already gone; log best-effort and stop the task.
            try:
                self.air.writeServerEvent('Late Pet Move Call', self.doId, ' ')
            except Exception:
                # Logging is best-effort only; never let it break teardown.
                # (was a bare except, which also swallowed SystemExit etc.)
                pass

            # task defaults to None (direct call, not via taskMgr); guard
            # before dereferencing task.name (was an AttributeError).
            if task is not None:
                taskMgr.remove(task.name)
            return Task.done
        if not self.isLockMoverEnabled():
            self.mover.move()
        # Throttle position broadcasts when many avatars are nearby.
        numNearby = len(self.brain.nearbyAvs) - 1
        minNearby = 5
        if numNearby > minNearby:
            delay = 0.08 * (numNearby - minNearby)
            self.setPosHprBroadcastPeriod(PetConstants.PosBroadcastPeriod +
                                          lerp(delay *
                                               0.75, delay, random.random()))
        # Safety net: delete pets that wander absurdly far from the origin.
        maxDist = 1000
        if abs(self.getX()) > maxDist or abs(self.getY()) > maxDist:
            DistributedPetAI.notify.warning(
                'deleting pet %s before he wanders off too far' % self.doId)
            self._outOfBounds = True
            self.stopPosHprBroadcast()
            self.requestDelete()
            return Task.done
        if __dev__:
            self.pscMoveResc.start()
        taskMgr.doMethodLater(simbase.petMovePeriod, self.move,
                              self.getMoveTaskName())
        if __dev__:
            self.pscMoveResc.stop()
        return Task.done
    def update(self, pet):
        """Refresh the trick-aptitude labels and bars from the pet's data.

        Creates the DirectLabel/DirectWaitBar widgets lazily on first use
        and updates them in place on later calls.
        """
        if not pet:
            return
        for trickId in PetTricks.TrickId2scIds.keys():
            trickText = TTLocalizer.PetTrickStrings[trickId]
            if trickId < len(pet.trickAptitudes):
                aptitude = pet.trickAptitudes[trickId]
                bar = self.bars.get(trickId)
                # was: self.bars.get(trickId) -- copy-paste bug looked the
                # label up in the wrong dict, so existing labels were never
                # found and were recreated / bars got label text assigned
                label = self.labels.get(trickId)
                if aptitude != 0:
                    healRange = PetTricks.TrickHeals[trickId]
                    hp = lerp(healRange[0], healRange[1], aptitude)
                    if hp == healRange[1]:
                        # Max aptitude: show a completely full bar.
                        hp = healRange[1]
                        length = 1
                        barColor = (0.7, 0.8, 0.5, 1)
                    else:
                        # Split into whole laff points (hp) and fractional
                        # bar fill (length).
                        hp, length = divmod(hp, 1)
                        barColor = (0.9, 1, 0.7, 1)
                    if not label:
                        self.labels[trickId] = DirectLabel(
                            parent=self,
                            relief=None,
                            pos=(0, 0, 0.43 - trickId * 0.155),
                            scale=0.7,
                            text=trickText,
                            text_scale=TTLocalizer.PDPtrickText,
                            text_fg=(0.05, 0.14, 0.4, 1),
                            text_align=TextNode.ALeft,
                            text_pos=(-1.4, -0.05))
                    else:
                        label['text'] = trickText
                    if not bar:
                        self.bars[trickId] = DirectWaitBar(
                            parent=self,
                            pos=(0, 0, 0.43 - trickId * 0.155),
                            relief=DGG.SUNKEN,
                            frameSize=(-0.5, 0.9, -0.1, 0.1),
                            borderWidth=(0.025, 0.025),
                            scale=0.7,
                            frameColor=(0.4, 0.6, 0.4, 1),
                            barColor=barColor,
                            range=1.0 + FUDGE_FACTOR,
                            value=length + FUDGE_FACTOR,
                            text=str(int(hp)) + ' ' + TTLocalizer.Laff,
                            text_scale=TTLocalizer.PDPlaff,
                            text_fg=(0.05, 0.14, 0.4, 1),
                            text_align=TextNode.ALeft,
                            text_pos=TTLocalizer.PDPlaffPos)
                    else:
                        bar['value'] = length + FUDGE_FACTOR
                        bar['text'] = (str(int(hp)) + ' ' + TTLocalizer.Laff, )

        return
Exemple #4
0
 def _handleDidTrick(self, trickId):
     """Record a performed trick: bump aptitude, add fatigue, log the event."""
     DistributedPetAI.notify.debug('_handleDidTrick: %s' % trickId)
     if trickId == PetTricks.Tricks.BALK:
         # A balk is a refusal, not a performed trick.
         return
     previous = self.getTrickAptitude(trickId)
     self.setTrickAptitude(trickId,
                           previous + PetTricks.AptitudeIncrementDidTrick)
     # Better-practiced tricks tire the pet less (fatigue lerps down
     # from Max to Min as the pre-increment aptitude rises).
     fatigue = lerp(PetTricks.MaxTrickFatigue, PetTricks.MinTrickFatigue,
                    previous)
     self.addToMood('fatigue', fatigue)
     self.trickLogger.addEvent(trickId)
 def _handleDidTrick(self, trickId):
     """Record a performed trick: bump aptitude, add fatigue, resync mood."""
     DistributedPetProxyAI.notify.debug('_handleDidTrick: %s' % trickId)
     if trickId == PetTricks.Tricks.BALK:
         # A balk is a refusal, not a performed trick.
         return
     previous = self.getTrickAptitude(trickId)
     self.setTrickAptitude(trickId,
                           previous + PetTricks.AptitudeIncrementDidTrick)
     # Better-practiced tricks tire the pet less (fatigue lerps down
     # from Max to Min as the pre-increment aptitude rises).
     fatigue = lerp(PetTricks.MaxTrickFatigue, PetTricks.MinTrickFatigue,
                    previous)
     self.addToMood('fatigue', fatigue)
     self.d_setDominantMood(self.mood.getDominantMood())
Exemple #6
0
 def _willDoTrick(self, trickId):
     """Randomly decide whether the pet performs the requested trick."""
     # Mood picks which aptitude-to-probability range applies.
     if self.isContented():
         lo = PetTricks.MinActualTrickAptitude
         hi = PetTricks.MaxActualTrickAptitude
     else:
         lo = PetTricks.NonHappyMinActualTrickAptitude
         hi = PetTricks.NonHappyMaxActualTrickAptitude
     roll = random.random()
     threshold = lerp(lo, hi, self.getTrickAptitude(trickId))
     # A tired pet is half as likely to comply.
     if self.mood.isComponentActive('fatigue'):
         threshold *= 0.5
     threshold *= PetTricks.TrickAccuracies[trickId]
     DistributedPetAI.notify.debug('_willDoTrick: %s / %s' %
                                   (roll, threshold))
     return roll < threshold
Exemple #7
0
    def driftMood(self, dt=None, curMood=None):
        """Drift every mood component by the elapsed time since last call.

        dt defaults to the wall-clock time since the previous drift;
        curMood defaults to self (drift from our own current values).
        """
        now = globalClock.getFrameTime()
        if not hasattr(self, 'lastDriftTime'):
            self.lastDriftTime = now
        if dt is None:
            dt = now - self.lastDriftTime
        self.lastDriftTime = now
        if dt <= 0.0:
            return
        if curMood is None:
            curMood = self

        def doDrift(curValue, timeToMedian, dt=float(dt)):
            # Linear drift clamped to [0, 1]; covering the full range from 0
            # takes timeToMedian * 7200 seconds.
            return clampScalar(curValue + dt / (timeToMedian * 7200), 0.0,
                               1.0)

        # Each component drifts at its own rate (self.tBoredom etc.).
        for comp in ('boredom', 'loneliness', 'sadness', 'fatigue', 'hunger',
                     'confusion', 'excitement', 'surprise', 'affection'):
            rate = getattr(self, 't' + comp[0].upper() + comp[1:])
            setattr(self, comp, doDrift(getattr(curMood, comp), rate))
        # Anger is driven by neglect; hunger is weighted triple in the
        # abuse average.
        abuse = average(curMood.hunger, curMood.hunger, curMood.hunger,
                        curMood.boredom, curMood.loneliness)
        tipPoint = 0.6
        if abuse < tipPoint:
            tAnger = lerp(self.tAngerDec, -PetMood.LONGTIME, abuse / tipPoint)
        else:
            tAnger = lerp(PetMood.LONGTIME, self.tAngerInc,
                          (abuse - tipPoint) / (1.0 - tipPoint))
        self.anger = doDrift(curMood.anger, tAnger)
        self.announceChange()
        return
        def finish(avatar=avatar, trickId=trickId, self=self):
            # Post-trick cleanup: heal onlooking toons, record the trick on
            # the pet, release it, and announce completion. The default args
            # bind avatar/trickId/self from an enclosing scope that is not
            # visible in this fragment.
            if hasattr(self.pet, 'brain'):
                healRange = PetTricks.TrickHeals[trickId]
                aptitude = self.pet.getTrickAptitude(trickId)
                # Heal amount scales with the pet's aptitude for this trick.
                healAmt = int(lerp(healRange[0], healRange[1], aptitude))
                if healAmt:
                    for avId in self.pet.brain.getAvIdsLookingAtUs():
                        av = self.pet.air.doId2do.get(avId)
                        if av:
                            if isinstance(av,
                                          DistributedToonAI.DistributedToonAI):
                                av.toonUp(healAmt)

                self.pet._handleDidTrick(trickId)
                # NOTE(review): unlocking only when NOT locked down looks
                # inverted -- confirm the intended semantics of
                # isLockedDown() against the lock/unlock API.
                if not self.pet.isLockedDown():
                    self.pet.unlockPet()
                messenger.send(self.getTrickDoneEvent())
Exemple #9
0
    def updatePriorities(self):
        """Re-evaluate goal priorities and (re)select the primary goal.

        Ties at the highest priority are broken uniformly at random.
        """
        if len(self.goals) == 0:
            return
        if __dev__:
            self.pscSetup.start()
        if self.primaryGoal is None:
            highestPriority = -99999.0
            candidates = []
        else:
            # The current primary goal keeps a priority bonus that decays
            # from PrimaryGoalScale down to 1.0 over PrimaryGoalDecayDur,
            # which damps thrashing between similarly-ranked goals.
            highestPriority = self.primaryGoal.getPriority()
            candidates = [self.primaryGoal]
            decayDur = PetConstants.PrimaryGoalDecayDur
            priFactor = PetConstants.PrimaryGoalScale
            elapsed = min(decayDur,
                          globalClock.getFrameTime() - self.primaryStartT)
            highestPriority *= lerp(priFactor, 1.0, elapsed / decayDur)
        if __dev__:
            self.pscSetup.stop()
            self.pscFindPrimary.start()
        for goal in self.goals:
            pri = goal.getPriority()
            if pri < highestPriority:
                continue
            if pri > highestPriority:
                # Strictly better: restart the candidate list.
                highestPriority = pri
                candidates = [goal]
            else:
                # Tied with the best so far.
                candidates.append(goal)

        if __dev__:
            self.pscFindPrimary.stop()
            self.pscSetPrimary.start()
        newPrimary = random.choice(candidates)
        if self.primaryGoal != newPrimary:
            self.pet.notify.debug(
                'new goal: %s, priority=%s' %
                (newPrimary.__class__.__name__, highestPriority))
            self._setPrimaryGoal(newPrimary)
        if __dev__:
            self.pscSetPrimary.stop()
        return
Exemple #10
0
    def startSuitWalkTask(self):
        """Schedule randomized suit walk-across intervals for the whole game.

        Suits are staggered by random periods drawn from SuitPeriodRange
        and walk across the stage in a random direction at a random Y.
        """
        ival = Parallel(name='catchGameMetaSuitWalk')
        rng = RandomNumGen(self.randomNumGen)
        delay = 0.0
        while delay < CatchGameGlobals.GameDuration:
            # Draw the next period uniformly from SuitPeriodRange.
            # (was lerp(range[0], range[0], ...), which ignored the upper
            # bound and always produced the minimum period)
            delay += lerp(self.SuitPeriodRange[0], self.SuitPeriodRange[1],
                          rng.random())
            walkIval = Sequence(name='catchGameSuitWalk')
            walkIval.append(Wait(delay))

            def pickY(self=self, rng=rng):
                # Random Y anywhere across the stage height.
                return lerp(-self.StageHalfHeight, self.StageHalfHeight,
                            rng.random())

            # Start/stop X margin widens with player count.
            m = [2.5, 2.5, 2.3, 2.1][self.getNumPlayers() - 1]
            startPos = Point3(-(self.StageHalfWidth * m), pickY(), 0)
            stopPos = Point3(self.StageHalfWidth * m, pickY(), 0)
            if rng.choice([0, 1]):
                # Half the time, walk right-to-left instead.
                startPos, stopPos = stopPos, startPos
            walkIval.append(self.getSuitWalkIval(startPos, stopPos, rng))
            ival.append(walkIval)

        ival.start()
        self.suitWalkIval = ival
Exemple #11
0
 def lerpMood(self, component, factor):
     """Move a mood component toward an extreme by |factor|.

     A negative factor lerps the component toward 0.0; a positive
     factor lerps it toward 1.0.
     """
     current = self.mood.getComponent(component)
     if factor < 0:
         target, amount = 0.0, -factor
     else:
         target, amount = 1.0, factor
     self.setMoodComponent(component, lerp(current, target, amount))
Exemple #12
0
    def getDropIval(self, x, y, dropObjName, generation, num):
        """Build the interval that drops one object at stage position (x, y).

        Sets up the drop node and its shadow, randomizes the model heading,
        wires the catch collision event, animates the fall (ballistic or
        linear) in parallel with the shadow, and cleans up afterwards.
        Returns the assembled Sequence.
        """
        objType = PartyGlobals.Name2DropObjectType[dropObjName]
        # (generation, num) uniquely identifies this drop in names/events.
        id = (generation, num)
        dropNode = hidden.attachNewNode('catchDropNode%s' % (id, ))
        dropNode.setPos(x, y, 0)
        shadow = self.dropShadow.copyTo(dropNode)
        shadow.setZ(PartyGlobals.CatchDropShadowHeight)
        shadow.setColor(1, 1, 1, 1)
        object = self.getObjModel(dropObjName)
        object.reparentTo(hidden)
        if dropObjName in ['watermelon', 'anvil']:
            # These models only get a small per-model heading jitter instead
            # of a fully random heading.
            objH = object.getH()
            absDelta = {'watermelon': 12, 'anvil': 15}[dropObjName]
            delta = (self.randomNumGen.random() * 2.0 - 1.0) * absDelta
            newH = objH + delta
        else:
            newH = self.randomNumGen.random() * 360.0
        object.setH(newH)
        sphereName = 'FallObj%s' % (id, )
        radius = self.ObjRadius
        if objType.good:
            # Catchable ("good") objects get a slightly enlarged catch
            # sphere (midpoint of a 1.0-1.3 scale range).
            radius *= lerp(1.0, 1.3, 0.5)
        collSphere = CollisionSphere(0, 0, 0, radius)
        collSphere.setTangible(0)
        collNode = CollisionNode(sphereName)
        collNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
        collNode.addSolid(collSphere)
        collNodePath = object.attachNewNode(collNode)
        collNodePath.hide()
        if self.ShowObjSpheres:
            collNodePath.show()
        catchEventName = 'ltCatch' + sphereName

        def eatCollEntry(forward, collEntry):
            # Discard the collision entry and invoke the bound handler.
            forward()

        self.accept(
            catchEventName,
            Functor(eatCollEntry, Functor(self.__handleCatch, id[0], id[1])))

        def cleanup(self=self, dropNode=dropNode, id=id, event=catchEventName):
            # Tear down the event hook and scene graph for this drop.
            self.ignore(event)
            dropNode.removeNode()

        duration = objType.fallDuration
        onscreenDuration = objType.onscreenDuration
        targetShadowScale = 0.3
        if self.trickShadows:
            # Two-stage shadow growth: a partial scale while the object is
            # still offscreen, then the remainder during the visible fall.
            intermedScale = targetShadowScale * (self.OffscreenTime /
                                                 self.BaselineDropDuration)
            shadowScaleIval = Sequence(
                LerpScaleInterval(shadow,
                                  self.OffscreenTime,
                                  intermedScale,
                                  startScale=0))
            shadowScaleIval.append(
                LerpScaleInterval(shadow,
                                  duration - self.OffscreenTime,
                                  targetShadowScale,
                                  startScale=intermedScale))
        else:
            shadowScaleIval = LerpScaleInterval(shadow,
                                                duration,
                                                targetShadowScale,
                                                startScale=0)
        targetShadowAlpha = 0.4
        # Fade the shadow in while the object is still offscreen.
        shadowAlphaIval = LerpColorScaleInterval(
            shadow,
            self.OffscreenTime,
            Point4(1, 1, 1, targetShadowAlpha),
            startColorScale=Point4(1, 1, 1, 0))
        shadowIval = Parallel(shadowScaleIval, shadowAlphaIval)
        if self.useGravity:

            def setObjPos(t, objType=objType, object=object):
                # Follow the object type's precomputed trajectory for Z.
                z = objType.trajectory.calcZ(t)
                object.setZ(z)

            setObjPos(0)
            dropIval = LerpFunctionInterval(setObjPos,
                                            fromData=0,
                                            toData=onscreenDuration,
                                            duration=onscreenDuration)
        else:
            # No trajectory: ease the object straight down from a fixed
            # offscreen height to the drop node origin.
            startPos = Point3(0, 0, self.MinOffscreenHeight)
            object.setPos(startPos)
            dropIval = LerpPosInterval(object,
                                       onscreenDuration,
                                       Point3(0, 0, 0),
                                       startPos=startPos,
                                       blendType='easeIn')
        # Reparent into the scene, run shadow + (delayed) fall in parallel,
        # then clean up.
        ival = Sequence(Func(Functor(dropNode.reparentTo, self.root)),
                        Parallel(
                            Sequence(
                                WaitInterval(self.OffscreenTime),
                                Func(Functor(object.reparentTo, dropNode)),
                                dropIval), shadowIval),
                        Func(cleanup),
                        name='drop%s' % (id, ))
        if objType == PartyGlobals.Name2DropObjectType['anvil']:
            # Anvils get their landing sound appended.
            ival.append(Func(self.playAnvil))
        return ival
Exemple #13
0
 def pickY(self=self, rng=rng):
     # Random Y coordinate spanning the full stage height. The default args
     # bind self/rng from an enclosing scope not visible in this fragment.
     return lerp(-self.StageHalfHeight, self.StageHalfHeight,
                 rng.random())
Exemple #14
0
    def calcDifficultyConstants(self, difficulty, numPlayers):
        """Derive per-game tuning constants from difficulty and player count.

        Sets toon/suit speeds, suit spawn periods, stage dimensions, drop
        timing constants, and the total fruit/anvil counts produced by a
        dry run of the drop scheduler.
        """
        ToonSpeedRange = [16.0, 25.0]
        self.ToonSpeed = lerp(ToonSpeedRange[0], ToonSpeedRange[1], difficulty)
        self.SuitSpeed = self.ToonSpeed / 2.0
        # Suits appear more frequently at higher difficulty.
        self.SuitPeriodRange = [
            lerp(5.0, 3.0, self.getDifficulty()),
            lerp(15.0, 8.0, self.getDifficulty())
        ]

        def scaledDimensions(widthHeight, scale):
            # Scale the stage AREA by `scale` while preserving aspect ratio.
            w, h = widthHeight
            return [math.sqrt(scale * w * w), math.sqrt(scale * h * h)]

        BaseStageDimensions = [20, 15]
        areaScales = [1.0, 1.0, 3.0 / 2, 4.0 / 2]
        self.StageAreaScale = areaScales[numPlayers - 1]
        self.StageLinearScale = math.sqrt(self.StageAreaScale)
        self.notify.debug('StageLinearScale: %s' % self.StageLinearScale)
        self.StageDimensions = scaledDimensions(BaseStageDimensions,
                                                self.StageAreaScale)
        self.notify.debug('StageDimensions: %s' % self.StageDimensions)
        self.StageHalfWidth = self.StageDimensions[0] / 2.0
        self.StageHalfHeight = self.StageDimensions[1] / 2.0
        MOHs = [24] * 2 + [26, 28]
        self.MinOffscreenHeight = MOHs[self.getNumPlayers() - 1]
        # Stage diagonal, normalized back to the single-player stage size.
        distance = math.sqrt(self.StageDimensions[0] *
                             self.StageDimensions[0] +
                             self.StageDimensions[1] * self.StageDimensions[1])
        distance /= self.StageLinearScale
        if self.DropPlacerType == PathDropPlacer:
            distance /= 1.5
        ToonRunDuration = distance / self.ToonSpeed
        offScreenOnScreenRatio = 1.0
        fraction = 1.0 / 3 * 0.85
        self.BaselineOnscreenDropDuration = ToonRunDuration / (
            fraction * (1.0 + offScreenOnScreenRatio))
        self.notify.debug('BaselineOnscreenDropDuration=%s' %
                          self.BaselineOnscreenDropDuration)
        self.OffscreenTime = offScreenOnScreenRatio * self.BaselineOnscreenDropDuration
        self.notify.debug('OffscreenTime=%s' % self.OffscreenTime)
        self.BaselineDropDuration = self.BaselineOnscreenDropDuration + self.OffscreenTime
        self.MaxDropDuration = self.BaselineDropDuration
        self.DropPeriod = self.BaselineDropDuration / 2.0
        scaledNumPlayers = (numPlayers - 1.0) * 0.75 + 1.0
        self.DropPeriod /= scaledNumPlayers
        # Normalize drop-type weights into probabilities.
        typeProbs = {'fruit': 3, 'anvil': 1}
        # was reduce(lambda x, y: x + y, ...) -- sum() is the idiom
        probSum = sum(typeProbs.values())
        for key in typeProbs.keys():
            typeProbs[key] = float(typeProbs[key]) / probSum

        # Dry-run the scheduler to count how many drops fit in the game.
        scheduler = DropScheduler(CatchGameGlobals.GameDuration,
                                  self.FirstDropDelay, self.DropPeriod,
                                  self.MaxDropDuration, self.FasterDropDelay,
                                  self.FasterDropPeriodMult)
        self.totalDrops = 0
        while not scheduler.doneDropping():
            scheduler.stepT()
            self.totalDrops += 1

        self.numFruits = int(self.totalDrops * typeProbs['fruit'])
        self.numAnvils = int(self.totalDrops - self.numFruits)
 def adjustShadowScale(t, self=self):
     # Shrink and fade the shadow as the model moves away (negative Y up to
     # maxHeight). The default arg binds self from an enclosing scope not
     # visible in this fragment; t is unused here.
     modelY = self.model.getY()
     maxHeight = 10
     # a: 0 at modelY == 0, capped at 1 beyond -maxHeight.
     # NOTE(review): a is not clamped below 0 for positive modelY -- confirm
     # modelY is always <= 0 when this runs.
     a = min(-modelY / maxHeight, 1.0)
     self.shadow.setScale(lerp(1, 0.2, a))
     self.shadow.setAlphaScale(lerp(1, 0.2, a))