def getActionDeviationLevel(trajectory):
    """Return the mean discretised action-deviation level of a trajectory.

    For every time step from the module-level ``startStatsIndex`` up to
    ``len(trajectory) - 3``, the angle between the executed action
    (``timeStep[1]``) and the action computed on ground truth
    (``timeStep[4]``) is discretised into multiples of pi/4; the mean of
    those integer levels is returned (NaN for an empty window, per
    ``np.mean`` on an empty list).
    """
    deviationLevels = []
    for index in range(len(trajectory) - 2):
        # Only steps inside the statistics window contribute.
        if index < startStatsIndex:
            continue
        step = trajectory[index]
        executedAction = np.array(step[1])
        truthAction = np.array(step[4])
        angle = agf.computeAngleBetweenVectors(executedAction, truthAction)
        deviationLevels.append(round(angle / (math.pi / 4)))
    return np.mean(deviationLevels)
# --- Example #2 (scraped example separator; original site vote count: 0) ---
def computeObserveDF(hypothesisInformation, positionOldTimeDF,
                     positionCurrentTimeDF):
    """Build a per-hypothesis observation DataFrame of noisy chase features.

    For every hypothesis (a row of ``hypothesisInformation``, indexed by a
    MultiIndex with at least 'wolfIdentity' and 'sheepIdentity' levels),
    computes three columns:

    - 'wolfDeviation': angle between the wolf's motion and the
      sheep-minus-wolf direction, plus von Mises perception noise.
    - 'sheepDeviation': angle between the sheep's motion and the same
      direction, plus independent von Mises noise.
    - 'distanceBetweenWolfAndSheep': Euclidean distance between the two
      agents at the current time.

    Noise concentration comes from the 'perceptionPrecision' column of
    ``hypothesisInformation``. The position DataFrames are assumed to be
    indexed by agent identity so that ``.loc`` on the identity arrays
    selects per-hypothesis rows — TODO confirm against the caller.
    """
    hypothesis = hypothesisInformation.index
    observeDF = pd.DataFrame(index=hypothesis, columns=['wolfDeviation'])
    # Identity of the hypothesised wolf/sheep for each hypothesis row.
    wolfObjNums = hypothesis.get_level_values('wolfIdentity')
    sheepObjNums = hypothesis.get_level_values('sheepIdentity')
    wolfLocBefore = positionOldTimeDF.loc[wolfObjNums]
    sheepLocBefore = positionOldTimeDF.loc[sheepObjNums]
    wolfLocNow = positionCurrentTimeDF.loc[wolfObjNums]
    sheepLocNow = positionCurrentTimeDF.loc[sheepObjNums]
    # Displacement of each agent over one time step.
    wolfMotion = wolfLocNow - wolfLocBefore
    sheepMotion = sheepLocNow - sheepLocBefore
    # Direction from wolf toward sheep at the old time; `.values` strips the
    # index so the subtraction is positional, not index-aligned.
    seekingOrAvoidMotion = sheepLocBefore.values - wolfLocBefore.values
    distanceBetweenWolfAndSheep = np.sqrt(
        np.sum(np.power(wolfLocNow.values - sheepLocNow.values, 2), axis=1))
    chasingAngle = ag.computeAngleBetweenVectors(wolfMotion,
                                                 seekingOrAvoidMotion)
    escapingAngle = ag.computeAngleBetweenVectors(sheepMotion,
                                                  seekingOrAvoidMotion)
    # Independent von Mises perception noise per hypothesis; higher
    # 'perceptionPrecision' (kappa) concentrates the noise around 0.
    deviationAngleForWolf = np.random.vonmises(
        0, hypothesisInformation['perceptionPrecision'].values)
    deviationAngleForSheep = np.random.vonmises(
        0, hypothesisInformation['perceptionPrecision'].values)
    # Assigning a one-column DataFrame relies on index alignment with
    # `hypothesis`; this also adds the new columns beyond the initial one.
    observeDF['wolfDeviation'] = pd.DataFrame(chasingAngle.values +
                                              deviationAngleForWolf,
                                              index=hypothesis,
                                              columns=['wolfDeviation'])
    observeDF['sheepDeviation'] = pd.DataFrame(escapingAngle.values +
                                               deviationAngleForSheep,
                                               index=hypothesis,
                                               columns=['sheepDeviation'])
    observeDF['distanceBetweenWolfAndSheep'] = pd.DataFrame(
        distanceBetweenWolfAndSheep,
        index=hypothesis,
        columns=['distanceBetweenWolfAndSheep'])
    return observeDF
# --- Example #3 (scraped example separator; original site vote count: 0) ---
 def __call__(self, state):
     """Advance *state* for up to ``self.maxFrame`` motion frames.

     Each frame: optionally render, stop early on a terminal state, then
     randomly perturb some agents' action directions (never agent 0 and
     never the wolf) by a uniform angle in [-pi/3, pi/3], renormalise the
     perturbed actions to the speed of agent 1 (agent 0 keeps its own
     speed), and apply the multi-agent motion transition.

     Returns the state after the last frame (or the terminal state).
     """
     for frame in range(self.maxFrame):
         physicalState, beliefAndAttention = state
         agentStates, agentActions, timeStep, wolfIdAndSubtlety = physicalState
         # Fixed: `== True` comparison replaced with idiomatic truthiness.
         if self.renderOn:
             self.render(state)
         if self.isTerminal(state):
             break
         # An agent's direction changes this frame iff its draw hits 0,
         # i.e. with probability 1/maxFrame per agent.
         change = np.random.randint(0, self.maxFrame, len(agentStates))
         changeLabel = 1 * (change == 0)
         # Agent 0 and the wolf never get a random direction change.
         changeLabel[0] = 0
         changeLabel[wolfIdAndSubtlety[0]] = 0
         currentActionsPolar = np.array([
             ag.transiteCartesianToPolar(action) for action in agentActions
         ])
         # Perturb only the flagged agents' polar angles.
         polarAfterChange = np.random.uniform(
             -math.pi * 1 / 3, math.pi * 1 / 3,
             len(agentStates)) * np.array(changeLabel) + currentActionsPolar
         # Rescale unit direction vectors to agent 1's speed ...
         actionsAfterChange = np.array([
             ag.transitePolarToCartesian(polar)
             for polar in polarAfterChange
         ]) * np.linalg.norm(agentActions[1])
         # ... except agent 0, which keeps its own original speed.
         actionsAfterChange[
             0] = actionsAfterChange[0] * 1.0 * np.linalg.norm(
                 agentActions[0]) / np.linalg.norm(agentActions[1])
         newAgentStates, newAgentActions = self.transiteMultiAgentMotion(
             agentStates, actionsAfterChange)
         newPhysicalState = [
             newAgentStates, newAgentActions, timeStep, wolfIdAndSubtlety
         ]
         stateAfterNoActionChangeTransition = [
             newPhysicalState, beliefAndAttention
         ]
         state = stateAfterNoActionChangeTransition
     return state
# --- Example #4 (scraped example separator; original site vote count: 0) ---
 def __call__(self):
     """Build and return the initial physical state for one episode.

     The physical state is the list
     ``[startAgentStates, startAgentActions, startTimeStep,
     startWolfIdAndSubtlety]``: reset agent positions, per-agent initial
     actions (unit vectors in uniformly random directions, except agent 0
     which starts at rest), time step 0, and the sampled wolf identity and
     subtlety.
     """
     startWolfIdAndSubtlety = self.resetWolfIdAndSubtlety()
     wolfId, subtlety = startWolfIdAndSubtlety
     # Fixed: loop variable renamed so it no longer shadows builtin `id`.
     distractorsIds = [
         agentId for agentId in range(self.numAgent)
         if agentId not in [self.sheepId, wolfId]
     ]
     startAgentStates = np.array(
         self.resetAgentPositions(wolfId, distractorsIds))
     # Every agent gets a unit action in a uniformly random direction.
     startAgentActions = np.array([
         ag.transitePolarToCartesian(np.random.uniform(-math.pi, math.pi))
         for agentId in range(self.numAgent)
     ])
     # Agent 0 (presumably the subject — TODO confirm) starts motionless.
     startAgentActions[0] = np.array([0, 0])
     startTimeStep = np.array([0])
     startPhysicalState = [
         startAgentStates, startAgentActions, startTimeStep,
         startWolfIdAndSubtlety
     ]
     return startPhysicalState