Example #1
  def sampleTrajectory(self, pi = None, state = None, hori = numpy.inf, to = 'occupancy'):
    # sample a trajectory by following pi starting from self.state until state that is self.isTerminal
    # pi: S, A -> prob
    u = util.Counter()
    states = []
    actions = []

    t = 0
    if state: self.cmp.state = state

    # we use self.cmp for simulation. we reset it after running
    while True:
      if self.cmp.isTerminal(self.cmp.state) or t >= hori: break

      # now, sample an action following this policy
      if pi == None:
        a = random.choice(self.cmp.getPossibleActions())
      else:
        a = util.sample({a: pi(self.cmp.state, a) for a in self.cmp.getPossibleActions()})
      u[(self.cmp.state, a)] = 1
      states.append(self.cmp.state)
      actions.append(a)
      t += 1

      self.cmp.doAction(a)
    self.cmp.reset()
    
    if to == 'occupancy':
      return u
    elif to == 'trajectory':
      return states
    elif to == 'saPairs':
      return zip(states, actions)
    else:
      raise Exception('unknown return type')
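All of these snippets rely on util.Counter and util.sample from the course's util module. For reference, here is a minimal sketch of what a Counter-style weighted draw does; it is an illustration only, not the framework's actual implementation, and the name weighted_sample is made up for this sketch:

import random

def weighted_sample(distribution):
    # distribution: dict mapping items to non-negative weights (need not sum to 1)
    items = list(distribution.keys())
    weights = [distribution[item] for item in items]
    total = float(sum(weights))
    if total <= 0:
        raise ValueError('cannot sample from an all-zero distribution')
    r = random.random() * total
    cumulative = 0.0
    for item, weight in zip(items, weights):
        cumulative += weight
        if r <= cumulative:
            return item
    return items[-1]  # guard against floating-point round-off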
Example #2
  def observe(self, observation, gameState):
    "Update beliefs based on the given distance observation."
    emissionModel = busters.getObservationDistribution(observation)
    pacmanPosition = gameState.getPacmanPosition()
    "*** YOUR CODE HERE ***"
    weights = util.Counter()

    hasNonZero = False
    for p in self.particles:
        distance = util.manhattanDistance(p, pacmanPosition)
        prob = emissionModel[distance]
        weights[p] += prob
        if(prob > 0):
            hasNonZero = True

    if (not hasNonZero):
        newParticles = []
        for i in range(self.numParticles):
            ghostPos = random.choice(self.legalPositions)
            newParticles.append(ghostPos)

        self.particles = newParticles
        return

    resampledParticles = []
    for i in range(self.numParticles):
        resampledParticles.append(util.sample(weights))

    self.particles = resampledParticles
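Example #2 resamples by calling util.sample independently once per particle. A lower-variance alternative that none of these snippets use is systematic resampling, which walks the cumulative weights in a single pass; a sketch, assuming weights is a dict mapping particles to non-negative weights with a positive total (all names here are illustrative):

import random

def systematic_resample(weights, num_particles):
    # single pass over the cumulative weights; returns num_particles samples
    items = list(weights.keys())
    w = [weights[item] for item in items]
    total = float(sum(w))
    step = total / num_particles
    offset = random.uniform(0, step)
    resampled = []
    cumulative = w[0]
    i = 0
    for n in range(num_particles):
        target = offset + n * step
        # advance until the target falls inside item i's weight interval
        while cumulative < target and i < len(items) - 1:
            i += 1
            cumulative += w[i]
        resampled.append(items[i])
    return resampled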
Example #3
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"
        allPossible = []

        for particle in self.particles:
            # iterating a Counter yields only its keys, so the old x/y lists held
            # coordinates rather than probabilities; sample from the Counter directly
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, particle))
            allPossible.append(util.sample(newPosDist))
        self.beliefs = allPossible
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make
        sure to handle the special case where all particles have weight
        0 after reweighting based on observation. If this happens,
        resample particles uniformly at random from the set of legal
        positions (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, **all** particles should be updated so
             that the ghost appears in its prison cell, self.getJailPosition()

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
             prior distribution by calling initializeUniformly. The total weight
             for a belief distribution can be found by calling totalCount on
             a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution

        You may also want to use util.manhattanDistance to calculate the distance
        between a particle and pacman's position.
        """

        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        
        "*** YOUR CODE HERE ***"
        newBeliefs = util.Counter()
        beliefs = self.getBeliefDistribution()
        
        # Special Case #1
        # this will handle the case when pacman captures a ghost, it will update the
        # particles to reflect this change
        if noisyDistance == None:
            newBeliefs[self.getJailPosition()] = 1
        # go through all the legal positions and calculate their manhattan distance
        # to pacman's position
        else:
            for p in self.legalPositions:
                distance = util.manhattanDistance(p, pacmanPosition)
                # if the probability of that distance is greater than zero, then reweight
                # the belief at this position by that probability
                if emissionModel[distance] > 0:
                    newBeliefs[p] = emissionModel[distance] * beliefs[p]
        newBeliefs.normalize()
                   
        # Special Case #2
        # when all the particles have a weight of 0, we reinitialize
        if newBeliefs.totalCount() == 0:
            self.initializeUniformly(self.numParticles)
            return
        
        self.particles = [util.sample(newBeliefs) for _ in range(self.numParticles)]
  def elapseTime(self, gameState):
    """
    Update beliefs for a time step elapsing.

    As in the elapseTime method of ExactInference, you should use:

      newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

    to obtain the distribution over new positions for the ghost, given
    its previous position (oldPos) as well as Pacman's current
    position.
    """
    "*** YOUR CODE HERE ***"
    newParticles = []
    for part in self.particles:
        newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, part))
        position = util.sample(newPosDist)
        newParticles.append(position)

    self.particles = newParticles
Example #6
  def resample(self, noisyDistances):
    print "resample"
    dist = util.Counter()
    for i in range(self.numParticles):
      newParticle = []
      particle = self.particles[i]
      weight = self.weights[i]
      for ghost in range(self.numGhosts):
        if (noisyDistances[ghost] == None):
          newParticle.append(self.getJailPosition(ghost))
        else:
          newParticle.append(particle[ghost])

      dist[tuple(newParticle)] += weight
    dist.normalize()

    resampleParticles = []
    resampleWeights = []

    for particle in range(self.numParticles):
      sample = util.sample(dist)
      resampleParticles.append(sample)
      resampleWeights.append(1)

    self.particles = resampleParticles
    self.weights = resampleWeights
Example #7
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given
        its previous position (oldPos) as well as Pacman's current
        position.

        util.sample(Counter object) is a helper method to generate a sample from a
        belief distribution
        """

        allParticles = self.particles
        # Create an empty list
        particles = []

        # Iterate through every particle and add sample to list
        for particle in allParticles:
            movedState = self.setGhostPosition(gameState, particle)
            currentDistribution = self.getPositionDistribution(movedState)
            particles.append(util.sample(currentDistribution))
        self.particles = particles
Example #8
  def observeState(self, gameState):
    """
    Resamples the set of particles using the likelihood of the noisy observations.

    As in elapseTime, to loop over the ghosts, use:

      for i in range(self.numGhosts):
        ...

    A correct implementation will handle two special cases:
      1) When a ghost is captured by Pacman, all particles should be updated so
         that the ghost appears in its prison cell, position (2 * i + 1, 1),
         where "i" is the 0-based index of the ghost.

         You can check if a ghost has been captured by Pacman by
         checking if it has a noisyDistance of 999 (a noisy distance
         of 999 will be returned if, and only if, the ghost is
         captured).

      2) When all particles receive 0 weight, they should be recreated from the
          prior distribution by calling initializeParticles.
    """

    pacmanPos = gameState.getPacmanPosition()
    noisyDistances = gameState.getNoisyGhostDistances()
    if len(noisyDistances) < self.numGhosts: return
    emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

    jailed = [ noisy == 999 for noisy in noisyDistances ]

    partials = [ tuple() ] * self.numParticles

    for g in xrange(self.numGhosts):
      weighted = util.Counter()
      if jailed[g]:
        # handle the jailed ghost
        jailLocation = (2 * g + 1, 1)
        for i in xrange(self.numParticles):
          partials[i] += (jailLocation, )
        continue
      for oldAssign, counts in self.sampledCounts.iteritems():
        for assign, oldCount in counts.iteritems():
          if oldCount <= 0:
            continue
          trueDistance = util.manhattanDistance(pacmanPos, assign[g])
          delta = abs(trueDistance - noisyDistances[g])
          if emissionModels[g][trueDistance] > 0 and delta <= MAX_DIST_DELTA:
            # no need to normalize by constant
            pTrue = math.exp( -delta )
            weighted[assign[g]] = oldCount * emissionModels[g][trueDistance] * pTrue / self.proposals[oldAssign][assign]
      totalWeight = weighted.totalCount()
      if totalWeight != 0: weighted.normalize()
      for i in xrange(self.numParticles):
        if totalWeight == 0:
          #  handle the zero weights case
          partials[i] += (random.choice(self.legalPositions), )
        else:
          partials[i] += (util.sample(weighted), )

    self.particles = CounterFromIterable(partials)
Example #9
  def elapseTime(self, gameState):
    "Update beliefs for a time step elapsing."
    "*** YOUR CODE HERE ***"
    temp = util.Counter()
    for pos in self.beliefs.keys():
      # place the ghost at this position to get its transition distribution
      state = self.setGhostPosition(gameState, pos)
      # draw one successor sample for each particle currently at this position
      for i in range(self.beliefs[pos]):
        newSample = util.sample(self.getPositionDistribution(state))
        temp[newSample] += 1
    self.beliefs = temp
Example #10
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given
        its previous position (oldPos) as well as Pacman's current
        position.

        util.sample(Counter object) is a helper method to generate a sample from a
        belief distribution
        """
        "*** YOUR CODE HERE ***"
            
        newParticleList = []
        for oldPos in self.particleList:
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))
            newParticleList.append(util.sample(newPosDist))
        self.particleList = newParticleList
Example #11
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"

        newParticleList = []

        # if the ghost is already in jail, its position cannot change
        if self.particles[0] == self.getJailPosition():
            return

        for particle in self.particles:
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, particle))
            newParticleList.append(util.sample(newPosDist))

        self.particles = newParticleList
Example #12
  def elapseTime(self, gameState):
    """
    Update beliefs for a time step elapsing.

    As in the elapseTime method of ExactInference, you should use:

      newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

    to obtain the distribution over new positions for the ghost, given
    its previous position (oldPos) as well as Pacman's current
    position.
    """
    "*** YOUR CODE HERE ***"    
    temporaryPos = []
    temporaryWeight = []
    cnt = 0
    for oldPos in self.particles:
        oldWeight = self.particlesWeight[cnt]
        cnt += 1
        if not oldWeight > 0: continue
        newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))
        newProb = 0
        while(newProb == 0):
            newPos = util.sample(newPosDist)
            newProb = newPosDist[newPos]
        temporaryPos.append(newPos)
        temporaryWeight.append(oldWeight)
    self.particles = temporaryPos
    self.particlesWeight = temporaryWeight
  def observe(self, observation, gameState):
    """
    Update beliefs based on the given distance observation. Make
    sure to handle the special case where all particles have weight
    0 after reweighting based on observation. If this happens,
    resample particles uniformly at random from the set of legal
    positions (self.legalPositions).

    A correct implementation will handle two special cases:
      1) When a ghost is captured by Pacman, all particles should be updated so
         that the ghost appears in its prison cell, self.getJailPosition()

         You can check if a ghost has been captured by Pacman by
         checking if it has a noisyDistance of None (a noisy distance
         of None will be returned if, and only if, the ghost is
         captured).
         
      2) When all particles receive 0 weight, they should be recreated from the
          prior distribution by calling initializeUniformly. Remember to
          change particles to jail if called for.
    """
    noisyDistance = observation
    emissionModel = busters.getObservationDistribution(noisyDistance)
    pacmanPosition = gameState.getPacmanPosition()

    # check if all weights are zero
    def zeroWeights(weights):
      return all(w == 0 for w in weights.values())

    
    prevBelief = self.getBeliefDistribution()
    allPossible = util.Counter()
    nextParticles = []

    # ghost captured
    if noisyDistance is None:
      jailPosition = self.getJailPosition()
      
      # put ghost to jail
      for i in range(self.numParticles):
        nextParticles.append(jailPosition)

      self.particles = nextParticles

    else:
      # update beliefs
      for pos in self.legalPositions:
        trueDistance = util.manhattanDistance(pos, pacmanPosition)
        allPossible[pos] += emissionModel[trueDistance] * prevBelief[pos]
      
      # weights all zero
      if zeroWeights(allPossible):
        self.initializeUniformly(gameState)

      else:
        # resample particles
        for i in range(self.numParticles):
          nextParticles.append(util.sample(allPossible))

        self.particles = nextParticles
Example #14
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"
        # first special case: put captured ghosts in jail for every particle
        for i in xrange(self.numGhosts):
            if noisyDistances[i] is None:
                for x, y in enumerate(self.particles):
                    self.particles[x] = self.getParticleWithGhostInJail(y, i)
        # create a weighted particle distribution
        weightedParticleDistri = util.Counter()
        for particle in self.particles:
            product = 1
            for i in xrange(self.numGhosts):
                if noisyDistances[i] is not None:
                    trueDistance = util.manhattanDistance(particle[i], pacmanPosition)
                    product *= emissionModels[i][trueDistance]
            weightedParticleDistri[particle] += product
        # second special case: all particles received 0 weight
        if weightedParticleDistri.totalCount() == 0:
            self.initializeParticles()
            # after reinitializing, move every eaten ghost back to its jail position
            for i in xrange(self.numGhosts):
                if noisyDistances[i] is None:
                    for x, y in enumerate(self.particles):
                        self.particles[x] = self.getParticleWithGhostInJail(y, i)
        else:  # resampling
            self.particles = [util.sample(weightedParticleDistri) for particle in self.particles]
Example #15
 def elapseTime(self, gameState):
   """
   Samples each particle's next state based on its current state and the gameState.
   
   You will need to use two helper methods provided below:
     1) setGhostPositions(gameState, ghostPositions)
         This method alters the gameState by placing the ghosts in the supplied positions.
     
     2) getPositionDistributionForGhost(gameState, ghostIndex, agent)
         This method uses the supplied ghost agent to determine what positions 
         a ghost (ghostIndex) controlled by a particular agent (ghostAgent) 
         will move to in the supplied gameState.  All ghosts
         must first be placed in the gameState using setGhostPositions above.
         Remember: ghosts start at index 1 (Pacman is agent 0).  
         
         The ghost agent you are meant to supply is self.enemyAgents[ghostIndex-1],
         but in this project all ghost agents are always the same.
   """
   newParticles = []
   for oldParticle in self.particles:
     newParticle = list(oldParticle) # A list of ghost positions
     for enemyIndex in range(len(self.enemyIndices)):
       tmpState = setEnemyPositions(gameState, newParticle, self.enemyIndices)    
       updatedParticle = util.sample(getPositionDistributionForEnemy(tmpState, self.enemyIndices[enemyIndex], self.enemyIndices[enemyIndex]))
       newParticle[enemyIndex] = updatedParticle
     newParticles.append(tuple(newParticle))
   self.particles = newParticles
Example #16
    def elapseTime(self, gameState):
        """
        Samples each particle's next state based on its current state and the gameState.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        Then, assuming that "i" refers to the index of the
        ghost, to obtain the distributions over new positions for that
        single ghost, given the list (prevGhostPositions) of previous
        positions of ALL of the ghosts, use this line of code:

          newPosDist = getPositionDistributionForGhost(setGhostPositions(gameState, prevGhostPositions),
                                                       i, self.ghostAgents[i])

        **Note** that you may need to replace "prevGhostPositions" with the
        correct name of the variable that you have used to refer to the
        list of the previous positions of all of the ghosts, and you may
        need to replace "i" with the variable you have used to refer to
        the index of the ghost for which you are computing the new
        position distribution.

        As an implementation detail (with which you need not concern
        yourself), the line of code above for obtaining newPosDist makes
        use of two helper functions defined below in this file:

          1) setGhostPositions(gameState, ghostPositions)
              This method alters the gameState by placing the ghosts in the supplied positions.

          2) getPositionDistributionForGhost(gameState, ghostIndex, agent)
              This method uses the supplied ghost agent to determine what positions
              a ghost (ghostIndex) controlled by a particular agent (ghostAgent)
              will move to in the supplied gameState.  All ghosts
              must first be placed in the gameState using setGhostPositions above.

              The ghost agent you are meant to supply is self.ghostAgents[ghostIndex-1],
              but in this project all ghost agents are always the same.
        """
        newParticles = []

        for oldParticle in self.particles:
            
            newParticle = list(oldParticle[0]) # A list of ghost positions
            # now loop through and update each entry in newParticle...


            prevGhostPositions = oldParticle[0]
            "*** YOUR CODE HERE ***"
            for i in range(self.numGhosts):
                newPosDist = getPositionDistributionForGhost(setGhostPositions(gameState, prevGhostPositions), i , self.ghostAgents[i])
                #update particle based on newPosDist
                newParticle[i] = util.sample(newPosDist)
            


            "*** END YOUR CODE HERE ***"
            newParticles.append([tuple(newParticle), oldParticle[1]])
        self.particles = newParticles
Example #17
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of 0)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"
        allPossible = util.Counter()
        jail = []
        for p in self.particles:
            prob = 1
            for ghost in range(self.numGhosts):
                if noisyDistances[ghost] == None:
                    if ghost not in jail:
                        jail.append(ghost)
                    p = self.getParticleWithGhostInJail(p, ghost)
                else:
                    trueDistance = util.manhattanDistance(p[ghost], pacmanPosition)
                    prob *= emissionModels[ghost][trueDistance]
            allPossible[p] += prob
        allPossible.normalize()
        if allPossible.totalCount() == 0:
            self.initializeParticles()
        else:
            for i in range(len(self.particles)):
              sample = util.sample(allPossible)
              for ghost in jail:
                  sample = self.getParticleWithGhostInJail(sample, ghost)
              self.particles[i] = sample
Example #18
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given
        its previous position (oldPos) as well as Pacman's current
        position.

        util.sample(Counter object) is a helper method to generate a sample from a
        belief distribution
        """
        "*** YOUR CODE HERE ***"
        # Updated: Bharadwaj Tanikella 2014

        temp = list(self.particles)
        i = 0
        while i < len(temp):
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, temp[i]))
            temp[i] = util.sample(newPosDist)
            i += 1
        self.particles = temp
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"
        counter = util.Counter()
        
        for particle in self.particles:
           # weight = 0
            weight = 1
            
            for i in range(0, self.numGhosts):
                if noisyDistances[i] != None:
                    manDistance = util.manhattanDistance(particle[i],pacmanPosition)
                    weight = weight * emissionModels[i][manDistance]
                else:
                    listP = list(particle)
                    listP[i] = self.getJailPosition(i)
                    particle = tuple(listP) 
                    
            counter[particle] =  counter[particle] + weight

        if any(counter.values()):
            particles = []
            for i in range(0, self.numParticles):
                particles.append(util.sample(counter))
            self.particles=particles
        else:
            self.initializeParticles()
Example #20
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given
        its previous position (oldPos) as well as Pacman's current
        position.

        util.sample(Counter object) is a helper method to generate a sample from a
        belief distribution
        """
        "*** YOUR CODE HERE ***"

        # assumes each particle is a mutable [position, ...] entry that can be updated in place
        for particle in self.particles:
            oldPos = particle[0]
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))
            # update the particle's position based on newPosDist
            particle[0] = util.sample(newPosDist)
Example #21
  def observe(self, gameState, selfPosition, shouldClusterToInit):
    "Update beliefs based on the given distance observation."
    observation = gameState.getAgentDistances()[self.index]
    particleWeights = util.Counter()
    newParticleList = []
    beliefDist = self.getBeliefDistribution()
    cumulativeProb = 0
    # Assign weights to particles depending on how likely it is for that location to be
    # correct given the most recent observation
    for particle in self.particles:
      trueDistance = util.manhattanDistance(particle, selfPosition)
      distanceProb = gameState.getDistanceProb(observation, trueDistance)

      particleWeights[particle] = (distanceProb * beliefDist[particle])
      # If the probablity of all particles is 0, we're either way off or we've knocked out the
      # enemy.  We keep track of this, and either reset or cluster to init if it is 0.
      cumulativeProb += distanceProb

    if cumulativeProb != 0:
      # Resample based on new weights
      for _ in range(self.numParticles):
        newParticleList.append(util.sample(particleWeights))
      self.particles = newParticleList
    else:
      # Reset particles if we're too far off
      if shouldClusterToInit:
        self.clusterParticles(gameState.getInitialAgentPosition(self.index))
      else:
        self.resetParticles()
Example #22
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given
        its previous position (oldPos) as well as Pacman's current
        position.

        util.sample(Counter object) is a helper method to generate a sample from a
        belief distribution
        """
        "*** YOUR CODE HERE ***"
        #util.raiseNotDefined()
        newParticles = []
        self.newPosDistMemo = util.Counter()
        for oldPos in self.particles:
            if oldPos in self.newPosDistMemo:
                newPosDist = self.newPosDistMemo[oldPos]
            else:
                newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))
                self.newPosDistMemo[oldPos] = newPosDist # memoize for speed
            newParticles.append(util.sample(newPosDist))

        self.particles = newParticles
Example #23
    def initializeParticles(self):
        """
        Initialize particles to be consistent with a uniform prior.

        Each particle is a tuple of ghost positions. Use self.numParticles for
        the number of particles. You may find the python package 'itertools' helpful.
        Specifically, you will need to think about permutations of legal ghost
        positions, with the additional understanding that ghosts may occupy the
        same space. Look at the 'product' function in itertools to get an
        implementation of the catesian product. Note: If you use
        itertools, keep in mind that permutations are not returned in a random order;
        you must shuffle the list of permutations in order to ensure even placement
        of particles across the board. Use self.legalPositions to obtain a list of
        positions a ghost may occupy.

          ** NOTE **
            the variable you store your particles in must be a list; a list is simply a collection
            of unweighted variables (positions in this case). Storing your particles as a Counter or
            dictionary (where there could be an associated weight with each position) is incorrect
            and will produce errors

        """
        self.beliefs = list()
        dist = util.Counter()
        for p in self.legalPositions: dist[p] = 1.0
        dist.normalize()
        particleCount = 0
        while (particleCount < self.numParticles):
            newTuple = ()
            ghostCount = 0
            while ghostCount < self.numGhosts:
                newTuple += (util.sample(dist),)  # keep each sampled position as a single tuple element
                ghostCount += 1
            self.beliefs.append(newTuple)
            particleCount += 1
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make
        sure to handle the special case where all particles have weight
        0 after reweighting based on observation. If this happens,
        resample particles uniformly at random from the set of legal
        positions (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, **all** particles should be updated so
             that the ghost appears in its prison cell, self.getJailPosition()

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
             prior distribution by calling initializeUniformly. The total weight
             for a belief distribution can be found by calling totalCount on
             a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution

        You may also want to use util.manhattanDistance to calculate the distance
        between a particle and pacman's position.
        """

        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        '''
        print "Noisy Distance: ", noisyDistance
        print "Emission Model: ", emissionModel
        print "pacman Position: ", pacmanPosition
        print "self.particles: ", self.particles
        '''
        allPossible = util.Counter()
        belief = self.getBeliefDistribution()
        #ghost has been captured
        if noisyDistance is None:
            self.particles = []
            for index in range(0, self.numParticles):
                self.particles.append(self.getJailPosition())
        else:
            #weighting and testing if the weights are 0
            for position in self.legalPositions:
                trueDistance = util.manhattanDistance(position, pacmanPosition)
                allPossible[position] = emissionModel[trueDistance] * belief[position]
            if allPossible.totalCount() == 0:
                self.initializeUniformly(gameState)
                return
            #resampling
            allPossible.normalize()
            self.particles = []
            for index in range(0, self.numParticles):
                newP = util.sample(allPossible)
                self.particles.append(newP)
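The initializeParticles docstring in Example #23 recommends building particles from the itertools.product of legal positions and shuffling, but the code above samples each ghost independently instead. A hedged sketch of the product-based initialization that docstring describes; the function name and arguments are illustrative stand-ins for self.legalPositions, self.numGhosts and self.numParticles:

import itertools
import random

def initialize_particles_uniformly(legal_positions, num_ghosts, num_particles):
    # the Cartesian product enumerates every joint ghost configuration;
    # shuffling ensures that truncating or cycling to num_particles does not bias placement
    joint_positions = list(itertools.product(legal_positions, repeat=num_ghosts))
    random.shuffle(joint_positions)
    particles = []
    i = 0
    while len(particles) < num_particles:
        particles.append(joint_positions[i % len(joint_positions)])
        i += 1
    return particles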
Example #25
 def elapseTime(self, gameState):
   """
   Samples each particle's next state based on its current state and the gameState.
   
   You will need to use two helper methods provided below:
     1) setGhostPositions(gameState, ghostPositions)
         This method alters the gameState by placing the ghosts in the supplied positions.
     
     2) getPositionDistributionForGhost(gameState, ghostIndex, agent)
         This method uses the supplied ghost agent to determine what positions 
         a ghost (ghostIndex) controlled by a particular agent (ghostAgent) 
         will move to in the supplied gameState.  All ghosts
         must first be placed in the gameState using setGhostPositions above.
         Remember: ghosts start at index 1 (Pacman is agent 0).  
         
         The ghost agent you are meant to supply is self.ghostAgents[ghostIndex-1],
         but in this project all ghost agents are always the same.
   """
   newParticles = []
   for oldParticle in self.particles:
     newParticle = list(oldParticle)  # keep the ghost ordering stable instead of relying on dict ordering
     for enemyIndex in range(len(self.enemyTeam)):
       setGhostPositions(gameState, oldParticle)
       updatedParticle = util.sample(getPositionDistributionForGhost(gameState, enemyIndex, gameState.getAgentState(enemyIndex)))
       newParticle[enemyIndex] = updatedParticle
     newParticles.append(tuple(newParticle))
   self.particles = newParticles
   
   dist = util.Counter()
   for part in self.particles: dist[part] += 1
   dist.normalize()
   self.beliefs = dist
Example #26
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of 0)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances() # before was a single noisyDistance
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]
        # before was a single one:   emissionModel = busters.getObservationDistribution(noisyDistance)
        
        "*** YOUR CODE HERE ***"
        newWeightCounter = util.Counter()
        
        for particle in self.particleList:
            newWeightForParticle = 1            
            for index in range(0, self.numGhosts):
                if(noisyDistances[index] == None):
                    particle = self.getParticleWithGhostInJail(particle, index)                    
                else:
                    trueDistance = util.manhattanDistance(particle[index], pacmanPosition)
                    newWeightForParticle *= emissionModels[index][trueDistance]
            newWeightCounter[particle] = newWeightCounter[particle] + newWeightForParticle              
        if newWeightCounter.totalCount() != 0:
            newWeightCounter.normalize()
            for x in range(0, self.numParticles):
                self.particleList[x] = util.sample(newWeightCounter)
        else: #all particles receive 0 weight
            self.initializeParticles()
            for x in range(0, len(self.particleList)):
                for index in range(0, self.numGhosts):
                    if(noisyDistances[index] == None):
                        self.particleList[x] = self.getParticleWithGhostInJail(self.particleList[x], index)                    
Example #27
    def observeState(self, gameState):
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"
        weights = util.Counter()
        for i in range(len(self.particle_positions)):
            temp = 1
            for ghost_idx in range(self.numGhosts):
                if noisyDistances[ghost_idx] is None:
                    new_particle = self.getParticleWithGhostInJail(self.particle_positions[i], ghost_idx)
                    self.particle_positions[i] = new_particle
                else:
                    trueDistance = util.manhattanDistance(self.particle_positions[i][ghost_idx], pacmanPosition)
                    model = emissionModels[ghost_idx]
                    temp *= model[trueDistance]
            weights[self.particle_positions[i]] += temp

        i = 0
        if weights.totalCount() != 0:
            self.particle_positions = []
            while i != self.numParticles:
                self.particle_positions.append(util.sample(weights))
                i += 1
        else:
            self.initializeParticles()
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given
        its previous position (oldPos) as well as Pacman's current
        position.

        util.sample(Counter object) is a helper method to generate a sample from a
        belief distribution
        """
        "*** YOUR CODE HERE ***"
        #1) go through each particle
        #2) generate the transition probability distribution at the particle's position
        #3) sample from the distribution to get the new position of the particle
        #4) add the particle to the new list

        #counter = util.Counter()
        newParticles = []
        for particle in self.particles:
            #for oldPos in self.legalPositions:
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, particle))
            newParticles.append(util.sample(newPosDist))
        self.particles = newParticles
Example #29
    def observe(self, observation, gameState):

        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()

        "*** YOUR CODE HERE ***"

        weights = util.Counter()
        i = 0

        if noisyDistance is None:
            self.particle_positions = []
            while i != self.numParticles:
                self.particle_positions.append(self.getJailPosition())
                i += 1
        else:
            for p in self.particle_positions:
                trueDistance = util.manhattanDistance(p, pacmanPosition)
                # more than one particle may share the same position
                weights[p] += emissionModel[trueDistance]

            if weights.totalCount() != 0:
                self.particle_positions = []
                while i != self.numParticles:
                    self.particle_positions.append(util.sample(weights))
                    i += 1
            else:
                self.initializeUniformly(gameState)
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"

        samples = []
        for parPos in self.parDist:
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, parPos))
            samples.append(util.sample(newPosDist))
        self.parDist = samples

        return self.getBeliefDistribution()
Example #31
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()

        #check if ghost is captured
        if (noisyDistance is None):
            num_particles = self.numParticles
            new_particles = []
            while num_particles > 0:
                num_particles -= 1
                new_particles.append(self.getJailPosition())
            self.particles = new_particles
            return

        # initialize weights (a Counter defaults missing keys to 0)
        weights = util.Counter()

        # compute weights for every particle
        for p in self.particles:
            dist = util.manhattanDistance(pacmanPosition, p)
            w = emissionModel[dist]
            weights[p] += w

        #case where all weights are 0
        if all(value == 0 for value in weights.values()):
            self.initializeUniformly(gameState)
            return

        weights.normalize()
        new_particles = []
        num_parts = self.numParticles
        while (num_parts > 0):
            p = util.sample(weights)
            new_particles.append(p)
            num_parts -= 1
        self.particles = new_particles

        "*** YOUR CODE HERE ***"
Example #32
 def eval_obj(self,data):
     samples = util.sample(data,np.minimum(1000,self.test_batch_size))
     loss = self.fprop(samples, volatile=True).data.cpu().numpy()
     return loss
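Note that Example #32 comes from a different codebase: there util.sample(data, n) appears to draw a random mini-batch of n items rather than one element from a belief Counter. A rough standard-library equivalent of that usage (an assumption, since that util module is not shown) would be:

import random

def sample_batch(data, batch_size):
    # draw a random subset of at most batch_size items without replacement
    return random.sample(data, min(batch_size, len(data)))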
Example #33
    def observeState(self):
        global static_particles
        self.particles = static_particles
        state = self.getCurrentObservation()
        pacmanPosition = state.getAgentPosition(self.index)
        oppList = self.getOpponents(state)
        noisyDistances = [state.getAgentDistances()[x] for x in oppList]
        food = state.getRedFood() if self.isOnRedTeam else state.getBlueFood()
        if len(noisyDistances) < self.num_opponents:
            return

        #If we see a ghost, we know a ghost
        for i, opp in enumerate(oppList):
            pos = state.getAgentPosition(opp)
            if pos is not None:
                for j, particle in enumerate(self.particles):
                    newParticle = list(particle)
                    newParticle[i] = pos
                    self.particles[j] = tuple(newParticle)

            else:
                for j, particle in enumerate(self.particles):
                    distance = util.manhattanDistance(pacmanPosition,
                                                      particle[i])
                    if distance <= 5:
                        newParticle = list(particle)
                        newParticle[i] = state.getInitialAgentPosition(opp)
                        self.particles[j] = tuple(newParticle)

        opp_position = self.getLikelyOppPosition()
        # If some of our food is missing, we know where a culprit might be!
        if food.count(True) != self.food.count(True):
            greedy_position = None
            for i, row in enumerate(food):
                for j, element in enumerate(row):
                    if food[i][j] != self.food[i][j]:
                        greedy_position = (i, j)
                        break

            self.food = food

            greedy_opp = min(
                [(util.manhattanDistance(greedy_position, opp_position[i]), i)
                 for i, opponent in enumerate(oppList)],
                key=lambda x: x[0])[1]
            for i, particle in enumerate(self.particles):
                newParticle = list(particle)
                newParticle[greedy_opp] = greedy_position
                self.particles[i] = tuple(newParticle)

        weights = util.Counter()
        for particle in self.particles:
            weight = 1
            for i, opponent in enumerate(oppList):
                distance = util.manhattanDistance(pacmanPosition, particle[i])
                # If we thought a ghost was near us but evidence is against it, prob is 0
                if state.getAgentPosition(opponent) is None and distance <= 5:
                    prob = 0
                else:
                    prob = state.getDistanceProb(distance, noisyDistances[i])
                weight *= prob
            weights[particle] += weight

        if weights.totalCount() == 0:
            self.particles = self.initializeParticles(state)
        else:
            weights.normalize()
            for i in xrange(len(self.particles)):
                self.particles[i] = util.sample(weights)
        static_particles = self.particles
Example #34
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"

        DEBUG = False
        beliefs = util.Counter()
        prev_beliefs = self.getBeliefDistribution()
        curr_dist = float('inf')

        # Update all particles if Pacman has captured a ghost
        if noisyDistance is None:
            new_particles = list()

            if DEBUG:
                print(
                    'Creating a new list after Pacman captured one of the ghosts.'
                )

            for particle in range(self.numParticles):
                new_particles.append(self.getJailPosition())

            self.particles = new_particles
        else:
            for particle in self.particles:
                curr_dist = util.manhattanDistance(particle, pacmanPosition)
                beliefs[particle] += emissionModel[curr_dist]

                if DEBUG:
                    print('Current particle: {} and distance between pacman and current particle: {}'.\
                           format(particle, curr_dist))

            if beliefs.totalCount() == 0:
                if DEBUG:
                    print(
                        'All the particles have a weight of 0. Recreating with previous distribution.'
                    )

                self.initializeUniformly(gameState)
            else:
                new_particles_list = list()

                for particle in range(self.numParticles):
                    curr_sample = util.sample(beliefs)
                    new_particles_list.append(curr_sample)

                self.particles = new_particles_list

Example #35
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        allPossible = util.Counter()
        
        if noisyDistance == None:
            jail_p = self.getJailPosition()
            allPossible[jail_p] = 1.0
            allPossible.normalize()
            self.beliefs = allPossible
            for i in range(self.numParticles):
                self.particles[i]= jail_p
            return
        
    
        total_wt = 0
        for p in self.legalPositions:
            trueDistance = util.manhattanDistance(p, pacmanPosition)
            self.weight[p] = emissionModel[trueDistance]
            total_wt = total_wt + self.particles.count(p) * self.weight[p] 
            
        if total_wt == 0:
            self.initializeUniformly(gameState)
            return
            
        for p in self.legalPositions:
            allPossible[p] = self.particles.count(p) * self.weight[p] / total_wt
            
        for i in range(self.numParticles):
            self.particles[i]= util.sample(allPossible)
            
        allPossible.normalize()
        self.beliefs = allPossible

        "*** END YOUR CODE HERE ***"
Example #36
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of None)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"

        # instantiate counter of particle weights
        grid = util.Counter()



        for particle in self.particles:

            prob = 1.0

            for index in range(self.numGhosts):


                if noisyDistances[index] == None:
                    particle = self.getParticleWithGhostInJail(particle, index)

                else:

                    distance = util.manhattanDistance(particle[index], pacmanPosition)
                    prob = prob*emissionModels[index][distance]

            grid[particle] += prob



        # all ghosts are in jail
        if sum(grid.values()) == 0:
            self.initializeParticles()
            return


        # resample particles based on state of the grid
        grid.normalize()
        self.particles = []
        counter = 0

        while counter < self.numParticles:
            particle = util.sample(grid)
            self.particles.append(particle)
            counter += 1
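The joint-particle examples rely on self.getParticleWithGhostInJail to pin one ghost's slot in a particle to its jail cell. The helper is provided by the project and is not shown in these snippets; the sketch below only illustrates the tuple-to-list-to-tuple edit the docstrings describe, assuming getJailPosition(ghostIndex) returns that ghost's jail cell.

    def getParticleWithGhostInJail(self, particle, ghostIndex):
        # Sketch of the documented helper: particles are tuples, so copy
        # to a list, overwrite the one ghost's position with its jail
        # cell, and convert back to a tuple.
        particle = list(particle)
        particle[ghostIndex] = self.getJailPosition(ghostIndex)
        return tuple(particle)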
Example #37
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of None)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        "*** YOUR CODE HERE ***"
        InJail = []
        for i in range(self.numGhosts):
            if noisyDistances[i] == None:
                InJail.append(i)
        self.beliefs = util.Counter()

        for particle in self.particles:
            Prob = 1
            for i in InJail:
                particle = self.getParticleWithGhostInJail(particle, i)
            for i in range(self.numGhosts):
                if i not in InJail:
                    model = emissionModels[i]
                    man = util.manhattanDistance(particle[i], pacmanPosition)
                    Prob *= model[man]
            self.beliefs[particle] += Prob
        self.beliefs.normalize()

        if self.beliefs.totalCount() == 0:
            self.initializeParticles()
            return

        for i in range(len(self.particles)):
            self.particles[i] = util.sample(self.beliefs)
    def elapseTime(self, gameState):
        """
        Samples each particle's next state based on its current state and the
        gameState.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        Then, assuming that `i` refers to the index of the ghost, to obtain the
        distributions over new positions for that single ghost, given the list
        (prevGhostPositions) of previous positions of ALL of the ghosts, use
        this line of code:

          newPosDist = getPositionDistributionForGhost(
             setGhostPositions(gameState, prevGhostPositions), i, self.ghostAgents[i]
          )

        Note that you may need to replace `prevGhostPositions` with the correct
        name of the variable that you have used to refer to the list of the
        previous positions of all of the ghosts, and you may need to replace `i`
        with the variable you have used to refer to the index of the ghost for
        which you are computing the new position distribution.

        As an implementation detail (with which you need not concern yourself),
        the line of code above for obtaining newPosDist makes use of two helper
        functions defined below in this file:

          1) setGhostPositions(gameState, ghostPositions)
              This method alters the gameState by placing the ghosts in the
              supplied positions.

          2) getPositionDistributionForGhost(gameState, ghostIndex, agent)
              This method uses the supplied ghost agent to determine what
              positions a ghost (ghostIndex) controlled by a particular agent
              (ghostAgent) will move to in the supplied gameState.  All ghosts
              must first be placed in the gameState using setGhostPositions
              above.

              The ghost agent you are meant to supply is
              self.ghostAgents[ghostIndex-1], but in this project all ghost
              agents are always the same.
        """
        newParticles = []
        for oldParticle in self.particles:
            newParticle = list(oldParticle)  # A list of ghost positions
            # now loop through and update each entry in newParticle...

            "*** YOUR CODE HERE ***"
            for i in range(self.numGhosts):
                newPosDist = getPositionDistributionForGhost(
                    setGhostPositions(gameState, oldParticle), i,
                    self.ghostAgents[i])
                newParticle[i] = util.sample(newPosDist)

            "*** END YOUR CODE HERE ***"
            newParticles.append(tuple(newParticle))
        self.particles = newParticles
Example #39
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]
        "*** YOUR CODE HERE ***"
        bd = self.getBeliefDistribution()  #belief distribution of all ghosts
        tnz = 0
        for i in range(self.numGhosts):
            newList = []
            noisyDistance = noisyDistances[i]
            if noisyDistance == None:
                self.deadGhosts.append(i)
                for pp in self.ppList:
                    pp = self.getParticleWithGhostInJail(pp, i)
                    newList.append(pp)
            else:
                totalNoneZero = 0
                dist = util.Counter()  #marginal probability
                for t, prob in bd.items():
                    dist[t[i]] += prob
                weights = util.Counter()
                emissionModel = emissionModels[i]
                for pp in self.ppList:
                    trueDistance = util.manhattanDistance(
                        pp[i], pacmanPosition)
                    if emissionModel[trueDistance] > 0:
                        weights[
                            pp[i]] = emissionModel[trueDistance] * dist[pp[i]]
                        totalNoneZero += 1
                if totalNoneZero != 0:
                    for pp in self.ppList:
                        ppl = list(pp)
                        ppl[i] = util.sample(weights)
                        pp = tuple(ppl)
                        newList.append(pp)
                    tnz += 1
            self.ppList = newList
        if tnz == 0:
            self.initializeParticles()
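Unlike the other joint implementations, Example #39 reweights one ghost at a time, first collapsing the joint belief over particles into that ghost's marginal (the dist[t[i]] += prob loop). A small self-contained sketch of that marginalization step, assuming the joint belief is a mapping from ghost-position tuples to probabilities:

def marginal_for_ghost(jointBelief, ghostIndex):
    # Collapse a joint distribution over ghost-position tuples into the
    # marginal distribution for a single ghost, mirroring the
    # dist[t[i]] += prob loop in the example above.
    marginal = {}
    for positions, prob in jointBelief.items():
        pos = positions[ghostIndex]
        marginal[pos] = marginal.get(pos, 0.0) + prob
    return marginal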
Example #40
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        # initialize weights
        weights = util.Counter()
        for p in self.particles:
            weights[p] = 0

        for p in self.particles:
            joint_weight = 1.0
            for i in range(self.numGhosts):
                if noisyDistances[i] is None:
                    p = self.getParticleWithGhostInJail(p, i)
                else:
                    dist = util.manhattanDistance(pacmanPosition, p[i])
                    w = emissionModels[i][dist]
                    joint_weight *= w
            weights[p] += joint_weight

        # case when all particles have weight 0
        if all(value == 0 for value in weights.values()):
            self.initializeParticles()
            for i in range(self.numGhosts):
                if noisyDistances[i] is None:
                    self.particles = [
                        self.getParticleWithGhostInJail(p, i)
                        for p in self.particles
                    ]
            return

        weights.normalize()
        new_particles = []
        num_parts = self.numParticles
        while (num_parts > 0):
            new_part = util.sample(weights)
            new_particles.append(new_part)
            num_parts -= 1

        self.particles = new_particles

        "*** YOUR CODE HERE ***"
Example #41
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of None)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        "*** YOUR CODE HERE ***"

        tempC = util.Counter()  # temporary counter
        for particleTuple in self.particles:
            cellProb = 1.0
            for GIndex in range(self.numGhosts):  # GIndex ---> GhostIndex
                if noisyDistances[GIndex] == None:
                    particleTuple = self.getParticleWithGhostInJail(
                        particleTuple, GIndex)
                else:
                    particlePos = particleTuple[GIndex]
                    trueDist = util.manhattanDistance(particlePos,
                                                      pacmanPosition)
                    cellProb = cellProb * \
                        emissionModels[GIndex][trueDist]

            tempC[particleTuple] = tempC[particleTuple] + \
                cellProb

        if not any(tempC.values()):
            self.initializeParticles()
        else:
            tempC.normalize()
            updatedParticles = []
            for _ in range(0, self.numParticles):
                updatedParticles.append(util.sample(tempC))
            self.particles = updatedParticles
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of None)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"
        counter = util.Counter()
        for p in self.particles:
            partial = float(1)
            for j in range(0, self.numGhosts):
                if noisyDistances[j] != None:
                    ghostPos = p[j]
                    d = abs(ghostPos[0] - pacmanPosition[0]) + abs(ghostPos[1] - pacmanPosition[1])
                    partial = partial * emissionModels[j][d]
                else:
                    p = self.getParticleWithGhostInJail(p, j)
            counter[p] += partial
        if not any(counter.values()): #all(v == 0 for k, v in counter.items()):
            self.initializeParticles()

            for i in range(len(self.particles)):
                for j in range(self.numGhosts):
                    if noisyDistances[j] == None:
                        # ghost j is in jail; fix particle i accordingly
                        oldParticle = self.particles[i]
                        newParticle = self.getParticleWithGhostInJail(oldParticle, j)
                        self.particles[i] = newParticle
            return

        counter.normalize()
        self.particles = []
        for i in range(self.numParticles):
            self.particles.append(util.sample(counter))
Example #43
0
def generate(data,
             query_manager,
             epsilon,
             epsilon_0,
             exponential_scale,
             samples,
             alpha=0,
             show_progress=True):
    domain = data.domain
    D = np.sum(domain.shape)
    N = data.df.shape[0]
    Q_size = query_manager.num_queries
    delta = 1.0 / N**2
    beta = 0.05  ## Fail probability

    prev_queries = []
    neg_queries = []
    rho_comp = 0.0000

    q1 = util.sample(np.ones(Q_size) / Q_size)
    q2 = util.sample(np.ones(Q_size) / Q_size)
    prev_queries.append(q1)  ## Sample a query from the uniform distribution
    neg_queries.append(q2)  ## Sample a query from the uniform distribution

    real_answers = query_manager.get_answer(data, debug=False)
    neg_real_answers = 1 - real_answers

    final_syn_data = []
    t = -1
    start_time = time.time()
    temp = []
    if show_progress:
        # progress = tqdm(total=0.5 * epsilon ** 2)
        progress = tqdm(total=epsilon)
    last_eps = 0
    while True:
        """
        End early after 10 minutes
        """
        if time.time() - start_time > 600: break

        t += 1
        rho = 0.5 * epsilon_0**2
        rho_comp += rho  ## EM privacy
        current_eps = rho_comp + 2 * np.sqrt(rho_comp * np.log(1 / delta))

        if current_eps > epsilon:
            break
        if show_progress:
            progress.update(current_eps - last_eps)
            last_eps = current_eps
        """
        Sample s times from FTPL
        """
        util.blockPrint()
        num_processes = 8
        s2 = int(1.0 + samples / num_processes)
        samples_rem = samples
        processes = []
        manager = mp.Manager()
        fake_temp = manager.list()

        query_workload = query_manager.get_query_workload(prev_queries)
        neg_query_workload = query_manager.get_query_workload(neg_queries)

        for i in range(num_processes):
            temp_s = samples_rem if samples_rem - s2 < 0 else s2
            samples_rem -= temp_s
            noise = np.random.exponential(exponential_scale, (temp_s, D))
            proc = mp.Process(target=gen_fake_data,
                              args=(fake_temp, query_workload,
                                    neg_query_workload, noise, domain, alpha,
                                    temp_s))

            proc.start()
            processes.append(proc)

        assert samples_rem == 0, "samples_rem = {}".format(samples_rem)
        for p in processes:
            p.join()

        util.enablePrint()
        oh_fake_data = []
        assert len(fake_temp) > 0
        for x in fake_temp:
            oh_fake_data.append(x)
            temp.append(x)
            if current_eps >= epsilon / 2:  ## this trick halves the final error
                final_syn_data.append(x)

        assert len(oh_fake_data
                   ) == samples, "len(oh_fake_data) = {} len(fake_temp) = {}".format(
                       len(oh_fake_data), len(fake_temp))
        for i in range(samples):
            assert len(oh_fake_data[i]) == D, "sample dim = {}".format(
                len(oh_fake_data[i]))
        assert not final_syn_data or len(
            final_syn_data[0]) == D, "final_syn_data dim = {}".format(
                len(final_syn_data[0]))

        fake_data = Dataset(
            pd.DataFrame(util.decode_dataset(oh_fake_data, domain),
                         columns=domain.attrs), domain)
        """
        Compute Exponential Mechanism distribution
        """
        fake_answers = query_manager.get_answer(fake_data, debug=False)
        neg_fake_answers = 1 - fake_answers

        score = np.append(real_answers - fake_answers,
                          neg_real_answers - neg_fake_answers)

        EM_dist_0 = np.exp(epsilon_0 * score * N / 2, dtype=np.float128)
        sum = np.sum(EM_dist_0)
        assert sum > 0 and not np.isinf(sum)
        EM_dist = EM_dist_0 / sum
        assert not np.isnan(
            EM_dist).any(), "EM_dist_0 = {} EM_dist = {} sum = {}".format(
                EM_dist_0, EM_dist, sum)
        assert not np.isinf(
            EM_dist).any(), "EM_dist_0 = {} EM_dist = {} sum = {}".format(
                EM_dist_0, EM_dist, sum)
        """
        Sample from EM
        """
        q_t_ind = util.sample(EM_dist)

        if q_t_ind < Q_size:
            prev_queries.append(q_t_ind)
        else:
            neg_queries.append(q_t_ind - Q_size)

    if len(final_syn_data) == 0:
        final_syn_data = temp
    fake_data = Dataset(
        pd.DataFrame(util.decode_dataset(final_syn_data, domain),
                     columns=domain.attrs), domain)

    return fake_data
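Example #43's stopping rule is a zCDP privacy account: each iteration spends rho = 0.5 * epsilon_0 ** 2, and the loop ends once the converted approximate-DP cost rho_comp + 2 * sqrt(rho_comp * log(1 / delta)) exceeds epsilon, with delta = 1 / N ** 2. The helper below is hypothetical (not part of the example) and only illustrates that accounting by computing how many iterations fit in a given budget.

import numpy as np

def max_em_rounds(epsilon, epsilon_0, n):
    # Count the iterations generate() above could run before its
    # converted (epsilon, delta) budget is exhausted.
    delta = 1.0 / n ** 2
    rho_step = 0.5 * epsilon_0 ** 2
    rho_comp, rounds = 0.0, 0
    while True:
        rho_next = rho_comp + rho_step
        if rho_next + 2 * np.sqrt(rho_next * np.log(1 / delta)) > epsilon:
            return rounds
        rho_comp = rho_next
        rounds += 1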
Example #44
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        "*** YOUR CODE HERE ***"

        # print ""
        # for k, v in self.getBeliefDistribution().items(): print k, v

        pacmanPosition = gameState.getPacmanPosition()

        # update particles based on observation
        for iPart, ps in enumerate(self.particles):
            if any([pos == pacmanPosition for pos in ps]):
                self.weights[iPart] = 0
                continue

            weight = 1.0
            for iGhost in range(self.numGhosts):
                # if just captured ghost, you know its in jail
                if noisyDistances[iGhost] == None:
                    # set to believe this ghost is in jail
                    self.particles[iPart] = \
                        self.getParticleWithGhostInJail(ps, iGhost)
                    weight *= 1.0
                # otherwise, handle normal (ghost wasn't captured)
                else:
                    weight *= \
                        emissionModels[iGhost] \
                        [util.manhattanDistance \
                        (ps[iGhost], pacmanPosition)]
            self.weights[iPart] = weight

        # new belief distribution
        beliefs = self.getBeliefDistribution()

        # if all particles are assigned 0 weight, resample uniformly
        if beliefs.totalCount() == 0:
            self.currentPacmanPosition = pacmanPosition
            self.initializeParticles()
            # and make sure that all particles believe
            # known jailed ghosts are in jail
            for iGhost in range(self.numGhosts):
                if noisyDistances[iGhost] == None:
                    for iPart, ps in enumerate(self.particles):
                        self.particles[iPart] = \
                            self.getParticleWithGhostInJail(ps, iGhost)

        # otherwise, resample based on weights
        else:
            self.particles = [
                util.sample(beliefs) for _ in range(self.numParticles)
            ]
Example #45
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]
        # your code here
        weights = util.Counter()
        for particle in self.particles:
            factor = 1.0
            for i in range(self.numGhosts):
                if noisyDistances[i] == None:
                    particle = self.getParticleWithGhostInJail(particle, i)
                    # factor = 0
                else:
                    distance = util.manhattanDistance(particle[i],
                                                      pacmanPosition)
                    factor *= emissionModels[i][distance]
            weights[particle] += factor

        if not any(weights.values()):
            self.initializeParticles()
            for i in range(self.numGhosts):
                if noisyDistances[i] == None:
                    self.particles = [
                        self.getParticleWithGhostInJail(p, i)
                        for p in self.particles
                    ]
        else:
            tmp = []
            weights.normalize()
            for i in range(0, self.numParticles):
                tmp.append(util.sample(weights))
            self.particles = tmp
Example #46
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of None)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]
        
        dist = self.getBeliefDistribution()

        numGhosts = self.numGhosts
        sign = False
        for i in range(len(self.particles)):
            for j in range(numGhosts):
                if noisyDistances[j] == None:
                    self.particles[i] = self.getParticleWithGhostInJail(self.particles[i], j)

        for particle in self.particles:
            weight = 1
            for i in range(numGhosts):
                if noisyDistances[i] != None:
                    dis = util.manhattanDistance(particle[i], pacmanPosition)
                    weight *= emissionModels[i][dis]
            dist[particle] += weight
            if weight > 0:
                sign = True

        if sign:
            for i in range(self.numParticles):
                self.particles[i] = util.sample(dist)
        else:
            self.initializeParticles()
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        "*** YOUR CODE HERE ***"
        particleWeights = util.Counter()
        for particle in self.particles:
            ghost = list(particle)
            #             particleWeights[particle] = 1.0
            weight = 1.0
            for i in range(self.numGhosts):
                if not (noisyDistances[i] == None):
                    trueDistance = util.manhattanDistance(
                        ghost[i], pacmanPosition)
                    weight *= emissionModels[i][trueDistance]
            particleWeights[tuple(particle)] += weight
        particleWeights.normalize()

        #         distribution = util.Counter()
        #         for ghostPos in self.particles:
        #             particle = list(ghostPos)
        #             particleWeight = 1
        #             for i in range(self.numGhosts):
        #                 if not noisyDistances[i] == None:
        #                     trueDistance = util.manhattanDistance(pacmanPosition, particle[i])
        #                     particleWeight *= emissionModels[i][trueDistance]
        #             distribution[tuple(ghostPos)] += particleWeight
        #         distribution.normalize()

        if not (particleWeights.totalCount() == 0):
            particles = []
            for i in range(self.numParticles):
                particles.append(util.sample(particleWeights))
            self.particles = particles

            for i in range(self.numGhosts):
                if noisyDistances[i] == None:
                    particlesNew = []
                    for p in self.particles:
                        particlesNew.append(
                            self.getParticleWithGhostInJail(p, i))
                    self.particles = particlesNew
        else:
            self.initializeParticles()
Example #48
0
    def observeState(self, gameState):
        """Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
        1) When all particles get weight 0 due to the observation,
           a new set of particles need to be generated from the initial
           prior distribution by calling initializeParticles.

        2) Otherwise after all new particles have been generated by
           resampling you must check if any ghosts have been captured
           by Pacman (noisyDistances[i] will be None if ghost i has
           been captured).

           For each captured ghost, you need to change the i'th component
           of every particle (remember that the particles contain a position
           for every ghost---so you need to change the component associated
           with the i'th ghost.). In particular, if ghost i has been captured
           then the i'th component of every particle must be changed so
           the i'th ghost is in its prison cell (position self.getJailPosition(i))

            Note that more than one ghost might be captured---you need
            to ensure that every particle puts every captured ghost in
            its prison cell.

        self.getParticleWithGhostInJail is a helper method to help you
        edit a specific particle. Since we store particles as tuples,
        they must be converted to a list, edited, and then converted
        back to a tuple. This is a common operation when placing a
        ghost in jail. Note that this function
        creates a new particle, that has to replace the old particle in
        your list of particles.

        HINT1. The weight of every particle is the product of the probabilities
               associated with each ghost's noisyDistance observation.
        HINT2. When computing the weight of a particle from each ghost's
               noisyDistance observation, make sure you check whether the
               ghost has been captured. Captured ghosts are ignored in the
               weight computation (the particle's component for the captured
               ghost is set to the precise jail position later), so this
               corresponds to multiplying the weight by probability 1.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]
        "*** YOUR CODE HERE ***"
        # emissionModels and noisyDistances separated by ghosts
        allPossible = util.Counter()
        for p in self.particles:
            weight = 1.0
            for i in range(self.numGhosts):
                if noisyDistances[i] is not None:
                    distance = util.manhattanDistance(pacmanPosition, p[i])
                    weight *= emissionModels[i][distance]
                else:
                    p = self.getParticleWithGhostInJail(p, i)
            allPossible[p] += weight
        allPossible.normalize()
        if all(i == 0 for i in allPossible.values()):
            self.initializeParticles()
        else:
            self.particles = [util.sample(allPossible) for i in self.particles]
        "*** END YOUR CODE HERE ***"
Example #49
0
def getNoisyDistance(pos1, pos2):
    if pos2[1] == 1: return None
    distance = util.manhattanDistance(pos1, pos2)
    return max(0, distance + util.sample(SONAR_NOISE_PROBS, SONAR_NOISE_VALUES))
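Example #49 uses the two-argument form of util.sample, drawing an additive noise value from parallel probability/value lists (SONAR_NOISE_PROBS, SONAR_NOISE_VALUES). As with the Counter form, the real helper lives in the project's util module; the sketch below is a hypothetical stand-in for that list-based variant.

import random

def sample_from_lists(probs, values):
    # Hypothetical stand-in for util.sample(probs, values): probs and
    # values are parallel lists, and one value is returned with the
    # corresponding probability.
    assert len(probs) == len(values)
    r = random.random() * sum(probs)
    cumulative = 0.0
    for p, v in zip(probs, values):
        cumulative += p
        if r <= cumulative:
            return v
    return values[-1]  # guard against floating-point round-off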
Example #50
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        beliefDoc = self.getBeliefDistribution()
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]
        "*** YOUR CODE HERE ***"
        probTupler = util.Counter()
        for tup in self.particleTuple:
            probTuple = 1.0
            newtup = tup
            for ghostNum in range(self.numGhosts):
                ghostProbPosition = tup[ghostNum]
                noise = noisyDistances[ghostNum]
                if noise == None:
                    newtup = self.getParticleWithGhostInJail(tup, ghostNum)
                else:
                    probTuple *= emissionModels[ghostNum][
                        util.manhattanDistance(ghostProbPosition,
                                               pacmanPosition)]
            probTupler[newtup] = probTuple * beliefDoc[tup]
        ls = []

        if probTupler.totalCount() == 0.0:
            self.initializeParticles()
        else:
            for i in range(self.numParticles):
                ls.append(util.sample(probTupler))
            self.particleTuple = ls
Example #51
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        "*** YOUR CODE HERE ***"

        #eat test
        for j in range(self.numGhosts):
            if (noisyDistances[j] == None):
                i = 0
                while (i < len(self.particleList)):
                    if (self.particleList[i][j] != self.getJailPosition(j)):
                        self.particleList[i] = self.getParticleWithGhostInJail(
                            self.particleList[i], j)
                    i += 1
        distribution = self.getBeliefDistribution()

        # reweight distribution based on new information
        for state in distribution:
            for j in range(self.numGhosts):
                if (noisyDistances[j] != None):
                    distribution[state] = distribution[state] * emissionModels[
                        j][util.manhattanDistance(state[j], pacmanPosition)]
        distribution.normalize()

        #test for bottoming out
        if (distribution.totalCount() == 0):
            self.initializeParticles()
            distribution = self.getBeliefDistribution()

        #resample for next iteration
        self.particleList = []
        i = 0
        while (i < self.numParticles):
            self.particleList.append(util.sample(distribution))
            i += 1
Example #52
0
    def observeState(self, gameState):
        """
    Resamples the set of particles using the likelihood of the noisy observations.

    As in elapseTime, to loop over the ghosts, use:

      for i in range(self.numGhosts):
        ...

    A correct implementation will handle two special cases:
      1) When a ghost is captured by Pacman, all particles should be updated so
         that the ghost appears in its prison cell, position self.getJailPosition(i)
         where "i" is the index of the ghost.

         You can check if a ghost has been captured by Pacman by
         checking if it has a noisyDistance of None (a noisy distance
         of None will be returned if, and only if, the ghost is
         captured).

      2) When all particles receive 0 weight, they should be recreated from the
          prior distribution by calling initializeParticles. Remember to
          change ghosts' positions to jail if called for.
    """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        "*** YOUR CODE HERE ***"
        weightedParticles = util.Counter()

        for i in range(self.numGhosts):
            if noisyDistances[i] == None:
                _particles = list()
                for particle in self.particles:
                    _particle = list(particle)
                    _particle[i] = self.getJailPosition(i)
                    _particles.append(tuple(_particle))
                self.particles = _particles

        for particle in self.particles:
            weightedParticles[particle] = 1.0
            for ghostIndex in range(self.numGhosts):
                if noisyDistances[ghostIndex] != None:
                    _distance = util.manhattanDistance(pacmanPosition,
                                                       particle[ghostIndex])
                    weightedParticles[particle] *= emissionModels[ghostIndex][
                        _distance]

        if weightedParticles.totalCount() == 0.0:
            self.initializeParticles()
            for ghostIndex in range(self.numGhosts):
                if noisyDistances[ghostIndex] == None:
                    for i in range(self.numParticles):
                        _particle = list(self.particles[i])
                        _particle[ghostIndex] = self.getJailPosition(
                            ghostIndex)
                        self.particles[i] = (tuple(_particle))
        else:
            beliefDistribution = self.getBeliefDistribution()
            for particle in self.particles:
                beliefDistribution[particle] *= weightedParticles[particle]
                weightedParticles[particle] = 1.0
            beliefDistribution.normalize()
            self.particles = [
                util.sample(beliefDistribution)
                for i in range(self.numParticles)
            ]
Example #53
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        "*** YOUR CODE HERE ***"
        allPossible = util.Counter()

        for eachParticle in self.particles:

            dist1 = util.manhattanDistance(eachParticle[0], pacmanPosition)
            dist2 = util.manhattanDistance(eachParticle[1], pacmanPosition)

            if noisyDistances[0] is None:
                eachParticle = self.getParticleWithGhostInJail(eachParticle, 0)
                for weights in emissionModels[0]:
                    emissionModels[0][weights] = 1.0

            if noisyDistances[1] is None:
                eachParticle = self.getParticleWithGhostInJail(eachParticle, 1)
                for weights in emissionModels[1]:
                    emissionModels[1][weights] = 1.0
            allPossible[eachParticle] += (emissionModels[0][dist1] *
                                          emissionModels[1][dist2])

        allPossible.normalize()
        self.beliefs = allPossible

        if self.beliefs.totalCount() == 0:
            self.initializeParticles()

        else:
            for i in range(len(self.particles)):
                self.particles[i] = util.sample(self.beliefs)
Example #54
0
    def elapseTime(self, gameState):
        """
        Samples each particle's next state based on its current state and the
        gameState.
        To loop over the ghosts, use:
          for i in range(self.numGhosts):
            ...
        Then, assuming that `i` refers to the index of the ghost, to obtain the
        distributions over new positions for that single ghost, given the list
        (prevGhostPositions) of previous positions of ALL of the ghosts, use
        this line of code:
          newPosDist = getPositionDistributionForGhost(
             setGhostPositions(gameState, prevGhostPositions), i, self.ghostAgents[i]
          )
        Note that you may need to replace `prevGhostPositions` with the correct
        name of the variable that you have used to refer to the list of the
        previous positions of all of the ghosts, and you may need to replace `i`
        with the variable you have used to refer to the index of the ghost for
        which you are computing the new position distribution.
        As an implementation detail (with which you need not concern yourself),
        the line of code above for obtaining newPosDist makes use of two helper
        functions defined below in this file:
          1) setGhostPositions(gameState, ghostPositions)
              This method alters the gameState by placing the ghosts in the
              supplied positions.
          2) getPositionDistributionForGhost(gameState, ghostIndex, agent)
              This method uses the supplied ghost agent to determine what
              positions a ghost (ghostIndex) controlled by a particular agent
              (ghostAgent) will move to in the supplied gameState.  All ghosts
              must first be placed in the gameState using setGhostPositions
              above.
              The ghost agent you are meant to supply is
              self.ghostAgents[ghostIndex-1], but in this project all ghost
              agents are always the same.
        """
        newParticles = []
        priors = self.getBeliefDistribution()

        # cache the position distributions for every (ghost, particle) pair up front
        newPosDists = {}
        for perm in self.permutationsWJail:
            for i in range(self.numGhosts):
                newPosDists[(i, perm)] = getPositionDistributionForGhost(
                    setGhostPositions(gameState, perm), i, self.ghostAgents[i])

        cachedWeights = {}

        for oldParticle in self.particles:
            newParticle = list(oldParticle)  # A list of ghost positions
            # now loop through and update each entry in newParticle...
            "*** YOUR CODE HERE ***"
            for i in range(self.numGhosts):
                if (i, oldParticle) in cachedWeights:
                    weights = cachedWeights[(i, oldParticle)]
                else:
                    weights = util.Counter()
                    # assumes every particle appears in self.permutationsWJail
                    newPosDist = newPosDists[(i, oldParticle)]
                    for newPos, prob in newPosDist.items():
                        weights[newPos] += prob * priors[oldParticle]
                    cachedWeights[(i, oldParticle)] = weights

                if sum(weights.values()) == 0:
                    # no mass anywhere (e.g. a jailed ghost): keep the old position
                    newParticle[i] = oldParticle[i]
                else:
                    dist = [weights[pos] for pos in self.legalPositions]
                    newParticle[i] = util.sample(dist, self.legalPositions)
            "*** END YOUR CODE HERE ***"
            newParticles.append(tuple(newParticle))
        self.particles = newParticles
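
Both of these methods lean on util.sample to draw from a weighted distribution. The snippet below is only an illustration of the kind of inverse-CDF draw it performs; the real helper in util.py may differ in signature and edge-case handling (as used above, it also accepts a parallel list of weights and values).

import random

def sampleFromCounter(weights):
    # Illustration only: return one key with probability proportional to its weight.
    items = [(key, w) for key, w in weights.items() if w > 0]
    if not items:
        raise ValueError("all weights are zero")
    total = float(sum(w for _, w in items))
    target = random.random() * total
    cumulative = 0.0
    for key, w in items:
        cumulative += w
        if target <= cumulative:
            return key
    return items[-1][0]  # guard against floating-point round-off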
Example #55
0
    def observeState(self, gameState):
        """
    Resamples the set of particles using the likelihood of the noisy observations.

    As in elapseTime, to loop over the ghosts, use:

      for i in range(self.numGhosts):
        ...

    A correct implementation will handle two special cases:
      1) When a ghost is captured by Pacman, all particles should be updated so
         that the ghost appears in its prison cell, position self.getJailPosition(i)
         where "i" is the index of the ghost.

         You can check if a ghost has been captured by Pacman by
         checking if it has a noisyDistance of None (a noisy distance
         of None will be returned if, and only if, the ghost is
         captured).

      2) When all particles receive 0 weight, they should be recreated from the
          prior distribution by calling initializeParticles. Remember to
          change ghosts' positions to jail if called for.
    """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        "*** YOUR CODE HERE ***"
        posLegal = self.legalPositions
        ParNum = self.numParticles
        tempPro = util.Counter()
        tempPro2 = util.Counter()

        # mark which ghosts have been captured (noisy distance of None)
        for eachGhost in range(0, self.numGhosts):
            if noisyDistances[eachGhost] is None:
                tempPro2[self.getJailPosition(eachGhost)] = 1

        for eachSample in self.particles:
            weightParticle = 1
            for eachGhost in range(0, self.numGhosts):
                if tempPro2[self.getJailPosition(eachGhost)] == 1:
                    # captured ghost: move it to jail in this particle
                    newParticle = list(eachSample)
                    newParticle[eachGhost] = self.getJailPosition(eachGhost)
                    eachSample = tuple(newParticle)
                else:
                    trueDistance = util.manhattanDistance(
                        eachSample[eachGhost], pacmanPosition)
                    weightParticle *= emissionModels[eachGhost][trueDistance]

            tempPro[eachSample] += weightParticle
        # all particles received 0 weight: recreate them from the prior
        if float(tempPro.totalCount()) == 0:
            self.initializeParticles()
            # per the docstring, eaten ghosts must still be moved to jail
            for particleIndex in range(ParNum):
                jailedParticle = list(self.particles[particleIndex])
                for eachGhost in range(0, self.numGhosts):
                    if noisyDistances[eachGhost] is None:
                        jailedParticle[eachGhost] = self.getJailPosition(eachGhost)
                self.particles[particleIndex] = tuple(jailedParticle)
            return

        tempPro.normalize()
        for particleIndex in range(ParNum):
            self.particles[particleIndex] = util.sample(tempPro)
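
Most of these solutions convert the particle list back into a distribution with self.getBeliefDistribution(). Conceptually that is just a normalized histogram of the particles, roughly as sketched here (the project's own implementation may differ in detail):

    # Sketch of the idea behind getBeliefDistribution: count particles, then normalize.
    def getBeliefDistribution(self):
        beliefs = util.Counter()
        for particle in self.particles:
            beliefs[particle] += 1.0
        beliefs.normalize()
        return beliefs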
Example #56
0
    def observe(self, observation, gameState):
        """
    Update beliefs based on the given distance observation. Make
    sure to handle the special case where all particles have weight
    0 after reweighting based on observation. If this happens,
    resample particles uniformly at random from the set of legal
    positions (self.legalPositions).

    A correct implementation will handle two special cases:
      1) When a ghost is captured by Pacman, **all** particles should be updated so
         that the ghost appears in its prison cell, self.getJailPosition()

         You can check if a ghost has been captured by Pacman by
         checking if it has a noisyDistance of None (a noisy distance
         of None will be returned if, and only if, the ghost is
         captured).

      2) When all particles receive 0 weight, they should be recreated from the
         prior distribution by calling initializeUniformly. The total weight
         for a belief distribution can be found by calling totalCount on
         a Counter object

    util.sample(Counter object) is a helper method to generate a sample from
    a belief distribution

    You may also want to use util.manhattanDistance to calculate the distance
    between a particle and pacman's position.
    """
        """
    noisyDistance = observation
    if noisyDistance == None:
      for counter in range(self.numParticles):
        self.particles[counter] = self.getJailPosition()
    else:
      emissionModel = busters.getObservationDistribution(noisyDistance)
      pacmanPosition = gameState.getPacmanPosition()
      beliefDistribution = self.getBeliefDistribution()
      allPossible = util.Counter()
      for p in self.legalPositions:
        trueDistance = util.manhattanDistance(p, pacmanPosition)
        if emissionModel[trueDistance] > 0:
          allPossible[p] = emissionModel[trueDistance] * beliefDistribution[p]
          # new distribution = weight * belief
      if allPossible.totalCount() == 0:
        self.initializeUniformly(gameState)
      else:
        for counter in range(self.numParticles):
          self.particles[counter] = util.sample(allPossible)
    """
        noisyDistance = observation
        if noisyDistance is None:
            for counter in range(self.numParticles):
                self.particles[counter] = self.getJailPosition()
        else:
            emissionModel = busters.getObservationDistribution(noisyDistance)
            pacmanPosition = gameState.getPacmanPosition()
            beliefDistribution = self.getBeliefDistribution()
            allPossible = util.Counter()
            for p in self.legalPositions:
                trueDistance = util.manhattanDistance(p, pacmanPosition)
                if emissionModel[trueDistance] > 0:
                    allPossible[p] = emissionModel[
                        trueDistance] * beliefDistribution[p]
            if allPossible.totalCount() == 0:
                self.initializeUniformly(gameState)
            else:
                for counter in range(self.numParticles):
                    self.particles[counter] = util.sample(allPossible)
Example #57
0
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make
        sure to handle the special case where all particles have weight
        0 after reweighting based on observation. If this happens,
        resample particles uniformly at random from the set of legal
        positions (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, **all** particles should be updated so
             that the ghost appears in its prison cell, self.getJailPosition()

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
             prior distribution by calling initializeUniformly. The total weight
             for a belief distribution can be found by calling totalCount on
             a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution

        You may also want to use util.manhattanDistance to calculate the distance
        between a particle and pacman's position.
        """

        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()

        beliefDistribution = self.getBeliefDistribution()  # particle probability distribution

        allPossible = util.Counter()

        # For each position in the particle probability distribution, weight the
        # prior belief by the evidence: P(e_t+1 | x_t+1) * B(x_t)
        for p in beliefDistribution.keys():
            trueDistance = util.manhattanDistance(p, pacmanPosition)
            if emissionModel[trueDistance] > 0:
                allPossible[p] = emissionModel[trueDistance] * beliefDistribution[p]

        # If all particles received 0 weight, resample uniformly at random and
        # take the resulting distribution as the new belief
        if sum(allPossible.values()) == 0:
            self.initializeUniformly(gameState)
            allPossible = self.getBeliefDistribution()

        # If Pacman has eaten the ghost, all probability mass goes to the jail position
        if noisyDistance is None:
            for k in allPossible.keys():
                allPossible[k] = 0
            allPossible[self.getJailPosition()] = 1

        allPossible.normalize()
        newParticles = list()
        for i in range(self.numParticles):
            # draw each new particle from the updated belief distribution
            newParticles.append(util.sample(allPossible))
        self.particles = newParticles
Example #58
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of None)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        "*** YOUR CODE HERE ***"
        for g in range(self.numGhosts):
            if noisyDistances[g] is None:  # this ghost has been captured
                if self.ghostsInJail[g] == False:  # it was not in jail last turn
                    self.ghostsInJail[g] = True
                # update every particle to reflect the ghost in jail (done each turn to be safe)
                for ix, particle in enumerate(self.particles):
                    self.particles[ix] = self.getParticleWithGhostInJail(particle, g)

        # get the belief distribution from the particles
        beliefDistribution = self.getBeliefDistribution()

        allPossible = util.Counter()  # stores the new belief distribution

        # for each particle in the belief distribution, compute its joint weight
        for p in beliefDistribution.keys():
            weight = 1  # weighting of the particle
            for g in range(self.numGhosts):
                if self.ghostsInJail[g]:
                    # the emission distribution is 0 for a jailed ghost, so skip it
                    continue
                pos = p[g]  # this ghost's position in the particle
                trueDist = util.manhattanDistance(pos, pacmanPosition)
                # p(evidence | ghost position), multiplied into the joint weight
                weight *= emissionModels[g][trueDist]
            allPossible[p] = weight * beliefDistribution[p]

        # if all weights are 0, resample from the prior
        if sum(allPossible.values()) == 0:
            self.initializeParticles()
            allPossible = self.getBeliefDistribution()

        allPossible.normalize()
        newParticles = []
        # sample new particles from the new belief distribution
        for i in range(self.numParticles):
            newParticles.append(util.sample(allPossible))
        self.particles = newParticles
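
The docstrings describe self.getParticleWithGhostInJail as converting a particle tuple to a list, editing the one ghost's entry, and converting it back. Based only on that description, it presumably looks something like:

    # Presumed shape of the helper, inferred from the docstrings above.
    def getParticleWithGhostInJail(self, particle, ghostIndex):
        particle = list(particle)  # tuples are immutable
        particle[ghostIndex] = self.getJailPosition(ghostIndex)
        return tuple(particle)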
Example #59
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        # initialize our new belief distribution to build
        newBeliefDistribution = util.Counter()

        # build the new belief by iterating through all of the particles
        for i in range(len(self.particles)):
          subCount = 1.0
          newPart = self.particles[i]
          for j in range(self.numGhosts):
            if noisyDistances[j] is None:
              # the noisy distance is None, so Pacman has captured this ghost;
              # keep editing newPart so earlier jailed ghosts are not overwritten
              newPart = self.getParticleWithGhostInJail(newPart, j)
            else:
              # calculate a distance to the particle
              calcDist = util.manhattanDistance(self.particles[i][j], pacmanPosition)
              subCount *= emissionModels[j][calcDist]
          newBeliefDistribution[newPart] += subCount

        # Check to see if any of the belief values are greater than 0
        beliefs = False
        for val in newBeliefDistribution.values():
          if val > 0.0:
            beliefs = True

        # If every belief value is 0, we need to reinitialize particles
        # with their prior distribution values, and then place any
        # eaten ghosts in jail
        if not beliefs:
          self.initializeParticles()
          for i in range(len(self.particles)):
            for j in range(self.numGhosts):
              if noisyDistances[j] is None:
                # particles are tuples, so replace the whole particle with the jailed copy
                self.particles[i] = self.getParticleWithGhostInJail(self.particles[i], j)
        else:
          # Just normalize the new beliefs and resample for the particles
          newBeliefDistribution.normalize()
          self.particles = []
          for i in range(self.numParticles):
            self.particles.append(util.sample(newBeliefDistribution))
Example #60
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.
        
        As in elapseTime, to loop over the ghosts, use:
        
          for i in range(self.numGhosts):
            ...
        
        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.
        
             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).
        
          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. Remember to
              change ghosts' positions to jail if called for.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        #------------------------------------------------------------
        # Try by taking running particle filter on each particle
        # collection
        #------------------------------------------------------------
        #
        # Update the weights of the particles. We given each particle
        # a weight based on the collective probabilities of its positions.
        #
        possible = util.Counter()
        particles = []
        for particle in self.particles:
            evidence, nparticle = 1, []
            for ghost, position in enumerate(particle):
                if noisyDistances[ghost] != None:
                    distance = util.manhattanDistance(position, pacmanPosition)
                    evidence *= emissionModels[ghost][distance]
                    nparticle.append(position)
                else:
                    nparticle.append(self.getJailPosition(ghost))
            possible[tuple(nparticle)] += evidence
            particles.append(tuple(nparticle))
        possible.normalize()
        self.particles = particles  # replace for debugging

        #
        # if we don't know where anyone is, just start over
        #
        if possible.totalCount() == 0.0:
            self.initializeParticles()
            return

        #
        # resample the particles as a whole given the weights
        # of the particles as a whole.
        #
        particles = []
        for _ in range(self.numParticles):
            particle = util.sample(possible)
            particles.append(tuple(particle))
        self.particles = particles
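
Taken together, elapseTime and observeState form one predict/correct cycle of the particle filter. The loop below is a hypothetical usage sketch (trackGhosts, filterAgent, and numTurns are illustrative names, not part of the project API):

def trackGhosts(filterAgent, gameState, numTurns):
    # Hypothetical driver: alternate the two filter steps once per turn.
    for _ in range(numTurns):
        filterAgent.elapseTime(gameState)    # predict: advance each particle with the motion model
        filterAgent.observeState(gameState)  # correct: reweight on the noisy distances and resample
        beliefs = filterAgent.getBeliefDistribution()
        # beliefs now approximates P(ghost positions | all evidence so far)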