Example #1
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"
        bd = self.getBeliefDistribution()
        allPossible = util.Counter()
        for p in self.legalPositions:
            npd = self.getPositionDistribution(
                self.setGhostPosition(gameState, p))
            for newPos, prob in npd.items():
                allPossible[newPos] += prob * bd[p]
        maxV = max([v for k, v in allPossible.items()])
        if maxV == 0:
            self.initializeUniformly(gameState)
        else:
            items = sorted(allPossible.items())
            self.ppList = util.nSample([v for k, v in items],
                                       [k for k, v in items],
                                       self.numParticles)
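All of the examples in this listing lean on util.nSample(distribution, values, n) from the Berkeley Pacman util.py, which draws n values with replacement, each chosen with probability proportional to its entry in the parallel distribution list. As a point of reference, here is a minimal sketch of such a helper; it illustrates the weighted-sampling idea only and is not the project's actual implementation, which may differ in details.

import bisect
import random

def nSample(distribution, values, n):
    # Minimal sketch: draw n samples with replacement from `values`, where
    # values[i] is chosen with probability distribution[i] / sum(distribution).
    total = float(sum(distribution))
    cumulative = []
    running = 0.0
    for weight in distribution:
        running += weight / total
        cumulative.append(running)
    cumulative[-1] = 1.0  # guard against floating-point drift
    samples = []
    for _ in range(n):
        # Map a uniform draw onto the cumulative distribution.
        index = bisect.bisect_left(cumulative, random.random())
        samples.append(values[index])
    return samples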
Example #2
    def sample_data(self, trainingData, trainingLabels, sample_weights):
        # "*** YOUR CODE HERE ***"
        sampleSize = int(len(trainingData) * 0.5)

        sampleData = util.Counter()
        sampleLabels = util.Counter()

        # cumulative_weights = sample_weights[:]
        # for i in range(1,len(cumulative_weights)):
        #     cumulative_weights[i] += cumulative_weights[i-1]

        # for i in range(sampleSize):
        #     idx = random.uniform(0.0, 1.0)
        #     for j in range(len(trainingData)):
        #         if idx < cumulative_weights[j]:
        #             sampleData[i] = trainingData[j]
        #             sampleLabels[i] = trainingLabels[j]
        #             break

        samplePairs = util.nSample(sample_weights,
                                   zip(trainingData, trainingLabels),
                                   sampleSize)
        for i in range(len(samplePairs)):
            sampleData[i], sampleLabels[i] = samplePairs[i]

        return sampleData, sampleLabels
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        "*** YOUR CODE HERE ***"
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(
            noisyDistance)  # P(e|X)
        pacmanPosition = gameState.getPacmanPosition()

        if noisyDistance is None:
            # update all particles so that ghost appears in prison cell
            jailed_position = self.getJailPosition()
            self.particles = [
                jailed_position for _ in range(self.numParticles)
            ]

        else:
            # update particle weights
            particleWeights = util.Counter()
            for p in self.particles:
                trueDistance = util.manhattanDistance(p, pacmanPosition)
                particleWeights[p] += emissionModel[
                    trueDistance]  #update weight with true distance
            # resample
            if particleWeights.totalCount() == 0:
                self.initializeUniformly(gameState)
            else:
                items = sorted(particleWeights.items())
                values, distribution = map(list, zip(*items))
                samples = util.nSample(distribution, values, self.numParticles)
                # samples = list()
                # for _ in range(self.numParticles):
                #     sample = util.sample(particleWeights)
                #     samples.append(sample)
                # samples = util.nSample(particleWeights, self.particles, self.numParticles)
                self.particles = samples
Example #4
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"
        allPossible = util.Counter()
        beliefs = self.getBeliefDistribution()
        for oldPos in self.list_particles:
            newPosDist = self.getPositionDistribution(
                self.setGhostPosition(gameState, oldPos))
            for newPos, prob in newPosDist.items():
                allPossible[newPos] += prob * beliefs[oldPos]
        items = sorted(allPossible.items())
        distribution = [i[1] for i in items]
        values = [i[0] for i in items]
        self.list_particles = util.nSample(distribution, values,
                                           self.numParticles)
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"

        new_beliefs = util.Counter()

        for p in self.particles:
            newPosDist = self.getPositionDistribution(
                self.setGhostPosition(gameState, p))
            for newPos, prob in newPosDist.items():
                new_beliefs[newPos] += prob
        self.particles = util.nSample(new_beliefs.values(), new_beliefs.keys(),
                                      self.numParticles)
Example #6
    def train(self, trainingData, trainingLabels):
        """
		The training loop samples from the data "num_classifiers" time. Size of each sample is
		specified by "ratio". So len(sample)/len(trainingData) should equal ratio. 
		"""

        self.features = trainingData[0].keys()
        # "*** YOUR CODE HERE ***"

        for i in range(self.num_classifiers):
            sampleData = util.Counter()
            sampleLabels = util.Counter()
            samplePairs = util.nSample([1.0 / len(trainingData)] *
                                       len(trainingData),
                                       zip(trainingData, trainingLabels),
                                       int(self.ratio * len(trainingData)))

            for j in range(len(samplePairs)):
                sampleData[j], sampleLabels[j] = samplePairs[j]

            # for j in range(int(self.ratio * len(trainingData))):
            #     idx = random.randint(0, len(trainingData)-1)
            #     sampleData[j], sampleLabels[j] = trainingData[idx], trainingLabels[idx]

            self.classifiers[i].train(sampleData, sampleLabels)
Example #7
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"
        # util.raiseNotDefined()
        beliefs = self.getBeliefDistribution()
        allPossible = util.Counter()
        # for all possible legal positions 
        for oldPos in self.legalPositions:
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))
            for newPos, prob in newPosDist.items():
                allPossible[newPos] += prob * beliefs[oldPos]

        if(allPossible.totalCount() == 0):
            self.initializeUniformly(gameState)
        else:
            allPossible.normalize()
            items = sorted(allPossible.items())
            values = [i[0] for i in items]
            distribution = [i[1] for i in items]
            self.particles = util.nSample(distribution, values, self.numParticles) 
Example #8
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        beliefs = self.getBeliefDistribution()
        legalPositionTuples = beliefs.sortedKeys()

        # "*** YOUR CODE HERE ***"
        new_beliefs = util.Counter()
        
        for p in legalPositionTuples:
            new_beliefs[p] = beliefs[p]
            for i in xrange(self.numGhosts):
                if noisyDistances[i] != None:
                    trueDistance = util.manhattanDistance(p[i], pacmanPosition)
                    new_beliefs[p] *= emissionModels[i][trueDistance]             

        if(new_beliefs.totalCount() == 0):
            self.initializeParticles()
        else:
            new_beliefs.normalize()
            items = sorted(new_beliefs.items())
            values = [i[0] for i in items]
            distribution = [i[1] for i in items]
            self.particles = util.nSample(distribution, values, self.numParticles)

        for i in xrange(self.numGhosts):
            if noisyDistances[i] == None:
                self.particles = [self.getParticleWithGhostInJail(p, i) for p in self.particles]
Example #9
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        updated = []
        for i in range(self.numGhosts):
            # print(noisyDistances)
            if noisyDistances[i] == None:
                for index, particle in enumerate(self.particleList):
                    self.particleList[index] = self.getParticleWithGhostInJail(particle, i)

        weights = util.Counter()

        for particle in self.particleList:
            totalprob = 1
            for i in range(self.numGhosts):
                if noisyDistances[i] != None:
                    dist = util.manhattanDistance(particle[i], pacmanPosition)
                    prob = emissionModels[i][dist]
                    totalprob = totalprob * prob
            weights[particle] += totalprob
        # print(weights)
        weights.normalize()
        keys = weights.keys()
        distribution = weights.values()
        if all(val == 0 for val in distribution):
            self.initializeParticles()
        else:
            self.particleList = util.nSample(distribution, keys, len(self.particleList))
Example #10
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"
        allPossible = util.Counter()
        beliefDistribution = self.getBeliefDistribution()

        for p in self.legalPositions:
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, p))
            for newPos, prob in newPosDist.items():
                allPossible[newPos] += prob * beliefDistribution[p]

        allPossible.normalize()

        if not allPossible.totalCount():
            self.initializeUniformly(gameState)
            allPossible = self.getBeliefDistribution()

        self.particles = util.nSample(allPossible.values(), allPossible.keys(), self.numParticles)
Example #11
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()

        beliefs = self.getBeliefDistribution()

        # Replace this code with a correct observation update
        # Be sure to handle the "jail" edge case where the ghost is eaten
        # and noisyDistance is None
        allPossible = util.Counter()

        # First, consider case where ghost is captured.
        if noisyDistance is None:
            jailPos = self.getJailPosition()
            allPossible[jailPos] = 1.0
        else:
            # P(x_t | e_1:t) is proportional to P(e_t | x_t) * sum over x_{t-1} of P(x_t | x_{t-1}) * P(x_{t-1} | e_1:t-1)
            for p in self.particles:
                trueDistance = util.manhattanDistance(p, pacmanPosition)
                cond_prob_ev_t = emissionModel[trueDistance]  #p(e_t|x_t)
                # assume ghost is standing still?
                allPossible[p] = cond_prob_ev_t * beliefs[p]

        allPossible.normalize()
        # First handle the case where all particles get zero weight
        nonZero = [p for p in allPossible if allPossible[p] > 0]
        if len(nonZero) == 0:
            self.initializeUniformly(gameState)
        else:
            weights = [allPossible[p] for p in allPossible]
            positions = [p for p in allPossible]
            self.particles = util.nSample(weights, positions,
                                          self.numParticles)
Example #12
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"
        # COMPLETE / FIX THIS
        res = []

        for part in self.particles:
            newPosDist = self.getPositionDistribution(
                self.setGhostPosition(gameState, part))
            items = newPosDist.items()
            distribution = [i[1] for i in items]
            values = [i[0] for i in items]
            res.append(util.nSample(distribution, values, 1)[0])
        self.particles = res
        """res = []
Example #13
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        # handle the edge case where we eat the
        # ghost and definitely send it to jail
        # by putting all of the particles in jail
        if noisyDistance is None:
            self.particles = [self.getJailPosition()]*self.numParticles
            return

        # otherwise, weight each particle
        new_particles = util.Counter()
        for particle in self.particles:        
            trueDistance = util.manhattanDistance(particle, pacmanPosition)
            # p(e_t | x_t)
            emissionProb = emissionModel[trueDistance]
            # we can't just assign the key here, because
            # we have the sample particle repeated multiple times,
            # so we have to increment
            new_particles.incrementAll([particle], emissionProb)

        if not new_particles.totalCount():
            self.initializeUniformly(gameState)
        else:
            # this way is slower
            #resampled_particles = []
            #for _ in range(self.numParticles):
            #    resampled_particles.append(util.sample(new_particles))
            #self.particles = resampled_particles
            # this way is faster
            self.particles = util.nSample(new_particles.values(), new_particles.keys(), self.numParticles)
Example #14
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution (it can be used for resampling).

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        if noisyDistance is None:  # the ghost has been captured; reset all particles to the jail position
            self.particles = [self.getJailPosition()] * self.numParticles
            return
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        weights = util.Counter()
        for pos in self.legalPositions:
            trueDistance = util.manhattanDistance(pacmanPosition, pos)
            posWeight = emissionModel[trueDistance]
            weights[pos] = posWeight

        newBeliefs = util.Counter()
        for particle in self.particles:
            newBeliefs[particle] += weights[particle]

        totalParticlesWeight = newBeliefs.totalCount()
        if totalParticlesWeight == 0.0:
            self.initializeUniformly(gameState)
        else:
            # resample from the new belief distribution
            newBeliefs.divideAll(totalParticlesWeight)
            self.particles = []
            # for particle in self.numParticles:
            #     particleSample = util.sample(newBeliefs)
            #     self.particles.append(particleSample)
            items = sorted(newBeliefs.items())
            distribution = [i[1] for i in items]
            values = [i[0] for i in items]
            self.particles = util.nSample(distribution, values,
                                          self.numParticles)
Example #15
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        pos = []
        if noisyDistance is None:
            # if the ghost is captured
            for p in range(self.numParticles):
                pos.append(self.getJailPosition())
            self.particles = pos
        else:
            weights = []
            values = []
            for i in range(len(self.particles)):
                trueDistance = util.manhattanDistance(self.particles[i],
                                                      pacmanPosition)
                if emissionModel[trueDistance] > 0:
                    weights.append(
                        emissionModel[trueDistance])  # update beliefs
                    values.append(self.particles[i])

            sum_weights = sum(weights)
            if sum_weights == 0:
                self.initializeUniformly(gameState)
            else:
                samples = util.nSample(weights, values, self.numParticles)
                self.particles = samples

        allPossible = self.getBeliefDistribution()
        allPossible.normalize()
        return allPossible
        "*** END YOUR CODE HERE ***"
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"

        # References: Textbook pp. 598-599

        # jail edge case
        if noisyDistance == None:
            pos = self.getJailPosition()
            self.particles = [pos]*self.numParticles
            return

        # Step 1: calculate weights
        weights = util.Counter()
        for pos,prob in self.getBeliefDistribution().items():
            trueDistance = util.manhattanDistance(pos, pacmanPosition)
            weights[pos] = emissionModel[trueDistance]*prob
        weights.normalize()


        # Step 2: resample based on the weights
        if weights.totalCount() == 0:
            self.initializeUniformly(gameState) # edge case
        else:
            distribution = []
            values = []
            for pos, prob in weights.items():
                values.append(pos)
                distribution.append(prob)

            self.particles = util.nSample(distribution, values, self.numParticles)
Example #17
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [
            busters.getObservationDistribution(dist) for dist in noisyDistances
        ]

        distribution = [1.0] * len(self.particles)

        for ghostIndex in range(self.numGhosts):
            if noisyDistances[ghostIndex] != None:
                for i in xrange(len(self.particles)):
                    weight = emissionModels[ghostIndex][util.manhattanDistance(
                        self.particles[i][ghostIndex], pacmanPosition)]
                    distribution[i] *= weight

        if sum(distribution) == 0.0:
            self.initializeParticles()
        else:
            self.particles = util.nSample(distribution, self.particles,
                                          self.numParticles)

        for ghostIndex in range(self.numGhosts):
            if noisyDistances[ghostIndex] == None:
                for i in xrange(len(self.particles)):
                    self.particles[i] = self.getParticleWithGhostInJail(
                        self.particles[i], ghostIndex)
        """
Example #18
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of None)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"
        weights = util.Counter()
        newParticleList = []
        for x in range(len(noisyDistances)):
            if noisyDistances[x] == None:   
                for y in range(len(self.particleList)):
                    self.particleList[y] = self.getParticleWithGhostInJail(self.particleList[y], x)
        for i in range(len(self.particleList)):
            likelihood = 1
            trueDistances = [util.manhattanDistance(pacmanPosition, self.particleList[i][j]) for j in range(len(self.particleList[i]))]
            for j in range(len(trueDistances)):
                if tuple(list(self.particleList[i])[j]) != self.getJailPosition(j):
                    likelihood = likelihood * emissionModels[j][trueDistances[j]]
            weights[i]= likelihood
        if all(x == 0 for x in weights.values()):
            self.initializeParticles()
            
        else:
            distribution = [weights[i] for i in range(len(self.particleList))]
            newParticleList = util.nSample(distribution, self.particleList,
                                           len(self.particleList))
            self.particleList = newParticleList
Example #19
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"
        #get/set the belief distribution
        self.beliefs = self.getBeliefDistribution()

        #going to use a counter to keep track of the updated particles
        updatedParticles = util.Counter()

        #moving particles based on probability of ghost movement
        for p in self.parts:
            position, numP = p
            ghostPositionDistribution = self.getPositionDistribution(
                self.setGhostPosition(gameState, position))
            for newPosition, probability in ghostPositionDistribution.items():
                updatedParticles[newPosition] += numP * probability

        self.beliefs = updatedParticles
        self.beliefs.normalize()

        #***********Resampling*********************
        # Get the possible particle indices
        x = 0
        particleIndicies = []
        while x < self.numParticles:
            particleIndicies.append(x)
            x += 1

        #resample each particle and drop them into the location, 'bucket', that
        #corresponds to the new location after sampling
        buckets = util.Counter()
        for b in util.nSample(self.beliefs.values(), particleIndicies,
                              self.numParticles):
            buckets[b] += 1

        #after we have the buckets (resampled locations), convert back to a
        #particle list and reset the list
        count = 0
        sampledList = []
        for l, nump in self.parts:
            sampledList.append((l, buckets[count]))
            count += 1
        self.parts = sampledList

        return self.beliefs
Example #20
 def resample(self, beliefs):
     """
     Helper that resamples the particle list from beliefs (a util.Counter)
     using util.nSample().
     """
     # Resample particles from a belief distribution
     distribution = [i[1] for i in beliefs.items()]
     values = [i[0] for i in beliefs.items()]
     self.particles = util.nSample(distribution, values, self.numParticles)
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getAgentPosition(self.index)  # this agent's own position
        noisyDistances = gameState.getAgentDistances(
        )  # gives noisy distances of ALL agents
        # emissionModels = [gameState.getDistanceProb(dist) for dist in noisyDistances]

        for enemy_num in self.getOpponents(gameState):
            beliefDist = self.getBeliefDistribution()
            W = util.Counter()

            # JAIL? unhandled so far

            for p in self.particles:
                trueDistance = self.getMazeDistance(p[enemy_num],
                                                    pacmanPosition)
                W[p] = (beliefDist[p] * gameState.getDistanceProb(
                    trueDistance, noisyDistances[enemy_num]))

            # we resample after we get weights for each ghost
            if W.totalCount() == 0:
                self.particles = self.initializeParticles()
            else:
                values = []
                keys = []
                for key, value in W.items():
                    keys.append(key)
                    values.append(value)
                self.particles = util.nSample(values, keys, self.numParticles)
Example #22
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(
            noisyDistance
        )  # dictionary that maps true distance to P(noisy distance | true distance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        weights = util.Counter()

        if noisyDistance == None:
            self.particleList = []
            num = 0
            while num < self.numParticles:
                self.particleList.append(self.getJailPosition())
                num += 1
            # print self.particleList

        else:
            for particle in self.particleList:
                dist = util.manhattanDistance(particle, pacmanPosition)
                prob = emissionModel[dist]
                weights[particle] += prob
            weights.normalize()
            keys = weights.keys()
            distribution = weights.values()
            if all(val == 0 for val in distribution):
                self.initializeUniformly(gameState)
            # print(weights)
            else:
                self.particleList = util.nSample(distribution, keys, len(self.particleList))
Example #23
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"
        beliefDistribution = util.Counter()

        # For all
        for particle in self.particles:
            partialProbability = 1
            for idx in range(self.numGhosts):
                if noisyDistances[idx] is None:
                    particle = self.getParticleWithGhostInJail(particle, idx)
                else:
                    trueDistance = util.manhattanDistance(particle[idx], pacmanPosition)
                    partialProbability *= emissionModels[idx][trueDistance]
            beliefDistribution[particle] += partialProbability

        if beliefDistribution.totalCount():
            self.particles = util.nSample(beliefDistribution.values(), beliefDistribution.keys(), self.numParticles)
        else:
            self.initializeParticles()
            for idx in range(self.numGhosts):
                if noisyDistances[idx] is None:
                    self.particles = [self.getParticleWithGhostInJail(particle, idx) for particle in self.particles]
Example #24
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"
        currentDistribution = self.getBeliefDistribution()
        for particle in currentDistribution:
            for i in range(self.numGhosts):
                if noisyDistances[i] != None:
                    trueDistance = util.manhattanDistance(pacmanPosition, particle[i])
                    currentDistribution[particle] *= emissionModels[i][trueDistance]

        if currentDistribution.totalCount() == 0:
            self.initializeParticles()
        else:
            items = sorted(currentDistribution.items())
            distribution = [v for k, v in items]
            value = [k for k, v in items]
            self.particles = util.nSample(distribution, value, self.numParticles)

        # put all the particles with ghost in jail to jail
        for i in range(self.numGhosts):
            if noisyDistances[i] == None:
                for j in range(self.numParticles):
                    self.particles[j] = self.getParticleWithGhostInJail(self.particles[j], i)
Example #25
 def sample_data(self, trainingData, trainingLabels, sample_weights):
     "*** YOUR CODE HERE ***"
     #Convert weighted dataset to unweighted dataset
     #total = sum(sample_weights)
     #distribution = [sample_weights[i] *1.0/total for i in range(len(sample_weights))]
     new_pairs = util.nSample(sample_weights,
                              zip(trainingData, trainingLabels),
                              int(0.5 * len(trainingData)))
     new_data, new_labels = zip(*new_pairs)
     return new_data, new_labels
Example #26
def nSampleCounterWR(counts, n, aslist=False):
    if counts.totalCount() != 1:
        counts = util.normalize(counts)
    pairs = counts.items()
    keys = [k for k, v in pairs]
    values = [v for k, v in pairs]
    sampled = util.nSample(values, keys, n)
    if aslist:
        return sampled
    return CounterFromIterable(sampled)
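The helper above returns CounterFromIterable(sampled), a function that is not included in this listing. Judging from its name and usage, it presumably tallies how often each sampled key was drawn; the following is a hypothetical reconstruction, assuming the Pacman project's util.Counter is available.

import util  # the Pacman project's util module, as used in the examples above

def CounterFromIterable(iterable):
    # Hypothetical reconstruction (not shown in the source): count how many
    # times each item occurs and return the tallies as a util.Counter.
    counts = util.Counter()
    for item in iterable:
        counts[item] += 1
    return counts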
Example #27
def nSampleCounterWR(counts, n, aslist = False):
  if counts.totalCount() != 1:
    counts = util.normalize(counts)
  pairs = counts.items()
  keys = [ k for k,v in pairs ]
  values = [ v for k,v in pairs ]
  sampled = util.nSample(values, keys, n)
  if aslist:
    return sampled
  return CounterFromIterable(sampled)
Example #28
 def sample_data(self, trainingData, trainingLabels, sample_weights):
     # "*** YOUR CODE HERE ***"
     # util.raiseNotDefined()
     L = [i for i in range(len(trainingData))]
     # indices = [np.random.choice(L , p=sample_weights) for _ in range(int(len(trainingData) / 2))]
     indices = util.nSample(sample_weights, L, int(
         len(trainingData) * 0.55))  # sample is set to 0.55
     data = [trainingData[i] for i in indices]
     labels = [trainingLabels[i] for i in indices]
     return data, labels
Example #29
 def sample_data(self, trainingData, trainingLabels, sample_weights):
     "*** YOUR CODE HERE ***"
     N = len(trainingData)
     indices = util.nSample(sample_weights, list(range(N)), int(0.7 * N))
     sampleData = []
     sampleLabels = []
     for i in indices:
         sampleData.append(trainingData[i])
         sampleLabels.append(trainingLabels[i])
     return sampleData, sampleLabels
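The sample_data variants above all perform a weighted resample of the training set. For context, here is a rough sketch of how such a method might be driven from a training loop; the attribute names (self.num_classifiers, self.classifiers) mirror Example #6, everything else is an assumption for illustration, and the weight update of a full boosting round is deliberately omitted because it does not appear in the excerpts above.

    def train(self, trainingData, trainingLabels):
        # Illustrative sketch only: resample according to the current example
        # weights, then fit one weak learner per round. Attribute names follow
        # Example #6; anything else here is an assumption, not project API.
        n = len(trainingData)
        sample_weights = [1.0 / n] * n  # start from a uniform weighting
        for i in range(self.num_classifiers):
            sampleData, sampleLabels = self.sample_data(trainingData,
                                                        trainingLabels,
                                                        sample_weights)
            self.classifiers[i].train(sampleData, sampleLabels)
            # A boosting-style loop would now increase the weights of the
            # examples that classifiers[i] misclassified; that update is
            # omitted here.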
Example #30
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"

        if noisyDistance is None:
            temp = []
            for p in range(0, self.numParticles):
                temp.append(self.getJailPosition())
            self.listOfParticles = temp
            return

        beliefs = self.getBeliefDistribution()
        iterate = beliefs.keys()

        for p in iterate:
            trueDistance = util.manhattanDistance(p, pacmanPosition)
            beliefs[p] = beliefs[p] * emissionModel[trueDistance]
        beliefs.normalize()
        if beliefs.totalCount() == 0:
            self.initializeUniformly(gameState)
            return

        items = sorted(beliefs.items())
        distribution = [i[1] for i in items]
        values = [i[0] for i in items]
        self.listOfParticles = util.nSample(distribution, values,
                                            self.numParticles)
Example #31
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(
            noisyDistance)  # prob noisy distance given true distance
        pacmanPosition = gameState.getPacmanPosition()

        # create new local variable W, a vector of weighted counts for each value of X, initially zero
        W = util.Counter()

        if noisyDistance == None:
            jailPos = self.getJailPosition()
            self.particles = [jailPos] * self.numParticles
        else:
            beliefDist = self.getBeliefDistribution()

            for p in self.particles:
                trueDistance = util.manhattanDistance(p, pacmanPosition)
                W[p] = beliefDist[p] * emissionModel[trueDistance]

            if W.totalCount() == 0:
                self.initializeUniformly(gameState)
            else:
                values = []
                keys = []
                for key, value in W.items():
                    keys.append(key)
                    values.append(value)
                self.particles = util.nSample(values, keys, self.numParticles)
Example #32
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        # check for case when noisyDistance = None
        if (noisyDistance == None):
            newParticleList = []
            for dummy in range(self.numParticles):
                newParticleList.append(self.getJailPosition())
            self.particleList = newParticleList
            return
        # observation use the particle list
        allWeightsZero = True
        weights = []
        for p in self.particleList:
            distance = util.manhattanDistance(pacmanPosition, p)
            currentWeight = emissionModel[distance]
            weights.append(currentWeight)
            if (currentWeight != 0.0):
                allWeightsZero = False
        # resample the particles
        if (allWeightsZero == True):
            self.initializeUniformly(gameState)
        else:
            resampled = util.nSample(weights, self.particleList, self.numParticles)
            #print resampled 
            self.particleList = resampled
Example #33
0
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        belief = self.getBeliefDistribution()
        newBelief = util.Counter()

        if noisyDistance == None:
            self.particles = [
                self.getJailPosition() for particle in self.particles
            ]
            return

        for particle in belief:
            dist = util.manhattanDistance(particle, pacmanPosition)
            newBelief[particle] = emissionModel[dist] * belief[particle]

        if newBelief.totalCount() == 0:
            self.initializeUniformly(gameState)
            newBelief = self.getBeliefDistribution()

        newBelief = util.normalize(newBelief)

        keys = [key for key in newBelief]
        values = [newBelief[key] for key in keys]
        self.particles = util.nSample(values, keys, self.numParticles)
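Several of the docstrings also point at util.sample(Counter object) for drawing a single position. A minimal stand-in, assuming the Counter behaves like a dict of non-negative weights (again an assumption about the helper, not its actual source), might be:

import random

def sample(weight_table):
    # Hypothetical stand-in for util.sample: draw one key from a dict-like
    # table of non-negative weights, proportionally to its weight.
    total = float(sum(weight_table.values()))
    if total == 0:
        raise ValueError("cannot sample from an all-zero distribution")
    r = random.random() * total
    running = 0.0
    for key, weight in weight_table.items():
        running += weight
        if r <= running:
            return key
    return key  # only reachable through floating-point rounding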
Example #34
0
    def observe(self, observation, gameState):
        #print "observe"
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        
        weights = []
        priors = self.getBeliefDistribution()
        
        if(noisyDistance != None):
          for x in range (0, len(self.legalPositions)):
            p = self.legalPositions[x]
            trueDistance = util.manhattanDistance(p, pacmanPosition)
            priorBelief = priors[p]
            weights.append(emissionModel[trueDistance] * priorBelief)
            
          if sum(weights) == 0:
            self.initializeUniformly(gameState)
          else:
            self.particles = util.nSample(weights, self.legalPositions, self.numParticles)
        else:
          self.particles = [self.getJailPosition()] * self.numParticles



        "*** END YOUR CODE HERE ***"
Example #35
0
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        #calculates new weights depending on particles
        weights = util.Counter()
        for i in range(0, self.numParticles):
            p = self.particles[i]
            trueDistance = util.manhattanDistance(p, pacmanPosition)
            weights[p] += emissionModel[trueDistance]
        # the jail case: put the ghost in its cell
        if noisyDistance == None:
            pj = self.getJailPosition()
            for p in self.legalPositions:
                weights[p] = 0
            weights[pj] = 1
        weights.normalize()
        # the case where beliefs are reweighted to zero
        if weights.totalCount() == 0:
            self.initializeUniformly(gameState)
        else:
            # sampling according to new weights
            dist = weights.copy()
            items = sorted(dist.items())
            dist = [i[1] for i in items]
            values = [i[0] for i in items]
            self.particles = util.nSample(dist, values, self.numParticles)
Example #36
0
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        "*** YOUR CODE HERE ***"
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        samples = []
        if noisyDistance != None:
            weight = util.Counter()
            i = 0
            for particle in self.parDist:
                dist = util.manhattanDistance(pacmanPosition, particle)
                weight[i] = (emissionModel[dist])
                i += 1
            if weight.totalCount() == 0:
                self.initializeUniformly(gameState)
                samples = self.parDist
            else:
                values = [i[0] for i in weight.items()]
                sampleList = util.nSample(weight, values, self.numParticles)
                for i in sampleList:
                    samples.append(self.parDist[i])
        else:
            for i in range(self.numParticles):
                samples.append(self.getJailPosition())
        self.parDist = samples
        return self.getBeliefDistribution()
Example #37
0
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make
        sure to handle the special case where all particles have weight
        0 after reweighting based on observation. If this happens,
        resample particles uniformly at random from the set of legal
        positions (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, **all** particles should be updated so
             that the ghost appears in its prison cell, self.getJailPosition()

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
             prior distribution by calling initializeUniformly. The total weight
             for a belief distribution can be found by calling totalCount on
             a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution

        You may also want to use util.manhattanDistance to calculate the distance
        between a particle and pacman's position.
        """
        #print "IS THERE ANYBODY OUT TEHRE"
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        junk = []
        if noisyDistance == None:
            for k in range(self.numParticles):
                junk.append(self.getJailPosition())
            self.parcles = junk
            return
        counter = util.Counter()
        for p in self.parcles:
            t = util.manhattanDistance(p, pacmanPosition)
            counter[p] += emissionModel[t]
        counter.normalize()
        if counter.totalCount() == 0:
            self.initializeUniformly(gameState)
        else:
            items = counter.items()
            distribution = [i[1] for i in items]
            values = [i[0] for i in items]
            trash = util.nSample(distribution, values, self.numParticles)
            self.parcles = trash
Example #38
0
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make
        sure to handle the special case where all particles have weight
        0 after reweighting based on observation. If this happens,
        resample particles uniformly at random from the set of legal
        positions (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, **all** particles should be updated so
             that the ghost appears in its prison cell, self.getJailPosition()

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
             prior distribution by calling initializeUniformly. The total weight
             for a belief distribution can be found by calling totalCount on
             a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution

        You may also want to use util.manhattanDistance to calculate the distance
        between a particle and pacman's position.
        """

        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        weights = util.Counter()
        
        "*** YOUR CODE HERE ***"
        
        newParticleList = []
        
        for i in range(len(self.particleList)):
            trueDist = util.manhattanDistance(pacmanPosition, self.particleList[i])
            weights[i] = emissionModel[trueDist]
        if noisyDistance == None:
            for i in range(len(self.particleList)):
                self.particleList[i] = self.getJailPosition()
        elif all(x == 0 for x in weights.values()):
            self.initializeUniformly(gameState)
        else:
            newParticleList = util.nSample(weights,self.particleList, len(self.particleList))
            self.particleList = newParticleList
Example #39
0
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.

        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        # util.raiseNotDefined()
        if noisyDistance == None:
            self.particles = [self.getJailPosition()] * self.numParticles
        else:
            currentDistribution = self.getBeliefDistribution()
            for p in currentDistribution:
                trueDistance = util.manhattanDistance(p, pacmanPosition)
                currentDistribution[p] *= emissionModel[trueDistance]

            if currentDistribution.totalCount() == 0:
                self.initializeUniformly(gameState)
            else:
                items = sorted(currentDistribution.items())
                distribution = [v for k, v in items]
                value = [k for k, v in items]
                self.particles = util.nSample(distribution, value, self.numParticles)
Example #40
0
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make sure to
        handle the special case where all particles have weight 0 after
        reweighting based on observation. If this happens, resample particles
        uniformly at random from the set of legal positions
        (self.legalPositions).
        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell,
             self.getJailPosition()
             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.
          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeUniformly. The total
             weight for a belief distribution can be found by calling totalCount
             on a Counter object
        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        You may also want to use util.manhattanDistance to calculate the
        distance between a particle and Pacman's position.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        # "*** YOUR CODE HERE ***"
        # util.raiseNotDefined()

        if noisyDistance is None:
            ghostPos = self.getJailPosition()
            self.particles = [ghostPos for _ in xrange(self.numParticles)]
            return

        # Observation update: weight each particle by the emission model
        weights = util.Counter()
        for particle in self.particles:
            weights[particle] += emissionModel[util.manhattanDistance(pacmanPosition, particle)]

        if sum(weights.values()) == 0:
            self.initializeUniformly(gameState)
            return
        # resample
        # self.particles = [util.sampleFromCounter(weights) for _ in xrange(self.numParticles)]
        values = weights.keys()
        distribution = [weights[key] for key in values]
        self.particles = util.nSample(distribution, values, self.numParticles)
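Examples #31 through #40 all follow the same observe skeleton: jail the ghost on a None reading, otherwise reweight each particle by the emission model, reinitialize when every weight is zero, and resample. The condensed sketch below is a paraphrase of that shared pattern, with the framework pieces (emission model, distance function, reinitialization, sampler) passed in as plain arguments rather than taken from self, so it is not any single submission's code.

def observe_step(particles, emission_model, pacman_position, jail_position,
                 noisy_distance, reinitialize, manhattan, n_sample):
    # Condensed paraphrase of the observe pattern in the examples above.
    num_particles = len(particles)
    if noisy_distance is None:
        # special case 1: the ghost was captured, so every particle goes to jail
        return [jail_position] * num_particles
    weights = [emission_model[manhattan(p, pacman_position)] for p in particles]
    if sum(weights) == 0:
        # special case 2: the observation ruled out every particle
        return reinitialize()
    # resample particles in proportion to their observation likelihood
    return n_sample(weights, particles, num_particles)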
Example #41
0
  def distributeParticles(self, gameState, particles):
    newEnemyParticles = {}
    for enemyIndex in particles:
      probParticles = util.Counter()

      for (x, y) in particles[enemyIndex]:
        for (a, b) in self.getPotentialPositions(gameState, x, y):
          probParticles[(a, b)] += particles[enemyIndex][(x, y)]

      samples = util.nSample(probParticles.values(), probParticles.keys(), self.numParticles)

      newEnemyParticles[enemyIndex] = util.Counter()
      for sample in samples:
        if sample not in newEnemyParticles[enemyIndex]:
          newEnemyParticles[enemyIndex][sample] = 0
        newEnemyParticles[enemyIndex][sample] += 1

    return newEnemyParticles
Example #42
0
  def observe(self, noisyDistance, gameState,agentID):
    """
    Update beliefs based on the given distance observation.
    What if a ghost was eaten by an agent?
    The previous belief would be reinitialized, which is apparently unnecessary.
    We need a method that determines whether a given agent has been eaten; then, as with "go to jail", we simply put it at its initial position, gameState.getInitialAgentPosition(agentID).
    """
    AgentPosition = gameState.getAgentPosition(agentID)
    weights = [1 for i in range(self.numParticles)]
    for index in range(self.numParticles):
        for i in range(2):
            trueDistance = util.manhattanDistance(self.Particles[index][i], AgentPosition)
            weights[index] *= gameState.getDistanceProb(trueDistance, noisyDistance[self.enemies[i]])

    if sum(weights) == 0:
        self.initializeUniformly(gameState)
        return
    else:
        newParticles = util.nSample(weights, self.Particles, self.numParticles)
        self.Particles = newParticles
Example #43
0
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"
        pacmanPosition = gameState.getPacmanPosition()

        weights = util.Counter()
        total = 0
        # Use a counts array to avoid extra calls to getPositionDistribution()
        counts = util.Counter()

        for oldPos in self.particles:
            counts[oldPos] += 1
        for oldPos, count in counts.items():
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))
            for pos, prob in newPosDist.items():
                weights[pos] += count*prob
                total += count*prob

        # if all weights are 0
        if total == 0:
            self.initializeUniformly(gameState)
            return

        items = sorted(weights.items())
        distribution = [i[1] for i in items]
        values = [i[0] for i in items]
        self.particles = util.nSample(distribution,values,self.numParticles)
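The elapseTime above groups identical particles before querying the transition model, so getPositionDistribution is called once per distinct position instead of once per particle, and each new-position weight is then accumulated as count times probability. A bare sketch of that grouping idea (the helper name is made up for illustration):

def count_positions(particles):
    # Count duplicate particle positions so an expensive per-position model
    # only has to be evaluated once per distinct position.
    counts = {}
    for position in particles:
        counts[position] = counts.get(position, 0) + 1
    return counts

# e.g. count_positions([(1, 1), (2, 3), (1, 1)]) == {(1, 1): 2, (2, 3): 1}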
    def elapseTime(self, gameState):
        """
        Update beliefs for a time step elapsing.

        As in the elapseTime method of ExactInference, you should use:

          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))

        to obtain the distribution over new positions for the ghost, given its
        previous position (oldPos) as well as Pacman's current position.

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution.
        """
        "*** YOUR CODE HERE ***"        
        #print "elapseTime"
        weights = util.Counter()
        priors = self.getBeliefDistribution()
        
        for p in self.legalPositions:
          
          newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, p))
          #print newPosDist
          "newPostDist[p] = Pr( ghost is at position p at time t + 1 | ghost is at position oldPos at time t )"
          
          for newPos, prob in newPosDist.items():
            weights[newPos] += prob * priors[p]
          
        if sum(weights.values()) == 0:
          self.initializeUniformly(gameState)
        else:
          dist = []
          for x in range(0, len(self.legalPositions)):
            pos = self.legalPositions[x]
            dist.append(weights[pos])
          self.particles = util.nSample(dist, self.legalPositions, self.numParticles)
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of 0)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"


        for ghostIndex in range(self.numGhosts):
          # similar to ParticleFilter
          newWeightsCounter = util.Counter()
          
          if noisyDistances[ghostIndex] is None:
              self.particlesList = [self.getParticleWithGhostInJail(particle, ghostIndex)
                                for particle in self.particlesList]
          else:
            for p in self.legalPositions:
              trueDistance = util.manhattanDistance(pacmanPosition, p)
              if emissionModels[ghostIndex][trueDistance] > 0.0:
                newWeightsCounter[p] = emissionModels[ghostIndex][trueDistance]

            particlesDistribution = []
            particlesForTheGhost = []
            # update particlesList by resampling
            for particle in self.particlesList:
              particlesDistribution.append(newWeightsCounter[particle[ghostIndex]])
              particlesForTheGhost.append(particle[ghostIndex])
            
            particlesDistribution = util.normalize(particlesDistribution)

            # check for all zero distribution
            if sum(particlesDistribution) == 0.0:
              self.initializeParticles()
            else:
              particlesForTheGhost = util.nSample(particlesDistribution, particlesForTheGhost, self.numParticles)
              # replace the position for the current ghost
              self.particlesList = [self.replaceOnePositionInParticle(self.particlesList[i], particlesForTheGhost[i], ghostIndex)
                                    for i in range(self.numParticles)]
Example #46
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of 0)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """

        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]
        pacmanPosition = gameState.getPacmanPosition()
    
        myWeight = util.Counter()
        for part in self.particles:
            p = list(part)
            pweight = 1
            for i in range(self.numGhosts):
                if noisyDistances[i] != None:
                    dist = util.manhattanDistance(pacmanPosition, p[i])
                    pweight = pweight * emissionModels[i][dist]
            myWeight[part] += pweight
                    
        #Make sure that all weights aren't 0

        if myWeight.totalCount() == 0:
            self.initializeParticles()
            for p in range(self.numParticles):
                for i in range(self.numGhosts):
                    if noisyDistances[i] == None:
                        self.particles[p] = self.getParticleWithGhostInJail(self.particles[p],i)
            return

        #Sample from weights
        newParticles = []
        sample = util.nSample(myWeight.values(), myWeight.keys(), self.numParticles)
        for p in range(self.numParticles):
            for i in range(self.numGhosts):
                if noisyDistances[i] == None:
                    sample[p] = self.getParticleWithGhostInJail(sample[p],i)
        self.particles = sample
        """
        particleMap = {}
        for i in range(self.numGhosts):

           # for part in self.particles:
           #     if part[i] in gameState.data.layout.walls.asList():
           #         print "before observe"
            #generate weights
            myModel = emissionModels[i]
            myWeight = util.Counter()
            if noisyDistances[i] == None:
                particleMap[i] = [self.getJailPosition(i) for p in range(self.numParticles)]
                continue 
            else:
                for p in self.particles:
                    dist = util.manhattanDistance(p[i],pacmanPosition)
                    myWeight[p[i]] += myModel[dist]

            #Make sure that all weights aren't 0
            if  myWeight.totalCount()==0:
                myWeight = util.Counter()
                for position in self.legalPositions:
                    myWeight[position] += 1

            #Sample from weights
            particleMap[i] = util.nSample(myWeight.values(), myWeight.keys(), self.numParticles)

            if noisyDistances[i] == None:
                particleMap[i] = [self.getJailPosition(i) for p in range(self.numParticles)]
            
           # for part in particleMap[i]:
            #    if part in gameState.data.layout.walls.asList():
            #        print "after observe"
        self.particles = zip(*particleMap.values())
        """
Example #47
0
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated so
             that the ghost appears in its prison cell, position self.getJailPosition(i)
             where "i" is the index of the ghost.

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
              prior distribution by calling initializeParticles. After all particles
              are generated randomly, any ghosts that are eaten (have noisyDistance of 0)
              must be changed to the jail Position. This will involve changing each
              particle if a ghost has been eaten.

        ** Remember ** We store particles as tuples, but to edit a specific particle,
        it must be converted to a list, edited, and then converted back to a tuple. Since
        this is a common operation when placing a ghost in the jail for a particle, we have
        provided a helper method named self.getParticleWithGhostInJail(particle, ghostIndex)
        that performs these three operations for you.

        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        #print noisyDistances
        #print self.particles
        for g in range(0, self.numGhosts):
            if noisyDistances[g] == None:
                junk = []
                for xx in self.particles:
                    junk.append(self.getParticleWithGhostInJail(xx, g))
                self.particles = junk
                #print self.particles
                #util.raiseNotDefined()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]
        #print emissionModels
        h = util.Counter()
        for x in self.particles:
            w = 1.0
            #print x
            for k in range(0, self.numGhosts):
                if noisyDistances[k] != None:
                    #print k
                    z = util.manhattanDistance(pacmanPosition, x[k])
                    #print emissionModels
                    #print z
                    #print emissionModels[k][z].__str__() + " HELLO " 
                #util.raiseNotDefined()
                    w *= emissionModels[k][z]
            #print w
            h[x] += w
        #print emissionModels
        #h.normalize()
        #print h
        if h.totalCount() == 0:
            self.initializeParticles()
            garbage = []
            for a in range(0, self.numGhosts):
                for x in self.particles:
                    if noisyDistances[a] == None:
                        garbage.append(self.getParticleWithGhostInJail(x, a))
                    else:
                        garbage.append(x)
            self.particles = garbage
            return
        items = h.items()
        distribution = [i[1] for i in items]
        values = [i[0] for i in items]
        trash = util.nSample(distribution, values, self.numParticles)
        #print trash
        self.particles = trash
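The joint-filter docstrings describe getParticleWithGhostInJail as converting the particle tuple to a list, editing one ghost's slot, and converting back. A minimal sketch following that description (the jail position is passed in explicitly here, whereas the real helper presumably looks it up itself):

def particle_with_ghost_in_jail(particle, ghost_index, jail_position):
    # Tuples are immutable, so copy to a list, edit one slot, re-tuple.
    edited = list(particle)
    edited[ghost_index] = jail_position
    return tuple(edited)

# e.g. particle_with_ghost_in_jail(((3, 4), (7, 2)), 1, (1, 1)) == ((3, 4), (1, 1))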
Example #48
0
def nSampleFromCounter(ctr, n):
    items = sorted(ctr.items())
    return util.nSample([v for k,v in items], [k for k,v in items], n)
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.

        To loop over the ghosts, use:

          for i in range(self.numGhosts):
            ...

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.

             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.

          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.

        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        "*** YOUR CODE HERE ***"

        # jail edge case
        for i in range(self.numGhosts):
            if noisyDistances[i] == None:
                for j, particle in enumerate(self.particles):
                    self.particles[j] = self.getParticleWithGhostInJail(particle, i)

        # Step 1: calculate weights
        weights = util.Counter()
        for particle, N in self.getBeliefDistribution().items():
            prob = 1.0
            for i in range(self.numGhosts):
                if noisyDistances[i] != None: 
                    distance = util.manhattanDistance(particle[i], pacmanPosition)
                    prob *= emissionModels[i][distance]
            weights[particle] += prob*N
        weights.normalize()


        # Step 2: resample based on the weights
        if weights.totalCount() == 0:
            self.initializeParticles() # edge case
        else:
            distribution = []
            values = []
            for pos, prob in weights.items():
                values.append(pos)
                distribution.append(prob)

            self.particles = util.nSample(distribution, values, self.numParticles)
    def observeState(self, gameState):
        """
        Resamples the set of particles using the likelihood of the noisy
        observations.
        To loop over the ghosts, use:
          for i in range(self.numGhosts):
            ...
        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, all particles should be updated
             so that the ghost appears in its prison cell, position
             self.getJailPosition(i) where `i` is the index of the ghost.
             As before, you can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None.
          2) When all particles receive 0 weight, they should be recreated from
             the prior distribution by calling initializeParticles. After all
             particles are generated randomly, any ghosts that are eaten (have
             noisyDistance of None) must be changed to the jail Position. This
             will involve changing each particle if a ghost has been eaten.
        self.getParticleWithGhostInJail is a helper method to edit a specific
        particle. Since we store particles as tuples, they must be converted to
        a list, edited, and then converted back to a tuple. This is a common
        operation when placing a ghost in jail.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts:
            return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]

        #Update jailed ghosts
        #print emissionModels



        "*** YOUR CODE HERE ***"
        for g in range(self.numGhosts):
          if(noisyDistances[g] == None):
            for x in range(0, self.numParticles):
              self.particles[x] = self.getParticleWithGhostInJail(self.particles[x], g)  
            for y in range(0, len(self.permutations)):
              self.permutations[y] = self.getParticleWithGhostInJail(self.permutations[y], g)  

        
        weights = [1.0] * len(self.permutations) 
        priors = self.getBeliefDistribution()

        for x in range (0, len(self.permutations)):
          p = self.permutations[x]
          weights[x] *= priors[p]
          
          for i in range(self.numGhosts):
            if noisyDistances[i] != None:
              trueDistance = util.manhattanDistance(p[i], pacmanPosition)
              weights[x] *= emissionModels[i][trueDistance]
        
        if sum(weights) == 0:
          self.initializeParticles()
          for g in range(self.numGhosts):
            if(noisyDistances[g] == None):
              for x in range(0, self.numParticles):
                self.particles[x] = self.getParticleWithGhostInJail(self.particles[x], g)
        else:
          self.particles = util.nSample(weights, self.permutations, self.numParticles)
    def observe(self, observation, gameState):
        """
        Update beliefs based on the given distance observation. Make
        sure to handle the special case where all particles have weight
        0 after reweighting based on observation. If this happens,
        resample particles uniformly at random from the set of legal
        positions (self.legalPositions).

        A correct implementation will handle two special cases:
          1) When a ghost is captured by Pacman, **all** particles should be updated so
             that the ghost appears in its prison cell, self.getJailPosition()

             You can check if a ghost has been captured by Pacman by
             checking if it has a noisyDistance of None (a noisy distance
             of None will be returned if, and only if, the ghost is
             captured).

          2) When all particles receive 0 weight, they should be recreated from the
             prior distribution by calling initializeUniformly. The total weight
             for a belief distribution can be found by calling totalCount on
             a Counter object

        util.sample(Counter object) is a helper method to generate a sample from
        a belief distribution

        You may also want to use util.manhattanDistance to calculate the distance
        between a particle and pacman's position.
        """

        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        "*** YOUR CODE HERE ***"
        newWeights = util.Counter()

        #print "\n********************\n"
        #print "noisyDistance: "
        #print noisyDistance
        #print "emissionModel: "
        #print emissionModel
        #print "\n********************\n"
        
        if noisyDistance is None:
          self.particlesList = [self.getJailPosition()] * self.numParticles
        else:
          # calculate weights
          for p in self.legalPositions:
            trueDistance = util.manhattanDistance(pacmanPosition, p)
            if emissionModel[trueDistance] > 0: newWeights[p] = emissionModel[trueDistance]

          # resample all the particles
          # distribution for each particle
          particlesDist = []

          for i in range(self.numParticles):
            particlesDist.append(newWeights[self.particlesList[i]])
          
          # use util.nSample for speed up reason
          if sum(particlesDist)==0:
            self.initializeUniformly(gameState)
          else:
            particlesDist = util.normalize(particlesDist)
            self.particlesList = util.nSample(particlesDist, self.particlesList, self.numParticles)
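Across the observeState examples, a joint particle's weight is the product of each tracked ghost's observation likelihood, with jailed ghosts (None readings) contributing no evidence. A condensed, framework-free paraphrase of that reweighting step, with the model pieces passed in as plain arguments (an illustration of the pattern, not any single submission's code):

def joint_particle_weight(particle, noisy_distances, emission_models,
                          pacman_position, manhattan):
    # Multiply the per-ghost observation likelihoods for one joint particle;
    # a None reading means that ghost is jailed and is skipped.
    weight = 1.0
    for i, noisy in enumerate(noisy_distances):
        if noisy is None:
            continue
        true_distance = manhattan(particle[i], pacman_position)
        weight *= emission_models[i][true_distance]
    return weight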