def __init__(self, index):
    """Construct the defensive agent and reset its patrol state."""
    CaptureAgent.__init__(self, index)
    # No patrol target chosen yet, and no food layout observed yet.
    self.target = None
    self.lastObservedFood = None
    # Maps each patrol point to the probability of picking it as target.
    self.patrolDict = {}
def __init__(self, index, timeForComputing=.1, actionFn=None, alpha=0.2,
             epsilon=0.05, gamma=0.8, numTraining=100,
             extractor='SimpleExtractor'):
    """
    Sets options, which can be passed in via the Pacman command line
    using -a alpha=0.5,...

    alpha       - learning rate
    epsilon     - exploration rate
    gamma       - discount factor
    numTraining - number of training episodes, i.e. no learning after
                  these many episodes
    """
    # Default to the state's own legal-action list when none is supplied.
    if actionFn is None:
        actionFn = lambda state: state.getLegalActions()
    self.alpha = float(alpha)
    self.epsilon = float(epsilon)
    self.discountRate = float(gamma)
    self.numTraining = int(numTraining)
    self.actionFn = actionFn
    # Episode bookkeeping.
    self.episodesSoFar = 0
    self.accumTrainRewards = 0.0
    self.accumTestRewards = 0.0
    # Resolve the feature extractor by name and instantiate it.
    self.featExtractor = util.lookup(extractor, globals())()
    # Containers for Q-values and feature weights.
    self.q_values = util.Counter()
    self.weights = util.Counter()
    # Capture Agent constructor runs last.
    CaptureAgent.__init__(self, index, timeForComputing)
def __init__(self, index, timeForComputing=.1):
    """Initialize escape/plan bookkeeping for this agent.

    index            - agent index in the game
    timeForComputing - per-move compute budget forwarded to CaptureAgent
    """
    # FIX: forward the caller-supplied timeForComputing instead of
    # hard-coding .1 — the parameter was previously accepted but ignored.
    CaptureAgent.__init__(self, index, timeForComputing=timeForComputing)
    self.escapepath = []
    self.eaten = 0
    # Board dimensions; filled in once the game state is available.
    self.height = 0
    self.width = 0
    # Two-slot plan container (one list per plan track).
    self.plan = [[], []]
def __init__(self, index, communication):
    """Set up simulation parameters, feature weights, and patrol state.

    index         - agent index in the game
    communication - shared communication object between teammates
    """
    CaptureAgent.__init__(self, index)
    self.communication = communication
    self.simulationResult = []
    # Exploration schedule: epsilon decays toward epsilonFinal after
    # startDecay steps.
    self.epsilon = 1
    self.epsilonFinal = 0.3
    self.discount = 0.7
    self.depth = 5
    self.startDecay = 20
    self.offensive = True
    self.lastAction = None
    # FIX: removed a dead `self.weights = {}` assignment that was
    # immediately overwritten by the literal below.
    self.weights = {
        'minGhostDistances': 100,
        'minCapsuleChasingDistance': -5,
        '#ofLeftFoods': -100,
        'score': 100,
        '#ofChasingCapsules': -100,
        'minFoodDistance': -3,
        'distanceFromStart': -10
    }
    self.target = None
    self.lastObservedFood = None
    self.patrolDict = {}
    self.timeReverse = 0
def __init__(self, index):
    """Initialize Q-learning hyper-parameters and weight storage."""
    # Feature weights learned over episodes.
    self.weights = util.Counter()
    self.episodesSoFar = 0
    # Standard Q-learning hyper-parameters.
    self.epsilon = 0.05
    self.gamma = 0.8
    self.alpha = 0.2
    CaptureAgent.__init__(self, index)
def __init__(self, index, isRed, actionFn=None, numTraining=0,
             epsilon=0.05, alpha=0.2, discount=0.8):
    """Initialize training parameters and team assignment.

    index       - agent index in the game
    isRed       - True when this agent plays on the red team
    actionFn    - function mapping a state to its legal actions
    numTraining - number of training episodes
    epsilon     - exploration rate
    alpha       - learning rate
    discount    - discount factor
    """
    # Initialize the superclass first.
    CaptureAgent.__init__(self, index)
    # Fall back to the state's legal actions for this agent's index.
    if actionFn is None:
        actionFn = lambda state: state.getLegalActions(index)
    self.actionFn = actionFn
    # Episode / reward bookkeeping for training.
    self.episodesSoFar = 0
    self.accumTrainRewards = 0.0
    self.accumTestRewards = 0.0
    self.numTraining = int(numTraining)
    self.epsilon = epsilon
    self.alpha = alpha
    self.discount = discount
    self.weights = util.Counter()
    self.red = isRed
    print("i'm on RED team?",self.red)
    print("[DEBUG] Agent initialized")
def __init__(self, index, isRed, actionFn=None, numTraining=0,
             epsilon=0.0, alpha=0.2, discount=0.8):
    """Initialize the agent; reinforcement fields are kept for later use.

    index  - agent index in the game
    isRed  - True when this agent plays on the red team
    """
    # Initialize the superclass.
    CaptureAgent.__init__(self, index)
    # Default to the state's legal actions for this agent's index.
    if actionFn is None:
        actionFn = lambda state: state.getLegalActions(index)
    self.actionFn = actionFn
    # Reinforcement-learning bookkeeping — unused since the agent became
    # heuristic search; retained for further improvement later on.
    self.episodesSoFar = 0
    self.accumTrainRewards = 0.0
    self.accumTestRewards = 0.0
    self.numTraining = int(numTraining)
    self.epsilon = epsilon
    self.alpha = alpha
    self.discount = discount
    self.weights = util.Counter()
    # Tracks how often each board position has been visited.
    self.visitedPositions = {}
    self.red = isRed
def __init__(self, index, timeForComputing=.1, numTraining=0,
             epsilon=0.5, alpha=0.5, gamma=1, **args):
    """
    actionFn: Function which takes a state and returns the list of legal
    actions - REMOVED

    alpha       - learning rate
    epsilon     - exploration rate
    gamma       - discount factor
    numTraining - number of training episodes, i.e. no learning after
                  these many episodes
    """
    CaptureAgent.__init__(self, index, timeForComputing)
    # Episode / reward accumulators.
    self.episodesSoFar = 0
    self.accumTrainRewards = 0.0
    self.accumTestRewards = 0.0
    # Coerce hyper-parameters (they may arrive as command-line strings).
    self.numTraining = int(numTraining)
    self.epsilon = float(epsilon)
    self.alpha = float(alpha)
    self.discount = float(gamma)
    self.qValues = util.Counter()
def __init__(self, *args, **kwargs):
    """Initialize the agent; load pre-trained weights when not training."""
    self.numGamePoint = 0
    # Feature weights, learned or pre-loaded below.
    self.weights = util.Counter()
    # numTraining is consumed here so it is not forwarded to CaptureAgent.
    self.numTraining = kwargs.pop('numTraining', 0)
    # Exploration / learning hyper-parameters.
    self.trainingFindingPower = .5
    self.testingFindingPower = .05
    self.learningPower = .2
    self.findingPower = .8
    self.discountFactor = .99
    if self.numTraining == 0:
        # Not training: start from previously learned feature weights.
        self.weights.update({
            'score': 0.6809099995971538,
            'numMyTeamFood': 23.6565508664964,
            'opponent_0_distance': 2.0699359632136902,
            'numOpponentTeamFood': 23.633853866509785,
            'bias': 115.8643705168336,
            'opponent_2_distance': 1.9917190914963816,
            'nearestFoodAStar': -1.9670769570603142
        })
    CaptureAgent.__init__(self, *args, **kwargs)
def __init__(self, index):
    """Set up simulation parameters, feature weights, and patrol state."""
    CaptureAgent.__init__(self, index)
    self.simulationResult = []
    # Exploration schedule: epsilon decays toward epsilonFinal after
    # startDecay steps.
    self.epsilon = 1
    self.epsilonFinal = 0.3
    self.discount = 0.7
    self.depth = 5
    self.startDecay = 20
    self.offensive = True
    self.lastAction = None
    # FIX: removed a dead `self.weights = {}` assignment that was
    # immediately overwritten by the literal below.
    self.weights = {
        'minGhostDistances': 100,
        'minCapsuleChasingDistance': -5,
        '#ofLeftFoods': -100,
        'score': 100,
        '#ofChasingCapsules': -100,
        'minFoodDistance': -3,
        'distanceFromStart': -10
    }
    self.target = None
    self.lastObservedFood = None
    self.attacker = None
    # This variable will store our patrol points and the agent
    # probability to select a point as target.
    self.patrolDict = {}
def __init__(self, index):
    """Construct the agent and reset patrol/food-tracking state."""
    CaptureAgent.__init__(self, index)
    self.target = None
    # Food list snapshot from the previous tick, used to detect eaten food.
    self.lastTickFoodList = []
    self.isFoodEaten = False
    self.patrolDict = {}
    self.tick = 0
    self.gazeboDict = {}
def __init__(self, index):
    """Construct the agent with defensive bookkeeping state."""
    CaptureAgent.__init__(self, index)
    # Variables used to verify whether the agent is locked.
    self.defendingFood = []
    self.index = index
    self.target = None
    self.flag = 0
def __init__(self, index):
    """Initialize Q-learning weights and hyper-parameters."""
    CaptureAgent.__init__(self, index)
    self.weights = util.Counter()
    # No training episodes by default; episodesCur counts completed ones.
    self.numTraining = 0
    self.episodesCur = 0
    self.epsilon = 0.05
    self.discount = 0.8
    self.alpha = 0.2
def __init__(self, index, epsilon=0, timeForComputing=.1, depth=4,
             times=20, alpha=0.9):
    """Initialize search depth, simulation count, and learning rates.

    index   - agent index in the game
    epsilon - exploration rate
    depth   - search depth
    times   - number of simulations per move
    alpha   - learning rate
    """
    self.depth = depth
    self.times = times
    self.alpha = float(alpha)
    self.powerTimer = 0
    # FIX: the original only stored the misspelled `self.epilon`.  Keep
    # that name for backward compatibility with any existing readers, and
    # also expose the correctly spelled `self.epsilon`.
    self.epilon = float(epsilon)
    self.epsilon = self.epilon
    # FIX: forward the caller's timeForComputing instead of hard-coding .1.
    CaptureAgent.__init__(self, index, timeForComputing=timeForComputing)
def __init__(self, index):
    """Construct the agent and reset food-targeting state."""
    CaptureAgent.__init__(self, index)
    self.currentFood = []
    self.flag = 0
    self.status = 1
    # Planned path and current target position.
    self.p = []
    self.target = ()
    self.isTargetToFood = False
def __init__(self, index):
    """Construct the agent with lock-detection bookkeeping."""
    CaptureAgent.__init__(self, index)
    # Variables used to verify whether the agent is locked.
    self.foodNum = 999
    self.trappedTime = 0
    self.defendingFood = []
    self.flag = 0
    self.target = ()
def __init__(self, index, red, timeForComputing=.1):
    """Construct the agent; geometry fields are computed lazily later."""
    CaptureAgent.__init__(self, index, timeForComputing)
    # NOTE(review): this explicit object.__init__ looks redundant; kept
    # for byte-identical behavior.
    object.__init__(self)
    # Board geometry and weights, filled in once the game starts.
    self._half = None
    self._height = None
    self._maxDist = None
    self._weights = None
    self._width = None
    self.red = red
def __init__(self, index, timeForComputing=.1, **args):
    """You can initialize Q-values here...

    Required keys in args: 'epsilon', 'alpha', 'gamma', 'numTraining'.
    """
    # FIX: forward the caller's timeForComputing instead of hard-coding .1
    # — the parameter was previously accepted but ignored.
    CaptureAgent.__init__(self, index, timeForComputing=timeForComputing)
    self.qValues = util.Counter()
    self.epsilon = args['epsilon']
    self.alpha = args['alpha']
    self.gamma = args['gamma']
    # FIX: also store the correctly spelled attribute; the misspelled
    # `numTraning` is kept so any existing readers keep working.
    self.numTraning = args['numTraining']
    self.numTraining = args['numTraining']
def __init__(self, gameState):
    """Construct the agent and reset food/scare tracking.

    NOTE(review): the parameter is named `gameState` but is forwarded
    where CaptureAgent expects an agent index — confirm against callers.
    """
    CaptureAgent.__init__(self, gameState)
    self.foodHeld = 0
    self.legal_y = []
    self.midx = 0
    # One most-likely-position slot per agent (assumes 4 agents).
    self.mostlikely = [None] * 4
    self.scareTime = 0
    self.scared = False
def __init__(self, index):
    """Construct the agent and reset defensive-area state."""
    CaptureAgent.__init__(self, index)
    # FIX: removed a duplicate `self.catchState = False` — the original
    # assigned it both first and last.
    self.catchState = False
    self.coreDefendingArea = []
    self.target = None
    self.remainFoodList = []
    self.number = 0
def __init__(self, index):
    """Construct the agent and reset food/position-estimation state."""
    CaptureAgent.__init__(self, index)
    self.firstTurnComplete = False
    # Starting food totals for each team, filled in on the first turn.
    self.startingFood = 0
    self.theirStartingFood = 0
    # Legal board positions and a belief estimate over enemy positions.
    self.legalPositions = None
    self.estimate = util.Counter()
def __init__(self, index, teammates=None, timeForComputing=.1):
    """Construct the agent and cross-link it with its teammate(s)."""
    CaptureAgent.__init__(self, index, timeForComputing)
    # Record the supplied teammate, if any.
    self.teammates = [] if teammates is None else [teammates]
    # Back-link ourselves into any teammate that has no teammates yet.
    for mate in self.teammates:
        if not mate.teammates:
            mate.teammates.append(self)
def __init__(self, index):
    """Initialize Q-learning hyper-parameters and state."""
    CaptureAgent.__init__(self, index)
    self.alpha = 0.2
    self.reward = 1
    self.gamma = 0.8
    self.epsilon = 0.01
    # Last computed Q(s, a) and current Q estimate.
    self.qSa = None
    self.Q_present = 0
    # FIX: the original only stored the misspelled `numTrainig`.  Keep it
    # for backward compatibility and also expose the correct spelling.
    self.numTrainig = 10
    self.numTraining = 10
def __init__(self, index):
    """Initialize learning weights and hyper-parameters.

    NOTE(review): reads the module-level `interestingValues` mapping for
    an optional 'numTraining' override.
    """
    CaptureAgent.__init__(self, index)
    self.weights = util.Counter()
    # Pull the training-episode count from the shared config when present.
    self.numTraining = (interestingValues['numTraining']
                        if 'numTraining' in interestingValues else 0)
    self.episodesSoFar = 0
    self.epsilon = 0.05
    self.discount = 0.8
    self.alpha = 0.2
def __init__(self, gameState):
    """Construct the agent and reset timing/expectation state.

    NOTE(review): the parameter is named `gameState` but is forwarded
    where CaptureAgent expects an agent index — confirm against callers.
    """
    CaptureAgent.__init__(self, gameState)
    # One expectation slot per agent (assumes 4 agents).
    self.expectation = [None] * 4
    self.godmodeClock = 0
    # Step-time measurement buffers.
    self.timerA = []
    self.timerB = []
    self.counter = 0
    self.foodNum = 0.0
def __init__(self, index, timeForComputing, learning_rate,
             exploration_rate, discount, numTraining):
    """Initialize learning hyper-parameters (coerced from strings).

    learning_rate    - alpha
    exploration_rate - epsilon
    discount         - gamma
    numTraining      - number of training episodes
    """
    CaptureAgent.__init__(self, index, timeForComputing)
    self.alpha = float(learning_rate)
    self.epsilon = float(exploration_rate)
    self.gamma = float(discount)
    self.numTraining = int(numTraining)
    # True until the first observation arrives.
    self.has_no_observation = True
    self.episodesSoFar = 0
    self.actionHistory = []
def __init__(self, index, timeForComputing=.1):
    """Initialize particle-filter tracking and search parameters."""
    CaptureAgent.__init__(self, index, timeForComputing)
    self.depth = 4
    self.numParticles = 10
    # Candidate moves: stay put plus the four cardinal directions.
    self.steps = [(0, 0), (0, 1), (1, 0), (-1, 0), (0, -1)]
    self.teammateLocations = {}
    # Particle sets tracking enemy positions.
    self.enemyParticles = {}
    self.lastAction = None
def __init__(self, index, timeForComputing=.1):
    """Register the agent on the shared team list and pick its side.

    Even-indexed agents are red (left half); odd-indexed are blue
    (right half).  `middle` is the rally point on the agent's own side.
    """
    CaptureAgent.__init__(self, index, timeForComputing)
    team.append(self)
    mid_y = ReflexCaptureAgent.MAP_HEIGHT / 2
    if self.index % 2 == 0:
        self.isRed = True
        self.middle = (ReflexCaptureAgent.MAP_WIDTH / 4, mid_y)
    else:
        self.isRed = False
        self.middle = (ReflexCaptureAgent.MAP_WIDTH * 3 / 4, mid_y)
def __init__(self, index):
    """Initialize learning weights and attack/defence history state."""
    CaptureAgent.__init__(self, index)
    self.weights = util.Counter()
    self.discountFactor = 0.7
    # Valid board positions, keyed for quick lookup.
    self.ValidPos = {}
    self.PrevAction = None
    # Histories of attack and defence decisions.
    self.AttackHistory = []
    self.DefenceHistory = []
    # Entry points into enemy/own territory; computed later.
    self.offensiveEntry = None
    self.defensiveEntry = None
def __init__(self, index):
    """Construct the agent and reset position-tracking state."""
    CaptureAgent.__init__(self, index)
    self.legalPositions = []
    # Per-agent tuple of (boolean, belief distribution,
    # actual/most probable position).
    # TODO: Assumes 6 agents, will not always work.
    self.positions = [None, None, None, None, None, None]
    self.firstMove = True
def __init__(self, index, timeForComputing=.1):
    """Construct the agent and reset visibility/food tracking."""
    CaptureAgent.__init__(self, index, timeForComputing)
    # FIX: converted the Python 2 `print` statement to a function call so
    # the module parses under Python 3.
    print(self.red, index, timeForComputing)
    self.visibleAgents = []
    self.foodLeft = 0
    self.foodEaten = 0
    self.isPacman = False
    self.a = []
    self.first = True
    self.size = 0
def __init__(self, index):
    """Construct the agent and reset patrol/defence state."""
    CaptureAgent.__init__(self, index)
    self.coreDefendingArea = []
    self.target = None
    self.remainFoodList = []
    self.isFoodEaten = False
    # Patrol-point probabilities and tick bookkeeping.
    self.patrolDict = {}
    self.tick = 0
    self.gazeboDict = {}
    self.catchState = False
def __init__(self, index):
    """Construct the agent and reset food-targeting state."""
    CaptureAgent.__init__(self, index)
    self.currentFood = []
    self.flag = 0
    self.status = 1
    # Planned path and current target position.
    self.p = []
    self.target = ()
    self.isTargetToFood = False
def __init__(self, index):
    """Initialize learning weights and hyper-parameters."""
    CaptureAgent.__init__(self, index)
    self.weights = util.Counter()
    # No training episodes by default (a config-driven override was
    # previously sketched here but never enabled).
    self.numTraining = 0
    self.episodesSoFar = 0
    self.epsilon = 0.05
    self.discount = 0.8
    self.alpha = 0.2
def __init__(self, index):
    """Initialize Q-learning state backed by a FeatureHandler."""
    CaptureAgent.__init__(self, index)
    self.firstTurnComplete = False
    # Starting food totals, filled in on the first turn.
    self.startingFood = 0
    self.theirStartingFood = 0
    self.discount = .9
    self.alpha = 0.002
    # Feature extraction is delegated to the project's FeatureHandler.
    self.featureHandler = FeatureHandler()
    self.agentType = 'basicQLearningAgent'
    # Weights are loaded/learned later; None until then.
    self.weights = None
    self.explorationRate = 0.3
def __init__(self, index, alpha, epsilon):
    """Initialize learning rates and enemy-position estimation state.

    alpha   - learning rate: higher means learn in larger steps
    epsilon - exploration rate: higher means explore more
    """
    CaptureAgent.__init__(self, index)
    self.alpha = alpha
    self.epsilon = epsilon
    self.firstTurnComplete = False
    self.startingFood = 0
    self.theirStartingFood = 0
    # Used for estimating the enemy position.
    self.legalPositions = None
    self.estimate = util.Counter()
def __init__(self, index, timeForComputing=.1, numTraining=0,
             epsilon=0.5, alpha=0.5, gamma=1, **args):
    """
    actionFn: Function which takes a state and returns the list of legal
    actions - REMOVED

    alpha       - learning rate
    epsilon     - exploration rate
    gamma       - discount factor
    numTraining - number of training episodes, i.e. no learning after
                  these many episodes
    """
    CaptureAgent.__init__(self, index, timeForComputing)
    # Episode / reward accumulators.
    self.episodesSoFar = 0
    self.accumTrainRewards = 0.0
    self.accumTestRewards = 0.0
    # Coerce hyper-parameters (they may arrive as command-line strings).
    self.numTraining = int(numTraining)
    self.epsilon = float(epsilon)
    self.alpha = float(alpha)
    self.discount = float(gamma)
    self.qValues = util.Counter()
def __init__(self, index, actionFn=None, numTraining=100,
             epsilon=0.07, alpha=0.2, gamma=0.7):
    """
    actionFn: Function which takes a state and returns the list of legal
    actions

    alpha       - learning rate
    epsilon     - exploration rate
    gamma       - discount factor
    numTraining - number of training episodes, i.e. no learning after
                  these many episodes
    """
    CaptureAgent.__init__(self, index)
    # Default to the state's own legal-action list.
    if actionFn is None:
        actionFn = lambda state: state.getLegalActions()
    self.actionFn = actionFn
    self.episodesSoFar = 0
    self.accumTrainRewards = 0.0
    self.accumTestRewards = 0.0
    self.numTraining = int(numTraining)
    self.epsilon = float(epsilon)
    self.alpha = float(alpha)
    self.discount = float(gamma)
    # Begin the first training episode immediately.
    self.startEpisode()
def __init__(self, index, timeForComputing=.1):
    """Initialize learning parameters and normalized feature weights."""
    CaptureAgent.__init__(self, index, timeForComputing)
    self.discount = 0.9
    self.noise = 0
    self.alpha = 0.01
    self.epsilon = 0.05
    # Previous score/state/action, for computing reward deltas.
    self.preScore = 0
    self.preState = None
    self.preAction = None
    # FIX: removed a duplicate `self.weights = util.Counter()` — the
    # original performed the identical assignment twice in a row.
    self.weights = util.Counter()
    self.weights['distanceToFood'] = -1
    self.weights['successorScore'] = 100
    self.weights['foodsLeft'] = -1
    self.weights['foodsRemained'] = 1
    # Normalize so the weights form a distribution.
    self.weights = util.normalize(self.weights)
def __init__(self, index):
    """Construct the agent, wire up its LearningAgent, and reset flags."""
    CaptureAgent.__init__(self, index)
    self.firstMove = True
    self.visitedPositions = util.Counter()
    self.numEnemiesEaten = 0
    self.firstTurnComplete = False
    self.scaredEnemies = []
    # Create a new QLearning Agent backed by a per-agent pickle file.
    self.learning = LearningAgent("testReflexAgent_" + str(self.index) + ".p")
    # Flag indicating whether the enemy's capsule has been eaten.
    self.eatenEnemyCapsule = False
    # Flag indicating whether the enemy has been scared before.
    self.beenScaredBefore = False
    # Timer used to calculate when we are no longer scared.
    self.enemyScaredTimer = 0
    # NOTE(review): called with no argument; a `self.getWeights()` argument
    # was commented out in the original.
    self.learning.setWeights()
    self.lastFoodList = util.Counter()
    self.numDeaths = 0
def __init__(self, index, *args, **kwargs):
    """Register this agent in the GoalieAgent class-level roster."""
    CaptureAgent.__init__(self, index, *args, **kwargs)
    # The first agent ever constructed becomes the shared engine.
    if self.engine is None:
        GoalieAgent.engine = self
    # The most recently constructed agent is the caboose (the last agent).
    GoalieAgent.caboose = self
    GoalieAgent.team[index] = self
def __init__(self, gameState):
    """Construct the agent with distance/carry thresholds.

    NOTE(review): the parameter is named `gameState` but is forwarded
    where CaptureAgent expects an agent index — confirm against callers.
    """
    CaptureAgent.__init__(self, gameState)
    # Thresholds controlling when to react to enemies / return food.
    self.distanceThreshold = 5
    self.carryThreshold = 5
def __init__(self, index):
    """Delegate all construction to the CaptureAgent base class."""
    CaptureAgent.__init__(self, index)
def __init__(self, index, timeForComputing=0.1):
    """Construct the agent with an empty HMM tracker collection."""
    CaptureAgent.__init__(self, index, timeForComputing)
    # Hidden-Markov-model trackers, keyed per tracked agent.
    self.hmm_list = {}
def __init__(self, index, inferenceModule, timeForComputing=.1):
    """Construct the agent and attach its inference module.

    NOTE(review): timeForComputing is accepted but not forwarded to the
    CaptureAgent constructor — confirm whether that is intentional.
    """
    CaptureAgent.__init__(self, index)
    self.inferenceModule = inferenceModule
def __init__(self, index):
    """Construct the agent and reset enemy-position tracking."""
    CaptureAgent.__init__(self, index)
    self.enemyPos = []
    self.firstTurnComplete = False
    self.legalPositions = []
def __init__(self, index=0, timeForComputing=.1, inference="ExactInference",
             observeEnable=True, elapseTimeEnable=True):
    """Construct the agent with inference toggles.

    observeEnable    - whether observation updates run
    elapseTimeEnable - whether time-elapse updates run
    """
    CaptureAgent.__init__(self, index, timeForComputing)
    self.observeEnable = observeEnable
    self.elapseTimeEnable = elapseTimeEnable
    # Belief distributions over enemy positions.
    self.enemyBeliefs = []
def __init__(self, gameState):
    """Construct the agent and reset position/power tracking.

    NOTE(review): the parameter is named `gameState` but is forwarded
    where CaptureAgent expects an agent index — confirm against callers.
    """
    CaptureAgent.__init__(self, gameState)
    # One most-likely-position slot per agent (assumes 4 agents).
    self.mostlikely = [None] * 4
    self.powerTimer = 0
def __init__(self, index, timeForComputing=.1):
    """Construct the agent with an empty visible-agents list."""
    CaptureAgent.__init__(self, index, timeForComputing)
    self.visibleAgents = []
def __init__(self, index, debug):
    """Construct the agent, storing the debug flag."""
    CaptureAgent.__init__(self, index)
    self.debug = debug
def __init__(self, index):
    """Initialize base state and food bookkeeping."""
    CaptureAgent.__init__(self, index)
    self.firstTurnComplete = False
    # Starting food totals, filled in on the first turn.
    self.startingFood = 0
    self.theirStartingFood = 0
def __init__(self, index, teamData):
    """Construct the agent and store the shared team-data object."""
    CaptureAgent.__init__(self, index)
    self.firstTurnComplete = False
    # Starting food totals, filled in on the first turn.
    self.startingFood = 0
    self.theirStartingFood = 0
    self.teamData = teamData
def __init__(self, index, evalFn='betterEvaluationFunction', depth='1'):
    """Construct a minimax-style agent with an evaluation function.

    NOTE(review): `evalFn` is accepted but never consulted — the
    evaluation function is hard-wired to self.betterEvaluationFunction.
    Confirm whether a lookup by name was intended.
    """
    CaptureAgent.__init__(self, index)
    self.evaluationFunction = self.betterEvaluationFunction
    # Depth arrives as a string from the command line; coerce to int.
    self.depth = int(depth)
def __init__(self, index, factory, role=None):
    """Construct the agent with its factory, optional role, and history."""
    CaptureAgent.__init__(self, index)
    self.factory = factory
    self.role = role
    self.miscDistribution = None
    # FIFO record of moves made so far.
    self.moveHistory = util.Queue()
def __init__(self, gameState):
    """Forward construction to the CaptureAgent base class.

    NOTE(review): the parameter is named `gameState` but is forwarded
    where CaptureAgent expects an agent index — confirm against callers.
    """
    CaptureAgent.__init__(self, gameState)