def performAction(self, action):
    """Execute one discrete action and advance the step counter.

    POMDP tasks, as they have discrete actions, can be used by providing
    either an index, or an array with a 1-in-n coding (which can be
    stochastic); in the array case the index is drawn from the coding.

    :param action: an action index, or an ndarray with a 1-in-n
        (possibly stochastic) coding of the action.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # ndarray subclasses such as numpy.matrix.
    if isinstance(action, ndarray):
        # Draw a concrete action index from the (stochastic) coding;
        # tolerant=True renormalizes codings that do not sum to 1.
        action = drawIndex(action, tolerant=True)
    self.steps += 1
    EpisodicTask.performAction(self, action)
def performAction(self, action):
    """Execute one discrete action and advance the step counter.

    POMDP tasks, as they have discrete actions, can be used by providing
    either an index, or an array with a 1-in-n coding (which can be
    stochastic); in the array case the index is drawn from the coding.

    :param action: an action index, or an ndarray with a 1-in-n
        (possibly stochastic) coding of the action.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # ndarray subclasses such as numpy.matrix.
    if isinstance(action, ndarray):
        # Draw a concrete action index from the (stochastic) coding;
        # tolerant=True renormalizes codings that do not sum to 1.
        action = drawIndex(action, tolerant=True)
    self.steps += 1
    EpisodicTask.performAction(self, action)
def performAction(self, action):
    """Filtered mapping onto performAction of the underlying environment:
    rescale the raw action into the environment's range, bound its rate of
    change, then forward it.
    """
    self.incStep()
    # Rescale from the nominal [-1, 1] range into the environment's
    # force interval.
    scaled = (action + 1.0) / 2.0 * self.dif + self.env.fraktMin * self.env.dists[0]
    # Max force clipping: limit the per-step change relative to the
    # previously applied action.
    lower = self.action - self.maxSpeed
    upper = self.action + self.maxSpeed
    bounded = clip(scaled, lower, upper)
    EpisodicTask.performAction(self, bounded)
    # Keep a copy so the next step's clipping window is anchored here.
    self.action = bounded.copy()
def performAction(self, action):
    """Filtered mapping onto performAction of the underlying environment:
    rescale the raw action into the environment's range, bound its rate of
    change, then forward it.
    """
    self.incStep()
    # Rescale from the nominal [-1, 1] range into the environment's
    # force interval.
    scaled = (action + 1.0) / 2.0 * self.dif + self.env.fraktMin * self.env.dists[0]
    # Max force clipping: limit the per-step change relative to the
    # previously applied action.
    lower = self.action - self.maxSpeed
    upper = self.action + self.maxSpeed
    bounded = clip(scaled, lower, upper)
    EpisodicTask.performAction(self, bounded)
    # Keep a copy so the next step's clipping window is anchored here.
    self.action = bounded.copy()
def performAction(self, action):
    """Advance the time-step counter, then delegate the action to
    EpisodicTask.performAction.
    """
    # Count this step before forwarding.
    self.t = self.t + 1
    EpisodicTask.performAction(self, action)
def performAction(self, action):
    """Advance the time-step counter, then delegate the action to
    EpisodicTask.performAction.
    """
    # Count this step before forwarding.
    self.t = self.t + 1
    EpisodicTask.performAction(self, action)
def performAction(self, action):
    """Forward the action to EpisodicTask.performAction and remember it
    as the most recently applied action.
    """
    EpisodicTask.performAction(self, action)
    # Stored after the call so self.action always reflects the action
    # that was actually executed.
    self.action = action
def performAction(self, action):
    """Forward the action to EpisodicTask.performAction and remember it
    as the most recently applied action.
    """
    EpisodicTask.performAction(self, action)
    # Stored after the call so self.action always reflects the action
    # that was actually executed.
    self.action = action