def _setup(self, conditions):
    """Initialize the agent's Q-table once the environment is known.

    Delegates to ``Agent._setup`` first, then verifies that the
    environment provides both discrete states and discrete actions.

    :param conditions: environment description dict (stored by the
        base class as ``self.conditions``).
    :raises AgentException: if states or actions are not discrete.
    """
    Agent._setup(self, conditions)
    fully_discrete = (self.conditions['discreteStates']
                      and self.conditions['discreteActions'])
    if not fully_discrete:
        raise AgentException('QAgent expects discrete states and actions. Use adapter or a different environment.')
    # one table cell per (state, action) pair
    self.estimator = TableEstimator(self.conditions['stateNum'],
                                    self.conditions['actionNum'])
def _setup(self, conditions):
    """Create the function-approximation estimator for the FQI agent.

    Delegates to ``Agent._setup`` first, then verifies that the
    environment provides continuous states and discrete actions
    (the previous docstring was a stale copy from the Q-table agent).
    Builds either a ``VectorBlockEstimator`` or a plain ``FAEstimator``
    depending on ``self.vectorblock``.

    :param conditions: environment description dict (stored by the
        base class as ``self.conditions``).
    :raises AgentException: if states are discrete or actions are not.
    """
    Agent._setup(self, conditions)
    # truthiness test replaces the non-idiomatic '== False' comparison
    # (assumes the condition flags are plain booleans)
    if self.conditions['discreteStates'] or not self.conditions['discreteActions']:
        raise AgentException('FQIAgent expects continuous states and discrete actions. Use adapter or a different environment.')
    if self.vectorblock:
        self.estimator = VectorBlockEstimator(
            self.conditions['stateDim'], self.conditions['actionNum'],
            faClass=self.faClass, ordered=self.ordered)
    else:
        self.estimator = FAEstimator(
            self.conditions['stateDim'], self.conditions['actionNum'],
            faClass=self.faClass, ordered=self.ordered)
def _setup(self, conditions):
    """Create the estimator over the joint (state, action) space for BAS.

    Delegates to ``Agent._setup`` first, then verifies that the
    environment provides continuous states AND continuous actions
    (the previous docstring was a stale copy from the Q-table agent).
    The estimator's input dimension is stateDim + actionDim with
    2**actionDim discrete outputs (presumably one per binary refinement
    direction of each action component — TODO confirm against BAS paper).
    The history is re-created to hold the bas-extended experiences.

    :param conditions: environment description dict (stored by the
        base class as ``self.conditions``).
    :raises AgentException: if states or actions are discrete.
    """
    Agent._setup(self, conditions)
    # truthiness tests replace the non-idiomatic '== False' comparisons
    # (assumes the condition flags are plain booleans)
    if self.conditions['discreteStates'] or self.conditions['discreteActions']:
        raise AgentException('BASAgent expects continuous states and actions. Use adapter or a different environment.')
    state_dim = self.conditions['stateDim']
    action_dim = self.conditions['actionDim']
    self.estimator = FAEstimator(state_dim + action_dim,
                                 2 ** action_dim, self.faClass)
    # change history to store bas-extended experiences; reads dims from
    # self.conditions consistently (the original mixed 'conditions' and
    # 'self.conditions' — assumed identical after Agent._setup)
    self.history = History(state_dim + action_dim, 1)