Example #1
 def reset(self):
     self.reward[0] = 0.0
     self.rawReward = 0.0
     self.env.reset()
     self.action = [self.env.dists[0]] * self.outDim
     self.epiStep = 0
     EpisodicTask.reset(self)
Example #2
File: tasks.py Project: DanSGraham/code
 def reset(self):
     self.reward[0] = 0.0
     self.rawReward = 0.0
     self.env.reset()
     self.action = [self.env.dists[0]] * self.outDim
     self.epiStep = 0
     EpisodicTask.reset(self)
Example #3
 def reset(self):
     EpisodicTask.reset(self)
     self.day = weather.daily(date="random")
     self.t = 0
Example #4
 def reset(self):
     self.steps = 0
     EpisodicTask.reset(self)
Example #5
 def reset(self):
     EpisodicTask.reset(self)
     self.t = 0
Example #6
 def reset(self):
     EpisodicTask.reset(self)
     self.total_reward = 0.0
Example #7
 def reset(self):
     EpisodicTask.reset(self)
     self.score_before = 0
Example #8
 def reset(self):
     EpisodicTask.reset(self)
     self.t = 0
     self.lastFitness = 0.0
     self.bestFitness = 0.0
     self.appendLog() # write first line!
Example #9
 def reset(self):
     EpisodicTask.reset(self)
     self.count = 0
     self.oldDist = 0.
     self.atGoal = False
     self.reward = 0
Example #10
 def reset(self):
     self.steps = 0
     EpisodicTask.reset(self)
Example #11
 def reset(self):
     self.env.reset()
     EpisodicTask.reset(self)
     self.t = 0
Example #12
 def reset(self):
     EpisodicTask.reset(self)
     self.total_reward = 0.0
Example #13
 def reset(self):
     EpisodicTask.reset(self)
     self.t = 0
     self.rewardscale = 100.0 / self.env.distance_to_goal
     print(self.total_reward)
     self.total_reward = 0.0
Example #14
 def reset(self):
     """ Re-initialize the environment """
     EpisodicTask.reset(self)
     self.sensors = None
     self.found_cube = False
     self.no_actions = False
Example #15
 def reset(self):
     EpisodicTask.reset(self)
     self.score_before = 0
Example #16
 def reset(self):
     EpisodicTask.reset(self)
     self.day = weather.daily(date='random')
     self.t = 0
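All of the examples above follow the same pattern: the task overrides reset(), delegates to EpisodicTask.reset(self) so the base class can perform its own per-episode re-initialization, and then zeroes out whatever extra per-episode state the task tracks (step counters, cached rewards, flags). The sketch below is a minimal illustration of that pattern, assuming PyBrain's pybrain.rl.environments.episodic.EpisodicTask is available; CountingTask, max_steps, and the environment argument are hypothetical, and the remaining task methods (e.g. getReward) are omitted.

 from pybrain.rl.environments.episodic import EpisodicTask

 class CountingTask(EpisodicTask):
     """Hypothetical task showing the reset pattern used in the examples."""

     def __init__(self, environment, max_steps=200):
         EpisodicTask.__init__(self, environment)
         self.max_steps = max_steps  # hypothetical episode length
         self.t = 0
         self.total_reward = 0.0

     def reset(self):
         # let the base class perform its own per-episode re-initialization ...
         EpisodicTask.reset(self)
         # ... then re-initialize the state this task adds on top of it
         self.t = 0
         self.total_reward = 0.0

     def isFinished(self):
         # end the episode after a fixed number of steps
         return self.t >= self.max_steps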