Example #1
    def test(self, test_interval_min, test_interval_max):
        steps_score = np.zeros(self.num_random_tests)
        for k in range(self.num_random_tests):
            # Pick a random initial pole angle within the test interval
            req = RobotActionRequest()
            req.reset_robot = True
            req.reset_pole_angle = np.random.uniform(np.deg2rad(test_interval_min), np.deg2rad(test_interval_max)) * self.get_random_sign()
            # print("Initial pole angle is", np.rad2deg(req.reset_pole_angle), "degrees")
            response = self.cartpole_action_service(req)
            for n in range(self.steps_max):
                # Query the learned policy for an action given the current state
                pol = RobotPolicyRequest()
                pol.robot_state = response.robot_state
                dqn_response = self.cartpole_policy_service(pol)
                # Apply the chosen action to the simulated cartpole
                req = RobotActionRequest()
                req.action = dqn_response.action
                req.reset_robot = False
                response = self.cartpole_action_service(req)
                if self.state_out_of_bounds(response.robot_state):
                    # print("Cartpole was balanced for", n, "steps")
                    steps_score[k] = n
                    time.sleep(1)
                    break
                if n == self.steps_max - 1:
                    # print("Episode ended successfully!")
                    steps_score[k] = self.steps_max
                    time.sleep(1)
                time.sleep(0.01)
        return steps_score
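The loop above relies on two helpers that are not shown in this snippet, get_random_sign() and state_out_of_bounds(). A minimal sketch of what they could look like; the state layout and the 1.2 / 6-degree limits are assumptions borrowed from Example #3 below, not taken from this snippet:

import numpy as np

def get_random_sign(self):
    # +1 or -1 with equal probability, used to mirror the random initial pole angle
    return 1.0 if np.random.rand() < 0.5 else -1.0

def state_out_of_bounds(self, robot_state):
    # Assumes robot_state begins with [cart_position, pole_angle, ...];
    # the episode fails once the cart leaves the track or the pole tilts past 6 degrees
    return abs(robot_state[0]) > 1.2 or abs(robot_state[1]) > np.deg2rad(6.0)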
Example #2
    def get_start_state(self):
        # Reset the cartpole to a random integer angle in [-3, 3] degrees
        angle = np.random.randint(-3, 4)
        angle = angle * np.pi / 180
        req = RobotActionRequest()
        req.reset_robot = True
        req.reset_pole_angle = angle
        resp = self.robot_state(req)
        return resp.robot_state
Example #3
    def train(self):
        global eps

        for i in range(self.num_random_trains):
            # Reset the cartpole to a small random initial pole angle
            req = RobotActionRequest()
            req.reset_robot = True
            episode_reward = 0
            req.reset_pole_angle = np.random.uniform(0.0, np.deg2rad(3.0)) * self.get_random_sign()
            response = self.cartpole_action_service(req)
            state = list(response.robot_state)
            # Decay the exploration rate, but keep at least 20% random actions
            eps = max(eps * 0.9, 0.2)

            for k in range(self.num_steps):
                req = RobotActionRequest()
                req.reset_robot = False
                action = self.selection(state)  # epsilon-greedy action selection
                req.action = action

                response = self.cartpole_action_service(req)
                state_next = list(response.robot_state)

                # Episode fails once the cart leaves the track or the pole tilts past 6 degrees
                if abs(state_next[0]) > 1.2 or abs(state_next[1]) > (6 * np.pi / 180):
                    reward = [0]
                    state_next = [0, 0, 0, 0]
                    self.memory.push([state, action, reward, state_next])
                    break
                else:
                    episode_reward += 1
                    reward = [1]
                    self.memory.push([state, action, reward, state_next])

                self.optimize_model()
                state = state_next

            print(episode_reward)
            # Periodically copy the online network weights into the target network
            if i % self.episodes_step == 0:
                self.target_net.load_state_dict(self.action_net.state_dict())

        return self.target_net
    def test(self, test_interval_min, test_interval_max):
        # Same evaluation loop as executive.py
        for episode in range(300):
            # Pick a random initial pole angle within the test interval
            req = RobotActionRequest()
            req.reset_robot = True
            req.reset_pole_angle = np.random.uniform(np.deg2rad(test_interval_min), np.deg2rad(test_interval_max)) * self.get_random_sign()
            s_t = self.cartpole_action_service(req)
            final_reward = 0
            for n in range(self.steps_max):
                # Greedy action from the learned Q-network
                a_t = self.DQN.execute(s_t.robot_state)

                req = RobotActionRequest()
                req.action = [a_t]
                req.reset_robot = False

                s_t1 = self.cartpole_action_service(req)
                r_t1 = 1
                if self.state_out_of_bounds(s_t1.robot_state):
                    r_t1 = -1

                # Store the transition, then learn from the replay buffer
                self.DQN.store(s_t.robot_state, a_t, r_t1, s_t1.robot_state)
                if self.state_out_of_bounds(s_t1.robot_state):
                    break
                final_reward += 1
                s_t = s_t1
                self.DQN.learn()

            self.score.append(final_reward)
            # Stop once the median score over the last 21 episodes exceeds 195
            if np.median(self.score[-21:]) > 195:
                break
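Example #3 omits the pieces that do the actual learning: the epsilon-greedy selection method, the replay memory, and optimize_model. The following is a minimal PyTorch sketch of how they could fit together, not the original implementation; the two-force action set, the discount factor, and the action_net / target_net / optimizer / memory attributes are assumptions.

import random
import torch
import torch.nn.functional as F

eps = 1.0                # global exploration rate, decayed inside train()
FORCES = [-10.0, 10.0]   # assumed two-action set: push left / push right
GAMMA = 0.99             # assumed discount factor

class ReplayMemory(object):
    """Fixed-size buffer of [state, action, reward, state_next] transitions."""
    def __init__(self, capacity=10000):
        self.capacity = capacity
        self.buffer = []

    def push(self, transition):
        self.buffer.append(transition)
        if len(self.buffer) > self.capacity:
            self.buffer.pop(0)

    def sample(self, batch_size):
        return random.sample(self.buffer, min(batch_size, len(self.buffer)))

# The two methods below would live on the same class as train(), which is
# assumed to own self.action_net, self.target_net, self.optimizer and self.memory.

def selection(self, state):
    # Epsilon-greedy: random force with probability eps, otherwise the greedy action
    if random.random() < eps:
        return [random.choice(FORCES)]
    with torch.no_grad():
        q_values = self.action_net(torch.tensor([state], dtype=torch.float32))
    return [FORCES[q_values.argmax(dim=1).item()]]

def optimize_model(self, batch_size=32):
    batch = self.memory.sample(batch_size)
    states = torch.tensor([t[0] for t in batch], dtype=torch.float32)
    actions = torch.tensor([[0 if t[1][0] < 0 else 1] for t in batch])
    rewards = torch.tensor([t[2] for t in batch], dtype=torch.float32)
    next_states = torch.tensor([t[3] for t in batch], dtype=torch.float32)

    # Q(s, a) for the actions actually taken, from the online network
    q_sa = self.action_net(states).gather(1, actions)
    # One-step Bellman target, using the periodically synced target network
    target = rewards + GAMMA * self.target_net(next_states).max(1, keepdim=True)[0].detach()
    loss = F.smooth_l1_loss(q_sa, target)

    self.optimizer.zero_grad()
    loss.backward()
    self.optimizer.step()

Refreshing target_net only every episodes_step episodes, as train() does, keeps the Bellman targets from chasing the online network between updates.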
Example #5
    def reset_robot(self, state):
        # Reset the simulation, using the pole angle stored in the given state vector
        req = RobotActionRequest()
        req.reset_robot = True
        req.reset_pole_angle = state[1, 0]
        response = self.cartpole_action_service(req)
        return response
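The indexing state[1, 0] suggests the caller keeps the state as a NumPy column vector with the pole angle in the second row. A small usage sketch under that assumption (agent is a hypothetical instance of the class this method belongs to):

import numpy as np

# Assumed 4x1 state layout: [cart_position, pole_angle, cart_velocity, pole_velocity]^T
state = np.array([[0.0], [np.deg2rad(2.0)], [0.0], [0.0]])
response = agent.reset_robot(state)   # resets the simulation with a 2-degree pole angle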
Example #6
    def train(self):
        print('start training')
        start = time.time()
        done = False
        n_episodes = 800
        scores = deque(maxlen=20)
        batch_size = 64

        for e in range(n_episodes):
            # initial state
            # self.memory.clear()
            reward = 0

            # reset the robot for each episode
            req = RobotActionRequest()
            req.reset_robot = True
            # np.random.seed(0)
            req.reset_pole_angle = np.random.uniform(
                np.deg2rad(0.0), np.deg2rad(3.0)) * self.get_random_sign()
            # req.reset_pole_angle = np.deg2rad(2.0) * self.get_random_sign()
            state = self.probe_service(req).robot_state
            # print(state)

            i = 0
            done = False
            for j in range(1000):
                action, act_inx = self.act(state)
                # print('action', action, act_inx)
                req = RobotActionRequest()
                req.reset_robot = False
                req.action = [action]
                next_state = self.probe_service(req).robot_state

                done = self.state_out_of_bounds(next_state)
                reward = 10 if not done else -10
                # print(state, action, act_inx, reward, next_state, done)
                self.remember(state, action, act_inx, reward, next_state, done)

                # print(state, action, reward, next_state, done)
                state = next_state
                i = i + 1

                # scores.append(i)
                # mean = np.mean(scores)
                if done:
                    episode_durations.append(i)
                    #self.plot_durations()
                    #print(('episode: {}/{}, score:{}, e:{:.2}').format(e, n_episodes, i, self.epsilon))
                    break

            # Stop early once recent episodes are long enough or the time budget is nearly spent
            if e > 100:
                if np.mean(episode_durations[-30:]) > 250 or (time.time() - start) >= 290:
                    self.plot_durations()
                    # torch.save(self.model.state_dict(), './x.pth')
                    return

            self.replay(batch_size)

            # Sync the target network with the online model every 10 episodes
            if e % 10 == 0:
                self.target_net.load_state_dict(self.model.state_dict())
        self.plot_durations()
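The act and remember methods used by this training loop are not shown. A short sketch of one way to write them, assuming act returns both the applied force and its index into a hypothetical two-force action set, and that the class owns model (the online Q-network), epsilon, actions, and a bounded memory deque that replay(batch_size) later samples from:

import random
from collections import deque
import torch

# Assumed attributes on the class: self.model (the online Q-network),
# self.epsilon (exploration rate), self.actions (e.g. [-10.0, 10.0]),
# and self.memory = deque(maxlen=10000).

def act(self, state):
    # Epsilon-greedy: return both the force and its index, as the caller expects
    if random.random() < self.epsilon:
        act_inx = random.randrange(len(self.actions))
    else:
        with torch.no_grad():
            q_values = self.model(torch.tensor([state], dtype=torch.float32))
        act_inx = q_values.argmax(dim=1).item()
    return self.actions[act_inx], act_inx

def remember(self, state, action, act_inx, reward, next_state, done):
    # Append the transition to the bounded buffer that replay(batch_size) samples from
    self.memory.append((state, action, act_inx, reward, next_state, done))

Returning the index alongside the force lets replay gather Q-values for the action actually taken without having to invert the force-to-index mapping.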