Example #1
# Standard-library imports needed by this snippet; JacoEnv, Agent and the
# EPS_* / THREAD_DELAY constants are assumed to come from the surrounding
# A3C script.
import threading
import time


class Environment(threading.Thread):
    stop_signal = False

    def __init__(self,
                 render=False,
                 eps_start=EPS_START,
                 eps_end=EPS_STOP,
                 eps_steps=EPS_STEPS):
        threading.Thread.__init__(self)

        self.render = render
        self.env = JacoEnv(64, 64, 100, 0.1, 0.8, True)
        self.agent = Agent(eps_start, eps_end, eps_steps)

    def runEpisode(self):
        s = self.env.reset()

        R = 0
        while True:
            time.sleep(THREAD_DELAY)  # yield

            if self.render: self.env.render()

            a = self.agent.act(s)
            s_, r, done, info = self.env.step(a)
            # print(self.ident, info['step'])

            if done:  # terminal state
                s_ = None

            self.agent.train(s, a, r, s_)

            s = s_
            R += r

            if done or self.stop_signal:
                break

        print("Total R:", R)

    def run(self):
        while not self.stop_signal:
            self.runEpisode()

    def stop(self):
        self.stop_signal = True
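
A minimal usage sketch for the Environment thread above (not part of the original example). THREADS and RUN_TIME are illustrative values; the Agent class and the EPS_* / THREAD_DELAY constants are assumed to be defined as in the surrounding A3C script.

import time

THREADS = 8        # illustrative
RUN_TIME = 30      # seconds of training, illustrative

envs = [Environment() for _ in range(THREADS)]
for e in envs:
    e.start()      # each thread repeatedly calls runEpisode()

time.sleep(RUN_TIME)

for e in envs:
    e.stop()       # sets stop_signal so run() exits after the current episode
for e in envs:
    e.join()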
Example #2
# Third-party imports needed by this snippet; JacoEnv is assumed to come
# from the surrounding project.
import matplotlib.pyplot as plt
import mujoco_py
import numpy as np


class JacoEnvRandomAgent:
    def __init__(self, width, height, frame_skip, rewarding_distance,
                 control_magnitude, reward_continuous, render):
        self.env = JacoEnv(width, height, frame_skip, rewarding_distance,
                           control_magnitude, reward_continuous)
        self.render = render

    def run(self):
        (_, _, obs_rgb_view2) = self.env.reset()

        if self.render:
            viewer = mujoco_py.MjViewer(self.env.sim)
        else:
            f, ax = plt.subplots()
            im = ax.imshow(obs_rgb_view2)

        while True:
            self.env.reset()

            while True:

                # random action selection
                action = np.random.choice([0, 1, 2, 3, 4], 6)

                # take the random action and observe the reward and next state (2 rgb views and proprioception)
                (obs_joint, obs_rgb_view1,
                 obs_rgb_view2), reward, done = self.env.step(action)

                # print("action : ", action)
                # print("reward : ", reward)

                if done:
                    break

                if self.render:
                    viewer.render()
                else:
                    im.set_data(obs_rgb_view2)
                    plt.draw()
                    plt.pause(0.1)
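
A short usage sketch for the random agent (not part of the original example); the constructor values mirror the JacoEnv(64, 64, 100, 0.1, 0.8, True) call in Example #1 and are illustrative only.

# Run the random agent with illustrative parameters; set render=True to use
# the native mujoco_py viewer instead of the matplotlib fallback.
agent = JacoEnvRandomAgent(width=64, height=64, frame_skip=100,
                           rewarding_distance=0.1, control_magnitude=0.8,
                           reward_continuous=True, render=False)
agent.run()  # loops forever; interrupt with Ctrl+C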
Example #3
# This worker relies on module-level objects from the surrounding distributed
# PPO script: GLOBAL_PPO, COORD, QUEUE, UPDATE_EVENT, ROLLING_EVENT and the
# GAMMA / EP_LEN / EP_MAX / MIN_BATCH_SIZE hyperparameters.
import numpy as np
import mujoco_py


class Worker(object):
    def __init__(self, wid):

        self.wid = wid
        #self.env = gym.make(GAME).unwrapped
        self.env = JacoEnv(64, 64, 100)
        self.ppo = GLOBAL_PPO
        if self.wid == 0:
            self.viewer = mujoco_py.MjViewer(self.env.sim)

    def work(self):
        global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
        while not COORD.should_stop():
            s = self.env.reset()
            ep_r = 0
            buffer_s, buffer_a, buffer_r = [], [], []
            for t in range(EP_LEN):
                if not ROLLING_EVENT.is_set():  # while the global PPO is updating
                    ROLLING_EVENT.wait()  # wait until the PPO update has finished
                    buffer_s, buffer_a, buffer_r = [], [], []  # clear the history buffer; collect data with the new policy

                if self.wid == 0:
                    self.viewer.render()

                a = self.ppo.choose_action(s)
                s_, r, done = self.env.step(a)
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append((r + 8) / 8)  # normalize reward; found to be useful
                s = s_
                ep_r += r

                GLOBAL_UPDATE_COUNTER += 1  # count towards the minimum batch size; no need to wait for other workers
                if t == EP_LEN - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE or done:
                    v_s_ = self.ppo.get_v(s_)
                    discounted_r = []  # compute discounted reward
                    for r in buffer_r[::-1]:
                        v_s_ = r + GAMMA * v_s_
                        discounted_r.append(v_s_)
                    discounted_r.reverse()

                    bs = np.vstack(buffer_s)
                    ba = np.vstack(buffer_a)
                    br = np.array(discounted_r)[:, np.newaxis]
                    buffer_s, buffer_a, buffer_r = [], [], []
                    QUEUE.put(np.hstack((bs, ba, br)))  # put data in the queue

                    if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
                        ROLLING_EVENT.clear()  # stop collecting data
                        UPDATE_EVENT.set()  # trigger the global PPO update

                    if GLOBAL_EP >= EP_MAX:  # stop training
                        COORD.request_stop()
                        break
                    if done:
                        break

            with open("reward.txt", "a") as f:
                f.write(str(ep_r) + '\n')
            # record reward changes, plot later
            if len(GLOBAL_RUNNING_R) == 0:
                GLOBAL_RUNNING_R.append(ep_r)
            else:
                GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1] * 0.9 + ep_r * 0.1)
            GLOBAL_EP += 1
            # r_d = 200 / (sum(GLOBAL_RUNNING_R[:-10:-1])/10 + 250 + GLOBAL_EP)
            # print(r_d)
            #self.env.reduce_rewarding_distance(r_d)
            #if sum(GLOBAL_RUNNING_R[:-11:-1])/10 > 100:
            #    self.env.reset_target()
            # if GLOBAL_EP > 1495 and GLOBAL_EP % 300 == 0:
            #     self.env.reset_target()
            # if GLOBAL_EP > 1495 and GLOBAL_EP % 300 == 1:
            #     self.env.reset_target()
            # if GLOBAL_EP > 1495 and GLOBAL_EP % 300 == 2:
            #     self.env.reset_target()
            # if GLOBAL_EP > 1495 and GLOBAL_EP % 300 == 3:
            #     self.env.reset_target()
            # if sum(GLOBAL_RUNNING_R[:-11:-1])/10 > 1500:
            #     with open("state.txt", "a") as f:
            #         f.write(str(self.env.sim.model.body_pos[-1]) + '\n')
            #         f.write(str(self.env.sim.model.geom_pos[-1]) + '\n')
            #print('{0:.1f}%'.format(GLOBAL_EP/EP_MAX*100), '|W%i' % self.wid,  '|Ep_r: %.2f' % ep_r,)
            print(GLOBAL_EP, '/', EP_MAX, '|W%i' % self.wid, '|Ep_r: %.2f' % ep_r)
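
The Worker above relies on several module-level objects (GLOBAL_PPO, COORD, QUEUE, UPDATE_EVENT, ROLLING_EVENT and the GAMMA / EP_LEN / EP_MAX / MIN_BATCH_SIZE hyperparameters). Example #4 below shows COORD and QUEUE being created; a hedged sketch of how the two coordination events are typically initialised in this distributed-PPO pattern (the hyperparameter values here are assumptions, not taken from the original script):

import threading

UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
UPDATE_EVENT.clear()   # the updater thread waits until a batch is ready
ROLLING_EVENT.set()    # workers may start collecting data immediately
GAMMA, EP_LEN, EP_MAX, MIN_BATCH_SIZE = 0.9, 500, 1000, 64  # illustrative values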
Example #4
    # Excerpt from the main script: `workers`, GLOBAL_PPO and the module
    # imports (tf, queue, threading, numpy, matplotlib, mujoco_py) are
    # defined earlier in the original file.
    GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
    GLOBAL_RUNNING_R = []  # global moving-average reward
    COORD = tf.train.Coordinator()
    QUEUE = queue.Queue()  # workers put rollout data in this queue
    threads = []
    for worker in workers:  # worker threads
        t = threading.Thread(target=worker.work, args=())
        t.start()  # training
        threads.append(t)
    # add a PPO updating thread
    threads.append(threading.Thread(target=GLOBAL_PPO.update))
    threads[-1].start()
    COORD.join(threads)

    # plot reward change and test
    plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
    plt.xlabel('Episode')
    plt.ylabel('Moving reward')
    plt.ion()
    plt.show()

    env = JacoEnv(64, 64, 100)

    viewer = mujoco_py.MjViewer(env.sim)

    while True:
        s = env.reset()
        for t in range(200):
            viewer.render()
            s = env.step(GLOBAL_PPO.choose_action(s))[0]
Example #5
# Evaluation worker. Note: this code uses the pre-0.4 PyTorch API
# (Variable, volatile=True); ActorCritic, state_to_tensor and plot_line are
# assumed to come from the surrounding project.
def test(rank, args, T, shared_model):
    torch.manual_seed(args.seed + rank)

    env = JacoEnv(args.width,
                  args.height,
                  args.frame_skip,
                  args.rewarding_distance,
                  args.control_magnitude,
                  args.reward_continuous)
    env.seed(args.seed + rank)
    if args.render:
        (_, _, obs_rgb_view2) = env.reset()
        plt.ion()
        f, ax = plt.subplots()
        im = ax.imshow(obs_rgb_view2)

    model = ActorCritic(None, args.non_rgb_state_size, None, args.hidden_size)
    model.eval()
    can_test = True  # Test flag
    t_start = 1  # Test step counter to check against global counter
    rewards, steps = [], []  # Rewards and steps for plotting
    n_digits = str(len(str(args.T_max)))  # Max num. of digits for logging steps
    done = True  # Start new episode

    while T.value() <= args.T_max:
        if can_test:
            t_start = T.value()  # Reset counter

            # Evaluate over several episodes and average results
            avg_rewards, avg_episode_lengths = [], []
            for _ in range(args.evaluation_episodes):
                while True:
                    # Reset or pass on hidden state
                    if done:
                        # Sync with shared model every episode
                        model.load_state_dict(shared_model.state_dict())
                        hx = Variable(
                            torch.zeros(1, args.hidden_size), volatile=True)
                        cx = Variable(
                            torch.zeros(1, args.hidden_size), volatile=True)
                        # Reset environment and done flag
                        state = state_to_tensor(env.reset())
                        action = (0, 0, 0, 0, 0, 0)
                        reward, done, episode_length = 0, False, 0
                        reward_sum = 0

                    # Calculate policy
                    policy, _, (hx, cx) = model(
                        Variable(state[0], volatile=True),
                        Variable(state[1], volatile=True),
                        (hx.detach(), cx.detach()))  # Break graph for memory efficiency

                    # Choose action greedily
                    action = [p.max(1)[1].data[0, 0] for p in policy]

                    # Step
                    state, reward, done = env.step(action)
                    obs_rgb_view1 = state[1]
                    obs_rgb_view2 = state[2]
                    state = state_to_tensor(state)
                    reward_sum += reward
                    done = done or episode_length >= args.max_episode_length  # Stop episodes at a max length
                    episode_length += 1  # Increase episode counter

                    # Optionally render validation states
                    if args.render:
                        # rendering the first camera view
                        im.set_data(obs_rgb_view1)
                        plt.draw()
                        plt.pause(0.05)

                        # rendering mujoco simulation
                        # viewer = mujoco_py.MjViewer(env.sim)
                        # viewer.render()

                    # Log and reset statistics at the end of every episode
                    if done:
                        avg_rewards.append(reward_sum)
                        avg_episode_lengths.append(episode_length)
                        break

            print(('[{}] Step: {:<' + n_digits +
                   '} Avg. Reward: {:<8} Avg. Episode Length: {:<8}').format(
                       datetime.utcnow().strftime(
                           '%Y-%m-%d %H:%M:%S,%f')[:-3], t_start,
                       sum(avg_rewards) / args.evaluation_episodes,
                       sum(avg_episode_lengths) / args.evaluation_episodes))

            rewards.append(avg_rewards)  # Keep all evaluations
            steps.append(t_start)
            plot_line(steps, rewards)  # Plot rewards
            torch.save(model.state_dict(),
                       os.path.join('results', str(t_start) +
                                    '_model.pth'))  # Checkpoint model params
            can_test = False  # Finish testing
            if args.evaluate:
                return
        else:
            if T.value() - t_start >= args.evaluation_interval:
                can_test = True

        time.sleep(0.001)  # Check if available to test every millisecond
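
This test loop uses the pre-0.4 PyTorch idiom of Variable(..., volatile=True) to disable gradient tracking during evaluation. On PyTorch 0.4 and later the same intent is expressed with torch.no_grad(); a minimal sketch of the equivalent inner step, not part of the original code:

# Equivalent evaluation step on PyTorch >= 0.4 (illustrative only; model,
# state and args come from the surrounding function).
import torch

with torch.no_grad():
    hx = torch.zeros(1, args.hidden_size)
    cx = torch.zeros(1, args.hidden_size)
    policy, _, (hx, cx) = model(state[0], state[1], (hx, cx))
    action = [p.max(1)[1].item() for p in policy]  # greedy action per dimension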
Example #6
# Training worker (A3C-style). As in the test worker, this uses the pre-0.4
# PyTorch Variable API; ActorCritic, state_to_tensor and _train are assumed
# to come from the surrounding project.
def train(rank, args, T, shared_model, optimiser):
    torch.manual_seed(args.seed + rank)

    env = JacoEnv(args.width, args.height, args.frame_skip,
                  args.rewarding_distance, args.control_magnitude,
                  args.reward_continuous)
    env.seed(args.seed + rank)

    # TODO: pass in the observation and action space
    model = ActorCritic(None, args.non_rgb_state_size, None, args.hidden_size)
    model.train()

    t = 1  # Thread step counter
    done = True  # Start new episode

    while T.value() <= args.T_max:
        # Sync with shared model at least every t_max steps
        model.load_state_dict(shared_model.state_dict())
        # Get starting timestep
        t_start = t

        # Reset or pass on hidden state
        if done:
            hx = Variable(torch.zeros(1, args.hidden_size))
            cx = Variable(torch.zeros(1, args.hidden_size))
            # Reset environment and done flag
            state = state_to_tensor(env.reset())
            action = (0, 0, 0, 0, 0, 0)
            reward, done, episode_length = 0, False, 0

        else:
            # Perform truncated backpropagation-through-time (allows freeing buffers after backwards call)
            hx = hx.detach()
            cx = cx.detach()

        # Lists of outputs for training
        policies, Vs, actions, rewards = [], [], [], []

        while not done and t - t_start < args.t_max:
            # Calculate policy and value
            policy, V, (hx, cx) = model(Variable(state[0]), Variable(state[1]),
                                        (hx, cx))

            # Sample action
            # Graph is broken here: the loss for the stochastic action is computed manually
            action = [p.multinomial().data[0, 0] for p in policy]

            # Step
            state, reward, done = env.step(action)
            state = state_to_tensor(state)
            done = done or episode_length >= args.max_episode_length  # Stop episodes at a max length
            episode_length += 1  # Increase episode counter

            # Save outputs for online training
            policies.append(policy)
            Vs.append(V)
            actions.append(Variable(torch.LongTensor(action)))
            rewards.append(reward)

            # Increment counters
            t += 1
            T.increment()

        # Break graph for last values calculated (used for targets, not directly as model outputs)
        if done:
            # R = 0 for terminal s
            R = Variable(torch.zeros(1, 1))

        else:
            # R = V(s_i; θ) for non-terminal s
            _, R, _ = model(Variable(state[0]), Variable(state[1]), (hx, cx))
            R = R.detach()
        Vs.append(R)

        # Train the network
        _train(args, T, model, shared_model, optimiser, policies, Vs, actions,
               rewards, R)
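
train() and test() are written to run in separate processes that share shared_model and the global step counter T. A hedged sketch of a launcher consistent with the mp.set_start_method('spawn') call in Example #7; args.num_processes, T, shared_model and optimiser are assumptions taken from the function signatures above, not from the original file:

import torch.multiprocessing as mp

# Illustrative launcher: one evaluation process (rank 0) plus several trainers.
processes = []
p = mp.Process(target=test, args=(0, args, T, shared_model))
p.start()
processes.append(p)
for rank in range(1, args.num_processes):
    p = mp.Process(target=train, args=(rank, args, T, shared_model, optimiser))
    p.start()
    processes.append(p)
for p in processes:
    p.join()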
Example #7
    # Excerpt from the main script: args, Counter, JacoEnv, cv2, np, trange
    # and mp (torch.multiprocessing) are imported/defined earlier in the file.
    mp.set_start_method('spawn')
    torch.manual_seed(args.seed)
    T = Counter()  # Global shared counter

    # Create the environment
    env = JacoEnv(args.width, args.height, args.frame_skip,
                  args.rewarding_distance, args.control_magnitude,
                  args.reward_continuous)

    M = cv2.getRotationMatrix2D((32, 32), 180, 1.)  # 180-degree rotation about the centre of the 64x64 frame
    done = False
    for i in trange(1000):
        done = False
        j = 0
        while not done:
            obs, reward, done = env.step(
                np.random.randint(0, 4, env.num_actuators))
            img = cv2.warpAffine(obs[2], M, (64, 64))
            cv2.imwrite(
                "training_observations/obs" + str(i) + "_" + str(j) + ".png",
                img)

            new_floor_color = list((0.55 - 0.45) * np.random.random(3) +
                                   0.45) + [1.]
            new_cube_color = list(np.random.random(3)) + [1.]
            env.change_floor_color(new_floor_color)
            env.change_cube_color(new_cube_color)
            j += 1
            # print(i, j, done)
            if done:
                # print("########")
                env.reset()
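
The warpAffine call above rotates each 64x64 observation by 180 degrees about (32, 32) before it is written to disk. A tiny self-contained check of that transform (illustrative only):

import cv2
import numpy as np

img = np.zeros((64, 64, 3), dtype=np.uint8)
img[1, 1] = (255, 255, 255)                      # mark a pixel near the top-left corner
M = cv2.getRotationMatrix2D((32, 32), 180, 1.)   # centre (x, y), angle in degrees, scale
rotated = cv2.warpAffine(img, M, (64, 64))
print(rotated[63, 63])                           # the mark lands near the bottom-right corner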