Example #1
 def __init__(self, visualize=False, token=None, max_obstacles=3):
     logger.info("max_obstacles={}".format(max_obstacles))
     if token is None:
         self.remote_env = False
         self.env = RunEnv(visualize=visualize, max_obstacles=max_obstacles)
     else:
         self.remote_env = True
         self.local_env = RunEnv(visualize=False, max_obstacles=max_obstacles)
         self.token = token
         self.env = Client(GRADER_URL)
         self.env_created = False
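For context, a minimal local usage sketch, assuming the osim-rl RunEnv API that recurs throughout these examples (reset with a difficulty, then step with sampled actions until the episode ends):

# Minimal sketch of driving RunEnv locally (API as used in the examples on this page).
from osim.env import RunEnv

env = RunEnv(visualize=False, max_obstacles=3)
observation = env.reset(difficulty=0)
done = False
total_reward = 0.0
while not done:
    action = env.action_space.sample()  # replace with a trained policy
    observation, reward, done, info = env.step(action)
    total_reward += reward
print(total_reward)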
Example #2
    def __init__(self, game_name, display):
        self.game_name = game_name
        self.display = display

        # self.env = gym.make(game_name)
        self.env = RunEnv(self.display)
        self.reset()
Example #3
def main():
    env = RunEnv(visualize=False)
    population = [[NN(), 0] for _ in range(100)]
    generation = 0

    for _ in range(2000):
        for i in range(len(population)):
            print(i)
            population[i][1] = run(population[i][0], env)

        population = sorted(population, key=lambda x: x[1], reverse=True)
        print(np.mean([p[1] for p in population[:5]]))
        generation += 1

        population = population[:50]

        for _ in range(20):
            population.append([random.choice(population[:50])[0].mutate(), 0])

        for _ in range(20):
            nn1 = random.choice(population[:20])[0]
            nn2 = random.choice(population[:50])[0]
            population.append([nn1.crossover(nn2), 0])

        for _ in range(10):
            population.append([NN(), 0])

        with open('save.p', 'wb') as f:  # pickle needs a binary-mode file
            pickle.dump(population, f)
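Example #3 relies on a run(nn, env) helper that is not shown; a hypothetical sketch of what it might look like (the nn.forward interface is an assumption, not taken from the source):

# Hypothetical run(nn, env): one rollout, returning the accumulated reward as fitness.
def run(nn, env):
    observation = env.reset(difficulty=0)
    total_reward = 0.0
    for _ in range(1000):  # episodes in these examples are capped at 1000 steps
        action = nn.forward(observation)  # assumed NN interface; name is illustrative
        observation, reward, done, info = env.step(action)
        total_reward += reward
        if done:
            break
    return total_reward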
Example #4
def create_env(args):
    env = RunEnv(visualize=True, max_obstacles=args.max_obstacles)

    if hasattr(args, "baseline_wrapper") or hasattr(args, "ddpg_wrapper"):
        env = DdpgWrapper(env, args)

    return env
Example #5
def test():
    task_fn = lambda: LTR()
    task = task_fn()
    state_dim = task.env.observation_space.shape[0]
    action_dim = task.env.action_space.shape[0]
    with open('data/ddpg-model-LearningToRun.bin', 'rb') as f:
        model = pickle.load(f)
    actor = DDPGActorNet(state_dim, action_dim)
    actor.load_state_dict(model)

    logger = Logger('./log')

    env = RunEnv(visualize=False)
    state = env.reset(difficulty=0)
    print(state)
    done = False
    total_reward = 0.0
    step = 0
    while not done:
        action = actor.predict(np.stack([state]), to_numpy=True).flatten()
        state, reward, done, info = env.step(action)
        total_reward += reward
        step += 1
        logger.histo_summary('input', actor.input, step)
        logger.histo_summary('act1', actor.act1, step)
        logger.histo_summary('act2', actor.act2, step)
        logger.histo_summary('pre_act3', actor.pre_act3, step)
        logger.histo_summary('act3', actor.act3, step)
        for tag, value in actor.named_parameters():
            tag = tag.replace('.', '/')
            logger.histo_summary(tag, value.data.numpy(), step)

    print(total_reward)
    print(step)
Example #6
 def test(skip=4):
     test_env = RunEnv(visualize=True, max_obstacles=0)
     fast_env = FastEnv(test_env, skip)  # 4 is skip factor
     agent.training = False
     agent.play(fast_env, noise_level=1e-11, episode_index=-1)
     agent.training = True
     del test_env
Example #7
def main():
    env = RunEnv(visualize=False)

    s = socket.socket()
    s.bind(("localhost", 8000))
    s.listen(10)  # max number of connections

    while True:
        sc, address = s.accept()
        f = open("work.p", 'wb')
        while (True):
            l = sc.recv(1024)
            while (l):
                f.write(l)
                l = sc.recv(1024)
        f.close()

        with open('work.p', 'r') as f:
            nn = pickle.load(f)

        reward = run(nn, env)
        sc.send(str(reward))
        sc.close()

    s.close()
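A hypothetical client-side counterpart to this worker: it pickles a network (e.g. the NN from Example #3), streams it to the listener above, and reads back the scalar reward. The helper name, host, and port are illustrative:

# Hypothetical client for the evaluation worker in Example #7.
import pickle
import socket

def evaluate_remote(nn, host="localhost", port=8000):
    payload = pickle.dumps(nn)
    s = socket.socket()
    s.connect((host, port))
    s.sendall(payload)
    s.shutdown(socket.SHUT_WR)  # signal end-of-upload so the worker's recv loop ends
    reward = float(s.recv(1024).decode())
    s.close()
    return reward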
Example #8
    def test():

        env = RunEnv(visualize=False)

        observation_d = env.reset(project=False)
        observation = process_obs_dict(observation_d)

        total_reward = 0
        steps = 0

        while True:

            #a = AGENT OUTPUT
            a, q = agent.act(observation)

            observation_d, reward, done, info = env.step(a, project=False)
            observation = process_obs_dict(observation_d)

            total_reward += reward
            steps += 1

            #print(observation)

            print(steps, 'total reward:', total_reward)

            if done:

                break

        print('finished testing!')
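Examples #8 and #10 assume a process_obs_dict helper that is not shown; one possible sketch flattens the nested observation dict returned by env.reset(project=False) into a flat float vector (the recursive key ordering is an assumption):

# Hypothetical process_obs_dict: flatten a nested observation dict into a float vector.
import numpy as np

def process_obs_dict(obs_dict):
    values = []
    def _flatten(node):
        if isinstance(node, dict):
            for key in sorted(node):  # sort keys for a stable feature ordering
                _flatten(node[key])
        elif isinstance(node, (list, tuple, np.ndarray)):
            for item in node:
                _flatten(item)
        else:
            values.append(float(node))
    _flatten(obs_dict)
    return np.array(values, dtype=np.float32)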
Example #9
def Simulation(proxy_agent, index, return_dict, episodes, vis=False):
    print('starting simulation')
    env = RunEnv(visualize=vis)
    observation = env.reset(difficulty=0)

    rewards = np.zeros(episodes)
    totalreward = 0
    for episode in range(0, episodes):
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        observation = np.array(observation)
        Preprocess = Preprocessing(observation, delta=0.01)
        prevState = Preprocess.GetState(observation)
        for i in range(1,1000):
            observation, reward, done, info = env.step(action)
            observation = np.array(observation)
            #means it didn't go the full simulation
            if done and i < 1000:
                reward = 0  

            state = Preprocess.GetState(observation)
            s,a,r,sp = Preprocess.ConvertToTensor(prevState,action, reward, state)

            totalreward += reward
            if done:
                env.reset(difficulty = 0, seed = None) #resets the environment if done is true
                print("reseting environment" + str(episode))
                rewards[episode] = totalreward
                totalreward = 0
                break
            action = proxy_agent(Variable(s, volatile=True))
            action = action.data.numpy()
            prevState = state
    return_dict[index] = np.sum(rewards) / episodes
    return np.sum(rewards) / episodes
Example #10
	def test(frameskip = 1, vis = False):

		env = RunEnv(visualize=vis)
		#env.change_model(model='2D', prosthetic=True, difficulty=0, seed=None)

		observation_d = env.reset(project = False)
		#observation = process_obs_dict(observation_d)


		total_reward = 0
		steps = 0

		while True:

			#a = AGENT OUTPUT
			observation = process_obs_dict(observation_d)
			a, q = agent.act(observation)

			for _ in range(frameskip):

				observation_d, reward, done, info = env.step(a, project = False)
				#observation = process_obs_dict(observation_d)

				total_reward += reward
				steps += 1

			#print(observation)

			print(steps, 'total reward:', total_reward)

			if done:

				break

		print('finished testing!')
Example #11
    def test1(self):

        env = RunEnv(visualize=False)
        observation = env.reset()

        action = env.action_space.sample()
        action[5] = np.NaN
        self.assertRaises(ValueError, env.step, action)
Example #12
    def run(self):
        
        self.env = RunEnv(visualize=False)
        self.env.reset(difficulty = 2, seed = int(time.time()))
        if self.monitor:
            self.env.monitor.start('monitor/', force=True)

        # tensorflow variables (same as in model.py)
        self.observation_size = 55+7
        self.action_size = np.prod(self.env.action_space.shape)
        self.hidden_size = 128
        weight_init = tf.random_uniform_initializer(-0.05, 0.05)
        bias_init = tf.constant_initializer(0)
        # tensorflow model of the policy
        self.obs = tf.placeholder(tf.float32, [None, self.observation_size])
        self.debug = tf.constant([2,2])
        with tf.variable_scope("policy-a"):
            h1 = fully_connected(self.obs, self.observation_size, self.hidden_size, weight_init, bias_init, "policy_h1")
            h1 = tf.nn.relu(h1)
            h2 = fully_connected(h1, self.hidden_size, self.hidden_size, weight_init, bias_init, "policy_h2")
            h2 = tf.nn.relu(h2)
            h3 = fully_connected(h2, self.hidden_size, self.action_size, weight_init, bias_init, "policy_h3_1")
            h3 = tf.nn.tanh(h3,name="policy_h3")
            action_dist_logstd_param = tf.Variable((.01*np.random.randn(1, self.action_size)).astype(np.float32), name="policy_logstd")
        self.action_dist_mu = h3
        self.action_dist_logstd = tf.tile(action_dist_logstd_param, tf.stack((tf.shape(self.action_dist_mu)[0], 1)))

        config = tf.ConfigProto(
            device_count = {'CPU': 0}
        )
        self.session = tf.Session()
        self.session.run(tf.initialize_all_variables())
        var_list = tf.trainable_variables()

        self.set_policy = SetPolicyWeights(self.session, var_list)

        while True:
            # get a task, or wait until it gets one
            next_task = self.task_q.get(block=True)
            if next_task == 1:
                # the task is an actor request to collect experience
                path = self.rollout()
                self.task_q.task_done()
                self.result_q.put(path)
            elif next_task == 2:
                print "kill message"
                if self.monitor:
                    self.env.monitor.close()
                self.task_q.task_done()
                break
            else:
                # the task is to set parameters of the actor policy
                self.set_policy(next_task)
                # super hacky method to make sure when we fill the queue with set parameter tasks,
                # an actor doesn't finish updating before the other actors can accept their own tasks.
                time.sleep(0.1)
                self.task_q.task_done()
        return
Example #13
def build_model(shared_object):
	shared_object['env'] = RunEnv(shared_object.get('visualize',False))
	model_class_name = 'models.agents.' + shared_object.get('model_class',None)
	log_info('importing class : {}'.format(model_class_name))
	model_class = import_class(model_class_name)
	log_info('{} successfully imported'.format(model_class_name))
	log_info('building model')
	model = model_class(shared_object)
	return model
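A possible call to this factory (the 'model_class' value 'ExampleAgent' is hypothetical and must name a class importable from models.agents; build_model also stores the RunEnv instance under shared_object['env']):

shared_object = {'visualize': False, 'model_class': 'ExampleAgent'}
model = build_model(shared_object)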
Example #14
def Simulation(proxy_agent, episodes, vis=False):
    env = RunEnv(visualize=vis)
    observation = env.reset(difficulty=0)
    memory = random.randint(1000, 2000)
    tau = random.uniform(0.01, .9)
    epsilon = random.uniform(.15, .9)
    target = proxy_agent.ProduceTargetActorCritic( memory, tau, epsilon )
    batches =  [ 16, 32, 64, 128]
    batchsize = batches[random.randint(0,len(batches)-1)]
    for episode in range(0, episodes):
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        observation = np.array(observation)
        Preprocess = Preprocessing(observation, delta=0.01)
        prevState = Preprocess.GetState(observation)
        if(vis):
            target.OUprocess(0, 0.15, 0.0)
        else:
            target.OUprocess(random.random(), 0.15,0.0)
        pelvis_y = 0

        for i in range(1,1000):
            observation, reward, done, info = env.step(action)
            observation = np.array(observation)
            #means it didn't go the full simulation
            if i > 1:
                reward += (observation[2] - pelvis_y)*0.01 #penalty for pelvis going down
            reward = env.current_state[4] * 0.01
            reward += 0.01  # small reward for still standing
            reward += min(0, env.current_state[22] - env.current_state[1]) * 0.1  # penalty for head behind pelvis
            reward -= sum([max(0.0, k - 0.1) for k in [env.current_state[7], env.current_state[10]]]) * 0.02  # penalty for straight legs


            if done and i < 1000:
                reward = 0

            state = Preprocess.GetState(observation)
            s,a,r,sp = Preprocess.ConvertToTensor(prevState,action, reward, state)
            target.addToMemory(s,a,r,sp)

                #        env.render()
            if done:
                env.reset(difficulty = 0, seed = None) #resets the environment if done is true
                if(target.primedToLearn()):

                    lock.acquire()
                    proxy_agent.PerformUpdate(batchsize, target)
                    target.UpdateTargetNetworks(agent.getCritic(), agent.getActor())
                    print("saving actor")
                    proxy_agent.saveActorCritic()
                    print("actor saved")
                    lock.release()
                print("reseting environment" + str(episode))
                break
            action = target.selectAction(s)
            action = action.numpy()
            prevState = state
Example #15
 def test_actions(self):
     env = RunEnv(visualize=False)
     env.reset()
     v = env.action_space.sample()
     v[0] = 1.5
     v[1] = -0.5
     observation, reward, done, info = env.step(v)
     self.assertLessEqual(env.last_action[0],1.0)
     self.assertGreaterEqual(env.last_action[1],0.0)
Example #16
    def __init__(self, visualize=False, difficulty=None):
        super(LearnToRunEnv, self).__init__()
        if difficulty is None:
            self.difficulty = random.randint(0, 2)
        else:
            self.difficulty = difficulty

        self.learntorun_env = RunEnv(visualize=visualize)
        self.observation_space = self.learntorun_env.observation_space
        self.action_space = self.learntorun_env.action_space
Example #17
def standalone_headless_isolated(pq, cq, plock):
    # locking to prevent mixed-up printing.
    plock.acquire()
    print('starting headless...', pq, cq)
    try:
        from osim.env import RunEnv
        # RunEnv = runenv_with_alternative_obstacle_generation_scheme()
        e = RunEnv(visualize=False, max_obstacles=0)
        # bind_alternative_pelvis_judgement(e)
        # use_alternative_episode_length(e)
    except Exception as err:
        print('error on start of standalone')
        traceback.print_exc()
        plock.release()
        return
    else:
        plock.release()

    def report(e):
        # a way to report errors ( since you can't just throw them over a pipe )
        # e should be a string
        print('(standalone) got error!!!')
        cq.put(('error', e))

    def floatify(np):
        return [float(np[i]) for i in range(len(np))]

    try:
        while True:
            msg = pq.get()
            # messages should be tuples,
            # msg[0] should be string

            # isinstance is dangerous, commented out
            # if not isinstance(msg,tuple):
            #     raise Exception('pipe message received by headless is not a tuple')

            if msg[0] == 'reset':
                o = e.reset(difficulty=0)
                cq.put(floatify(o))
            elif msg[0] == 'step':
                o, r, d, i = e.step(msg[1])
                o = floatify(o)  # floatify the observation
                cq.put((o, r, d, i))
            else:
                cq.close()
                pq.close()
                del e
                break
    except Exception as e:
        traceback.print_exc()
        report(str(e))

    return  # end process
Example #18
def standalone_headless_isolated(conn, plock):
    # locking to prevent mixed-up printing.
    plock.acquire()
    print('starting headless...', conn)
    try:
        import traceback
        from osim.env import RunEnv
        e = RunEnv(visualize=False)
    except Exception as e:
        print('error on start of standalone')
        traceback.print_exc()

        plock.release()
        return
    else:
        plock.release()

    def report(e):
        # a way to report errors ( since you can't just throw them over a pipe )
        # e should be a string
        print('(standalone) got error!!!')
        conn.send(('error', e))

    def floatify(np):
        return [float(np[i]) for i in range(len(np))]

    try:
        while True:
            msg = conn.recv()
            # messages should be tuples,
            # msg[0] should be string

            # isinstance is dangerous, commented out
            # if not isinstance(msg,tuple):
            #     raise Exception('pipe message received by headless is not a tuple')

            if msg[0] == 'reset':
                o = e.reset(difficulty=2)
                conn.send(floatify(o))
            elif msg[0] == 'step':
                ordi = list(e.step(msg[1]))  # ensure the result is mutable before converting
                ordi[0] = floatify(ordi[0])
                conn.send(ordi)
            else:
                conn.close()
                del e
                break
    except Exception as e:
        traceback.print_exc()
        report(str(e))

    return  # end process
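A possible parent-process driver for this worker, assuming the ('reset',) / ('step', action) message protocol shown above and that standalone_headless_isolated is defined in the same module:

# Sketch of a parent-process driver for the headless worker (hypothetical usage).
import multiprocessing as mp

if __name__ == '__main__':
    plock = mp.Lock()
    parent_conn, child_conn = mp.Pipe()
    proc = mp.Process(target=standalone_headless_isolated, args=(child_conn, plock))
    proc.start()

    parent_conn.send(('reset',))
    observation = parent_conn.recv()

    parent_conn.send(('step', [0.0] * 18))  # RunEnv actions are 18 muscle activations
    observation, reward, done, info = parent_conn.recv()

    parent_conn.send(('close',))  # any other message makes the worker shut down
    proc.join()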
Example #19
    def test(skip=1):
        # e = p.env
        te = RunEnv(visualize=True, max_obstacles=10)
        from multi import fastenv

        fenv = fastenv(te, skip)  # 4 is skip factor
        agent.render = True
        try:
            agent.play(fenv, realtime=True, max_steps=-1, noise_level=1e-11)
        except:
            pass
        finally:
            del te
Example #20
 def __init__(self,
              game='l2r',
              visualize=False,
              max_obstacles=10,
              skip_count=1):
     self.env = RunEnv(visualize=visualize, max_obstacles=max_obstacles)
     self.step_count = 0
     self.old_observation = None
     self.skip_count = 1  # skip_count  # 4
     self.last_x = 0
     self.current_x = 0
     self.observation_space_shape = (76, )
     self.action_space = self.env.action_space
     self.difficulty = 2
Example #21
 def __init__(self,
              visualize=True,
              test=False,
              step_size=0.01,
              processor=None,
              timestep_limit=1000):
     self.visualize = visualize
     self._osim_env = RunEnv(visualize=visualize)
     self._osim_env.stepsize = step_size
     self._osim_env.spec.timestep_limit = timestep_limit
     self._osim_env.horizon = timestep_limit
     # self._osim_env.integration_accuracy = 1e-1
     if test:
         self._osim_env.timestep_limit = 1000
     self.processor = processor
     print "stepsize: " + str(self._osim_env.stepsize)
Example #22
    def test(skip=1):
        # e = p.env
        te = RunEnv(visualize=False)
        from multi import fastenv

        fenv = fastenv(te, skip)  # 4 is skip factor
        agent.render = True
        agent.training = False
        try:
            #print('playing')
            #agent.play(fenv,realtime=True,max_steps=-1,noise_level=1e-11)
            playifavailable(0)
        except:
            pass
        finally:
            del te
Example #23
    def test_reset(self):
        env = RunEnv(visualize=False)
        observation = env.reset(difficulty=2, seed=123)
        env1 = env.env_desc
        observation = env.reset(difficulty=2, seed=3)
        observation = env.reset(difficulty=2, seed=3)
        observation = env.reset(difficulty=2, seed=3)
        observation = env.reset(difficulty=2, seed=3)
        observation = env.reset(difficulty=2, seed=123)
        env2 = env.env_desc

        s = map(lambda x: x[0] - x[1], list(zip(env1["obstacles"][1],env2["obstacles"][1])))
        self.assertAlmostEqual(sum([k**2 for k in s]), 0.0)

        action = env.action_space.sample()
        action[5] = np.NaN
        self.assertRaises(ValueError, env.step, action)
Example #24
def env(chrom):
    from osim.env import L2RunEnv as RunEnv
    e = RunEnv(visualize=False)
    e.reset()

    T = 2
    total_reward = 0
    for t in range(500):
        obs, reward, done, _ = e.step(
            controller.input(chrom.allele, T, t * 0.01))
        total_reward += reward
        if done:
            break
    # print("HEADLESS: The reward is {}".format(total_reward))

    # enables to calculate accumulated fitness
    if total_reward < 0: total_reward = 0
    del e
    return total_reward
Example #25
def test(args):
    print('start testing')

    ddpg = DDPG()
    ddpg.load_model(args.model, load_memory=False)
    env = RunEnv(visualize=args.visualize, max_obstacles=args.max_obs)

    np.random.seed(args.seed)
    for i in range(1):
        step = 0
        state = env.reset(difficulty=2)
        fg = FeatureGenerator()

        state = fg.gen(state)
        #obs = fg.traj[0]
        #print(obs.left_knee_r, obs.right_knee_r)

        ep_reward = 0
        ep_memories = []
        while True:
            action = ddpg.select_action(list(state))
            next_state, reward, done, info = env.step(action.tolist())
            next_state = fg.gen(next_state)

            #obs = fg.traj[0]
            #print(obs.left_knee_r, obs.right_knee_r)

            print('step: {0:03d}'.format(step), end=', action: ')
            for act in action:
                print('{0:.3f}'.format(act), end=', ')
            print()

            state = next_state
            ep_reward += reward
            step += 1

            print('reward:', ep_reward)

            if done:
                break

        print('\nEpisode: {} Reward: {}, n_steps: {}'.format(
            i, ep_reward, step))
Example #26
    def create(self, env_id, seed=None):
        try:
            if (env_id == 'osim'):
                from osim.env import RunEnv
                env = RunEnv(visualize=True)
            else:
                env = gym.make(env_id)
            print('making environment')
            if seed:
                env.seed(seed)
        except gym.error.Error:
            raise InvalidUsage(
                "Attempted to look up malformed environment ID '{}'".format(
                    env_id))

        instance_id = str(uuid.uuid4().hex)[:self.id_len]
        self.envs[instance_id] = env
        self.envs_id[instance_id] = env_id
        return instance_id
Example #27
    def submit(self):

        remote_base = 'http://grader.crowdai.org:1729'
        env = RunEnv(visualize=self.visualize)
        client = Client(remote_base)

        # Create environment
        observation = client.env_create(self.submit_token)

        # Run a single step
        #
        # The grader runs 3 simulations of at most 1000 steps each. We stop after the last one
        while True:
            [observation, reward, done,
             info] = client.env_step(self.agent.forward(observation))
            if done:
                observation = client.env_reset()
                if not observation:
                    break

        client.submit()
Example #28
def standalone(conn, visualize=True):
    from osim.env import RunEnv
    re = RunEnv(visualize=visualize)
    e = fastenv(re,4)

    while True:
        msg = conn.recv()

        # messages should be tuples,
        # msg[0] should be string

        if msg[0] == 'reset':
            obs = e.reset()
            conn.send(obs)
        elif msg[0] == 'step':
            four = e.step(msg[1])
            conn.send(four)
        else:
            conn.close()
            del e
            return
Example #29
def standalone_headless_isolated(conn,
                                 visualize,
                                 n_obstacles,
                                 run_logs_dir,
                                 additional_info,
                                 higher_pelvis=0.65):
    try:
        e = RunEnv(visualize=visualize, max_obstacles=n_obstacles)
        if higher_pelvis != 0.65:
            bind_alternative_pelvis_judgement(e, higher_pelvis)
        e = MyRunEnvLogger(e,
                           log_dir=run_logs_dir,
                           additional_info=additional_info)

        while True:
            msg = conn.recv()

            # messages should be tuples,
            # msg[0] should be string

            if msg[0] == 'reset':
                o = e.reset(difficulty=msg[1], seed=msg[2])
                conn.send(o)
            elif msg[0] == 'step':
                ordi = e.step(msg[1])
                conn.send(ordi)
            elif msg[0] == 'close':
                e.close()
                conn.send(None)

                import psutil
                current_process = psutil.Process()
                children = current_process.children(recursive=True)
                for child in children:
                    child.terminate()
                return
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        conn.send(e)
Example #30
    def __init__(self, shared_object):
        self.env = shared_object.get("env", None)

        if self.env:
            self.env = RunEnv(shared_object.get('visualize', False))

        self.nb_actions = self.env.action_space.shape[0]

        ## memory parameters
        self.memoryLimit = shared_object.get('memoryLimit', 100000)
        self.window_length = shared_object.get('window_length', 1)

        ## random process parameters
        self.random_process_theta = shared_object.get('random_process_theta',
                                                      .15)
        self.random_process_mu = shared_object.get('random_process_mu', 0.)
        self.random_process_sigma = shared_object.get('random_process_sigma',
                                                      .2)

        ## building the networks

        super(example, self).__init__(shared_object)