                            traci.trafficlight.Phase(32, "rrGrrrrrGrrr"),
                            traci.trafficlight.Phase(2, "rryrrrrryrrr"),
                            traci.trafficlight.Phase(32, "rrrGGrrrrGGr"),
                            traci.trafficlight.Phase(2, "rrryyrrrryyr"),
                            traci.trafficlight.Phase(32, "rrrrrGrrrrrG"),
                            traci.trafficlight.Phase(2, "rrrrryrrrrry")
                            ])

    for run in range(1, args.runs+1):
        obs = env.reset()
        agent = TrueOnlineSarsaLambda(env.observation_space, env.action_space, alpha=args.alpha, gamma=args.gamma, epsilon=args.epsilon, fourier_order=21)
        
        done = False
        if args.fixed:
            while not done:
                _, _, done, _ = env.step({})
        else:
            while not done:
                action = agent.act(agent.get_features(obs))

                next_obs, r, done, _ = env.step(action=action)

                agent.learn(state=obs, action=action, reward=r, next_state=next_obs, done=done)

                obs = next_obs

        env.save_csv(out_csv, run)
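
The snippet above is truncated: the phase list it opens with appears to be the tail of the environment construction (older sumo-rl versions accepted the signal program as a phases argument). Below is a minimal sketch of how env and out_csv might be created for this single-agent loop, assuming sumo-rl's SumoEnvironment; the file paths, output prefix, and simulation length are hypothetical placeholders, only the parameter names follow sumo-rl's API.

# Hedged sketch, not part of the original snippet: paths and numbers are
# placeholders; only the parameter names follow sumo-rl's SumoEnvironment.
from sumo_rl import SumoEnvironment

out_csv = 'outputs/single-intersection/sarsa_lambda'            # hypothetical prefix
env = SumoEnvironment(net_file='single-intersection.net.xml',   # hypothetical path
                      route_file='single-intersection.rou.xml', # hypothetical path
                      out_csv_name=out_csv,
                      single_agent=True,
                      use_gui=False,
                      num_seconds=20000)                        # hypothetical length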



Example #2
        initial_states = env.reset()
        ql_agents = {
            ts:
            QLAgent(starting_state=env.encode(initial_states[ts]),
                    state_space=env.observation_space,
                    action_space=env.action_space,
                    alpha=alpha,
                    gamma=gamma,
                    exploration_strategy=EpsilonGreedy(initial_epsilon=0.05,
                                                       min_epsilon=0.005,
                                                       decay=decay))
            for ts in env.ts_ids
        }
        infos = []
        done = {'__all__': False}
        while not done['__all__']:
            actions = {ts: ql_agents[ts].act() for ts in ql_agents.keys()}

            s, r, done, info = env.step(actions=actions)
            infos.append(info)

            for agent_id in ql_agents.keys():
                ql_agents[agent_id].learn(new_state=env.encode(s[agent_id]),
                                          reward=r[agent_id])

        env.close()

        df = pd.DataFrame(infos)
        df.to_csv(
            'outputs/4x4grid/c2_alpha{}_gamma{}_decay{}_run{}.csv'.format(
                alpha, gamma, decay, run),
            index=False)
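
The EpsilonGreedy strategy above controls exploration: with probability epsilon the agent acts randomly, otherwise it takes the greedy action, and epsilon decays toward min_epsilon over time. The sketch below illustrates that schedule under the assumption of multiplicative decay; it mirrors the constructor arguments used above but is an illustration, not sumo-rl's actual implementation.

import numpy as np

# Minimal sketch (assumed multiplicative decay) of an epsilon-greedy strategy
# with the same constructor arguments as above; illustrative only.
class EpsilonGreedySketch:
    def __init__(self, initial_epsilon=0.05, min_epsilon=0.005, decay=0.99):
        self.epsilon = initial_epsilon
        self.min_epsilon = min_epsilon
        self.decay = decay

    def choose(self, q_table, state, action_space):
        # Explore with probability epsilon, otherwise exploit the greedy action.
        if np.random.rand() < self.epsilon:
            action = action_space.sample()
        else:
            action = int(np.argmax(q_table[state]))
        # Shrink epsilon multiplicatively, never below min_epsilon.
        self.epsilon = max(self.epsilon * self.decay, self.min_epsilon)
        return action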

Example #3

        initial_states = env.reset()
        ql_agents = {
            ts: QLAgent(starting_state=env.encode(initial_states[ts]),
                        state_space=env.observation_space,
                        action_space=env.action_space,
                        alpha=args.alpha,
                        gamma=args.gamma,
                        exploration_strategy=EpsilonGreedy(
                            initial_epsilon=args.epsilon,
                            min_epsilon=args.min_epsilon,
                            decay=args.decay))
            for ts in env.ts_ids
        }

        done = {'__all__': False}
        infos = []
        if args.fixed:
            while not done['__all__']:
                _, _, done, _ = env.step({})
        else:
            while not done['__all__']:
                actions = {ts: ql_agents[ts].act() for ts in ql_agents.keys()}

                s, r, done, _ = env.step(actions=actions)

                if args.v:
                    print('s=', env.radix_decode(ql_agents['t'].state), 'a=',
                          actions['t'], 's\'=',
                          env.radix_decode(env.encode(s['t'])), 'r=', r['t'])

                for agent_id in ql_agents.keys():
                    ql_agents[agent_id].learn(new_state=env.encode(s[agent_id]),
                                              reward=r[agent_id])
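
The calls to env.encode and env.radix_decode suggest the discretized observation is packed into a single integer so it can index a tabular Q-function. Below is a self-contained sketch of such mixed-radix packing, assuming feature i takes one of bases[i] discrete values; the function names are illustrative, not the environment's actual methods.

# Hypothetical sketch of mixed-radix state encoding: each discretized feature
# becomes one "digit", so a state tuple maps to a unique integer and back.
def radix_encode(digits, bases):
    value = 0
    for d, b in zip(digits, bases):
        value = value * b + d
    return value

def radix_decode(value, bases):
    digits = []
    for b in reversed(bases):
        digits.append(value % b)
        value //= b
    return list(reversed(digits))

# Round-trip check: encode then decode recovers the original tuple.
assert radix_decode(radix_encode([1, 0, 3], [2, 2, 10]), [2, 2, 10]) == [1, 0, 3]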

Example #4


    env = VisualizationEnv(
        env=env, 
        episodic=False,
        features_names=['Phase 0', 'Phase 1', 'Elapsed time'] + ['Density lane ' + str(i) for i in range(4)] + ['Queue lane ' + str(i) for i in range(4)],
        actions_names=['Phase 0', 'Phase 1']
    )

    for run in range(1, args.runs+1):
        initial_states = env.reset()
        ql_agents = {ts: QLAgent(starting_state=env.encode(initial_states),
                                 state_space=env.observation_space,
                                 action_space=env.action_space,
                                 alpha=args.alpha,
                                 gamma=args.gamma,
                                 exploration_strategy=EpsilonGreedy(initial_epsilon=args.epsilon,
                                                                    min_epsilon=args.min_epsilon,
                                                                    decay=args.decay))
                     for ts in env.ts_ids}

        env.set_agent(ql_agents['t'])
        env.add_plot('Epsilon', lambda: ql_agents['t'].exploration.epsilon)

        done = False
        while not done:
            actions = {ts: ql_agents[ts].act() for ts in ql_agents.keys()}
            s, r, done, _ = env.step(action=actions['t'])
            for agent_id in ql_agents.keys():
                ql_agents[agent_id].learn(new_state=env.encode(s), reward=r)
        env.close()

        env.join()
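
All four snippets read their hyperparameters from an args object. The sketch below shows an argparse setup consistent with the attributes used above (args.runs, args.alpha, args.gamma, args.epsilon, args.min_epsilon, args.decay, args.fixed, args.v); the flag spellings and default values are assumptions, not the original scripts' definitions.

import argparse

# Hypothetical argparse setup implied by the args.* fields used above;
# flag names mirror the attribute names, defaults are placeholders.
parser = argparse.ArgumentParser()
parser.add_argument('-runs', type=int, default=1, help='Number of training runs.')
parser.add_argument('-alpha', type=float, default=0.1, help='Learning rate.')
parser.add_argument('-gamma', type=float, default=0.99, help='Discount factor.')
parser.add_argument('-epsilon', type=float, default=0.05, help='Initial exploration rate.')
parser.add_argument('-min_epsilon', type=float, default=0.005, help='Exploration floor.')
parser.add_argument('-decay', type=float, default=1.0, help='Epsilon decay factor.')
parser.add_argument('-fixed', action='store_true', help='Run fixed-time control instead of RL.')
parser.add_argument('-v', action='store_true', help='Print transitions for traffic signal "t".')
args = parser.parse_args()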