def build(path, is_virtual=False):
    # create world
    world = World(path, thread_num=args.thread)

    # create agents
    agents = []
    for i in world.intersections:
        action_space = gym.spaces.Discrete(len(i.phases))
        agents.append(
            PressLightAgent(
                action_space,
                LaneVehicleGenerator(world,
                                     i, ["lane_count"],
                                     in_only=True,
                                     average=None),
                LaneVehicleGenerator(world,
                                     i, ["lane_waiting_count"],
                                     in_only=True,
                                     average="all",
                                     negative=True), i.id, world, is_virtual))
        if args.load_model:
            agents[-1].load_model(args.save_dir)
        # if len(agents) == 5:
        #     break
    # print(agents[0].ob_length)
    # print(agents[0].action_space)

    # create metric
    metric = TravelTimeMetric(world)

    # create env
    env = TSCEnv(world, agents, metric)
    return world, agents, env
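A minimal usage sketch for the build() helper above; the rollout mirrors the simulation loop in Example #3, and the args fields (config_file, steps, action_interval) are assumed to come from the surrounding script rather than from this example:

world, agents, env = build(args.config_file)
obs = env.reset()
i = 0
while i < args.steps:
    if i % args.action_interval == 0:
        # each agent picks a phase from its own observation
        actions = [agent.get_action(obs[agent_id])
                   for agent_id, agent in enumerate(agents)]
    obs, rewards, dones, info = env.step(actions)
    i += 1
print(env.eng.get_average_travel_time())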
Example #2
def init(args, test=False):
    tf_mute_warning()
    # derive the save directory from the config file name
    # (the slicing assumes a fixed "config/<name>.json" path layout)
    args.save_dir = save_dir + args.config_file[7:-5]
    if test:
        args.save_dir = save_dir + args.config_file[7:-10]

    # config_name = args.config_file.split('/')[1].split('.')[0]
    # args.agent_save_dir = args.save_dir + "/" + config_name
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)

    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)
    logger = logging.getLogger('main')
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler(
        os.path.join(args.log_dir,
                     datetime.now().strftime('%Y%m%d-%H%M%S') + ".log"))
    fh.setLevel(logging.DEBUG)
    sh = logging.StreamHandler()
    sh.setLevel(logging.INFO)
    logger.addHandler(fh)
    logger.addHandler(sh)

    # create world
    world = World(args.config_file, thread_num=args.thread, silent=True)

    # create agents
    agents = []
    for i in world.intersections:
        action_space = gym.spaces.Discrete(len(i.phases))
        agents.append(
            DQNAgent(
                action_space,
                LaneVehicleGenerator(world,
                                     i, ["lane_count"],
                                     in_only=True,
                                     average=None),
                LaneVehicleGenerator(world,
                                     i, ["lane_waiting_count"],
                                     in_only=True,
                                     average="all",
                                     negative=True), i.id))
        if args.load_model:
            agents[-1].load_model(args.save_dir)
    if args.share_weights:
        model = agents[0].model
        for agent in agents:
            agent.model = model

    # create metric
    metric = TravelTimeMetric(world)

    # create env
    env = TSCEnv(world, agents, metric)

    return env
Example #3
def run(args):
    # create world
    world = World(args.config_file, thread_num=args.thread, silent=True)

    # create agents
    agents = []
    for i in world.intersections:
        action_space = gym.spaces.Discrete(len(i.phases))
        agents.append(
            MaxPressureAgent(
                action_space, i, world,
                LaneVehicleGenerator(world,
                                     i, ["lane_count"],
                                     in_only=True,
                                     average=None),
                LaneVehicleGenerator(world,
                                     i, ["lane_waiting_count"],
                                     in_only=True,
                                     average="all",
                                     negative=True)))

    # create metric
    metric = TravelTimeMetric(world)

    # create env
    env = TSCEnv(world, agents, metric)

    # simulate
    obs = env.reset()
    actions = []
    i = 0
    reward_sum = 0.
    reward_cnt = 0.
    while i < args.steps:
        if i % args.action_interval == 0:
            actions = []
            for agent_id, agent in enumerate(agents):
                actions.append(agent.get_action(obs[agent_id]))
            for _ in range(args.action_interval):
                obs, rewards, dones, info = env.step(actions)
                i += 1
            for reward in rewards:
                reward_sum += reward
                reward_cnt += 1
    print("avg queue length: {}".format(reward_sum / reward_cnt))

    result = env.eng.get_average_travel_time()
    return result
def build(path):
    # create world
    world = World(path, thread_num=args.thread)

    # create agents
    agents = []
    for idx, i in enumerate(world.intersections):
        action_space = gym.spaces.Discrete(len(i.phases))
        agents.append(
            IntelliLightAgent(action_space, [
                LaneVehicleGenerator(world,
                                     i, ["lane_waiting_count"],
                                     in_only=True,
                                     average="lane"),
                LaneVehicleGenerator(
                    world, i, ["lane_count"], in_only=True, average="lane"),
                LaneVehicleGenerator(world,
                                     i, ["lane_waiting_time_count"],
                                     in_only=True,
                                     average="lane"),
                IntersectionVehicleGenerator(world, i, targets=["vehicle_map"])
            ], [
                LaneVehicleGenerator(world,
                                     i, [
                                         "lane_waiting_count", "lane_delay",
                                         "lane_waiting_time_count"
                                     ],
                                     in_only=True,
                                     average="all"),
                IntersectionVehicleGenerator(
                    world, i, targets=["passed_count", "passed_time_count"])
            ], world, idx))
        if args.load_model:
            agents[-1].load_model(args.save_dir)

    # create metric
    metric = TravelTimeMetric(world)

    # create env
    env = TSCEnv(world, agents, metric)
    return world, agents, env
def build(path):
    world = World(path, thread_num=args.thread)
    # create observation generator, which is used to construct sample
    observation_generators = []
    for node_dict in world.intersections:
        node_id = node_dict.id
        node_id_int = net_node_dict_inter2id[node_id]
        tmp_generator = LaneVehicleGenerator(world,
                                             node_dict, ["lane_count"],
                                             in_only=True,
                                             average='road')
        observation_generators.append((node_id_int, tmp_generator))
        # if len(observation_generators) == 5:
        #     break
    # sort the observation generators in place by their integer intersection id (ascending)
    observation_generators.sort(key=lambda x: x[0])

    # create agent
    action_space = gym.spaces.Discrete(len(world.intersections[0].phases))
    colightAgent = CoLightAgent(
        action_space, observation_generators,
        LaneVehicleGenerator(world,
                             world.intersections[0], ["lane_waiting_count"],
                             in_only=True,
                             average="all",
                             negative=True), world, dic_traffic_env_conf,
        dic_graph_setting, args)
    if args.load_model:
        colightAgent.load_model(args.load_dir)
    print(colightAgent.ob_length)
    print(colightAgent.action_space)
    # create metric
    metric = TravelTimeMetric(world)

    # create env
    env = TSCEnv(world, colightAgent, metric)
    return world, colightAgent, env
Example #6
def init(args, test=False):
    tf_mute_warning()
    args.save_dir = save_dir + args.config_file[7:-5]
    if test:
        args.save_dir = save_dir + args.config_file[7:-10]

    # config_name = args.config_file.split('/')[1].split('.')[0]
    # args.agent_save_dir = args.save_dir + "/" + config_name
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)

    # create world
    world = World(args.config_file, thread_num=args.thread, silent=True)

    # create agents
    agents = []
    for i in world.intersections:
        action_space = gym.spaces.Discrete(len(i.phases))
        agents.append(
            FRAP_DQNAgent(
                action_space,
                LaneVehicleGenerator(world,
                                     i, ["lane_count"],
                                     in_only=True,
                                     average=None),
                LaneVehicleGenerator(world,
                                     i, ["lane_waiting_count"],
                                     in_only=True,
                                     average="all",
                                     negative=True), world, i.id))

    # create metric
    metric = TravelTimeMetric(world)

    # create env
    env = TSCEnv(world, agents, metric)

    return env
Example #7
def build(path):
    # create world
    world = World(path, thread_num=args.thread)

    # create agents
    agents = []
    for i in world.intersections:
        action_space = gym.spaces.Discrete(len(i.phases))
        agents.append(DQNAgent(
            action_space,
            LaneVehicleGenerator(world, i, ["lane_count"], in_only=True, average=None),
            LaneVehicleGenerator(world, i, ["lane_waiting_count"], in_only=True, average="all", negative=True),
            i.id
        ))
        if args.load_model:
            agents[-1].load_model(args.save_dir)

    # create metric
    metric = TravelTimeMetric(world)

    # create env
    env = TSCEnv(world, agents, metric)
    return world, agents, env
def test(path):
    # create world
    world = World(path, thread_num=args.thread)

    # create agents
    agents = []
    for i in world.intersections:
        action_space = gym.spaces.Discrete(len(i.phases))
        agents.append(
            MaxPressureAgent(
                action_space, i, world,
                LaneVehicleGenerator(world, i, ["lane_count"], in_only=True)))

    # create metric
    metric = TravelTimeMetric(world)

    # create env
    env = TSCEnv(world, agents, metric)

    # simulate
    obs = env.reset()
    actions = []
    for i in range(args.steps):
        actions = []
        for agent_id, agent in enumerate(agents):
            actions.append(agent.get_action(obs[agent_id]))
        obs, rewards, dones, info = env.step(actions)
        #print(world.intersections[0]._current_phase, end=",")
        # print(obs, actions)
        # print(env.eng.get_average_travel_time())
        #print(obs)
        #print(rewards)
        # print(info["metric"])

    logger.info("Final Travel Time is %.4f" %
                env.eng.get_average_travel_time())
    return env.eng.get_average_travel_time()
def build(path):
    # create world
    world = World(path, thread_num=args.thread)

    # create agents
    agents = []
    for i in world.intersections:
        action_space = gym.spaces.Discrete(len(i.phases))
        agents.append(
            MADDPGAgent(
                action_space,
                LaneVehicleGenerator(world,
                                     i, ["lane_count"],
                                     in_only=True,
                                     average=None),
                LaneVehicleGenerator(world,
                                     i, ["lane_waiting_count"],
                                     in_only=True,
                                     average="all",
                                     negative=True), args, i.id))
    ob_space_n = []
    action_space_n = []
    for agent in agents:
        ob_space_n.append(agent.ob_shape)
        action_space_n.append(agent.action_space)
    print(ob_space_n)
    print(action_space_n)
    for i, agent in enumerate(agents):
        agent.build_model(ob_space_n, action_space_n, i)

    # create metric
    metric = TravelTimeMetric(world)

    # create env
    env = TSCEnv(world, agents, metric)
    return world, agents, env
Example #10
    'green_v': args.green_v,
    'red_v': args.red_v
}

#Create world
world = World(args.config_file, thread_num=args.thread)

#Create agents
agents = []
for i in world.intersections:
    action_space = gym.spaces.Discrete(len(i.phases))
    agents.append(SOTLAgent(action_space, options, i, world))

# Create metric
metric = [
    TravelTimeMetric(world),
    ThroughputMetric(world),
    SpeedScoreMetric(world),
    MaxWaitingTimeMetric(world)
]

#Create env
env = TSCEnv(world, agents, metric)

obs = env.reset()
actions = []
steps = 0
episodes_rewards = 0

#Walk through the steps
while steps < args.steps:
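    # hedged sketch of the loop body, following the rollout pattern used by the
    # other examples in this listing (agent.get_action / env.step); the
    # episodes_rewards bookkeeping is an assumption
    actions = []
    for agent_id, agent in enumerate(agents):
        actions.append(agent.get_action(obs[agent_id]))
    obs, rewards, dones, info = env.step(actions)
    episodes_rewards += sum(rewards)
    steps += 1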
Example #11
        StateOfThreeGenerator(world, i, ["state_of_three"], in_only=True, average=None),
        LaneVehicleGenerator(world, i, ["lane_waiting_count"], in_only=True, average="all", negative=True),
        i,
        world,
        file_name,
        info_file
    ))
    if args.load_model:
        agents[-1].load_model(args.save_dir)



print(i.phases)

# Create metric
metric = [TravelTimeMetric(world), ThroughputMetric(world), SpeedScoreMetric(world), MaxWaitingTimeMetric(world)]

# create env
env = TSCEnv(world, agents, metric)

# number of controlled intersections / agents
n_agents = len(agents)

# train dqn_agent
def train(args, env):
    for e in range(episodes):

        for agent in agents:
            agent.reset_episode_infos()

        # scale the raw observations by 0.01
        first_obs = np.array(env.reset()) * 0.01
        current_obs = first_obs
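        # hedged sketch of one decision step, assuming the same get_action /
        # env.step interface used by the other agents in this listing
        actions = [agent.get_action(ob) for agent, ob in zip(agents, current_obs)]
        next_obs, rewards, dones, info = env.step(actions)
        current_obs = np.array(next_obs) * 0.01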
Example #12
args = parser.parse_args()

# create world
world = World(args.config_file, thread_num=args.thread)

# create agents
agents = []
for i in world.intersections:
    action_space = gym.spaces.Discrete(len(i.phases))
    agents.append(
        MaxPressureAgent(
            action_space, i, world,
            LaneVehicleGenerator(world, i, ["lane_count"], in_only=True)))

# create metric
metric = [TravelTimeMetric(world), QueueLengthMetric(world)]
world.subscribe("lane_waiting_time_count")

# create env
env = TSCEnv(world, agents, metric)

# simulate
obs = env.reset()
actions = []
for i in range(args.steps):
    actions = []
    for agent_id, agent in enumerate(agents):
        actions.append(agent.get_action(obs[agent_id]))
    obs, rewards, dones, info = env.step(actions)
    for metric_obj in env.metric:
        metric_obj.update()
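# report the final result once the rollout finishes;
# env.eng.get_average_travel_time() follows the other examples in this listing
print("Final Travel Time is %.4f" % env.eng.get_average_travel_time())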
Example #13
def create_colight_env(args, agent="bc_colight"):
    config = json.load(open(args.config_file, "r"))
    road_net_file_path = config["dir"] + config["roadnetFile"]
    res = build_int_intersection_map(road_net_file_path)
    net_node_dict_id2inter = res[0]
    net_node_dict_inter2id = res[1]
    net_edge_dict_id2edge = res[2]
    net_edge_dict_edge2id = res[3]
    node_degree_node = res[4]
    node_degree_edge = res[5]
    node_adjacent_node_matrix = res[6]
    node_adjacent_edge_matrix = res[7]
    edge_adjacent_node_matrix = res[8]

    # create world
    world = World(args.config_file, thread_num=args.thread, silent=True)

    dic_traffic_env_conf = {
        "NUM_INTERSECTIONS": len(net_node_dict_id2inter),  # used
        "NUM_ROADS": len(net_edge_dict_id2edge),  # used
    }

    dic_graph_setting = {
        "NEIGHBOR_NUM": 4,  # standard number of adjacent nodes of each node
        "NEIGHBOR_EDGE_NUM":
        4,  # # standard number of adjacent edges of each node
        "N_LAYERS": 1,  # layers of MPNN
        "INPUT_DIM": [128, 128],
        # input dimension of each layer of multiheadattention, the first value should == the last value of "NODE_EMB_DIM"
        "OUTPUT_DIM": [128, 128],
        # output dimension of each layer of multiheadattention, the first value should == the last value of "NODE_EMB_DIM"
        "NODE_EMB_DIM":
        [128, 128],  # the firsr two layer of dense to embedding the input
        "NUM_HEADS": [5, 5],
        "NODE_LAYER_DIMS_EACH_HEAD": [16, 16],  # [input_dim,output_dim]
        "OUTPUT_LAYERS": [],  #
        "NEIGHBOR_ID":
        node_adjacent_node_matrix,  # adjacent node id of each node
        "ID2INTER_MAPPING":
        net_node_dict_id2inter,  # id ---> intersection mapping
        "INTER2ID_MAPPING":
        net_node_dict_inter2id,  # intersection ----->id mapping
        "NODE_DEGREE_NODE":
        node_degree_node,  # number of adjacent nodes of node
    }
    tmp_agents = []
    observation_generators = []
    for node_dict in world.intersections:
        node_id = node_dict.id
        action_space = gym.spaces.Discrete(len(node_dict.phases))
        node_id_int = net_node_dict_inter2id[node_id]
        tmp_generator = LaneVehicleGenerator(world,
                                             node_dict, ["lane_count"],
                                             in_only=True,
                                             average=None)
        observation_generators.append((node_id_int, tmp_generator))
    # sort the observation generators in place by their integer intersection id (ascending)
    observation_generators.sort(key=lambda x: x[0])
    # create agent
    action_space = gym.spaces.Discrete(len(world.intersections[0].phases))
    if agent == "bc_colight":
        colightAgent = BCCoLightAgent(action_space, observation_generators,
                                      world, dic_traffic_env_conf,
                                      dic_graph_setting, args)
    elif agent == "colight":
        colightAgent = CoLightAgent(
            action_space, observation_generators,
            LaneVehicleGenerator(world,
                                 world.intersections[0],
                                 ["lane_waiting_count"],
                                 in_only=True,
                                 average="all",
                                 negative=True), world, dic_traffic_env_conf,
            dic_graph_setting, args)
    else:
        colightAgent = None
    # print(colightAgent.ob_length)
    # print(colightAgent.action_space)
    # create metric
    metric = TravelTimeMetric(world)
    agents = [colightAgent]
    # create env
    env = TSCEnv(world, agents, metric)
    return env
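A minimal usage sketch for create_colight_env; resetting and stepping the env follows the other examples, while env.agents[0] and its get_action call (the single CoLight agent acting on the joint observation) are assumptions about the TSCEnv/CoLightAgent interfaces:

env = create_colight_env(args, agent="colight")
obs = env.reset()
for _ in range(args.steps):
    # the single agent controls all intersections from the joint observation (assumed)
    actions = env.agents[0].get_action(obs)
    obs, rewards, dones, info = env.step(actions)
print(env.eng.get_average_travel_time())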