Example #1
def main():
    # Control code here
    sumoCmd = [sumoBinary, "-c", sumoConfig, "--start"]
    traci.start(sumoCmd)
    TLIds = traci.trafficlight.getIDList()
    actionsMap = makemap(TLIds)
    detectorIDs = traci.inductionloop.getIDList()
    state_space_size = traci.inductionloop.getIDCount() * 2
    action_space_size = len(actionsMap)
    agent = Learner(state_space_size, action_space_size, 0.1)
    agent.load("./save/traffic.h5")
    # Read the initial state from the induction-loop detectors
    state = get_state(detectorIDs)
    total_reward = 0
    simulationSteps = 0
    while simulationSteps < 5000:
        action = agent.act(state)
        lightsPhase = actionsMap[action]
        for index, light in enumerate(TLIds):
            traci.trafficlight.setPhase(light, lightsPhase[index])
        for i in range(2):
            traci.simulationStep()
            time.sleep(0.4)
        simulationSteps += 2
        next_state = get_state(detectorIDs)
        reward = calc_reward(state, next_state)
        total_reward += reward
        agent.remember(state, action, reward, next_state)
        state = next_state
    traci.close()
    print "Simulation Reward: {}".format(total_reward)
Example #2
def main():
    # Control code here
    sumoCmd = [sumoBinary, "-c", sumoConfig, "--start"]
    traci.start(sumoCmd)
    TLIds = traci.trafficlight.getIDList()
    actionsMap = makemap(TLIds)
    detectorIDs = traci.inductionloop.getIDList()
    print(TLIds)
    state_space_size = traci.inductionloop.getIDCount() * 2
    action_space_size = len(actionsMap)
    agent = Learner(state_space_size, action_space_size, 1.0)
    # agent.load("./save/traffic.h5")
    traci.close()
    epochs = 20000
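Example #2 stops right after constructing the agent, so the Learner interface is worth spelling out: the examples call act, remember, replay, save and load, and the third constructor argument is evidently the initial exploration rate (0.1 for the pre-trained agent in Example #1, 1.0 when training from scratch). Below is a minimal DQN-style sketch of such a class, assuming Keras and an epsilon-greedy policy; the original may use a different network or update rule.

import random
from collections import deque

import numpy as np
from tensorflow import keras


class Learner:
    # Hypothetical DQN-style agent matching the interface used above:
    # act(), remember(), replay(), load(), save().
    def __init__(self, state_size, action_size, epsilon):
        self.state_size = state_size
        self.action_size = action_size
        self.epsilon = epsilon          # exploration rate (1.0 = fully random)
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.gamma = 0.95               # discount factor
        self.memory = deque(maxlen=2000)
        self.model = self._build_model()

    def _build_model(self):
        model = keras.Sequential([
            keras.layers.Dense(64, activation="relu", input_dim=self.state_size),
            keras.layers.Dense(64, activation="relu"),
            keras.layers.Dense(self.action_size, activation="linear"),
        ])
        model.compile(loss="mse", optimizer=keras.optimizers.Adam(learning_rate=1e-3))
        return model

    def act(self, state):
        # Epsilon-greedy: explore at random, otherwise pick the best Q-value.
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        return int(np.argmax(self.model.predict(state, verbose=0)[0]))

    def remember(self, state, action, reward, next_state):
        self.memory.append((state, action, reward, next_state))

    def replay(self, batch_size=32):
        # Fit the network on a random minibatch of remembered transitions.
        batch = random.sample(self.memory, min(batch_size, len(self.memory)))
        for state, action, reward, next_state in batch:
            target = reward + self.gamma * np.amax(
                self.model.predict(next_state, verbose=0)[0])
            target_f = self.model.predict(state, verbose=0)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def load(self, name):
        self.model.load_weights(name)

    def save(self, name):
        self.model.save_weights(name)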
Example #3
def main():
    # Control code here
    sumoGuiCmd = [
        sumoBinary, "-c", sumoConfig, "--fcd-output", dumpFile,
        "--tripinfo-output", tripInfoFinal
    ]
    sumoCliCmd = [sumoCli, "-c", sumoConfig, "--start"]
    traci.start(sumoCliCmd)
    TLIds = traci.trafficlight.getIDList()
    actionsMap = makemap(TLIds)
    detectorIDs = traci.inductionloop.getIDList()
    state_space_size = traci.inductionloop.getIDCount() * 2
    action_space_size = len(actionsMap)
    print("Action space size:", action_space_size)
    agent = Learner(state_space_size, action_space_size, 1.0)
    #agent.load("./save/EdgeDensityModel.h5")
    traci.close()
    epochs = 10
    for simulation in range(epochs):
        if simulation % 9 == 0 and simulation != 0:
            traci.start(sumoGuiCmd)
        else:
            traci.start(sumoCliCmd)
        # Read the initial state from the induction-loop detectors
        state = get_state(detectorIDs)
        state1 = get_state_edge_density()
        total_reward = 0
        simulationSteps = 0
        while simulationSteps < 1000:
            action = agent.act(state)
            lightsPhase = actionsMap[action]
            for index, light in enumerate(TLIds):
                traci.trafficlight.setPhase(light, lightsPhase[index])
            for i in range(2):
                traci.simulationStep()
            simulationSteps += 2
            next_state = get_state(detectorIDs)
            next_state1 = get_state_edge_density()
            reward = calc_reward(state, next_state)
            #reward = calc_reward_edge_density(state1, next_state1)
            total_reward += reward
            #agent.remember(state1, action, reward, next_state1)
            state = next_state
            state1 = next_state1
        traci.close()
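The snippets also assume module-level variables such as sumoBinary, sumoCli, sumoConfig, dumpFile and tripInfoFinal. A plausible setup block, assuming the standard SUMO_HOME layout and sumolib's checkBinary helper; the file names here are placeholders:

import os
import sys

# SUMO ships its Python tools next to the binaries; make them importable.
if "SUMO_HOME" in os.environ:
    sys.path.append(os.path.join(os.environ["SUMO_HOME"], "tools"))

import traci
from sumolib import checkBinary

sumoBinary = checkBinary("sumo-gui")   # GUI build, used every ninth run in Example #3
sumoCli = checkBinary("sumo")          # headless build for fast training
sumoConfig = "scenario.sumocfg"        # placeholder scenario file
dumpFile = "fcd_output.xml"            # placeholder FCD dump
tripInfoFinal = "tripinfo.xml"         # placeholder trip-info output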
Example #4
def main():
    # Control code here
    sumoCmd = [sumoBinary, "-c", sumoConfig, "--start"]
    traci.start(sumoCmd)
    TLIds = traci.trafficlight.getIDList()
    actionsMap = makemap(TLIds)
    detectorIDs = traci.inductionloop.getIDList()
    state_space_size = traci.inductionloop.getIDCount() * 2
    action_space_size = len(actionsMap)
    agent = Learner(state_space_size, action_space_size, 1.0)
    # agent.load("./save/traffic.h5")
    traci.close()
    epochs = 1000
    for simulation in range(epochs):
        traci.start(sumoCmd)
        # Read the initial state from the induction-loop detectors
        state = get_state(detectorIDs)
        total_reward = 0
        simulationSteps = 0
        while simulationSteps < 1000:
            action = agent.act(state)
            lightsPhase = actionsMap[action]
            for index, light in enumerate(TLIds):
                traci.trafficlight.setPhase(light, lightsPhase[index])
            for i in range(2):
                traci.simulationStep()
            simulationSteps += 2
            next_state = get_state(detectorIDs)
            reward = calc_reward(state, next_state)
            total_reward += reward
            agent.remember(state, action, reward, next_state)
            state = next_state
        traci.close()
        with open("ResultsOfSimulations.txt", "a") as f:
            f.write("Simulation {}: {}\n".format(simulation, total_reward))
        agent.replay()
        if simulation % 10 == 0:
            agent.save("./save/traffic.h5")
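Example #4 is the only complete training loop: it replays the experience buffer after every episode, logs each episode's total reward to ResultsOfSimulations.txt, and checkpoints the network to ./save/traffic.h5 every tenth run, the same file the pre-trained agent in Example #1 loads for its slowed-down evaluation pass.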
Example #5
def main():
    # Control code here
    sumoCmd = [sumoBinary, "-c", sumoConfig, "--start"]
    traci.start(sumoCmd)
    TLIds = traci.trafficlight.getIDList()
    actionsMap = makemap(TLIds)
    detectorIDs = traci.inductionloop.getIDList()
    state_space_size = traci.inductionloop.getIDCount() * 2
    action_space_size = len(actionsMap)
    agent = Learner(state_space_size, action_space_size, 1.0)
    # agent.load("./save/traffic.h5")
    traci.close()  # required before the per-episode traci.start() calls below
    epochs = 10
    for simulation in range(epochs):
        traci.start(sumoCmd)
        # Read the initial state from the induction-loop detectors
        state = get_state(detectorIDs)
        state1 = get_state_edge_density()
        total_reward = 0
        simulationSteps = 0
        while simulationSteps < 1000:
            action = agent.act(state)
            lightsPhase = actionsMap[action]
            for index, light in enumerate(TLIds):
                traci.trafficlight.setPhase(light, lightsPhase[index])
            for i in range(2):
                traci.simulationStep()
            simulationSteps += 2
            next_state = get_state(detectorIDs)
            next_state1 = get_state_edge_density()
            # Calculate the reward from the change in detector state
            reward = calc_reward(state, next_state)
            total_reward += reward
            # Remember the transition for experience replay
            agent.remember(state, action, reward, next_state)
            state = next_state
            state1 = next_state1
            print(state1)
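Examples #3 and #5 additionally track an edge-density state (get_state_edge_density, plus the commented-out calc_reward_edge_density), apparently an experiment in replacing per-detector features with per-edge occupancy. A minimal sketch of what those helpers might look like, built on TraCI's edge domain; the real ones may aggregate differently:

import numpy as np
import traci


def get_state_edge_density():
    # Hypothetical: one occupancy value (share of the edge covered by
    # vehicles in the last step) per network edge.
    edges = traci.edge.getIDList()
    density = [traci.edge.getLastStepOccupancy(edge) for edge in edges]
    return np.reshape(density, [1, len(density)])


def calc_reward_edge_density(state1, next_state1):
    # Hypothetical: reward a net decrease in total edge occupancy.
    return float(np.sum(state1) - np.sum(next_state1))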