Example #1
    def initIndividuals(self):
        # Delegate construction of this pool's individuals to the EvolutionaryLearner module
        self.individuals = EvolutionaryLearner.initIndividuals(self)
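Example #1 shows an agent pool handing construction of its individuals off to the evolutionary learner instead of building them itself. The sketch below illustrates that delegation pattern in isolation, under stated assumptions: AgentPool, Individual, ruleSpace and the body of the learner method are hypothetical stand-ins, not the actual SATLO classes.

import random


class Individual:
    def __init__(self, rules):
        self.rules = rules  # hypothetical: the rule set this individual carries
        self.fitness = 0.0


class EvolutionaryLearner:
    @staticmethod
    def initIndividuals(agentPool, poolSize=10):
        # Hypothetical construction: give each individual a small random subset of the pool's rules
        k = min(2, len(agentPool.ruleSpace))
        return [Individual(random.sample(agentPool.ruleSpace, k)) for _ in range(poolSize)]


class AgentPool:
    def __init__(self, ruleSpace):
        self.ruleSpace = ruleSpace
        self.individuals = []

    def initIndividuals(self):
        # Same shape as Example #1: the pool delegates individual creation to the learner
        self.individuals = EvolutionaryLearner.initIndividuals(self)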
Example #2
File: Driver.py Project: croatis/SATLO
    def run(self):
        numOfRSRulesApplied = 0
        numofRSintRulesApplied = 0
        traci.start(
            self.sumoCmd
        )  # Start SUMO. Comment out if running Driver as standalone module.

        # Run set-up script and acquire list of user-defined rules and traffic light agents in the simulation
        userDefinedRules = self.setUpTuple[0]
        trafficLights = self.setUpTuple[1]
        rule = -1
        nextRule = -1

        # Assign each traffic light an individual from their agent pool for this simulation run, and a starting rule
        for tl in trafficLights:
            tl.assignIndividual()
            tl.updateCurrentPhase(traci.trafficlight.getPhaseName(
                tl.getName()))

            rule = self.applicableUserDefinedRule(
                tl, userDefinedRules)  # Check user-defined rules

            # If no user-defined rules can be applied, get a rule from Agent Pool
            if rule == False or rule is None:
                validRules = self.getValidRules(tl, tl.getAssignedIndividual())
                rule = tl.getNextRule(validRules[0], validRules[1],
                                      traci.simulation.getTime()
                                      )  # Get a rule from assigned Individual

                # If no valid rule is applicable, apply the Do Nothing rule.
                if rule == -1:
                    tl.doNothing()  # Update traffic light's Do Nothing counter
                    tl.getAssignedIndividual().updateFitnessPenalty(
                        False, 0)  # Update fitness penalty for individual

                else:
                    # If rule conditions are satisfied, apply its action. Otherwise, do nothing.
                    if not rule.hasDoNothingAction():
                        traci.trafficlight.setPhase(tl.getName(),
                                                    rule.getAction())
                        tl.resetTimeInCurrentPhase()
            else:
                self.applyUserDefinedRuleAction(
                    tl, traci.trafficlight.getPhaseName(tl.getName()), rule)
                tl.resetTimeInCurrentPhase()

            tl.setCurrentRule(rule)  # Set current rule in traffic light
            tl.updateTimePhaseSpentInRed(
                traci.trafficlight.getPhase(tl.getName()), 5)

        # Simulation loop
        step = 0
        # Variables for rule rewards
        carsWaitingBefore = {}
        carsWaitingAfter = {}
        while traci.simulation.getMinExpectedNumber(
        ) > 0 and traci.simulation.getTime() < self.maxSimulationTime:
            tl.removeOldIntentions(traci.simulation.getTime())
            traci.simulationStep(
            )  # Advance SUMO simulation one step (1 second)

            # Traffic Light agents reevaluate their state every 5 seconds
            if step % 5 == 0:
                # For every traffic light in simulation, select and evaluate new rule from its agent pool
                for tl in trafficLights:

                    #USER DEFINED RULE CHECK
                    #-------------------------------------------------------
                    if self.assignGreenPhaseToSingleWaitingPhase_UDRule:
                        applied = self.checkAssignGreenPhaseToSingleWaitingPhaseRule(
                            tl)
                        if applied is True:
                            continue
                    if self.maxGreenAndYellow_UDRule:
                        applied = self.checkMaxGreenAndYellowPhaseRule(
                            tl, nextRule)
                        if applied is True:
                            continue

                    if self.maxRedPhaseTime_UDRule:
                        applied = self.checkMaxRedPhaseTimeRule(tl)
                        if applied is True:
                            continue

                    # END USER DEFINED RULE CHECK
                    #-------------------------------------------------------

                    tl.updateTimeInCurrentPhase(5)

                    carsWaitingBefore = tl.getCarsWaiting()
                    carsWaitingAfter = self.carsWaiting(tl)

                    nextRule = self.applicableUserDefinedRule(
                        tl, userDefinedRules
                    )  # Check if a user-defined rule can be applied

                    # If no user-defined rules can be applied, get a rule from Agent Pool
                    if nextRule == False:
                        validRules = self.getValidRules(
                            tl, tl.getAssignedIndividual())
                        print("Valid rules for RS are", validRules[0],
                              "and valid rules for RSint are", validRules[1],
                              "\n\n")

                        if len(validRules[0]) == 0 and len(validRules[1]) == 0:
                            nextRule = -1  # -1 is used to represent "no valid next rule"
                        else:
                            nextRule = tl.getNextRule(
                                validRules[0], validRules[1],
                                traci.simulation.getTime(
                                ))  # Get a rule from assigned Individual

                        if nextRule == -1:
                            tl.doNothing(
                            )  # Update traffic light's Do Nothing counter
                            tl.getAssignedIndividual().updateFitnessPenalty(
                                False,
                                False)  # Update fitness penalty for individual

                        # If next rule is not a user-defined rule, update the weight of the last applied rule
                        else:
                            oldRule = tl.getCurrentRule()
                            # If applied rule isn't user-defined, update its weight
                            if oldRule not in userDefinedRules:
                                if oldRule != -1:
                                    ruleWeightBefore = oldRule.getWeight(
                                    )  # Used to calculate fitness penalty to individual
                                    oldRule.updateWeight(
                                        ReinforcementLearner.updatedWeight(
                                            oldRule, nextRule,
                                            self.getThroughputRatio(
                                                self.getThroughput(
                                                    tl, carsWaitingBefore,
                                                    carsWaitingAfter),
                                                len(carsWaitingBefore)),
                                            self.getWaitTimeReducedRatio(
                                                self.getThroughputWaitingTime(
                                                    tl, carsWaitingBefore,
                                                    carsWaitingAfter),
                                                self.getTotalWaitingTime(
                                                    carsWaitingBefore)),
                                            len(carsWaitingAfter) -
                                            len(carsWaitingBefore)))
                                    tl.getAssignedIndividual(
                                    ).updateFitnessPenalty(
                                        True,
                                        oldRule.getWeight() > ruleWeightBefore)

                                # Apply the next rule; if action is -1 then action is do nothing
                                if not nextRule.hasDoNothingAction():
                                    traci.trafficlight.setPhase(
                                        tl.getName(), nextRule.getAction())

                                    if nextRule is not tl.getCurrentRule():
                                        traci.trafficlight.setPhase(
                                            tl.getName(), nextRule.getAction())
                                        tl.resetTimeInCurrentPhase()

                                if nextRule.getType() == 0:
                                    print(
                                        "Applying TL action from RS! Action is",
                                        nextRule.getAction(), "\n\n")
                                    numOfRSRulesApplied += 1
                                else:
                                    print(
                                        "Applying TL action from RSint! Action is",
                                        nextRule.getAction(), "\n\n")
                                    numofRSintRulesApplied += 1
                    else:
                        self.applyUserDefinedRuleAction(
                            tl, traci.trafficlight.getPhaseName(tl.getName()),
                            nextRule)
                        tl.resetTimeInCurrentPhase()

                    #USER DEFINED RULE CHECK
                    if self.maxGreenAndYellow_UDRule:
                        self.checkMaxGreenAndYellowPhaseRule(tl, nextRule)

                    if self.assignGreenPhaseToSingleWaitingPhase_UDRule:
                        self.checkAssignGreenPhaseToSingleWaitingPhaseRule(tl)

                    if self.maxRedPhaseTime_UDRule:
                        self.checkMaxRedPhaseTimeRule(tl)

                    tl.setCurrentRule(
                        nextRule
                    )  # Update the currently applied rule in the traffic light
                    tl.updateCarsWaiting(
                        carsWaitingAfter
                    )  # Set the number of cars waiting count within the TL itself

            step += 1  # Increment step in line with simulator

        # Update the fitness of each individual involved in the simulation based on this run's results
        simRunTime = traci.simulation.getTime()
        print("***SIMULATION TIME:", simRunTime, "\n\n")
        for tl in trafficLights:
            tl.resetRecievedIntentions()
            i = tl.getAssignedIndividual()
            i.updateLastRunTime(simRunTime)
            print("Individual", i, "has a last runtime of", i.getLastRunTime())
            i.updateFitness(
                EvolutionaryLearner.rFit(i, simRunTime,
                                         i.getAggregateVehicleWaitTime()))
            print(tl.getName(), "'s coop rules were invalid",
                  tl.getCoopRuleValidRate(), "percent of the time.")
            print(tl.getName(), "'s RS rules were invalid",
                  tl.getRSRuleValidRate(), "percent of the time.")
            print("\n\nA total of", numOfRSRulesApplied,
                  "rules from RS were applied and", numofRSintRulesApplied,
                  "rules from RSint were applied.")
        traci.close()  # End simulation

        sys.stdout.flush()

        return simRunTime  # Return the total simulation runtime to the main module
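The run() method above interleaves rule selection, reinforcement-style weight updates and fitness bookkeeping inside one TraCI loop. For readers unfamiliar with SUMO's TraCI interface, the sketch below isolates just the control-loop skeleton that run() is built on: start SUMO, step once per second, let each traffic light reevaluate every 5 steps, and close when the demand is served or the time limit is reached. The traci calls are the real TraCI API; chooseActionFor is a hypothetical placeholder and not part of SATLO.

import traci


def chooseActionFor(tlID):
    # Hypothetical placeholder for SATLO's rule selection; -1 means "do nothing"
    return -1


def controlLoop(sumoCmd, trafficLightIDs, maxSimulationTime=3600):
    traci.start(sumoCmd)  # e.g. ["sumo", "-c", "scenario.sumocfg"]
    step = 0
    while traci.simulation.getMinExpectedNumber() > 0 and \
            traci.simulation.getTime() < maxSimulationTime:
        traci.simulationStep()  # advance the simulation by one step (1 second)
        if step % 5 == 0:  # traffic lights reevaluate every 5 seconds
            for tlID in trafficLightIDs:
                action = chooseActionFor(tlID)
                if action != -1:
                    traci.trafficlight.setPhase(tlID, action)  # apply the chosen phase
        step += 1
    runTime = traci.simulation.getTime()
    traci.close()
    return runTime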
Example #3
                    else:
                        needsTesting.append(False)
            
            if True not in needsTesting:
                allIndividualsTested = True
                for ap in resultingAgentPools:
                    for i in ap.getIndividualsSet():
                        continue # print(i, "has a selected count of:", i.getSelectedCount())
            #allIndividualsTested = True # Uncomment for quick testing

        # Prepare individuals for the next run-through
        for ap in setUpTuple[2]:
            ap.normalizeIndividualsFitnesses()  # Normalize the fitness values of each Individual in an agent pool for breeding purposes
                
        if generations + 1 < totalGenerations:
            EvolutionaryLearner.createNewGeneration(setUpTuple[2])     # Update agent pools with a new generation of individuals
            for ap in setUpTuple[2]:
                for i in ap.getIndividualsSet():
                    i.resetSelectedCount()
                    i.resetAggregateVehicleWaitTime()
                    # print("Generation includes Individual:", i.getID(), ";\n")
            sys.stdout.flush()
        else:
            OutputManager.run(setUpTuple[2], sum(generationRuntimes)/50, (sum(generationRuntimes)/50)*50)
            print("Output file created.")
        
        #     bestIndividuals = []
        #     for ap in setUpTuple[2]:
        #         bestIndividuals.append(ap.getBestIndividual())
            
        #     f = open("bestIndividuals.txt", "w")
    def run(self):
        traci.start(
            self.sumoCmd
        )  # Start SUMO. Comment out if running Driver as standalone module.

        # Run set-up script and acquire list of user-defined rules and traffic light agents in the simulation
        userDefinedRules = self.setUpTuple[0]
        trafficLights = self.setUpTuple[1]
        rule = None
        nextRule = None

        # Assign each traffic light an individual from their agent pool for this simulation run, and a starting rule
        for tl in trafficLights:
            tl.assignIndividual()

            rule = self.applicableUserDefinedRule(
                tl, userDefinedRules)  # Check user-defined rules

            # If no user-defined rules can be applied, get a rule from Agent Pool
            if rule == False:
                validRules = self.getValidRules(tl, tl.getAssignedIndividual())
                rule = tl.getNextRule(validRules[0], validRules[1],
                                      traci.simulation.getTime()
                                      )  # Get a rule from assigned Individual

                # If no valid rule is applicable, apply the Do Nothing rule.
                if rule == -1:
                    # print("No valid rule. Do Nothing action applied.")
                    tl.doNothing()  # Update traffic light's Do Nothing counter
                    tl.getAssignedIndividual().updateFitnessPenalty(
                        False, 0)  # Update fitness penalty for individual

                else:
                    # If rule conditions are satisfied, apply its action. Otherwise, do nothing.
                    if not rule.hasDoNothingAction():
                        traci.trafficlight.setPhase(tl.getName(),
                                                    rule.getAction())
            else:
                self.applyUserDefinedRuleAction(
                    tl, traci.trafficlight.getPhaseName(tl.getName()), rule)

            tl.setCurrentRule(rule)  # Set current rule in traffic light

        # Simulation loop
        step = 0
        # Variables for rule rewards
        carsWaitingBefore = {}
        carsWaitingAfter = {}
        while traci.simulation.getMinExpectedNumber(
        ) > 0 and traci.simulation.getTime() < self.maxSimulationTime:
            traci.simulationStep(
            )  # Advance SUMO simulation one step (1 second)

            # Traffic Light agents reevaluate their state every 5 seconds
            if step % 5 == 0:
                # For every traffic light in simulation, select and evaluate new rule from its agent pool
                for tl in trafficLights:
                    carsWaitingBefore = tl.getCarsWaiting()
                    carsWaitingAfter = self.carsWaiting(tl)

                    nextRule = self.applicableUserDefinedRule(
                        tl, userDefinedRules
                    )  # Check if a user-defined rule can be applied

                    # If no user-defined rules can be applied, get a rule from Agent Pool
                    if nextRule == False:
                        validRules = self.getValidRules(
                            tl, tl.getAssignedIndividual())
                        nextRule = tl.getNextRule(
                            validRules[0], validRules[1],
                            traci.simulation.getTime(
                            ))  # Get a rule from assigned Individual

                        # If no valid rule is applicable, apply the Do Nothing rule.
                        if nextRule == -1:
                            tl.doNothing(
                            )  # Update traffic light's Do Nothing counter
                            tl.getAssignedIndividual().updateFitnessPenalty(
                                False,
                                False)  # Update fitness penalty for individual

                        # If next rule is not a user-defined rule, update the weight of the last applied rule
                        else:
                            oldRule = tl.getCurrentRule()

                            # If applied rule isn't user-defined, update its weight
                            if oldRule not in userDefinedRules:
                                if oldRule != -1:
                                    ruleWeightBefore = oldRule.getWeight(
                                    )  # Used to calculate fitness penalty to individual
                                    oldRule.updateWeight(
                                        ReinforcementLearner.updatedWeight(
                                            oldRule, nextRule,
                                            self.getThroughputRatio(
                                                self.getThroughput(
                                                    tl, carsWaitingBefore,
                                                    carsWaitingAfter),
                                                len(carsWaitingBefore)),
                                            self.getWaitTimeReducedRatio(
                                                self.getThroughputWaitingTime(
                                                    tl, carsWaitingBefore,
                                                    carsWaitingAfter),
                                                self.getTotalWaitingTime(
                                                    carsWaitingBefore)),
                                            len(carsWaitingAfter) -
                                            len(carsWaitingBefore)))
                                    tl.getAssignedIndividual(
                                    ).updateFitnessPenalty(
                                        True,
                                        oldRule.getWeight() > ruleWeightBefore)
                                    print("Old weight was", ruleWeightBefore,
                                          "and new weight is",
                                          oldRule.getWeight())
                                # Apply the next rule; if action is -1 then action is do nothing
                                if not nextRule.hasDoNothingAction():
                                    # print('Next rule action is', nextRule.getAction())
                                    traci.trafficlight.setPhase(
                                        tl.getName(), nextRule.getAction())

                                if nextRule.getType() == 0:
                                    print(
                                        "Applying TL action from RS! Action is",
                                        nextRule.getAction(), "\n\n")
                                else:
                                    print(
                                        "Applying TL action from RSint! Action is",
                                        nextRule.getAction(), "\n\n")

                    else:
                        self.applyUserDefinedRuleAction(
                            tl, traci.trafficlight.getPhaseName(tl.getName()),
                            nextRule)
                        # # print("Applying action of", nextRule.getConditions())

                    tl.setCurrentRule(
                        nextRule
                    )  # Update the currently applied rule in the traffic light
                    tl.updateCarsWaiting(
                        carsWaitingAfter
                    )  # Set the number of cars waiting count within the TL itself

            step += 1  # Increment step in line with simulator

        # Update the fitness of each individual involved in the simulation based on this run's results
        simRunTime = traci.simulation.getTime()
        print("***SIMULATION TIME:", simRunTime, "\n\n")
        for tl in trafficLights:
            # # print(tl.getName(), "has these communicated intentions:", tl.getCommunicatedIntentions())
            i = tl.getAssignedIndividual()
            i.updateLastRunTime(simRunTime)
            print("Individual", i, "has a last runtime of", i.getLastRunTime())
            i.updateFitness(
                EvolutionaryLearner.rFit(i, simRunTime,
                                         i.getAggregateVehicleWaitTime()))

        traci.close()  # End simulation

        return self.setUpTuple[
            2]  # Returns all the agent pools to the main module
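The fragment above Example #3's run() shows the generational loop that calls it: after each simulation the individuals' fitnesses are normalized per agent pool and, unless the final generation has been reached, a new generation is bred. The condensed sketch below restates that flow; evolve and makeDriver are hypothetical names, the project's EvolutionaryLearner module is assumed to be importable, and the agent-pool methods used here (normalizeIndividualsFitnesses, createNewGeneration, getIndividualsSet, resetSelectedCount, resetAggregateVehicleWaitTime) are taken from the fragment itself.

import sys

import EvolutionaryLearner  # project module, assumed importable alongside Driver.py


def evolve(setUpTuple, totalGenerations, makeDriver):
    for generation in range(totalGenerations):
        driver = makeDriver(setUpTuple)  # hypothetical factory returning a configured Driver
        driver.run()  # runs SUMO and updates the fitness of every assigned individual

        # Normalize fitness values inside each agent pool for breeding purposes
        for ap in setUpTuple[2]:
            ap.normalizeIndividualsFitnesses()

        if generation + 1 < totalGenerations:
            # Breed a new generation of individuals into every agent pool
            EvolutionaryLearner.createNewGeneration(setUpTuple[2])
            for ap in setUpTuple[2]:
                for i in ap.getIndividualsSet():
                    i.resetSelectedCount()
                    i.resetAggregateVehicleWaitTime()
            sys.stdout.flush()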