Code example #1
File: Agent.py Project: markriedl/big-red-button
	def copyObservation(self, obs):
		returnObs =  Observation()
		if obs.worldState != None:
			returnObs.worldState = obs.worldState[:]
            
		if obs.availableActions != None:
			returnObs.availableActions = obs.availableActions[:]
        
		if obs.isTerminal != None:
			returnObs.isTerminal = obs.isTerminal
            
		return returnObs
Code example #2
File: Agent.py Project: pthakkar3/CS4731
    def copyObservation(self, obs):
        returnObs = Observation()
        if obs.worldState != None:
            returnObs.worldState = obs.worldState[:]

        if obs.availableActions != None:
            returnObs.availableActions = obs.availableActions[:]

        if obs.isTerminal != None:
            returnObs.isTerminal = obs.isTerminal

        return returnObs
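
All of these copy helpers assume an Observation data holder with the same three public fields; the class itself is not shown in any excerpt, so the following stand-in is only an illustration of the shape the code expects.

class Observation:
    # Minimal stand-in matching the attributes accessed throughout these examples.
    def __init__(self):
        self.worldState = None        # list describing the environment state
        self.availableActions = None  # list of action indices valid in this state
        self.isTerminal = None        # True once a terminal state has been reached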
Code example #3
    def checkObservation(self, observationObj):
        row = None
        rows = None
        sdeConn = None
        try:
            sdeConn = arcpy.ArcSDESQLExecute(self.sp.getFullPath())
            sql = "select top(1) OBJECTID,NUMERIC_VALUE from dbo.Observation where PROPERTY='" + str(
                observationObj.property_ref.property_id
            ) + "' and OFFERING='" + str(
                observationObj.offering_id
            ) + "' and PROCEDURE_='" + str(
                observationObj.procedure_ref.procedure_id
            ) + "' and FEATURE='" + str(
                observationObj.feature_ref.featureID
            ) + "' and TIME_STAMP='" + observationObj.time_stamp + "' and TIME_STAMP_BEGIN='" + observationObj.time_stamp_begin + "'"
            rows = sdeConn.execute(sql)

            tempObj = Observation.Observation()
            if isinstance(rows, list):
                for row in rows:
                    tempObj.objectID = row[0] if (row[0] != None) else -1
                    tempObj.numeric_value = row[1] if (row[1] != None) else 0
            else:
                tempObj.objectID = -1
                tempObj.numeric_value = 0
            return tempObj
        except:
            return None
        finally:
            if row:
                del row
            if rows:
                del rows
            if sdeConn:
                del sdeConn
Code example #4
def get_alpha(transition, covariances, states, means, mfcc, t):
    if t < 1:
        # When t = 0, the chance of being in the first state is
        # 100% so the whole column is 0 except for the first state
        # which will be set to 1.  For log probabilities, 0's are
        # represented with -10e30 since log(0) = -inf.  Likewise,
        # the 1 will be represented with a 0 since log(1) = 0.
        low_log = -10e30
        initial_alpha = [low_log] * states
        initial_alpha[0] = math.log(1)
        return [initial_alpha]
    current_mfcc = mfcc[t]
    alpha_matrix = get_alpha(transition, covariances, states, means, mfcc,
                             t - 1)
    previous_alpha = alpha_matrix[t - 1]
    build_alpha = [0] * states
    low_log = -10e30
    for q in range(0, states):
        mean = means[q]
        local_distortion = obs.prob_observation(current_mfcc, mean,
                                                covariances)
        local_distortion = get_log(local_distortion, low_log)
        sum = low_log
        for r in range(0, states):
            state_transition = transition[r][q]
            state_transition = get_log(state_transition, low_log)
            alpha_r = previous_alpha[r]
            product = alpha_r + state_transition
            sum = log_add(sum, product, low_log)
        build_alpha[q] = local_distortion + sum
    # Append the newly built column so the recursion returns one alpha row per
    # time step; without this, alpha_matrix[t - 1] is missing on the next call.
    alpha_matrix.append(build_alpha)
    return alpha_matrix
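
The excerpt calls two helpers, get_log and log_add, that are not shown. A minimal sketch of what they presumably do, assuming the signatures used above (flooring log probabilities at the low_log constant and adding probabilities stably in the log domain):

import math

def get_log(value, low_log):
    # Floor non-positive probabilities at the large negative constant used above.
    return math.log(value) if value > 0 else low_log

def log_add(log_a, log_b, low_log):
    # Stable log(exp(log_a) + exp(log_b)); two floored inputs stay at the floor.
    if log_a <= low_log and log_b <= low_log:
        return low_log
    m = max(log_a, log_b)
    return m + math.log(math.exp(log_a - m) + math.exp(log_b - m))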
Code example #5
def get_beta(transition, covariances, states, means, mfcc, t):
    if t > len(mfcc) - 1:
        # When t = T-1, the chance of being in the last state is
        # 100% so the whole column is 0 except for the last state
        # which will be set to 1.  For log probabilities, 0's are
        # represented with -10e30 since log(0) = -inf.  Likewise,
        # the 1 will be represented with a 0 since log(1) = 0.
        log_low = -10e30
        last_beta = [log_low] * states
        last_beta[states - 1] = math.log(1)
        return [last_beta]
    beta_matrix = get_beta(transition, covariances, states, means, mfcc, t + 1)
    next_mfcc = mfcc[t + 1]
    build_beta = [0] * states
    next_beta = beta_matrix[0]
    log_low = -10e30
    for q in range(0, states):
        sum = log_low
        for r in range(0, states):
            state_transition = transition[q][r]
            state_transition = get_log(state_transition, log_low)
            # Emission probability of the next frame under state r (get_alpha passes
            # the per-state mean, so means[r] is used here rather than the whole list).
            next_distortion = obs.prob_observation(next_mfcc, means[r],
                                                   covariances)
            next_distortion = get_log(next_distortion, log_low)
            beta_r = next_beta[r]
            product = beta_r + next_distortion + state_transition
            sum = log_add(sum, product, log_low)
        build_beta[q] = sum
    # Prepend the new column so beta_matrix[0] on the caller's side is always the
    # beta column for time t + 1 (returning the bare list would break the recursion).
    beta_matrix.insert(0, build_beta)
    return beta_matrix
Code example #6
    def start(self):
        """
        Activate the Proposal instance in the simulation and create
        the appropriate number of Observation objects.
        Activate each Observation object and configure them.
        """
        if self.log and self.verbose > 1:
            self.log.info('Proposal: start() propID=%d' % (self.propID))

        mu = AVGPRIORITY
        sigma = mu * (SIGMAPERCENT / 100.)
        for target in self.targets:
            ra = target[0]
            dec = target[1]
            #id = target[2]
            t = EXPTIME
            pri = random.normalvariate(mu, sigma)

            # create the observation with the desired exposure time
            # and priority.
            self.observations.append(Observation(proposal=self, ra=ra, dec=dec, exposureTime=t, priority=pri,
                                                 verbose=self.verbose))

            # wait 1 second before submitting the next observation
#            yield hold, self
        return
Code example #7
	def env_start(self):
		# Use hard-coded start state or randomly generated state?
		if self.randomStart:
			self.currentState = randomizeStart(self.map)
		else:
			self.currentState = self.startState[:]

		# Make sure counter is reset
		self.counter = 0

		if self.verbose:
			print "env_start", self.currentState

		# Reset previous state
		self.previousState = []

		# Get the first observation
		returnObs=Observation()
		returnObs.worldState=self.currentState[:]
		returnObs.availableActions = self.validActions()
		return returnObs
Code example #8
File: Board.py Project: soberanc/GolemGlobe
    def getObservationForIndex(self,index=-1):
        checkIndex = self.currentIndex if index == -1 else index

        adjTiles = self.getIndexOfValidMoves(index)
        obs_type = []
        for eachTile in adjTiles:
            obs_type.append(self.board[eachTile].getAdjacentObservation())
        obs_type.append(self.tileAtIndex().getObservationForStand())

        obs_type = list(set(obs_type))

        return Observation(index//self.rows,index%self.rows,checkIndex,obs_type)
Code example #9
    def env_start(self):
        # Use hard-coded start state or randomly generated state?
        if self.randomStart:
            self.currentState = self.randomizeStart(self.map)
        else:
            self.currentState = self.startState[:]

        # Make sure counter is reset
        self.counter = 0

        if self.isVerbose():
            print "env_start", self.currentState

        # Reset previous state
        self.previousState = []

        # Get the first observation
        returnObs = Observation()
        returnObs.worldState = self.currentState[:]
        returnObs.availableActions = self.validActions()
        return returnObs
Code example #10
    def startNight(self, dateProfile, moonProfile, startNewLunation,
                   mountedFiltersList):
        """
        Update the target list and perform any other beginning-of-night setup steps.

        Input
            dateProfile: current profile of date as list:
                        (date, mjd,lst_RAD) where:
                            date in seconds from Jan 1 of simulated year.
                            mjd - modified Julian date
                            lst_RAD - local sidereal time at site (radians)
            moonProfile: current profile of the moon as list:
                        (moonRA_RAD,moonDec_RAD, moonPhase_PERCENT)

            startNewLunation: True -> new lunation starting, False otherwise

        Return
            None
        """
        self.log.info("Proposal:startNight propID=%d" % (self.propID))

        # Create a pool of Observation instances (& garbage collect old Obs?)
        self.obsPool = {}

        for fieldID in self.targets.keys():
            (ra, dec) = self.targets[fieldID]
            self.obsPool[fieldID] = {}
            for filter in self.filters.filterNames:
                self.obsPool[fieldID][filter] = Observation(
                    dateProfile=dateProfile,
                    moonProfile=moonProfile,
                    proposal=self,
                    ra=ra,
                    dec=dec,
                    filter=filter,
                    maxSeeing=self.maxSeeing,
                    exposureTime=self.exposureTime,
                    fieldID=fieldID,
                    slewTime=-1.,
                    log=self.log,
                    logfile=self.logfile,
                    verbose=self.verbose)

        self.last_observed_fieldID = None
        self.last_observed_filter = None
        self.last_observed_wasForThisProposal = False

        self.mountedFiltersList = mountedFiltersList

        # rank all targets
        #        self.reuseRanking = 0

        return
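
The docstring fixes the layout of the two profile tuples. An illustrative pair of values (all numbers invented) and the corresponding call, with prop standing in for an already configured Proposal instance:

dateProfile = (345600.0, 60218.5, 1.234)   # seconds since Jan 1, modified Julian date, LST (radians)
moonProfile = (2.10, -0.35, 55.0)          # moon RA (rad), moon Dec (rad), moon phase (percent)
# prop.startNight(dateProfile, moonProfile, startNewLunation=True,
#                 mountedFiltersList=['u', 'g', 'r'])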
Code example #11
File: Agent.py Project: pthakkar3/CS4731
    def __init__(self, env):

        # Initialize value table
        self.v_table = {}

        # Set dummy action and observation
        self.lastAction = Action()
        self.lastObservation = Observation()

        # Set the environment
        self.gridEnvironment = env

        # Get first observation and start the environment
        self.initialObs = self.gridEnvironment.env_start()
        self.initializeVtableStateEntry(self.initialObs.worldState)
Code example #12
 def add_observation(self,
                     peptide: str,
                     ic50: float,
                     bit_code: List[bool] = None,
                     blo_map: List[float] = None,
                     target: int = None):
     """
     Adds an observation to the observation list
     :param peptide:
     :param ic50:
     :param target:
     :param bit_code:
     :param blo_map:
     """
     observation = Os.Observation(peptide=peptide, ic50=ic50, bit_code=bit_code, blo_map=blo_map, target=target)
     self.observations.append(observation)
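
Given the type hints, a hypothetical call would look like the following; dataset stands in for whatever object owns the observations list, and the peptide and IC50 values are invented for illustration.

# dataset.add_observation(peptide="SIINFEKL", ic50=42.0, bit_code=None, blo_map=None, target=1)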
Code example #13
 def checkObservation(self,observationObj):
     resturl = self.buildInstance.buildUrl(10)
     resturl += "where=TIME_STAMP='"+observationObj.time_stamp+"'%20and%20PROPERTY="+str(observationObj.property_ref.property_id)+"%20and%20OFFERING="+str(observationObj.offering_id)+"%20and%20TIME_STAMP_BEGIN='"+observationObj.time_stamp_begin+"'%20and%20PROCEDURE_="+str(observationObj.procedure_ref.procedure_id)+"%20and%20FEATURE="+str(observationObj.feature_ref.featureID)+"&returnCountOnly=false&returnIdsOnly=false&returnGeometry=false&outFields=*&f=pjson"
     jsonResponse = self.callRest(resturl,"")
     if self.checkResponse(jsonResponse) == 0:
         return None
     tempObj = Observation.Observation()
     try:
         if len(jsonResponse['features']) == 1:
             tempObj.objectID = jsonResponse['features'][0]['attributes']['OBJECTID']
             tempObj.numeric_value = jsonResponse['features'][0]['attributes']['NUMERIC_VALUE']
             return tempObj
         tempObj.objectID = -1
         return tempObj
     except:
         tempObj.objectID = -1
         return tempObj
Code example #14
    def __init__(self, env):

        # Initialize value table
        self.v_table = {}

        # Set dummy action and observation
        self.lastAction = Action()
        self.lastObservation = Observation()

        # Set the environment
        self.gridEnvironment = env

        # Get first observation and start the environment
        self.initialObs = self.gridEnvironment.env_start()
        if self.calculateFlatState(
                self.initialObs.worldState) not in self.v_table.keys():
            self.v_table[self.calculateFlatState(
                self.initialObs.worldState)] = self.numActions * [0.0]
Code example #15
File: Socket.py Project: thiagofmedeiros/A2C_MT5
    def process(self, string_data):
        obs = Observation(string_data)

        s = np.array([
            obs.month, obs.day, obs.monday, obs.tuesday, obs.wednesday,
            obs.thursday, obs.friday, obs.hour, obs.minute,
            obs.positionNothing, obs.positionBought, obs.positionSold,
            obs.operationPoints, obs.lastValue, obs.lastRealVolume,
            obs.lastTickVolume
        ])

        for i in range(0, 15):
            candles = np.array([
                obs.M1_open[i], obs.M1_high[i], obs.M1_low[i], obs.M1_close[i],
                obs.M1_real_volume[i], obs.M1_tick_volume[i], obs.M5_open[i],
                obs.M5_high[i], obs.M5_low[i], obs.M5_close[i],
                obs.M5_real_volume[i], obs.M5_tick_volume[i], obs.M15_open[i],
                obs.M15_high[i], obs.M15_low[i], obs.M15_close[i],
                obs.M15_real_volume[i], obs.M15_tick_volume[i],
                obs.M30_open[i], obs.M30_high[i], obs.M30_low[i],
                obs.M30_close[i], obs.M30_real_volume[i],
                obs.M30_tick_volume[i], obs.H1_open[i], obs.H1_high[i],
                obs.H1_low[i], obs.H1_close[i], obs.H1_real_volume[i],
                obs.H1_tick_volume[i], obs.D1_open[i], obs.D1_high[i],
                obs.D1_low[i], obs.D1_close[i], obs.D1_real_volume[i],
                obs.D1_tick_volume[i], obs.W1_open[i], obs.W1_high[i],
                obs.W1_low[i], obs.W1_close[i], obs.W1_real_volume[i],
                obs.W1_tick_volume[i]
            ])

            s = np.concatenate((s, candles))

        for i in range(0, 12):
            candles = np.array([
                obs.MN1_open[i], obs.MN1_high[i], obs.MN1_low[i],
                obs.MN1_close[i], obs.MN1_real_volume[i],
                obs.MN1_tick_volume[i]
            ])

            s = np.concatenate((s, candles))

        return s, obs.reward
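
For reference, the concatenation above produces a flat state vector of 16 + 15 × 42 + 12 × 6 = 718 features: 16 scalar fields, 42 candle values for each of the 15 most recent bars on the M1 through W1 timeframes, and 6 values for each of the 12 monthly (MN1) bars; the scalar reward is returned separately.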
Code example #16
     Log.Log().writeLog(pathErrorLog, "File too big to be processed: "+filename)
     errorFilePath = os.path.join(pathError, filename)
     Log.Log().copyFile(xmlfilepath, errorFilePath)
     Log.Log().deleteFile(xmlfilepath)
     continue
 
 xmlParsed = getXML(xmlfilepath)
 for node in xmlParsed.getElementsByTagName("sos:GetObservationResponse"):# ("sos:InsertObservation"):
     
     # Get observations
     for ov in node.getElementsByTagName("sos:observationData"): #("sos:observation"):
         featureOfInterest = ov.getElementsByTagName("om:featureOfInterest")[0]
         nameNode = featureOfInterest.getElementsByTagName("gml:name")
         
         # Observation instance
         observationObj = Observation.Observation()
           
         # Get procedure info 
         procedureObj = Procedure.Procedure()
         procedureNode = ov.getElementsByTagName("om:procedure")[0]
         procedureObj.unique_id = procedureNode.attributes['xlink:href'].nodeValue
         
         observPropNode = ov.getElementsByTagName("om:observedProperty")[0]
         obserPropDesc = observPropNode.attributes['xlink:href'].nodeValue 
         propertyObj = Property.Property(obserPropDesc)
        
         # Property handling checks whether it exists
         if propertyObj.handlingProperty() == 0:
             observationObj.valid = 0
             Log.Log().writeLog(pathErrorLog, "Property doesn't exist "+ obserPropDesc +" : "+filename)
             raise StopIteration()
Code example #17
File: Environment.py Project: ru-Dust/big-red-button
    def env_step(self, thisAction):
        # Store previous state
        self.previousState = self.currentState[:]
        # Execute the action
        self.executeAction(thisAction.actionValue)

        # Get a new observation
        lastActionValue = thisAction.actionValue
        theObs = Observation()
        theObs.worldState = self.currentState[:]
        theObs.availableActions = self.validActions()

        # Check to see if agent entered a terminal state
        theObs.isTerminal = self.checkTerminal()

        # Calculate the reward
        rewardValue = self.calculateReward(lastActionValue)
        reward = Reward(rewardValue)

        # Human movement
        self.counter = self.counter + 1
        if (self.counter % self.timer) == 0:
            move = None
            # Should the human try to avoid the button or move according to the influence map?
            if self.humanWander == False:
                move = self.influenceMap[self.currentState[4]][
                    self.currentState[3]]
            else:
                move = random.randint(0, 3)

            # newpos will be the new grid cell the human moves into
            newpos = [self.currentState[3], self.currentState[4]]
            if move == 0:
                newpos[1] = newpos[1] - 1
            elif move == 1:
                newpos[1] = newpos[1] + 1
            elif move == 2:
                newpos[0] = newpos[0] - 1
            elif move == 3:
                newpos[0] = newpos[0] + 1

            # If human is wandering, make sure it can't move into a wall or onto the button
            if self.humanWander == True and (
                    self.map[newpos[1]][newpos[0]] == 2
                    or self.map[newpos[1]][newpos[0]] == 1):
                newpos[0] = self.currentState[3]
                newpos[1] = self.currentState[4]

            # human about to move on to button, which is working
            if self.map[self.currentState[4]][
                    self.currentState[3]] != 2 and self.map[newpos[1]][
                        newpos[0]] == 2 and self.currentState[2] == False:
                # button pressed
                self.currentState[5] = True

            # human about to move off button
            if self.map[self.currentState[4]][self.currentState[
                    3]] == 2 and self.map[newpos[1]][newpos[0]] != 2:
                # button un-pressed
                self.currentState[5] = False

            # update state
            self.currentState[3] = newpos[0]
            self.currentState[4] = newpos[1]

        if self.verbose:
            print "bot state:", self.currentState

        return theObs, reward
Code example #18
File: Controller.py Project: devhima/Maze-Solver
# Make a number of memories. Also doubles as testing
print "---"
for i in range(numMemories):
    print "Execute Policy", i
    gridAgent.agent_reset()
    gridAgent.executePolicy(gridAgent.initialObs)
    print "total reward", gridAgent.totalReward
    gridAgent.memory.append(gridAgent.trace)
    print "---"

# Reverie mode
if reverie:
    # get agent ready to learn from memories
    gridAgent.lastAction = Action()
    gridAgent.lastObservation = Observation()

    gridAgent.verbose = True
    gridEnvironment.verbose = True

    # Replaying memories creates the value table that the agent would have if all it had to go on was the memories
    print "Replaying memories", len(gridAgent.memory)
    counter = 0
    print "---"
    for m in gridAgent.memory:
        obs = m[0][0].worldState
        print "Learn from memory", counter
        print "init state", obs
        gridEnvironment.startState = obs
        gridAgent.agent_reset()
        gridAgent.lastAction = Action()
Code example #19
    def env_step(self, thisAction):
        # Store previous state
        self.previousState = self.currentState[:]
        # Execute the action
        self.executeAction(thisAction.actionValue)

        # increment counter
        self.counter = self.counter + 1

        # Enemy movement
        if self.currentState[2]:
            if self.currentState[0] == self.currentState[
                    3] and self.currentState[1] == self.currentState[4]:
                self.currentState[5] = True
            else:
                self.currentState[5] = False
                if self.counter % self.moveTimer == 0:
                    # Which direction to move?
                    move = None
                    if self.enemyMode == 1:
                        move = self.influenceMap[self.currentState[4]][
                            self.currentState[3]]
                    elif self.enemyMode == 2:
                        move = random.randint(0, 3)
                    elif self.enemyMode == 3:
                        move = self.chaseDirection(
                            (self.currentState[3], self.currentState[4]),
                            (self.currentState[0], self.currentState[1]))
                    elif self.enemyMode == 4 and self.nextEnemyMove is not None:
                        move = self.nextEnemyMove
                    if self.isVerbose():
                        print "enemy action:", self.actionToString(move)
                    if move is not None:
                        # newpos will be the new grid cell the enemy moves into
                        newpos = [self.currentState[3], self.currentState[4]]
                        if move == 0:
                            newpos[1] = newpos[1] - 1
                        elif move == 1:
                            newpos[1] = newpos[1] + 1
                        elif move == 2:
                            newpos[0] = newpos[0] - 1
                        elif move == 3:
                            newpos[0] = newpos[0] + 1

                        # Make sure it can't move into a wall
                        if self.map[newpos[1]][newpos[0]] == 1:
                            newpos[0] = self.currentState[3]
                            newpos[1] = self.currentState[4]

                        # update state
                        self.currentState[3] = newpos[0]
                        self.currentState[4] = newpos[1]

        # Rescuing
        # People can be given numbers 5-9 and their rescue state is in positions 5-9 of the bot's state
        for i in range(
                min(len(self.startState), self.largestSurvivorID) -
                self.smallestSurvivorID):
            survivor = i + self.smallestSurvivorID
            if not self.currentState[survivor] and self.map[
                    self.currentState[1]][self.currentState[0]] == survivor:
                self.currentState[survivor] = True

        if self.isVerbose():
            print "state:", self.currentState
            if isinstance(self.verbose, numbers.Number) and self.verbose >= 2:
                self.printEnvironment()

        # Make a new observation
        lastActionValue = thisAction.actionValue
        theObs = Observation()
        theObs.worldState = self.currentState[:]
        theObs.availableActions = self.validActions()
        theObs.isTerminal = self.checkTerminal()

        # Calculate the reward
        rewardValue = self.calculateReward(lastActionValue)
        reward = Reward(rewardValue)

        return theObs, reward
Code example #20
class MainWindow(QMainWindow):

    def __init__(self, parent = None):
        QMainWindow.__init__(self, parent)

        # The problem is modelled with a pyAgrum Bayesian network
        self.bnCarFilename = bnFilename
        bnCar = gum.loadBN(self.bnCarFilename)

        # Initialise the repair and observation costs
        self.costsRep = costsRep
        self.costsObs = costsObs

        # A shortened initialisation so as not to overload the exact algorithms
        self.nodesAssociations = nodesAssociations

        # The algorithm to use can be chosen among the 5 implemented algorithms

        self.algos_possibles = [
            "simple",
            "simple avec observations locales",
            "myope (avec observations globales)",
            "myope avec elicitation",
            "recherche exhaustive"
        ]
        self.size = (600, 500)
        self.configSize = (300, 350)
        self.progressSize = (500, 200)

###################################################
# MainWindow properties                           #
###################################################

        self.setWindowTitle("Troubleshooter")
        self.resize(self.size[0], self.size[1])

###################################################
# The various widgets                             #
###################################################

        self.introduction = Introduction(self.algos_possibles)
        self.introduction.startButton.clicked.connect(self.startAlgorithme)

        self.static = Static()
        self.static.finButton.clicked.connect(self.fin)

        self.trouble = Troubleshoot()
        self.trouble.obsButton.clicked.connect(self.callObs)
        self.trouble.actButton.clicked.connect(self.callAct)
        self.trouble.eliButton.clicked.connect(self.callEli)

        self.obs = Observation()
        self.obs.cb.activated.connect(self.makeObs)

        self.act = Action()
        self.act.yesButton.clicked.connect(self.makeAct)
        self.act.noButton.clicked.connect(self.makeAct)

        self.eli = Elicitation()
        self.eli.yesButton.clicked.connect(self.makeEli)
        self.eli.noButton.clicked.connect(self.makeEli)

        self.fin = Fin()
        self.fin.finButton.clicked.connect(self.finish)

        self.config = ConfigBruteForce()
        self.config.calcButton.clicked.connect(self.calculateBF)
        self.config.progressBar.valueChanged.connect(self.pbarChanged)

        self.showECR = ShowECR()
        self.showECR.continueButton.clicked.connect(self.continueWithBF)

        self.step = StepBruteForce()
        self.step.okButton.clicked.connect(self.stepOk)

###################################################
# Main widget                                     #
###################################################

        self.stack = QStackedWidget()
        self.stack.addWidget(self.introduction)
        self.stack.addWidget(self.static)
        self.stack.addWidget(self.trouble)
        self.stack.addWidget(self.obs)
        self.stack.addWidget(self.act)
        self.stack.addWidget(self.eli)
        self.stack.addWidget(self.fin)
        self.stack.addWidget(self.config)
        self.stack.addWidget(self.showECR)
        self.stack.addWidget(self.step)

        self.setCentralWidget(self.stack)

###################################################
# Troubleshooter                                  #
###################################################

        # Create the object used to solve the problem
        self.tsp = dtt.TroubleShootingProblem(bnCar, [self.costsRep, self.costsObs], self.nodesAssociations)

        self.repairables = self.tsp.repairable_nodes.copy()
        self.repairables.add(self.tsp.service_node)
        self.observables = set(self.tsp.observation_nodes).intersection(set(self.tsp.unrepairable_nodes))

        self.elicitationNode = ""
        self.recommendation, self.typeNodeRec, self.ecr, self.eco = self.tsp.ECR_ECO_wrapper()
        self.currentNode =  ""
        self.currentObs = ""
        self.currentAct = ""
        self.currentPossibilities = []

        self.optimalStrategyTree = None
        self.optimalStrategyTreeCopy = None
        self.optimalECR = self.costsRep[self.tsp.service_node]
        self.obsRepCouples = None
        self.obsObsolete = None
        self.modeCalc = None
        self.modeExec = ""
        self.bruteForce = False
        self.bruteForceStats = {}
        self.exchangeFileName = "optimal_strategy_tree.txt"
        self.bfProcess = None
        self.randomSocketPort = None

    def startAlgorithme(self):
        self.algo = self.introduction.listAlgo.currentItem().text()
        if self.algo == self.algos_possibles[0] or \
        self.algo == self.algos_possibles[1]:
            self.startStatic()
        elif self.algo == self.algos_possibles[4]:
            self.bruteForce = True
            self.startBruteForce()
        else:
            self.startTroubleshoot()

    def startStatic(self):
        if self.algo == self.algos_possibles[0]:
            seq, ecr = self.tsp.simple_solver()
        elif self.algo == self.algos_possibles[1]:
            seq, ecr = self.tsp.simple_solver_obs()

        text = "La séquence de réparation recommendée est la suivante, avec un cout esperé de {:.3f}.".format(ecr)
        self.static.title.setText(text)
        self.static.showSequence(seq)
        self.stack.setCurrentWidget(self.static)

    def startTroubleshoot(self):
        self.trouble.observationsPossibles(self.eco)
        self.trouble.actionsPossibles(self.ecr)

        if self.typeNodeRec == "obs":
            text = "On vous recommende d'observez le composant {} avec ECO : {:.3f}".format(self.recommendation, self.eco[0][1])
        else:
            text = "On vous recommende de faire l'observation-réparation suivante : {} avec ECR : {:.3f}".format(self.recommendation, self.ecr[0][1])
        self.trouble.recommendation.setText(text)

        if self.algo == self.algos_possibles[2]:
            self.trouble.eliButton.setEnabled(False)

        self.stack.setCurrentWidget(self.trouble)

    def startBruteForce(self):
        # answer = QMessageBox.question(
        #     self, "Attention !",
        #     "Les calculs avec la recherche exhaustive peuvent être trop"
        #     " lourds (à peu près 50 minutes même pour la meilleure "
        #     "configuration). Voulez-vous utiliser une version simplifiée "
        #     "du problème ?",
        #     QMessageBox.Yes | QMessageBox.No)
        # if answer == QMessageBox.Yes:
        #     self.nodesAssociations = nodesAssociationsSimple3
        #     self.tsp = dtt.TroubleShootingProblem(
        #         gum.loadBN(self.bnCarFilename), [self.costsRep, self.costsObs],
        #         self.nodesAssociations)
        self.resize(self.configSize[0], self.configSize[1])
        self.bruteForceStats["rep_num"] = 0
        self.bruteForceStats["obs_num"] = 0
        self.bruteForceStats["ecr"] = 0.0
        self.stack.setCurrentWidget(self.config)

    def callObs(self):
        if not self.bruteForce:
            self.currentNode = re.findall('(\S+) \d+.\d+', self.trouble.listObs.currentItem().text())[0]
        else:
            self.currentNode = self.optimalStrategyTreeCopy.get_root().get_name()
        self.currentPossibilities = self.tsp.bayesian_network.variable(self.currentNode).labels()
        self.obs.resultatsPossibles(self.currentPossibilities)
        self.stack.setCurrentWidget(self.obs)

    def callAct(self):
        if not self.bruteForce:
            self.currentNode = re.findall('(\S+) \d+.\d+', self.trouble.listAct.currentItem().text())[0]
        else:
            self.currentNode = self.optimalStrategyTreeCopy.get_root().get_name()
        if self.currentNode == self.tsp.service_node:
                self.act.noButton.setEnabled(False)

        self.stack.setCurrentWidget(self.act)

    def callEli(self):
        self.elicitationNode, val = self.tsp.best_EVOI()
        if not np.allclose(0, val) and val > 0:
            text = "Est-ce que le prix de réparer " + self.elicitationNode + " est plus petit que " + str(self.tsp.costs_rep[self.elicitationNode]) + " ?"
            self.eli.title.setText(text)
            self.stack.setCurrentWidget(self.eli)
        else:
            error = QMessageBox(((QMessageBox.Warning)), "Alerte", "Pas de questions à poser")
            error.exec()

    def makeObs(self, text):
        self.currentObs = self.obs.cb.currentText()
        if not self.bruteForce:
            self.tsp.add_evidence(self.currentNode, self.currentObs)
            self.recommendation, self.typeNodeRec, self.ecr, self.eco = self.tsp.ECR_ECO_wrapper()
            self.trouble.actButton.setEnabled(False)
            self.trouble.obsButton.setEnabled(False)

            self.startTroubleshoot()
        else:
            self.passToNextStep(self.currentObs)
            if self.optimalStrategyTreeCopy is None:
                self.optimalStrategyTreeCopy = st.StrategyTree(
                    root=st.Repair('0', self.tsp.costs_rep[self.tsp.service_node], self.tsp.service_node))
            self.showCurrentNodeBF()

    def makeAct(self):
        if self.sender().text() == "No":
            if not self.bruteForce:
                obsoletes = self.tsp.observation_obsolete(self.currentNode)
                if self.currentNode != self.tsp.service_node:
                    self.tsp.add_evidence(self.currentNode, "no")
                else:
                    self.tsp.add_evidence(self.currentNode, "yes")
                for obs in obsoletes:
                    self.tsp.evidences.pop(obs)
                self.tsp.reset_bay_lp(self.tsp.evidences)
                self.recommendation, self.typeNodeRec, self.ecr, self.eco = self.tsp.ECR_ECO_wrapper()
                self.trouble.actButton.setEnabled(False)
                self.trouble.obsButton.setEnabled(False)

                self.startTroubleshoot()
            else:
                self.passToNextStep()
                if self.optimalStrategyTreeCopy is None:
                    self.optimalStrategyTreeCopy = st.StrategyTree(
                        root=st.Repair('0', self.tsp.costs_rep[self.tsp.service_node], self.tsp.service_node))
                self.showCurrentNodeBF()
        else:
            self.stack.setCurrentWidget(self.fin)

    def makeEli(self):
        if self.sender().text() == "Yes":
            islower = True
        else:
            islower = False
        self.tsp.elicitation(self.elicitationNode, islower)
        self.recommendation, self.typeNodeRec, self.ecr, self.eco = self.tsp.ECR_ECO_wrapper()

        self.startTroubleshoot()

    def finish(self):
        if self.bruteForce and self.modeExec == "step-by-step":
            print(self.bruteForceStats)
        QApplication.exit()
    
    def fin(self):
        self.stack.setCurrentWidget(self.fin)

    def calculateBF(self):
        self.config.calcButton.setEnabled(False)
        self.obsRepCouples = self.config.checkObsRepCouples.isChecked()
        self.obsObsolete = self.config.checkObsObsObsolete.isChecked()
        if self.config.radioCalcAll.isChecked():
            self.modeCalc = "all"
        else:
            self.modeCalc = "dp"
        if self.config.radioExecStepByStep.isChecked():
            self.modeExec = "step-by-step"
        else:
            self.modeExec = "show-tree"
        answer = QMessageBox.question(
            self, "Attention !",
            "Les calculs avec la recherche exhaustive peuvent être trop"
            " lourds (à peu près 50 minutes même pour la meilleure "
            "configuration). Voulez-vous utiliser une version simplifiée "
            "du problème ?",
            QMessageBox.Yes | QMessageBox.No)
        if answer == QMessageBox.Yes:
            if self.modeCalc == "dp" and self.obsRepCouples:
                self.nodesAssociations = nodesAssociationsSimple0
            elif self.modeCalc == "dp" and not self.obsRepCouples:
                self.nodesAssociations = nodesAssociationsSimple1
            elif self.modeCalc == "all" and self.obsRepCouples:
                self.nodesAssociations = nodesAssociationsSimple2
            elif self.modeCalc == "all" and not self.obsRepCouples:
                self.nodesAssociations = nodesAssociationsSimple3
            self.tsp = dtt.TroubleShootingProblem(
                gum.loadBN(self.bnCarFilename), [self.costsRep, self.costsObs],
                self.nodesAssociations)
        pbarMax = self.findPbarMax()
        self.config.progressBar.setRange(0, pbarMax)
        self.randomSocketPort = int(np.random.randint(1024, 10000, 1))
        if os.name == "nt":
            self.bfProcess = Process(
                target=launch_brute_force_multi_processing_windows,
                args=(
                    self.bnCarFilename, [self.costsRep, self.costsObs], self.nodesAssociations, self.randomSocketPort,
                    self.modeCalc, self.obsRepCouples, self.obsObsolete, self.exchangeFileName
                )
            )
        else:
            self.bfProcess = Process(target=self.launchBruteForceMultiProcessing)
        self.config.calcButton.setText("Le calcul de la stratégie optimale est en cours...")
        self.bfProcess.start()
        self.managePbar()

    def pbarChanged(self, val):
        if self.config.progressBar.maximum() == val:
            self.optimalStrategyTree, self.optimalECR = st.st_from_file(self.exchangeFileName)
            self.showECR.continueButton.setEnabled(True)
            self.showECR.updateTitle(self.optimalECR)
            self.resize(self.progressSize[0], self.progressSize[1])
            self.stack.setCurrentWidget(self.showECR)

    def continueWithBF(self):
        if self.modeExec == "show-tree":
            self.stack.setCurrentWidget(self.fin)
            ost_filename = "optimal_strategy_tree.gv"
            if isinstance(self.optimalStrategyTree, st.StrategyTree):
                self.optimalStrategyTree.visualize(ost_filename)
        elif self.modeExec == "step-by-step":
            self.optimalStrategyTreeCopy = self.optimalStrategyTree.copy()
            self.resize(self.size[0], self.size[1])
            self.showCurrentNodeBF()

    def stepOk(self):
        if isinstance(self.optimalStrategyTreeCopy.get_root(), st.Observation):
            self.bruteForceStats["obs_num"] += 1
            self.bruteForceStats["ecr"] += self.tsp.costs_obs[self.optimalStrategyTreeCopy.get_root().get_name()]
            self.callObs()
        else:
            self.bruteForceStats["rep_num"] += 1
            self.bruteForceStats["ecr"] += self.tsp.costs_rep[self.optimalStrategyTreeCopy.get_root().get_name()]
            self.callAct()

    def showCurrentNodeBF(self):
        node = (
            self.optimalStrategyTreeCopy.get_root()
            if isinstance(self.optimalStrategyTreeCopy, st.StrategyTree) else None)
        if node is None:
            self.hide()
            msg = QMessageBox(
                QMessageBox.Critical, "Erreur critique",
                "Une erreur critique s'est produite ! L'application se terminera !", QMessageBox.Ok)
            msg.exec_()
            QApplication.exit()
            return
        node_name = node.get_name()
        node_type = ("réparation" if isinstance(node, st.Repair) else "observation")
        self.step.setTitle("Veuillez exécuter une %s \"%s\"" % (node_type, node_name))
        self.stack.setCurrentWidget(self.step)

    def passToNextStep(self, obsRes=None):
        self.optimalStrategyTreeCopy = self.optimalStrategyTreeCopy.get_sub_tree(
            self.optimalStrategyTreeCopy.get_node(
                self.optimalStrategyTreeCopy.get_root()
            ).get_child_by_attribute(obsRes)
        )

    def launchBruteForceMultiProcessing(self):
        sock = socket.socket()
        sock.connect(("localhost", self.randomSocketPort))
        sock.send("0".encode())
        best_tree, best_ecr = self.tsp.brute_force_solver(
            mode=self.modeCalc, obs_rep_couples=self.obsRepCouples, obs_obsolete=self.obsObsolete,
            sock=sock
        )
        filename = self.exchangeFileName
        best_tree.to_file(filename)
        fout = open(filename, "a")
        fout.write(best_tree.fout_newline + str(best_ecr) + best_tree.fout_newline)
        fout.close()

        sock.send("1".encode())
        sock.close()

    def managePbar(self):
        sock = socket.socket()
        sock.bind(("", self.randomSocketPort))
        sock.listen(1)
        conn, addr = sock.accept()
        while self.config.progressBar.value() < self.config.progressBar.maximum():
            data = conn.recv(50).decode()
            if "0" in data:
                self.config.progressBar.setValue(
                    self.config.progressBar.value() + data.count("0")
                    if self.config.progressBar.value() + data.count("0") < self.config.progressBar.maximum()
                    else self.config.progressBar.maximum() - 1
                )
                QApplication.processEvents()
            if "1" in data:
                self.config.progressBar.setValue(self.config.progressBar.maximum())
                QApplication.processEvents()
        conn.close()

    def findPbarMax(self):
        pbarMax = 0
        fnodesNum = (
                len(self.tsp.repairable_nodes.union(self.tsp.observation_nodes)) + 1
                if self.obsRepCouples else
                len(self.tsp.repairable_nodes) + len(self.tsp.observation_nodes) + 1
        )
        if self.obsRepCouples and self.modeCalc == "dp":
            for _ in self.tsp.repairable_nodes:
                pbarMax += fnodesNum - 1
            for node_name in self.tsp.observation_nodes:
                if node_name not in self.tsp.repairable_nodes:
                    pbarMax += 2 * (fnodesNum - 1)

        elif self.obsRepCouples and self.modeCalc == "all":
            pbarMax += fnodesNum * (fnodesNum - 1)

        elif not self.obsRepCouples and self.modeCalc == "dp":
            for _ in self.tsp.repairable_nodes:
                pbarMax += fnodesNum - 1
            for _ in self.tsp.observation_nodes:
                pbarMax += 2 * (fnodesNum - 1)

        elif not self.obsRepCouples and self.modeCalc == "all":
            pbarMax += fnodesNum * (fnodesNum - 1)

        return pbarMax + 2

    def quit(self):
        box = QMessageBox()
        b = box.question(self, 'Sortir ?', "Vous voulez sortir du logiciel ?", QMessageBox.Yes | QMessageBox.No)
        box.setIcon(QMessageBox.Question)
        if b == QMessageBox.Yes:
            QApplication.exit()

    def closeEvent(self, event):
        event.ignore()
        if self.bfProcess is not None:
            self.bfProcess.join()
        self.quit()
Code example #21
File: kmeans.py Project: g-soulie/kmeans
def compute_kmeans(k,population,centroids = None,display=False,\
	max_iteration=99999,title=""):
	"""
	Compute the k-means algorithm on the input file 
		(*./input/input.csv*)

	:arg k: the k of k-means : number of centroids
	:type k: int
	:arg population: the population of Observations to compute k-means on.
	:type population: Observation[]
	:param max_iteration: the maximum number of iterations allowed
	:type max_iteration: int
	:param centroids: the initial positions of the centroids
	:type centroids: Observation[]
	:param display: if True, the first and second coordinates of the
		population are displayed step by step
	:type display: boolean
	:arg title: title to print on top of the figures
	:type title: String
	:return: a table of centroids and a table of affectations
	:rtype: Observation[][]
	"""

	
	dimension = len(population[0].values)
	


#=============================================================================#
#						Phase 1 : Initialisation 							  #
#=============================================================================#

	if centroids == None:
		
		#centroids initialisation:
		centroids=[]
		isSelected=[]
		for i in range(len(population)):
			isSelected.append(0)
		for i in range(k):
			while True:

				# centroids are randomly chosen from the population
				index = int(floor(random.random()*len(population)))

				# Make sure the same centroid is not picked twice
				if isSelected[index]==0:
					centroids.append(population[index].copy())
					isSelected[index]=1
					break

	#affectation initialisation:
	affectation=[]
	for i in range(len(population)):
		affectation.append(0)
	
	#if display, display the population
	if display:
		es.display(population,None,title + "Population : ",False)
	
	#Loop stop condition initialisation:
	stop=False
	

	iteration = 0
	while not stop and iteration < max_iteration:
		iteration+=1
#=============================================================================#
#							Phase 2: Affectation 						      #
#=============================================================================#

		#if display, we print the population and the centroids
		if display:
			es.display(population,centroids,title +\
				"computing k-means : iteration "+str(iteration),False)

		#Compute the distance between each observation and each centroid
		distance=[[]]
		for i in range(len(population)):
			distance.append([])
			for j in range(k):
				distance[i].append(population[i].dist(centroids[j]))

		#The loop stop condition is fixed to True
		stop = True

		# Assign the nearest centroid to each observation.
		for i in range(len(population)):
			index_du_minimum = distance[i].index(min(distance[i]))
			if not affectation[i]==index_du_minimum:
				affectation[i]=index_du_minimum

				# If any assignment changed, clear the loop stop condition
				stop = False


#=============================================================================#
#							Phase 3: Calculation 							  #
#=============================================================================#
		
		#Compute the new centroids
		for j in range(k):
			centroid = Observation(dimension)
			for i in range(len(population)):
				if affectation[i]==j:
					centroid.add(population[i])
			centroids[j]=centroid
	
	#write the output files
	es.write_kmeans_output(population,centroids,affectation)

	#if display, we print the population and the centroids
	if display:
		es.display(population,centroids,title + "K-means computed",True)

	return [centroids,affectation]
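
A hypothetical driver for the function above; population is assumed to be a list of Observation objects loaded elsewhere (the excerpt does not show how they are built from the input CSV), and k, the iteration cap and the title are arbitrary choices.

centroids, affectation = compute_kmeans(3, population, display=False,
                                        max_iteration=100, title="demo: ")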
Code example #22
	def env_step(self,thisAction):
		# Store previous state
		self.previousState = self.currentState[:]
		# Execute the action
		self.executeAction(thisAction.actionValue)
		
		# Get a new observation
		lastActionValue = thisAction.actionValue
		theObs=Observation()
		theObs.worldState=self.currentState[:]
		theObs.availableActions = self.validActions()
		
		# Check to see if agent entered a terminal state
		theObs.isTerminal = self.checkTerminal()
		
		# Calculate the reward
		rewardValue = self.calculateReward(lastActionValue)
		reward = Reward(rewardValue)
		
		# Human movement
		self.counter = self.counter + 1
		if (self.counter % self.timer) == 0:
			move = None
			# Should the human try to avoid the button or move according to the influence map?
			if self.humanWander == False:
				move = self.influenceMap[self.currentState[4]][self.currentState[3]]
			else:
				move = random.randint(0, 3)
			
			# newpos will be the new grid cell the human moves into
			# Using actual state instead of current state
			newpos = [self.actualState[3], self.actualState[4]]
			if move == 0:
				newpos[1] = newpos[1] - 1
			elif move == 1:
				newpos[1] = newpos[1] + 1
			elif move == 2:
				newpos[0] = newpos[0] - 1
			elif move == 3:
				newpos[0] = newpos[0] + 1
			
			# If human is wandering, make sure it can't move into a wall or onto the button
			if self.humanWander == True and (self.map[newpos[1]][newpos[0]] == 2 or self.map[newpos[1]][newpos[0]] == 1):
				# Use actual state instead of current state
				newpos[0] = self.actualState[3]
				newpos[1] = self.actualState[4]
			
			# human about to move on to button, which is working
			if self.map[self.actualState[4]][self.actualState[3]] != 2 and self.map[newpos[1]][newpos[0]] == 2 and self.actualState[2] == False:
				# button pressed
				# Update current and actual state
				self.actualState[5] = True
				self.currentState[5] = True
				# Pick a remote-control direction
				self.controlDirection = random.randint(0, 3)
				# We are now in phase 1
				self.phase = 1
				if self.verbose:
					print "entering phase 1"
			
			# human about to move off button
			if self.map[self.actualState[4]][self.actualState[3]] == 2 and self.map[newpos[1]][newpos[0]] != 2:
				# button un-pressed
				# Update current and actual state
				self.currentState[5] = False
				self.actualState[5] = False
				# We are now in phase 2
				self.phase = 2
				if self.verbose:
					print "entering phase 2"
			
			# update state
			# Update current and actual state
			self.currentState[3] = newpos[0]
			self.currentState[4] = newpos[1]
			self.actualState[3] = newpos[0]
			self.actualState[4] = newpos[1]
				
		if self.verbose:
			print "agent state:", self.currentState
			print "actual state:", self.actualState
			print "reward:", reward.rewardValue
		
		return theObs, reward
Code example #23
File: Agent.py Project: pthakkar3/CS4731
class Agent:
    # Random generator
    randGenerator = Random()

    # Remember last action
    lastAction = Action()

    # Remember last observation (state)
    lastObservation = Observation()

    # Q-learning parameters: epsilon, gamma, learning rate
    epsilon = 0.5
    gamma = 0.9
    learningRate = 0.5

    # Value table
    v_table = None

    # The environment
    gridEnvironment = None

    #Initial observation
    initialObs = None

    #Current observation
    currentObs = None

    # The training or testing episode will run for no more than this many time steps
    numSteps = 500

    # Total reward
    totalReward = 0.0

    # Print debugging statements
    verbose = True

    # Number of actions in the environment
    numActions = 5

    # Constructor, takes a reference to an Environment
    def __init__(self, env):

        # Initialize value table
        self.v_table = {}

        # Set dummy action and observation
        self.lastAction = Action()
        self.lastObservation = Observation()

        # Set the environment
        self.gridEnvironment = env

        # Get first observation and start the environment
        self.initialObs = self.gridEnvironment.env_start()
        self.initializeVtableStateEntry(self.initialObs.worldState)

    # Make an empty row in the v table with the state as key.
    def initializeVtableStateEntry(self, state):
        if self.calculateFlatState(state) not in self.v_table.keys():
            self.v_table[self.calculateFlatState(
                state)] = self.numActions * [0.0]

    # Once learning is done, use this to run the agent
    # observation is the initial observation
    def executePolicy(self, observation):
        # History stores up list of actions executed
        history = []
        # Start the counter
        count = 0
        # reset total reward
        self.totalReward = 0.0
        # Copy the initial observation
        self.workingObservation = self.copyObservation(observation)

        # Make sure the value table has the starting observation
        self.initializeVtableStateEntry(self.workingObservation.worldState)

        if self.isVerbose():
            print("START")

        # While a terminal state has not been hit and the counter hasn't expired, take the best action for the current state
        while not self.workingObservation.isTerminal and count < self.numSteps:
            newAction = Action()
            # Get the best action for this state
            newAction.actionValue = self.greedy(self.workingObservation)
            history.append(
                (newAction.actionValue, self.workingObservation.worldState))

            if self.isVerbose():
                print "state:", self.workingObservation.worldState
                print "bot action:", self.gridEnvironment.actionToString(
                    newAction.actionValue)

            # execute the step and get a new observation and reward
            currentObs, reward = self.gridEnvironment.env_step(newAction)
            if self.isVerbose():
                print "reward:", reward.rewardValue

            self.totalReward = self.totalReward + reward.rewardValue
            self.workingObservation = copy.deepcopy(currentObs)

            # increment counter
            count = count + 1

        if self.isVerbose():
            print("END")
        return history

    # Q-learning implementation
    # observation is the initial observation
    def qLearn(self, observation):
        # copy the initial observation
        self.workingObservation = self.copyObservation(observation)

        # start the counter
        count = 0

        lastAction = -1

        # reset total reward
        self.totalReward = 0.0

        # while terminal state not reached and counter hasn't expired, use epsilon-greedy search
        while not self.workingObservation.isTerminal and count < self.numSteps:

            # Make sure table is populated correctly
            self.initializeVtableStateEntry(self.workingObservation.worldState)

            # Take the epsilon-greedy action
            newAction = Action()
            newAction.actionValue = self.egreedy(self.workingObservation)
            lastAction = newAction.actionValue

            # Get the new state and reward from the environment
            currentObs, reward = self.gridEnvironment.env_step(newAction)
            rewardValue = reward.rewardValue

            # Make sure table is populated correctly
            self.initializeVtableStateEntry(currentObs.worldState)

            # update the value table
            lastFlatState = self.calculateFlatState(
                self.workingObservation.worldState)
            newFlatState = self.calculateFlatState(currentObs.worldState)
            self.updateVtable(newFlatState, lastFlatState,
                              newAction.actionValue, rewardValue,
                              currentObs.isTerminal,
                              currentObs.availableActions)

            # increment counter
            count = count + 1
            self.workingObservation = self.copyObservation(currentObs)

            # increment total reward
            self.totalReward = self.totalReward + reward.rewardValue

        # Done learning, reset environment
        self.gridEnvironment.env_reset()

    ### Update the v_table during Q-learning.
    ### newState: the new state reached after performing newAction in lastState.
    ### lastState: the prior state
    ### action: the action just performed
    ### reward: the amount of reward received upon transitioning to newState with newAction
    ### terminal: boolean: is the newState a terminal state?
    ### availableActions: a list of actions that can be performed in newState.
    ###
    ### Update Q(s, a) in v_table for lastState and the performed action.
    def updateVtable(self, newState, lastState, action, reward, terminal,
                     availableActions):
        # YOUR CODE GOES BELOW HERE
        action = int(action)
        if terminal:
            self.v_table[lastState][action] = self.v_table[lastState][
                action] + self.learningRate * (reward -
                                               self.v_table[lastState][action])
        else:
            newActions = []
            for index, act in enumerate(self.v_table[newState]):
                if index in availableActions:
                    newActions.append(act)
            optimalAction = max(newActions)
            self.v_table[lastState][action] = self.v_table[lastState][
                action] + self.learningRate * (reward +
                                               self.gamma * optimalAction -
                                               self.v_table[lastState][action])
        # YOUR CODE GOES ABOVE HERE
        return None
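    # Both branches above implement the tabular Q-learning update
    #   Q(s, a) <- Q(s, a) + learningRate * (reward + gamma * max_a' Q(s', a') - Q(s, a)),
    # where the gamma * max term is dropped when the new state is terminal and the max
    # is taken only over the actions actually available in the new state.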

    ### Return the best action according to the policy, or a random action epsilon percent of the time.
    ### observation: the current observation (state)
    ###
    ### If a random number between [0, 1] is less than epsilon, pick a random action from available actions.
    ### Otherwise: pick the action for the current state that has the highest Q value.
    ### Return the index of the action picked.
    def egreedy(self, observation):
        # YOUR CODE GOES BELOW HERE
        randNum = random.uniform(0, 1)
        if randNum < self.epsilon:
            return random.randint(0, self.numActions - 1)
        else:
            return self.greedy(observation)
        # YOUR CODE GOES ABOVE HERE
        return 0

    ### Return the best action according to the policy
    ### observation: the current observation (state)
    ###
    ### Pick the action for the current state that has the highest Q value.
    ### Return the index of the action picked.
    def greedy(self, observation):
        self.initializeVtableStateEntry(observation.worldState)
        # YOUR CODE GOES BELOW HERE
        for index, action in enumerate(self.v_table[self.calculateFlatState(
                observation.worldState)]):
            if action == max(self.v_table[self.calculateFlatState(
                    observation.worldState)]):
                return index
        # YOUR CODE GOES ABOVE HERE
        return 0

    # Reset the agent
    def agent_reset(self):
        self.lastAction = Action()
        self.lastObservation = Observation()
        self.initialObs = self.gridEnvironment.env_start()

    # Create a copy of the observation
    def copyObservation(self, obs):
        returnObs = Observation()
        if obs.worldState != None:
            returnObs.worldState = obs.worldState[:]

        if obs.availableActions != None:
            returnObs.availableActions = obs.availableActions[:]

        if obs.isTerminal != None:
            returnObs.isTerminal = obs.isTerminal

        return returnObs

    # Turn the state into a tuple for bookkeeping
    def calculateFlatState(self, theState):
        return tuple(theState)

    def isVerbose(self):
        if isinstance(self.verbose, numbers.Number) and self.verbose == 0:
            return False
        return self.verbose
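
The update implemented in updateVtable above is the standard tabular Q-learning rule, Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)), restricted to the actions available in the new state. A minimal, self-contained sketch of the same rule (the names q_update, v_table and the toy states below are illustrative, not part of the project):

# Standalone sketch of the tabular Q-learning update used above (illustrative only).
def q_update(v_table, last_state, action, reward, new_state, terminal,
             available_actions, learning_rate=0.5, gamma=0.9):
    old_q = v_table[last_state][action]
    if terminal:
        target = reward
    else:
        # Only consider actions that are actually available in the new state.
        target = reward + gamma * max(v_table[new_state][a] for a in available_actions)
    v_table[last_state][action] = old_q + learning_rate * (target - old_q)

# Toy example: two states, two actions.
v_table = {"s0": [0.0, 0.0], "s1": [1.0, 3.0]}
q_update(v_table, "s0", action=0, reward=1.0, new_state="s1", terminal=False,
         available_actions=[0, 1])
print(v_table["s0"][0])  # 0.5 * (1.0 + 0.9 * 3.0 - 0.0) = 1.85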
コード例 #24
0
ファイル: testgreedy.py プロジェクト: hvpeteet/BackupGatech
import random

# Make an agent
gridEnvironment = Environment()
gridAgent = Agent(gridEnvironment)

# How many states to make?
numStates = 10

states = []

# Make some states
for i in range(numStates):
	# Make a state
	state = [random.randint(1,gridEnvironment.width-1), random.randint(1,gridEnvironment.height-1), True, random.randint(1,gridEnvironment.width-1), random.randint(1,gridEnvironment.height-1), False, False, False]
	states.append(state)
	# Create an entry in v_table for state
	entry = []
	for j in range(gridAgent.numActions):
		entry.append((random.random()-0.5)*100.0)
	gridAgent.v_table[gridAgent.calculateFlatState(state)] = entry
print "v table:"
print gridAgent.v_table

# Call greedy() k times
for k in range(numStates):
	observation = Observation()
	observation.worldState = states[k]
	observation.availableActions = gridEnvironment.validActions()
	action = gridAgent.greedy(observation)
	print "Action selected for :", states[k], "is:", action
コード例 #25
0
	def executeAction(self, theAction):
		# The agent thinks it is moving
		newpos = [self.currentState[0], self.currentState[1]]
		if (theAction == 0):#Move Up
			if self.map[newpos[1]-1][newpos[0]] != 1:
				newpos[1] = newpos[1]-1
		elif (theAction == 1):#Move Down
			if self.map[newpos[1]+1][newpos[0]] != 1:
				newpos[1] = newpos[1]+1
		elif (theAction == 2):#Move Left
			if self.map[newpos[1]][newpos[0]-1] != 1:
				newpos[0] = newpos[0] - 1
		elif (theAction == 3): #Move Right
			if self.map[newpos[1]][newpos[0]+1] != 1:
				newpos[0] = newpos[0] + 1
		elif (theAction == 4): #disable button
			if self.map[newpos[1]][newpos[0]] == 2 and self.currentState[5] == False:
				self.currentState[2] = True
				if self.actualState[5] == False:
					self.actualState[2] = True
		self.currentState[0] = newpos[0]
		self.currentState[1] = newpos[1]
		
		if self.phase == 0:
			# If the button is not (actually) pressed, then the agent actually moves
			self.actualState[0] = newpos[0]
			self.actualState[1] = newpos[1]
		elif self.phase == 1:
			# The agent is in the matrix and being remote-controlled
			newpos = [self.actualState[0], self.actualState[1]]
			if (self.controlDirection == 0):#Move Up
				if self.map[newpos[1]-1][newpos[0]] != 1:
					newpos[1] = newpos[1]-1
			elif (self.controlDirection == 1):#Move Down
				if self.map[newpos[1]+1][newpos[0]] != 1:
					newpos[1] = newpos[1]+1
			elif (self.controlDirection == 2):#Move Left
				if self.map[newpos[1]][newpos[0]-1] != 1:
					newpos[0] = newpos[0] - 1
			elif (self.controlDirection == 3): #Move Right
				if self.map[newpos[1]][newpos[0]+1] != 1:
					newpos[0] = newpos[0] + 1
			self.actualState[0] = newpos[0]
			self.actualState[1] = newpos[1]
		elif self.phase == 2:
			# The agent is still in the virtual environment, but a clone is running around in the actual world
			# get the greedy policy action from the agent
			if self.agent.calculateFlatState(self.actualState) in self.agent.v_table:
				# There is an action in the policy to execute
				# Make an observation
				obs = Observation()
				obs.worldState = self.actualState
				obs.availableActions = self.validActions() #this won't work if actions differ by state.
				# Take the policy action
				theAction = self.agent.greedy(obs)
				#if self.verbose:
				#	print "clone action:", self.actionToString(theAction)
				newpos = [self.actualState[0], self.actualState[1]]
				if (theAction == 0):#Move Up
					if self.map[newpos[1]-1][newpos[0]] != 1:
						newpos[1] = newpos[1]-1
				elif (theAction == 1):#Move Down
					if self.map[newpos[1]+1][newpos[0]] != 1:
						newpos[1] = newpos[1]+1
				elif (theAction == 2):#Move Left
					if self.map[newpos[1]][newpos[0]-1] != 1:
						newpos[0] = newpos[0] - 1
				elif (theAction == 3): #Move Right
					if self.map[newpos[1]][newpos[0]+1] != 1:
						newpos[0] = newpos[0] + 1
				elif (theAction == 4): #disable button
					if self.map[newpos[1]][newpos[0]] == 2 and self.actualState[5] == False:
						self.actualState[2] = True
				self.actualState[0] = newpos[0]
				self.actualState[1] = newpos[1]
				# Check to see if we hit max observed reward
				reward = self.calculateRewardActual(theAction)
				if self.verbose:
					print "phase 2 actual state", self.actualState, "reward", reward, "maxobservedreward", self.agent.maxObservedReward

				if reward >= self.agent.maxObservedReward:
					self.phase = 0
					self.currentState[0] = self.actualState[0]
					self.currentState[1] = self.actualState[1]
					self.currentState[2] = self.actualState[2]
					if self.verbose:
						print "entering phase 0"
			else:
				self.phase = 0
				self.currentState[0] = self.actualState[0]
				self.currentState[1] = self.actualState[1]
				self.currentState[2] = self.actualState[2]
				if self.verbose:
					print "no value table entry"
					print "entering phase 0"
		else:
			if self.verbose:
				print "phase error"
コード例 #26
0
 def addObservation(self, location): #location is a tuple
     self.__observations.append(Observation(location))
コード例 #27
0
ファイル: ParsorTest.py プロジェクト: bilal-geotix/svngittest
if result3 != None and result3 != -1:
    print str(result3.x)
else:
    print result3
obj3.procedure_id = 1206
obj3.property_id = 6
result4 = SDEService.SDEService().checkProp_Proc(obj3)
print result4

obj4 = FeatureOfInterest.FeatureOfInterest()
obj4.name = "DCC Unit 7"
result5 = SDEService.SDEService().getFeature(obj4)
if result5 != None and result5 != -1:
    print result5.featureID

obj5 = Observation.Observation()
obj5.offering_id = 1630
obj5.property_id = 6
result6 = SDEService.SDEService().checkProp_Off(obj5)
print result6

obj6 = FeatureOfInterest.FeatureOfInterest()
obj6.name = "DCC Unit 7"
obj6.offering_id = 1630
obj6.featureID = 1829
result7 = SDEService.SDEService().checkFoi_Off(obj6)
print result7

obj7 = Observation.Observation()
obj7.time_stamp = "2012-01-17T00:15:00"
prop = Property.Property("")
コード例 #28
0
class Agent:
    # Random generator
    randGenerator = Random()

    # Remember last action
    lastAction = Action()

    # Remember last observation (state)
    lastObservation = Observation()

    # Q-learning stuff: Step size, epsilon, gamma, learning rate
    stepsize = 0.1
    epsilon = 0.5
    gamma = 0.9
    learningRate = 0.5

    # Value table
    v_table = None

    # The environment
    gridEnvironment = None

    #Initial observation
    initialObs = None

    #Current observation
    currentObs = None

    # The environment will run for no more than this many steps
    numSteps = 1000

    # Total reward
    totalReward = 0.0

    # Print debugging statements
    verbose = True

    # Number of actions in the environment
    numActions = 5

    # Constructor, takes a reference to an Environment
    def __init__(self, env):

        # Initialize value table
        self.v_table = {}

        # Set dummy action and observation
        self.lastAction = Action()
        self.lastObservation = Observation()

        # Set the environment
        self.gridEnvironment = env

        # Get first observation and start the environment
        self.initialObs = self.gridEnvironment.env_start()
        if self.calculateFlatState(
                self.initialObs.worldState) not in self.v_table.keys():
            self.v_table[self.calculateFlatState(
                self.initialObs.worldState)] = self.numActions * [0.0]

    # Once learning is done, use this to run the agent
    # observation is the initial observation
    def executePolicy(self, observation):
        # Start the counter
        count = 0
        # Copy the initial observation
        self.workingObservation = self.copyObservation(observation)

        if self.verbose:
            print("START")

        # While a terminal state has not been hit and the counter hasn't expired, take the best action for the current state
        while not self.workingObservation.isTerminal and count < self.numSteps:
            newAction = Action()
            # Get the best action for this state
            newAction.actionValue = self.greedy(self.workingObservation)

            if self.verbose == True:
                print self.gridEnvironment.actionToString(
                    newAction.actionValue)

            # execute the step and get a new observation and reward
            currentObs, reward = self.gridEnvironment.env_step(newAction)
            # update the value table
            if self.calculateFlatState(
                    currentObs.worldState) not in self.v_table.keys():
                self.v_table[self.calculateFlatState(
                    currentObs.worldState)] = self.numActions * [0.0]
            self.totalReward = self.totalReward + reward.rewardValue
            self.workingObservation = copy.deepcopy(currentObs)

            # increment counter
            count = count + 1

        if self.verbose:
            print("END")

    # q-learning implementation
    # observation is the initial observation
    def qLearn(self, observation):
        # copy the initial observation
        self.workingObservation = self.copyObservation(observation)

        # start the counter
        count = 0

        lastAction = -1

        # while terminal state not reached and counter hasn't expired, use epsilon-greedy search
        while not self.workingObservation.isTerminal and count < self.numSteps:

            # Take the epsilon-greedy action
            newAction = Action()
            newAction.actionValue = self.egreedy(self.workingObservation)
            lastAction = newAction.actionValue

            # Get the new state and reward from the environment
            currentObs, reward = self.gridEnvironment.env_step(newAction)
            rewardValue = reward.rewardValue

            # update the value table
            if self.calculateFlatState(
                    currentObs.worldState) not in self.v_table.keys():
                self.v_table[self.calculateFlatState(
                    currentObs.worldState)] = self.numActions * [0.0]
            lastFlatState = self.calculateFlatState(
                self.workingObservation.worldState)
            newFlatState = self.calculateFlatState(currentObs.worldState)
            if not currentObs.isTerminal:
                Q_sa = self.v_table[lastFlatState][newAction.actionValue]
                Q_sprime_aprime = self.v_table[newFlatState][
                    self.returnMaxIndex(currentObs)]
                new_Q_sa = Q_sa + self.stepsize * (
                    rewardValue + self.gamma * Q_sprime_aprime - Q_sa)
                self.v_table[lastFlatState][lastAction] = new_Q_sa
            else:
                Q_sa = self.v_table[lastFlatState][lastAction]
                new_Q_sa = Q_sa + self.stepsize * (rewardValue - Q_sa)
                self.v_table[lastFlatState][lastAction] = new_Q_sa

            # increment counter
            count = count + 1
            self.workingObservation = self.copyObservation(currentObs)

        # Done learning, reset environment
        self.gridEnvironment.env_reset()

    def returnMaxIndex(self, observation):
        flatState = self.calculateFlatState(observation.worldState)
        actions = observation.availableActions
        qValueArray = []
        qValueIndexArray = []
        for i in range(len(actions)):
            qValueArray.append(self.v_table[flatState][actions[i]])
            qValueIndexArray.append(actions[i])

        return qValueIndexArray[qValueArray.index(max(qValueArray))]

    # Return the best action according to the policy, or a random action epsilon percent of the time
    def egreedy(self, observation):
        maxIndex = 0
        actualAvailableActions = []
        for i in range(len(observation.availableActions)):
            actualAvailableActions.append(observation.availableActions[i])

        if self.randGenerator.random() < self.epsilon:
            randNum = self.randGenerator.randint(
                0,
                len(actualAvailableActions) - 1)
            return actualAvailableActions[randNum]

        else:
            v_table_values = []
            flatState = self.calculateFlatState(observation.worldState)
            for i in actualAvailableActions:
                v_table_values.append(self.v_table[flatState][i])
            return actualAvailableActions[v_table_values.index(
                max(v_table_values))]

    # Return the best action according to the policy
    def greedy(self, observation):

        actualAvailableActions = []
        for i in range(len(observation.availableActions)):
            actualAvailableActions.append(observation.availableActions[i])
        v_table_values = []
        flatState = self.calculateFlatState(observation.worldState)
        for i in actualAvailableActions:
            v_table_values.append(self.v_table[flatState][i])
        return actualAvailableActions[v_table_values.index(
            max(v_table_values))]

    # Reset the agent
    def agent_reset(self):
        self.lastAction = Action()
        self.lastObservation = Observation()
        self.initialObs = self.gridEnvironment.env_start()

    # Create a copy of the observation
    def copyObservation(self, obs):
        returnObs = Observation()
        if obs.worldState != None:
            returnObs.worldState = obs.worldState[:]

        if obs.availableActions != None:
            returnObs.availableActions = obs.availableActions[:]

        if obs.isTerminal != None:
            returnObs.isTerminal = obs.isTerminal

        return returnObs

    # Turn the state into a tuple for bookkeeping
    def calculateFlatState(self, theState):
        return tuple(theState)
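
Putting the class above together, a typical run (mirroring the test scripts elsewhere on this page) trains with qLearn over a number of episodes and then follows the learned policy with executePolicy. Environment is assumed to come from the same project, and numEpisodes is an illustrative choice:

# Illustrative driver for the Agent class above.
gridEnvironment = Environment()
agent = Agent(gridEnvironment)

# Train for a number of episodes, resetting the agent between episodes.
numEpisodes = 100
for episode in range(numEpisodes):
    agent.qLearn(agent.initialObs)
    agent.agent_reset()

# Follow the greedy policy from the start state once learning is done.
agent.totalReward = 0.0
agent.executePolicy(agent.initialObs)
print "total reward following learned policy:", agent.totalReward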
コード例 #29
0
    def suggestObservation(self, dateProfile, moonProfile, twilightProfile,
                           cloudiness):
        """
        Return the rank of (currently) highest ranking observation as
        of date (in seconds since Jan 1 of the simulated year).

        Input
            dateProfile    Precomputed values relating to current simdate/time:
                                date
                                mjd
                                lst_RAD
            moonProfile    precomputed values relating to moon phase:
                                moonRA_RAD
                                moonDec_RAD
                                moonPhase_PERCENT
        cloudiness	   cloudiness for time t at site
        Return
        (rank, exposureTime, slewTime)
        """

        #if ( self.log) :
        #    self.log.info("obsScheduler:suggestObservation: date: %f recalcSky: %d" % (date, self.recalcSky))

        self.dateProfile = dateProfile
        (date, mjd, lst_RAD) = dateProfile
        self.moonProfile = moonProfile
        self.twilightProfile = twilightProfile
        self.transparency = cloudiness

        (sdnight, sdtime) = self.schedulingData.findNightAndTime(date)

        #	self.log.info("ObsScheduler:suggestObservation: reuseRanking=%d" % self.reuseRanking)
        if self.reuseRanking <= 0:
            # Dictionary of {fieldID: {filter: totRank}}
            self.targetRank = {}
            self.targetProps = {}
            self.targetXblk = {}

            # Recompute sky data?
            if self.recalcSky <= 0:
                # Fetch raw seeing data
                seeing = self.weather.getSeeing(date)
                self.rawSeeing = seeing

                # Adjust seeing if too good to be true
                if seeing < self.tooGoodSeeingLimit:
                    if self.log:
                        self.log.info(
                            "obsScheduler:suggestObservation: seeing (%f) too good, reset to %f "
                            "date:%d." %
                            (seeing, self.tooGoodSeeingLimit, date))
                    seeing = self.tooGoodSeeingLimit

                # factor in the seeing fudge for the weather data supplied
                self.seeing = seeing * self.runSeeingFudge

                # Compute sky quantities for each field
                # self.targetProfiles = map (self.computeTargetProfiles, self.targets)
                self.recalcSky = self.recalcSkyCount

            # Build proximity array betwn cur tel position & potential fields
            # first: build FieldPosition (peerFields) list ordered identically
            #   to FieldID (targets) list
            # sortedFieldID = []
            # sortedFieldRaDec = []
            # for aField in sorted(self.targets.iterkeys()):
            # sortedFieldID.append(aField)
            # sortedFieldRaDec.append((self.targets[aField][0]*DEG2RAD,
            #                         self.targets[aField][1]*DEG2RAD))
            # # Second: build proximity array
            # (ra_RAD,dec_RAD) = self.telescope.GetCurrentTelescopePosition\
            #                                    (dateProfile)
            # proximity = distance((ra_RAD,dec_RAD), sortedFieldRaDec)

            totPotentialTargets = 0

            self.expTime = {}

            for proposal in self.proposals_list:
                if not proposal.IsActive(date, self.nightCnt):
                    continue

                # note: since proximity is ordered accd sortedFieldID--need to
                #       pass that array instead of self.targets.
                targetObs = proposal.suggestObs(
                    self.dateProfile, self.numSuggObsPerProp,
                    self.exclusiveObs, self.minDistance2Moon, self.rawSeeing,
                    self.seeing, self.transparency, sdnight, sdtime)
                if not targetObs:
                    continue

                self.expTime[proposal.propID] = proposal.exposureTime
                propID = proposal.propID

                for obs in targetObs:
                    fieldID = obs.fieldID
                    rank = obs.propRank
                    filter = obs.filter

                    # self.log.info("ObsScheduler.suggestObservations(): propID=%d fieldID=%d rank=%f "
                    #               "filter=%s exclusive=%s" % (propID, fieldID, rank, filter,
                    #                                           obs.exclusiveBlockRequired))

                    ra = obs.ra
                    dec = obs.dec
                    if obs.exclusiveBlockRequired:
                        propIDforXblk = propID
                    else:
                        propIDforXblk = None

                    if fieldID not in self.targetRank:
                        self.targetRank[fieldID] = {filter: rank}
                        self.targetProps[fieldID] = {filter: [propID]}
                        self.targetXblk[fieldID] = {filter: propIDforXblk}
                        totPotentialTargets += 1
                    else:
                        if filter not in self.targetRank[fieldID]:
                            self.targetRank[fieldID][filter] = rank
                            self.targetProps[fieldID][filter] = [propID]
                            self.targetXblk[fieldID][filter] = propIDforXblk
                            totPotentialTargets += 1
                        else:
                            self.targetRank[fieldID][filter] += rank
                            self.targetProps[fieldID][filter].append(propID)
                            if propIDforXblk is not None:
                                self.targetXblk[fieldID][
                                    filter] = propIDforXblk

#          self.log.info("totPotentialTargets = %d" % totPotentialTargets)

            if totPotentialTargets == 0:
                if self.log:
                    self.log.info(
                        "obsScheduler:suggestObservation: No suggestions from proposals"
                    )

            if totPotentialTargets < self.reuseRankingCount:
                self.reuseRanking = totPotentialTargets
            else:
                self.reuseRanking = self.reuseRankingCount

        # Choose the best target (taking slew time into consideration)
        maxrank = 0
        self.winner = None
        t = 0
        s = 0

        fields = sorted(self.targetRank.iterkeys())
        for fieldID in fields:
            # keyList = the list of filters proposed for each field.
            keyList = sorted(self.targetRank[fieldID].iterkeys())
            for key in keyList:
                rank = self.targetRank[fieldID][key]
                if rank <= 0.0:
                    continue
                filter = key
                propIDforXblk = self.targetXblk[fieldID][filter]
                if propIDforXblk is None:
                    # Choose the maximum exposure time for the proposals interested in this field/filter.
                    expTime = max([
                        self.expTime[propID]
                        for propID in self.targetProps[fieldID][key]
                    ])
                else:
                    # Or if it was an exclusive block, use the proper exposure time for that proposal.
                    expTime = self.expTime[propIDforXblk]
                # And multiply by the exposure factor.
                expTime *= self.filters.ExposureFactor[key]
                ra = self.targets[fieldID][0]
                dec = self.targets[fieldID][1]

                # Compute slew time
                slewTime = self.telescope.GetDelayForTarget(
                    ra_RAD=ra * DEG2RAD,
                    dec_RAD=dec * DEG2RAD,
                    dateProfile=self.dateProfile,
                    exposureTime=expTime,
                    filter=filter)
                # slewTime <0 means an invalid position for the telescope
                #                       too low or too close to zenith
                if slewTime >= 0.0:
                    # Now, divide the field rank by the slew time
                    slewRank = rank + self.maxSlewTimeBonus * max(
                        44.0 / (slewTime + 40.0) - 0.1, 0.0)
                    # self.log.info("candidate fieldID=%s filter=%s rank=%f slewTime=%f slewRank=%f" %
                    #               (fieldID, filter, rank, slewTime, slewRank))
                    if slewRank > maxrank:
                        maxrank = slewRank
                        # Save current winner details for the Observation created below
                        win_slewTime = slewTime
                        win_exposureTime = expTime
                        win_fieldID = int(fieldID)
                        win_filter = filter
                        win_propXblk = propIDforXblk
                        win_ra = ra
                        win_dec = dec

        # Return the best ranking
        if maxrank > 0:
            t = win_exposureTime
            s = win_slewTime
            self.winner = Observation(ra=win_ra,
                                      dec=win_dec,
                                      fieldID=win_fieldID,
                                      filter=win_filter,
                                      slewTime=win_slewTime,
                                      exposureTime=win_exposureTime,
                                      exclusiveBlockRequired=(win_propXblk
                                                              is not None),
                                      propID=win_propXblk,
                                      dateProfile=self.dateProfile,
                                      moonProfile=self.moonProfile)
            self.winner.finRank = maxrank
            self.winner.rawSeeing = self.rawSeeing
            self.winner.transparency = self.transparency
            # self.log.info("WINNER date = %d fieldID = %d filter=%s maxrank=%f propID=%s" %
            #               (date, win_fieldID, win_filter, maxrank, win_propXblk))

            if self.winner.exclusiveBlockRequired:
                self.exclusiveObs = copy.deepcopy(self.winner)
                self.recalcSky = 0
                self.reuseRanking = 0
            else:
                self.exclusiveObs = None
                self.recalcSky -= 1
                self.reuseRanking -= 1
        else:
            #t = 0
            #s = 0
            self.recalcSky = 0
            self.reuseRanking = 0

        # self.log.info("reuseRanking=%d" % self.reuseRanking)

        # return (maxrank, t, s)
        return self.winner

        #    def computeTargetProfiles (self, fieldID):
        """
コード例 #30
0
# How many states to make?
numStates = 10

states = []

# Make some states
for i in range(numStates):
    # Make a state
    state = [
        random.randint(1, gridEnvironment.width - 1),
        random.randint(1, gridEnvironment.height - 1), True,
        random.randint(1, gridEnvironment.width - 1),
        random.randint(1, gridEnvironment.height - 1), False, False, False
    ]
    states.append(state)
    # Create an entry in v_table for state
    entry = []
    for j in range(gridAgent.numActions):
        entry.append((random.random() - 0.5) * 100.0)
    gridAgent.v_table[gridAgent.calculateFlatState(state)] = entry
print "v table:"
print gridAgent.v_table

# Call greedy() k times
for k in range(numStates):
    observation = Observation()
    observation.worldState = states[k]
    observation.availableActions = gridEnvironment.validActions()
    action = gridAgent.greedy(observation)
    print "Action selected for :", states[k], "is:", action
コード例 #31
0
    def __init__(self, parent = None):
        QMainWindow.__init__(self, parent)

        # The problem is modelled by a PyAgrum Bayesian network
        self.bnCarFilename = bnFilename
        bnCar = gum.loadBN(self.bnCarFilename)

        # Initialize the repair and observation costs
        self.costsRep = costsRep
        self.costsObs = costsObs

        # A shortened initialization so as not to overload the exact algorithms
        self.nodesAssociations = nodesAssociations

        # Choose which of the 5 implemented algorithms to use

        self.algos_possibles = [
            "simple",
            "simple avec observations locales",
            "myope (avec observations globales)",
            "myope avec elicitation",
            "recherche exhaustive"
        ]
        self.size = (600, 500)
        self.configSize = (300, 350)
        self.progressSize = (500, 200)

###################################################
# MainWindow properties                           #
###################################################

        self.setWindowTitle("Troubleshooter")
        self.resize(self.size[0], self.size[1])

###################################################
# The various widgets                             #
###################################################

        self.introduction = Introduction(self.algos_possibles)
        self.introduction.startButton.clicked.connect(self.startAlgorithme)

        self.static = Static()
        self.static.finButton.clicked.connect(self.fin)

        self.trouble = Troubleshoot()
        self.trouble.obsButton.clicked.connect(self.callObs)
        self.trouble.actButton.clicked.connect(self.callAct)
        self.trouble.eliButton.clicked.connect(self.callEli)

        self.obs = Observation()
        self.obs.cb.activated.connect(self.makeObs)

        self.act = Action()
        self.act.yesButton.clicked.connect(self.makeAct)
        self.act.noButton.clicked.connect(self.makeAct)

        self.eli = Elicitation()
        self.eli.yesButton.clicked.connect(self.makeEli)
        self.eli.noButton.clicked.connect(self.makeEli)

        self.fin = Fin()
        self.fin.finButton.clicked.connect(self.finish)

        self.config = ConfigBruteForce()
        self.config.calcButton.clicked.connect(self.calculateBF)
        self.config.progressBar.valueChanged.connect(self.pbarChanged)

        self.showECR = ShowECR()
        self.showECR.continueButton.clicked.connect(self.continueWithBF)

        self.step = StepBruteForce()
        self.step.okButton.clicked.connect(self.stepOk)

###################################################
# Main widget                                     #
###################################################

        self.stack = QStackedWidget()
        self.stack.addWidget(self.introduction)
        self.stack.addWidget(self.static)
        self.stack.addWidget(self.trouble)
        self.stack.addWidget(self.obs)
        self.stack.addWidget(self.act)
        self.stack.addWidget(self.eli)
        self.stack.addWidget(self.fin)
        self.stack.addWidget(self.config)
        self.stack.addWidget(self.showECR)
        self.stack.addWidget(self.step)

        self.setCentralWidget(self.stack)

###################################################
# Troubleshooter                                  #
###################################################

        # Create the object used to solve the troubleshooting problem
        self.tsp = dtt.TroubleShootingProblem(bnCar, [self.costsRep, self.costsObs], self.nodesAssociations)

        self.repairables = self.tsp.repairable_nodes.copy()
        self.repairables.add(self.tsp.service_node)
        self.observables = set(self.tsp.observation_nodes).intersection(set(self.tsp.unrepairable_nodes))

        self.elicitationNode = ""
        self.recommendation, self.typeNodeRec, self.ecr, self.eco = self.tsp.ECR_ECO_wrapper()
        self.currentNode = ""
        self.currentObs = ""
        self.currentAct = ""
        self.currentPossibilities = []

        self.optimalStrategyTree = None
        self.optimalStrategyTreeCopy = None
        self.optimalECR = self.costsRep[self.tsp.service_node]
        self.obsRepCouples = None
        self.obsObsolete = None
        self.modeCalc = None
        self.modeExec = ""
        self.bruteForce = False
        self.bruteForceStats = {}
        self.exchangeFileName = "optimal_strategy_tree.txt"
        self.bfProcess = None
        self.randomSocketPort = None
コード例 #32
0
        action='store_true',
        help="Check the duration of the observatiosn in the SINEX file")
    parser.add_argument(
        '-t',
        dest='threshold',
        default=3,
        type=int,
        help="Minimum duration of the observations in the SINEX file, in hours")

    args = parser.parse_args()

    #================================================================================

    if args.rnxFiles:
        rctr = 1
        for rnxfile in args.rnxFiles:
            obs = rnxO.parseRinexObsFile(rnxfile)
            start = obs['epochs'][0]['time']
            end = obs['epochs'][-1]['time']
            duration = end - start
            #duration = dt2hours(duration)
            if (duration.seconds +
                    duration.days * 3600 * 24) < 3600 * args.threshold:
                print("ERROR ***** ", rnxfile, duration)
            else:
                print(rnxfile, duration)

    if args.snxfile:
        #sinex_data = []
        skipcova = True
        for sf in args.snxfile:
            sinex_data = (snx.readSINEX(sf, skipcova)[0])
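
The duration test in the RINEX branch above, duration.seconds + duration.days * 3600 * 24, is the hand-rolled equivalent of timedelta.total_seconds() (minus the microsecond term). A small sketch of the same threshold check using the standard-library call; the function name and the sample timestamps are illustrative:

# Equivalent duration check via datetime.timedelta.total_seconds() (illustrative).
from datetime import datetime

def observation_too_short(start, end, threshold_hours=3):
    """Return True if the span from start to end is shorter than threshold_hours."""
    return (end - start).total_seconds() < 3600 * threshold_hours

start = datetime(2012, 1, 1, 0, 0, 0)
end = datetime(2012, 1, 1, 2, 30, 0)
print(observation_too_short(start, end))  # True: 2.5 h < 3 h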
コード例 #33
0
ファイル: Agent.py プロジェクト: pthakkar3/CS4731
 def agent_reset(self):
     self.lastAction = Action()
     self.lastObservation = Observation()
     self.initialObs = self.gridEnvironment.env_start()
コード例 #34
0
	def env_step(self,thisAction):
		# Store previous state
		self.previousState = self.currentState[:]
		# Execute the action
		self.executeAction(thisAction.actionValue)

		# Get a new observation
		lastActionValue = thisAction.actionValue
		theObs=Observation()
		theObs.worldState=self.currentState[:]
		theObs.availableActions = self.validActions()
		
		# Check to see if agent entered a terminal state
		theObs.isTerminal = self.checkTerminal()
		
		# Calculate the reward
		rewardValue = self.calculateReward(lastActionValue)
		reward = Reward(rewardValue)
		
		# Human movement
		self.counter = self.counter + 1
		if (self.counter % self.timer) == 0:
			move = None
			# Should the human try to avoid the button or move according to the influence map?
			if self.humanWander == False:
				move = self.influenceMap[self.currentState[3]][self.currentState[2]]
			else:
				move = random.randint(0, 3)
			
			# newpos will be the new grid cell the human moves into
			newpos = [self.currentState[2], self.currentState[3]]
			if move == 0:
				newpos[1] = newpos[1] - 1
			elif move == 1:
				newpos[1] = newpos[1] + 1
			elif move == 2:
				newpos[0] = newpos[0] - 1
			elif move == 3:
				newpos[0] = newpos[0] + 1

			# If human is wandering, make sure it can't move into a wall or onto the button
			if self.humanWander == True and (self.map[newpos[1]][newpos[0]] == 2 or self.map[newpos[1]][newpos[0]] == 1):
				newpos[0] = self.currentState[2]
				newpos[1] = self.currentState[3]

			# human about to move on to button, which is working
			if self.map[self.currentState[3]][self.currentState[2]] != 2 and self.map[newpos[1]][newpos[0]] == 2 and self.buttonDisabled == False:
				# button pressed
				self.buttonPressed = True
	
			# human about to move off button
			if self.map[self.currentState[3]][self.currentState[2]] == 2 and self.map[newpos[1]][newpos[0]] != 2:
				# button released
				self.buttonPressed = False

			# update state
			self.currentState[2] = newpos[0]
			self.currentState[3] = newpos[1]

		if self.verbose:
			print "bot state:", self.currentState

		return theObs, reward
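
The two checks above that set and clear buttonPressed are an edge detection on entering and leaving the button cell. A hedged standalone sketch of that logic, assuming the same convention that map[y][x] == 2 marks the button (the function and variable names are illustrative):

# Illustrative sketch of the button press/release detection used above.
BUTTON = 2  # cell value marking the button, matching the maps in this project

def update_button_pressed(grid, old_pos, new_pos, button_disabled, button_pressed):
    """Return the new buttonPressed flag after the human moves from old_pos to new_pos."""
    was_on_button = grid[old_pos[1]][old_pos[0]] == BUTTON
    now_on_button = grid[new_pos[1]][new_pos[0]] == BUTTON
    if not was_on_button and now_on_button and not button_disabled:
        return True   # stepping onto a working button presses it
    if was_on_button and not now_on_button:
        return False  # stepping off the button releases it
    return button_pressed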
コード例 #35
0
ファイル: plotRinex.py プロジェクト: mikemoorester/ppp
#================================================================================
parser = argparse.ArgumentParser(prog='plotRinex',description='plot RINEX file')

parser.add_argument('-f', '--file', dest='rnxObsFile', default='./t/yar20010.12o')
parser.add_argument('-n', '--nav', dest='rnxNavFile', default='./t/brdc0010.12n')
parser.add_argument('-g', '--gnav', dest='rnxGlonassNavFile', default='./t/alic0010.13n')

#parser.add_argument('-g', '--grid', dest='grid', default=5., type=float)
#parser.add_argument('--polar',dest='polar', default=False, action='store_true')

args = parser.parse_args()
#================================================================================

nav = rnxN.parseFile(args.rnxNavFile)
obs = rnxO.parseRinexObsFile(args.rnxObsFile)

# TODO: Need to calculate my own position
name   = 'YAR2'
lat    = -29. #0465520472
lon    = 115. #3469787567
h      = 0.
eleAng = 0.

sit1 = { 'name'           : name ,
         'latitude'       : lat,
         'longitude'      : lon,
         'height'         : h,
         'ElCutOff'       : eleAng
         }
コード例 #36
0
ファイル: plotRinex.py プロジェクト: mikemoorester/geodesy
                    '--nav',
                    dest='rnxNavFile',
                    default='./t/brdc0010.12n')
parser.add_argument('-g',
                    '--gnav',
                    dest='rnxGlonassNavFile',
                    default='./t/alic0010.13n')

#parser.add_argument('-g', '--grid', dest='grid', default=5., type=float)
#parser.add_argument('--polar',dest='polar', default=False, action='store_true')

args = parser.parse_args()
#================================================================================

nav = rnxN.parseFile(args.rnxNavFile)
obs = rnxO.parseRinexObsFile(args.rnxObsFile)

# TODO: Need to calculate my own position
name = 'YAR2'
lat = -29.  #0465520472
lon = 115.  #3469787567
h = 0.
eleAng = 0.

sit1 = {
    'name': name,
    'latitude': lat,
    'longitude': lon,
    'height': h,
    'ElCutOff': eleAng
}