    def beginTrial(self):
        """
        Override this. It should just contain a loop where you keep constructing NTRTJobs, then calling
        runJob on it (which will block you until the NTRT instance returns), parsing the result from the job, then
        deciding if you should run another trial or if you want to terminate.
        """

        # Start a counter of job IDs to be used as dictionary keys
        self.paramID = 1

        numTrials = self.jConf['learningParams']['numTrials']
        numGenerations = self.jConf['learningParams']['numGenerations']

        results = {}
        jobList = []
        
        lParams = self.jConf['learningParams']
        
        self.prefixes = []
        
        # Every key in learningParams that ends in "Vals" names a parameter
        # set; collect the prefixes (e.g. 'edgeVals' yields 'edge')
        for key in lParams:
            if key[-4:] == "Vals":
                self.prefixes.append(key[:-4])
        
        print(self.prefixes)
        
        self.currentGeneration = {}
        for p in self.prefixes:
            self.currentGeneration[p] = {}

        logFile = open('evoLog.txt', 'w')  # Clear the log file
        logFile.close()

        scoreDump = open('scoreDump.txt', 'w')  # Clear the score dump
        scoreDump.close()

        for n in range(numGenerations):
            # Create the generation
            for p in self.prefixes:
                self.currentGeneration[p] = self.generationGenerator(self.currentGeneration[p], p + 'Vals')

            # After generation 0, the first 'deterministic' trials are carried
            # over, so evaluation starts after them
            if n > 0:
                startTrial = self.jConf['learningParams']['deterministic']
            else:
                startTrial = 0

            # We want to write all of the trials for post processing
            for i in range(numTrials):

                # Monte Carlo solution. This function could be overridden with
                # something that provides a filename for a pre-existing file
                fileName = self.getNewFile(i)
                
                for j in self.jConf['terrain']:
                    # All args to be passed to subprocess must be strings
                    args = {
                        'filename': fileName,
                        'resourcePrefix': self.jConf['resourcePath'],
                        'path': self.jConf['lowerPath'],
                        'executable': self.jConf['executable'],
                        'length': self.jConf['learningParams']['trialLength'],
                        'terrain': j
                    }
                    if n == 0 or i >= startTrial:
                        jobList.append(EvolutionJob(args))

            # Run the jobs
            conSched = ConcurrentScheduler(jobList, self.numProcesses)
            completedJobs = conSched.processJobs()

            # Read scores from files, write to logs
            totalScore = 0
            maxScore = -1000
            for job in completedJobs:
                job.processJobOutput()
                jobVals = job.obj

                scores = jobVals['scores']

                # Iterate through all of the new scores for this file
                for i in scores:
                    score = i['distance']
                    
                    for p in self.prefixes:
                        if lParams[p + 'Vals']['learning']:
                            key = jobVals[p + 'Vals']['paramID']
                            self.currentGeneration[p][key]['scores'].append(score)

                    totalScore += score
                    if score > maxScore:
                        maxScore = score

            avgScore = totalScore / float(len(completedJobs) * len(self.jConf['terrain']))
            logFile = open('evoLog.txt', 'a')
            # Log cumulative trial count, best score, and average score as CSV
            logFile.write(str((n + 1) * numTrials) + ',' + str(maxScore) + ',' + str(avgScore) + '\n')
            logFile.close()
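
# A minimal sketch (an assumption; values are illustrative only) of the jConf
# dictionary that beginTrial and runTrials read. Any learningParams key ending
# in "Vals" is picked up as a parameter-set prefix, and its 'learning' flag
# controls whether scores are recorded for that set.
exampleJConf = {
    'resourcePath': 'resources/',          # prefix for resource files
    'lowerPath': 'build/',                 # path to the executable's directory
    'executable': 'AppExample',            # hypothetical application name
    'terrain': ['flat', 'hills'],          # one job is queued per entry
    'learningParams': {
        'numTrials': 10,
        'numGenerations': 5,
        'trialLength': 60000,
        'deterministic': 2,                # trials carried over after gen 0
        'edgeVals': {'learning': True},    # yields the prefix 'edge'
        'nodeVals': {'learning': False},   # yields the prefix 'node'
    },
}

# The Monte Carlo comment above suggests getNewFile can be overridden to point
# at pre-existing parameter files. A minimal sketch of such an override on a
# hypothetical subclass; the class name and file naming scheme are assumptions:
class ReplayJobMaster(object):  # in practice this would subclass the job master
    def getNewFile(self, trialNum):
        # Reuse a previously written parameter file instead of sampling a new
        # one (hypothetical naming scheme)
        return 'savedParams_{0}.json'.format(trialNum)
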
Beispiel #2
0
    def runTrials(self, st, nt):
        """
        Write out parameter files for the current generation and run trials
        st up to nt, then append the resulting scores to the log.
        """

        jobList = []

        print("Should run " + str(nt - st) + " jobs")

        numberToWrite = 0
        for p in self.prefixes:
            numberToWrite = max(numberToWrite, len(self.currentGeneration[p]))

        writeOut = min(numberToWrite, self.numTrials)

        if writeOut < nt - 1:
            raise NTRTMasterError("Not writing enough files")

        # We want to write all of the trials for post processing
        for i in range(writeOut):

            # Monte Carlo solution. This function could be overridden with
            # something that provides a filename for a pre-existing file
            fileName = self.getNewFile(i)

            for j in self.jConf['terrain']:
                # All args to be passed to subprocess must be strings
                args = {
                    'filename': fileName,
                    'resourcePrefix': self.jConf['resourcePath'],
                    'path': self.jConf['lowerPath'],
                    'executable': self.jConf['executable'],
                    'length': self.jConf['learningParams']['trialLength'],
                    'terrain': j
                }
                if st <= i <= nt:
                    jobList.append(EvolutionJob(args))
                    self.trialTotal += 1

        # Run the jobs
        conSched = ConcurrentScheduler(jobList, self.numProcesses)
        completedJobs = conSched.processJobs()

        # Read scores from files, write to logs
        totalScore = 0
        maxScore = -1000

        for job in completedJobs:
            job.processJobOutput()
            jobVals = job.obj

            scores = jobVals['scores']

            # Iterate through all of the new scores for this file
            for i in scores:
                score = i['distance']

                for p in self.prefixes:
                    if self.lParams[p + 'Vals']['learning']:
                        jobNum = self.getJobNum(jobVals[p + 'Vals']['paramID'],
                                                p)
                        self.currentGeneration[p][jobNum]['scores'].append(
                            score)

                totalScore += score
                if score > maxScore:
                    maxScore = score

        avgScore = totalScore / float(
            len(completedJobs) * len(self.jConf['terrain']))
        logFile = open('evoLog.txt', 'a')
        # Log cumulative trial count, best score, and average score as CSV
        logFile.write(
            str(self.trialTotal) + ',' + str(maxScore) + ',' + str(avgScore) +
            '\n')
        logFile.close()
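
# A possible per-generation driver for runTrials (an assumption; the original
# caller is not shown here). It mirrors the loop in beginTrial: generation 0
# evaluates every trial, while later generations skip the first
# 'deterministic' trials, which are carried over unchanged.
def runGenerations(master):
    lParams = master.jConf['learningParams']
    for n in range(lParams['numGenerations']):
        # Rebuild each parameter set's generation before running its trials
        for p in master.prefixes:
            master.currentGeneration[p] = master.generationGenerator(
                master.currentGeneration[p], p + 'Vals')
        startTrial = lParams['deterministic'] if n > 0 else 0
        master.runTrials(startTrial, lParams['numTrials'])
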
    def beginTrial(self):
        """
        Override this. It should just contain a loop where you keep constructing NTRTJobs, then calling
        runJob on it (which will block you until the NTRT instance returns), parsing the result from the job, then
        deciding if you should run another trial or if you want to terminate.
        """

        # Start a counter of job IDs to be used as dictionary keys
        self.paramID = 1

        numTrials = self.jConf['learningParams']['numTrials']
        numGenerations = self.jConf['learningParams']['numGenerations']

        results = {}
        jobList = []
        self.currentGeneration = {}
        self.currentGeneration['edge'] = {}
        self.currentGeneration['node'] = {}
        self.currentGeneration['feedback'] = {}
        logFile = open('evoLog.txt', 'w')  # Clear the log file
        logFile.close()

        scoreDump = open('scoreDump.txt', 'w')  # Clear the score dump
        scoreDump.close()

        for n in range(numGenerations):
            # Create the generation
            self.currentGeneration['edge'] = self.generationGenerator(
                self.currentGeneration['edge'], 'edgeVals')
            self.currentGeneration['node'] = self.generationGenerator(
                self.currentGeneration['node'], 'nodeVals')
            self.currentGeneration['feedback'] = self.generationGenerator(
                self.currentGeneration['feedback'], 'feedbackVals')

            # Write a parameter file and queue a job for each trial
            for i in range(numTrials):

                # Monte Carlo solution. This function could be overridden with
                # something that provides a filename for a pre-existing file
                fileName = self.getNewFile(i)

                # All args to be passed to subprocess must be strings
                args = {
                    'filename': fileName,
                    'resourcePrefix': self.jConf['resourcePath'],
                    'path': self.jConf['lowerPath'],
                    'executable': self.jConf['executable'],
                    'length': self.jConf['learningParams']['trialLength'],
                    'terrain': self.jConf['terrain']
                }
                jobList.append(BrianJob(args))

            # Run the jobs
            conSched = ConcurrentScheduler(jobList, self.numProcesses)
            completedJobs = conSched.processJobs()

            # Read scores from files, write to logs
            totalScore = 0
            maxScore = -1000
            for job in completedJobs:
                job.processJobOutput()
                jobVals = job.obj

                scores = jobVals['scores']

                edgeKey = jobVals['edgeVals']['paramID']
                nodeKey = jobVals['nodeVals']['paramID']
                feedbackKey = jobVals['feedbackVals']['paramID']

                # Iterate through all of the new scores for this file
                for i in scores:
                    score = i['distance']

                    # TODO consider only appending when learning
                    self.currentGeneration['edge'][edgeKey]['scores'].append(
                        score)
                    self.currentGeneration['node'][nodeKey]['scores'].append(
                        score)
                    self.currentGeneration['feedback'][feedbackKey][
                        'scores'].append(score)
                    totalScore += score
                    if score > maxScore:
                        maxScore = score

            avgScore = totalScore / float(len(completedJobs))
            logFile = open('evoLog.txt', 'a')
            # Log cumulative trial count, best score, and average score as CSV
            logFile.write(
                str((n + 1) * numTrials) + ',' + str(maxScore) + ',' +
                str(avgScore) + '\n')
            logFile.close()
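
# Illustrative shape (an assumption, inferred from the reads above) of job.obj
# after processJobOutput(); the numeric values here are made up:
exampleJobObj = {
    'scores': [{'distance': 13.7}, {'distance': 12.1}],
    'edgeVals': {'paramID': 1},      # key into currentGeneration['edge']
    'nodeVals': {'paramID': 1},      # key into currentGeneration['node']
    'feedbackVals': {'paramID': 1},  # key into currentGeneration['feedback']
}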