def __threadTrain__(self, _eExit, _eTr, _qTr, _lUO) -> None:
    """
        Description:

            This function outsources the training of the surrogate to the
            appropriate optimization handler after finding the optimizer to use.

        |\n
        |\n
        |\n
        |\n
        |\n

        Parameters:

            + _eExit    = ( mp.Event() ) Event signalling global exit for threads and processes

            + _eTr      = ( mp.Event() ) Event signalling process completion

            + _qTr      = ( mp.Queue() ) The queue onto which the process results should be returned

            + _lUO      = ( mp.RLock() ) The lock for common user output

        |\n

        Returns:

            + dict      = ( dict )
                ~ surrogate = ( vars )  The trained surrogate
                ~ fitness   = ( float ) The overall fitness of the trained surrogate
    """

    #   STEP 0: Local variables
    dArgs               = _qTr.get()[0]
    dResults            = None

    iThread_ID          = Helga.ticks()
    iThread_AppID       = dArgs["thread"]

    iSwarms_Active      = 0
    iGA_Active          = 0
    iOptimizers_Active  = 0

    #   region STEP 1->15: Train using provided optimizer

    #   STEP 1: Check if not random optimizer
    if (rn.uniform(0.0, 1.0) > 0.3):
        #   STEP 2: Check if optimizer is GA
        if (ga.isEnum(dArgs["optimizer"])):
            #   STEP 3: User output
            if (self.bShowOutput):
                #   STEP 4: Get lock
                _lUO.acquire()

                #   STEP 5: Print output
                print("\t- Assigning SpongeBob to training")
                print("\t- Optimizer: " + str(dArgs["optimizer"]))
                print("\t- Thread ID: " + str(iThread_ID))
                print("\t- Application Thread ID: " + str(iThread_AppID))
                print("\t- Time: " + Helga.time() + "\n")

                #   STEP 6: Release lock
                _lUO.release()

            #   STEP 7: Create new optimizer
            sb = SpongeBob()

            #   STEP 8: Outsource training
            dResults = sb.trainSurrogate(surrogate=dArgs["surrogate"], data=dArgs["data"],
                                         password=dArgs["password"], optimizer=dArgs["optimizer"])

        #   STEP 9: Check if swarm
        elif (sw.isEnum(dArgs["optimizer"])):
            #   STEP 10: User output
            if (self.bShowOutput):
                #   STEP 11: Get lock
                _lUO.acquire()

                #   STEP 12: Print output
                print("\t- Assigning Sarah to training")
                print("\t- Optimizer: " + str(dArgs["optimizer"]))
                print("\t- Thread ID: " + str(iThread_ID))
                print("\t- Application Thread ID: " + str(iThread_AppID))
                print("\t- Time: " + Helga.time() + "\n")

                #   STEP 13: Release lock
                _lUO.release()

            #   STEP 14: Create new optimizer
            sarah = Sarah()

            #   STEP 15: Outsource training
            dResults = sarah.trainSurrogate(surrogate=dArgs["surrogate"], data=dArgs["data"],
                                            password=dArgs["password"], optimizer=dArgs["optimizer"])

    #
    #   endregion

    #   region STEP 16->34: Random training

    #   STEP 16: Use random
    else:
        #   STEP 17: Update - Local variables
        iSwarms_Active      = sw.getNumActiveSwarms()
        iGA_Active          = ga.getNumActiveGAs()
        iOptimizers_Active  = iSwarms_Active + iGA_Active

        #   STEP 18: Pick a random handler
        iTmp_Optimizer = rn.randint(0, iOptimizers_Active - 1)

        #   STEP 19: If swarm
        if (iTmp_Optimizer < iSwarms_Active):
            #   STEP 20: Get new swarm enum
            eTmp_Optimizer = sw.getActiveSwarms()[iTmp_Optimizer]

            #   STEP 21: User output
            if (self.bShowOutput):
                #   STEP 22: Get lock
                _lUO.acquire()

                #   STEP 23: Print output
                print("\t- Assigning Sarah to training")
                print("\t- Optimizer: " + str(eTmp_Optimizer))
                print("\t- Thread ID: " + str(iThread_ID))
                print("\t- Application Thread ID: " + str(iThread_AppID))
                print("\t- Time: " + Helga.time() + "\n")

                #   STEP 24: Release lock
                _lUO.release()

            #   STEP 25: Create new optimizer
            sarah = Sarah()

            #   STEP 26: Outsource training
            dResults = sarah.trainSurrogate(surrogate=dArgs["surrogate"], data=dArgs["data"],
                                            password=dArgs["password"], optimizer=eTmp_Optimizer)

        #   STEP 27: Then GA
        else:
            #   STEP 28: Get new GA enum
            eTmp_Optimizer = ga.getActiveGAs()[iTmp_Optimizer - iSwarms_Active]

            #   STEP 29: User output
            if (self.bShowOutput):
                #   STEP 30: Acquire lock
                _lUO.acquire()

                #   STEP 31: Print output
                print("\t- Assigning SpongeBob to training")
                print("\t- Optimizer: " + str(eTmp_Optimizer))
                print("\t- Thread ID: " + str(iThread_ID))
                print("\t- Application Thread ID: " + str(iThread_AppID))
                print("\t- Time: " + Helga.time() + "\n")

                #   STEP 32: Release lock
                _lUO.release()

            #   STEP 33: Create new optimizer
            sb = SpongeBob()

            #   STEP 34: Outsource training
            dResults = sb.trainSurrogate(surrogate=dArgs["surrogate"], data=dArgs["data"],
                                         password=dArgs["password"], optimizer=eTmp_Optimizer)

    #
    #   endregion

    #   STEP 35: Get surrogate fitness
    fTmpFitness = dResults["surrogate"].getAFitness(data=dArgs["data"])
    fTmpFitness = fTmpFitness * dResults["inverse accuracy"]

    #   STEP 36: User output
    if (self.bShowOutput):
        #   STEP 37: Get lock
        _lUO.acquire()

        #   STEP 38: Print output
        print("\t\t\t\t\t- Thread: " + str(iThread_AppID) + " - <" + str(dResults["accuracy"]) +
              " : " + str(round(fTmpFitness, 2)) + ">")
        print("\t\t\t\t\t- Time: " + Helga.time() + "\n")

        #   STEP 39: Release lock
        _lUO.release()

    #   STEP 40: Populate output dictionary
    dOut = {
        "accuracy":         dResults["accuracy"],
        "algorithm":        dResults["algorithm"],
        "fitness":          fTmpFitness,
        "iterations":       dResults["iterations"],
        "inverse accuracy": dResults["inverse accuracy"],
        "scalar":           dResults["scalar"],
        "surrogate":        dResults["surrogate"]
    }

    #   STEP 41: Set training results
    _qTr.put([dOut])

    #   STEP 42: Set training finished event
    _eTr.set()

    #   STEP 43: Return
    return

#
#   endregion

#
#   endregion

#
#   endregion

#region Testing

#
#   endregion
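#   NOTE:   Hedged usage sketch (not part of the original class). It illustrates the Event/Queue/RLock
#           hand-shake that __threadTrain__ expects: the argument dictionary is put on the queue before
#           the worker thread starts, and the caller waits on the completion event before reading the
#           result dictionary (STEP 40) back off the queue. The instance name `david`, the helper name,
#           and the contents of `dArgs` are assumptions for illustration only.
def exampleTrainHandshake(david, dArgs: dict) -> dict:
    import multiprocessing as mp
    import threading

    eExit   = mp.Event()
    eTr     = mp.Event()
    qTr     = mp.Queue()
    lUO     = mp.RLock()

    #   Arguments must be queued first because __threadTrain__ reads them with _qTr.get()
    qTr.put([dArgs])

    tTrain = threading.Thread(target=david.__threadTrain__, args=(eExit, eTr, qTr, lUO))
    tTrain.daemon = True
    tTrain.start()

    #   __threadTrain__ sets _eTr only after the output dictionary has been queued
    eTr.wait()

    return qTr.get()[0]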
def __threadMap__(self, _eExit, _eTr, _qTr, _lUO) -> None:
    """
        Description:

            This function outsources the mapping of the surrogate to the
            appropriate optimization handler after picking the optimizer to use.

        |\n
        |\n
        |\n
        |\n
        |\n

        Parameters:

            + _eExit    = ( mp.Event() ) Event signalling global exit for threads and processes

            + _eTr      = ( mp.Event() ) Event signalling process completion

            + _qTr      = ( mp.Queue() ) The queue onto which the process results should be returned

            + _lUO      = ( mp.RLock() ) The lock for common user output

        |\n

        Returns:

            + dOut      = ( dict )
                ~ "result"  = ( list )  The list of surrogate inputs that yielded the best results
                ~ "fitness" = ( float ) The fitness of the best results
    """

    #   STEP 0: Local variables
    dArgs               = _qTr.get()[0]
    dResults            = None

    iThread_ID          = Helga.ticks()
    iThread_AppID       = dArgs["thread"]

    iSwarms_Active      = 0
    iGA_Active          = 0
    iOptimizers_Active  = 0

    #   region STEP 1->15: Map using provided optimizer

    #   STEP 1: Check if not random optimizer
    if (rn.uniform(0.0, 1.0) > 0.3):
        #   STEP 2: Check if optimizer is GA
        if (ga.isEnum(dArgs["optimizer"])):
            #   STEP 3: User output
            if (self.bShowOutput):
                #   STEP 4: Get lock
                _lUO.acquire()

                #   STEP 5: Print output
                print("\t- Assigning SpongeBob to mapping")
                print("\t- Optimizer: " + str(dArgs["optimizer"]))
                print("\t- Thread ID: " + str(iThread_ID))
                print("\t- Application Thread ID: " + str(iThread_AppID))
                print("\t- Time: " + Helga.time() + "\n")

                #   STEP 6: Release lock
                _lUO.release()

            #   STEP 7: Create new mapper
            sb = SpongeBob()

            #   STEP 8: Outsource mapping
            dResults = sb.mapSurrogate(surrogate=dArgs["surrogate"], data=dArgs["data"],
                                       optimizer=dArgs["optimizer"])

        #   STEP 9: Check if swarm
        elif (sw.isEnum(dArgs["optimizer"])):
            #   STEP 10: User output
            if (self.bShowOutput):
                #   STEP 11: Get lock
                _lUO.acquire()

                #   STEP 12: Print output
                print("\t- Assigning Sarah to mapping")
                print("\t- Optimizer: " + str(dArgs["optimizer"]))
                print("\t- Thread ID: " + str(iThread_ID))
                print("\t- Application Thread ID: " + str(iThread_AppID))
                print("\t- Time: " + Helga.time() + "\n")

                #   STEP 13: Release lock
                _lUO.release()

            #   STEP 14: Create new mapper
            sh = Sarah()

            #   STEP 15: Outsource mapping
            dResults = sh.mapSurrogate(surrogate=dArgs["surrogate"], data=dArgs["data"],
                                       optimizer=dArgs["optimizer"])

    #
    #   endregion

    #   region STEP 16->34: Map using random optimizer

    #   STEP 16: Use random optimizer for mapping
    else:
        #   STEP 17: Update - Local variables
        iSwarms_Active      = sw.getNumActiveSwarms()
        iGA_Active          = ga.getNumActiveGAs()
        iOptimizers_Active  = iSwarms_Active + iGA_Active

        #   STEP 18: Choose a random optimizer
        iTmp_Optimizer = rn.randint(0, iOptimizers_Active - 1)

        #   STEP 19: Check if swarm
        if (iTmp_Optimizer < iSwarms_Active):
            #   STEP 20: Get optimizer enum
            eTmp_Optimizer = sw.getActiveSwarms()[iTmp_Optimizer]

            #   STEP 21: User output
            if (self.bShowOutput):
                #   STEP 22: Acquire lock
                _lUO.acquire()

                #   STEP 23: Print output
                print("\t- Assigning Sarah to mapping")
                print("\t- Optimizer: " + str(eTmp_Optimizer))
                print("\t- Thread ID: " + str(iThread_ID))
                print("\t- Application Thread ID: " + str(iThread_AppID))
                print("\t- Time: " + Helga.time() + "\n")

                #   STEP 24: Release lock
                _lUO.release()

            #   STEP 25: Create new mapper
            sh = Sarah()

            #   STEP 26: Outsource mapping
            dResults = sh.mapSurrogate(surrogate=dArgs["surrogate"], data=dArgs["data"],
                                       optimizer=eTmp_Optimizer)

        #   STEP 27: Then GA
        else:
            #   STEP 28: Get optimizer enum
            eTmp_Optimizer = ga.getActiveGAs()[iTmp_Optimizer - iSwarms_Active]

            #   STEP 29: User output
            if (self.bShowOutput):
                #   STEP 30: Acquire lock
                _lUO.acquire()

                #   STEP 31: Print output
                print("\t- Assigning SpongeBob to mapping")
                print("\t- Optimizer: " + str(eTmp_Optimizer))
                print("\t- Thread ID: " + str(iThread_ID))
                print("\t- Application Thread ID: " + str(iThread_AppID))
                print("\t- Time: " + Helga.time() + "\n")

                #   STEP 32: Release lock
                _lUO.release()

            #   STEP 33: Create new mapper
            sb = SpongeBob()

            #   STEP 34: Outsource mapping
            dResults = sb.mapSurrogate(surrogate=dArgs["surrogate"], data=dArgs["data"],
                                       optimizer=eTmp_Optimizer)

    #
    #   endregion

    #   STEP 35: User output
    if (self.bShowOutput):
        #   STEP 36: Get lock
        _lUO.acquire()

        #   STEP 37: Print output
        print("\t\t\t\t\t- Thread: " + str(iThread_AppID) + " - <" +
              str(round(100.0 * dResults["fitness"], 3)) + ">\n")

        #   STEP 38: Release lock
        _lUO.release()

    #   STEP 39: Set results
    _qTr.put([dResults])

    #   STEP 40: Set exit event
    _eTr.set()

    #   STEP 41: Return
    return
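#   NOTE:   Hedged sketch (illustrative helper, not part of the original file) of the random-optimizer
#           selection used in STEPS 17->28 above: one index is drawn over the combined pool of active
#           swarms and GAs; indices below iSwarms_Active map to swarm enums (handled by Sarah), the
#           remainder are shifted into the GA partition (handled by SpongeBob). It reuses the sw and ga
#           handlers already referenced above; the function name is an assumption.
def examplePickRandomOptimizer():
    import random as rn

    iSwarms_Active      = sw.getNumActiveSwarms()
    iGA_Active          = ga.getNumActiveGAs()
    iOptimizers_Active  = iSwarms_Active + iGA_Active

    #   Single draw over the combined pool
    iTmp_Optimizer = rn.randint(0, iOptimizers_Active - 1)

    if (iTmp_Optimizer < iSwarms_Active):
        #   Index falls in the swarm partition
        return ("Sarah", sw.getActiveSwarms()[iTmp_Optimizer])

    #   Otherwise shift the index into the GA partition
    return ("SpongeBob", ga.getActiveGAs()[iTmp_Optimizer - iSwarms_Active])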
def main(self, _iIterations: int, _iWaitPeriod):
    """
        Description:

            Runs the data-gathering loop: starts the user-input listener thread,
            repeatedly acquires results, and stops once the user types "stop" or
            the requested number of iterations is reached, after which the
            gathered data is written to file.
    """

    #   STEP -1: Global variables
    global teUInputEvent
    global tTest

    #   STEP 0: Local variables
    sFileName   = Helga.ticks()
    lData       = []
    iCount      = 0

    #   STEP 1: Setup - Global variables
    tTest = thread.Thread(target=self.__userInput)
    tTest.daemon = True
    tTest.start()

    #   STEP 2: We out here looping
    while (True):
        #   STEP 3: Perform the result acquisition
        print("\tDAVID - Gathering data (" + str(iCount + 1) + " / " + str(_iIterations) + ")")

        lData = self.__theTHING(lData, sFileName)
        iCount = iCount + 1

        #   STEP 4: Check for user input
        if (teUInputEvent.isSet() == True):
            #   STEP 4.1: Get global variables
            global sUserInput

            #   STEP 4.2: Check if input was to stop
            if (sUserInput == "stop"):
                #   STEP 4.2.1: Clear variables and end loop
                sUserInput = ""
                teUInputEvent.clear()

                break

            else:
                #   STEP 4.2.2: Clear variables and restart the listener thread (no additional commands atm)
                sUserInput = ""
                teUInputEvent.clear()

                #   A finished thread cannot be restarted, so create and start a new one
                tTest = thread.Thread(target=self.__userInput)
                tTest.daemon = True
                tTest.start()

        #   STEP 5: Check if iterations have been reached
        if ((_iIterations > 0) and (iCount >= _iIterations)):
            #   STEP 5.1: Iteration condition achieved
            break

        #   STEP 6: Wait the set amount of time
        if ((_iWaitPeriod > 0) and (_iWaitPeriod <= 10)):
            t.sleep(_iWaitPeriod)

    #   STEP 7: Average data
    #lData = self.__averageData(lData, iCount)

    #   STEP 8: Write the data to file and ???
    self.__saveData(lData, sFileName, iCount)

    #   STEP 9: GTFO
    return
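#   NOTE:   Hedged sketch (not the original __userInput implementation) of the listener contract that
#           main() relies on: the input thread must store the command in the global sUserInput *before*
#           setting teUInputEvent, because main() only reads the string after it sees the event. The
#           function name below is illustrative.
def exampleUserInputListener() -> None:
    global sUserInput
    global teUInputEvent

    #   Block on console input, publish the command, then signal main()
    sUserInput = input()
    teUInputEvent.set()

    return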