def runThread(numTrain, numTest, agent, maxJobs, results, finished):
    exp = None
    try:
        # pretrain
        exp = SimpleSimulation(numDevices=numDevices,
                               maxJobs=maxJobs,
                               agentClass=agent,
                               tasks=[HARD],
                               systemStateClass=extendedSystemState,
                               jobInterval=1)
        exp.setBatterySize(1e-1)
        exp.setFpgaIdleSleep(1e-3)
        exp.simulateEpisodes(int(numTrain))
    except:
        traceback.print_exc(file=sys.stdout)
        if exp is not None:
            print("Error in experiment:", maxJobs, exp.time)

    exp.sharedAgent.setProductionMode()
    for i in range(int(numTest)):
        exp.simulateEpisode(int(numTrain) + i)
        results.put(["Agent %s" % exp.sharedAgent.__name__, maxJobs, exp.numFinishedJobs])
    # results.put(["", jobInterval, np.average([dev.numJobs for dev in exp.devices]) / exp.getCompletedJobs()])
    finished.put(True)
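# Hedged usage sketch (not part of the original script): a driver in the style of the
# other run() functions in this repo, showing how the runThread above might be launched.
# The swept maxJobs range, the episode counts, and the plot labels are assumptions; only
# executeMulti, plotMultiWithErrors, localConstants.REPEATS, and minimalTableAgent are
# taken from elsewhere in the repo.
def run():
    print("starting experiment")
    debug.enabled = False
    processes = list()
    results = multiprocessing.Queue()
    finished = multiprocessing.Queue()
    numTrain, numTest = 50, 10  # assumed episode counts
    for maxJobs in range(1, 11):  # assumed sweep range
        for _ in range(localConstants.REPEATS):
            processes.append(
                multiprocessing.Process(target=runThread,
                                        args=(numTrain, numTest, minimalTableAgent,
                                              maxJobs, results, finished)))
    results = executeMulti(processes, results, finished)
    plotMultiWithErrors("Jobs vs Max Jobs", results=results,
                        ylabel="Finished Jobs", xlabel="Max Jobs")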
def runThread(jobInterval, fpgaSleepTime, numEpisodes, results, finished):
    exp = SimpleSimulation(numDevices=4, maxJobs=maxJobs)
    exp.setFpgaIdleSleep(fpgaSleepTime)
    exp.scenario.setInterval(jobInterval)
    print('interval', exp.scenario.timeInterval.mean)
    exp.setBatterySize(1e1)
    try:
        for i in range(numEpisodes):
            # exp.simulateTime(10)
            exp.simulateEpisode()
            results.put(["FPGA Idle Sleep {} Interval {}".format(fpgaSleepTime, jobInterval), i, exp.getCurrentTime()])
    except:
        traceback.print_exc(file=sys.stdout)
        print(jobInterval, fpgaSleepTime)
        print("Error in experiment:", jobInterval, fpgaSleepTime, exp.getCurrentTime())
    finished.put(True)
def runThread(agent, numEpisodes, numDevices, taskOptions, interval, results, finished):
    constants.CENTRALISED_LEARNING = False
    exp = SimpleSimulation(numDevices=numDevices,
                           maxJobs=50,
                           agentClass=agent,
                           tasks=taskOptions,
                           systemStateClass=targetedSystemState,
                           reconsiderBatches=False,
                           scenarioTemplate=RANDOM_SCENARIO_ROUND_ROBIN,
                           centralisedLearning=False)
    exp.scenario.setInterval(interval)
    exp.setFpgaIdleSleep(1e-3)
    exp.setBatterySize(1e-1)
    assert numEpisodes % 2 == 0
    offset = 0
    e = 0
    reduced = False
    try:
        # pretrain
        # for e in range(0):
        #     exp.simulateEpisode()
        debug.infoEnabled = False
        for i in range(2):
            for e in range(int(numEpisodes / 2)):
                exp.simulateEpisode(e)
                dol_ind_task, dol_task_ind = DOL(exp.devices, taskOptions)
                results.put(["DOL %d devices" % numDevices, offset + e, dol_ind_task])
                results.put(["Jobs Completed %d devices" % numDevices, offset + e, exp.numFinishedJobs])
                # results.put(["Interval %.2f" % interval, e, dol_ind_task])
            if not reduced:
                print()
                # remove half of the devices
                for _ in range(int(numDevices / 2)):
                    exp.removeDevice()
                reduced = True
                print("reduce to", exp.devices)
            offset = e
        finished.put("")
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        print(agent, numEpisodes, numDevices, taskOptions, interval, e, offset, reduced)
        sys.exit(0)
    finished.put(True)
def runThread(agent, numEpisodes, centralised, results, finished):
    exp = SimpleSimulation(numDevices=2,
                           maxJobs=maxjobs,
                           agentClass=agent,
                           tasks=[HARD],
                           systemStateClass=minimalSystemState,
                           scenarioTemplate=REGULAR_SCENARIO_ROUND_ROBIN,
                           centralisedLearning=centralised)
    # exp.scenario.setInterval(1)
    exp.setBatterySize(1e-1)
    exp.setFpgaIdleSleep(1e-3)
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode(e)
            # results.put(["%s %s" % (exp.devices[0].agent.__name__, "Centralised" if centralised else "Decentralised"), e, exp.numFinishedJobs])
            results.put(["%s %s" % (exp.devices[0].agent.__name__, "Centralised" if centralised else "Decentralised"), e, exp.getCurrentTime()])
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)
def runThread(agent, numEpisodes, results, finished, histories):
    exp = SimpleSimulation(numDevices=2, maxJobs=6, agentClass=agent)
    exp.setFpgaIdleSleep(5)
    exp.setBatterySize(1e0)
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode()
            results.put(["Duration %s" % exp.sharedAgent, e, exp.getCurrentTime()])
            # results.put(["Episode reward %s" % exp.sharedAgent, e, exp.sharedAgent.episodeReward])
    except:
        debug.printCache(200)
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)

    assert simulationResults.learningHistory is not None
    histories.put(simulationResults.learningHistory)
    print("\nsaving history", simulationResults.learningHistory, '\nr')
    print("forward", counters.NUM_FORWARD, "backward", counters.NUM_BACKWARD)
    exp.sharedAgent.printModel()
def runThread(agent, numEpisodes, numDevices, taskOptions, interval, results, finished):
    constants.CENTRALISED_LEARNING = False
    exp = SimpleSimulation(numDevices=numDevices,
                           maxJobs=10,
                           agentClass=agent,
                           tasks=taskOptions,
                           systemStateClass=targetedSystemState,
                           reconsiderBatches=False,
                           scenarioTemplate=RANDOM_SCENARIO_ROUND_ROBIN,
                           centralisedLearning=False)
    exp.scenario.setInterval(interval)
    exp.setFpgaIdleSleep(5)
    exp.setBatterySize(1e0)
    e = None
    try:
        # pretrain
        for e in range(0):
            exp.simulateEpisode()
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode()

        # per device, compute the fraction of FPGA configuration time spent on its dominant task
        times = np.zeros((len(taskOptions),))
        percentages = np.zeros((numDevices,))
        for device in exp.devices:
            for t in range(len(taskOptions)):
                task = taskOptions[t]
                times[t] = device.fpga.getConfigTime(task)
            total = np.sum(times)
            if total > 0:
                perc = times[0] / total
            else:
                perc = 0.5
            if perc < 0.5:
                perc = 1. - perc
            percentages[device.index] = perc
        # results.put(["", np.average(percentages), exp.numFinishedJobs])
        results.put(["Interval %.2f" % interval, np.average(percentages), exp.numFinishedJobs])
        # for device in exp.devices:
        #     device.agent.setProductionMode()
        # exp.simulateEpisode()
        finished.put("")
        # if times[1] == 0:
        #     ratio = inf
        # else:
        #     ratio = times[0] / times[1]
        # if ratio < 1 and ratio != 0: ratio = 1. / ratio
        # results.put(["Device %d EASY/HARD" % (device.index), e, ratio])
        # print(e, "Device %d Task %s" % (device.index, task), e, device.getNumTasksDone(task))
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)
def runThread(id, agent, systemState, productionMode, offPolicy, numPhases, numEpisodes, results, finished):
    exp = SimpleSimulation(numDevices=4,
                           maxJobs=maxjobs,
                           agentClass=agent,
                           tasks=[HARD],
                           systemStateClass=systemState,
                           scenarioTemplate=REGULAR_SCENARIO_ROUND_ROBIN,
                           centralisedLearning=True,
                           numEnergyLevels=numEnergyStates,
                           trainClassification=True,
                           offPolicy=offPolicy,
                           allowExpansion=True)
    exp.setBatterySize(1e-1)
    exp.setFpgaIdleSleep(1e-3)
    exp.sharedAgent.loadModel()
    exp.sharedAgent.setProductionMode(productionMode)
    e = None
    overallEpisode = 0
    try:
        for phase in range(numPhases):
            for e in range(numEpisodes):
                debug.infoEnabled = False
                exp.simulateEpisode(e)
                agentName = exp.devices[0].agent.__name__
                result = [f"{agentName} Production: {productionMode} OffPolicy: {offPolicy}", overallEpisode + e, exp.numFinishedJobs]
                # print("result", result)
                results.put(result)
                # results.put([f"{agentName}", e, exp.getCurrentTime()])
            # check if not the last one
            if phase < numPhases - 1:
                beforeStates = exp.currentSystemState.getUniqueStates()
                for i in range(5):
                    exp.expandState("jobsInQueue")
                # print("\nexpand:", beforeStates, exp.currentSystemState.getUniqueStates())
                # print()
            overallEpisode += numEpisodes
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e, offPolicy, productionMode)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)
def profileTarget():
    debug.settings.enabled = False
    debug.settings.learnEnabled = False
    exp = Simulation(numDevices=2,
                     systemStateClass=minimalSystemState,
                     agentClass=minimalDeepAgent,
                     centralisedLearning=True,
                     offPolicy=True,
                     trainClassification=False)
    exp.setBatterySize(1e-1)
    for i in range(1):
        exp.simulateEpisode(i)
def runThread(agent, numEpisodes, taskOptions, results, finished):
    exp = SimpleSimulation(numDevices=4, maxJobs=6, agentClass=agent, tasks=taskOptions)
    exp.scenario.setInterval(1)
    exp.setFpgaIdleSleep(5)
    exp.setBatterySize(1e0)
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode()
            results.put(["Agent %s (%s)" % (agent.__name__, len(taskOptions)), e, exp.numFinishedJobs])
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)

    # assert simulationResults.learningHistory is not None
    # histories.put(simulationResults.learningHistory)
    # print("\nsaving history", simulationResults.learningHistory, '\nr')
    print("forward", counters.NUM_FORWARD, "backward", counters.NUM_BACKWARD)
def runThread(agent, numEpisodes, numDevices, taskOptions, results, finished):
    constants.CENTRALISED_LEARNING = False
    exp = SimpleSimulation(numDevices=numDevices,
                           maxJobs=10,
                           agentClass=agent,
                           tasks=taskOptions,
                           systemStateClass=targetedSystemState,
                           reconsiderBatches=False,
                           scenarioTemplate=RANDOM_SCENARIO_ROUND_ROBIN,
                           centralisedLearning=False)
    exp.scenario.setInterval(1)
    exp.setFpgaIdleSleep(60)
    exp.setBatterySize(1e0)
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode()
            for device in exp.devices:
                for task in taskOptions:
                    results.put(["Device %d Task %d" % (device.index, task.identifier), e, device.getNumTasksDone(task)])
                    # print(e, "Device %d Task %s" % (device.index, task), e, device.getNumTasksDone(task))
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)
def runThread(agent, numEpisodes, results, finished, histories):
    exp = SimpleSimulation(numDevices=1, maxJobs=3, agentClass=agent)
    exp.setFpgaIdleSleep(5)
    exp.setBatterySize(1e0)
    # sim.simulations.constants.FPGA_IDLE_SLEEP = 5
    # sim.simulations.constants.OFFLOADING_POLICY = REINFORCEMENT_LEARNING
    # sim.simulations.constants.TOTAL_TIME = 1e3
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode()
            results.put(["Episode reward %s" % exp.sharedAgent, e, exp.sharedAgent.episodeReward])
    except:
        debug.printCache(200)
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)

    assert simulationResults.learningHistory is not None
    histories.put(simulationResults.learningHistory)
    print("\nsaving history", simulationResults.learningHistory, '\nr')
    print("forward", counters.NUM_FORWARD, "backward", counters.NUM_BACKWARD)
    exp.sharedAgent.printModel()
def runThread(agent, numEpisodes, results, finished, histories):
    exp = SimpleSimulation(numDevices=2, maxJobs=6, agentClass=agent, scenarioTemplate=RANDOM_SCENARIO_RANDOM)
    exp.setBatterySize(1e0)
    try:
        # first half of the episodes: training
        for e in range(int(numEpisodes / 2)):
            exp.simulateEpisode()
            results.put(["Agent %s" % agent.__name__, e, exp.numFinishedJobs])
        # second half: production mode (no further exploration)
        exp.sharedAgent.setProductionMode(True)
        for e in range(int(numEpisodes / 2), int(numEpisodes)):
            exp.simulateEpisode()
            results.put(["Agent %s" % agent.__name__, e, exp.numFinishedJobs])
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)
def runThread(agent, numEpisodes, numDevices, taskOptions, results, finished):
    constants.CENTRALISED_LEARNING = False
    exp = SimpleSimulation(numDevices=numDevices,
                           maxJobs=10,
                           agentClass=agent,
                           tasks=taskOptions,
                           systemStateClass=targetedSystemState,
                           reconsiderBatches=False,
                           scenarioTemplate=RANDOM_SCENARIO_ROUND_ROBIN,
                           centralisedLearning=False)
    exp.scenario.setInterval(1)
    exp.setFpgaIdleSleep(60)
    exp.setBatterySize(1e0)
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode()
            times = np.zeros((len(taskOptions),))
            for device in exp.devices:
                for t in range(len(taskOptions)):
                    task = taskOptions[t]
                    times[t] = device.fpga.getConfigTime(task)
                results.put(["Device %d %% time EASY" % device.index, e, times[0] / np.sum(times)])
                # if times[1] == 0:
                #     ratio = inf
                # else:
                #     ratio = times[0] / times[1]
                # if ratio < 1 and ratio != 0: ratio = 1. / ratio
                # results.put(["Device %d EASY/HARD" % (device.index), e, ratio])
                # print(e, "Device %d Task %s" % (device.index, task), e, device.getNumTasksDone(task))
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)
def runThread(agent, numEpisodes, results, finished):
    exp = SimpleSimulation(numDevices=2,
                           maxJobs=maxjobs,
                           agentClass=agent,
                           tasks=[HARD],
                           systemStateClass=minimalSystemState,
                           scenarioTemplate=REGULAR_SCENARIO_ROUND_ROBIN,
                           centralisedLearning=True,
                           numEnergyLevels=numEnergyStates,
                           trainClassification=True)
    # exp.scenario.setInterval(1)
    exp.sharedAgent.loadModel()
    exp.sharedAgent.setProductionMode()
    exp.setBatterySize(1e-1)
    exp.setFpgaIdleSleep(1e-3)
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode(e)
            agentName = exp.devices[0].agent.__name__
            result = [f"{agentName}", e, exp.numFinishedJobs]
            print(result)
            results.put(result)
            # results.put([f"{agentName}", e, exp.getCurrentTime()])
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)
def threadRun(episodeNum, results, finished):
    exp = SimpleSimulation(numDevices=numDevices, maxJobs=100, tasks=[HARD], centralisedLearning=False)
    exp.setBatterySize(1e-1)
    exp.setFpgaIdleSleep(1e-3)
    for agent, device in zip([lazyTableAgent, minimalTableAgent, minimalTableAgent], exp.devices):
        device.agent = agent(reconsiderBatches=False,
                             systemState=exp.currentSystemState,
                             owner=device,
                             offPolicy=exp.offPolicy)
        device.agent.setDevices(exp.devices)
        print("set agent", agent, agent.__name__, device.agent, device.agent.__name__)
    print([device.agent.__name__ for device in exp.devices])
    for e in range(int(episodeNum)):
        exp.simulateEpisode(e)
        for device in exp.devices:
            # print("putting results", device.agent.__name__, device.numJobsDone)
            # results.put(["Agent %s" % device.agent.__name__, e, device.currentTime.current])
            results.put(["Device %d Agent %s" % (device.index, device.agent.__name__), e, device.numJobsDone])
        sys.stdout.write("\rProgress: %.2f%%" % ((e + 1) / episodeNum * 100.))
    finished.put(True)
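# Hedged usage sketch (not in the original script): launching the threadRun above across
# localConstants.REPEATS processes, following the multiprocessing pattern used by the
# other run() drivers in this repo. The episode count and plot labels are assumptions.
def run():
    print("starting experiment")
    processes = list()
    results = multiprocessing.Queue()
    finished = multiprocessing.Queue()
    episodeNum = 100  # assumed episode count
    for _ in range(localConstants.REPEATS):
        processes.append(
            multiprocessing.Process(target=threadRun,
                                    args=(episodeNum, results, finished)))
    results = executeMulti(processes, results, finished)
    plotMultiWithErrors("Jobs per Device Agent", results=results,
                        ylabel="Jobs Done", xlabel="Episode")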
def run(): print("starting experiment") debug.enabled = False processes = list() constants.MINIMUM_BATCH = 5 results = multiprocessing.Queue() finished = multiprocessing.Queue() for jobInterval in np.arange(1, 1e2, 1e0): for _ in range(localConstants.REPEATS): processes.append( multiprocessing.Process(target=runThread, args=(SimpleSimulation( numDevices=numDevices, jobInterval=Gaussian( jobInterval, 1), agentClass=lazyAgent), jobInterval, results, finished))) results = executeMulti(processes, results, finished) plotMultiWithErrors("Average Power vs Job Interval", results=results, ylabel="Average Device Power", xlabel="Job Interval") # , save=True)
def runThread(agent, numTicks, numDevices, results, finished, histories):
    exp = SimpleSimulation(numDevices=numDevices,
                           scenarioTemplate=REGULAR_SCENARIO_RANDOM,
                           jobInterval=10,
                           agentClass=agent)
    exp.setBatterySize(1e4)
    exp.reset()

    timeOffsets = dict()
    previousTime = dict()
    currentEnergy = dict()
    for dev in exp.devices:
        timeOffsets[dev] = 0
        currentEnergy[dev] = dev.energyLevel
        previousTime[dev] = 0
        dev.latestPower = None

    i = None
    try:
        for i in range(numTicks):
            if exp.finished:
                for dev in exp.devices:
                    timeOffsets[dev] += dev.currentTime.current
                exp.reset()
            # for i in range():
            usages = exp.simulateTick()
            if usages == []:
                usages = [(0, 0), (0, 0)]
            for duration, power in usages:
                currentTime = previousTime[exp.latestDevice] + duration
                results.put(["%s Power" % exp.latestDevice, previousTime[exp.latestDevice], power * 1e3])
                previousTime[exp.latestDevice] = currentTime
                results.put(["%s Power" % exp.latestDevice, currentTime, power * 1e3])
            time.sleep(0.2)
    except:
        traceback.print_exc(file=sys.stdout)
        print(agent, i)
        print("Error in experiment:", exp.time)
        debug.printCache(200)
        sys.exit(0)
    finished.put(True)

    # assert simulationResults.learningHistory is not None
    # histories.put(simulationResults.learningHistory)
    # print("\nsaving history", simulationResults.learningHistory, '\nr')
    print("forward", counters.NUM_FORWARD, "backward", counters.NUM_BACKWARD)
def run(): print("testing simple simulation") constants.NUM_DEVICES = 2 constants.DEFAULT_ELASTIC_NODE.BATTERY_SIZE = 1e2 constants.OFFLOADING_POLICY = REINFORCEMENT_LEARNING debug.enabled = True debug.learnEnabled = True constants.DRAW_DEVICES = False taskOptions = [EASY, HARD] exp = Simulation(systemStateClass=minimalSystemState, agentClass=minimalTableAgent, tasks=taskOptions) numjobs = int(1e5) finishedJobs = dict() for task in taskOptions: finishedJobs[task] = 0 try: # for i in range(numjobs): # exp.createNewJob(exp.devices[0]) # for i in range(10): # exp.simulateTick() # for i in range(numjobs): # exp.simulateUntilJobDone() # finishedJobs[exp.getLatestFinishedJob().currentTask] += 1 # exp.simulateUntilTime(50) exp.simulateEpisode() print("finished jobs:", exp.finishedTasks) # try: # exp.simulateEpisode() # print("Experiment done!", exp.time) # except Exception: # print("number of successful episodes:", exp.episodeNumber) # print(sys.exc_info()) except: debug.printCache() traceback.print_exc(file=sys.stdout)
def runThread(index, results, finished):
    exp = Simulation(minimalSystemState, offloadingDecision, minimalTableAgent)
    # just run each thread to see if it crashes
    # while True:
    for i in range(1):
        try:
            exp.simulateEpisode()
        except Exception:
            print()
            print('-' * 100)
            print("error found!")
            print('-' * 100)
            print("number of successful episodes:", exp.episodeNumber)
            print(sys.exc_info())
            traceback.print_stack()
            sys.exit(-1)
        # print("end of episode!", exp.time)
        results.put(["test", index, exp.episodeNumber])
    finished.put(True)
def run(results, finished):
    exp = SimpleSimulation(numDevices=numDevices, maxJobs=100, tasks=[HARD], centralisedLearning=False)
    exp.setBatterySize(1e0)
    # exp.setBatterySize(1e-5)

    # replace agents in devices
    # for dev in exp.devices:
    #     print(dev.agent, dev.agent.model)
    for agent, device in zip([lazyTableAgent, minimalTableAgent, minimalTableAgent], exp.devices):
        device.agent = agent(systemState=exp.currentSystemState, owner=device, offPolicy=exp.offPolicy)
        device.agent.setDevices(exp.devices)
        print("set agent", agent, agent.__name__, device.agent, device.agent.__name__)
        # device.agent.reset()
        # device.reset()
    print([device.agent.__name__ for device in exp.devices])

    for e in range(int(numEpisodes)):
        exp.simulateEpisode()
        for device in exp.devices:
            # print("putting results", device.agent.__name__, device.numJobsDone)
            # results.put(["Agent %s" % device.agent.__name__, e, device.currentTime.current])
            results.put(["Device %d Agent %s" % (device.index, device.agent.__name__), e, device.numJobsDone])
        sys.stdout.write("\rProgress: %.2f%%" % ((e + 1) / numEpisodes * 100.))

    # for device in exp.devices:
    #     # device.agent.printModel()
    #     device.agent.setProductionMode()
    # for device in exp.devices:
    #     device.agent.printModel()
    finished.put(True)
def runThread(agent, numEpisodes, results, finished):
    exp = SimpleSimulation(numDevices=16, maxJobs=25, agentClass=agent, tasks=[HARD])
    exp.scenario.setInterval(1)
    exp.setBatterySize(1e-1)
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode()
            try:
                averageLifetime = exp.totalFinishedJobsLifetime / exp.numFinishedJobs
            except ZeroDivisionError:
                print("no jobs done!")
                print(agent, numEpisodes, e)
                averageLifetime = 0
            results.put(["Agent %s" % agent.__name__, e, averageLifetime])
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)

    # assert simulationResults.learningHistory is not None
    # histories.put(simulationResults.learningHistory)
    # print("\nsaving history", simulationResults.learningHistory, '\nr')
    print("forward", counters.NUM_FORWARD, "backward", counters.NUM_BACKWARD)
def runThread(agent, numEpisodes, results, finished):
    exp = SimpleSimulation(numDevices=16, maxJobs=150, agentClass=agent, tasks=[HARD])
    exp.scenario.setInterval(1)
    exp.setBatterySize(1e0)
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode()
            results.put(["Agent %s" % agent.__name__, e,
                         np.average([device.totalSleepTime / device.currentTime.current for device in exp.devices])])
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)
def runThread(numEpisodesTrain, numEpisodesTest, numDevices, taskOptions, interval, results, finished):
    constants.CENTRALISED_LEARNING = False
    numDevices = int(numDevices)
    exp = SimpleSimulation(numDevices=numDevices,
                           maxJobs=10,
                           agentClass=regretfulTableAgent,
                           tasks=taskOptions,
                           systemStateClass=targetedSystemState,
                           reconsiderBatches=False,
                           scenarioTemplate=RANDOM_SCENARIO_ROUND_ROBIN,
                           centralisedLearning=False)
    exp.scenario.setInterval(interval)
    exp.setFpgaIdleSleep(5)
    exp.setBatterySize(1e0)
    e = None
    try:
        # pretrain
        # for e in range(0):
        #     exp.simulateEpisode()
        debug.infoEnabled = False
        for e in range(numEpisodesTrain):
            exp.simulateEpisode()
        # dols = list()
        for e in range(numEpisodesTest):
            exp.simulateEpisode()
            dol_ind_task, dol_task_ind = DOL(exp.devices, taskOptions, addIdle=False)
            results.put(["Devices: %d" % numDevices, interval, dol_ind_task])
            results.put(["Jobs Devices %d" % numDevices, e, exp.numFinishedJobs / 1000])
            # dols.append(dol_ind_task)
        # results.put(["Devices: %d" % numDevices, interval, np.average(dols)])
        finished.put("")
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        # print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)
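# Hedged usage sketch (not in the original script): a driver for the train/test DOL
# experiment above, following the multiprocessing pattern of the other run() functions
# in this repo. The series label "Devices: %d" with interval as the x value suggests a
# sweep over job intervals per device count; the concrete sweep ranges, episode counts,
# and task set below are assumptions.
def run():
    print("starting experiment")
    processes = list()
    results = multiprocessing.Queue()
    finished = multiprocessing.Queue()
    numEpisodesTrain, numEpisodesTest = 100, 20  # assumed episode counts
    taskOptions = [EASY, HARD]  # assumed task set
    for numDevices in [2, 4]:  # assumed device counts
        for interval in np.arange(1, 10, 1):  # assumed interval sweep
            for _ in range(localConstants.REPEATS):
                processes.append(
                    multiprocessing.Process(target=runThread,
                                            args=(numEpisodesTrain, numEpisodesTest,
                                                  numDevices, taskOptions, interval,
                                                  results, finished)))
    results = executeMulti(processes, results, finished)
    plotMultiWithErrors("DOL vs Job Interval", results=results,
                        ylabel="DOL", xlabel="Job Interval")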
def randomJobs(offloadingPolicy=ANYTHING, hw=True):
    constants.NUM_DEVICES = 1
    print("random jobs")
    debug.enabled = True
    constants.OFFLOADING_POLICY = offloadingPolicy
    constants.JOB_LIKELIHOOD = 9e-3  # 2e-3
    constants.SAMPLE_RAW_SIZE = Constant(40)
    constants.SAMPLE_SIZE = Constant(10)
    constants.PLOT_SKIP = 1
    constants.FPGA_POWER_PLAN = IDLE_TIMEOUT
    constants.DRAW_DEVICES = True
    constants.FPGA_IDLE_SLEEP = 0.075
    if offloadingPolicy == REINFORCEMENT_LEARNING:
        constants.MINIMUM_BATCH = 1e5
    else:
        constants.MINIMUM_BATCH = 5
    constants.DEFAULT_TASK_GRAPH = [EASY]
    constants.ROUND_ROBIN_TIMEOUT = 1e1

    exp = Simulation(agentClass=minimalAgent)
    print("start simulation")
    exp.simulateEpisode()  # UntilTime(1)
def runThread(agent, numEpisodes, results, finished):
    # constants.CENTRALISED_LEARNING = False
    exp = SimpleSimulation(numDevices=2,
                           maxJobs=maxjobs,
                           agentClass=agent,
                           tasks=[HARD],
                           systemStateClass=minimalSystemState,
                           scenarioTemplate=REGULAR_SCENARIO_ROUND_ROBIN,
                           reconsiderBatches=False,
                           centralisedLearning=False)
    # exp.scenario.setInterval(1)
    exp.setFpgaIdleSleep(1e-3)
    exp.setBatterySize(1e-1)
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode(e)
            results.put(["Job %s" % exp.devices[0].agent.__name__, e, exp.numFinishedJobs])
            results.put(["Duration %s" % exp.devices[0].agent.__name__, e, exp.getCurrentTime()])
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)

    # assert simulationResults.learningHistory is not None
    # histories.put(simulationResults.learningHistory)
    # print("\nsaving history", simulationResults.learningHistory, '\nr')
    print("forward", counters.NUM_FORWARD, "backward", counters.NUM_BACKWARD)
    if exp.sharedAgent.__class__ == minimalTableAgent:
        plotting.plotModel(exp.sharedAgent, drawLabels=False)
def runThread(id, agent, productionMode, offPolicy, numEpisodes, results, finished):
    startTime = datetime.now()
    exp = SimpleSimulation(numDevices=4,
                           maxJobs=maxjobs,
                           agentClass=agent,
                           tasks=[HARD],
                           systemStateClass=extendedSystemState,
                           scenarioTemplate=REGULAR_SCENARIO_ROUND_ROBIN,
                           centralisedLearning=True,
                           numEnergyLevels=numEnergyStates,
                           trainClassification=True,
                           offPolicy=offPolicy)
    exp.sharedAgent.precache = True
    # exp.scenario.setInterval(1)
    exp.sharedAgent.loadModel()
    if productionMode:
        exp.sharedAgent.setProductionMode()
    exp.setBatterySize(1e-1)
    exp.setFpgaIdleSleep(1e-3)
    e = None
    try:
        for e in range(numEpisodes):
            debug.infoEnabled = False
            exp.simulateEpisode(e)
            agentName = exp.devices[0].agent.__name__
            result = [f"{agentName} PM: {productionMode} OP: {offPolicy}", e, exp.numFinishedJobs]
            results.put(result)
            # result = [f"{agentName} PM: {productionMode} OP: {offPolicy} JOBS", e, exp.jobCounter]
            # results.put(result)
    except:
        debug.printCache()
        traceback.print_exc(file=sys.stdout)
        print(agent, e)
        print("Error in experiment:", exp.time)
        sys.exit(0)
    finished.put(True)
    print(f"duration: {agent} PM {productionMode} OP {offPolicy}: {datetime.now() - startTime}")
def run(): print ("starting experiment") processes = list() # offloadingOptions = [True, False] results = multiprocessing.Queue() finished = multiprocessing.Queue() REPEATS = 6 for jobInterval in np.arange(1e1, 1e2, 1e1): # for fpgaPowerPlan in [sim.fpgaPowerPolicy.FPGA_STAYS_ON]: # , sim.constants.FPGA_IMMEDIATELY_OFF, sim.constants.FPGA_WAIT_OFF]: for _ in range(REPEATS): processes.append(multiprocessing.Process(target=runThread, args=(SimpleSimulation(maxJobs=5, numDevices=numDevices, jobInterval=jobInterval), jobInterval, results, finished))) results = executeMulti(processes, results, finished) plotMultiWithErrors("sleep time", results=results) # , save=True)
def run(): print("starting experiment") debug.enabled = False processes = list() # constants.MINIMUM_BATCH = 5 results = multiprocessing.Queue() finished = multiprocessing.Queue() for jobInterval in np.logspace(-3, 1, num=5, base=10.): for _ in range(localConstants.REPEATS): print(SimpleSimulation()) processes.append(multiprocessing.Process(target=runThread, args=(jobInterval, results, finished))) results = executeMulti(processes, results, finished) plotMultiWithErrors("Job Interval", results=results, ylabel="Total Jobs", xlabel="Job Interval") # , save=True)
def threadRun(percentageMinimal, episodeNum, results, finished):
    exp = SimpleSimulation(numDevices=numDevices,
                           maxJobs=50,
                           tasks=[HARD],
                           centralisedLearning=False,
                           systemStateClass=minimalSystemState,
                           scenarioTemplate=REGULAR_SCENARIO_ROUND_ROBIN)
    exp.scenario.setInterval(interval / numDevices)
    exp.setBatterySize(1e-1)
    exp.setFpgaIdleSleep(1e-3)

    # split the devices between minimal table agents and random agents
    numMinimal = int(numDevices * percentageMinimal)
    numRandom = numDevices - numMinimal
    agents = [minimalTableAgent] * numMinimal + [randomAgent] * numRandom
    for agent, device in zip(agents, exp.devices):
        device.agent = agent(reconsiderBatches=False,
                             systemState=exp.currentSystemState,
                             owner=device,
                             offPolicy=exp.offPolicy)
        device.agent.setDevices(exp.devices)
        # print("set agent", agent, agent.__name__, device.agent, device.agent.__name__)

    for e in range(int(episodeNum)):
        exp.simulateEpisode(e)
        totalMinimalAgentJobs = 0
        for i in range(numMinimal):
            dev = exp.devices[i]
            assert dev.agent.__class__ == minimalTableAgent
            totalMinimalAgentJobs += dev.numJobsDone
        results.put(["%d %% Basic Agents" % (int(percentageMinimal * 100.)), e, totalMinimalAgentJobs / numMinimal])
        # sys.stdout.write("\rProgress: %.2f%%" % ((e+1)/episodeNum*100.))
    finished.put(True)
def runThread(jobInterval, results, finished):
    exp = None
    try:
        exp = SimpleSimulation(jobInterval=jobInterval)
        # exp.simulateTime(10)
        # pretrain
        exp.simulateEpisodes(100)
    except:
        traceback.print_exc(file=sys.stdout)
        if exp is not None:
            print("Error in experiment:", jobInterval, exp.time)

    exp.sharedAgent.setProductionMode()
    exp.simulateEpisode()
    results.put(["", jobInterval, exp.numFinishedJobs])
    # results.put(["", jobInterval, np.average([dev.numJobs for dev in exp.devices]) / exp.getCompletedJobs()])
    finished.put(True)