Example no. 1
0
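    # Re-runs the agent's forward pass to produce a fresh offloading decision for a job that is being reconsidered; the training call here is left commented out.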
    def redecideDestination(self, task, job, device):
        # assert constants.OFFLOADING_POLICY == REINFORCEMENT_LEARNING
        # print("redeciding")
        # self.train(task, job, device)
        decision = self.forward(task, job, device)
        debug.out("redecided decision: %s" % decision)
        return decision
Example no. 2
0
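# Test driver: forces an offload of a hardware-accelerated job from source to destination, ticks the simulation until the destination picks the job up, then reassigns the decision to possibleActions[-2] (the wait action, as in doWaitJob) and keeps ticking until both devices are idle.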
def doOffloadJob(experiment, source, destination):
	debug.out("OFFLOAD JOB", 'g')
	offloadJob = job(source, 5, hardwareAccelerated=True)
	decision = experiment.sharedAgent.possibleActions[destination.index]
	decision.updateDevice()
	print("target index", decision.targetDeviceIndex)
	offloadJob.setDecisionTarget(decision)
	experiment.addJob(source, offloadJob)
	print("offload 1 0")
	while destination.currentJob is not offloadJob:
		experiment.simulateTick()
		print('\n\n-\n')
	print("destination has job again")
	print("forward", counters.NUM_FORWARD, "backward", counters.NUM_BACKWARD)
	decision = offloading.offloadingDecision.possibleActions[-2]
	decision.updateDevice(destination)
	offloadJob.setDecisionTarget(decision)
	# batch 2

	# time.sleep(1)
	print("\n\nshould activate now...")
	experiment.simulateTick()

	while destination.currentJob is not None or source.currentJob is not None:
		experiment.simulateTick()
		print('\n\n-\n')

	# assert offloadJob.immediate is False
	assert destination.currentJob is None
Example no. 3
0
	def setOffloadingOptions(self, allDevices):
		self.offloadingOptions = []
		for device in self.agent.getOffloadingTargets(allDevices):
			if device is not self:
				self.offloadingOptions.append(device)
		self.defaultOffloadingOptions = list(self.offloadingOptions)
		debug.out("set offloading options for %s to %s" % (self, self.offloadingOptions))
Example no. 4
0
    def finishTask(self):
        debug.out("done reconfiguration")
        self.job.processingNode.fpga.idle()

        # start processing
        return subtask.finishTask(
            self, (self.job.processingNode, mcuFpgaOffload(self.job)))
Example no. 5
0
    def __init__(self, job):  # device, samples, processor=None):
        debug.out("created mcu fpga offloading task")

        duration = job.processingNode.mcuToFpgaLatency(job.datasize)
        # energyCost = job.processingNode.mcuToFpgaEnergy(duration)

        subtask.__init__(self, job, duration)
Example no. 6
0
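	# Writes the observable features into the RL system state: batch occupancy as a fraction of maxJobs, whether the FPGA is already configured for this task, the remaining energy percentage, and the task identifier.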
	def updateState(self, task, job, device):
		debug.out(debug.formatDebug("update state: %s %s %d %s %s", (device, device.batchLength(task), device.maxJobs, task, job)))
		self.setField('jobsInQueue', device.batchLength(task) / device.maxJobs)
		self.setField('currentConfig', device.fpga.isConfigured(task))
		# print("set currentconfig", device.fpga.isConfigured(task), task, device.getFpgaConfiguration())
		self.setField('energyRemaining', device.getEnergyLevelPercentage())
		self.setField('taskId', task.identifier)
Example no. 7
0
    def possible(self):
        debug.out(
            "{} possible? corresponding TX: {} source task: {} current? {}".
            format(self, self.correspondingTx, self.source.currentSubtask,
                   self.correspondingTx == self.source.currentSubtask))
        # start if tx is also waiting, otherwise if tx has started already
        return self.correspondingTx == self.source.currentSubtask or self.correspondingTx.started
Example no. 8
0
    def __init__(self,
                 job,
                 duration,
                 owner=None,
                 addsLatency=True
                 ):  # , energyCost): # , device): # , origin, destination):
        # self.startTime = currentTime

        # defined subtasks must first set duration and energy
        self.job = job
        if owner is None and job is not None:
            self.owner = self.job.owner
        else:
            self.owner = owner
        # assert(self.owner is not None)

        debug.out(
            "subtask {} duration: {:.10f}".format(self.__name__, duration),
            'y')

        assert duration >= 0
        self.duration = duration
        # remember whether this subtask's duration should count towards job latency (checked in perform)
        self.addsLatency = addsLatency
        self.__class__.totalDuration += duration
        # self.energyCost = energyCost

        self.progress = 0
        self.delay = 0
        self.started = False
        self.finished = False

        subtask.id += 1
        self.id = subtask.id
Example no. 9
0
    def finishTask(self, affectedDevices):
        debug.out("mrf not busy anymore")
        # self.job.creator.mrf.sleep()
        self.owner.mrf.sleep()
        # self.job.processingNode.mrf.sleep()
        debug.out("finishing rxmessage!", 'b')

        return subtask.finishTask(self, affectedDevices)
Example no. 10
0
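    # Picks the next device via the device policy (defaulting to the previously used device as the reference) and the next arrival time, then remembers the chosen device for the following call.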
    def nextJob(self, device=None, time=None):
        if device is None:
            device = self.previousDevice
        nextDevice = self.devicePolicy.nextDevice(device, self.devices)
        nextTime = self.nextTime(time)
        self.previousDevice = nextDevice
        debug.out("next job: %f %s" % (nextTime, nextDevice))
        return nextTime, nextDevice
Example no. 11
0
    def __init__(self, job):
        debug.out("created createMessage")
        # self.destination = job.destination
        # self.samples = job.samples

        duration = job.creator.mcu.messageOverheadLatency.gen()
        # energyCost = job.creator.mcu.activeEnergy(duration)

        subtask.__init__(self, job, duration)
Example no. 12
0
    def __init__(self, job):  # device, samples, processor=None):
        # self.processor = processor

        debug.out("created processing task")

        duration = job.processor.processingTime(job.samples, job.currentTask)

        # reduce message size
        subtask.__init__(self, job, duration)
Example no. 13
0
    def finishTask(self):
        debug.out("\treceived offloaded result")
        self.job.finish()

        self.owner.mcu.sleep()

        debug.out("finishing rxresult!", 'b')

        return rxMessage.finishTask(self, None)
Example no. 14
0
def doLocalJob(experiment, device):
	debug.out("LOCAL JOB", 'g')
	localJob = job(device, 5, hardwareAccelerated=True)
	decision = offloadingDecision.possibleActions[-1]
	print("decision is", decision)
	decision.updateDevice(device)
	localJob.setDecisionTarget(decision)
	experiment.addJob(device, localJob)
	experiment.simulateUntilJobDone()
	print("local done")
Example no. 15
0
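	# Ends the episode and collects every job still executing on a device, or still waiting in one of its batches, into unfinishedJobsList.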
	def stop(self):
		debug.out("STOP", 'r')
		self.finished = True

		# dump incomplete jobs to job list
		for dev in self.devices:
			if dev.currentJob is not None:
				self.unfinishedJobsList.append(dev.currentJob)
			for batch in dev.batch:
				for job in dev.batch[batch]:
					self.unfinishedJobsList.append(job)
Example no. 16
0
    def expandState(self, field):
        # for printing
        if self.useSharedAgent:
            beforeCount = self.currentSystemState.getUniqueStates()
        else:
            beforeCount = self.devices[
                0].agent.currentSystemState.getUniqueStates()

        # change behaviour as required
        if field == "jobsInQueue":
            # increase max jobs allowed
            self.maxJobs += 1

            for dev in self.devices:
                dev.maxJobs += 1
        elif field == "taskId":
            # new task introduced
            # ensure it was added to simulation
            if self.useSharedAgent:
                numTasksInState = self.currentSystemState.singlesDiscrete[
                    field]
            else:
                numTasksInState = self.devices[
                    0].agent.currentSystemState.singlesDiscrete[field]
            assert len(self.tasks) == numTasksInState + 1
        else:
            raise Exception("State cannot be expanded")

        # print("increased maxjobs", self.devices[0].maxJobs)
        # self.currentSystemState.expandField(field)
        if self.sharedAgent is not None:
            assert field in self.sharedAgent.currentSystemState.singles
            self.sharedAgent.expandField(field)
        else:
            # all agents share a system state, so only one needs to be expanded

            for dev in self.devices:
                dev.agent.expandField(field)
            # other agents still need to update their predictions

            # print("agents", [id(dev.agent) for dev in self.devices])
            # for dev in self.devices:
            # print("dev", id(dev), id(dev.agent))
            # dev.agent.expandField(field)
            # print("next device")

        # for printing
        if self.useSharedAgent:
            afterCount = self.currentSystemState.getUniqueStates()
        else:
            afterCount = self.devices[
                0].agent.currentSystemState.getUniqueStates()

        debug.out("expanded states %d to %d" % (beforeCount, afterCount))
Example no. 17
0
    def beginTask(self):
        if (self.owner.currentJob is not None and self.job is None):
            debug.out("changing current job of %s from %s to %s" %
                      (self.owner, self.owner.currentJob, self.job))
        # assert not (self.owner.currentJob is not None and self.job is None) # this would erase existing job
        self.owner.currentJob = self.job
        # all versions of begin must set started
        self.start()
        # debug.out("started {} {}".format(self, self.job.samples))
        debug.out("started {}".format(self))
Example no. 18
0
    def queueTask(self, time, taskType, device, subtask=None):
        # check if this device has a task lined up already:
        # if device.queuedTask is None:
        assert device is not None
        debug.out(
            "queueing task %f %s %s %s" % (time, taskType, device, subtask),
            'r')
        newTask = PrioritizedItem(time, (taskType, device, subtask))
        self.queue.put(newTask)
        # print("queue:")
        # self.printQueue()
        device.queuedTask = newTask
Example no. 19
0
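    # Sends an idle processor to sleep once its accumulated idle time reaches its timeout; otherwise the idle counter grows by one time step (constants.TD), and it is reset whenever the processor is busy.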
    def timeOutSleep(processor):
        print(processor, processor.isIdle(), processor.idleTime,
              processor.idleTimeout, processor.owner.currentTd)
        if processor.isIdle():
            if processor.idleTime >= processor.idleTimeout:
                processor.sleep()
                debug.out("PROCESSOR SLEEP")
            else:
                processor.idleTime += constants.TD
        # else:
        else:
            processor.idleTime = 0
Example no. 20
0
def doWaitJob(experiment, device):
	# fix decision to wait
	debug.out("\nWAIT JOB", 'g')
	waitJob = job(device, 5, hardwareAccelerated=True)
	decision = offloading.offloadingDecision.possibleActions[-2]
	decision.updateDevice(device)
	print("target index", decision.targetDeviceIndex)
	waitJob.setDecisionTarget(decision)
	experiment.addJob(device, waitJob)
	experiment.simulateTime(constants.PLOT_TD * 100)
	print("wait done")
	print("forward", counters.NUM_FORWARD, "backward", counters.NUM_BACKWARD)
Example no. 21
0
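    # Runs the whole subtask in one step: samples the owner's power draw, marks the subtask finished, lets the concrete finishTask return any (device, subtask) pairs it spawns, and adds the elapsed duration to the job's latency when addsLatency is set.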
    def perform(self, visualiser=None):
        self.beginTask()
        subtaskPower = self.owner.getTotalPower()
        debug.out("process {} {} {}".format(self, self.started, self.duration))
        debug.out("power: %s" % subtaskPower, 'g')
        debug.out(
            "states: {0}".format(
                [comp.getPowerState() for comp in self.owner.components]), 'y')
        self.progress = self.duration

        # update visualiser if passed in
        if visualiser is not None:
            visualiser.update()

        self.finished = True
        # self.owner.currentTime.increment(self.duration)

        affectedDevices = self.__class__.finishTask(self)
        debug.out("affected by finishing subtask: %s" % str(affectedDevices))

        # add delay to job
        if self.addsLatency:
            self.job.totalLatency += self.progress
        #
        # self.owner.nextTask()

        return affectedDevices, self.duration, subtaskPower
Example no. 22
0
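    # A batchContinue subtask can be built from an explicit node or from a job that knows its processing node; its duration is drawn (via .gen()) from the platform's MCU_BATCHING_LATENCY.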
    def __init__(self, job=None, node=None):

        if node is not None:
            debug.out("creating batchContinue with node {}".format(node))
            self.processingNode = node
            self.job = job
        elif job is not None:
            debug.out("creating batchContinue with job {}".format(job))
            self.processingNode = job.processingNode
        else:
            raise Exception("Cannot create batchContinue without job and node")
        duration = self.processingNode.platform.MCU_BATCHING_LATENCY.gen()
        self.batch = self.processingNode.currentBatch

        subtask.__init__(self, job, duration)
Example no. 23
0
	def simulateEpisode(self, episodeNumber):
		self.reset(episodeNumber)
		i = 0
		while not self.finished:
			i += 1
			self.simulateTick()
			debug.out(debug.formatDebug("%s", [dev.energyLevel for dev in self.devices]))
		self.episodeNumber += 1

		# update target model if required
		if self.offPolicy:
			if self.useSharedAgent:
				if not self.sharedAgent.productionMode:
					self.sharedAgent.updateModel()
			else:
				for dev in self.devices:
					if not dev.agent.productionMode:
						dev.agent.updateModel()
Example no. 24
0
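    # Common subtask teardown: clears the owner's current subtask and hands any (device, newSubtask) pairs returned by the concrete implementation over to their devices.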
    def finishTask(self, affectedDevices):
        debug.out("finishing subtask! {} {}".format(self, self.owner), 'b')

        self.owner.currentSubtask = None

        if not isinstance(affectedDevices, list):
            affectedDevices = [affectedDevices]

        # add any new subtasks
        # TODO: add queuedTime to created subtasks
        if affectedDevices is not None:
            for affected in affectedDevices:
                if affected is not None:
                    device, newSubtask = affected
                    device.addSubtask(newSubtask)

        # debug.out("current task: {} {}".format(self.owner, self.owner.currentSubtask))
        return affectedDevices
Example no. 25
0
	def __init__(self, systemState, reconsiderBatches, allowExpansion=constants.ALLOW_EXPANSION, owner=None, offPolicy=constants.OFF_POLICY, loss='binary_crossentropy', activation='relu', metrics=['accuracy'], trainClassification=True, precache=True):
		self.gamma = constants.GAMMA
		self.loss = loss
		self.activation = activation
		self.metrics = metrics
		self.classification = trainClassification
		self.precache = precache

		debug.out("DQN agent created")
		self.policy = EpsGreedyQPolicy(eps=constants.EPS)
		# self.dqn = rl.agents.DQNAgent(model=self.model, policy=rl.policy.LinearAnnealedPolicy(, attr='eps', value_max=sim.constants.EPS_MAX, value_min=sim.constants.EPS_MIN, value_test=.05, nb_steps=sim.constants.EPS_STEP_COUNT), enable_double_dqn=False, gamma=.99, batch_size=1, nb_actions=self.numActions)
		# self.optimizer = keras.optimizers.Adam(lr=constants.LEARNING_RATE)
		self.optimizer = keras.optimizers.RMSprop(lr=constants.LEARNING_RATE)

		# self.trainingTargets = []

		# self.createModel()

		qAgent.__init__(self, systemState, reconsiderBatches=reconsiderBatches, owner=owner, offPolicy=offPolicy)
Example no. 26
0
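    # When the offload transmission completes: train the sender's agent on the outcome, remove the job from the sender, and, if the sender is in graceful failure, queue a batchContinue on it before deferring to txMessage.finishTask.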
    def finishTask(self, affectedDevices=None):
        # if using rl, update model
        # must update when starting,
        # if constants.OFFLOADING_POLICY == offloadingPolicy.REINFORCEMENT_LEARNING:
        debug.out("training after offloading job")
        self.owner.agent.train(self.job.currentTask,
                               self.job,
                               self.owner,
                               cause=self.__name__)

        # removing job from sender
        self.owner.removeJob(self.job)

        if self.owner.gracefulFailure:
            debug.out("continuing graceful failure")
            nextSubtask = batchContinue(node=self.owner, job=self.job)
            affectedDevices = [(self.owner, nextSubtask)]

        return txMessage.finishTask(self, affectedDevices)
Example no. 27
0
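    # Transmission duration comes from the creator's mrf radio latency for the job's datasize; the matching receive subtask is created up front via the jobToAdd factory and linked as correspondingRx.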
    def __init__(self, job, source, destination, jobToAdd):
        debug.out("created txMessage")

        self.source = source
        self.destination = destination

        debug.out("txmessage {} -> {}".format(source, destination))

        # source mcu does the work
        duration = job.creator.mrf.rxtxLatency(job.datasize)
        # energyCost = job.creator.mrf.txEnergy(duration)

        # create receiving task
        rx = jobToAdd(job, duration, self, source,
                      destination)  # , owner=destination)
        # # destination.addSubtask(rx)
        self.correspondingRx = rx
        self.waitingForRX = False

        subtask.__init__(self, job, duration)
Example no. 28
0
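    # Receiving an offloaded job: ownership moves to the processing node, whose agent then re-decides the destination before the job is activated under the new decision.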
    def finishTask(self):
        # usingReinforcementLearning = constants.OFFLOADING_POLICY == offloadingPolicy.REINFORCEMENT_LEARNING

        debug.out("adding processing task 1")

        # if offloading, this is before processing
        # if not self.job.processed:
        # move job to new owner
        debug.out("moving job to processingNode")
        # move job to the processing from the creator
        newOwner = self.job.processingNode
        # self.job.creator.waiting = True

        # if usingReinforcementLearning:
        # 	debug.learnOut("training before reevaluating")
        # 	debug.learnOut("backward before update")
        # 	# TODO: this the correct device?
        # 	self.owner.agent.train(self.job.currentTask, self.job, self.owner)
        # 	# systemState.current.update(self.job.currentTask, self.job, self.owner) # still old owner
        # 	# self.job.creator.decision.privateAgent.backward(self.job.reward(), self.job.finished)

        # TODO: rx job in tdsimulation likely broken because not adding received job to backlog (assuming subtask is created)
        self.job.moveTo(newOwner)

        # if using rl, reevalute decision
        # if usingReinforcementLearning:
        # print()
        debug.out("updating decision upon reception")
        debug.out("owner: {}".format(self.job.owner))
        # systemState.current.update(self.job.currentTask, self.job, self.job.owner)
        # debug.out("systemstate: {}".format(systemState.current))

        # # print("systemstate: {}".format(systemState.current))
        # choice = self.job.owner.decision.privateAgent.forward(self.job.owner)
        # print("choice: {}".format(choice))

        # self.job.setDecisionTarget(choice)
        # self.job.activate()

        choice = self.job.owner.agent.redecideDestination(
            self.job.currentTask, self.job, self.job.owner)
        debug.learnOut("redeciding choice %s" % choice)
        self.job.setDecisionTarget(choice)
        affected = self.job.activate()
        # warnings.warn("redecision isn't affected i think")
        # affected = choice.targetDevice
        # otherwise, just add it to the local batch
        # else:
        # 	affected = self.job.processingNode, batching(self.job)

        return rxMessage.finishTask(self, [affected])
Example no. 29
0
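	# Converts the latest total power into an energy increment over the current time delta, drains the energy level accordingly, and advances the device-local clock if one exists; currentTd is cleared so each time delta is only charged once.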
	def updateDeviceEnergy(self, totalPower):
		self.updateAveragePower(totalPower)
		assert self.currentTd is not None
		incrementalEnergy = totalPower * self.currentTd
		# ensure only using each time diff once
		self.totalEnergyCost += incrementalEnergy
		# TODO: assuming battery powered
		# print (incrementalEnergy)
		self.energyLevel -= incrementalEnergy
		self.latestPower = totalPower
		# print(self.currentTd, "@", totalPower)
		debug.out("updating device energy %f %f %f %f" % (self.currentTd, incrementalEnergy, self.totalEnergyCost, self.energyLevel))

		# update device time if local time used
		if self.currentTime is not None:
			self.previousTimestamp = self.currentTime.current
			self.currentTime.increment(self.currentTd)

		self.currentTd = None

		return incrementalEnergy
Example no. 30
0
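    # After processing: idle the processor, shrink the job's datasize to the processed result size, then either hand the result from the FPGA back to the MCU, transmit it back to the creator if it was offloaded, or train the local agent and continue the batch.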
    def finishTask(self):
        self.job.processor.idle()

        debug.out("creating return message")

        self.job.processed = True
        presize = self.job.datasize
        self.job.datasize = self.job.processedMessageSize()
        debug.out("datasize changed from {0} to {1}".format(
            presize, self.job.datasize))

        debug.out("processed hw: {0} offload: {1}".format(
            self.job.hardwareAccelerated, self.job.offloaded()))

        if self.job.hardwareAccelerated:
            newSubtask = fpgaMcuOffload(self.job)
        else:
            # check if offloaded
            if self.job.offloaded():
                newSubtask = txResult(self.job, self.job.processingNode,
                                      self.job.creator)
            else:
                self.owner.agent.train(self.job.currentTask,
                                       self.job,
                                       self.owner,
                                       cause=self.__name__)
                newSubtask = batchContinue(self.job)

            # self.job.creator.jobActive = False
        return subtask.finishTask(self,
                                  [(self.job.processingNode, newSubtask)])