def canAcceptAndIsRequested(self):
    """Return True when this machine is Up and some predecessor requests to
    hand an entity over; as a side effect selects ``activeObject.giver`` as
    the predecessor that has been blocked the longest.

    NOTE(review): the fast-path comment mentions checking "if there is a
    place", but no capacity check is visible in this method -- presumably
    haveToDispose()/the caller guards that; confirm against the callers.
    """
    # get active and giver objects
    activeObject=self.getActiveObject()
    activeObjectQueue=self.getActiveObjectQueue()   # fetched but not used below
    giverObject=self.getGiverObject()
    # single possible giver: cheap path -- machine up and giver has something
    # (done to achieve better cpu processing time)
    if(len(activeObject.previous)==1):
        return activeObject.Up and giverObject.haveToDispose(activeObject)
    isRequested=False       # becomes True if any predecessor requests this object
    maxTimeWaiting=0        # longest time a predecessor has been blocked so far
    # loop through the possible givers; remember the one blocked the longest
    for object in activeObject.previous:
        if(object.haveToDispose(activeObject) and object.receiver==self):
            isRequested=True
            if(object.downTimeInTryingToReleaseCurrentEntity>0):
                # the giver failed while trying to release: count waiting time
                # from the end of its last failure
                timeWaiting=now()-object.timeLastFailureEnded
            else:
                # otherwise count from the end of its entity processing
                timeWaiting=now()-object.timeLastEntityEnded
            # if more than one predecessor has to dispose, take the entity
            # from the one blocked the longest
            if(timeWaiting>=maxTimeWaiting):
                activeObject.giver=object
                maxTimeWaiting=timeWaiting
    return activeObject.Up and isRequested
def RUN(self, fsm): """RUN state; timer is active. Call `pause()` to pause an active timer. This method will signal `done` upon completion. """ # set parameters tstart = now() self.__tic = tstart # temporarily store tic as start of RUN queue = self.ctrlQ tleft = self.duration - self.__tpassed # time left on timer # wait for timer to expire (or be paused) yield queue.remove(fsm, 1, renege=tleft) self.__tic = None telapsed = now() - tstart self.__tpassed += telapsed if fsm.acquired(queue): # PAUSE command assert (telapsed<tleft), \ "[TIMER]: Elapsed time exceeded time left during RUN!" assert (len(fsm.got)==1), "[TIMER]: Control queue failed!" cmd = fsm.got[0] assert (cmd==self.CMDPAUSE), \ "[TIMER]: Invalid control command received in RUN!" yield fsm.goto(self.PAUSE) else: assert (abs(self.__tpassed-self.duration)<const.EPSILON), \ "[TIMER]: Timer failed to complete properly in RUN!" self.__tpassed = self.duration if self.verbose>TIMER_VERBOSE: self.log("DONE") self.done.signal() yield fsm.goto(self.HALT, force=False)
def postProcessing(self, MaxSimtime=None):
    """Close out Assembly statistics at the end of the run and append the
    waiting/working/blockage percentages to their result lists.

    MaxSimtime -- total simulated time; defaults to G.maxSimTime.
    """
    if MaxSimtime==None:
        from Globals import G
        MaxSimtime=G.maxSimTime
    activeObjectQueue=self.getActiveObjectQueue()
    # check all the successors; if no one can accept an Entity then the
    # station might be blocked
    mightBeBlocked=True
    for nextObject in self.next:
        if nextObject.canAccept():
            mightBeBlocked=False
    # if an entity finished processing here but never reached the following
    # object by the end of the simulation, add that open interval to blockage
    if (mightBeBlocked) and ((self.nameLastEntityEntered == self.nameLastEntityEnded)):
        self.totalBlockageTime+=now()-self.timeLastEntityEnded
    # if currently processing an entity, count the open working interval
    if(len(activeObjectQueue)>0) and (not (self.nameLastEntityEnded==self.nameLastFrameWasFull)):
        self.totalWorkingTime+=now()-self.timeLastFrameWasFull
    # waiting time is whatever is left over
    self.totalWaitingTime=MaxSimtime-self.totalWorkingTime-self.totalBlockageTime
    self.Waiting.append(100*self.totalWaitingTime/MaxSimtime)
    self.Working.append(100*self.totalWorkingTime/MaxSimtime)
    self.Blockage.append(100*self.totalBlockageTime/MaxSimtime)
def run(self):
    """Assembly PEM: accept a Frame, fill it with Parts, process the full
    frame, then wait until the successor can take it.
    """
    while 1:
        # wait until the Assembly can accept a frame and one "frame"
        # giver requests it
        yield waituntil, self, self.canAcceptAndIsRequested
        self.getEntity("Frame")     # get the Frame
        # carry on until the Frame is full with the parts
        for i in range(self.getActiveObjectQueue()[0].capacity):
            # wait until a part requests the assembly
            yield waituntil, self, self.isRequestedFromPart
            self.getEntity("Part")
        self.outputTrace(self.getActiveObjectQueue()[0].name, "is now full in "+ self.objName)
        self.timeLastFrameWasFull=now()
        self.nameLastFrameWasFull=self.getActiveObjectQueue()[0].name
        startWorkingTime=now()
        self.totalProcessingTimeInCurrentEntity=self.calculateProcessingTime()
        # hold for the time the assembly operation is carried out
        yield hold,self,self.totalProcessingTimeInCurrentEntity
        self.totalWorkingTime+=now()-startWorkingTime
        self.outputTrace(self.getActiveObjectQueue()[0].name, "ended processing in " + self.objName)
        self.timeLastEntityEnded=now()
        self.nameLastEntityEnded=self.getActiveObjectQueue()[0].name
        startBlockageTime=now()     # NOTE(review): assigned but never read here
        self.completedJobs+=1       # Assembly completed a job
        self.waitToDispose=True     # the frame is full
        # spin until the successor is free AND its chosen giver is this Assembly
        while 1:
            yield waituntil, self, self.next[0].canAccept
            if self.next[0].getGiverObject()==self:
                break
def preempt(self):
    """Preempt the station: push the interrupted work back onto the active
    entity's remaining route and send the entity back to its last giver.
    """
    activeObject=self.getActiveObject()
    activeEntity=self.getActiveObjectQueue()[0]     # get the active Entity
    # calculate the remaining processing time:
    # if resetOnPreemption is set, restart from the full processing time
    if self.resetOnPreemption:
        remainingProcessingTime=self.procTime
    # else subtract the time that passed since the entity entered
    # (may also need failure time if there was any. TO BE MELIORATED)
    else:
        remainingProcessingTime=self.procTime-(now()-self.timeLastEntityEntered)
    # update the remaining route of activeEntity: first re-insert this
    # station with the remaining time...
    activeEntity.remainingRoute.insert(0, {'stationIdsList':[str(self.id)],\
                                           'processingTime':\
                                                {'distributionType':'Fixed',\
                                                 'mean':str(remainingProcessingTime)}})
    # ...then insert the last giver (zero processing) so the entity returns
    # there first
    activeEntity.remainingRoute.insert(0, {'stationIdsList':[str(self.lastGiver.id)],\
                                           'processingTime':\
                                                {'distributionType':'Fixed',\
                                                 'mean':'0'}})
    # set the receiver as the object where the active entity was preempted from
    self.receiver=self.lastGiver
    self.next=[self.receiver]
    self.waitToDispose=True     # set that I have to dispose
    # required to count blockage correctly in the preempted station
    self.receiver.timeLastEntityEnded=now()
    reactivate(self)
def canAcceptAndIsRequested(self):
    """Return True when this Queue can accept an entity and a predecessor
    requests to hand one over; as a side effect selects
    ``activeObject.giver`` as the predecessor blocked the longest.
    """
    # get the active and the giver objects
    activeObject=self.getActiveObject()
    activeObjectQueue=self.getActiveObjectQueue()
    giverObject=self.getGiverObject()
    # single predecessor: just check for a free place and an entity to dispose
    if(len(activeObject.previous)==1):
        return self.canAccept() and giverObject.haveToDispose(activeObject)
    isRequested=False       # True once any predecessor has something to hand in
    maxTimeWaiting=0        # longest time a predecessor has been waiting
    # loop through the predecessors; remember the one blocked the longest
    for object in activeObject.previous:
        if(object.haveToDispose(activeObject)):     # it has something to dispose of
            isRequested=True
            if(object.downTimeInTryingToReleaseCurrentEntity>0):
                # the predecessor failed while waiting: count from the end
                # of its last failure
                timeWaiting=now()-object.timeLastFailureEnded
            else:
                # otherwise count from the end of its entity processing
                timeWaiting=now()-object.timeLastEntityEnded
            # take the entity from the predecessor blocked the longest
            if(timeWaiting>=maxTimeWaiting):
                activeObject.giver=object
                maxTimeWaiting=timeWaiting
    # fix: call canAccept() with no argument, matching the single-predecessor
    # branch above; the original passed `self` as callerObject, wrongly asking
    # whether the Queue can accept from itself
    return self.canAccept() and isRequested
def go(self):
    """Car PEM: arrive, hold a pump for 100 time units, then depart."""
    def report(event):
        # uniform trace line: <time> <car name> <event>
        print('%s %s %s' % (now(), self.name, event))
    report('Starting')
    yield request, self, gasstation     # queue for a free pump
    report('Got a pump')
    yield hold, self, 100.0             # refuelling time
    yield release, self, gasstation     # free the pump
    report('Leaving')
def run(self):
    """Dismantle PEM: accept a full frame, process it, then dispose first
    the parts and finally the empty frame.
    """
    while 1:
        # wait until the Dismantle can accept a frame and one "frame"
        # giver requests it
        yield waituntil, self, self.canAcceptAndIsRequested
        self.getEntity()        # get the Frame with the parts
        self.timeLastEntityEntered=now()
        startWorkingTime=now()
        self.totalProcessingTimeInCurrentEntity=self.calculateProcessingTime()
        # hold for the time the dismantle operation is carried out
        yield hold,self,self.totalProcessingTimeInCurrentEntity
        self.totalWorkingTime+=now()-startWorkingTime
        self.timeLastEntityEnded=now()
        startBlockageTime=now()         # NOTE(review): assigned but never read here
        self.waitToDisposePart=True     # in state to dispose a part
        # wait until all the parts are disposed
        yield waituntil, self, self.frameIsEmpty
        self.waitToDisposePart=False    # no parts left now
        self.waitToDisposeFrame=True    # in state to dispose the frame
        # wait until the frame itself is disposed
        yield waituntil, self, self.isEmpty
        self.completedJobs+=1           # Dismantle completed a job
        self.waitToDisposeFrame=False   # no Frame to dispose any more
def addBlockage(self):
    """Account the blockage time of the current entity: time since the
    entity finished processing, minus down time and off-shift time spent
    while trying to release it.
    """
    self.totalTimeInCurrentEntity=now()-self.timeLastEntityEntered
    self.totalTimeWaitingForOperator += self.operatorWaitTimeCurrentEntity
    # if the entity finished before the current shift started, part of the
    # waiting happened off shift
    if self.timeLastEntityEnded<self.timeLastShiftStarted:
        # NOTE(review): this spans timeLastShiftEnded..timeLastShiftStarted,
        # i.e. the break between shifts -- presumably intentional, but
        # confirm it should not be anchored at timeLastEntityEnded instead
        self.offShiftTimeTryingToReleaseCurrentEntity=self.timeLastShiftStarted-self.timeLastShiftEnded
    # blockage = elapsed since processing ended, excluding down/off-shift time
    blockage=now()-(self.timeLastEntityEnded+self.downTimeInTryingToReleaseCurrentEntity+self.offShiftTimeTryingToReleaseCurrentEntity)
    self.totalBlockageTime+=blockage
def moveEntities(self):
    """Advance every entity on the conveyer by the simulated time that
    passed since the last move, respecting the belt end and the entity
    ahead; accumulate the (parallel) move time into totalWorkingTime.
    """
    interval=now()-self.timeLastMoveHappened
    # the simulation time that passed since the last move was taken care of
    # (converted to the conveyer's speed time base)
    interval=(float(interval))*60.0
    moveTime1=0     # move time of the lead entity
    moveTime2=0     # longest move time among the trailing entities
    # for the first entity
    if len(self.position)>0:
        if self.position[0]!=self.length:
            # if it does not reach the end of conveyer move it according to speed
            if self.position[0]+interval*self.speed<self.length:
                moveTime1=interval
                self.position[0]=self.position[0]+interval*self.speed
            # else move it to the end of conveyer
            else:
                moveTime1=(self.length-self.position[0])/self.speed
                self.position[0]=self.length
                self.entityLastReachedEnd=self.getActiveObjectQueue()[0]
                self.timeLastEntityReachedEnd=now()
                self.timeLastEntityEnded=now()
    # for the other entities
    for i in range(1,len(self.getActiveObjectQueue())):
        # if it does not reach the preceding entity move it according to speed
        if self.position[i]+interval*self.speed<self.position[i-1]-self.getActiveObjectQueue()[i].length:
            moveTime2=interval
            self.position[i]=self.position[i]+interval*self.speed
        # else move it right before the preceding entity
        else:
            mTime=(self.position[i-1]-self.getActiveObjectQueue()[i].length-self.position[i])/self.speed
            if mTime>moveTime2:
                moveTime2=mTime
            # NOTE(review): the gap test and mTime use entity [i].length but the
            # snap-to position uses [i-1].length -- one of the two indices looks
            # wrong; confirm which edge `position` marks before changing
            self.position[i]=self.position[i-1]-self.getActiveObjectQueue()[i-1].length
    self.timeLastMoveHappened=now()     # assign this time as the time of last move
    # all the time of moving (the max, since entities move in parallel)
    self.totalWorkingTime+=max(moveTime1/60.0, moveTime2/60.0)
def commit(self):
    """Commit the transaction's writeset: apply writes to the local group,
    simulate commit latency, apply them atomically to the original
    dataset, then release all locks.  SimPy co-routine (yields).
    """
    self.logger.debug('%s start commit at %s' % (self, now()))
    wsStrings = []      # debug-only rendering of the writeset
    # write values to local group
    for itemID, value in self.writeset.iteritems():
        item = self.snode.groups[itemID.gid][itemID]
        item.write(value)
        if self.logger.isEnabledFor(logging.DEBUG):
            wsStrings.append('(%s, %s)' % (itemID, value))
    # simulate commit interval and commit time costs
    yield hold, self, RandInterval.get(
        *self.txn.config.get('commit.intvl.dist', ('fixed', 0))).next()
    yield hold, self, RandInterval.get(
        *self.txn.config.get('commit.time.dist', ('fixed', 0))).next()
    if self.logger.isEnabledFor(logging.DEBUG):
        self.logger.debug(
            '%s commit {%s} at %s'
            % (self.ID, ', '.join([s for s in wsStrings]), now()))
    # write to the original atomically (no yields between the writes)
    dataset = self.snode.system.dataset
    for itemID, value in self.writeset.iteritems():
        dataset[itemID].write(value)
        dataset[itemID].lastWriteTxn = self.txn
    # release locks
    for step in self.releaseLocks():
        yield step
def execute(self):
    """Firework PEM: fuse burns 10 units, ten 1-unit ticks, then the bang."""
    print('%s firework launched' % now())
    yield hold, self, 10.0              # initial fuse
    ticks = 0
    while ticks < 10:                   # ten countdown ticks, one per time unit
        yield hold, self, 1.0
        print('%s tick' % now())
        ticks += 1
    yield hold, self, 10.0              # final delay before detonation
    print('%s Boom!!' % now())
def setWIP(entityList): # for all the entities in the entityList for entity in entityList: # if the entity is of type Part if entity.type=='Part' or entity.type=='Batch' or entity.type=='SubBatch': object=entity.currentStation #identify the object object.getActiveObjectQueue().append(entity) #append the entity to its Queue entity.schedule.append([object,now()]) #append the time to schedule so that it can be read in the result # if the entity is of type Job/OrderComponent/Order/Mould elif entity.type=='Job' or entity.type=='OrderComponent' or entity.type=='Order' or entity.type=='Mould': # find the list of starting station of the entity currentObjectIds=entity.remainingRoute[0].get('stationIdsList',[]) # if the list of starting stations has length greater than one then there is a starting WIP definition error try: if len(currentObjectIds)==1: objectId=currentObjectIds[0] else: raise SetWipTypeError('The starting station of the the entity is not defined uniquely') except SetWipTypeError as setWipError: print 'WIP definition error: {0}'.format(setWipError) # get the starting station of the entity and load it with it object = findObjectById(objectId) object.getActiveObjectQueue().append(entity) # append the entity to its Queue # read the IDs of the possible successors of the object nextObjectIds=entity.remainingRoute[1].get('stationIdsList',[]) # for each objectId in the nextObjects find the corresponding object and populate the object's next list nextObjects=[] for nextObjectId in nextObjectIds: nextObject=findObjectById(nextObjectId) nextObjects.append(nextObject) # update the next list of the object for nextObject in nextObjects: # append only if not already in the list if nextObject not in object.next: object.next.append(nextObject) entity.remainingRoute.pop(0) # remove data from the remaining route. 
entity.schedule.append([object,now()]) #append the time to schedule so that it can be read in the result entity.currentStation=object # update the current station of the entity # if the currentStation of the entity is of type Machine then the entity # must be processed first and then added to the pendingEntities list # Its hot flag is not raised if not (entity.currentStation in G.MachineList): # variable to inform whether the successors are machines or not successorsAreMachines=True for nextObject in entity.currentStation.next: if not nextObject in G.MachineList: successorsAreMachines=False break if not successorsAreMachines: entity.hot = False else: entity.hot = True # add the entity to the pendingEntities list G.pendingEntities.append(entity)
def checkout(self):
    """Customer PEM: queue for an aisle, pay per item, record both waits."""
    arrived = now()                         # customer decides to check out
    yield request, self, checkout_aisle     # wait for a free aisle
    reached_front = now()                   # customer gets to front of line
    waittime.tally(reached_front - arrived)
    yield hold, self, self.items * ITEMTIME # scanning time per item
    leaving = now()                         # customer completes purchase
    checkouttime.tally(leaving - reached_front)
    yield release, self, checkout_aisle
def run(self): print 'main thread at %s' %now() yield hold, self, 10 print 'fork new thread at %s' %now() child = Child() child.start() yield hold, self, 3 print 'wait for child thread to finish at %s' %now() yield waitevent, self, child.finish print 'child thread finish at %s' %now()
def getserved(self, servtime, priority, myServer):
    """Client PEM: hold one unit of myServer for servtime at the given priority."""
    Client.inClients.append(self.name)
    print('%s requests 1 unit at t = %s' % (self.name, now()))
    yield request, self, myServer, priority     # queue for a resource unit
    yield hold, self, servtime                  # being served
    yield release, self, myServer               # hand the unit back
    print('%s done at t = %s' % (self.name, now()))
    Client.outClients.append(self.name)
def run(self):
    """Transaction-thread PEM: prepare, then retry the
    begin/read-write/commit cycle until it commits, aborting and backing
    off on deadlock, retrying on timeout; finally clean up.
    """
    self.logger.debug('%s start at %s' %(self.ID, now()))
    self.Preparing()
    for step in self.prepare():
        yield step
    while True:
        try:
            #start
            self.Running()
            for step in self.begin():
                yield step
            #read and write
            for action in self.txn.actions:
                if action.isRead():
                    for step in self.read(action.itemID, action.attr):
                        yield step
                else:
                    assert action.isWrite()
                    for step in self.write(action.itemID, action.attr):
                        yield step
                #simulate the cost of each read/write step
                yield hold, self, RandInterval.get(
                    *self.txn.config['action.intvl.dist']).next()
            #try commit
            self.Committing()
            for step in self.trycommit():
                yield step
            #the commit phase is error free
            for step in self.commit():
                yield step
            self.Committed()
            break
        except BThread.DeadlockException as e:
            self.logger.debug('%s aborted because of deadlock %s at %s'
                              %(self.ID, str(e), now()))
            self.monitor.observe('deadlock.cycle.length', len(e.waiters) + 1)
            self.monitor.start('abort.deadlock')
            self.Aborting()
            for step in self.abort():
                yield step
            #wait for one of the waiters to leave before retrying
            waitEvts = []
            for w in e.waiters:
                waitEvts.append(w.finish)
            yield waitevent, self, waitEvts
            self.monitor.stop('abort.deadlock')
        except TimeoutException as e:
            self.monitor.observe('abort.timeout', 0)
            # fix: the format string had three placeholders ("with state %s")
            # but only two arguments, so this raised TypeError whenever a
            # timeout actually occurred
            self.logger.debug(
                '%s aborted because of timeout on %r'
                %(e.args[0], e.args[1]))
    for step in self.cleanup():
        yield step
    self.Finished()
def run(self):
    """Transaction-thread PEM: prepare, then retry the
    begin/read-write/commit cycle until it commits, aborting and backing
    off on deadlock, retrying on timeout; finally clean up.
    """
    self.logger.debug('%s start at %s' % (self.ID, now()))
    self.Preparing()
    for step in self.prepare():
        yield step
    while True:
        try:
            #start
            self.Running()
            for step in self.begin():
                yield step
            #read and write
            for action in self.txn.actions:
                if action.isRead():
                    for step in self.read(action.itemID, action.attr):
                        yield step
                else:
                    assert action.isWrite()
                    for step in self.write(action.itemID, action.attr):
                        yield step
                #simulate the cost of each read/write step
                yield hold, self, RandInterval.get(
                    *self.txn.config['action.intvl.dist']).next()
            #try commit
            self.Committing()
            for step in self.trycommit():
                yield step
            #the commit phase is error free
            for step in self.commit():
                yield step
            self.Committed()
            break
        except BThread.DeadlockException as e:
            self.logger.debug('%s aborted because of deadlock %s at %s'
                              % (self.ID, str(e), now()))
            self.monitor.observe('deadlock.cycle.length', len(e.waiters) + 1)
            self.monitor.start('abort.deadlock')
            self.Aborting()
            for step in self.abort():
                yield step
            #wait for one of the waiters to leave before retrying
            waitEvts = []
            for w in e.waiters:
                waitEvts.append(w.finish)
            yield waitevent, self, waitEvts
            self.monitor.stop('abort.deadlock')
        except TimeoutException as e:
            self.monitor.observe('abort.timeout', 0)
            # fix: the format string had three placeholders ("with state %s")
            # but only two arguments, so this raised TypeError whenever a
            # timeout actually occurred
            self.logger.debug(
                '%s aborted because of timeout on %r'
                % (e.args[0], e.args[1]))
    for step in self.cleanup():
        yield step
    self.Finished()
def execute(self):
    """Watcher PEM: sample cluster CPU utilization every sample_period,
    run an autoscaling decision every 10 samples (60 s), and track the
    job arrival rate.
    """
    checks = 0                      # sample counter
    measures = []                   # sliding window of utilization metrics
    sample_period = 6
    last_scaling_activity = -1e6    # far in the past so the first check may scale
    old_arrivals = 0
    while True:
        #
        # update cpu utilization across all servers
        #
        busy = len(Server.busy)
        both = busy + len(Server.idle)
        # NOTE(review): BUSY_FACTOR multiplies only the busy term of the
        # denominator, weighting busy servers -- confirm this is intended
        Watcher.cpu_utilization.observe(
            100.0 * float(BUSY_FACTOR * busy)
            / float(BUSY_FACTOR * busy + len(Server.idle)) if both else 0.0)
        #
        # do autoscaling checks every 60 seconds (10 samples of 6 s)
        #
        checks += 1
        if checks % 10 == 0:
            # compute the cpu utilization metric over the desired period
            samples = self.period / sample_period
            utilization = sum(Watcher.cpu_utilization.yseries()[-samples:]) / samples
            measures.append(utilization)
            # act only once the breach-duration window is full
            if len(measures) >= (self.breach_duration // self.period):
                if len(measures) > (self.breach_duration // self.period):
                    measures.pop(0)     # keep the window at fixed size
                # respect the cooldown between scaling activities
                if now() - last_scaling_activity > self.cooldown:
                    new_server_target = both
                    # every sample breached the upper threshold: scale up
                    if all([x > self.upper_threshold for x in measures]):
                        new_server_target = both + self.upper_breach_scale_increment
                        new_server_target = min(new_server_target, self.max_size)
                    # every sample under the lower threshold: scale down
                    elif all([x < self.lower_threshold for x in measures]):
                        new_server_target = both + self.lower_breach_scale_increment
                        new_server_target = max(new_server_target, self.min_size)
                    if new_server_target < both:
                        # scale down
                        for _ in range(both - new_server_target):
                            Server.terminate()
                        last_scaling_activity = now()
                    elif new_server_target > both:
                        # scale up
                        for _ in range(new_server_target - both):
                            s = Server(self.max_wait, self.instance_type, self.latency)
                            activate(s, s.execute())
                        last_scaling_activity = now()
        #
        # unrelated: also keep track of arrival rate
        #
        Watcher.arrivals_mon.observe(Job.arrivals - old_arrivals)
        old_arrivals = Job.arrivals
        yield hold, self, sample_period
def run(self):
    """Server PEM: until t=100 service incoming 'request' messages, then
    idle until t=200, finally report the accumulated RTI messages.
    """
    while now() < 100:
        # drain any pending requests
        if self.checkMsg('request'):
            for content in self.popContents('request'):
                print ('server %s received request %s at %s' %(self.inetAddr, content, now()))
        # block until the next 'request' message arrives
        for step in self.waitMsg('request'):
            yield step
    # quiet period: just let time pass in 50-unit holds
    while now() < 200:
        yield hold, self, 50
    print '%s, messages: %s'%(self.inetAddr, self.rtiMessages)
def getEntity(self): activeEntity = CoreObject.getEntity(self) #run the default method # if the entity is in the G.pendingEntities list then remove it from there from Globals import G if activeEntity in G.pendingEntities: G.pendingEntities.remove(activeEntity) self.totalLifespan+=now()-activeEntity.startTime #Add the entity's lifespan to the total one. self.numOfExits+=1 # increase the exits by one self.totalNumberOfUnitsExited+=activeEntity.numberOfUnits # add the number of units that xited self.totalTaktTime+=now()-self.timeLastEntityLeft # add the takt time self.timeLastEntityLeft=now() # update the time that the last entity left from the Exit return activeEntity
def run(self):
    """Repeatedly run a consistency-check transaction on the tanks.

    Each round: sleep, take shared locks on every tank, verify the tank
    invariant, and -- if this thread does up-flow and the last tank is
    full -- upgrade to exclusive locks and move TOTAL_FLOW from the last
    tank back to the first.  Deadlocks abort the round; locks are always
    released in the finally clause.
    """
    while now() < OPERATION_TIME:
        #sleep between rounds
        yield hold, self, random.randint(1, 10 * OP_INTVL_MAX)
        self.monitor.start('txn')
        self.logger.debug('%s txn start at %s' %(self.ID, now()))
        #a large try clause for the transaction
        try:
            #lock every tank in shared mode
            for tank in self.tanks:
                for step in self.lock(tank, Lockable.SHARED):
                    yield step
                yield hold, self, random.randint(1, OP_INTVL_MAX)
            #check: gather the tanks (with a small cost each) and verify
            tanks = []
            for tank in self.tanks:
                tanks.append(tank)
                yield hold, self, random.randint(1, 2 * OP_INTVL_MAX)
            verifyTanks(tanks)
            #flow up: move the full last tank's content back to the first
            if self.flowUp and self.tanks[-1].value == TOTAL_FLOW:
                global numUpFlowTxns
                numUpFlowTxns += 1
                # upgrade first and last tank to exclusive locks
                for tank in (self.tanks[0], self.tanks[-1]):
                    for step in self.lock(tank, Lockable.EXCLUSIVE):
                        yield step
                    yield hold, self, random.randint(1, OP_INTVL_MAX)
                assert self.tanks[-1].value == TOTAL_FLOW
                self.tanks[0].value = TOTAL_FLOW
                yield hold, self, random.randint(1, 5 * OP_INTVL_MAX)
                self.tanks[-1].value = 0
                self.logger.debug('%s up flow at %s' %(self.ID, now()))
        except BThread.DeadlockException as e:
            self.logger.debug('%s aborted because of deadlock %s at %s'
                              %(self.ID, str(e), now()))
            global numAbortedCheckTxns
            global numAbortedUpFlowTxns
            numAbortedCheckTxns += 1
            if self.flowUp and self.tanks[-1].value == TOTAL_FLOW:
                numAbortedUpFlowTxns += 1
        finally:
            #unlock every tank whether the round succeeded or aborted
            for tank in self.tanks:
                for step in self.unlock(tank):
                    yield step
                yield hold, self, random.randint(1, OP_INTVL_MAX)
            global numCheckTxns
            numCheckTxns += 1
            self.monitor.stop('txn')
            self.logger.debug('%s txn stop at %s' %(self.ID, now()))
def run(self):
    """Repeatedly run a consistency-check transaction on the tanks.

    Each round: sleep, take shared locks on every tank, verify the tank
    invariant, and -- if this thread does up-flow and the last tank is
    full -- upgrade to exclusive locks and move TOTAL_FLOW from the last
    tank back to the first.  Deadlocks abort the round; locks are always
    released in the finally clause.
    """
    while now() < OPERATION_TIME:
        #sleep between rounds
        yield hold, self, random.randint(1, 10 * OP_INTVL_MAX)
        self.monitor.start('txn')
        self.logger.debug('%s txn start at %s' % (self.ID, now()))
        #a large try clause for the transaction
        try:
            #lock every tank in shared mode
            for tank in self.tanks:
                for step in self.lock(tank, Lockable.SHARED):
                    yield step
                yield hold, self, random.randint(1, OP_INTVL_MAX)
            #check: gather the tanks (with a small cost each) and verify
            tanks = []
            for tank in self.tanks:
                tanks.append(tank)
                yield hold, self, random.randint(1, 2 * OP_INTVL_MAX)
            verifyTanks(tanks)
            #flow up: move the full last tank's content back to the first
            if self.flowUp and self.tanks[-1].value == TOTAL_FLOW:
                global numUpFlowTxns
                numUpFlowTxns += 1
                # upgrade first and last tank to exclusive locks
                for tank in (self.tanks[0], self.tanks[-1]):
                    for step in self.lock(tank, Lockable.EXCLUSIVE):
                        yield step
                    yield hold, self, random.randint(1, OP_INTVL_MAX)
                assert self.tanks[-1].value == TOTAL_FLOW
                self.tanks[0].value = TOTAL_FLOW
                yield hold, self, random.randint(1, 5 * OP_INTVL_MAX)
                self.tanks[-1].value = 0
                self.logger.debug('%s up flow at %s' % (self.ID, now()))
        except BThread.DeadlockException as e:
            self.logger.debug('%s aborted because of deadlock %s at %s'
                              % (self.ID, str(e), now()))
            global numAbortedCheckTxns
            global numAbortedUpFlowTxns
            numAbortedCheckTxns += 1
            if self.flowUp and self.tanks[-1].value == TOTAL_FLOW:
                numAbortedUpFlowTxns += 1
        finally:
            #unlock every tank whether the round succeeded or aborted
            for tank in self.tanks:
                for step in self.unlock(tank):
                    yield step
                yield hold, self, random.randint(1, OP_INTVL_MAX)
            global numCheckTxns
            numCheckTxns += 1
            self.monitor.stop('txn')
            self.logger.debug('%s txn stop at %s' % (self.ID, now()))
def printProgress(self):
    """Log txn progress, throttled in both sim time and wall-clock time;
    stop the simulation when the wall-clock budget is exhausted."""
    wallclock = time.time()
    # only print when both the simulated and the real elapsed time pass
    # their thresholds, to avoid flooding the output
    sim_due = now() - self.lastPrintSimTime > self.simThr
    real_due = wallclock - self.lastPrintRealTime > self.realThr
    if sim_due and real_due:
        self.logger.info(
            'progress = %s/%s/%s'
            % (self.numTxnsArrive, self.numTxnsDepart, self.numTxnsSched))
        self.lastPrintSimTime = now()
        self.lastPrintRealTime = wallclock
    # hard wall-clock cap on the whole simulation
    if wallclock - self.simStartTime > self.realTimeDuration:
        self.logger.info('Simulation is taking too long. Stop.')
        stopSimulation()
def operate(self, repairduration, triplength):  # PEM
    """Bus PEM: drive the trip, pausing repairduration on each breakdown."""
    tripleft = triplength               # time still needed to finish the trip
    while tripleft > 0:
        yield hold, self, tripleft      # attempt the rest of the trip
        if not self.interrupted():
            break                       # no more breakdowns, bus finished trip
        # a breakdown occurred mid-trip
        print('%s at %s' % (self.interruptCause.name, now()))
        tripleft = self.interruptLeft   # time remaining after the interrupt
        self.interruptReset()           # leave the interrupted state
        reactivate(br, delay=repairduration)    # reschedule breakdown process
        yield hold, self, repairduration        # delay for repairs
        print('Bus repaired at %s' % now())
    print('Bus has arrived at %s' % now())
def printProgress(self):
    """Emit a throttled progress line; abort the run past the real-time budget."""
    wallclock = time.time()
    # throttle on simulated time AND wall-clock time so the log stays readable
    if (now() - self.lastPrintSimTime > self.simThr
            and wallclock - self.lastPrintRealTime > self.realThr):
        counts = (self.numTxnsArrive, self.numTxnsDepart, self.numTxnsSched)
        self.logger.info('progress = %s/%s/%s' % counts)
        self.lastPrintSimTime = now()
        self.lastPrintRealTime = wallclock
    # give up once the whole run exceeds its wall-clock allowance
    if wallclock - self.simStartTime > self.realTimeDuration:
        self.logger.info('Simulation is taking too long. Stop.')
        stopSimulation()
def sendrreq(self):
    """Update parameters for the next RREQ (expanding-ring search)."""
    maxttl = self.DiscoveryHopLimit
    if self.ttl is None:
        # first RREQ: TTL starts at 1, ID seeded pseudo-randomly from the
        # object identity and the current time
        ts = now()
        ttl = 1
        ID = (id(self) + int(ts * 1e3)) % self.MaxID
    else:
        # subsequent RREQ: double the TTL and step the ID
        ts = now()
        ttl = self.ttl * 2
        ID = (self.ID + 1) % self.MaxID
    # store the new parameters, capping TTL at the discovery hop limit
    self.ttl = min(ttl, maxttl)
    self.ts, self.ID = ts, ID
def run(self):
    """System driver PEM: start the nodes, then feed scheduled transactions
    into the client nodes while RUNNING, drain them while CLOSING, and
    terminate once CLOSED.
    """
    #start client and storage nodes
    self.startupNodes()
    self.startupPaxos()
    #the big while loop
    while True:
        if self.state == BaseSystem.RUNNING:
            if not self.txnsToRun.empty():
                #simulate txn arrival as scheduled
                at, txn = self.txnsToRun.get()
                # sleep until the scheduled arrival time
                while now() < at:
                    nextArrive = Alarm.setOnetime(at - now(), 'txn-arr')
                    yield waitevent, self, nextArrive
                if now() < at:
                    continue
                self.numTxnsArrive += 1
                # admit the txn unless the system is at capacity
                if self.allowOverLoad or \
                        len(self.txnsRunning) < self.maxNumTxns:
                    cnode = self.cnodes[txn.zoneID]
                    cnode.onTxnArrive(txn)
                else:
                    self.onTxnLoss(txn)
            else:
                # no more scheduled txns: begin shutting down
                self.state = BaseSystem.CLOSING
                self.logger.info(
                    'system: all txns arrived, start closing at %s' % now())
                for cnode in self.cnodes:
                    cnode.close()
        elif self.state == BaseSystem.CLOSING:
            #check if all client nodes have finished
            closed = True
            for cnode in self.cnodes:
                if not cnode.isFinished():
                    closed = False
                    break
            if closed:
                # fix: was `self.state == BaseSystem.CLOSED`, a no-op
                # comparison -- the system could never leave CLOSING
                self.state = BaseSystem.CLOSED
        elif self.state == BaseSystem.CLOSED:
            break
        else:
            # fix: the original message had no %s placeholder, so the
            # formatting itself raised TypeError instead of this ValueError
            raise ValueError('Unknown system status: %s' % self.state)
        #print progress
        self.printProgress()
        #sleep in sim world if closing
        if self.state == BaseSystem.CLOSING:
            sleep = Alarm.setOnetime(self.simThr, 'closing')
            yield waitevent, self, sleep
def set_recvanno(self, p):
    """Set annotation for incoming traffic at start of capture (i.e.
    right after packet has been inserted into `rxbuffer`).

    :return: Modified packet `p`.

    By default, this method initializes the 'cif-collision' annotation,
    sets the 'cif-dst' annotation, and sets the 'cif-rxts' annotation.
    Every other packet currently in `rxbuffer` is cross-registered with
    `p` as a mutual collision.  Overload this method as necessary.
    """
    assert (p in self.rxbuffer), "[CHANNELIF]: set_recvanno(p) " + \
            "could not find packet 'p' in rxbuffer!"
    p.setanno('cif-dst', self, ref=True)
    p.setanno('cif-collision', [], priv=True)
    # indices of every rxbuffer entry other than p itself
    idx = self.rxbuffer.index(p)
    range_not_idx = range(len(self.rxbuffer) )
    range_not_idx.remove(idx)
    # add p to collision list of all other packets in rxbuffer
    for k in range_not_idx:
        c = self.rxbuffer[k]
        c.getanno('cif-collision').append(p)
    # add every other rxbuffer packet to the collision list of p
    for k in range_not_idx:
        c = self.rxbuffer[k]
        p.getanno('cif-collision').append(c)
    # set timestamp for arrival at cif-dst
    p.setanno('cif-rxts', now() )
    return p
def __timepassed(self):
    """Private method to determine time elapsed on timer."""
    if self.__tic is not None:
        # timer is running: accumulated time plus time since the last tic
        return self.__tpassed + (now() - self.__tic)
    # timer is paused/stopped: accumulated time only
    return self.__tpassed
def haveToDispose(self, callerObject=None):
    """Return True when this Queue holds at least one entity and the caller
    is the chosen receiver; as a side effect picks the receiver that has
    been waiting the longest (plant does not do this in every occasion!).
    """
    # get active object and its queue
    activeObject=self.getActiveObject()
    activeObjectQueue=self.getActiveObjectQueue()
    thecaller = callerObject
    # single possible receiver (or anonymous caller): receiver is next[0]
    if(len(activeObject.next)==1 or callerObject==None):
        activeObject.receiver=activeObject.next[0]
        return len(activeObjectQueue)>0\
                and thecaller==activeObject.receiver
    maxTimeWaiting=0        # longest waiting time among free successors
    hasFreeReceiver=False   # True once any successor can accept
    # loop through the successors; pick the one waiting the longest
    for object in activeObject.next:
        if(object.canAccept(activeObject)):
            hasFreeReceiver=True
            timeWaiting=now()-object.timeLastEntityLeft
            # "or maxTimeWaiting==0" makes the first free successor always win
            # the initial comparison
            if(timeWaiting>maxTimeWaiting or maxTimeWaiting==0):
                maxTimeWaiting=timeWaiting
                # NOTE(review): sets self.receiver here but
                # activeObject.receiver in the branch above -- same object in
                # practice, but the mixed spelling is worth unifying
                self.receiver=object
    # True when the Queue has entities, a successor is free, and the caller
    # is the selected receiver
    return len(activeObjectQueue)>0 and (thecaller is self.receiver) and hasFreeReceiver
def onTxnLoss(self, txn):
    """Record a transaction dropped because the system was at capacity."""
    self.numTxnsLoss += 1
    self.numTxnsDepart += 1
    # observe the loss under a per-txn key
    lossKey = '%s.%s' % (BaseSystem.TXN_LOSS_KEY_PREFIX, txn.ID)
    self.monitor.observe(lossKey, 0)
    self.logger.debug(
        'Txn %s loss from system at %s, loss rate=%s/%s'
        % (txn.ID, now(), self.numTxnsLoss, self.numTxnsSched))
def run(self):
    """Epoch-proposer PEM: register the system-wide acceptors/learners,
    start the local acceptor and learner, then on every epoch tick
    propose the currently queued transactions as one batch.
    """
    # collect the acceptors and learners of all client nodes
    for cnode in self.system.cnodes:
        self.sysAcceptors.append(cnode.acceptor)
    for cnode in self.system.cnodes:
        self.sysLearners.append(cnode.learner)
    yield hold, self, self.skew     # simulate clock skew before starting
    self.acceptor.start()
    self.learner.start()
    timer = Alarm.setPeriodic(self.epochLength, 'epoch')
    while not self.shouldClose:
        yield waitevent, self, (self.closeEvent, timer)
        if timer in self.eventsFired:
            # drain the epoch queue into one batch
            batch = []
            while len(self.epochQueue) > 0:
                batch.append(self.epochQueue.pop(0))
            # epoch id derived from current time; +0.01 guards against
            # landing exactly on the boundary
            # NOTE(review): uses self.now() here but module-level now() in the
            # log line below -- confirm both clocks agree
            eid = int((self.now() + 0.01) / self.epochLength)
            Proposer(self).propose(eid, list(batch))
            if len(batch) > 0:
                self.logger.debug('%s proposed batch %s.%s %s at %s'
                                  % (self, self, eid,
                                     '[%s]' % (', '.join([txn.ID for txn in batch])),
                                     now()))
        elif self.shouldClose:
            # close event fired without an epoch tick
            self.acceptor.close()
            self.learner.close()
            self._close()
def collect(self, model, n=300.0):
    """Sampler PEM: collect model statistics n times over model.runtime."""
    interval = model.runtime / n    # spacing between samples
    tlast = 0                       # sim time of the previous sample
    while True:
        yield hold, self, interval
        model.collect_stats(tlast)
        tlast = now()
def removeEntity(self, entity=None):
    """Release `entity` from this station: close out its blockage
    accounting, reset the per-entity counters, and return the entity."""
    self.addBlockage()      # account blockage time up to this release
    queue = self.getActiveObjectQueue()
    queue.remove(entity)    # take the Entity out of the internal queue
    # reset the per-entity failure / down / off-shift counters
    self.failureTimeInCurrentEntity = 0
    self.downTimeInTryingToReleaseCurrentEntity = 0
    self.offShiftTimeTryingToReleaseCurrentEntity = 0
    self.timeLastEntityLeft = now()
    self.outputTrace(entity.name, "released "+self.objName)
    # record the WIP level if statistics gathering is on
    if self.gatherWipStat:
        self.wipStatList.append([now(), len(queue)])
    return entity
def reassemble(self):
    """Merge the sub-batches currently queued at this station back into
    their single parent Batch, which replaces them in the queue."""
    activeObject = self.getActiveObject()
    activeObjectQueue=activeObject.getActiveObjectQueue()   # get the internal queue of the active core object
    # sanity check: all queued sub-batches must belong to the same Batch
    curSubBatchId = 0
    nextSubBatchId = 0
    for i in range(len(activeObjectQueue)-1):
        curSubBatchId = activeObjectQueue[i].batchId
        nextSubBatchId = activeObjectQueue[i+1].batchId
        assert curSubBatchId == nextSubBatchId,\
            'The subBatches in the re-assembly station are not of the same Batch'
    #calculate the number of units of the Batch
    numberOfUnits=0
    for subBatch in activeObjectQueue:
        numberOfUnits+=subBatch.numberOfUnits
    # the batch to be reassembled
    batchToBeReassembled = activeObjectQueue[0].parentBatch
    # if the activeEntity is hot then the subBatches should be also hot
    batchToBeReassembled.hot=activeObjectQueue[0].hot
    # if the activeEntity is in the pendingEntities list then place the subBatches there
    # (the parent replaces its sub-batches in G.pendingEntities)
    if activeObjectQueue[0] in G.pendingEntities:
        G.pendingEntities.append(batchToBeReassembled)
        for entity in activeObjectQueue:
            G.pendingEntities.remove(entity)
    # empty the queue, then re-populate it with the single reassembled batch
    del activeObjectQueue[:]
    batchToBeReassembled.numberOfSubBatches = 1
    batchToBeReassembled.numberOfUnits=numberOfUnits
    activeObjectQueue.append(batchToBeReassembled)
    batchToBeReassembled.currentStation=self
    self.timeLastEntityEnded=now()
    self.outputTrace(batchToBeReassembled.name, 'was reassembled')
def haveToDispose(self, callerObject=None):
    """Return True if this Machine is ready to hand its Entity to *callerObject*.

    The Machine must hold an Entity, be waiting to dispose, and be Up.
    With several successors, the one that has been waiting longest since
    its last Entity left is chosen as the receiver; only that caller gets
    a True answer.

    Fix: removed the redundant second `thecaller=callerObject` assignment
    (thecaller was already set before the single-successor branch).
    """
    # get active object and its queue
    activeObject=self.getActiveObject()
    activeObjectQueue=self.getActiveObjectQueue()
    thecaller = callerObject
    #if we have only one successor just check if machine waits to dispose and also is up
    # this is done to achieve better (cpu) processing time
    if(len(activeObject.next)==1 or callerObject==None):
        activeObject.receiver=activeObject.next[0]
        return len(activeObjectQueue)>0\
                and activeObject.waitToDispose\
                and activeObject.Up\
                and thecaller==activeObject.receiver
    # give the entity to the successor that is waiting for the most time.
    # (plant simulation does not do this in every occasion!)
    maxTimeWaiting=0    # dummy variable counting the time a successor is waiting
    for object in activeObject.next:
        if(object.canAccept(activeObject)):                 # if a successor can accept an object
            timeWaiting=now()-object.timeLastEntityLeft     # the time it has been waiting
            # `maxTimeWaiting==0` lets the first ready successor win even with zero wait
            if(timeWaiting>maxTimeWaiting or maxTimeWaiting==0):
                maxTimeWaiting=timeWaiting
                activeObject.receiver=object    # the longest-waiting possible receiver
    #return True if the Machine is in the state of disposing and the caller is the receiver
    # NOTE(review): reads self.receiver while the loop writes activeObject.receiver —
    # equivalent as long as getActiveObject() returns self; confirm.
    return len(activeObjectQueue)>0\
            and activeObject.waitToDispose\
            and activeObject.Up\
            and (thecaller is self.receiver)
def onTxnLoss(self, txn):
    """Account for a transaction lost by the system: bump the loss and
    departure counters, record a zero observation for the transaction's
    loss key, and log the running loss rate."""
    self.numTxnsLoss += 1
    self.numTxnsDepart += 1
    self.monitor.observe(
        '%s.%s' % (BaseSystem.TXN_LOSS_KEY_PREFIX, txn.ID), 0)
    details = (txn.ID, now(), self.numTxnsLoss, self.numTxnsSched)
    self.logger.debug(
        'Txn %s loss from system at %s, loss rate=%s/%s' % details)
def haveToDispose(self,callerObject=None):
    """Return True if this object can hand over its content to *callerObject*.

    Disposal is refused while the head entity is still a "Batch" (i.e. not
    yet split into sub-batches); with several successors, only the
    longest-waiting ready receiver gets a True answer.
    """
    # get active and the receiver object
    activeObject=self.getActiveObject()
    activeObjectQueue=self.getActiveObjectQueue()
    receiverObject=activeObject.getReceiverObject()
    #if we have only one successor just check if object waits to dispose and also is up
    # this is done to achieve better (cpu) processing time
    if(len(activeObject.next)==1 or callerObject==None):
        return len(activeObjectQueue)>0 and activeObjectQueue[0].type!="Batch"
    thecaller=callerObject
    #give the entity to the successor that is waiting for the most time.
    #plant does not do this in every occasion!
    maxTimeWaiting=0
    for object in activeObject.next:
        if(object.canAccept()):     # if the object can accept
            timeWaiting=now()-object.timeLastEntityLeft     # compare the time that it has been waiting
            if(timeWaiting>maxTimeWaiting or maxTimeWaiting==0):    # with the others'
                maxTimeWaiting=timeWaiting
                self.receiver=object    # and update the successorIndex to the index of this object
    #return true only to the predecessor from which the queue will take
    # refresh the receiver after the loop may have changed self.receiver
    # NOTE(review): multi-successor branch compares len(self.Res.activeQ)
    # against numberOfSubBatches while the single-successor branch only
    # checks non-emptiness — confirm this asymmetry is intended.
    receiverObject=activeObject.getReceiverObject()
    return len(self.Res.activeQ)==self.numberOfSubBatches and \
            (thecaller is receiverObject) and activeObjectQueue[0].type!="Batch"
def run(self):
    """SimPy process: generate Entities forever, one per inter-arrival
    interval, appending each to this Source's internal queue."""
    # get active object and its queue
    activeObject = self.getActiveObject()
    internalQueue = self.getActiveObjectQueue()
    while True:
        # create the Entity object and stamp its timing attributes
        entity = self.createEntity()
        entity.creationTime = now()     # current simulation time as creation time
        entity.startTime = now()        # current simulation time as start time
        entity.currentStation = self    # the Entity starts its life here
        G.EntityList.append(entity)
        self.outputTrace(entity.name, "generated")  # output the trace
        internalQueue.append(entity)    # place the entity in the resource
        self.numberOfArrivals += 1      # one new arrival at this Source
        G.numberOfEntities += 1
        # sleep until the next arrival is due
        yield hold, self, self.calculateInterarrivalTime()
def show_progress(self):
    """Shows a progress indicator during the run of the simulation."""
    if not self.progress:
        return
    percent_done = now() / self.run_time * 100
    # rewind over the previously printed "NN.NN%" and overwrite it in place
    sys.stdout.write("\b" * 6)
    sys.stdout.write("%5.2f%%" % percent_done)
    sys.stdout.flush()
def haveToDispose(self, callerObject=None):
    """Return True if this object can hand an Entity to *callerObject*;
    with several successors, only the longest-waiting ready receiver
    gets a True answer."""
    # get active object and its queue
    activeObject=self.getActiveObject()
    activeObjectQueue=self.getActiveObjectQueue()
    #if we have only one possible receiver just check if the Queue holds one or more entities
    if(len(activeObject.next)==1 or callerObject==None):
        return len(self.Res.activeQ)>0
    thecaller=callerObject
    #give the entity to the possible receiver that is waiting for the most time.
    #plant does not do this in every occasion!
    maxTimeWaiting=0
    # loop through the object in the successor list
    for object in activeObject.next:
        if(object.canAccept()):     # if the object can accept
            timeWaiting=now()-object.timeLastEntityLeft     # compare the time that it has been waiting
            # `maxTimeWaiting==0` lets the first ready successor win even with zero wait
            if(timeWaiting>maxTimeWaiting or maxTimeWaiting==0):    # with the others'
                maxTimeWaiting=timeWaiting
                self.receiver=object    # and update the receiver to the index of this object
    #return true only to the predecessor from which the queue will take
    # NOTE(review): assumes getReceiverObject() reflects the self.receiver
    # chosen in the loop above — confirm against the class definition.
    receiverObject=activeObject.getReceiverObject()
    return len(self.Res.activeQ)>0 and (thecaller is receiverObject)
def deliver(self):  # an "offeror" PEM
    """SimPy process: refill the shared `stock` level by a fixed amount
    every fixed lead time, reporting each delivery."""
    refill_lead = 10.0      # time between refills
    refill_amount = 10.0    # amount in each refill
    while True:
        yield put, self, stock, refill_amount
        print('at %6.4f, add %6.4f units, now amount = %6.4f'
              % (now(), refill_amount, stock.amount))
        yield hold, self, refill_lead
def commit(self):
    """SimPy process: broadcast a COMMITTED decision (with timestamp) to
    every participant proxy, releasing each connection as it goes."""
    decision = (self.attemptNo, TPCProtocol.COMMITTED, self.ts)
    for proxy in self.proxies:
        self.sendMsg(proxy, 'msg', decision)
        self.released(proxy.conn)
    self.logger.debug(
        '%s "sent commit requests" at %s' % (self.ID, now()))
    yield hold, self
def abort(self):
    """SimPy process: broadcast an ABORTED decision to every participant
    proxy, releasing each connection as it goes."""
    decision = (self.attemptNo, TPCProtocol.ABORTED, None)
    for proxy in self.proxies:
        self.sendMsg(proxy, 'msg', decision)
        self.released(proxy.conn)
    self.logger.debug(
        '%s "sent abort requests" at %s' % (self.ID, now()))
    yield hold, self
def lock(self, lockable, state, timeout=infinite): self.logger.debug( '%s lock %r with %s at %s' % (self.ID, lockable, lockable.STATESTRS[state], now())) #try acquire the lockable acquired = lockable.tryAcquire(self, state) if acquired: self.logger.debug('%s acquired %s at %s' % (self.ID, lockable, now())) #notify deadlock detection self.acquired(lockable) return #cannot acquire, block until timeout self.logger.debug('%s "blocked" on %r at %s' % (self.ID, lockable, now())) # here we first ensure all owners who blocked us are all alive # and no deadlock lockable.ensureOwnersAlive() self.tryWait(lockable) # we pass the tests, now we wait events = [] if timeout != infinite: timeoutEvt = Alarm.setOnetime(timeout, 'lock-wait') events.append(timeoutEvt) self.monitor.start(LockThread.LOCK_BLOCK_KEY) blockEvt = lockable.block(self, state) events.append(blockEvt) self.monitor.observe(LockThread.LOCK_BLOCK_HEIGHT_KEY, self.height) self.monitor.observe(LockThread.LOCK_BLOCK_WIDTH_KEY, self.width) yield waitevent, self, events # we are waked up self.endWait(lockable) self.monitor.stop(LockThread.LOCK_BLOCK_KEY) if timeout != infinite: if timeoutEvt in self.eventsFired: self.logger.debug('%s "timedout" on %r at %s' % (self.ID, lockable, now())) raise TimeoutException(lockable, Lockable.STATESTRS[state]) #we are already the owner of the lock assert lockable.isLockedBy(self) and lockable.isState(state), \ ('%s waked up but is not the owner of %r with state %s' %(self.ID, lockable, Lockable.STATESTRS[state])) self.logger.debug('%s acquired %r after wait at %s' % (self.ID, lockable, now())) #notify deadlock detection self.acquired(lockable)
def loop(self):
    """SimPy process: signal ``self.event`` once per ``self.interval``,
    with a random per-period drift, until ``self.until`` (negative means
    run forever)."""
    remaining = 0
    while self.until < 0 or now() < self.until:
        # wait out the rest of the previous period, then the drift
        yield hold, self, remaining
        drift = self.rgen.next()
        yield hold, self, drift
        self.event.signal()
        remaining = self.interval - drift
def abort(self, content):
    """SimPy sub-process: release every held lock, mark this participant's
    progress as ABORTED for the given attempt, and log the abort."""
    attemptNo = content[0]
    for held in self.locks:
        for step in self.unlock(held):
            yield step
    self.progress = (attemptNo, TPCProtocol.ABORTED)
    self.logger.debug('%s aborted with attemptNo %s at %s'
                      % (self.ID, attemptNo, now()))
def consume(self):  # the ConsumerD PEM
    """SimPy process: repeatedly take three widgets from `buf`, report
    their weights, then idle for 11 time units."""
    batch_size = 3
    while True:
        yield get, self, buf, batch_size
        assert len(self.got) == batch_size
        weights = [x.weight for x in self.got]
        print('%s Get widget weights %s' % (now(), weights))
        yield hold, self, 11
def read(self, itemID, attr):
    """SimPy sub-process: acquire a shared (read) lock on *itemID*.

    Local items are locked directly; remote items go through a 2PC proxy
    on the owning host, waiting (with timeout) for the proxy's reply.
    Raises the proxy-reported exception on a failed remote acquisition.
    `attr` is accepted but not used in this code path.
    """
    if itemID.gid in self.snode.groups:
        #the item is on the snode
        self.monitor.start('local.lock.acquire.shared')
        item = self.snode.groups[itemID.gid][itemID]
        self.logger.debug('%s read %s from local at %s'
                          %(self.ID, itemID, now()))
        try:
            for step in self.lock(item, Lockable.SHARED):
                yield step
        except Exception as e:
            # stop the latency monitor before propagating the failure
            self.monitor.stop('local.lock.acquire.shared')
            raise e
        self.locks.add(item)
        self.monitor.stop('local.lock.acquire.shared')
        self.logger.debug('%s read %s from local "success" at %s'
                          %(self.ID, itemID, now()))
    else:
        #the item is on another snode
        cnode = self.snode.cnode
        host = cnode.groupLocations[itemID.gid]
        proxy = host.createProxy(self.txn, self)
        self.proxies.add(proxy)
        self.logger.debug('%s send read request %s to %s at %s'
                          %(self.ID, itemID, proxy.ID, now()))
        self.sendMsg(proxy, 'msg', (self.attemptNo, TPCProtocol.RUNNING, itemID))
        self.released(proxy.conn)
        self.tryWait(proxy.conn)
        # absolute deadline for the proxy's answer
        until = now() + self.snode.configs.get('tpc.conn.timeout', infinite)
        while True:
            if not self.checkMsg('msg'):
                for step in self.waitMsg('msg', until - now()):
                    yield step
            succeeded = False
            for content in self.popContents('msg'):
                p, attemptNo, label, result, e = content
                assert p == proxy and attemptNo <= self.attemptNo
                # ignore stale replies from earlier attempts
                if attemptNo < self.attemptNo:
                    continue
                assert label == TPCProtocol.RUNNING
                if result == TPCProtocol.FAILED:
                    #the only reason it fails should be deadlock
                    self.logger.debug(
                        '%s acquire read lock %s from %s "failed" at %s'
                        %(self.ID, itemID, proxy.ID, now()))
                    raise e
                elif result == TPCProtocol.SUCCEEDED:
                    succeeded = True
                    break
                else:
                    raise ValueError('Invalid result for proxy message: %s'
                                     %result)
            if succeeded:
                self.logger.debug(
                    '%s acquire read lock %s from %s "succeeded" at %s'
                    %(self.ID, itemID, proxy.ID, now()))
                self.endWait(proxy.conn)
                self.acquired(proxy.conn)
                break
def run(self):
    """SimPy process: batch newly arrived transactions once per epoch,
    submit each batch to the Paxos runner, and start execution threads
    for transactions in every learned instance (in instance order)."""
    periodEvent = Alarm.setPeriodic(self.eLen, name='epoch', drift=self.skew)
    lastEpochTime = 0
    count = 0
    lastBatch = False
    while True:
        #handle batch transaction event
        if now() > lastEpochTime + self.eLen and not lastBatch:
            batch = Batch('%s-%s'%(self, count))
            while len(self.newTxns) > 0:
                txn = self.newTxns.pop()
                batch.append(txn)
            #propose the txn for instance
            self.monitor.start('order.consensus.%s'%batch)
            self.cnode.paxosPRunner.addRequest(batch)
            lastEpochTime += self.eLen
            count += 1
            self.logger.debug('%s propose new batch %s at %s'
                              %(self.ID, batch, now()))
            if self.shouldClose:
                # the batch just proposed is the final one
                self.logger.debug('%s sending last batch at %s'
                                  %(self, now()))
                lastBatch = True
        #handle new instance: consume learned instances in order
        instances = self.cnode.paxosLearner.instances
        while self.nextIID in instances:
            readyBatch = instances[self.nextIID]
            # only stop the consensus monitor for batches this node proposed
            if self.ID in readyBatch.ID:
                self.monitor.stop('order.consensus.%s'%readyBatch)
            if not readyBatch.isEmpty():
                self.logger.debug('%s execute new batch %s at %s'
                                  %(self.ID, readyBatch, now()))
                for txn in readyBatch:
                    self.lockingQueue.append(txn)
                    thread = StorageNode.TxnStarter(self, txn)
                    thread.start()
            self.nextIID += 1
        #garbage collection of old instances
        # NOTE(review): `self.nextIID / 2` relies on Python 2 integer
        # division — confirm before porting to Python 3.
        if len(instances) > 1000:
            for i in range(self.gcID, self.nextIID / 2):
                del instances[i]
            self.gcID = self.nextIID / 2
        #wait for new event
        yield waitevent, self, \
            (periodEvent, self.cnode.paxosLearner.newInstanceEvent)
def setValue(self, value): """ None <- setValue(value) set the value on the object """ # is called at each time step if the actor's readMode is active from SimPy.Simulation import now print 'setting ', self.name, 'for', self.name, 'at:', now( ), 'to', value
def main():
    """Script entry point: parse args, load config, build and run the
    simulated system, optionally verify it, and print profiling results.

    Usage: python sim.py <config dir> [--verify]
    """
    start = time.time()     # wall-clock start, for the final timing report
    parser = CustomArgsParser(optFlags=['--verify'])
    parser.parse(sys.argv[1:])
    if len(parser.getPosArgs()) < 1:
        print 'python sim.py <config dir> --verify'
        sys.exit(-1)
    # read configuration from <config dir>/__config__
    path = parser.getPosArg(0)
    configFile = '%s/__config__' % path
    configs = Configuration()
    configs.read(configFile)
    if parser.getOption('--verify'):
        configs['system.should.verify'] = True
    #simulation init
    #logging.basicConfig(level=logging.DEBUG)
    logging.config.fileConfig('%s/__logcfg__' % path)
    logger = logging.getLogger(__name__)
    #simpy initialize
    initialize()
    #system initialize
    RTI.initialize(configs)
    txnGenCls = loadClass(configs['txn.gen.impl'])
    txnGen = txnGenCls(configs)
    systemCls = loadClass(configs['system.impl'])
    system = systemCls(configs)
    # schedule the whole generated workload before starting
    for txn, at in txnGen.generate():
        system.schedule(txn, at)
    system.start()
    #pdb.set_trace()
    #simulate
    logger.info('\n##### START SIMULATION #####\n')
    simulate(until=configs.get('simulation.duration', 600000))
    logger.info('simulated time: %s' % now())
    logger.info('\n##### END #####\n')
    ##verify
    # NOTE(review): the bare `except:` swallows every error (including
    # KeyboardInterrupt) and only logs — confirm this is intended.
    try:
        if parser.getOption('--verify'):
            logger.info('\n##### START VERIFICATION #####\n')
            v = Verifier()
            v.check(system)
            logger.info('VERIFICATION SUCCEEDS\n')
            logger.info('\n##### END #####\n')
    except:
        logger.error('Verification failed.')
    #get profile
    logger.info('\n##### PROFILING RESULTS #####\n')
    system.profile()
    #system.printMonitor()
    logger.info('\n##### END #####\n')
    end = time.time()
    logger.info('\n##### SIMULATION TIME: %s seconds #####\n' % (end - start))
def onTxnDepart(self, txn):
    """Account for *txn* leaving the system: stop its execution monitor,
    bump the departure counter, and drop it from the running set.
    Transactions not currently running are ignored."""
    if txn not in self.txnsRunning:
        return
    self.numTxnsDepart += 1
    execKey = '%s.%s' % (BaseSystem.TXN_EXEC_KEY_PREFIX, txn.ID)
    self.monitor.stop(execKey)
    self.logger.debug(
        'Txn %s depart from system at %s, progress=D:%s/%s'
        % (txn.ID, now(), self.numTxnsDepart, self.numTxnsSched))
    self.txnsRunning.remove(txn)
def demand(self):  # a "requester" PEM
    """SimPy process: once per day draw a random demand and take it from
    the shared `stock`, accumulating unmet demand in ``self.stockout``."""
    day = 1.0  # set time-step to one day
    while True:
        yield hold, self, day
        dd = normalvariate(1.20, 0.20)  # today's random demand
        ds = dd - stock.amount          # excess of demand over current stock amount
        if dd <= stock.amount:
            # can supply the full requested amount
            yield get, self, stock, dd
            print('day %6.4f, supplied %6.4f, now amount = %6.4f'
                  % (now(), dd, stock.amount))
        else:
            # can't supply requested amount: drain what is available
            yield get, self, stock, stock.amount
            self.stockout += ds  # add unsupplied demand to self.stockout
            print('day %6.4f, demand = %6.4f, shortfall = %6.4f'
                  % (now(), dd, -ds))
def onTxnArrive(self, txn):
    """Register *txn* as running: add it to the running set, start its
    execution-time monitor, and log the arrival progress."""
    self.txnsRunning.add(txn)
    execKey = '%s.%s' % (BaseSystem.TXN_EXEC_KEY_PREFIX, txn.ID)
    self.monitor.start(execKey)
    self.logger.debug(
        'Txn %s arrive in system at %s, progress=A:%s/%s'
        % (txn.ID, now(), self.numTxnsArrive, self.numTxnsSched))
    #for debug: deliberate no-op hook every 100 arrivals
    if self.numTxnsArrive % 100 == 0:
        pass
def periodic(self, interval, until=infinite, drift=('fixed', 0)): print 'set periodic alarm' alarmevent = Alarm.setPeriodic(interval, until=until, drift=drift) count = 20 while count > 0: print 'work on something else until alarm' yield waitevent, self, alarmevent print 'alarmed at %s' %now() count -= 1
def read(self, content):
    """SimPy sub-process (proxy side): serve a remote shared-read request.

    *content* is ``(attemptNo, label, itemID)``.  On a successful shared
    lock the runner is sent SUCCEEDED; on deadlock it is sent FAILED with
    the exception and this participant aborts.
    """
    attemptNo, label, itemID = content
    self.progress = (attemptNo, TPCProtocol.RUNNING)
    try:
        item = self.snode.groups[itemID.gid][itemID]
        for step in self.lock(item, Lockable.SHARED):
            yield step
        self.locks.add(item)
        self.logger.debug(
            '%s read item %s succeeded at %s' %(self.ID, item, now()))
        self.sendMsg(self.runner, 'msg',
                     (self, attemptNo, label, TPCProtocol.SUCCEEDED, None))
    except BThread.DeadlockException as e:
        # only self.lock raises DeadlockException, so `item` is bound here
        self.logger.debug(
            '%s read items %s failed at %s' %(self.ID, item, now()))
        #we know it will abort
        self.sendMsg(self.runner, 'msg',
                     (self, attemptNo, label, TPCProtocol.FAILED, e))
        for step in self.abort(content):
            yield step