def initialize(self):
    """Reset the Queue's run-time state before a simulation run."""
    # default CoreObject initialisation (not the Process __init__)
    CoreObject.initialize(self)
    # internal queue of the object, modelled as a simpy Resource
    self.Res = simpy.Resource(self.env, self.capacity)
    # event through which the Router signals that a load operator became available
    self.loadOperatorAvailable = self.env.event()
def __init__(self, id, name, processingTime=None):
    """Assembly station: joins parts onto frames.

    processingTime: distribution parameters for the processing time
    (defaults to a Fixed distribution with mean 0).
    """
    processingTime = processingTime or {
        'distributionType': 'Fixed',
        'mean': 0,
        'stdev': 0,
        'min': 0,
    }
    # a Normal distribution needs an upper bound; derive one if absent
    if processingTime['distributionType'] == 'Normal' and processingTime.get('max') is None:
        processingTime['max'] = processingTime['mean'] + 5 * processingTime['stdev']
    CoreObject.__init__(self, id, name)
    self.type = "Assembly"      # the type of the object
    self.rng = RandomNumberGenerator(self, **processingTime)
    # routing structures
    self.next = []              # next objects in the flow
    self.previous = []          # previous objects in the flow
    self.previousPart = []      # previous objects that send parts
    self.previousFrame = []     # previous objects that send frames
    self.nextIds = []           # ids of the next objects in the flow
    self.previousIds = []       # ids of the previous objects in the flow
    # XXX previousPartIds and previousFrameIds are not used
    self.previousPartIds = []
    self.previousFrameIds = []
    # statistics of multiple runs
    self.Waiting = []
    self.Working = []
    self.Blockage = []
    # used for the loading of machines; the object is not blocked by default
    self.exitAssignedToReceiver = False
def __init__(self, id, name, distribution='Fixed', mean=1, stdev=0.1, min=0, max=5):
    """Dismantle station: splits a frame into its parts."""
    CoreObject.__init__(self, id, name)
    self.type = "Dismantle"         # the type of the object
    self.distType = distribution    # distribution followed by the processing time
    self.rng = RandomNumberGenerator(self, self.distType)
    # configure the random number generator
    for attribute, value in (('mean', mean), ('stdev', stdev), ('min', min), ('max', max)):
        setattr(self.rng, attribute, value)
    # routing structures
    self.previous = []      # previous objects in the flow
    self.previousIds = []   # ids of the previous objects in the flow
    self.nextPart = []      # next objects that receive parts
    self.nextFrame = []     # next objects that receive frames
    self.nextIds = []       # ids of the next objects in the flow
    self.nextPartIds = []   # ids of the next objects that receive parts
    self.nextFrameIds = []  # ids of the next objects that receive frames
    self.next = []
    # statistics of multiple runs
    self.Waiting = []
    self.Working = []
    self.Blockage = []
    # used for the loading of machines; not blocked by default
    self.exitAssignedToReceiver = False
def initialize(self):
    """Reset the Conveyer's run-time state before a simulation run."""
    CoreObject.initialize(self)
    # internal queue; the conveyer is limited by physical length, not entity
    # count, so the Resource capacity is unbounded.
    # BUG FIX: capacity was the string 'inf', which simpy rejects — it must
    # be the number float('inf') (as the Exit initializer already does).
    self.Res = simpy.Resource(self.env, capacity=float('inf'))
    self.position = []              # positions of the entities on the conveyer
    self.timeLastMoveHappened = 0   # last time a move was performed; movement is
                                    # continuous in reality, discretised in simulation,
                                    # so we compute where entities should go on each move
    self.conveyerMover = ConveyerMover(self)    # process triggered when an entity reaches
                                                # the end or a place is freed; performs the move
    self.entityLastReachedEnd = None    # entity that last reached the end of the conveyer
    self.timeBlockageStarted = self.env.now     # time the conveyer got blocked; plant considers
                                                # it blocked even if one more entity fits
    self.wasFull = False            # set when the conveyer gets full so the blockage time
                                    # is counted when an entity is disposed
    self.currentRequestedLength = 0             # length of the entity that last requested the conveyer
    self.currentAvailableLength = self.length   # available length at the end of the conveyer
    self.predecessorIndex = 0       # index of the predecessor to take the next entity from
    self.successorIndex = 0         # index of the successor to dispose the next entity to
    self.requestingEntities = []    # entities requesting space on the conveyer
    # signal that notifies the conveyer that its move is completed
    self.moveEnd = self.env.event()
def initialize(self):
    """Initialise run-time state; the internal queue holds one slot per sub-batch."""
    # default CoreObject functionality
    CoreObject.initialize(self)
    # internal resource (Queue) functionality
    self.Res = simpy.Resource(self.env, self.numberOfSubBatches)
    # signals this object listens for
    for signal in ('isRequested', 'interruptionStart', 'initialWIP'):
        self.expectedSignals[signal] = 1
def __init__(self, id='', name='', processingTime=None, **kw):
    """Dismantle station: splits a frame into its parts."""
    self.type = 'Dismantle'
    # routing structures
    self.previous = []      # previous objects in the flow
    self.previousIds = []   # ids of the previous objects in the flow
    self.nextPart = []      # next objects that receive parts
    self.nextFrame = []     # next objects that receive frames
    self.nextIds = []       # ids of the next objects in the flow
    self.nextPartIds = []   # ids of the next objects that receive parts
    self.nextFrameIds = []  # ids of the next objects that receive frames
    self.next = []
    # statistics of multiple runs
    self.Waiting = []
    self.Working = []
    self.Blockage = []
    # used for the loading of operated machines: the giver objects have to be
    # blocked while the machine is being loaded; not blocked by default
    self.exitAssignedToReceiver = False
    CoreObject.__init__(self, id, name)
    from Globals import G
    processingTime = processingTime or {
        'distributionType': 'Fixed',
        'mean': 0,
        'stdev': 0,
        'min': 0,
    }
    # a Normal distribution needs an upper bound; derive one if absent
    if processingTime['distributionType'] == 'Normal' and processingTime.get('max') is None:
        processingTime['max'] = float(processingTime['mean']) + 5 * float(processingTime['stdev'])
    self.rng = RandomNumberGenerator(self, **processingTime)
def __init__(self, id='', name='', processingTime=None, inputsDict=None, **kw):
    """Assembly station: joins parts onto frames."""
    self.type = "Assembly"
    # routing structures
    self.next = []              # next objects in the flow
    self.previous = []          # previous objects in the flow
    self.previousPart = []      # previous objects that send parts
    self.previousFrame = []     # previous objects that send frames
    self.nextIds = []           # ids of the next objects in the flow
    self.previousIds = []       # ids of the previous objects in the flow
    # statistics of multiple runs
    self.Waiting = []
    self.Working = []
    self.Blockage = []
    processingTime = processingTime or {'Fixed': {'mean': 0}}
    # a Normal distribution needs an upper bound; derive one if absent
    if 'Normal' in processingTime and processingTime['Normal'].get('max') is None:
        mean = float(processingTime['Normal']['mean'])
        stdev = float(processingTime['Normal']['stdev'])
        processingTime['Normal']['max'] = mean + 5 * stdev
    CoreObject.__init__(self, id, name)
    self.rng = RandomNumberGenerator(self, processingTime)
    # used for the loading of operated machines: the giver objects have to be
    # blocked while the machine is being loaded; not blocked by default
    self.exitAssignedToReceiver = False
    from Globals import G
    G.AssemblyList.append(self)
def __init__(self, id='', name='', capacity=1, isDummy=False, schedulingRule="FIFO", level=None, gatherWipStat=False, **kw):
    """A buffer holding entities, sorted according to a scheduling rule."""
    self.type = "Queue"
    CoreObject.__init__(self, id, name)
    # negative or infinite capacity means an unbounded queue
    capacity = float(capacity)
    self.capacity = float("inf") if (capacity < 0 or capacity == float("inf")) else int(capacity)
    self.isDummy = bool(int(isDummy))       # True for the dummy first Queue
    self.schedulingRule = schedulingRule    # how entities are sorted in the queue
    self.multipleCriterionList = []         # criteria when the rule is multi-criterion
    SRlist = [schedulingRule]
    # "MC-a-b" encodes multiple criteria, '-' separated
    if schedulingRule.startswith("MC"):
        SRlist = schedulingRule.split("-")
        self.schedulingRule = SRlist.pop(0)
        self.multipleCriterionList = SRlist
    for scheduling_rule in SRlist:
        if scheduling_rule not in self.getSupportedSchedulingRules():
            raise ValueError("Unknown scheduling rule %s for %s" % (scheduling_rule, id))
    self.gatherWipStat = gatherWipStat
    # populated by an event generator
    self.wip_stat_list = []
    # trigger level for the reallocation of operators
    if level:
        assert level <= self.capacity, "the level cannot be bigger than the capacity of the queue"
        self.level = level
    from Globals import G
    G.QueueList.append(self)
def initialize(self):
    """Reset the Assembly object's run-time state before a simulation run."""
    CoreObject.initialize(self)
    self.waitToDispose = False       # waiting to dispose an entity
    self.waitToDisposePart = False   # waiting to dispose a part
    self.waitToDisposeFrame = False  # waiting to dispose a frame
    self.Up = True                   # False while the object is in failure ("Down")
    self.currentEntity = None
    # failure bookkeeping
    self.totalFailureTime = 0        # total failure time
    self.timeLastFailure = 0         # time the last failure started
    self.timeLastFailureEnded = 0    # time the last failure ended
    self.downTimeProcessingCurrentEntity = 0            # down time while processing the current entity
    self.downTimeInTryingToReleaseCurrentEntity = 0     # down time while trying to release it
    self.downTimeInCurrentEntity = 0                    # total down time while holding the current entity
    self.timeLastEntityLeft = 0      # last time an entity left the object
    self.processingTimeOfCurrentEntity = 0  # total processing time the current entity required
    # time/throughput bookkeeping
    self.totalBlockageTime = 0
    self.totalWaitingTime = 0
    self.totalWorkingTime = 0
    self.completedJobs = 0
    self.timeLastEntityEnded = 0     # last time an entity ended processing here
    self.timeLastEntityEntered = 0   # last time an entity entered the object
    self.timeLastFrameWasFull = 0    # time the last frame was full, i.e. assembly started
    self.nameLastFrameWasFull = ""   # name of that frame
    self.nameLastEntityEntered = ""  # name of the last frame that entered processing
    self.nameLastEntityEnded = ""    # name of the last frame that ended processing
    # internal queue of the object.
    # BUG FIX: capacity was the string 'inf', which simpy rejects — it must
    # be the number float('inf').
    self.Res = simpy.Resource(self.env, capacity=float('inf'))
    self.Res.users = []
def initialize(self):
    """Initialise the decomposition dummy object.

    It may receive from any object in the model, so ``previous`` is the
    global object list.
    """
    self.previous = G.ObjList
    self.next = []
    CoreObject.initialize(self)     # default CoreObject functionality
    # dummy object, so an infinite-capacity internal queue is assumed.
    # BUG FIX: capacity was the string 'inf'; simpy requires a number,
    # i.e. float('inf').
    self.Res = simpy.Resource(self.env, capacity=float('inf'))
    self.newlyCreatedComponents = []    # components just after decomposition
    self.orderToBeDecomposed = None
def initialize(self):
    """Reset the Source's run-time state (SimPy 2 style)."""
    # using the Process __init__ and not the CoreObject __init__
    CoreObject.initialize(self)
    # initialize the internal Queue (type Resource) of the Source
    # NOTE(review): `Resource` and `infinity` are SimPy 2 names imported
    # elsewhere in the file — confirm against the file's import block
    self.Res=Resource(capacity=infinity)
    self.Res.activeQ=[]     # entities currently held by the Source
    self.Res.waitQ=[]       # entities waiting for the resource
def initialize(self):
    """Initialise; also reset the global list of batches awaiting reassembly."""
    from Globals import G
    G.BatchWaitingList = []     # batches waiting to be reassembled
    # default CoreObject functionality
    CoreObject.initialize(self)
    # internal resource (Queue): one slot per sub-batch
    self.Res = simpy.Resource(self.env, self.numberOfSubBatches)
    # signals this object listens for
    for signal in ('isRequested', 'interruptionStart', 'initialWIP'):
        self.expectedSignals[signal] = 1
def initialize(self):
    """Reset the Queue's run-time state and declare the signals it expects."""
    # default CoreObject initialisation (not the Process __init__)
    CoreObject.initialize(self)
    # internal queue of the object, modelled as a simpy Resource
    self.Res = simpy.Resource(self.env, self.capacity)
    # event through which the Router signals that a load operator became available
    self.loadOperatorAvailable = self.env.event()
    # signals this object listens for
    for signal in ('isRequested', 'canDispose', 'loadOperatorAvailable'):
        self.expectedSignals[signal] = 1
def __init__(self, id, name, **kw):
    """Exit point of the model; gathers throughput statistics."""
    self.type = "Exit"      # XXX needed ?
    # statistics of multiple runs
    self.Exits = []
    self.UnitExits = []
    self.Lifespan = []
    self.TaktTime = []
    CoreObject.__init__(self, id, name)
    from Globals import G
    G.ExitList.append(self)
def initialize(self):
    """Reset the Conveyer's run-time state (SimPy 2 style)."""
    Process.__init__(self)
    CoreObject.initialize(self)
    # NOTE(review): SimPy 2 Resource; `infinity` is imported elsewhere in the file
    self.Res=Resource(capacity=infinity)
    self.Up=True                    # False while the object is in failure ("Down")
    self.currentEntity=None
    self.totalBlockageTime=0        # total blockage time
    self.totalFailureTime=0         # total failure time
    self.totalWaitingTime=0         # total waiting time
    self.totalWorkingTime=0         # total working time
    self.completedJobs=0            # number of completed jobs
    self.timeLastEntityEnded=0      # last time an entity ended processing here
    self.nameLastEntityEnded=""     # name of the last entity that ended processing
    self.timeLastEntityEntered=0    # last time an entity entered the object
    self.nameLastEntityEntered=""   # name of the last entity that entered
    self.timeLastFailure=0          # time the last failure started
    self.timeLastFailureEnded=0     # time the last failure ended
    self.downTimeProcessingCurrentEntity=0          # down time while processing the current entity
    self.downTimeInTryingToReleaseCurrentEntity=0   # down time while trying to release it
    self.downTimeInCurrentEntity=0  # total down time while holding the current entity
    self.timeLastEntityLeft=0       # last time an entity left the object
    self.processingTimeOfCurrentEntity=0    # total processing time the current entity required
    self.waitToDispose=False        # True while waiting to dispose an entity
    self.position=[]                # positions of the entities on the conveyer
    self.timeLastMoveHappened=0     # last time a move was performed; movement is
                                    # continuous in reality, discretised in simulation
    self.timeToReachEnd=0           # if there are entities but none at the end: time the
                                    # first one reaches the end and becomes disposable
    self.timeToBecomeAvailable=0    # time until the back of the conveyer frees up;
                                    # depends on the length of the requesting entity
    self.conveyerMover=ConveyerMover(self)  # process triggered when an entity reaches the
                                            # end or a place is freed; performs the move
    self.call=False                 # True when the ConveyerMover should be triggered
    self.entityLastReachedEnd=None  # entity that last reached the end of the conveyer
    self.timeBlockageStarted=now()  # time the conveyer got blocked; plant considers it
                                    # blocked even if one more entity fits
    self.wasFull=False              # set when the conveyer got full, so blockage time is
                                    # counted when an entity is disposed
    self.currentRequestedLength=0   # length of the entity that last requested the conveyer
    self.currentAvailableLength=self.length     # available length at the end of the conveyer
    self.predecessorIndex=0         # index of the predecessor to take the next entity from
    self.successorIndex=0           # index of the successor to dispose the next entity to
def initialize(self):
    """Reset the Exit's throughput counters before a run."""
    # default CoreObject initialisation (not the Process __init__)
    CoreObject.initialize(self)
    # internal queue of the Exit; never refuses entities, hence infinite capacity
    self.Res = simpy.Resource(self.env, capacity=float('inf'))
    # number of entities that exited through this exit
    # XXX bug: cannot output as json when nothing has exited.
    self.numOfExits = 0
    self.totalNumberOfUnitsExited = 0
    self.totalLifespan = 0
    self.totalTaktTime = 0              # total time between two consecutive exits
    self.intervalThroughPutList = []
def removeEntity(self, entity=None):
    """Default removal; clear the dispose flag and try to pull from a giver."""
    station = self.getActiveObject()
    removed = CoreObject.removeEntity(self, entity)   # run the default method
    station.waitToDispose = False                     # no longer waiting to dispose
    if station.canAccept():
        station.signalGiver()
    return removed
def removeEntity(self, entity=None):
    """Default removal, then trace and signal a giver if we can accept."""
    removed = CoreObject.removeEntity(self, entity)   # run the default method
    self.waitToDispose = False
    if self.canAccept():
        self.printTrace(self.id, attemptSignalGiver='(removeEntity)')
        self.signalGiver()
    return removed
def __init__(self, id, name=None):
    """Exit point of the model (older variant)."""
    name = name or id
    CoreObject.__init__(self, id, name)
    # index of the predecessor from which the Exit will take an entity next
    self.predecessorIndex = 0
    # general properties of the Exit
    self.type = "Exit"      # XXX needed ?
    # statistics of multiple runs
    self.Exits = []
    self.UnitExits = []
    self.Lifespan = []
    self.TaktTime = []
def __init__(self, id, name, processingTime=None, numberOfSubBatches=1, operator='None'):
    """Decompose a batch into sub-batches.

    processingTime: distribution parameters (default Fixed, mean 1).
    numberOfSubBatches: capacity of the object.
    operator: operator resource of the machine.
    """
    CoreObject.__init__(self, id, name)
    self.type = "BatchDecomposition"
    if not processingTime:
        processingTime = {'distributionType': 'Fixed', 'mean': 1}
    # a Normal distribution needs an upper bound; derive one if absent.
    # ROBUSTNESS FIX: 'stdev' may be missing from a caller-supplied dict
    # (the default dict has none), which used to raise KeyError here.
    if processingTime['distributionType'] == 'Normal' and processingTime.get('max') is None:
        processingTime['max'] = processingTime['mean'] + 5 * processingTime.get('stdev', 0)
    self.numberOfSubBatches = numberOfSubBatches    # capacity of the object
    self.operator = operator                        # operator resource of the machine
    # processing (and failure) time generator
    self.rng = RandomNumberGenerator(self, **processingTime)
def removeEntity(self, entity=None):
    """Default removal, then attempt to signal a giver."""
    activeEntity = CoreObject.removeEntity(self, entity)    # run the default method
    self.waitToDispose = False
    if not self.canAccept():
        return activeEntity
    self.printTrace(self.id, attemptSignalGiver='(removeEntity)')
    self.signalGiver()
    return activeEntity
def __init__(self, id, name, length, speed, **kw):
    """Conveyer of the given length (meters) moving at the given speed (m/sec)."""
    CoreObject.__init__(self, id, name)
    self.type = "Conveyer"
    self.speed = float(speed)       # speed of the conveyer in m/sec
    self.length = float(length)     # length of the conveyer in meters
    self.numberOfMoves = 0          # total units moved through the whole simulation time
    self.predecessorIndex = 0       # index of the predecessor to take the next entity from
    self.successorIndex = 0         # index of the successor to dispose the next entity to
    # used for the loading of operated machines: the giver objects have to be
    # blocked while the machine is being loaded; not blocked by default
    self.exitAssignedToReceiver = False
    from Globals import G
    G.ConveyerList.append(self)
def getEntity(self):
    """Receive an entity and place it at the start of the conveyer (SimPy 2 variant)."""
    entering = CoreObject.getEntity(self)
    self.position.append(0)     # the entity enters at position 0
    # if the conveyer just became full, start counting blockage
    if self.isFull():
        self.timeBlockageStarted = now()
        self.wasFull = True
    return entering
def removeEntity(self, entity=None):
    """Remove an entity; then, if acceptance is possible, signal the giver."""
    activeObject = self.getActiveObject()
    removed = CoreObject.removeEntity(self, entity)   # run the default method
    # the station no longer waits to dispose
    activeObject.waitToDispose = False
    if activeObject.canAccept():
        activeObject.signalGiver()
    return removed
def __init__(self, id='', name='', capacity=1, isDummy=False, schedulingRule="FIFO", level=None, gatherWipStat=False, **kw):
    """A buffer that stores entities according to a scheduling rule."""
    self.type = "Queue"
    CoreObject.__init__(self, id, name)
    capacity = float(capacity)
    if capacity < 0 or capacity == float("inf"):
        # unbounded queue
        self.capacity = float("inf")
    else:
        self.capacity = int(capacity)
    self.isDummy = bool(int(isDummy))       # True for the dummy first Queue
    self.schedulingRule = schedulingRule    # rule used to sort the entities
    self.multipleCriterionList = []         # criteria when the rule is multi-criterion
    criteria = [schedulingRule]
    if schedulingRule.startswith("MC"):
        # "MC-a-b" encodes multiple criteria, '-' separated
        criteria = schedulingRule.split("-")
        self.schedulingRule = criteria.pop(0)
        self.multipleCriterionList = criteria
    for scheduling_rule in criteria:
        if scheduling_rule not in self.getSupportedSchedulingRules():
            raise ValueError("Unknown scheduling rule %s for %s" % (scheduling_rule, id))
    self.gatherWipStat = gatherWipStat
    self.wip_stat_list = []     # populated by an event generator
    if level:
        # trigger level for the reallocation of operators
        assert level <= self.capacity, "the level cannot be bigger than the capacity of the queue"
        self.level = level
    from Globals import G
    G.QueueList.append(self)
def __init__(self, id, name, processingTime=None, numberOfSubBatches=1, operator='None', **kw):
    """Decompose a batch into sub-batches."""
    CoreObject.__init__(self, id, name)
    self.type = "BatchDecomposition"
    processingTime = processingTime or {'Fixed': {'mean': 0}}
    # a Normal distribution needs an upper bound; derive one if absent
    if 'Normal' in processingTime and processingTime['Normal'].get('max') is None:
        mean = float(processingTime['Normal']['mean'])
        stdev = float(processingTime['Normal']['stdev'])
        processingTime['Normal']['max'] = mean + 5 * stdev
    self.numberOfSubBatches = int(numberOfSubBatches)   # capacity of the object
    self.operator = operator                            # operator resource of the machine
    # processing (and failure) time generator
    self.rng = RandomNumberGenerator(self, processingTime)
    from Globals import G
    G.BatchDecompositionList.append(self)
def removeEntity(self, entity=None):
    """Default removal, then signal a giver and/or a receiver as needed."""
    activeObject = self.getActiveObject()
    removed = CoreObject.removeEntity(self, entity)   # run the default method
    if self.canAccept():
        self.signalGiver()
    if self.haveToDispose():
        self.signalReceiver()
    return removed
def __init__(self, id, name, numberOfSubBatches=1, processingTime=None, operator='None', **kw):
    """Reassemble sub-batches into a batch.

    processingTime: distribution parameters (default Fixed, mean 1).
    """
    CoreObject.__init__(self, id, name)
    self.type = "BatchRassembly"
    if not processingTime:
        processingTime = {'distributionType': 'Fixed', 'mean': 1}
    # a Normal distribution needs an upper bound; derive one if absent.
    # ROBUSTNESS FIX: 'stdev' may be missing from a caller-supplied dict
    # (the default dict has none), which used to raise KeyError here.
    if processingTime['distributionType'] == 'Normal' and processingTime.get('max') is None:
        processingTime['max'] = float(processingTime['mean']) + 5 * float(processingTime.get('stdev', 0))
    self.numberOfSubBatches = numberOfSubBatches    # capacity of the object
    self.operator = operator                        # operator resource of the machine
    # processing (and failure) time generator
    self.rng = RandomNumberGenerator(self, **processingTime)
    from Globals import G
    G.BatchReassemblyList.append(self)
def __init__(self, id, name, numberOfSubBatches=1, processingTime=None, operator='None', outputResults=False, **kw):
    """Reassemble sub-batches into a batch."""
    CoreObject.__init__(self, id, name)
    self.type = "BatchRassembly"
    processingTime = processingTime or {'Fixed': {'mean': 0}}
    # a Normal distribution needs an upper bound; derive one if absent
    if 'Normal' in processingTime and processingTime['Normal'].get('max') is None:
        mean = float(processingTime['Normal']['mean'])
        stdev = float(processingTime['Normal']['stdev'])
        processingTime['Normal']['max'] = mean + 5 * stdev
    self.numberOfSubBatches = numberOfSubBatches    # capacity of the object
    self.operator = operator                        # operator resource of the machine
    # processing (and failure) time generator
    self.rng = RandomNumberGenerator(self, processingTime)
    from Globals import G
    G.BatchReassemblyList.append(self)
    # whether the object outputs results
    self.outputResults = bool(int(outputResults))
def __init__(self, id, name, length, speed):
    """Conveyer of the given length (meters) and speed (m/sec), older variant."""
    CoreObject.__init__(self, id, name)
    self.type = "Conveyer"
    self.speed = speed      # speed of the conveyer in m/sec
    self.length = length    # length of the conveyer in meters
    # routing structures
    self.previous = []      # previous objects in the flow
    self.next = []          # next objects in the flow
    self.nextIds = []       # ids of the next objects; always empty for an exit
    self.previousIds = []   # ids of the previous objects in the flow
    # statistics of multiple runs
    self.Waiting = []
    self.Working = []
    self.Blockage = []
    self.predecessorIndex = 0   # index of the predecessor to take the next entity from
    self.successorIndex = 0     # index of the successor to dispose the next entity to
    # used for the loading of machines; not blocked by default
    self.exitAssignedToReceiver = False
def getEntity(self):
    """Receive an entity; if the trigger level is reached, ask the Router to
    reallocate operators (best effort)."""
    activeEntity = CoreObject.getEntity(self)   # run the default behaviour
    # self.level only exists when a level was configured in __init__, hence
    # the protective try. BUG FIX: the bare `except:` also swallowed
    # SystemExit/KeyboardInterrupt; narrowed to Exception while keeping the
    # original best-effort semantics.
    try:
        if self.level:
            if len(self.getActiveObjectQueue()) == self.level and self.checkForDedicatedOperators():
                self.requestAllocation()
    except Exception:
        pass
    return activeEntity
def removeEntity(self, entity=None):
    """Remove an entity, then pull from a giver or push to a receiver as possible."""
    activeObject = self.getActiveObject()
    removed = CoreObject.removeEntity(self, entity)   # default removal behaviour
    # try to refill from upstream
    if self.canAccept():
        self.signalGiver()
    # try to hand over downstream
    if self.haveToDispose():
        self.signalReceiver()
    return removed
def __init__(self, id, name, processingTime=None, numberOfSubBatches=1, operator="None", **kw):
    """Decompose a batch into its sub-batches."""
    CoreObject.__init__(self, id, name)
    self.type = "BatchDecomposition"
    if not processingTime:
        processingTime = {"Fixed": {"mean": 0}}
    # a Normal distribution needs an upper bound; derive one if absent
    if "Normal" in processingTime and processingTime["Normal"].get("max") is None:
        processingTime["Normal"]["max"] = (
            float(processingTime["Normal"]["mean"])
            + 5 * float(processingTime["Normal"]["stdev"]))
    self.numberOfSubBatches = int(numberOfSubBatches)   # capacity of the object
    self.operator = operator                            # operator resource of the machine
    # processing (and failure) time generator
    self.rng = RandomNumberGenerator(self, processingTime)
    from Globals import G
    G.BatchDecompositionList.append(self)
def removeEntity(self, entity=None):
    """Default removal, then update the dispose flags from the queue length."""
    queue = self.getActiveObjectQueue()
    removed = CoreObject.removeEntity(self, entity)   # run the default method
    # update the flags (queue reflects the state after the removal)
    if not queue:
        self.waitToDisposeFrame = False
    elif len(queue) == 1:
        self.waitToDisposePart = False
    return removed
def getEntity(self):
    """Receive an entity; if the trigger level is reached, ask the Router to
    reallocate operators (best effort)."""
    activeEntity = CoreObject.getEntity(self)   # run the default behaviour
    # self.level only exists when a level was configured in __init__, hence
    # the protective try. BUG FIX: the bare `except:` also swallowed
    # SystemExit/KeyboardInterrupt; narrowed to Exception while keeping the
    # original best-effort semantics.
    try:
        if self.level:
            if len(self.getActiveObjectQueue()) == self.level and self.checkForDedicatedOperators():
                self.requestAllocation()
    except Exception:
        pass
    return activeEntity
def getEntity(self):
    """Place the incoming entity at the start of the conveyer."""
    self.position.append(0)     # the entity enters at position 0
    activeEntity = CoreObject.getEntity(self)
    # count the total units moved through the whole simulation time
    self.numberOfMoves += 1
    # if the conveyer just became full, start counting blockage
    if self.isFull():
        self.timeBlockageStarted = self.env.now
        self.wasFull = True
        self.printTrace(self.id, conveyerFull=str(len(self.getActiveObjectQueue())))
    return activeEntity
def __init__(self, id='', name='', processingTime=None, inputsDict=None, **kw):
    """Assembly station: attaches parts to frames."""
    self.type = "Assembly"
    # routing structures
    self.next = []              # next objects in the flow
    self.previous = []          # previous objects in the flow
    self.previousPart = []      # previous objects that send parts
    self.previousFrame = []     # previous objects that send frames
    self.nextIds = []           # ids of the next objects in the flow
    self.previousIds = []       # ids of the previous objects in the flow
    # statistics of multiple runs
    self.Waiting = []
    self.Working = []
    self.Blockage = []
    if not processingTime:
        processingTime = {'Fixed': {'mean': 0}}
    # a Normal distribution needs an upper bound; derive one if absent
    if 'Normal' in processingTime and processingTime['Normal'].get('max') is None:
        processingTime['Normal']['max'] = (
            float(processingTime['Normal']['mean'])
            + 5 * float(processingTime['Normal']['stdev']))
    CoreObject.__init__(self, id, name)
    self.rng = RandomNumberGenerator(self, processingTime)
    # used for the loading of operated machines: the giver objects have to be
    # blocked while the machine is being loaded; not blocked by default
    self.exitAssignedToReceiver = False
    from Globals import G
    G.AssemblyList.append(self)
def getEntity(self):
    """Receive a frame and absorb its parts into the internal queue."""
    activeEntity = CoreObject.getEntity(self)   # run the default method
    activeObjectQueue = self.getActiveObjectQueue()
    # get also the parts of the frame so that they can be popped
    for part in activeEntity.getFrameQueue():
        activeObjectQueue.append(part)
        part.currentStation = self
    # empty the frame's queue in place.
    # BUG FIX: the original did `activeEntity.getFrameQueue = []`, which
    # shadowed the *method* with a list and never emptied the frame.
    del activeEntity.getFrameQueue()[:]
    # move the frame to the end of the internal queue since we want the
    # frame to be disposed first
    activeObjectQueue.append(activeEntity)
    activeObjectQueue.pop(0)
    return activeEntity
def initialize(self): CoreObject.initialize(self) # queue to hold the entities all through the stay of an entity in the composite object self.Res = Resource(self.capacity) # the inner Giver that will feed the compound object receiver self.innerGiver = None # the inner object that will receive the object most recently added to the self.Res.activeQ self.innerReceiver = None # entity that will passed on to the innerReceiver self.entityForInternalProc = None # # inner queues that buffer the entities before they are handed in to the inner objects # # and receive them again after the internal processing # self.entryRes = Resource(self.capacity) # self.exitRes = Resource(self.capacity) # initialize all resources # will have to reconsider as some of the resources may have been already initialized for resource in self.resources: if not resource.isInitialized(): resource.initialize() # initialize all objects - and the entrance object # self.firstObject.initialize() for object in self.coreObjects: object.initialize()
def parseInputs(self, inputsDict):
    """Read the configuration dictionary and set up the processing-time RNG."""
    CoreObject.parseInputs(self, inputsDict)
    processingTime = inputsDict.get('processingTime', {})
    if not processingTime:
        processingTime = {
            'distributionType': 'Fixed',
            'mean': 0,
            'stdev': 0,
            'min': 0,
        }
    # a Normal distribution needs an upper bound; derive one if absent
    if processingTime['distributionType'] == 'Normal' and processingTime.get('max') is None:
        processingTime['max'] = float(processingTime['mean']) + 5 * float(processingTime['stdev'])
    self.rng = RandomNumberGenerator(self, **processingTime)
    # used for the loading of operated machines: the giver objects have to be
    # blocked while the machine is being loaded; not blocked by default
    self.exitAssignedToReceiver = False
    from Globals import G
    G.AssemblyList.append(self)
def getEntity(self):
    """Remove the next entity from the Exit and update the exit statistics.

    Runs the default CoreObject.getEntity, drops the entity from the
    router's pending list (when a router is in use), accumulates lifespan,
    exit-count, units-exited and takt-time statistics, and empties the
    internal resource queue.
    """
    activeEntity = CoreObject.getEntity(self)   # default behaviour first
    from Globals import G
    # if the entity is in the G.pendingEntities list then remove it from there
    if G.Router and activeEntity in G.pendingEntities:
        G.pendingEntities.remove(activeEntity)
    now = self.env.now
    # add the entity's lifespan to the total one
    self.totalLifespan += now - activeEntity.startTime
    # one more exit, and that many more units out
    self.numOfExits += 1
    self.totalNumberOfUnitsExited += activeEntity.numberOfUnits
    # add the takt time and remember when the last entity left the Exit
    self.totalTaktTime += now - self.timeLastEntityLeft
    self.timeLastEntityLeft = now
    # kept for parity with the original (the returned queue is not used here)
    self.getActiveObjectQueue()
    del self.Res.users[:]
    return activeEntity
def removeEntity(self, entity=None): activeEntity=CoreObject.removeEntity(self, entity) #run the default method self.addBlockage() # remove the entity from the position list self.position.pop(0) # the object doesn't wait to dispose any more self.waitToDispose=False #if the conveyer was full, it means that it also was blocked #we count this blockage time if self.wasFull: # self.totalBlockageTime+=self.env.now-self.timeBlockageStarted self.wasFull=False #calculate the time that the conveyer will become available again and trigger the conveyerMover if self.updateMoveTime() and self.conveyerMover.expectedSignals['canMove']: succeedTuple=(self,self.env.now) self.conveyerMover.canMove.succeed(succeedTuple) # if there is anything to dispose of then signal a receiver if self.haveToDispose(): self.printTrace(self.id, attemptSingalReceiver='(removeEntity)') self.signalReceiver() return activeEntity
def removeEntity(self, entity=None):
    """Remove an entity from the Source, refilling from the schedule.

    If the last entity is about to leave and scheduled arrivals are still
    pending, the next scheduled entity is materialised first (creation and
    start times taken from the head of scheduledEntities, global counters
    updated). Then the default CoreObject.removeEntity runs, and if an
    entity remains, the Source signals itself that an entity was created.

    Returns the removed entity.
    """
    # last entity leaving and more arrivals scheduled -> create the next one now
    if len(self.getActiveObjectQueue()) == 1 and len(
            self.scheduledEntities):
        newEntity = self.createEntity(
        )  # create the Entity object and assign its name
        newEntity.creationTime = self.scheduledEntities.pop(
            0
        )  # the scheduled time becomes the Entity's creation time
        newEntity.startTime = newEntity.creationTime  # start time equals creation time
        #print self.env.now, 'getting from the list. StartTime=',newEntity.startTime
        newEntity.currentStation = self  # update the current station of the Entity
        G.EntityList.append(newEntity)
        self.getActiveObjectQueue().append(
            newEntity)  # append the entity to the resource
        self.numberOfArrivals += 1  # we have one new arrival
        G.numberOfEntities += 1
        self.appendEntity(newEntity)
    activeEntity = CoreObject.removeEntity(
        self, entity)  # run the default method
    # a single entity remains: announce its creation if anyone expects it
    if len(self.getActiveObjectQueue()) == 1:
        if self.expectedSignals['entityCreated']:
            self.sendSignal(receiver=self, signal=self.entityCreated)
    return activeEntity
def removeEntity(self, entity=None):
    """Remove an entity from the Dismantle object and update its flags.

    Runs the default CoreObject.removeEntity without resetting flags or
    adding blockage, then resets the dispose flags depending on how many
    entities remain (frame flag when empty, part flag when one remains),
    and signals the giver if the object can now accept again.

    Returns the removed entity.
    """
    activeObj = self.getActiveObject()
    queue = activeObj.getActiveObjectQueue()
    # run the default method; flag handling is done manually below
    removed = CoreObject.removeEntity(self, entity,
                                      resetFlags=False, addBlockage=False)
    remaining = len(queue)
    if remaining == 0:
        # everything disposed of: the frame is gone, object is free again
        activeObj.waitToDisposeFrame = False
        self.isBlocked = False
        self.isProcessing = False
        self.addBlockage()
    elif remaining == 1:
        # only the frame is left; no more parts to dispose
        activeObj.waitToDisposePart = False
    # if the internal queue emptied enough, try to signal the giver
    if activeObj.canAccept():
        activeObj.printTrace(self.id, attemptSignalGiver='(removeEntity)')
        activeObj.signalGiver()
    return removed
def removeEntity(self, entity=None):
    """Remove an entity from the Queue and re-arm its signals.

    Runs the default CoreObject.removeEntity, signals the giver and (except
    for MouldAssemblyBufferManaged) the receiver where appropriate, resets
    the Queue's expected signals, and — for level-aware queues that just
    emptied — asks the router for operator reallocation.

    Returns the removed entity.
    """
    activeEntity = CoreObject.removeEntity(self, entity)  # run the default method
    if self.canAccept():
        self.signalGiver()
    # TODO: disable that for the mouldAssemblyBuffer
    if not self.__class__.__name__ == 'MouldAssemblyBufferManaged':
        if self.haveToDispose():
            self.signalReceiver()
    # reset the signals for the Queue; it should happen at the start of the loop
    # XXX consider doing this in all CoreObjects
    self.expectedSignals['isRequested'] = 1
    self.expectedSignals['canDispose'] = 1
    self.expectedSignals['loadOperatorAvailable'] = 1
    # check if the queue is empty; if yes, try to signal the router since
    # operators may need reallocation. Objects that do not define 'level'
    # or the allocation hooks are skipped deliberately — only catch the
    # AttributeError that guard raises, instead of swallowing everything.
    try:
        if self.level:
            if not len(self.getActiveObjectQueue()
                       ) and self.checkForDedicatedOperators():
                self.requestAllocation()
    except AttributeError:
        pass
    return activeEntity
def postProcessing(self, MaxSimtime=None):
    """Run the default post-processing and record the final WIP level.

    The final WIP is the total number of units over all sub-batches still
    held in the internal queue; it is appended to self.finalWIP so that
    statistics over multiple replications can be computed.
    """
    CoreObject.postProcessing(self, MaxSimtime)
    wip = sum(subBatch.numberOfUnits
              for subBatch in self.getActiveObjectQueue())
    self.finalWIP.append(wip)
def __init__(self, id, name, capacity, routing='Series', *objects, **kwargs):
    """Compound object composed of simple core objects (machines/queues).

    Arguments:
    id, name  -- identifiers, passed on to CoreObject
    capacity  -- capacity of the compound object; be careful, it should not
                 exceed the combined capacity of the internal objects
    routing   -- 'Series' or 'Parallel'
    *objects  -- the inner components: either already-constructed core
                 objects / resources, or dictionaries describing them
    schedulingRule -- optional keyword (default 'FIFO'): rule used to sort
                 the entities in the internal queue; a value starting with
                 'MC-' declares multiple criteria separated by '-'.
                 (The original read an undefined name 'schedulingRule'.)
    """
    CoreObject.__init__(self, id, name)
    # it would be a good idea to have the arguments provided as dictionary
    self.type = 'CompoundObject'
    # can hold two different values: 'Parallel' or 'Series'
    self.routing = routing
    assert len(objects) > 0, 'the number of objects provided is 0'
    # NOTE(review): the original also asserted type(objects) is dict, which
    # can never hold for a *args tuple and made the constructor always fail.
    # Each element is instead dispatched below as object-or-dict.
    self.numberOfObjects = len(objects)
    self.capacity = capacity
    # the tuple holding the arguments provided
    self.objects = objects
    # the objects (and their ids) that the compoundObject consists of
    self.coreObjects = []
    self.coreObjectIds = []
    self.machines = []
    self.queues = []
    # resources (repairmen/operators) assigned to the object
    self.resources = []
    self.repairmen = []
    self.operators = []
    # inner objects that can receive from / deliver to the compound object
    self.innerNext = []
    self.innerPrevious = []
    # informs that a new entity was just received
    self.newEntityWillBeReceived = False
    # scheduling of the entities in the internal queue
    schedulingRule = kwargs.get('schedulingRule', 'FIFO')
    self.schedulingRule = schedulingRule
    # criteria used to sort the Entities in the Queue
    self.multipleCriterionList = []
    if schedulingRule.startswith("MC"):
        # 'MC' means multiple criteria: split on '-' and keep the list
        SRlist = schedulingRule.split("-")
        self.schedulingRule = SRlist.pop(0)
        self.multipleCriterionList = SRlist
    # ===================================================================
    # first assign the operators/repairmen to the compoundObject;
    # create them if they are described as dictionaries
    # ===================================================================
    objectIndex = 0
    for component in self.objects:
        try:
            # already-constructed resources carry a .type attribute
            if component.type == 'Operator':
                component.coreObjectIds.append(self.id)
                component.coreObjects.append(self)
                self.resources.append(component)
                self.operators.append(component)
            # currently only one repairman can work on a machine
            # (the capacity may vary)
            elif component.type == 'Repairman':
                component.coreObjectIds.append(self.id)
                component.coreObjects.append(self)
                self.resources.append(component)
                self.repairmen.append(component)
        except AttributeError:
            # dictionaries have no .type attribute; build the resource here
            componentType = component.get('type', 'not found')
            if componentType in ('Repairman', 'Operator'):
                resCapacity = component.get('capacity', '1')
                # original used the undefined names 'objecIndex'/'compoentId'
                componentName = self.name + str(self.id) + '_' + \
                    componentType + '_' + str(objectIndex)
                componentId = str(self.id) + str(objectIndex)
                if componentType == 'Repairman':
                    resource = Repairman(id=componentId, name=componentName,
                                         capacity=resCapacity)
                    self.repairmen.append(resource)
                else:
                    resource = Operator(id=componentId, name=componentName,
                                        capacity=resCapacity)
                    self.operators.append(resource)
                resource.coreObjectIds.append(self.id)
                resource.coreObjects.append(self)
                self.resources.append(resource)
        objectIndex += 1
    # ===================================================================
    # walk through the objects of type Machine and Queue and initiate them;
    # the simple objects making up the compoundObject can only be
    # queues and machines for the moment
    # ===================================================================
    objectIndex = 0
    for component in self.objects:
        try:
            # inner objects created outside the compound object are used as-is;
            # only Machines and Queues belong to coreObjects (the original
            # also appended resources here, which looks unintended)
            if component.type == 'Machine':
                self.machines.append(component)
                self.coreObjectIds.append(component.id)
                self.coreObjects.append(component)
            elif component.type == 'Queue':
                self.queues.append(component)
                self.coreObjectIds.append(component.id)
                self.coreObjects.append(component)
        except AttributeError:
            # described as a dictionary: create the inner object here
            componentType = component.get('type', 'not found')
            componentName = self.name + str(self.id) + '_' + \
                componentType + '_' + str(objectIndex)
            componentId = str(self.id) + str(objectIndex)
            if componentType == 'Machine':
                # empty-dict defaults so the chained .get calls cannot crash
                processingTime = component.get('processingTime', {})
                distributionType = processingTime.get('distributionType', 'Fixed')
                mean = float(processingTime.get('mean', '0'))
                stdev = float(processingTime.get('stdev', '0'))
                procMin = float(processingTime.get('min', '0'))
                procMax = float(processingTime.get('max', '0'))
                failures = component.get('failures', {})
                failureDistribution = failures.get('failureDistribution', 'Fixed')
                MTTF = float(failures.get('MTTF', '0'))
                MTTR = float(failures.get('MTTR', '0'))
                availability = float(failures.get('availability', '0'))
                # pick a repairman assigned to this compound object, if any
                # (the original left R unbound when none matched)
                R = None
                for repairman in self.repairmen:
                    if self.id in repairman.coreObjectIds:
                        R = repairman
                O = [operator for operator in self.operators
                     if self.id in operator.coreObjectIds]
                # there must be an implementation of a machine where the
                # failure is passed as argument; this way the same
                # failure-interruption could be shared by all inner objects.
                # Use the freshly built component id/name (the original
                # passed the compound object's own id/name).
                M = OperatedMachine(componentId, componentName, 1,
                                    distribution=distributionType,
                                    failureDistribution=failureDistribution,
                                    MTTF=MTTF, MTTR=MTTR,
                                    availability=availability,
                                    repairman=R,
                                    mean=mean, stdev=stdev,
                                    min=procMin, max=procMax,
                                    operatorPool=O)
                self.coreObjectIds.append(M.id)
                self.coreObjects.append(M)
                self.machines.append(M)
            elif componentType == 'Queue':
                qCapacity = int(component.get('capacity', '1'))
                # parse the flag via int(): bool('0') would be True
                isDummy = bool(int(component.get('isDummy', '0')))
                qSchedulingRule = component.get('schedulingRule', 'FIFO')
                Q = Queue(id=componentId, name=componentName,
                          capacity=qCapacity, isDummy=isDummy,
                          schedulingRule=qSchedulingRule)
                self.coreObjectIds.append(Q.id)
                self.coreObjects.append(Q)
                self.queues.append(Q)
        objectIndex += 1
    # the total time the machine has been waiting for the operator
    self.totalTimeWaitingForOperator = 0