def breakEntity(self, entity, buffer, station, totalAvailableCapacity, totalRequestedCapacity):
    """Split a capacity entity into a fragment that proceeds and one that stays behind.

    The moving share is the entity's proportional slice of the available capacity.
    Returns False (and leaves the entity untouched) when nothing can move,
    True after replacing the entity in the buffer with the two fragments.
    """
    projectId = entity.capacityProjectId
    movingCapacity = totalAvailableCapacity * entity.requiredCapacity / float(totalRequestedCapacity)
    stayingCapacity = entity.requiredCapacity - movingCapacity
    # if nothing can move there is no need to break; report it to the caller
    if movingCapacity == 0:
        return False
    # take the original entity out of the buffer so the fragments can replace it
    buffer.getActiveObjectQueue().remove(entity)
    movingEntity = CapacityEntity(name=projectId + '_' + station.objName + '_' + str(movingCapacity),
                                  capacityProjectId=projectId,
                                  requiredCapacity=movingCapacity)
    movingEntity.initialize()
    movingEntity.currentStation = buffer
    movingEntity.shouldMove = True
    stayingEntity = CapacityEntity(name=projectId + '_' + station.objName + '_' + str(stayingCapacity),
                                   capacityProjectId=projectId,
                                   requiredCapacity=stayingCapacity)
    stayingEntity.initialize()
    stayingEntity.currentStation = buffer
    import dream.simulation.Globals as Globals
    Globals.setWIP([movingEntity, stayingEntity])   # register the fragments as WIP
    return True
def createInCapacityStationBuffers(self):
    """For every capacity station exit, create in the next station's buffer the
    entities that correspond to what the exit just obtained, then flush the exit.
    """
    for exit in G.CapacityStationExitList:
        # the exit received nothing currently; there is nothing to do
        if not exit.currentlyObtainedEntities:
            continue
        buffer = exit.nextCapacityStationBuffer
        # end of the system: just discard what was obtained
        if not buffer:
            exit.currentlyObtainedEntities = []
            continue
        previousStation = exit.previous[0]          # the station the entity just finished from
        previousBuffer = previousStation.previous[0]  # the buffer of that station
        nextStation = buffer.next[0]                # the next processing station
        # for every entity calculate the new entity to be created in the next station and create it
        for entity in exit.currentlyObtainedEntities:
            project = entity.capacityProject
            # an assembly station releases work to the next station only once
            # the whole project has finished there
            if previousBuffer.requireFullProject:
                stillPending = any(e.capacityProject == project
                                   for e in previousBuffer.getActiveObjectQueue())
                if stillPending:
                    continue
            previousRequirement = float(project.capacityRequirementDict[previousStation.id])
            nextRequirement = float(project.capacityRequirementDict[nextStation.id])
            if previousBuffer.requireFullProject:
                # the full project arrives next, so it requires whatever the project requires
                nextStationCapacityRequirement = nextRequirement
            else:
                # otherwise scale proportionally to the capacity this entity carried
                nextStationCapacityRequirement = (nextRequirement / previousRequirement) * entity.requiredCapacity
            newEntity = CapacityEntity(
                name=entity.capacityProjectId + '_' + nextStation.objName + '_' + str(nextStationCapacityRequirement),
                capacityProjectId=entity.capacityProjectId,
                requiredCapacity=nextStationCapacityRequirement)
            newEntity.currentStation = buffer
            newEntity.initialize()
            import dream.simulation.Globals as Globals
            Globals.setWIP([newEntity])     # register the new component as WIP
        # reset the currently obtained entities list to empty
        exit.currentlyObtainedEntities = []
def createInCapacityStationBuffers(self):
    """Spawn follow-up capacity entities in the next station's buffer for every
    entity a capacity station exit obtained in this period, then clear the exit.
    """
    for stationExit in G.CapacityStationExitList:
        obtained = stationExit.currentlyObtainedEntities
        # nothing arrived at this exit currently; nothing to do
        if obtained == []:
            continue
        nextBuffer = stationExit.nextCapacityStationBuffer
        # if it is the end of the system, just flush what was obtained
        if not nextBuffer:
            stationExit.currentlyObtainedEntities = []
            continue
        finishedStation = stationExit.previous[0]       # the station the entity just finished from
        finishedBuffer = finishedStation.previous[0]    # that station's buffer
        followingStation = nextBuffer.next[0]           # the next processing station
        for finishedEntity in obtained:
            project = finishedEntity.capacityProject
            # if the finished station is an assembly and the project is not fully
            # done there, do not create anything in the next station yet
            if finishedBuffer.requireFullProject:
                wholeProjectDone = True
                for pending in finishedBuffer.getActiveObjectQueue():
                    if pending.capacityProject == project:
                        wholeProjectDone = False
                        break
                if not wholeProjectDone:
                    continue
            finishedRequirement = float(project.capacityRequirementDict[finishedStation.id])
            followingRequirement = float(project.capacityRequirementDict[followingStation.id])
            if finishedBuffer.requireFullProject:
                # after assembly the full project arrives, so it needs the full requirement
                requirement = followingRequirement
            else:
                # otherwise scale by the share of capacity the finished entity carried
                requirement = (followingRequirement / finishedRequirement) * finishedEntity.requiredCapacity
            createdName = finishedEntity.capacityProjectId + '_' + followingStation.objName + '_' + str(requirement)
            created = CapacityEntity(name=createdName,
                                     capacityProjectId=finishedEntity.capacityProjectId,
                                     requiredCapacity=requirement)
            created.currentStation = nextBuffer
            created.initialize()
            import dream.simulation.Globals as Globals
            Globals.setWIP([created])   # register the new component as WIP
        # reset the currently obtained entities list to empty
        stationExit.currentlyObtainedEntities = []
def initialize(self):
    """Initialize the capacity station for a simulation run.

    Inherits queue initialization, borrows interval capacity from a sharing
    station if this one has none defined, trims the capacity intervals that lie
    before the configured start, and registers the station in the global list.
    """
    Queue.initialize(self)
    # if the station shares resources and the capacity is not defined in this
    # then read it from some other of the sharing stations
    if not self.intervalCapacity and self.sharedResources:
        import dream.simulation.Globals as Globals  # hoisted out of the loop: loop-invariant
        for stationId in self.sharedResources.get('stationIds', []):
            station = Globals.findObjectById(stationId)
            if station.intervalCapacity:
                self.intervalCapacity = station.intervalCapacity
                break
    # initialize variables
    self.remainingIntervalCapacity = list(self.intervalCapacity)
    # drop the intervals before the configured starting interval
    for i in range(self.intervalCapacityStart):
        self.remainingIntervalCapacity.pop(0)
    self.isLocked = True
    self.utilisationDict = []   # a list of dicts for the utilization results
    self.detailedWorkPlan = []  # a list of dicts to keep detailed data
    from dream.simulation.Globals import G
    # register the station globally, creating the list on first use
    if not hasattr(G, 'CapacityStationList'):
        G.CapacityStationList = []
    G.CapacityStationList.append(self)
def mergeEntities(self):
    """Merge, per project, the capacity entities waiting in each station buffer
    into a single entity carrying the combined capacity requirement.

    For assembly buffers (requireFullProject) the requirement is the project's
    total for the next station minus what is already processed; otherwise it is
    the sum of the merged entities' requirements.
    """
    import dream.simulation.Globals as Globals  # hoisted: was re-imported on every merge
    for buffer in G.CapacityStationBufferList:
        nextStation = buffer.next[0]
        # collect the distinct projects that lie in the buffer
        projectList = []
        for entity in buffer.getActiveObjectQueue():
            if entity.capacityProject not in projectList:
                projectList.append(entity.capacityProject)
        for project in projectList:
            entitiesToBeMerged = [entity for entity in buffer.getActiveObjectQueue()
                                  if entity.capacityProject == project]
            totalCapacityRequirement = 0
            # if the buffer acts as assembly there is no need to sum the entities'
            # requirements: it is the project's total for this station,
            # minus whatever has already been processed
            if buffer.requireFullProject:
                alreadyProcessed = 0
                for record in nextStation.detailedWorkPlan:
                    if record['project'] == project.id:
                        alreadyProcessed += float(record['allocation'])
                totalCapacityRequirement = project.capacityRequirementDict[nextStation.id] - alreadyProcessed
            # else calculate the total capacity requirement by adding what each entity requires
            else:
                for entity in entitiesToBeMerged:
                    totalCapacityRequirement += entity.requiredCapacity
            # erase the Entities to create the merged one
            for entity in entitiesToBeMerged:
                buffer.getActiveObjectQueue().remove(entity)
            # create the merged entity. Use project.id directly (consistent with
            # the constructor argument below) instead of relying on the loop
            # variable `entity` leaking out of the removal loop above.
            entityToCreateName = project.id + '_' + nextStation.objName + '_' + str(totalCapacityRequirement)
            entityToCreate = CapacityEntity(name=entityToCreateName,
                                            capacityProjectId=project.id,
                                            requiredCapacity=totalCapacityRequirement)
            entityToCreate.currentStation = buffer
            entityToCreate.initialize()
            Globals.setWIP([entityToCreate])    # set the new component as WIP
def breakEntity(self, entity, buffer, station, totalAvailableCapacity, totalRequestedCapacity):
    """Break a capacity entity into two fragments: one flagged to move on and
    one that remains in the buffer.

    Returns False when the entity's proportional share of the available capacity
    is zero (no break needed), True otherwise.
    """
    capacityToMove = totalAvailableCapacity * entity.requiredCapacity / float(totalRequestedCapacity)
    # if capacityToMove is equal to 0 there is no need to break
    if capacityToMove == 0:
        return False
    capacityToStay = entity.requiredCapacity - capacityToMove
    # remove the capacity entity from the buffer so that the broken ones replace it
    buffer.getActiveObjectQueue().remove(entity)
    fragments = []
    # build the two fragments: (capacity, whether the fragment should move)
    for amount, moves in ((capacityToMove, True), (capacityToStay, False)):
        fragment = CapacityEntity(
            name=entity.capacityProjectId + '_' + station.objName + '_' + str(amount),
            capacityProjectId=entity.capacityProjectId,
            requiredCapacity=amount)
        fragment.initialize()
        fragment.currentStation = buffer
        if moves:
            fragment.shouldMove = True
        fragments.append(fragment)
    import dream.simulation.Globals as Globals
    Globals.setWIP(fragments)   # set the new components as WIP
    return True
def mergeEntities(self):
    """Combine the capacity entities of each project waiting in every capacity
    station buffer into one merged entity per project.
    """
    import dream.simulation.Globals as Globals  # hoisted out of the loops (loop-invariant)
    for buffer in G.CapacityStationBufferList:
        nextStation = buffer.next[0]
        projectList = []
        # loop through the entities to see what projects lie in the buffer
        for entity in buffer.getActiveObjectQueue():
            if entity.capacityProject not in projectList:
                projectList.append(entity.capacityProject)
        for project in projectList:
            entitiesToBeMerged = []
            for entity in buffer.getActiveObjectQueue():
                if entity.capacityProject == project:
                    entitiesToBeMerged.append(entity)
            totalCapacityRequirement = 0
            # assembly buffer: the requirement is the project's total for this
            # station minus what has already been processed there
            if buffer.requireFullProject:
                alreadyProcessed = 0
                for record in nextStation.detailedWorkPlan:
                    if record['project'] == project.id:
                        alreadyProcessed += float(record['allocation'])
                totalCapacityRequirement = project.capacityRequirementDict[nextStation.id] - alreadyProcessed
            # otherwise add up what each entity requires
            else:
                for entity in entitiesToBeMerged:
                    totalCapacityRequirement += entity.requiredCapacity
            # erase the Entities to create the merged one
            for entity in entitiesToBeMerged:
                buffer.getActiveObjectQueue().remove(entity)
            # create the merged entity; project.id is used for the name instead of
            # the `entity` variable leaked from the removal loop (same project,
            # consistent with the capacityProjectId argument)
            entityToCreateName = project.id + '_' + nextStation.objName + '_' + str(totalCapacityRequirement)
            entityToCreate = CapacityEntity(name=entityToCreateName,
                                            capacityProjectId=project.id,
                                            requiredCapacity=totalCapacityRequirement)
            entityToCreate.currentStation = buffer
            entityToCreate.initialize()
            Globals.setWIP([entityToCreate])    # set the new component as WIP
def initialize(self):
    """Initialize the controller queue and expand the notRequiredOperations ids.

    The input gives only the station id for each not-required operation; the
    corresponding buffer (the station's predecessor) and exit (its successor)
    ids are identified here and stored alongside it.
    """
    Queue.initialize(self)
    import dream.simulation.Globals as Globals
    expandedNotRequired = []
    # `stationId` instead of `id`: avoid shadowing the builtin
    for stationId in self.notRequiredOperations:
        station = Globals.findObjectById(stationId)
        expandedNotRequired.append(station.id)
        expandedNotRequired.append(station.previous[0].id)  # the station's buffer
        expandedNotRequired.append(station.next[0].id)      # the station's exit
    self.notRequiredOperations = expandedNotRequired
    self.isLocked = True
def initialize(self):
    """Initialize the queue and expand each configured not-required station id
    into the (station, buffer, exit) id triplet.

    Input gives only the station id; the buffer and exit are derived from the
    station's predecessor and successor respectively.
    """
    Queue.initialize(self)
    import dream.simulation.Globals as Globals
    notRequiredOperations = []
    # renamed loop variable from `id` so the builtin is not shadowed
    for operationId in self.notRequiredOperations:
        station = Globals.findObjectById(operationId)
        notRequiredOperations.append(station.id)
        bufferId = station.previous[0].id   # the buffer feeding the station
        notRequiredOperations.append(bufferId)
        exitId = station.next[0].id         # the exit following the station
        notRequiredOperations.append(exitId)
    self.notRequiredOperations = notRequiredOperations
    self.isLocked = True
def initialize(self):
    """Initialize the capacity station: inherit queue setup, fall back to a
    sharing station's interval capacity when none is defined here, skip the
    intervals before the start offset, and register in G.CapacityStationList.
    """
    Queue.initialize(self)
    # if the station shares resources and the capacity is not defined in this
    # then read it from some other of the sharing stations
    if not self.intervalCapacity and self.sharedResources:
        import dream.simulation.Globals as Globals  # moved out of the loop: it is loop-invariant
        for stationId in self.sharedResources.get('stationIds', []):
            station = Globals.findObjectById(stationId)
            if station.intervalCapacity:
                self.intervalCapacity = station.intervalCapacity
                break
    # initialize variables
    self.remainingIntervalCapacity = list(self.intervalCapacity)
    # discard the capacity intervals that precede the configured start
    for i in range(self.intervalCapacityStart):
        self.remainingIntervalCapacity.pop(0)
    self.isLocked = True
    self.utilisationDict = []   # a list of dicts for the utilization results
    self.detailedWorkPlan = []  # a list of dicts to keep detailed data
    from dream.simulation.Globals import G
    # simplified registration: create the global list once, then always append
    if not hasattr(G, 'CapacityStationList'):
        G.CapacityStationList = []
    G.CapacityStationList.append(self)
def createWIP():
    """Populate the global entity lists from G.JSONData.

    Resets the global WIP/entity lists, creates Orders (and generic entities)
    declared in the optional input BOM, then walks every graph node's 'wip'
    list and instantiates the entities found there, linking them to their
    parent order and current station where applicable.
    """
    # reset all global entity bookkeeping lists for this replication
    G.JobList = []
    G.WipList = []
    G.EntityList = []
    G.PartList = []
    G.OrderComponentList = []
    G.DesignList = []  # list of the OrderDesigns in the system
    G.OrderList = []
    G.MouldList = []
    G.BatchList = []
    G.SubBatchList = []
    G.CapacityEntityList = []
    G.CapacityProjectList = []
    # entities that just finished processing in a station
    # and have to enter the next machine
    G.pendingEntities = []
    # Read the json data
    json_data = G.JSONData
    # read from the dictionary the dicts with key 'BOM' (if there are any)
    # NOTE(review): `input` shadows the builtin of the same name within this function
    input = json_data.get('input', {})
    bom = input.get('BOM', None)
    if bom:
        orders = bom.get('productionOrders', [])
        # for every order in the productionOrders list
        for prodOrder in orders:
            orderClass = prodOrder.get('_class', None)
            orderType = Globals.getClassFromName(orderClass)
            # make sure that their type is Dream.Order
            if orderClass == 'Dream.Order':
                id = prodOrder.get('id', 'not found')
                name = prodOrder.get('name', 'not found')
                priority = int(prodOrder.get('priority', '0'))
                dueDate = float(prodOrder.get('dueDate', '0'))
                orderDate = float(prodOrder.get('orderDate', '0'))
                isCritical = bool(int(prodOrder.get('isCritical', '0')))
                componentsReadyForAssembly = bool((prodOrder.get('componentsReadyForAssembly', False)))
                componentsList = prodOrder.get('componentsList', {})
                # keep a reference of all extra properties passed to the job
                extraPropertyDict = {}
                for key, value in prodOrder.items():
                    if key not in ('_class', 'id'):
                        extraPropertyDict[key] = value
                # initiate the Order
                O = Order('G' + id, 'general ' + name, route=[], priority=priority,
                          dueDate=dueDate, orderDate=orderDate, isCritical=isCritical,
                          componentsList=componentsList,
                          componentsReadyForAssembly=componentsReadyForAssembly,
                          extraPropertyDict=extraPropertyDict)
                G.OrderList.append(O)
            else:
                # non-Order BOM records: instantiate directly if they are Entities
                productionOrderClass = prodOrder.get('_class', None)
                productionOrderType = Globals.getClassFromName(productionOrderClass)
                inputDict = dict(prodOrder)
                inputDict.pop('_class')
                from dream.simulation.Entity import Entity
                if issubclass(productionOrderType, Entity):
                    entity = productionOrderType(**inputDict)
                    G.EntityList.append(entity)
    # read from the dictionary the dicts with key 'nodes'
    nodes = json_data["graph"]['node']
    for (element_id, element) in nodes.iteritems():
        element['id'] = element_id
        wip = element.get('wip', [])
        from dream.simulation.OrderDesign import OrderDesign
        # NOTE(review): the names `entity` and `element` are rebound inside the
        # loops below (entity becomes a componentDict copy, then an instance;
        # element is reused to iterate the route). Looks deliberate but fragile
        # for later iterations of the same node — verify before refactoring.
        for entity in wip:
            # if there is BOM defined
            if bom:
                # and production orders in it
                if bom.get('productionOrders', []):
                    # find which order has the entity in its componentsList
                    for order in G.OrderList:
                        if order.componentsList:
                            for componentDict in order.componentsList:
                                # if the entity has no parent order the following control will not be performed
                                if entity['id'] == componentDict['id']:
                                    entityCurrentSeq = int(entity['sequence'])  # the current seq number of the entity's route
                                    entityRemainingProcessingTime = entity.get('remainingProcessingTime', {})
                                    entityRemainingSetupTime = entity.get('remainingSetupTime', {})
                                    ind = 0  # holder of the route index corresponding to the entityCurrentSeq
                                    solution = False  # flag to signal that the route step is found
                                    # find the step that corresponds to the entityCurrentSeq
                                    for i, step in enumerate(componentDict.get('route', [])):
                                        stepSeq = step['sequence']  # the sequence of step i
                                        if stepSeq == '':
                                            stepSeq = 0  # if the seq is ''>OrderDecomposition then 0
                                        # if the entityCurrentSeq is found and the id of the holding Station is in the steps stationIdsList
                                        if int(stepSeq) == entityCurrentSeq and element['id'] in step['stationIdsList']:
                                            ind = i  # hold the index
                                            solution = True  # the solution is found
                                            break
                                    # assert that there is solution
                                    assert solution, 'something is wrong with the initial step of ' + entity['id']
                                    # the remaining route of the entity assuming that the given route doesn't start from the entityCurrentSeq
                                    entityRoute = componentDict.get('route', [])[ind:]
                                    entity = dict(componentDict)  # copy the entity dict
                                    entity.pop('route')  # remove the old route
                                    entity['route'] = entityRoute  # and hold the new one without the previous steps
                                    entity['order'] = order.id
                                    entity['remainingProcessingTime'] = entityRemainingProcessingTime
                                    entity['remainingSetupTime'] = entityRemainingSetupTime
                                    break
            entityClass = entity.get('_class', None)
            entityType = Globals.getClassFromName(entityClass)
            inputDict = dict(entity)
            inputDict.pop('_class')
            from dream.simulation.Entity import Entity
            if issubclass(entityType, Entity) and (not entityClass == 'Dream.Order'):
                # if orders are provided separately (BOM) provide the parent order as argument
                if entity.get('order', None):
                    entityOrder = Globals.findObjectById(entity['order'])
                    inputDict.pop('order')
                    entity = entityType(order=entityOrder, **inputDict)
                    entity.routeInBOM = True
                else:
                    entity = entityType(**inputDict)
                G.EntityList.append(entity)
                # place the instantiated entity at the node that declared it
                object = Globals.findObjectById(element['id'])
                entity.currentStation = object
            # ToDo order is to defined in a new way
            if entityClass == 'Dream.Order':
                id = entity.get('id', 'not found')
                name = entity.get('name', 'not found')
                priority = int(entity.get('priority', '0'))
                dueDate = float(entity.get('dueDate', '0'))
                orderDate = float(entity.get('orderDate', '0'))
                isCritical = bool(int(entity.get('isCritical', '0')))
                basicsEnded = bool(int(entity.get('basicsEnded', '0')))
                componentsReadyForAssembly = bool((entity.get('componentsReadyForAssembly', False)))
                # read the manager ID
                manager = entity.get('manager', None)
                # if a manager ID is assigned then search for the operator with the corresponding ID
                # and assign it as the manager of the order
                if manager:
                    for operator in G.OperatorsList:
                        if manager == operator.id:
                            manager = operator
                            break
                componentsList = entity.get('componentsList', {})
                JSONRoute = entity.get('route', [])  # dummy variable that holds the routes of the jobs
                # the route from the JSON file is a sequence of dictionaries
                route = [x for x in JSONRoute]  # copy JSONRoute
                # keep a reference of all extra properties passed to the job
                extraPropertyDict = {}
                for key, value in entity.items():
                    if key not in ('_class', 'id'):
                        extraPropertyDict[key] = value
                # Below it is to assign an order decomposition if it was not assigned in JSON
                # have to talk about it with NEX
                odAssigned = False
                for element in route:
                    elementIds = element.get('stationIdsList', [])
                    for obj in G.ObjList:
                        for elementId in elementIds:
                            if obj.id == elementId and obj.type == 'OrderDecomposition':
                                odAssigned = True
                if not odAssigned:
                    odId = None
                    for obj in G.ObjList:
                        if obj.type == 'OrderDecomposition':
                            odId = obj.id
                            break
                    if odId:
                        # route.append([odId, 0])
                        route.append({'stationIdsList': [odId],
                                      'processingTime':
                                          {'distributionType': 'Fixed',
                                           'mean': '0'}})
                # XXX dirty way to implement new approach were the order is abstract and does not run through the system
                # but the OrderDesign does
                # XXX initiate the Order and the OrderDesign
                O = Order('G' + id, 'general ' + name, route=[], priority=priority,
                          dueDate=dueDate, orderDate=orderDate, isCritical=isCritical,
                          basicsEnded=basicsEnded, manager=manager,
                          componentsList=componentsList,
                          componentsReadyForAssembly=componentsReadyForAssembly,
                          extraPropertyDict=extraPropertyDict)
                # create the OrderDesign
                OD = OrderDesign(id, name, route, priority=priority, dueDate=dueDate,
                                 orderDate=orderDate, isCritical=isCritical, order=O,
                                 extraPropertyDict=extraPropertyDict)
                # add the order to the OrderList
                G.OrderList.append(O)
                # add the OrderDesign to the DesignList and the OrderComponentList
                G.OrderComponentList.append(OD)
                G.DesignList.append(OD)
                G.WipList.append(OD)
                G.EntityList.append(OD)
                G.JobList.append(OD)
def createObjectInterruptions():
    """Create all ObjectInterruption instances declared in G.JSONData.

    First instantiates nodes whose class is itself an ObjectInterruption
    subclass (e.g. event generators), then scans every node's 'interruptions'
    dict for encapsulated scheduled maintenances, failures, periodic
    maintenances, shift patterns and breaks, appending each created object to
    the matching global list.
    """
    # reset the global interruption bookkeeping lists
    G.ObjectInterruptionList = []
    G.ScheduledMaintenanceList = []
    G.FailureList = []
    G.BreakList = []
    G.ShiftSchedulerList = []
    G.EventGeneratorList = []
    G.CapacityStationControllerList = []
    G.PeriodicMaintenanceList = []
    json_data = G.JSONData  # Read the json data
    nodes = json_data['graph']["node"]  # read from the dictionary the dicts with key 'nodes'
    # loop through all the nodes to
    # search for Event Generator and create them
    # this is put last, since the EventGenerator
    # may take other objects as argument
    for (element_id, element) in nodes.iteritems():
        # use an iterator to go through all the nodes;
        # the key is the element_id and the second is the element itself
        element['id'] = element_id  # create a new entry for the element (dictionary)
                                    # with key 'id' and value the element_id
        objClass = element.get('_class', 'not found')  # get the class type of the element
        from dream.simulation.ObjectInterruption import ObjectInterruption
        objClass = element.pop('_class')
        objectType = Globals.getClassFromName(objClass)
        # from CoreObject import CoreObject
        # if issubclass(objectType, CoreObject):
        if issubclass(objectType, ObjectInterruption):  # check the object type
            inputDict = dict(element)
            # create the ObjectInterruption
            objectInterruption = objectType(**inputDict)
            # routers are handled separately; keep them out of the generic list
            if not 'OperatorRouter' in str(objectType):
                G.ObjectInterruptionList.append(objectInterruption)
    # search inside the nodes for encapsulated ObjectInterruptions (failures etc)
    # ToDo this will be cleaned a lot if we update the JSON notation:
    # define ObjectInterruption echelon inside node
    # define interruptions' distribution better
    from dream.simulation.ScheduledMaintenance import ScheduledMaintenance
    from dream.simulation.Failure import Failure
    from dream.simulation.PeriodicMaintenance import PeriodicMaintenance
    from dream.simulation.ShiftScheduler import ShiftScheduler
    from dream.simulation.Break import Break
    for (element_id, element) in nodes.iteritems():
        element['id'] = element_id
        scheduledMaintenance = element.get('interruptions', {}).get('scheduledMaintenance', {})
        # if there is a scheduled maintenance initiate it and append it
        # to the interruptions- and scheduled maintenances- list
        if len(scheduledMaintenance):
            start = float(scheduledMaintenance.get('start', 0))
            duration = float(scheduledMaintenance.get('duration', 1))
            victim = Globals.findObjectById(element['id'])
            SM = ScheduledMaintenance(victim=victim, start=start, duration=duration)
            G.ObjectInterruptionList.append(SM)
            G.ScheduledMaintenanceList.append(SM)
        failure = element.get('interruptions', {}).get('failure', None)
        # if there are failures assigned
        # initiate them
        if failure:
            victim = Globals.findObjectById(element['id'])
            deteriorationType = failure.get('deteriorationType', 'constant')
            waitOnTie = failure.get('waitOnTie', False)
            F = Failure(victim=victim, distribution=failure, repairman=victim.repairman,
                        deteriorationType=deteriorationType, waitOnTie=waitOnTie)
            G.ObjectInterruptionList.append(F)
            G.FailureList.append(F)
        # if there are periodic maintenances assigned
        # initiate them
        periodicMaintenance = element.get('interruptions', {}).get('periodicMaintenance', None)
        if periodicMaintenance:
            distributionType = periodicMaintenance.get('distributionType', 'No')
            victim = Globals.findObjectById(element['id'])
            PM = PeriodicMaintenance(victim=victim, distribution=periodicMaintenance,
                                     repairman=victim.repairman)
            G.ObjectInterruptionList.append(PM)
            G.PeriodicMaintenanceList.append(PM)
        # if there is a shift pattern defined
        # initiate them
        shift = element.get('interruptions', {}).get('shift', {})
        if len(shift):
            victim = Globals.findObjectById(element['id'])
            shiftPattern = list(shift.get('shiftPattern', []))
            # patch to correct if input has end of shift at the same time of start of next shift
            # TODO check if the backend should be able to handle this
            # NOTE(review): `next` shadows the builtin, and shiftPattern is
            # mutated (remove) while being enumerated — appears to work because
            # of the `break` on the last record, but verify with adjacent merges
            for index, record in enumerate(shiftPattern):
                if record is shiftPattern[-1]:
                    break
                next = shiftPattern[index + 1]
                if record[1] == next[0]:
                    record[1] = next[1]
                    shiftPattern.remove(next)
            endUnfinished = bool(int(shift.get('endUnfinished', 0)))
            receiveBeforeEndThreshold = float(shift.get('receiveBeforeEndThreshold', 0))
            thresholdTimeIsOnShift = bool(int(shift.get('thresholdTimeIsOnShift', 1)))
            rolling = bool(int(shift.get('rolling', 0)))
            lastOffShiftDuration = float(shift.get('lastOffShiftDuration', 10))
            SS = ShiftScheduler(victim=victim, shiftPattern=shiftPattern,
                                endUnfinished=endUnfinished,
                                receiveBeforeEndThreshold=receiveBeforeEndThreshold,
                                thresholdTimeIsOnShift=thresholdTimeIsOnShift,
                                rolling=rolling,
                                lastOffShiftDuration=lastOffShiftDuration)
            G.ObjectInterruptionList.append(SS)
            G.ShiftSchedulerList.append(SS)
        br = element.get('interruptions', {}).get('break', None)
        # if there are breaks assigned
        # initiate them
        if br:
            victim = Globals.findObjectById(element['id'])
            endUnfinished = bool(int(br.get('endUnfinished', 1)))
            offShiftAnticipation = br.get('offShiftAnticipation', 0)
            BR = Break(victim=victim, distribution=br, endUnfinished=endUnfinished,
                       offShiftAnticipation=offShiftAnticipation)
            G.ObjectInterruptionList.append(BR)
            G.BreakList.append(BR)
def createObjectResourcesAndCoreObjects():
    """Instantiate the model's object resources and core objects from G.JSONData.

    Runs three passes over the graph nodes: (1) object resources such as
    repairmen/operators, (2) operator pools (which need the operators from
    pass 1), (3) core objects; successor/predecessor links are wired from the
    graph edges via the local getSuccessorList helper.
    """
    json_data = G.JSONData  # Read the json data
    # nodes = json_data['nodes'] # read from the dictionary the dicts with key 'nodes'
    nodes = json_data['graph']["node"]  # read from the dictionary the dicts with key 'nodes'
    edges = json_data['graph']["edge"]  # read from the dictionary the dicts with key 'edges'
    '''
    getSuccesorList method to get the successor list of object with ID = id
    XXX slow implementation
    '''
    def getSuccessorList(node_id, predicate=lambda source, destination, edge_class, edge_data: True):
        # returns the sorted ids of every edge destination whose source is
        # node_id and that satisfies the optional predicate
        successor_list = []  # dummy variable that holds the list to be returned
        for edge in edges.values():
            source = edge["source"]
            destination = edge["destination"]
            edge_class = edge["_class"]
            edge_data = edge.get("data", {})
            if source == node_id:  # for the node_id argument
                if predicate(source, destination, edge_class, edge_data):  # find its 'destinations' and
                    successor_list.append(destination)  # append it to the successor list
        # XXX We should probably not need to sort, but there is a bug that
        # prevents Topology10 to work if this sort is not used.
        successor_list.sort()
        return successor_list
    '''
    define the lists of each object type
    '''
    G.SourceList = []
    G.MachineList = []
    G.ExitList = []
    G.QueueList = []
    G.RepairmanList = []
    G.AssemblyList = []
    G.DismantleList = []
    G.ConveyerList = []
    G.MachineJobShopList = []
    G.QueueJobShopList = []
    G.ExitJobShopList = []
    G.BatchDecompositionList = []
    G.BatchSourceList = []
    G.BatchReassemblyList = []
    G.RoutingQueueList = []
    G.LineClearanceList = []
    G.EventGeneratorList = []
    G.OperatorsList = []
    G.OperatorManagedJobsList = []
    G.OperatorPoolsList = []
    G.BrokersList = []
    G.OperatedMachineList = []
    G.BatchScrapMachineList = []
    G.OrderDecompositionList = []
    G.ConditionalBufferList = []
    G.MouldAssemblyBufferList = []
    G.MouldAssemblyList = []
    G.MachineManagedJobList = []
    G.QueueManagedJobList = []
    G.ObjectResourceList = []
    G.CapacityStationBufferList = []
    G.AllocationManagementList = []
    G.CapacityStationList = []
    G.CapacityStationExitList = []
    G.CapacityStationControllerList = []
    '''
    loop through all the model resources
    search for repairmen and operators in order to create them
    read the data and create them
    '''
    for (element_id, element) in nodes.iteritems():  # use an iterator to go through all the nodes
        element['id'] = element_id  # create a new entry for the element (dictionary)
        # copy before popping layout keys so the original node dict is preserved
        element = element.copy()
        for k in ('element_id', 'top', 'left'):
            element.pop(k, None)  # with key 'id' and value the element_id
        resourceClass = element.pop('_class')  # get the class type of the element
        objectType = Globals.getClassFromName(resourceClass)
        from dream.simulation.ObjectResource import ObjectResource
        # operator pools to be created later since they use operators
        # ToDo maybe it is semantically diferent object
        if issubclass(objectType, ObjectResource) and not resourceClass == 'Dream.OperatorPool':
            inputDict = dict(element)
            # create the CoreObject
            objectResource = objectType(**inputDict)
            # if there already coreObjectsIds defined then append the successors to them
            # NOTE(review): the inner loop rebinds `element` to each successor id,
            # so element['id'] inside getSuccessorList is only read once — verify
            if objectResource.coreObjectIds:
                for element in getSuccessorList(element['id']):
                    if not element in objectResource.coreObjectIds:
                        objectResource.coreObjectIds.append(element)
            else:
                objectResource.coreObjectIds = getSuccessorList(element['id'])
    '''
    loop through all the model resources
    search for operatorPools in order to create them
    read the data and create them
    '''
    from dream.simulation.OperatorPool import OperatorPool
    for (element_id, element) in nodes.iteritems():
        # use an iterator to go through all the nodes;
        # the key is the element_id and the second is the element itself
        element = element.copy()
        element['id'] = element_id  # create a new entry for the element (dictionary)
        for k in ('element_id', 'top', 'left'):
            element.pop(k, None)  # with key 'id' and value the element_id
        resourceClass = element.pop('_class')  # get the class type of the element
        if resourceClass == 'Dream.OperatorPool':
            id = element.get('id', 'not found')  # get the id of the element / default 'not_found'
            name = element.get('name', 'not found')  # get the name of the element / default 'not_found'
            capacity = int(element.get('capacity') or 1)
            operatorsList = []
            for operator in G.OperatorsList:  # find the operators assigned to the operatorPool
                if id in operator.coreObjectIds:
                    operatorsList.append(operator)
            # operatorsList = element.get('operatorsList', 'not found')
            if len(operatorsList) == 0:  # if the operatorsList is empty then assign no operators
                OP = OperatorPool(element_id, name, capacity)  # create a operatorPool object
            else:
                OP = OperatorPool(element_id, name, capacity, operatorsList)  # create a operatorPool object
            OP.coreObjectIds = getSuccessorList(id)  # update the list of objects that the operators of the operatorPool operate
            for operator in operatorsList:
                operator.coreObjectIds = OP.coreObjectIds  # update the list of objects that the operators operate
            G.OperatorPoolsList.append(OP)  # add the operatorPool to the RepairmanList
    '''
    loop through all the elements
    read the data and create them
    '''
    for (element_id, element) in nodes.iteritems():
        element = element.copy()
        element['id'] = element_id
        element.setdefault('name', element_id)
        # XXX not sure about top & left.
        for k in ('element_id', 'top', 'left'):
            element.pop(k, None)
        objClass = element.pop('_class')
        objectType = Globals.getClassFromName(objClass)
        from dream.simulation.CoreObject import CoreObject
        if issubclass(objectType, CoreObject):
            # remove data that has to do with wip or object interruption. CoreObjects do not need it
            inputDict = dict(element)
            # create the CoreObject
            coreObject = objectType(**inputDict)
            # update the nextIDs list of the object
            coreObject.nextIds = getSuccessorList(element['id'])
            # (Below is only for Dismantle for now)
            # get the successorList for the 'Parts'
            coreObject.nextPartIds = getSuccessorList(element['id'], lambda source, destination, edge_class, edge_data: edge_data.get('entity', {}) == 'Part')
            # get the successorList for the 'Frames'
            coreObject.nextFrameIds = getSuccessorList(element['id'], lambda source, destination, edge_class, edge_data: edge_data.get('entity', {}) == 'Frame')
    # loop through all the core objects
    # to read predecessors
    for element in G.ObjList:
        # loop through all the nextIds of the object
        for nextId in element.nextIds:
            # loop through all the core objects to find the one that has the id that was read in the successorList
            for possible_successor in G.ObjList:
                if possible_successor.id == nextId:
                    possible_successor.previousIds.append(element.id)
def main(argv=[], input_data=None):
    """Entry point: build the model from JSON input, run the simulation
    replications and return (or print) the results as a JSON string.

    argv       -- command-line style arguments; argv[0] is the input file name
                  (falls back to sys.argv[1:] when empty)
    input_data -- the JSON input as a string; when given, no file is read and
                  the result string is returned instead of printed

    Returns the result JSON string, the string "ERROR" when the input file
    cannot be opened, or None when the output was printed on stdout.
    """
    argv = argv or sys.argv[1:]
    # create empty lists to store all the objects in
    G.ObjList=[]
    G.RouterList=[]
    if input_data is None:
        # user passes the topology filename as first argument to the program
        filename = argv[0]
        try:    # try to open the file with the inputs
            G.JSONFile=open(filename, "r")      # global variable holding the file to be opened
        except IOError:
            print "%s could not be open" % filename
            return "ERROR"
        G.InputData=G.JSONFile.read()   # pass the contents of the input file to the global var InputData
    else:
        G.InputData = input_data
    start=time.time()   # start counting execution time
    # read the input from the JSON file and create the line
    G.JSONData=json.loads(G.InputData)  # create the dictionary JSONData
    readGeneralInput()
    createObjectResourcesAndCoreObjects()
    createObjectInterruptions()
    setTopology()
    # run the experiment (replications)
    for i in xrange(G.numberOfReplications):
        G.env=simpy.Environment()   # initialize a fresh simulation environment for this replication
        G.maxSimTime=float(G.JSONData['general'].get('maxSimTime', '100'))  # read the maxSimTime in each replication
                                                                            # since it may be changed for infinite ones
        # reset the router state so it is re-initialized/re-activated in this replication
        if G.RouterList:
            G.RouterList[0].isActivated=False
            G.RouterList[0].isInitialized=False
        # seed the random generators; with a fixed seed the stream still varies per replication index i
        if G.seed:
            G.Rnd=Random('%s%s' % (G.seed, i))
            G.numpyRnd.random.seed(G.seed)
        else:
            G.Rnd=Random()
            G.numpyRnd.random.seed()
        createWIP()
        initializeObjects()
        Globals.setWIP(G.EntityList)
        activateObjects()
        # if the simulation is ran until no more events are scheduled,
        # then we have to find the end time as the time the last entity ended.
        if G.maxSimTime==-1:
            # If someone does it for a model that has always events, then it will run forever!
            G.env.run(until=float('inf'))
            # identify from the exits what is the time that the last entity has ended.
            endList=[]
            for exit in G.ExitList:
                endList.append(exit.timeLastEntityLeft)
            # identify the time of the last event
            if float(max(endList))!=0 and G.env.now==float('inf'):  # do not let G.maxSimTime=0 so that there will be no crash
                G.maxSimTime=float(max(endList))
            else:
                print "simulation ran for 0 time, something may have gone wrong"
                logger.info("simulation ran for 0 time, something may have gone wrong")
        # else we simulate until the given maxSimTime
        else:
            G.env.run(until=G.maxSimTime)
        # carry on the post processing operations for every object in the topology
        for element in G.ObjList+G.ObjectResourceList+G.RouterList:
            element.postProcessing()
        # added for debugging, print the Route of the Jobs on the same G.traceFile
        PrintRoute.outputRoute()
        # output trace to excel (the encoded trace is reused after the loop)
        if(G.trace=="Yes"):
            ExcelHandler.outputTrace('trace'+str(i))
            import StringIO
            traceStringIO = StringIO.StringIO()
            G.traceFile.save(traceStringIO)
            encodedTrace=traceStringIO.getvalue().encode('base64')
            ExcelHandler.resetTrace()
    G.outputJSON['_class'] = 'Dream.Simulation';
    G.outputJSON['general'] ={};
    G.outputJSON['general']['_class'] = 'Dream.Configuration';
    G.outputJSON['general']['totalExecutionTime'] = (time.time()-start);
    G.outputJSON['elementList'] =[];
    # output data to JSON for every object in the topology
    for object in G.ObjectResourceList + G.EntityList + G.ObjList+G.RouterList:
        object.outputResultsJSON()
    # output the trace as encoded if it is set on
    if G.trace=="Yes":
        # XXX discuss names on this
        jsonTRACE = {'_class': 'Dream.Simulation',
                     'id': 'TraceFile',
                     'results': {'trace':encodedTrace}
                     }
        G.outputJSON['elementList'].append(jsonTRACE)
    outputJSONString=json.dumps(G.outputJSON, indent=True)
    if 0:
        # disabled debug dump of the output JSON to a local file
        G.outputJSONFile=open('outputJSON.json', mode='w')
        G.outputJSONFile.write(outputJSONString)
    if not input_data:
        # Output on stdout
        print outputJSONString
        # XXX I am not sure we still need this case
        return
    # XXX result_list is not needed here, we could replace result by result_list
    G.JSONData['result'] = {'result_list': [G.outputJSON]}
    #logger.info("execution time="+str(time.time()-start))
    return json.dumps(G.JSONData, indent=True)
def createWIP():
    """Create the work-in-process entities described in the input JSON.

    Orders are read either from the BOM section (input.BOM.productionOrders)
    or from the per-node 'wip' lists under graph.node.  Every entry is
    instantiated as the Entity subclass named by its '_class' and registered
    in the corresponding global G lists.  For 'Dream.Order' entries an
    abstract Order plus a concrete OrderDesign (which runs through the system
    in the order's place) are created.

    Fix: the route-scanning loop used to reuse the name `element`, clobbering
    the node dict that later WIP entities of the same node still read
    (`element['id']`); the loop variable is now `routeStep`.
    """
    # reset the global entity lists
    G.JobList=[]
    G.WipList=[]
    G.EntityList=[]
    G.PartList=[]
    G.OrderComponentList=[]
    G.DesignList=[]     # list of the OrderDesigns in the system
    G.OrderList=[]
    G.MouldList=[]
    G.BatchList=[]
    G.SubBatchList=[]
    G.CapacityEntityList=[]
    G.CapacityProjectList=[]
    # entities that just finished processing in a station
    # and have to enter the next machine
    G.pendingEntities=[]
    # read the json data
    json_data = G.JSONData
    # read from the dictionary the dicts with key 'BOM' (if there are any)
    inputSection=json_data.get('input',{})      # renamed from `input` to avoid shadowing the builtin
    bom=inputSection.get('BOM',None)
    if bom:
        orders=bom.get('productionOrders',[])
        # for every order in the productionOrders list
        for prodOrder in orders:
            orderClass=prodOrder.get('_class',None)
            orderType=Globals.getClassFromName(orderClass)
            # make sure that their type is Dream.Order
            if orderClass=='Dream.Order':
                id=prodOrder.get('id', 'not found')
                name=prodOrder.get('name', 'not found')
                priority=int(prodOrder.get('priority', '0'))
                dueDate=float(prodOrder.get('dueDate', '0'))
                orderDate=float(prodOrder.get('orderDate', '0'))
                isCritical=bool(int(prodOrder.get('isCritical', '0')))
                componentsReadyForAssembly = bool(prodOrder.get('componentsReadyForAssembly', False))
                componentsList=prodOrder.get('componentsList', {})
                # keep a reference of all extra properties passed to the job
                extraPropertyDict = {}
                for key, value in prodOrder.items():
                    if key not in ('_class', 'id'):
                        extraPropertyDict[key] = value
                # initiate the Order
                O=Order('G'+id, 'general '+name, route=[], priority=priority, dueDate=dueDate,
                        orderDate=orderDate, isCritical=isCritical, componentsList=componentsList,
                        componentsReadyForAssembly=componentsReadyForAssembly,
                        extraPropertyDict=extraPropertyDict)
                G.OrderList.append(O)
            else:
                # any other Entity subclass is instantiated directly from its dict
                productionOrderClass=prodOrder.get('_class', None)
                productionOrderType=Globals.getClassFromName(productionOrderClass)
                inputDict=dict(prodOrder)
                inputDict.pop('_class')
                from dream.simulation.Entity import Entity
                if issubclass(productionOrderType, Entity):
                    entity=productionOrderType(**inputDict)
                    G.EntityList.append(entity)
    # read from the dictionary the dicts with key 'nodes'
    nodes = json_data["graph"]['node']
    for (element_id, element) in nodes.iteritems():
        element['id'] = element_id
        wip=element.get('wip', [])
        from dream.simulation.OrderDesign import OrderDesign
        for entity in wip:
            # if there is a BOM defined
            if bom:
                # and production orders in it
                if bom.get('productionOrders',[]):
                    # find which order has the entity in its componentsList
                    for order in G.OrderList:
                        if order.componentsList:
                            for componentDict in order.componentsList:
                                # if the entity has no parent order the following control will not be performed
                                if entity['id']==componentDict['id']:
                                    entityCurrentSeq=int(entity['sequence'])    # the current seq number of the entity's route
                                    entityRemainingProcessingTime=entity.get('remainingProcessingTime',{})
                                    entityRemainingSetupTime=entity.get('remainingSetupTime',{})
                                    ind=0               # holder of the route index corresponding to the entityCurrentSeq
                                    solution=False      # flag to signal that the route step is found
                                    # find the step that corresponds to the entityCurrentSeq
                                    for i, step in enumerate(componentDict.get('route',[])):
                                        stepSeq=step['sequence']    # the sequence of step i
                                        if stepSeq=='':
                                            stepSeq=0               # if the seq is ''>OrderDecomposition then 0
                                        # if the entityCurrentSeq is found and the id of the holding station is in the step's stationIdsList
                                        if int(stepSeq)==entityCurrentSeq and element['id'] in step['stationIdsList']:
                                            ind=i                   # hold the index
                                            solution=True           # the solution is found
                                            break
                                    # assert that there is a solution
                                    assert solution, 'something is wrong with the initial step of '+entity['id']
                                    # the remaining route of the entity assuming that the given route doesn't start from the entityCurrentSeq
                                    entityRoute=componentDict.get('route',[])[ind:]
                                    entity=dict(componentDict)      # copy the entity dict
                                    entity.pop('route')             # remove the old route
                                    entity['route']=entityRoute     # and hold the new one without the previous steps
                                    entity['order']=order.id
                                    entity['remainingProcessingTime']=entityRemainingProcessingTime
                                    entity['remainingSetupTime']=entityRemainingSetupTime
                                    break
            entityClass=entity.get('_class', None)
            entityType=Globals.getClassFromName(entityClass)
            inputDict=dict(entity)
            inputDict.pop('_class')
            from dream.simulation.Entity import Entity
            if issubclass(entityType, Entity) and (not entityClass=='Dream.Order'):
                # if orders are provided separately (BOM) provide the parent order as argument
                if entity.get('order',None):
                    entityOrder=Globals.findObjectById(entity['order'])
                    inputDict.pop('order')
                    entity=entityType(order=entityOrder,**inputDict)
                    entity.routeInBOM=True
                else:
                    entity=entityType(**inputDict)
                G.EntityList.append(entity)
                # place the entity in the station that currently holds it
                holderObject=Globals.findObjectById(element['id'])  # renamed from `object` to avoid shadowing the builtin
                entity.currentStation=holderObject
            # ToDo order is to be defined in a new way
            if entityClass=='Dream.Order':
                id=entity.get('id', 'not found')
                name=entity.get('name', 'not found')
                priority=int(entity.get('priority', '0'))
                dueDate=float(entity.get('dueDate', '0'))
                orderDate=float(entity.get('orderDate', '0'))
                isCritical=bool(int(entity.get('isCritical', '0')))
                basicsEnded=bool(int(entity.get('basicsEnded', '0')))
                componentsReadyForAssembly = bool(entity.get('componentsReadyForAssembly', False))
                # read the manager ID
                manager=entity.get('manager', None)
                # if a manager ID is assigned then search for the operator with the corresponding ID
                # and assign it as the manager of the order
                if manager:
                    for operator in G.OperatorsList:
                        if manager==operator.id:
                            manager=operator
                            break
                componentsList=entity.get('componentsList', {})
                JSONRoute=entity.get('route', [])   # the route from the JSON file is a sequence of dictionaries
                route = [x for x in JSONRoute]      # copy JSONRoute
                # keep a reference of all extra properties passed to the job
                extraPropertyDict = {}
                for key, value in entity.items():
                    if key not in ('_class', 'id'):
                        extraPropertyDict[key] = value
                # Below it is to assign an order decomposition if it was not assigned in JSON
                # have to talk about it with NEX
                odAssigned=False
                # BUGFIX: loop variable renamed from `element`; reusing `element` clobbered
                # the node dict that later iterations of the wip loop still read
                for routeStep in route:
                    stepStationIds = routeStep.get('stationIdsList',[])
                    for obj in G.ObjList:
                        for stationId in stepStationIds:
                            if obj.id==stationId and obj.type=='OrderDecomposition':
                                odAssigned=True
                if not odAssigned:
                    odId=None
                    for obj in G.ObjList:
                        if obj.type=='OrderDecomposition':
                            odId=obj.id
                            break
                    if odId:
                        # route.append([odId, 0])
                        route.append({'stationIdsList':[odId],
                                      'processingTime':
                                          {'distributionType':'Fixed',
                                           'mean':'0'}})
                # XXX dirty way to implement new approach were the order is abstract and does not run through the system
                # but the OrderDesign does
                # XXX initiate the Order and the OrderDesign
                O=Order('G'+id, 'general '+name, route=[], priority=priority, dueDate=dueDate,
                        orderDate=orderDate, isCritical=isCritical, basicsEnded=basicsEnded,
                        manager=manager, componentsList=componentsList,
                        componentsReadyForAssembly=componentsReadyForAssembly,
                        extraPropertyDict=extraPropertyDict)
                # create the OrderDesign
                OD=OrderDesign(id, name, route, priority=priority, dueDate=dueDate, orderDate=orderDate,
                               isCritical=isCritical, order=O, extraPropertyDict=extraPropertyDict)
                # add the order to the OrderList
                G.OrderList.append(O)
                # add the OrderDesign to the DesignList and the OrderComponentList
                G.OrderComponentList.append(OD)
                G.DesignList.append(OD)
                G.WipList.append(OD)
                G.EntityList.append(OD)
                G.JobList.append(OD)
def calculateWhatIsToBeProcessed(self):
    """Allocate the available capacity of every capacity station to the
    entities waiting in its buffer for the current interval.

    Buffers of stations that share resources are handled together.  The
    candidate entities are sorted by the due date of their project (EDD) and
    split into a within-threshold and an outside-threshold group.  If the
    available capacity covers every within-threshold request, those entities
    are flagged to move and allocation continues with the remaining capacity;
    otherwise the entities are split via breakEntity() so that part of each
    can proceed.

    Side effects: sets entity.shouldMove, may create new CapacityEntity
    objects (through breakEntity) and consumes the station's assembly space.

    Fix: removed the dead local `pr = False`, which was assigned at the top
    of the buffer loop and never read.
    """
    import dream.simulation.Globals as Globals
    # calculate what space is available
    availableSpace=self.assemblySpace-self.calculateConsumedSpace()
    assert availableSpace>=0, 'negative available space'
    # buffers that are already considered (due to shared resources)
    alreadyConsideredBuffers = []
    # loop through the capacity station buffers
    for buffer in G.CapacityStationBufferList:
        # if the buffer was considered before (due to shared resources) continue
        if buffer in alreadyConsideredBuffers:
            continue
        alreadyConsideredBuffers.append(buffer)
        sharedBuffers = []
        station=buffer.next[0]      # the station this buffer feeds
        # find the stations that share resources with the one considered now
        if station.sharedResources:
            sharedStations = station.sharedResources.get('stationIds',[])
            for element in sharedStations:
                s = Globals.findObjectById(element)
                b = s.previous[0]
                sharedBuffers.append(b)
        activeObjectQueue=buffer.getActiveObjectQueue()
        # the entities considered are those in the current buffer plus the ones
        # in buffers of stations that share resources with the current one
        entitiesConsidered=list(activeObjectQueue)
        for b in sharedBuffers:
            entitiesConsidered+=b.getActiveObjectQueue()
            alreadyConsideredBuffers.append(b)
        # sort entities according to the due date of the project each belongs to
        entitiesConsidered.sort(key=lambda x: x.capacityProject.dueDate)
        # the available capacity of the station for this interval
        totalAvailableCapacity=station.remainingIntervalCapacity[0]
        # entities that have not been allocated yet
        entitiesNotAllocated=list(entitiesConsidered)
        allCapacityConsumed=False
        # if there is no available capacity there is nothing to allocate
        if totalAvailableCapacity==0:
            continue
        while not allCapacityConsumed:
            # entities within a threshold from the EDD
            entitiesWithinThreshold=[]
            # entities outside a threshold from the EDD
            entitiesOutsideThreshold=[]
            # get the EDD over the entities that can actually enter their station
            EDD=float('inf')
            for entity in entitiesNotAllocated:
                entityBuffer=entity.currentStation
                entityStation=entity.currentStation.next[0]
                # consider only projects that can get into the station
                if self.checkIfProjectCanStartInStation(entity.capacityProject, entityStation) and \
                        (not self.checkIfProjectNeedsToBeAssembled(entity.capacityProject, entityBuffer)) and \
                        self.checkIfThereIsEnoughSpace(entity, entityBuffer, availableSpace):
                    if EDD>entity.capacityProject.dueDate:
                        EDD=entity.capacityProject.dueDate
            # put the entities in the corresponding list according to their due date
            for entity in entitiesNotAllocated:
                if entity.capacityProject.dueDate-EDD<=self.dueDateThreshold:
                    entitiesWithinThreshold.append(entity)
                else:
                    entitiesOutsideThreshold.append(entity)
            # calculate the total capacity that is requested
            totalRequestedCapacity=0
            # do not count projects that cannot move due to space limitations,
            # so check against the space still considered free
            consideredSpace=float(availableSpace)
            for entity in entitiesWithinThreshold:
                # buffer where the entity is and the station it requests to get in
                entityBuffer=entity.currentStation
                entityStation=entity.currentStation.next[0]
                if self.checkIfProjectCanStartInStation(entity.capacityProject, entityStation) and \
                        (not self.checkIfProjectNeedsToBeAssembled(entity.capacityProject, entityBuffer)) and \
                        self.checkIfThereIsEnoughSpace(entity, entityBuffer, consideredSpace):
                    if not self.checkIfProjectConsumesAssemblySpace(entity, entityBuffer):
                        consideredSpace-=entity.capacityProject.assemblySpaceRequirement
                    totalRequestedCapacity+=entity.requiredCapacity
            # if there is enough capacity for all the entities, flag them all to move
            if totalRequestedCapacity<=totalAvailableCapacity:
                availableCapacity=float(totalAvailableCapacity)
                for entity in entitiesWithinThreshold:
                    # buffer where the entity is and the station it requests to get in
                    entityBuffer=entity.currentStation
                    entityStation=entity.currentStation.next[0]
                    if self.checkIfProjectCanStartInStation(entity.capacityProject, entityStation) and \
                            (not self.checkIfProjectNeedsToBeAssembled(entity.capacityProject, entityBuffer)) and \
                            self.checkIfThereIsEnoughSpace(entity, entityBuffer, availableSpace) and \
                            entity.requiredCapacity<=availableCapacity:
                        entity.shouldMove=True
                        availableCapacity-=entity.requiredCapacity
                        assert availableCapacity>=0, 'negative available capacity'
                        # reduce the available space if there is need to
                        if entityBuffer.requireFullProject and \
                                (not self.checkIfProjectConsumesAssemblySpace(entity, entityBuffer)):
                            availableSpace-=entity.capacityProject.assemblySpaceRequirement
                            assert availableSpace>=0, 'negative available space'
                        # remove the entity from the not yet allocated ones
                        entitiesNotAllocated.remove(entity)
                # check if all the capacity is consumed to update the flag and break the loop
                if totalRequestedCapacity==totalAvailableCapacity:
                    # the capacity will be 0 since we consumed it all
                    totalAvailableCapacity=0
                    allCapacityConsumed=True
                # if we still have available capacity
                else:
                    # check in the entities outside the threshold if there is one or more that can be moved
                    haveMoreEntitiesToAllocate=False
                    for entity in entitiesOutsideThreshold:
                        # buffer where the entity is and the station it requests to get in
                        entityBuffer=entity.currentStation
                        entityStation=entity.currentStation.next[0]
                        if self.checkIfProjectCanStartInStation(entity.capacityProject, entityStation) and \
                                (not self.checkIfProjectNeedsToBeAssembled(entity.capacityProject, entityBuffer)) and \
                                self.checkIfThereIsEnoughSpace(entity, entityBuffer, availableSpace):
                            haveMoreEntitiesToAllocate=True
                            break
                    # otherwise we have to calculate the capacity for the next loop:
                    # the remaining capacity is decreased by the one that was originally requested
                    totalAvailableCapacity-=totalRequestedCapacity
                    # if there are no more entities to allocate, stop
                    if not haveMoreEntitiesToAllocate:
                        break
                    if station.notProcessOutsideThreshold:
                        break
            # else calculate the capacity for every entity and create the (split) entities
            else:
                allCapacityConsumed=True
                entitiesToBeBroken=list(entitiesWithinThreshold)
                # sort so the entities that can finish in the current period (if any) go first
                entitiesToBeBroken.sort(
                    key=lambda x: self.checkIfAProjectCanBeFinishedInStation(
                        x, x.currentStation.next[0], totalAvailableCapacity)
                    and self.prioritizeIfCanFinish,
                    reverse=True)
                # loop through the entities
                for entity in entitiesToBeBroken:
                    # buffer where the entity is and the station it requests to get in
                    entityBuffer=entity.currentStation
                    entityStation=entity.currentStation.next[0]
                    # consider only entities that can move - not those waiting for assembly or earliest start
                    if self.checkIfProjectCanStartInStation(entity.capacityProject, entityStation) and \
                            (not self.checkIfProjectNeedsToBeAssembled(entity.capacityProject, entityBuffer)) and \
                            self.checkIfThereIsEnoughSpace(entity, entityBuffer, availableSpace):
                        # if we prioritize an entity that can completely finish then check for this
                        if self.checkIfAProjectCanBeFinishedInStation(entity, entityStation, totalAvailableCapacity) \
                                and self.prioritizeIfCanFinish:
                            # set that the entity can move
                            entity.shouldMove=True
                            # reduce the available space if there is need to
                            if entityBuffer.requireFullProject and \
                                    (not self.checkIfProjectConsumesAssemblySpace(entity, entityBuffer)):
                                availableSpace-=entity.capacityProject.assemblySpaceRequirement
                                assert availableSpace>=0, 'negative available space'
                            # update the values
                            totalAvailableCapacity-=entity.requiredCapacity
                            totalRequestedCapacity-=entity.requiredCapacity
                        # else break the entity according to the rule
                        else:
                            if self.breakEntity(entity, entityBuffer, entityStation,
                                                totalAvailableCapacity, totalRequestedCapacity):
                                # reduce the available space if there is need to
                                if entityBuffer.requireFullProject and \
                                        (not self.checkIfProjectConsumesAssemblySpace(entity, entityBuffer)):
                                    availableSpace-=entity.capacityProject.assemblySpaceRequirement
                                    assert availableSpace>=0, 'negative available space'
def calculateWhatIsToBeProcessed(self):
    """Allocate the available capacity of every capacity station to the
    entities waiting in its buffer for the current interval.

    Buffers of stations that share resources are handled together.  The
    candidate entities are sorted by the due date of their project (EDD) and
    split into a within-threshold and an outside-threshold group.  If the
    available capacity covers every within-threshold request, those entities
    are flagged to move; otherwise the entities are split via breakEntity()
    so that part of each can proceed.

    Side effects: sets entity.shouldMove, may create new CapacityEntity
    objects (through breakEntity) and consumes the station's assembly space.
    """
    import dream.simulation.Globals as Globals
    # calculate what space is available
    availableSpace = self.assemblySpace - self.calculateConsumedSpace()
    assert availableSpace >= 0, 'negative available space'
    # list to hold the buffers that are already considered (due to shared resources)
    alreadyConsideredBuffers = []
    # loop through the capacity station buffers
    for buffer in G.CapacityStationBufferList:
        # if the buffer was considered before (due to shared resources) continue
        if buffer in alreadyConsideredBuffers:
            continue
        alreadyConsideredBuffers.append(buffer)
        sharedBuffers = []
        station = buffer.next[0]  # get the station
        # find the stations that share resources with the one considered now
        if station.sharedResources:
            sharedStations = station.sharedResources.get('stationIds', [])
            for element in sharedStations:
                s = Globals.findObjectById(element)
                b = s.previous[0]
                sharedBuffers.append(b)
        activeObjectQueue = buffer.getActiveObjectQueue()
        # the entities considered should be the entities in the current buffer plus the ones in buffers
        # of stations that share resources with the current one
        entitiesConsidered = list(activeObjectQueue)
        for b in sharedBuffers:
            entitiesConsidered += b.getActiveObjectQueue()
            alreadyConsideredBuffers.append(b)
        # sort entities according to due date of the project that each belongs to
        entitiesConsidered.sort(key=lambda x: x.capacityProject.dueDate)
        totalAvailableCapacity = station.remainingIntervalCapacity[
            0]  # get the available capacity of the station
        # for this interval
        # list to keep entities that have not been already allocated
        entitiesNotAllocated = list(entitiesConsidered)
        allCapacityConsumed = False
        # if there is no available capacity no need to do anything
        if totalAvailableCapacity == 0:
            continue
        while not allCapacityConsumed:
            # list to keep entities that are within a threshold from the EDD
            entitiesWithinThreshold = []
            # list to keep entities that are outside a threshold from the EDD
            entitiesOutsideThreshold = []
            # get the EDD
            EDD = float('inf')
            for entity in entitiesNotAllocated:
                entityBuffer = entity.currentStation
                entityStation = entity.currentStation.next[0]
                # consider only projects that can get into station
                if self.checkIfProjectCanStartInStation(entity.capacityProject, entityStation) and\
                        (not self.checkIfProjectNeedsToBeAssembled(entity.capacityProject, entityBuffer))\
                        and self.checkIfThereIsEnoughSpace(entity, entityBuffer, availableSpace):
                    if EDD > entity.capacityProject.dueDate:
                        EDD = entity.capacityProject.dueDate
            # put the entities in the corresponding list according to their due date
            for entity in entitiesNotAllocated:
                if entity.capacityProject.dueDate - EDD <= self.dueDateThreshold:
                    entitiesWithinThreshold.append(entity)
                else:
                    entitiesOutsideThreshold.append(entity)
            # calculate the total capacity that is requested
            totalRequestedCapacity = 0
            # do not to count projects that cannot move due to space limitations
            # so check according to considered capacity
            consideredSpace = float(availableSpace)
            for entity in entitiesWithinThreshold:
                # get buffer where the entity is and the station it requests to get in
                entityBuffer = entity.currentStation
                entityStation = entity.currentStation.next[0]
                if self.checkIfProjectCanStartInStation(entity.capacityProject, entityStation) and\
                        (not self.checkIfProjectNeedsToBeAssembled(entity.capacityProject, entityBuffer))\
                        and self.checkIfThereIsEnoughSpace(entity, entityBuffer, consideredSpace):
                    # NOTE(review): the space is subtracted for every candidate here, without the
                    # checkIfProjectConsumesAssemblySpace guard used further down — confirm intended
                    consideredSpace -= entity.capacityProject.assemblySpaceRequirement
                    totalRequestedCapacity += entity.requiredCapacity
            # if there is enough capacity for all the entities set them that they all should move
            if totalRequestedCapacity <= totalAvailableCapacity:
                for entity in entitiesWithinThreshold:
                    # get buffer where the entity is and the station it requests to get in
                    entityBuffer = entity.currentStation
                    entityStation = entity.currentStation.next[0]
                    if self.checkIfProjectCanStartInStation(entity.capacityProject, entityStation) and\
                            (not self.checkIfProjectNeedsToBeAssembled(entity.capacityProject, entityBuffer))\
                            and self.checkIfThereIsEnoughSpace(entity, entityBuffer, availableSpace):
                        entity.shouldMove = True
                        # reduce the available space if there is need to
                        if entityBuffer.requireFullProject and \
                                (not self.checkIfProjectConsumesAssemblySpace(entity, entityBuffer)):
                            availableSpace -= entity.capacityProject.assemblySpaceRequirement
                            assert availableSpace >= 0, 'negative available space'
                        # remove the entity from the none allocated ones
                        entitiesNotAllocated.remove(entity)
                # check if all the capacity is consumed to update the flag and break the loop
                if totalRequestedCapacity == totalAvailableCapacity:
                    # the capacity will be 0 since we consumed it all
                    totalAvailableCapacity = 0
                    allCapacityConsumed = True
                # if we still have available capacity
                else:
                    # check in the entities outside the threshold if there is one or more that can be moved
                    haveMoreEntitiesToAllocate = False
                    for entity in entitiesOutsideThreshold:
                        # get buffer where the entity is and the station it requests to get in
                        entityBuffer = entity.currentStation
                        entityStation = entity.currentStation.next[0]
                        if self.checkIfProjectCanStartInStation(entity.capacityProject, entityStation) and\
                                (not self.checkIfProjectNeedsToBeAssembled(entity.capacityProject, entityBuffer))\
                                and self.checkIfThereIsEnoughSpace(entity, entityBuffer, availableSpace):
                            haveMoreEntitiesToAllocate = True
                            break
                    # otherwise we have to calculate the capacity for next loop
                    # the remaining capacity will be decreased by the one that was originally requested
                    totalAvailableCapacity -= totalRequestedCapacity
                    # if we have more entities break
                    if not haveMoreEntitiesToAllocate:
                        break
                    if station.notProcessOutsideThreshold:
                        break
            # else calculate the capacity for every entity and create the entities
            else:
                allCapacityConsumed = True
                entitiesToBeBroken = list(entitiesWithinThreshold)
                # we sort the entities so the ones that can finish in current period (if any) go in front
                entitiesToBeBroken.sort(key=lambda \
                    x: self.checkIfAProjectCanBeFinishedInStation(x,x.currentStation.next[0], totalAvailableCapacity) \
                    and self.prioritizeIfCanFinish, reverse=True)
                # loop through the entities
                for entity in entitiesToBeBroken:
                    # get buffer where the entity is and the station it requests to get in
                    entityBuffer = entity.currentStation
                    entityStation = entity.currentStation.next[0]
                    # consider only entities that can move - not those waiting for assembly or earliest start
                    if self.checkIfProjectCanStartInStation(entity.capacityProject, entityStation) and\
                            (not self.checkIfProjectNeedsToBeAssembled(entity.capacityProject, entityBuffer)) and\
                            self.checkIfThereIsEnoughSpace(entity, entityBuffer, availableSpace):
                        # if we prioritize an entity that can completely finish then check for this
                        if self.checkIfAProjectCanBeFinishedInStation(entity, entityStation, totalAvailableCapacity)\
                                and self.prioritizeIfCanFinish:
                            # set that the entity can move
                            entity.shouldMove = True
                            # reduce the available space if there is need to
                            if entityBuffer.requireFullProject and \
                                    (not self.checkIfProjectConsumesAssemblySpace(entity, entityBuffer)):
                                availableSpace -= entity.capacityProject.assemblySpaceRequirement
                                assert availableSpace >= 0, 'negative available space'
                            # update the values
                            totalAvailableCapacity -= entity.requiredCapacity
                            totalRequestedCapacity -= entity.requiredCapacity
                        # else break the entity according to rule
                        else:
                            if self.breakEntity(entity, entityBuffer, entityStation,
                                                totalAvailableCapacity, totalRequestedCapacity):
                                # reduce the available space if there is need to
                                if entityBuffer.requireFullProject and \
                                        (not self.checkIfProjectConsumesAssemblySpace(entity, entityBuffer)):
                                    availableSpace -= entity.capacityProject.assemblySpaceRequirement
                                    assert availableSpace >= 0, 'negative available space'
def createWIP():
    """Create the work-in-process entities listed under each node's 'wip'
    in the input JSON and register them in the global G lists.  For
    'Dream.Order' entries an abstract Order plus a concrete OrderDesign
    (which runs through the system in the order's place) are created.

    Fixes:
    - componentsReadyForAssembly defaulted to the string '0', which is truthy
      (bool('0') is True), so a missing key wrongly yielded True; the default
      is now False, consistent with the other revision of this function.
    - the route-scanning loop no longer reuses the name `element`, which
      clobbered the node dict that later WIP entities of the same node read.
    """
    # reset the global entity lists
    G.JobList=[]
    G.WipList=[]
    G.EntityList=[]
    G.PartList=[]
    G.OrderComponentList=[]
    G.DesignList=[]     # list of the OrderDesigns in the system
    G.OrderList=[]
    G.MouldList=[]
    G.BatchList=[]
    G.SubBatchList=[]
    G.CapacityEntityList=[]
    G.CapacityProjectList=[]
    # entities that just finished processing in a station
    # and have to enter the next machine
    G.pendingEntities=[]
    # read the json data
    json_data = G.JSONData
    # read from the dictionary the dicts with key 'nodes'
    nodes = json_data['nodes']
    for (element_id, element) in nodes.iteritems():
        element['id'] = element_id
        wip=element.get('wip', [])
        from dream.simulation.OrderDesign import OrderDesign
        from Order import Order
        for entity in wip:
            entityClass=entity.get('_class', None)
            entityType=Globals.getClassFromName(entityClass)
            inputDict=dict(entity)
            inputDict.pop('_class')
            from dream.simulation.Entity import Entity
            # any Entity subclass other than Dream.Order is instantiated directly
            if issubclass(entityType, Entity) and (not entityClass=='Dream.Order'):
                entity=entityType(**inputDict)
                G.EntityList.append(entity)
                # place the entity in the station that currently holds it
                holderObject=Globals.findObjectById(element['id'])  # renamed from `object` to avoid shadowing the builtin
                entity.currentStation=holderObject
            # ToDo order is to be defined in a new way
            if entityClass=='Dream.Order':
                id=entity.get('id', 'not found')
                name=entity.get('name', 'not found')
                priority=int(entity.get('priority', '0'))
                dueDate=float(entity.get('dueDate', '0'))
                orderDate=float(entity.get('orderDate', '0'))
                isCritical=bool(int(entity.get('isCritical', '0')))
                basicsEnded=bool(int(entity.get('basicsEnded', '0')))
                # BUGFIX: default was the truthy string '0'; use False so a missing key means "not ready"
                componentsReadyForAssembly = bool(entity.get('componentsReadyForAssembly', False))
                # read the manager ID
                manager=entity.get('manager', None)
                # if a manager ID is assigned then search for the operator with the corresponding ID
                # and assign it as the manager of the order
                if manager:
                    for operator in G.OperatorsList:
                        if manager==operator.id:
                            manager=operator
                            break
                componentsList=entity.get('componentsList', {})
                JSONRoute=entity.get('route', [])   # the route from the JSON file is a sequence of dictionaries
                route = [x for x in JSONRoute]      # copy JSONRoute
                # keep a reference of all extra properties passed to the job
                extraPropertyDict = {}
                for key, value in entity.items():
                    if key not in ('_class', 'id'):
                        extraPropertyDict[key] = value
                # Below it is to assign an order decomposition if it was not assigned in JSON
                # have to talk about it with NEX
                odAssigned=False
                # BUGFIX: loop variable renamed from `element`; reusing `element` clobbered
                # the node dict that later iterations of the wip loop still read
                for routeStep in route:
                    stepStationIds = routeStep.get('stationIdsList',[])
                    for obj in G.ObjList:
                        for stationId in stepStationIds:
                            if obj.id==stationId and obj.type=='OrderDecomposition':
                                odAssigned=True
                if not odAssigned:
                    odId=None
                    for obj in G.ObjList:
                        if obj.type=='OrderDecomposition':
                            odId=obj.id
                            break
                    if odId:
                        # route.append([odId, 0])
                        route.append({'stationIdsList':[odId],
                                      'processingTime':
                                          {'distributionType':'Fixed',
                                           'mean':'0'}})
                # XXX dirty way to implement new approach were the order is abstract and does not run through the system
                # but the OrderDesign does
                # XXX initiate the Order and the OrderDesign
                O=Order('G'+id, 'general '+name, route=[], priority=priority, dueDate=dueDate,
                        orderDate=orderDate, isCritical=isCritical, basicsEnded=basicsEnded,
                        manager=manager, componentsList=componentsList,
                        componentsReadyForAssembly=componentsReadyForAssembly,
                        extraPropertyDict=extraPropertyDict)
                # create the OrderDesign
                OD=OrderDesign(id, name, route, priority=priority, dueDate=dueDate, orderDate=orderDate,
                               isCritical=isCritical, order=O, extraPropertyDict=extraPropertyDict)
                # add the order to the OrderList
                G.OrderList.append(O)
                # add the OrderDesign to the DesignList and the OrderComponentList
                G.OrderComponentList.append(OD)
                G.DesignList.append(OD)
                G.WipList.append(OD)
                G.EntityList.append(OD)
                G.JobList.append(OD)
def main(argv=[], input_data=None):
    """Entry point: build the model from JSON, run the replications and
    return (or print) the results as a JSON string.

    argv       -- command line arguments; argv[0] is the topology filename
                  (only used when input_data is None).  NOTE(review): mutable
                  default is harmless here since argv is never mutated.
    input_data -- the model JSON as a string; when given, no file is read and
                  the result is returned instead of printed.

    Returns "ERROR" when the input file cannot be opened, None when printing
    to stdout, otherwise the full JSON result string.
    """
    argv = argv or sys.argv[1:]
    # create an empty list to store all the objects in
    G.ObjList = []
    G.RouterList = []
    if input_data is None:
        # user passes the topology filename as first argument to the program
        filename = argv[0]
        try:
            # try to open the file with the inputs
            G.JSONFile = open(filename, "r")    # global variable holding the file to be opened
        except IOError:
            print "%s could not be open" % filename
            return "ERROR"
        # NOTE(review): G.JSONFile is never closed after this read — TODO confirm
        # nothing else reads it and close it here.
        G.InputData = G.JSONFile.read()     # pass the contents of the input file to the global var InputData
    else:
        G.InputData = input_data
    start = time.time()     # start counting execution time
    # read the input from the JSON file and create the line
    G.JSONData = json.loads(G.InputData)    # create the dictionary JSONData
    readGeneralInput()
    createObjectResourcesAndCoreObjects()
    createObjectInterruptions()
    setTopology()
    # run the experiment (replications)
    for i in xrange(G.numberOfReplications):
        G.env = simpy.Environment()         # initialize the environment
        # read the maxSimTime in each replication since it may be changed for infinite ones
        G.maxSimTime = float(G.JSONData['general'].get('maxSimTime', '100'))
        # reset the router state so each replication starts clean
        if G.RouterList:
            G.RouterList[0].isActivated = False
            G.RouterList[0].isInitialized = False
        # seed the random generators; a fixed seed is combined with the
        # replication index so replications differ but stay reproducible
        if G.seed:
            G.Rnd = Random('%s%s' % (G.seed, i))
            G.numpyRnd.random.seed(G.seed)
        else:
            G.Rnd = Random()
            G.numpyRnd.random.seed()
        createWIP()
        initializeObjects()
        Globals.setWIP(G.EntityList)
        activateObjects()
        # if the simulation is ran until no more events are scheduled,
        # then we have to find the end time as the time the last entity ended.
        if G.maxSimTime == -1:
            # If someone does it for a model that has always events, then it will run forever!
            G.env.run(until=float('inf'))
            # identify from the exits what is the time that the last entity has ended.
            endList = []
            for exit in G.ExitList:
                endList.append(exit.timeLastEntityLeft)
            # identify the time of the last event
            if float(max(endList)) != 0 and G.env.now == float('inf'):
                # do not let G.maxSimTime=0 so that there will be no crash
                G.maxSimTime = float(max(endList))
            else:
                print "simulation ran for 0 time, something may have gone wrong"
                logger.info("simulation ran for 0 time, something may have gone wrong")
        # else we simulate until the given maxSimTime
        else:
            G.env.run(until=G.maxSimTime)
        # carry on the post processing operations for every object in the topology
        for element in G.ObjList + G.ObjectResourceList + G.RouterList:
            element.postProcessing()
        # added for debugging, print the Route of the Jobs on the same G.traceFile
        PrintRoute.outputRoute()
        # output trace to excel, base64-encoded so it can travel inside the JSON result
        if (G.trace == "Yes"):
            ExcelHandler.outputTrace('trace' + str(i))
            import StringIO
            traceStringIO = StringIO.StringIO()
            G.traceFile.save(traceStringIO)
            encodedTrace = traceStringIO.getvalue().encode('base64')
            ExcelHandler.resetTrace()
    # assemble the output document after all replications are done
    G.outputJSON['_class'] = 'Dream.Simulation'
    G.outputJSON['general'] = {}
    G.outputJSON['general']['_class'] = 'Dream.Configuration'
    G.outputJSON['general']['totalExecutionTime'] = (time.time() - start)
    G.outputJSON['elementList'] = []
    # output data to JSON for every object in the topology
    for object in G.ObjectResourceList + G.EntityList + G.ObjList + G.RouterList:
        object.outputResultsJSON()
    # output the trace as encoded if it is set on
    # NOTE(review): encodedTrace would be unbound here if the replication loop
    # ran zero times while G.trace == "Yes" — TODO confirm numberOfReplications >= 1.
    if G.trace == "Yes":
        # XXX discuss names on this
        jsonTRACE = {
            '_class': 'Dream.Simulation',
            'id': 'TraceFile',
            'results': {
                'trace': encodedTrace
            }
        }
        G.outputJSON['elementList'].append(jsonTRACE)
    outputJSONString = json.dumps(G.outputJSON, indent=True)
    # disabled debug toggle: flip to 1 to also dump the result to a local file
    if 0:
        G.outputJSONFile = open('outputJSON.json', mode='w')
        G.outputJSONFile.write(outputJSONString)
    if not input_data:
        # Output on stdout
        print outputJSONString
        # XXX I am not sure we still need this case
        return
    # XXX result_list is not needed here, we could replace result by result_list
    G.JSONData['result'] = {'result_list': [G.outputJSON]}
    # logger.info("execution time="+str(time.time()-start))
    return json.dumps(G.JSONData, indent=True)
def createObjectResourcesAndCoreObjects():
    """Instantiate all model objects declared in the JSON graph.

    Three passes over G.JSONData['graph']['node']:
      1. ObjectResources (repairmen, operators...) except OperatorPools;
      2. OperatorPools (need the operators from pass 1);
      3. CoreObjects (machines, queues...), wiring successor id lists from
         the graph edges.
    Finally the predecessor ids of every core object are derived from the
    successor lists.  Works purely on module globals; returns None.
    """
    json_data = G.JSONData                  # read the json data
    # nodes = json_data['nodes']            # (old format) dicts with key 'nodes'
    nodes = json_data['graph']["node"]      # the dicts with key 'node'
    edges = json_data['graph']["edge"]      # the dicts with key 'edge'
    '''
    getSuccesorList method to get the successor list of object with ID = id
    XXX slow implementation
    '''
    def getSuccessorList(node_id, predicate=lambda source, destination, edge_class, edge_data: True):
        # scans every edge; the optional predicate filters by edge payload
        # (used below to separate 'Part' and 'Frame' successors for Dismantle)
        successor_list = []                 # dummy variable that holds the list to be returned
        for edge in edges.values():
            source = edge["source"]
            destination = edge["destination"]
            edge_class = edge["_class"]
            edge_data = edge.get("data", {})
            if source == node_id:           # for the node_id argument
                if predicate(source, destination, edge_class, edge_data):
                    # find its 'destinations' and append it to the successor list
                    successor_list.append(destination)
        # XXX We should probably not need to sort, but there is a bug that
        # prevents Topology10 to work if this sort is not used.
        successor_list.sort()
        return successor_list
    '''
    define the lists of each object type
    '''
    G.SourceList = []
    G.MachineList = []
    G.ExitList = []
    G.QueueList = []
    G.RepairmanList = []
    G.AssemblyList = []
    G.DismantleList = []
    G.ConveyerList = []
    G.MachineJobShopList = []
    G.QueueJobShopList = []
    G.ExitJobShopList = []
    G.BatchDecompositionList = []
    G.BatchSourceList = []
    G.BatchReassemblyList = []
    G.RoutingQueueList = []
    G.LineClearanceList = []
    G.EventGeneratorList = []
    G.OperatorsList = []
    G.OperatorManagedJobsList = []
    G.OperatorPoolsList = []
    G.BrokersList = []
    G.OperatedMachineList = []
    G.BatchScrapMachineList = []
    G.OrderDecompositionList = []
    G.ConditionalBufferList = []
    G.MouldAssemblyBufferList = []
    G.MouldAssemblyList = []
    G.MachineManagedJobList = []
    G.QueueManagedJobList = []
    G.ObjectResourceList = []
    G.CapacityStationBufferList = []
    G.AllocationManagementList = []
    G.CapacityStationList = []
    G.CapacityStationExitList = []
    G.CapacityStationControllerList = []
    '''
    loop through all the model resources
    search for repairmen and operators in order to create them
    read the data and create them
    '''
    for (element_id, element) in nodes.iteritems():     # use an iterator to go through all the nodes
        element['id'] = element_id          # create a new entry for the element (dictionary)
        element = element.copy()            # work on a copy so the popped keys stay in `nodes`
        for k in ('element_id', 'top', 'left'):
            element.pop(k, None)            # drop layout-only keys before using element as kwargs
        resourceClass = element.pop('_class')           # get the class type of the element
        objectType = Globals.getClassFromName(resourceClass)
        from dream.simulation.ObjectResource import ObjectResource
        # operator pools to be created later since they use operators
        # ToDo maybe it is semantically diferent object
        if issubclass(objectType, ObjectResource) and not resourceClass == 'Dream.OperatorPool':
            inputDict = dict(element)
            # create the CoreObject
            objectResource = objectType(**inputDict)
            # if there already coreObjectsIds defined then append the successors to them
            # NOTE(review): the inner loop deliberately shadows `element` with a
            # successor id string; the outer loop rebinds it next iteration.
            if objectResource.coreObjectIds:
                for element in getSuccessorList(element['id']):
                    if not element in objectResource.coreObjectIds:
                        objectResource.coreObjectIds.append(element)
            else:
                objectResource.coreObjectIds = getSuccessorList(element['id'])
    '''
    loop through all the model resources
    search for operatorPools in order to create them
    read the data and create them
    '''
    from dream.simulation.OperatorPool import OperatorPool
    for (element_id, element) in nodes.iteritems():     # use an iterator to go through all the nodes
        # the key is the element_id and the second is the element itself
        element = element.copy()
        element['id'] = element_id          # create a new entry for the element (dictionary)
        for k in ('element_id', 'top', 'left'):
            element.pop(k, None)            # with key 'id' and value the the element_id
        resourceClass = element.pop('_class')           # get the class type of the element
        if resourceClass == 'Dream.OperatorPool':
            id = element.get('id', 'not found')         # get the id of the element / default 'not_found'
            name = element.get('name', 'not found')     # get the name of the element / default 'not_found'
            capacity = int(element.get('capacity') or 1)
            operatorsList = []
            # find the operators assigned to the operatorPool
            for operator in G.OperatorsList:
                if id in operator.coreObjectIds:
                    operatorsList.append(operator)
            # operatorsList = element.get('operatorsList', 'not found')
            if len(operatorsList) == 0:
                # if the operatorsList is empty then assign no operators
                OP = OperatorPool(element_id, name, capacity)       # create a operatorPool object
            else:
                OP = OperatorPool(element_id, name, capacity, operatorsList)    # create a operatorPool object
            # update the list of objects that the operators of the operatorPool operate
            OP.coreObjectIds = getSuccessorList(id)
            for operator in operatorsList:
                operator.coreObjectIds = OP.coreObjectIds   # update the list of objects that the operators operate
            G.OperatorPoolsList.append(OP)  # add the operatorPool to the RepairmanList
    '''
    loop through all the elements
    read the data and create them
    '''
    for (element_id, element) in nodes.iteritems():
        element = element.copy()
        element['id'] = element_id
        element.setdefault('name', element_id)
        # XXX not sure about top & left.
        for k in ('element_id', 'top', 'left'):
            element.pop(k, None)
        objClass = element.pop('_class')
        objectType = Globals.getClassFromName(objClass)
        from dream.simulation.CoreObject import CoreObject
        if issubclass(objectType, CoreObject):
            # remove data that has to do with wip or object interruption.
            # CoreObjects do not need it
            inputDict = dict(element)
            # create the CoreObject
            coreObject = objectType(**inputDict)
            # update the nextIDs list of the object
            coreObject.nextIds = getSuccessorList(element['id'])
            # (Below is only for Dismantle for now)
            # get the successorList for the 'Parts'
            coreObject.nextPartIds = getSuccessorList(element['id'],
                lambda source, destination, edge_class, edge_data: edge_data.get('entity', {}) == 'Part')
            # get the successorList for the 'Frames'
            coreObject.nextFrameIds = getSuccessorList(element['id'],
                lambda source, destination, edge_class, edge_data: edge_data.get('entity', {}) == 'Frame')
    # loop through all the core objects to read predecessors
    for element in G.ObjList:
        # loop through all the nextIds of the object
        for nextId in element.nextIds:
            # loop through all the core objects to find the one that has the id
            # that was read in the successorList
            for possible_successor in G.ObjList:
                if possible_successor.id == nextId:
                    possible_successor.previousIds.append(element.id)
def createObjectInterruptions():
    """Create all ObjectInterruption instances declared in the JSON graph.

    Two passes over G.JSONData['graph']['node']:
      1. nodes whose '_class' is itself an ObjectInterruption subclass
         (e.g. EventGenerators) are instantiated directly;
      2. interruptions nested under each node's 'interruptions' dict
         (scheduledMaintenance, failure, periodicMaintenance, shift, break)
         are created with the node as victim.
    Works purely on module globals; returns None.
    """
    G.ObjectInterruptionList=[]
    G.ScheduledMaintenanceList=[]
    G.FailureList=[]
    G.BreakList=[]
    G.ShiftSchedulerList=[]
    G.EventGeneratorList=[]
    G.CapacityStationControllerList=[]
    G.PeriodicMaintenanceList=[]
    json_data = G.JSONData                  # read the json data
    nodes = json_data['graph']["node"]      # the dicts with key 'node'
    # loop through all the nodes to search for Event Generator and create them
    # this is put last, since the EventGenerator may take other objects as argument
    for (element_id, element) in nodes.iteritems():
        # the key is the element_id and the second is the element itself
        element['id'] = element_id          # create a new entry for the element (dictionary)
        from dream.simulation.ObjectInterruption import ObjectInterruption
        objClass = element.pop('_class')    # get the class type of the element
        objectType=Globals.getClassFromName(objClass)
        # from CoreObject import CoreObject
        # if issubclass(objectType, CoreObject):
        if issubclass(objectType,ObjectInterruption):   # check the object type
            inputDict=dict(element)
            # create the ObjectInterruption
            objectInterruption=objectType(**inputDict)
            # routers are handled separately and must not be activated twice
            if not 'OperatorRouter' in str(objectType):
                G.ObjectInterruptionList.append(objectInterruption)
    # search inside the nodes for encapsulated ObjectInterruptions (failures etc)
    # ToDo this will be cleaned a lot if we update the JSON notation:
    #   define ObjectInterruption echelon inside node
    #   define interruptions' distribution better
    from dream.simulation.ScheduledMaintenance import ScheduledMaintenance
    from dream.simulation.Failure import Failure
    from dream.simulation.PeriodicMaintenance import PeriodicMaintenance
    from dream.simulation.ShiftScheduler import ShiftScheduler
    from dream.simulation.Break import Break
    for (element_id, element) in nodes.iteritems():
        element['id'] = element_id
        scheduledMaintenance=element.get('interruptions',{}).get('scheduledMaintenance', {})
        # if there is a scheduled maintenance initiate it and append it
        # to the interruptions- and scheduled maintenances- list
        if len(scheduledMaintenance):
            start=float(scheduledMaintenance.get('start', 0))
            duration=float(scheduledMaintenance.get('duration', 1))
            victim=Globals.findObjectById(element['id'])
            SM=ScheduledMaintenance(victim=victim, start=start, duration=duration)
            G.ObjectInterruptionList.append(SM)
            G.ScheduledMaintenanceList.append(SM)
        failure=element.get('interruptions',{}).get('failure', None)
        # if there are failures assigned initiate them
        if failure:
            victim=Globals.findObjectById(element['id'])
            deteriorationType=failure.get('deteriorationType', 'constant')
            waitOnTie=failure.get('waitOnTie', False)
            F=Failure(victim=victim, distribution=failure, repairman=victim.repairman,
                      deteriorationType=deteriorationType, waitOnTie=waitOnTie)
            G.ObjectInterruptionList.append(F)
            G.FailureList.append(F)
        # if there are periodic maintenances assigned initiate them
        periodicMaintenance=element.get('interruptions',{}).get('periodicMaintenance', None)
        if periodicMaintenance:
            victim=Globals.findObjectById(element['id'])
            PM=PeriodicMaintenance(victim=victim, distribution=periodicMaintenance,
                                   repairman=victim.repairman)
            G.ObjectInterruptionList.append(PM)
            G.PeriodicMaintenanceList.append(PM)
        # if there is a shift pattern defined initiate them
        shift=element.get('interruptions',{}).get('shift', {})
        if len(shift):
            victim=Globals.findObjectById(element['id'])
            shiftPattern=list(shift.get('shiftPattern', []))
            # patch to correct if input has end of shift at the same time of start of next shift
            # TODO check if the backend should be able to handle this
            # FIX: the previous implementation removed items from shiftPattern
            # while iterating it with enumerate, which skipped the record after
            # every merge and left chained adjacent shifts unmerged.  Use an
            # index-controlled loop and stay on the merged record instead.
            index = 0
            while index < len(shiftPattern) - 1:
                record = shiftPattern[index]
                following = shiftPattern[index + 1]
                if record[1] == following[0]:
                    # merge: extend the current record and drop the next one;
                    # do not advance so further chained merges are caught
                    record[1] = following[1]
                    shiftPattern.remove(following)
                else:
                    index += 1
            endUnfinished=bool(int(shift.get('endUnfinished', 0)))
            receiveBeforeEndThreshold=float(shift.get('receiveBeforeEndThreshold', 0))
            thresholdTimeIsOnShift=bool(int(shift.get('thresholdTimeIsOnShift', 1)))
            rolling=bool(int(shift.get('rolling', 0)))
            lastOffShiftDuration=float(shift.get('lastOffShiftDuration', 10))
            SS=ShiftScheduler(victim=victim, shiftPattern=shiftPattern, endUnfinished=endUnfinished,
                              receiveBeforeEndThreshold=receiveBeforeEndThreshold,
                              thresholdTimeIsOnShift=thresholdTimeIsOnShift,
                              rolling=rolling, lastOffShiftDuration=lastOffShiftDuration)
            G.ObjectInterruptionList.append(SS)
            G.ShiftSchedulerList.append(SS)
        br=element.get('interruptions',{}).get('break', None)
        # if there are breaks assigned initiate them
        if br:
            victim=Globals.findObjectById(element['id'])
            endUnfinished=bool(int(br.get('endUnfinished', 1)))
            offShiftAnticipation=br.get('offShiftAnticipation',0)
            BR=Break(victim=victim, distribution=br, endUnfinished=endUnfinished,
                     offShiftAnticipation=offShiftAnticipation)
            G.ObjectInterruptionList.append(BR)
            G.BreakList.append(BR)