def main(test=0, simul8XMLFileName='ParallelStations.xml',
         DBFilePath='C:\Users\Panos\Documents\KE tool_documentation',
         file_path=None, simul8XMLFile=None):
    """Knowledge-extraction example for two parallel stations (MILL1 / MILL2).

    Reads processing, failure and repair records from a database, fits
    statistical distributions to the samples, writes the fitted distributions
    into a Simul8 XML model and saves it as 'KEtool_ParallelStations.xml'.

    test              -- when truthy, the produced XML file is re-parsed and
                         that tree is returned instead of the write() result
    simul8XMLFileName -- name of the input Simul8 XML model, looked up next to
                         this script when simul8XMLFile is not given
    DBFilePath        -- directory holding the 'ServerData' connection file.
                         NOTE(review): non-raw Windows path; sequences such as
                         '\U' only survive because Python 2 leaves unknown
                         escapes alone — this should be a raw string (and is a
                         SyntaxError under Python 3).
    file_path         -- when truthy, skips creating the DB connection.
                         NOTE(review): `cursors` is then never assigned, so the
                         queries below would raise NameError — confirm the
                         intended use against the callers.
    simul8XMLFile     -- already-open XML file object used instead of
                         simul8XMLFileName
    """
    if not file_path:
        # Connection settings are read from a 'ServerData.txt' file found in
        # DBFilePath; three cursors serve the three queries below.
        cnxn = ConnectionData(seekName='ServerData', file_path=DBFilePath,
                              implicitExt='txt', number_of_cursors=3)
        cursors = cnxn.getCursors()

    # Processing times are derived by subtracting TIMEIN from TIMEOUT.
    a = cursors[0].execute(""" select prod_code, stat_code,emp_no, TIMEIN, TIMEOUT from production_status """)
    MILL1 = []  # [TIMEIN, TIMEOUT] pairs recorded for station MILL1
    MILL2 = []  # [TIMEIN, TIMEOUT] pairs recorded for station MILL2
    for j in range(a.rowcount):
        # get the next row
        ind1 = a.fetchone()
        if ind1.stat_code == 'MILL1':
            procTime = []
            procTime.insert(0, ind1.TIMEIN)
            procTime.insert(1, ind1.TIMEOUT)
            MILL1.append(procTime)
        elif ind1.stat_code == 'MILL2':
            procTime = []
            procTime.insert(0, ind1.TIMEIN)
            procTime.insert(1, ind1.TIMEOUT)
            MILL2.append(procTime)
        else:
            continue

    # The Transformations object provides simple element-wise arithmetic
    # (here: subtraction of the two one-element lists).
    transform = Transformations()
    procTime_MILL1 = []  # processing durations (seconds) for MILL1
    for elem in MILL1:
        t1 = []
        t2 = []
        # Convert the time-of-day values to seconds since midnight.
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 + elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 + elem[1].second)
        dt = transform.subtraction(t2, t1)  # TIMEOUT - TIMEIN
        procTime_MILL1.append(dt[0])
    procTime_MILL2 = []  # processing durations (seconds) for MILL2
    for elem in MILL2:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 + elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 + elem[1].second)
        dt = transform.subtraction(t2, t1)  # TIMEOUT - TIMEIN
        procTime_MILL2.append(dt[0])

    # Database queries used again to extract the MTTF and MTTR data points.
    b = cursors[1].execute(""" select stat_code, MTTF_hour from failures """)
    c = cursors[2].execute(""" select stat_code, MTTR_hour from repairs """)
    MTTF_MILL1 = [ ]  # MTTF samples (hours) for MILL1
    MTTF_MILL2 = [ ]  # MTTF samples (hours) for MILL2
    for j in range(b.rowcount):
        # get the next row
        ind2 = b.fetchone()
        if ind2.stat_code == 'MILL1':
            MTTF_MILL1.append(ind2.MTTF_hour)
        elif ind2.stat_code == 'MILL2':
            MTTF_MILL2.append(ind2.MTTF_hour)
        else:
            continue
    MTTR_MILL1 = [ ]  # MTTR samples (hours) for MILL1
    MTTR_MILL2 = [ ]  # MTTR samples (hours) for MILL2
    for j in range(c.rowcount):
        # get the next row
        ind3 = c.fetchone()
        if ind3.stat_code == 'MILL1':
            MTTR_MILL1.append(ind3.MTTR_hour)
        elif ind3.stat_code == 'MILL2':
            MTTR_MILL2.append(ind3.MTTR_hour)
        else:
            continue

    #======================= Fit data to statistical distributions ================================#
    # Each sample set gets the distribution family chosen for this example:
    # Lognormal / Weibull for the processing times, Exponential for MTTF,
    # Normal for MTTR.
    dist_proctime = Distributions()
    distProcTime_MILL1 = dist_proctime.Lognormal_distrfit(procTime_MILL1)
    distProcTime_MILL2 = dist_proctime.Weibull_distrfit(procTime_MILL2)
    dist_MTTF = Distributions()
    dist_MTTR = Distributions()
    distMTTF_MILL1 = dist_MTTF.Exponential_distrfit(MTTF_MILL1)
    distMTTF_MILL2 = dist_MTTF.Exponential_distrfit(MTTF_MILL2)
    distMTTR_MILL1 = dist_MTTR.Normal_distrfit(MTTR_MILL1)
    distMTTR_MILL2 = dist_MTTR.Normal_distrfit(MTTR_MILL2)

    #======================= Output preparation: write the updated values into the XML model ================================#
    if not simul8XMLFile:
        # Resolve the XML model relative to this script's directory.
        datafile = (os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 simul8XMLFileName))
        tree = et.parse(datafile)
    else:
        datafile = simul8XMLFile
        tree = et.parse(datafile)
    simul8 = Simul8Output()
    # Each Simul8Output call takes the tree returned by the previous one, so
    # the updates are chained: processing times, then MTBF/MTTR per station.
    procTimes1 = simul8.ProcTimes(tree, 'MILL1', distProcTime_MILL1)
    procTimes2 = simul8.ProcTimes(procTimes1, 'MILL2', distProcTime_MILL2)
    MTTF1 = simul8.MTBF(procTimes2, 'MILL1', distMTTF_MILL1)
    MTTR1 = simul8.MTTR(MTTF1, 'MILL1', distMTTR_MILL1)
    MTTF2 = simul8.MTBF(MTTR1, 'MILL2', distMTTF_MILL2)
    MTTR2 = simul8.MTTR(MTTF2, 'MILL2', distMTTR_MILL2)
    # Output the XML file with the processed data.
    output = MTTR2.write('KEtool_ParallelStations.xml')
    if test:
        # Test mode: return the freshly written file as a parsed tree.
        output = et.parse('KEtool_ParallelStations.xml')
    return output
def generateDemandPlanning(input_url, PPOSQuantity=1000, PlannedWeek=1,
                           PPOSToBeDisaggregated='PPOS1', MinPackagingSize=10,
                           planningHorizon=10):
    """Generate random demand from the spreadsheet at input_url.

    Side effects:
      * writes 'futureDemandProfile.json' and sheet 'Future1' — the per-week
        random demand split over the MAs
      * writes 'PPOSProfile.json' and sheet 'PPOS' — the disaggregation of one
        selected PPOS into its MAs
      * saves the workbook to 'DP.xls'
    Returns the workbook content as a binary (xls) string.

    input_url             -- URL of the Excel export (from RapidMiner) holding
                             the turnover data
    PPOSQuantity          -- total units to disaggregate over the selected PPOS
    PlannedWeek           -- week number written on the disaggregated rows
    PPOSToBeDisaggregated -- PPOS id; an integer i maps to 'PPOS<i+1>' (ids are
                             zero-based upstream), a string is used as-is
    MinPackagingSize      -- quantities below this threshold are zeroed out
    planningHorizon       -- number of weeks to plan
    """
    # BUG FIX: the previous code always did 'PPOS' + str(PPOSToBeDisaggregated + 1),
    # which raises TypeError whenever the documented string default 'PPOS1' is
    # used.  Only integer ids (given "minus one" per the upstream convention)
    # need the mapping.
    if isinstance(PPOSToBeDisaggregated, int):
        PPOSToBeDisaggregated = 'PPOS' + str(PPOSToBeDisaggregated + 1)

    # Download the exported Excel file and import it through the KE tool.
    demand_data = urllib.urlopen(input_url).read()
    workbook = xlrd.open_workbook(file_contents=demand_data)
    worksheets = workbook.sheet_names()
    worksheet_RapidMiner = worksheets[0]
    A = Import_Excel()
    Turnovers = A.Input_data(worksheet_RapidMiner, workbook)  # column name -> list of values

    # Lists with the MAs' names and the turnovers for the planning data.
    PPOS = Turnovers.get('Ppos', [])
    SP = Turnovers.get('SP', [])
    MA = Turnovers.get('FP Material No PGS+', [])
    GlobalDemand = Turnovers.get('Global demand', [])

    # Fit the global demand to a Normal distribution (mean, stdev) after
    # removing missing MA entries.
    D = Distributions()
    E = HandleMissingValues()
    MA = E.DeleteMissingValue(MA)
    t = D.Normal_distrfit(GlobalDemand)
    avg = t.get('mean')
    stdev = t.get('stdev')

    def constrained_sum_sample_pos(n, total):
        """Return a randomly chosen list of n positive integers summing to total.
        Each such list is equally likely to occur."""
        dividers = sorted(random.sample(xrange(1, total), n - 1))
        return [a - b for a, b in zip(dividers + [total], [0] + dividers)]

    def constrained_sum_sample_nonneg(n, total):
        """Return a randomly chosen list of n nonnegative integers summing to total.
        Each such list is equally likely to occur."""
        return [x - 1 for x in constrained_sum_sample_pos(n, total + n)]

    DemandProfile = {}  # week -> {MA: [TotalUnits, MinUnits]}
    week = range(1, int(planningHorizon) + 1)  # the planning horizon, e.g. 10 weeks
    for i in week:
        # Random, non-negative, integer demand from the fitted Normal distribution.
        Demand = int(abs(random.normalvariate(avg, stdev)))
        # Allocated share starts at 80% and drops by 5% each week.
        AllocatedPercent = 0.8 - (0.05 * i)
        Remaining_Demand = int((1 - AllocatedPercent) * Demand)
        # Random split of the remaining demand over the MAs.
        # (Dead code removed: a second 100-unit split converted to an
        # R FloatVector was computed here but never used.)
        b = constrained_sum_sample_nonneg(len(MA), Remaining_Demand)
        dicta = {}
        for index in range(len(MA)):
            MinUnits = round(b[index] * (random.uniform(0, 0.2)), 0)
            TotalUnits = b[index]
            # Quantities below the minimum packaging size are not orderable.
            if TotalUnits < MinPackagingSize:
                TotalUnits = 0
            if MinUnits < MinPackagingSize:
                MinUnits = 0
            dicta[MA[index]] = [TotalUnits, MinUnits]
        DemandProfile[i] = dicta

    # Unique, non-empty PPOS ids in order of first appearance.
    uniquePPOS = []
    for ppos in PPOS:
        if ppos not in uniquePPOS and ppos != '':
            uniquePPOS.append(ppos)

    book = Workbook()
    sheet1 = book.add_sheet('Future1', cell_overwrite_ok=True)

    # Flatten DemandProfile into rows [MA, TotalUnits, MinUnits, week],
    # skipping zero-quantity entries.
    aggrTable = []
    for key in DemandProfile.keys():
        for elem in DemandProfile[key]:
            if DemandProfile[key][elem][0] > 0:
                aggrTable.append([elem,
                                  DemandProfile[key][elem][0],
                                  DemandProfile[key][elem][1],
                                  key])
    aggrTable.sort(key=lambda x: x[1], reverse=False)

    # BUG FIX: the header row is written once up front (previously it was
    # rewritten on every data row and missing entirely when there were no rows).
    for col, label in enumerate(['Order ID', 'MA ID', 'Total # Units',
                                 'Min # Units', 'Planned Week']):
        sheet1.write(0, col, label)
    t = 1
    futureDemandProfile = {}
    for row in sorted(aggrTable, key=lambda x: int(x[3])):
        sheet1.write(t, 0, t)
        sheet1.write(t, 1, row[0].replace('MA', '', 1))  # strip the 'MA' prefix from the id
        sheet1.write(t, 2, row[1])
        sheet1.write(t, 3, row[2])
        sheet1.write(t, 4, row[3])
        futureDemandProfile[t] = {'MAID': row[0], 'TotalUnits': row[1],
                                  'MinUnits': row[2], 'PlannedWeek': row[3]}
        t += 1
    # BUG FIX: serialize once after the loop and close the handle (previously
    # the dict was re-dumped on every iteration, the file was never closed and
    # an empty table raised NameError on the undefined dump string).
    futureDemandProfileFile = open('futureDemandProfile.json', mode='w')
    try:
        futureDemandProfileFile.write(json.dumps(futureDemandProfile, indent=5))
    finally:
        futureDemandProfileFile.close()

    ###==================================================================================================###
    sheet2 = book.add_sheet('PPOS', cell_overwrite_ok=True)
    # Map each PPOS to the MAs that appear on the same spreadsheet rows.
    dictPPOSMA = {}
    for ind in uniquePPOS:
        indices = [i for i, j in enumerate(PPOS) if j == ind]
        dictPPOSMA[ind] = [ma for ma in MA if MA.index(ma) in indices]

    # Randomly disaggregate PPOSQuantity over the MAs of the selected PPOS.
    # (Previously this sampling ran once per MA of the key, discarding all but
    # the last draw, and computed an unused R FloatVector; one pass suffices.)
    dictPPOS = {}
    selectedMAs = dictPPOSMA.get(PPOSToBeDisaggregated, [])
    if selectedMAs:
        c = constrained_sum_sample_nonneg(len(selectedMAs), PPOSQuantity)
        for i in range(len(selectedMAs)):
            MinUnits = round(c[i] * (random.uniform(0, 0.2)), 0)
            TotalUnits = c[i]
            if TotalUnits < MinPackagingSize:
                TotalUnits = 0
            if MinUnits < MinPackagingSize:
                MinUnits = 0
            dictPPOS[selectedMAs[i]] = [TotalUnits, MinUnits]

    for col, label in enumerate(['Order ID', 'MA ID', 'Total # Units',
                                 'Min # Units', 'Planned Week']):
        sheet2.write(0, col, label)
    t = 1
    PPOSProfile = {}
    for i in range(len(dictPPOS)):
        ma = dictPPOSMA[PPOSToBeDisaggregated][i]
        sheet2.write(t, 0, t)
        # XXX the MA id should not have MA prefix...
        sheet2.write(t, 1, ma.replace('MA', '', 1))
        sheet2.write(t, 2, dictPPOS[ma][0])
        sheet2.write(t, 3, dictPPOS[ma][1])
        sheet2.write(t, 4, PlannedWeek)
        PPOSProfile[t] = {'MAID': ma, 'TotalUnits': dictPPOS[ma][0],
                          'MinUnits': dictPPOS[ma][1], 'PlannedWeek': PlannedWeek}
        t += 1
    # BUG FIX: same serialize-once / close fix as for the future demand profile.
    PPOSProfileFile = open('PPOSProfile.json', mode='w')
    try:
        PPOSProfileFile.write(json.dumps(PPOSProfile, indent=5))
    finally:
        PPOSProfileFile.close()

    # Return the workbook content as a string (and also save it to disk).
    import StringIO
    out = StringIO.StringIO()
    book.save(out)
    book.save('DP.xls')
    return out.getvalue()
def main(test=0, ExcelFileName='inputData.xls',
         JSONFileName='JSON_ParallelStations.json', workbook=None,
         jsonFile=None):
    """Fit distributions to the processing times of two parallel stations and
    run the ManPy simulation with the updated JSON model.

    When `test` is truthy the raw ManPy output is returned directly; otherwise
    the updated model, the simulation output and the statistical reports are
    written to disk and None is returned.
    """
    # Load the Excel input next to this script unless a workbook was supplied.
    if not workbook:
        workbook = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName))
    # The first worksheet holds the processing-time data.
    first_sheet = workbook.sheet_names()[0]

    reader = ImportExceldata()
    # Dictionary keyed by machine ('M1', 'M2') with the processing-time samples.
    processing_times = reader.Input_data(first_sheet, workbook)

    # Replace missing samples with the mean of the non-missing ones.
    cleaner = ReplaceMissingValues()
    M1_ProcTime = cleaner.ReplaceWithMean(processing_times.get('M1', []))
    M2_ProcTime = cleaner.ReplaceWithMean(processing_times.get('M2', []))

    mle_fitter = Distributions()  # Maximum Likelihood Estimation (MLE)
    ks_fitter = DistFittest()     # Kolmogorov-Smirnov fitting test
    M1ProcTime_dist = ks_fitter.ks_test(M1_ProcTime)
    M2ProcTime_dist = mle_fitter.Normal_distrfit(M2_ProcTime)

    #======================= Output preparation: update the JSON model ================================#
    if not jsonFile:
        model_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  JSONFileName)
        with open(model_path, 'r') as handle:
            data = json.load(handle)
    else:
        data = json.load(jsonFile)
    exporter = JSONOutput()
    # Chain the updates: each call takes the structure returned by the previous.
    data1 = exporter.ProcessingTimes(data, 'St1', M1ProcTime_dist)
    data2 = exporter.ProcessingTimes(data1, 'St2', M2ProcTime_dist)

    #================================ Call ManPy and run the simulation model ================================#
    simulationOutput = ManPyMain.main(input_data=json.dumps(data2))
    # In test mode hand back the ManPy result without touching the filesystem.
    if test:
        return simulationOutput

    #=================== Output the updated JSON model ==========================#
    with open('JSON_ParallelStations_Output.json', "w") as handle:
        handle.write(json.dumps(data2, indent=True))

    #=================== Statistical-analysis reports (xls) ==========================#
    reporter = ExcelOutput()
    reporter.PrintStatisticalMeasures(M1_ProcTime, 'M1_ProcTime_StatResults.xls')
    reporter.PrintStatisticalMeasures(M2_ProcTime, 'M2_ProcTime_StatResults.xls')
    reporter.PrintDistributionFit(M1_ProcTime, 'M1_ProcTime_DistFitResults.xls')
    reporter.PrintDistributionFit(M2_ProcTime, 'M2_ProcTime_DistFitResults.xls')

    # Save the simulation output.
    with open('ManPyOutput.json', "w") as handle:
        handle.write(simulationOutput)