# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
import xlrd
import json
from DistributionFitting import DistFittest
from ImportExceldata import Import_Excel

workbook = xlrd.open_workbook('Mockup_ProcessingTimes.xls')    #Using the xlrd library open the Excel document with the input data
worksheets = workbook.sheet_names()
worksheet_ProcessingTimes = worksheets[0]    #Define worksheet_ProcessingTimes as the first sheet of the Excel file

A = Import_Excel()    #Call the Import_Excel object
B = DistFittest()    #Call the Distribution Fitting object
ProcessingTimes = A.Input_data(worksheet_ProcessingTimes, workbook)    #Create a dictionary with the data imported from the Excel file

jsonFile = open('BatchesInput.json', 'r')    #Open the JSON file
data = json.load(jsonFile)    #Load its contents
jsonFile.close()

nodes = data['nodes']
lista = []
for (element_id, element) in nodes.iteritems():    #Append the ids of the JSON file's nodes to a list
    element['id'] = element_id
    lista.append(element['id'])

fittingDict = {}
for element in ProcessingTimes:    #For every element of the imported Excel data that also exists in the JSON file, store the distribution-fitting result in a dictionary
    if element in lista:
        fittingDict[element] = B.ks_test(ProcessingTimes[element])
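
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the scripts in this
# section all assume that Import_Excel().Input_data(worksheet, workbook)
# returns a plain dictionary mapping each column header of the worksheet to
# the list of values below it, which is why columns are read defensively
# with .get(). Header names and numbers here are invented for illustration.
# --------------------------------------------------------------------------
example_ProcessingTimes = {'M1': [12.3, 11.8, 12.9],    # column header -> list of cell values
                           'M2': [7.1, 6.9, 7.4]}
example_M1 = example_ProcessingTimes.get('M1', [])      # [] if the column is missing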
import xlrd
from ImportCSVdata import Import_CSV    #Import_CSV object of the KE tool (assumed module path)
from ImportExceldata import Import_Excel

filename = 'DataSet.csv'    #Path to the CSV data set (illustrative file name; the original excerpt does not define it)

csv = Import_CSV()    #Call the Import_CSV module and, using its Input_data method, import the data set from the CSV file into the tool
Data = csv.Input_data(filename)
Activity2_Proc = Data.get('Activity 2', [])    #Get the two data sets from the returned Python dictionary
Activity3_Proc = Data.get('Activity 3', [])

#Read from the given directory the Excel document with the data
workbook = xlrd.open_workbook('DataSet.xlsx')
worksheets = workbook.sheet_names()
worksheet_Inter = worksheets[0]    #Define the worksheet with the inter-arrival time data

data = Import_Excel()
interTimes = data.Input_data(worksheet_Inter, workbook)    #Create the inter-arrival times dictionary with key the Source and values the inter-arrival time data
S1 = interTimes.get('Source', [])

#Read from the given directory the Excel document with the data
workbook = xlrd.open_workbook('DataSet.xlsx')
worksheets = workbook.sheet_names()
worksheet_Fail = worksheets[1]    #Define the worksheet with the failures data (MTTF, MTTR)

data = Import_Excel()
failures = data.Input_data(worksheet_Fail, workbook)    #Create the failures dictionary with the MTTF and MTTR data
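
# --------------------------------------------------------------------------
# Hedged sketch (not in the original): inter-arrival times such as S1 are
# commonly fitted to an exponential distribution. A minimal, self-contained
# illustration with scipy.stats, assuming SciPy is available; in the KE tool
# this step would normally be done by its own fitting objects. The sample
# data is invented.
# --------------------------------------------------------------------------
from scipy import stats

sample_interarrivals = [0.8, 1.2, 0.5, 2.1, 0.9, 1.7]       # invented example data
loc, scale = stats.expon.fit(sample_interarrivals, floc=0)  # MLE fit, location fixed at 0
print 'fitted rate lambda = %.3f' % (1.0 / scale)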
from ImportExceldata import Import_Excel
from ReplaceMissingValues import HandleMissingValues
from DistributionFitting import Distributions
from DistributionFitting import DistFittest
from ExcelOutput import Output
import xlrd
import json

#Read from the given directory the Excel document with the input data
workbook = xlrd.open_workbook('inputData.xls')
worksheets = workbook.sheet_names()
worksheet_ProcessingTimes = worksheets[0]    #Define the worksheet with the Processing times data
worksheet_MTTF = worksheets[1]    #Define the worksheet with the Time-to-Failure data
worksheet_MTTR = worksheets[2]    #Define the worksheet with the Time-to-Repair data

A = Import_Excel()    #Call the Python object Import_Excel
ProcessingTimes = A.Input_data(worksheet_ProcessingTimes, workbook)    #Create the Processing Times dictionary with key Machine 1 and values the processing time data
MTTF = A.Input_data(worksheet_MTTF, workbook)    #Create the MTTF dictionary with key Machine 1 and values the time-to-failure data
MTTR = A.Input_data(worksheet_MTTR, workbook)    #Create the MTTR dictionary with key Machine 1 and values the time-to-repair data

##Get the M1 key from the above dictionaries and define the following lists with data
ProcTime = ProcessingTimes.get('M1', [])
MTTF = MTTF.get('M1', [])
MTTR = MTTR.get('M1', [])

#Call the HandleMissingValues object and replace the missing values in the lists with the mean of the non-missing values
B = HandleMissingValues()
ProcTime = B.ReplaceWithMean(ProcTime)
MTTF = B.ReplaceWithMean(MTTF)
MTTR = B.ReplaceWithMean(MTTR)
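
# --------------------------------------------------------------------------
# Hedged sketch (not in the original): a minimal pure-Python equivalent of
# what ReplaceWithMean is assumed to do, i.e. substitute missing entries
# (represented here as None) with the mean of the non-missing values.
# --------------------------------------------------------------------------
def replace_with_mean_sketch(values):
    present = [v for v in values if v is not None]      # keep only real observations
    mean = sum(present) / float(len(present))           # mean of the non-missing values
    return [mean if v is None else v for v in values]   # fill the gaps

print replace_with_mean_sketch([4.0, None, 6.0])    # -> [4.0, 5.0, 6.0]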
from ImportExceldata import Import_Excel
from DistributionFitting import DistFittest
from ExcelOutput import Output
from ReplaceMissingValues import HandleMissingValues
import xlrd
import json

#================================================ This script is a simple example of the Knowledge extraction tool ===============================================================#
#The following is the main script, which calls two Python objects in order to conduct the three main components of the Knowledge extraction tool.
#In this example the operation times of the topology's two machines are given in an Excel document.
#The Import_Excel object imports the data from the Excel document into the tool, and the DistFittest object fits the data to a statistical distribution using the Kolmogorov-Smirnov test.

workbook = xlrd.open_workbook('inputsTwoServers.xls')    #Using the xlrd library open the Excel document with the input data
worksheets = workbook.sheet_names()
worksheet_OperationTime = worksheets[0]    #Create a variable that holds the first Excel worksheet

X = Import_Excel()    #Call the Import_Excel object
OperationTimes = X.Input_data(worksheet_OperationTime, workbook)    #Define a Python dictionary named OperationTimes holding the dictionary returned by the Import_Excel object
Machine1_OperationTimes = OperationTimes.get('Machine1', [])    #Define two lists (Machine1_OperationTimes, Machine2_OperationTimes) with the operation times data of each machine
Machine2_OperationTimes = OperationTimes.get('Machine2', [])

A = HandleMissingValues()    #Call the HandleMissingValues object
Machine1_OperationTimes = A.DeleteMissingValue(Machine1_OperationTimes)    #Delete the missing values in the lists with the operation times data
Machine2_OperationTimes = A.DeleteMissingValue(Machine2_OperationTimes)

Dict = {}
B = DistFittest()    #Call the DistFittest object
Dict['M1'] = B.ks_test(Machine1_OperationTimes)    #Conduct the Kolmogorov-Smirnov test on the lists with the operation times data
Dict['M2'] = B.ks_test(Machine2_OperationTimes)
M1 = Dict.get('M1')
M2 = Dict.get('M2')
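
# --------------------------------------------------------------------------
# Hedged sketch (not in the original): conceptually, DistFittest.ks_test is
# understood to fit several candidate distributions and keep the one with
# the smallest Kolmogorov-Smirnov statistic. A minimal illustration of that
# idea with scipy.stats; the candidate set is invented for the example.
# --------------------------------------------------------------------------
from scipy import stats

def ks_best_fit_sketch(data):
    candidates = {'norm': stats.norm, 'expon': stats.expon}
    best_name, best_stat = None, None
    for name, dist in candidates.items():
        params = dist.fit(data)                          # MLE parameter estimates
        stat, p = stats.kstest(data, name, args=params)  # K-S distance to the fitted CDF
        if best_stat is None or stat < best_stat:
            best_name, best_stat = name, stat
    return best_name, best_stat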
from ImportExceldata import Import_Excel
from DetectOutliers import HandleOutliers
from JSON_Output import JSON_example
from WIP_Identifier import currentWIP
import xlrd
from dateutil.parser import *
import datetime
from time import mktime

#Read from the given directory the Excel document with the input data
workbook = xlrd.open_workbook('prod_data.xls')
worksheets = workbook.sheet_names()
main = workbook.sheet_by_name('Export Worksheet')
worksheet_ProdData = worksheets[0]    #Define the worksheet with the production data

A = Import_Excel()    #Call the Python object Import_Excel
ProdData = A.Input_data(worksheet_ProdData, workbook)    #Create the production data dictionary with keys the labels of the Excel file's columns

##Get the different keys from the ProdData dictionary and define the following lists
contIds = ProdData.get('CONTAINERNAME', [])
prodName = ProdData.get('PRODUCTNAME', [])
prodDesc = ProdData.get('PRODUCTDESCRIPTION', [])
taskdate = ProdData.get('TASKDATE', [])
taskName = ProdData.get('TASKTYPENAME', [])
statName = ProdData.get('STATIONNAME', [])
contQuant = ProdData.get('CONTAINERQTYATTXN', [])

# columns that are used (static)
CONTAINERNAME = 0
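
# --------------------------------------------------------------------------
# Hedged sketch (not in the original): dateutil and mktime are imported
# above, presumably to turn the TASKDATE strings into timestamps. A minimal
# illustration of that conversion; the date string is invented.
# --------------------------------------------------------------------------
from dateutil.parser import parse
from time import mktime

example_stamp = parse('2014-03-17 08:30:00')        # string -> datetime object
example_epoch = mktime(example_stamp.timetuple())   # datetime -> seconds since the epoch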
from DistributionFitting import DistFittest
from DistributionFitting import Distributions
from ImportExceldata import Import_Excel
from ExcelOutput import Output
from ReplaceMissingValues import HandleMissingValues
import xlrd
import json
import dream.simulation.LineGenerationJSON as ManPyMain    #import the ManPy main JSON script

#Read from the given directory the Excel document with the input data
workbook = xlrd.open_workbook('inputData.xls')
worksheets = workbook.sheet_names()
worksheet_ProcessingTimes = worksheets[0]    #Define the worksheet with the Processing times data

inputData = Import_Excel()    #Call the Python object Import_Excel
ProcessingTimes = inputData.Input_data(worksheet_ProcessingTimes, workbook)    #Create the Processing Times dictionary with keys Machines 1 and 2 and values the processing time data

##Get the M1 and M2 keys from the above dictionary and define the following lists with data
M1_ProcTime = ProcessingTimes.get('M1', [])
M2_ProcTime = ProcessingTimes.get('M2', [])

#Call the HandleMissingValues object and replace the missing values in the lists with the mean of the non-missing values
misValues = HandleMissingValues()
M1_ProcTime = misValues.ReplaceWithMean(M1_ProcTime)
M2_ProcTime = misValues.ReplaceWithMean(M2_ProcTime)

MLE = Distributions()    #Call the Distributions object (Maximum Likelihood Estimation - MLE)
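
# --------------------------------------------------------------------------
# Hedged sketch (not in the original): the Distributions object is described
# as Maximum Likelihood Estimation, and the demand-planning function later in
# this section uses its Normal_distrfit method. The same estimate can be
# illustrated with scipy.stats; the sample data is invented.
# --------------------------------------------------------------------------
from scipy import stats

sample = [10.2, 9.8, 10.5, 10.1, 9.9]
mu, sigma = stats.norm.fit(sample)    # MLE estimates of the mean and standard deviation
print 'mean=%.3f stdev=%.3f' % (mu, sigma)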
# ===========================================================================
from ImportExceldata import Import_Excel
from DistributionFitting import DistFittest
from DistributionFitting import Distributions
import xlrd
from xml.etree import ElementTree as et
from Simul8XML import Simul8Output

#Read from the given directory the Excel document with the processing times data
workbook = xlrd.open_workbook('ProcData.xls')
worksheets = workbook.sheet_names()
worksheet_Proc = worksheets[0]    #Define the worksheet with the Processing time data

importData = Import_Excel()    #Call the Python object Import_Excel
procTimes = importData.Input_data(worksheet_Proc, workbook)    #Create the Processing times dictionary with key M1 and values the processing time data

#Get the M1 key from the above dictionary and define the following list with data
M1 = procTimes.get('M1', [])
distFitting = DistFittest()    #Call the DistFittest object
M1 = distFitting.ks_test(M1)

#Read from the given directory the Excel document with the inter-arrivals data
workbook = xlrd.open_workbook('InterarrivalsData.xls')
worksheets = workbook.sheet_names()
worksheet_Inter = worksheets[0]    #Define the worksheet with the inter-arrival time data
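
# --------------------------------------------------------------------------
# Hedged sketch (not in the original): ElementTree is imported above because
# the fitting results are ultimately written into a Simul8 XML model by the
# Simul8Output object. A minimal, generic illustration of building and
# saving an XML element with the standard library; the tag and attribute
# names are invented, not Simul8's actual schema.
# --------------------------------------------------------------------------
from xml.etree import ElementTree as et

root = et.Element('SIMUL8-model')
obj = et.SubElement(root, 'WorkCenter', name='M1')
obj.text = 'Erlang'                       # e.g. the name of the fitted distribution
et.ElementTree(root).write('sketch.xml')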
from JSON_Output import JSON_example
from ExcelOutput import Output
from ReplaceMissingValues import HandleMissingValues
from ImportExceldata import Import_Excel
import xlrd
import dream.simulation.LineGenerationJSON as ManPyMain    #import the ManPy main JSON script

#================= Main script of the KE tool =====================================#
#Read from the given directory the Excel document with the input data
workbook = xlrd.open_workbook('input_Data.xls')
worksheets = workbook.sheet_names()
worksheet_ProcessingTimes = worksheets[1]    #Define the worksheet with the Processing times data
worksheet_ScrapQuantity = worksheets[0]    #Define the worksheet with the Scrap Quantity data

A = Import_Excel()    #Call the Python object Import_Excel
ProcessingTimes = A.Input_data(worksheet_ProcessingTimes, workbook)    #Create the Processing Times dictionary with keys the different stations in the line and values the processing times of the different batches at these stations
ScrapQuantity = A.Input_data(worksheet_ScrapQuantity, workbook)    #Create the Scrap Quantity dictionary with keys the different stations in the line and values the scrap quantity data of the different batches at these stations

##Get the different keys from the Scrap Quantity dictionary and define the following lists with the scrap quantity data of the different stations in the topology (see the loop-based sketch below)
P1_Scrap = ScrapQuantity.get('P1', [])
P2_Scrap = ScrapQuantity.get('P2', [])
P3_Scrap = ScrapQuantity.get('P3', [])
P4_Scrap = ScrapQuantity.get('P4', [])
P5_Scrap = ScrapQuantity.get('P5', [])
P6_Scrap = ScrapQuantity.get('P6', [])
P7_Scrap = ScrapQuantity.get('P7', [])
P8_Scrap = ScrapQuantity.get('P8', [])
P9_Scrap = ScrapQuantity.get('P9', [])
P10_Scrap = ScrapQuantity.get('P10', [])
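
# --------------------------------------------------------------------------
# Optional sketch (not in the original): the ten .get() calls above can be
# collapsed into one dictionary built over the ScrapQuantity dict defined
# above; the behaviour is identical.
# --------------------------------------------------------------------------
stations = ['P%d' % n for n in range(1, 11)]                      # 'P1' .. 'P10'
scrap = dict((st, ScrapQuantity.get(st, [])) for st in stations)  # station -> scrap data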
# The imports below are not part of the original excerpt; they are the
# modules this function evidently relies on (KE tool objects with assumed
# module paths, rpy2 for robjects, xlwt for Workbook), added so the
# function is self-contained.
import urllib
import json
import random
import StringIO
import xlrd
from rpy2 import robjects
from xlwt import Workbook
from ImportExceldata import Import_Excel
from DistributionFitting import Distributions
from ReplaceMissingValues import HandleMissingValues


def generateDemandPlanning(input_url, PPOSQuantity=1000, PlannedWeek=1,
                           PPOSToBeDisaggregated='PPOS1', MinPackagingSize=10,
                           planningHorizon=10):
    """Generate random demand from the spreadsheet at input_url."""
    # id is given as an integer and minus one
    # ToDo we have to standardize data
    # PPOSToBeDisaggregated='PPOS'+str(PPOSToBeDisaggregated+'1')

    # Read the data from the Excel file exported from RapidMiner and call the
    # Import_Excel object of the KE tool to import this data into the tool
    demand_data = urllib.urlopen(input_url).read()
    workbook = xlrd.open_workbook(file_contents=demand_data)
    worksheets = workbook.sheet_names()
    worksheet_RapidMiner = worksheets[0]

    A = Import_Excel()
    Turnovers = A.Input_data(worksheet_RapidMiner, workbook)    #Dictionary with the data from the Excel file

    #Create lists with the MAs' names and the turnovers for the first twelve weeks of 2010, retrieving this data from the dictionary
    PPOS = Turnovers.get('Ppos', [])
    SP = Turnovers.get('SP', [])
    MA = Turnovers.get('FP Material No PGS+', [])
    GlobalDemand = Turnovers.get('Global demand', [])

    #Call the Distributions object and fit the Global demand data to a Normal distribution, so as to obtain its mean and standard deviation
    D = Distributions()
    E = HandleMissingValues()
    MA = E.DeleteMissingValue(MA)
    t = D.Normal_distrfit(GlobalDemand)
    avg = t.get('mean')
    stdev = t.get('stdev')

    def constrained_sum_sample_pos(n, total):
        """Return a randomly chosen list of n positive integers summing to total.
        Each such list is equally likely to occur."""
        dividers = sorted(random.sample(xrange(1, total), n - 1))
        return [a - b for a, b in zip(dividers + [total], [0] + dividers)]

    def constrained_sum_sample_nonneg(n, total):
        """Return a randomly chosen list of n nonnegative integers summing to total.
        Each such list is equally likely to occur."""
        return [x - 1 for x in constrained_sum_sample_pos(n, total + n)]

    DemandProfile = {}    #Create a dictionary
    week = []    #List that defines the planning horizon, i.e. 10 weeks
    for i in range(int(planningHorizon)):
        week.append(i + 1)

    for i in week:
        Demand = int(abs(random.normalvariate(avg, stdev)))    #Generate a random, non-negative integer from the Normal distribution
        AllocatedPercent = 0.8 - (0.05 * i)    #Starts at 0.8 (80%) and decreases by 0.05 (5%) on every iteration
        Remaining_Demand = int((1 - AllocatedPercent) * Demand)    #Define the remaining demand
        a = constrained_sum_sample_nonneg(len(MA), 100)
        myInt = 100
        a = robjects.FloatVector(a)
        lista = [x / myInt for x in a]    #List with the same length as the MA list, of floats with total sum equal to 1
        b = constrained_sum_sample_nonneg(len(MA), Remaining_Demand)    #List with the same length as the MA list, with total sum the remaining demand
        dicta = {}
        for index in range(0, len(MA)):
            MinUnits = round(b[index] * (random.uniform(0, 0.2)), 0)
            TotalUnits = b[index]
            if TotalUnits < MinPackagingSize:
                TotalUnits = 0
            if MinUnits < MinPackagingSize:
                MinUnits = 0
            dicta.update({MA[index]: [TotalUnits, MinUnits]})    #Update a dictionary with keys the different MAs and values the remaining demand
        DemandProfile.update({i: dicta})    #Update a dictionary with key the number of each iteration (week) and value the dictionary dicta

    Table = []
    for i in range(len(MA)):
        Table.append([PPOS[i], SP[i], MA[i]])

    uniquePPOS = []
    for ppos in PPOS:
        if not ppos in uniquePPOS and ppos != '':
            uniquePPOS.append(ppos)

    book = Workbook()
    sheet1 = book.add_sheet('Future1', cell_overwrite_ok=True)

    aggrTable = []
    for key in DemandProfile.keys():
        for elem in DemandProfile[key]:
            if DemandProfile[key].get(elem)[0] > 0:
                MAkey = elem
                totalUnits = DemandProfile[key].get(elem)[0]
                minUnits = DemandProfile[key].get(elem)[1]
                plannedWeek = key
                aggrTable.append([MAkey, totalUnits, minUnits, plannedWeek])
            else:
                continue

    t = 1
    aggrTable.sort(key=lambda x: x[1], reverse=False)
    for i in sorted(aggrTable, key=lambda x: int(x[3])):
        sheet1.write(0, 0, 'Order ID')
        sheet1.write(0, 1, 'MA ID')
        sheet1.write(0, 2, 'Total # Units')
        sheet1.write(0, 3, 'Min # Units')
        sheet1.write(0, 4, 'Planned Week')
        sheet1.write(t, 1, (i[0].replace('MA', '', 1)))
        sheet1.write(t, 2, i[1])
        sheet1.write(t, 3, i[2])
        sheet1.write(t, 4, i[3])
        sheet1.write(t, 0, t)
        t += 1

    #Open and write the JSON file with the future demand profile
    futureDemandProfileFile = open('futureDemandProfile.json', mode='w')
    futureDemandProfile = {}
    t = 1
    for i in sorted(aggrTable, key=lambda x: int(x[3])):
        dicta = {
            'MAID': i[0],
            'TotalUnits': i[1],
            'MinUnits': i[2],
            'PlannedWeek': i[3]
        }
        futureDemandProfile[t] = dicta
        t += 1
    futureDemandProfileString = json.dumps(futureDemandProfile, indent=5)
    futureDemandProfileFile.write(futureDemandProfileString)
    futureDemandProfileFile.close()

    ###==================================================================================================###
    sheet2 = book.add_sheet('PPOS', cell_overwrite_ok=True)

    dictPPOS = {}
    dictPPOSMA = {}
    for ind in uniquePPOS:
        indices = [i for i, j in enumerate(PPOS) if j == ind]
        mas = [ma for ma in MA if (MA.index(ma) in indices)]
        dictPPOSMA.update({ind: mas})

    t = 1
    for key in dictPPOSMA.keys():
        for elem in dictPPOSMA[key]:
            if key == PPOSToBeDisaggregated:
                c = constrained_sum_sample_nonneg(len(dictPPOSMA[key]), PPOSQuantity)
                d = constrained_sum_sample_nonneg(len(dictPPOSMA[key]), 100)
                myInt = 100
                d = robjects.FloatVector(d)
                listd = [x / myInt for x in d]
                for i in range(0, len(dictPPOSMA[key])):
                    MinUnits = round(c[i] * (random.uniform(0, 0.2)), 0)
                    TotalUnits = c[i]
                    if TotalUnits < MinPackagingSize:
                        TotalUnits = 0
                    if MinUnits < MinPackagingSize:
                        MinUnits = 0
                    dictPPOS.update({dictPPOSMA[key][i]: [TotalUnits, MinUnits]})

    t = 1
    for i in range(0, len(dictPPOS)):
        sheet2.write(0, 0, 'Order ID')
        sheet2.write(0, 1, 'MA ID')
        sheet2.write(0, 2, 'Total # Units')
        sheet2.write(0, 3, 'Min # Units')
        sheet2.write(0, 4, 'Planned Week')
        sheet2.write(t, 0, t)
        # XXX the MA id should not have the MA prefix...
        sheet2.write(t, 1, dictPPOSMA[PPOSToBeDisaggregated][i].replace('MA', '', 1))
        sheet2.write(t, 2, dictPPOS[dictPPOSMA[PPOSToBeDisaggregated][i]][0])
        sheet2.write(t, 3, dictPPOS[dictPPOSMA[PPOSToBeDisaggregated][i]][1])
        sheet2.write(t, 4, PlannedWeek)
        t += 1

    #Open and write the JSON file with the PPOS profile
    PPOSProfileFile = open('PPOSProfile.json', mode='w')
    PPOSProfile = {}
    t = 1
    for i in range(0, len(dictPPOS)):
        dictb = {
            'MAID': dictPPOSMA[PPOSToBeDisaggregated][i],
            'TotalUnits': dictPPOS[dictPPOSMA[PPOSToBeDisaggregated][i]][0],
            'MinUnits': dictPPOS[dictPPOSMA[PPOSToBeDisaggregated][i]][1],
            'PlannedWeek': PlannedWeek
        }
        PPOSProfile[t] = dictb
        t += 1
    PPOSProfileString = json.dumps(PPOSProfile, indent=5)
    PPOSProfileFile.write(PPOSProfileString)
    PPOSProfileFile.close()

    #Save the workbook both to an in-memory buffer (returned) and to DP.xls
    out = StringIO.StringIO()
    book.save(out)
    book.save('DP.xls')
    return out.getvalue()
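
# --------------------------------------------------------------------------
# Hedged sketch (not in the original): a quick standalone check of the
# dividers trick used by constrained_sum_sample_pos above; n-1 cut points
# drawn from (0, total) split the interval into n positive integer parts
# whose sum is always total.
# --------------------------------------------------------------------------
import random

def constrained_sum_demo(n, total):
    dividers = sorted(random.sample(xrange(1, total), n - 1))
    return [a - b for a, b in zip(dividers + [total], [0] + dividers)]

parts = constrained_sum_demo(4, 100)
print parts, sum(parts)    # four positive integers; the sum is always 100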