# Imports assumed by this snippet (Python 2); the KE tool classes
# Import_Excel, HandleMissingValues and Distributions must also be in scope,
# and their exact module paths may vary per installation.
import json
import random
import urllib

import xlrd
from xlwt import Workbook
from rpy2 import robjects


def generateDemandPlanning(input_url, PPOSQuantity=1000, PlannedWeek=1, PPOSToBeDisaggregated=0,
                           MinPackagingSize=10, planningHorizon=10):
    """Generate a random demand profile from the spreadsheet at input_url."""
    # The PPOS id is given as a zero-based integer, so id 0 maps to 'PPOS1'
    # ToDo: we have to standardize the data
    PPOSToBeDisaggregated = 'PPOS' + str(PPOSToBeDisaggregated + 1)

    
    # Read the Excel file exported from RapidMiner and use the KE tool's
    # Import_Excel object to load its contents

    demand_data = urllib.urlopen(input_url).read()
    workbook = xlrd.open_workbook(file_contents=demand_data)

    worksheets = workbook.sheet_names()
    worksheet_RapidMiner = worksheets[0]

    A = Import_Excel()
    Turnovers = A.Input_data(worksheet_RapidMiner, workbook)  # Dictionary with the data from the Excel file

    #Create lists with the MAs' names and the turnovers for the first twelve weeks of 2010, retrieved from the dictionary
    PPOS = Turnovers.get('Ppos', [])
    SP = Turnovers.get('SP', [])
    MA = Turnovers.get('FP Material No PGS+', [])
    GlobalDemand = Turnovers.get('Global demand', [])

    #Drop missing MA values, then call the Distributions object and fit a Normal
    #distribution to the global demand data to obtain its mean and standard deviation
    D = Distributions()
    E = HandleMissingValues()
    MA = E.DeleteMissingValue(MA)
    t = D.Normal_distrfit(GlobalDemand)
    avg = t.get('mean')
    stdev = t.get('stdev')

    def constrained_sum_sample_pos(n, total):
        """Return a randomly chosen list of n positive integers summing to total.
        Each such list is equally likely to occur."""
     
        dividers = sorted(random.sample(xrange(1, total), n - 1))
        return [a - b for a, b in zip(dividers + [total], [0] + dividers)]

    def constrained_sum_sample_nonneg(n, total):
        """Return a randomly chosen list of n nonnegative integers summing to total.
        Each such list is equally likely to occur."""

        return [x - 1 for x in constrained_sum_sample_pos(n, total + n)]     
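
    # For intuition (illustrative values, not from the original): with n=4 and
    # total=10, constrained_sum_sample_pos(4, 10) might return [1, 4, 2, 3],
    # while constrained_sum_sample_nonneg(4, 10) draws a positive sample of
    # total + n and subtracts 1 from each element, so zeros become possible,
    # e.g. [0, 3, 6, 1].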
        
    DemandProfile = {}  # Maps each week to its per-MA demand allocation

    # List of week indices that defines the planning horizon (default 10 weeks)
    week = range(1, int(planningHorizon) + 1)

    for i in week:
        Demand = int(abs(random.normalvariate(avg, stdev)))  # Draw a non-negative integer demand from the fitted Normal distribution
        AllocatedPercent = 0.8 - (0.05 * i)  # Allocated share starts at 0.8 (80%) and drops by 0.05 (5%) each week
        Remaining_Demand = int((1 - AllocatedPercent) * Demand)  # The demand still to be allocated
        a = constrained_sum_sample_nonneg(len(MA), 100)
        myInt = 100
        a = robjects.FloatVector(a)  # FloatVector forces float division under Python 2
        lista = [x / myInt for x in a]  # Fractions, one per MA, summing to 1 (currently unused below)
        b = constrained_sum_sample_nonneg(len(MA), Remaining_Demand)  # Units per MA, summing to the remaining demand
        dicta = {}
        for index in range(len(MA)):
            MinUnits = round(b[index] * (random.uniform(0, 0.2)), 0)
            TotalUnits = b[index]
            if TotalUnits < MinPackagingSize:
                TotalUnits = 0
            if MinUnits < MinPackagingSize:
                MinUnits = 0
            dicta[MA[index]] = [TotalUnits, MinUnits]  # Map each MA to [TotalUnits, MinUnits]
        DemandProfile[i] = dicta  # Map each week number to its per-MA allocation

    Table = []
    for i in range(len(MA)):
        Table.append([PPOS[i], SP[i], MA[i]])
    uniquePPOS = []
    for ppos in PPOS:
        if ppos not in uniquePPOS and ppos != '':
            uniquePPOS.append(ppos)


    book=Workbook()
    sheet1 = book.add_sheet('Future1', cell_overwrite_ok=True)
    aggrTable=[]
    for key in DemandProfile.keys():
        for elem in DemandProfile[key]:
            totalUnits, minUnits = DemandProfile[key][elem]
            if totalUnits > 0:
                aggrTable.append([elem, totalUnits, minUnits, key])
    t = 1
    aggrTable.sort(key=lambda x: x[1])  # pre-sort on total units; the stable sort below keeps it as a tie-breaker
    sheet1.write(0, 0, 'Order ID')
    sheet1.write(0, 1, 'MA ID')
    sheet1.write(0, 2, 'Total # Units')
    sheet1.write(0, 3, 'Min # Units')
    sheet1.write(0, 4, 'Planned Week')
    for i in sorted(aggrTable, key=lambda x: int(x[3])):
        sheet1.write(t, 0, t)
        sheet1.write(t, 1, i[0].replace('MA', '', 1))
        sheet1.write(t, 2, i[1])
        sheet1.write(t, 3, i[2])
        sheet1.write(t, 4, i[3])
        t += 1

    # Write the weekly demand profile to a JSON file
    futureDemandProfile = {}
    t = 1
    for i in sorted(aggrTable, key=lambda x: int(x[3])):
        futureDemandProfile[t] = {'MAID': i[0], 'TotalUnits': i[1],
                                  'MinUnits': i[2], 'PlannedWeek': i[3]}
        t += 1
    futureDemandProfileString = json.dumps(futureDemandProfile, indent=5)
    futureDemandProfileFile = open('futureDemandProfile.json', mode='w')
    futureDemandProfileFile.write(futureDemandProfileString)
    futureDemandProfileFile.close()
            
    ###==================================================================================================### 
    sheet2 = book.add_sheet('PPOS', cell_overwrite_ok=True)

    dictPPOS={}
    dictPPOSMA={}

    for ind in uniquePPOS:
        indices = [i for i, j in enumerate(PPOS) if j == ind]
        # Index MA positionally; MA.index() would mishandle duplicate MA names
        mas = [MA[i] for i in indices if i < len(MA)]
        dictPPOSMA.update({ind: mas})

    # Disaggregate the selected PPOS quantity over its MAs (drawn once, not per MA)
    if PPOSToBeDisaggregated in dictPPOSMA:
        mas = dictPPOSMA[PPOSToBeDisaggregated]
        c = constrained_sum_sample_nonneg(len(mas), PPOSQuantity)
        d = constrained_sum_sample_nonneg(len(mas), 100)
        myInt = 100
        d = robjects.FloatVector(d)  # FloatVector forces float division under Python 2
        listd = [x / myInt for x in d]  # fractions summing to 1 (currently unused below)
        for i in range(len(mas)):
            MinUnits = round(c[i] * (random.uniform(0, 0.2)), 0)
            TotalUnits = c[i]
            if TotalUnits < MinPackagingSize:
                TotalUnits = 0
            if MinUnits < MinPackagingSize:
                MinUnits = 0
            dictPPOS[mas[i]] = [TotalUnits, MinUnits]
                                 
    t = 1
    sheet2.write(0, 0, 'Order ID')
    sheet2.write(0, 1, 'MA ID')
    sheet2.write(0, 2, 'Total # Units')
    sheet2.write(0, 3, 'Min # Units')
    sheet2.write(0, 4, 'Planned Week')
    for i in range(len(dictPPOS)):
        ma = dictPPOSMA[PPOSToBeDisaggregated][i]
        sheet2.write(t, 0, t)
        # XXX the MA id should not have the 'MA' prefix
        sheet2.write(t, 1, ma.replace('MA', '', 1))
        sheet2.write(t, 2, dictPPOS[ma][0])
        sheet2.write(t, 3, dictPPOS[ma][1])
        sheet2.write(t, 4, PlannedWeek)
        t += 1

    # Write the disaggregated PPOS profile to a JSON file
    PPOSProfile = {}
    t = 1
    for i in range(len(dictPPOS)):
        ma = dictPPOSMA[PPOSToBeDisaggregated][i]
        PPOSProfile[t] = {'MAID': ma, 'TotalUnits': dictPPOS[ma][0],
                          'MinUnits': dictPPOS[ma][1], 'PlannedWeek': PlannedWeek}
        t += 1
    PPOSProfileString = json.dumps(PPOSProfile, indent=5)
    PPOSProfileFile = open('PPOSProfile.json', mode='w')
    PPOSProfileFile.write(PPOSProfileString)
    PPOSProfileFile.close()

    # Return the workbook bytes (also saved locally as DP.xls)
    import StringIO
    out = StringIO.StringIO()
    book.save(out)
    book.save('DP.xls')
    return out.getvalue()
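
# A minimal usage sketch (hypothetical URL; assumes the imports above resolve):
# xlsBytes = generateDemandPlanning('http://example.com/demand.xls',
#                                   PPOSToBeDisaggregated=0, planningHorizon=10)
# with open('DP_copy.xls', 'wb') as f:
#     f.write(xlsBytes)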
def main(test=0,
         ExcelFileName='inputData.xls',
         JSONFileName='JSON_AssembleDismantle.json',
         CMSDFileName='CMSD_AssemblyDismantle.xml',
         workbook=None,
         jsonFile=None,
         cmsdFile=None):
    if not workbook:
        workbook = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName))
    #Read from the given directory the Excel document with the input data
    worksheets = workbook.sheet_names()
    worksheet_ProcessingTimes = worksheets[
        0]  #Define the worksheet with the Processing times data
    worksheet_MTTF = worksheets[
        1]  #Define the worksheet with Time-to-Failure data
    worksheet_MTTR = worksheets[
        2]  #Define the worksheet with Time-to-Repair data

    A = ImportExceldata()  #Call the ImportExceldata object
    ProcessingTimes = A.Input_data(
        worksheet_ProcessingTimes, workbook
    )  #Create the Processing Times dictionary, keyed by machine (here 'M1'), with the processing time data as values
    MTTF = A.Input_data(
        worksheet_MTTF, workbook
    )  #Create the MTTF dictionary, keyed by machine, with the time-to-failure data
    MTTR = A.Input_data(
        worksheet_MTTR, workbook
    )  #Create the MTTR dictionary, keyed by machine, with the time-to-repair data

    ##Get from the above dictionaries the M1 key and define the following lists with data
    ProcTime = ProcessingTimes.get('M1', [])
    MTTF = MTTF.get('M1', [])
    MTTR = MTTR.get('M1', [])

    #Call the ReplaceMissingValues object and replace the missing values in the lists with the mean of the non-missing values
    B = ReplaceMissingValues()
    ProcTime = B.ReplaceWithMean(ProcTime)
    MTTF = B.ReplaceWithMean(MTTF)
    MTTR = B.ReplaceWithMean(MTTR)

    C = Distributions()  #Call the Distributions object
    D = DistFittest()  #Call the DistFittest object

    ProcTime_dist = D.ks_test(ProcTime)
    MTTF_dist = C.Exponential_distrfit(MTTF)
    MTTR_dist = C.Exponential_distrfit(MTTR)
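    # Note (added for clarity): ks_test selects the best-fitting distribution via
    # a Kolmogorov-Smirnov test, while the *_distrfit methods fit one named
    # distribution by Maximum Likelihood Estimation, as the KE tool's naming suggests.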
    #======================== Output preparation: output the values prepared in the CMSD information model of this model ====================================================#
    if not cmsdFile:
        datafile = (
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         CMSDFileName)
        )  #It defines the name or the directory of the XML file that holds the manually written CMSD information model
        tree = et.parse(
            datafile
        )  #This file will be parsed using the xml.etree Python library
    else:
        tree = et.parse(cmsdFile)

    exportCMSD = CMSDOutput()
    stationId1 = 'M1'

    procTime = exportCMSD.ProcessingTimes(tree, stationId1, ProcTime_dist)
    TTF = exportCMSD.TTF(procTime, stationId1, MTTF_dist)
    TTR = exportCMSD.TTR(TTF, stationId1, MTTR_dist)

    TTR.write(
        'CMSD_AssemblyDismantle_Output.xml', encoding="utf8"
    )  #It writes the element tree to a specified file, using the 'utf8' output encoding

    #================================= Output preparation: output the updated values in the JSON file of this example =========================================================#
    if not jsonFile:
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')  #It opens the JSON file
        data = json.load(jsonFile)  #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    exportJSON = JSONOutput()
    stationId = 'M1'

    data = exportJSON.ProcessingTimes(data, stationId, ProcTime_dist)
    data1 = exportJSON.TTF(data, stationId, MTTF_dist)
    data2 = exportJSON.TTR(data1, stationId, MTTR_dist)

    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput = ManPyMain.main(input_data=json.dumps(data2))
    # if we run from test return the ManPy result
    if test:
        return simulationOutput

    #===================== Output the JSON file ========================================#
    jsonFile = open('JSON_AssembleDismantle_Output.json',
                    "w")  #It opens the JSON file
    jsonFile.write(json.dumps(
        data2, indent=True))  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file
    #================================ Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files =============================================#
    C = ExcelOutput()
    C.PrintStatisticalMeasures(ProcTime, 'ProcTime_StatResults.xls')
    C.PrintStatisticalMeasures(MTTR, 'MTTR_StatResults.xls')
    C.PrintStatisticalMeasures(MTTF, 'MTTF_StatResults.xls')
    C.PrintDistributionFit(ProcTime, 'ProcTime_DistFitResults.xls')
    C.PrintDistributionFit(MTTR, 'MTTR_DistFitResults.xls')

    #================================ Save the ManPy simulation output =============================================#
    # ManPy already ran above with the fully updated model (data2)
    jsonFile = open('ManPyOutput.json', "w")  #It opens the JSON file
    jsonFile.write(simulationOutput)  #It writes the simulation output to the file
    jsonFile.close()  #It closes the file
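
# Typical entry point when this example is run as a script (assumed, mirroring
# the other KE tool examples):
# if __name__ == '__main__':
#     main()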
Example #3
def main(test=0,
         ExcelFileName='DataSet.xlsx',
         CSVFileName='ProcTimesData.csv',
         simul8XMLFileName='Topology1.xml',
         workbook=None,
         csvFile=None,
         simul8XMLFile=None):
    #================================= Extract the required data from the data files ==========================================#
    if csvFile:
        CSVFileName = csvFile.name
    filename = CSVFileName
    csv = ImportCSVdata(
    )  #Call the ImportCSVdata object and use its Input_data method to import the data set from the CSV file
    Data = csv.Input_data(filename)

    Activity2_Proc = Data.get(
        'Activity 2',
        [])  #Get the two processing-time data sets from the returned Python dictionary
    Activity3_Proc = Data.get('Activity 3', [])

    #Read from the given directory the Excel document with the data
    if not workbook:
        workbook = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName))
    worksheets = workbook.sheet_names()
    worksheet_Inter = worksheets[
        0]  #Define the worksheet with the Inter-arrivals time data

    data = ImportExceldata()
    interTimes = data.Input_data(
        worksheet_Inter, workbook
    )  #Create the Inter-arrival times dictionary with key the Source and values the inter-arrival time data

    S1 = interTimes.get('Source', [])

    #Read from the given directory the Excel document with the data
    worksheets = workbook.sheet_names()
    worksheet_Fail = worksheets[
        1]  #Define the worksheet with the failures data (MTTF,MTTR)

    data = ImportExceldata()
    failures = data.Input_data(
        worksheet_Fail, workbook
    )  #Create the failures dictionary with key the MTTF and MTTR data points

    MTTF = failures.get('MTTF', [])
    MTTR = failures.get('MTTR', [])

    #======================= Fit data to probability distributions ================================#
    #The Distributions and DistFittest objects are called to fit statistical distributions to the in-scope data
    dist = Distributions()
    act2Proc = dist.Weibull_distrfit(Activity2_Proc)
    act3Proc = dist.Weibull_distrfit(Activity3_Proc)

    s1Times = dist.Exponential_distrfit(S1)

    distFit = DistFittest()
    act1MTTF = distFit.ks_test(MTTF)
    act1MTTR = distFit.ks_test(MTTR)

    #======================= Output preparation: output the updated values in the XML file of this example ================================#

    if not simul8XMLFile:
        datafile = (os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 simul8XMLFileName)
                    )  #It defines the name or the directory of the XML file
        tree = et.parse(datafile)
    else:
        datafile = simul8XMLFile
        tree = et.parse(datafile)

    simul8 = Simul8Output()  #Call the Simul8Output object
    #Assign the statistical distribution calculated above in the XML file using methods of the Simul8Output object
    interTimes = simul8.InterArrivalTime(tree, 'Source', s1Times)

    procTimes2 = simul8.ProcTimes(interTimes, 'Activity 2', act2Proc)
    procTimes3 = simul8.ProcTimes(procTimes2, 'Activity 3', act3Proc)

    #Again assign the MTTF and MTTR probability distributions calling the relevant methods from the Simul8Output object
    MTTF1 = simul8.MTBF(procTimes3, 'Activity 1', act1MTTF)
    MTTR1 = simul8.MTTR(MTTF1, 'Activity 1', act1MTTR)
    #Output the XML file with the processed data
    output = MTTR1.write('KEtool_Topology1.xml')

    if test:
        output = et.parse('KEtool_Topology1.xml')
        return output
Example #4
D=DistFittest()
dictProc={} #Create a dictionary that holds the statistical distributions of the processing times of each station
dictProc['MA']= D.ks_test(MA_Proc)
dictProc['M1A']= D.ks_test(M1A_Proc)
dictProc['M1B']= D.ks_test(M1B_Proc)
dictProc['M2A']= D.ks_test(M2A_Proc)
dictProc['M2B']= D.ks_test(M2B_Proc)
dictProc['M3A']= D.ks_test(M3A_Proc)
dictProc['M3B']= D.ks_test(M3B_Proc)
dictProc['MM']= D.ks_test(MM_Proc)
dictProc['PrA']= D.ks_test(PrA_Proc)
dictProc['PrB']= D.ks_test(PrB_Proc)
dictProc['PaA']= D.ks_test(PaA_Proc)
dictProc['PaB']= D.ks_test(PaB_Proc)
#Call the Distributions object and fit (via Maximum Likelihood Estimation) the scrap quantity lists to a discrete statistical distribution, i.e. the Geometric
D=Distributions()
dictScrap={} #Create a dictionary that holds the fitted Geometric distribution (a discrete distribution) of the scrap quantity at each station
dictScrap['MA']= D.Geometric_distrfit(MA_Scrap)
dictScrap['M1A']= D.Geometric_distrfit(M1A_Scrap)
dictScrap['M1B']= D.Geometric_distrfit(M1B_Scrap)
dictScrap['M2A']= D.Geometric_distrfit(M2A_Scrap)
dictScrap['M2B']= D.Geometric_distrfit(M2B_Scrap)
dictScrap['M3A']= D.Geometric_distrfit(M3A_Scrap)
dictScrap['M3B']= D.Geometric_distrfit(M3B_Scrap)
dictScrap['MM']= D.Geometric_distrfit(MM_Scrap)
dictScrap['PrA']= D.Geometric_distrfit(PrA_Scrap)
dictScrap['PrB']= D.Geometric_distrfit(PrB_Scrap)
dictScrap['PaA']= D.Geometric_distrfit(PaA_Scrap)
dictScrap['PaB']= D.Geometric_distrfit(PaB_Scrap)
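
# The twelve near-identical assignments above could equally be driven by a loop
# (sketch only; assumes the *_Scrap lists are in scope):
# for name, data in [('MA', MA_Scrap), ('M1A', M1A_Scrap), ('M1B', M1B_Scrap)]:
#     dictScrap[name] = D.Geometric_distrfit(data)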

#Call the CMSDOutput object, passing the dictionaries with the processing time distributions and the scrap quantity distributions
def main(test=0,
         simul8XMLFileName='ParallelStations.xml',
         DBFilePath=r'C:\Users\Panos\Documents\KE tool_documentation',
         file_path=None,
         simul8XMLFile=None):
    if not file_path:
        cnxn = ConnectionData(seekName='ServerData',
                              file_path=DBFilePath,
                              implicitExt='txt',
                              number_of_cursors=3)
        cursors = cnxn.getCursors()


#Database queries used to extract the required data, in this example the processing times are given subtracting the TIME IN data point from the TIME OUT data point
    a = cursors[0].execute("""
            select prod_code, stat_code,emp_no, TIMEIN, TIMEOUT
            from production_status
                    """)
    MILL1 = []  #Initialization of MILL1 list
    MILL2 = []  #Initialization of MILL2 list
    for j in range(a.rowcount):
        #get the next row and sort it into the station lists as [TIMEIN, TIMEOUT]
        ind1 = a.fetchone()
        if ind1.stat_code == 'MILL1':
            MILL1.append([ind1.TIMEIN, ind1.TIMEOUT])
        elif ind1.stat_code == 'MILL2':
            MILL2.append([ind1.TIMEIN, ind1.TIMEOUT])
    #The Transformations object is called to conduct some data transformations
    transform = Transformations()
    procTime_MILL1 = []
    for elem in MILL1:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL1.append(dt[0])

    procTime_MILL2 = []
    for elem in MILL2:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL2.append(dt[0])
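    # Hypothetical refactor sketch (helper not in the source): both loops above
    # repeat the same time-of-day -> seconds conversion, which one helper could share:
    # def secondsSinceMidnight(t):
    #     return t.hour * 3600 + t.minute * 60 + t.second
    # procTime_MILL1 = [secondsSinceMidnight(tout) - secondsSinceMidnight(tin)
    #                   for tin, tout in MILL1]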
    #Database queries used again to extract the MTTF and MTTR data points
    b = cursors[1].execute("""
            select stat_code, MTTF_hour
            from failures
                    """)

    c = cursors[2].execute("""
            select stat_code, MTTR_hour
            from repairs
                    """)
    MTTF_MILL1 = []  #Will hold the MTTF data points for MILL1
    MTTF_MILL2 = []  #Will hold the MTTF data points for MILL2
    for j in range(b.rowcount):
        #get the next line
        ind2 = b.fetchone()
        if ind2.stat_code == 'MILL1':
            MTTF_MILL1.append(ind2.MTTF_hour)
        elif ind2.stat_code == 'MILL2':
            MTTF_MILL2.append(ind2.MTTF_hour)
        else:
            continue

    MTTR_MILL1 = []  #Will hold the MTTR data points for MILL1
    MTTR_MILL2 = []  #Will hold the MTTR data points for MILL2
    for j in range(c.rowcount):
        #get the next line
        ind3 = c.fetchone()
        if ind3.stat_code == 'MILL1':
            MTTR_MILL1.append(ind3.MTTR_hour)
        elif ind3.stat_code == 'MILL2':
            MTTR_MILL2.append(ind3.MTTR_hour)
        else:
            continue

    #======================= Fit data to statistical distributions ================================#
    #The Distributions object is called to fit statistical distributions to the in scope data
    dist_proctime = Distributions()
    distProcTime_MILL1 = dist_proctime.Lognormal_distrfit(procTime_MILL1)
    distProcTime_MILL2 = dist_proctime.Weibull_distrfit(procTime_MILL2)

    dist_MTTF = Distributions()
    dist_MTTR = Distributions()
    distMTTF_MILL1 = dist_MTTF.Exponential_distrfit(MTTF_MILL1)
    distMTTF_MILL2 = dist_MTTF.Exponential_distrfit(MTTF_MILL2)

    distMTTR_MILL1 = dist_MTTR.Normal_distrfit(MTTR_MILL1)
    distMTTR_MILL2 = dist_MTTR.Normal_distrfit(MTTR_MILL2)

    #======================= Output preparation: output the updated values in the XML file of this example ================================#

    if not simul8XMLFile:
        datafile = (os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 simul8XMLFileName)
                    )  #It defines the name or the directory of the XML file
        tree = et.parse(datafile)
    else:
        datafile = simul8XMLFile
        tree = et.parse(datafile)

    simul8 = Simul8Output()  #Call the Simul8Output object
    #Assign the statistical distribution found above in the XML file using methods of the Simul8Output object
    procTimes1 = simul8.ProcTimes(tree, 'MILL1', distProcTime_MILL1)
    procTimes2 = simul8.ProcTimes(procTimes1, 'MILL2', distProcTime_MILL2)
    #Again assign the MTTF and MTTR probability distributions calling the relevant methods from the Simul8Output object
    MTTF1 = simul8.MTBF(procTimes2, 'MILL1', distMTTF_MILL1)
    MTTR1 = simul8.MTTR(MTTF1, 'MILL1', distMTTR_MILL1)

    MTTF2 = simul8.MTBF(MTTR1, 'MILL2', distMTTF_MILL2)
    MTTR2 = simul8.MTTR(MTTF2, 'MILL2', distMTTR_MILL2)
    #Output the XML file with the processed data
    output = MTTR2.write('KEtool_ParallelStations.xml')

    if test:
        output = et.parse('KEtool_ParallelStations.xml')
        return output
Example #6
def main(test=0,
         JSONFileName='JSON_example.json',
         CMSDFileName='CMSD_ParallelStations.xml',
         DBFilePath=r'C:\Users\Panos\Documents\KE tool_documentation',
         file_path=None,
         jsonFile=None,
         cmsdFile=None):
    if not file_path:
        cnxn = ConnectionData(seekName='ServerData',
                              file_path=DBFilePath,
                              implicitExt='txt',
                              number_of_cursors=3)
        cursors = cnxn.getCursors()

    a = cursors[0].execute("""
            select prod_code, stat_code,emp_no, TIMEIN, TIMEOUT
            from production_status
                    """)
    MILL1 = []
    MILL2 = []
    for j in range(a.rowcount):
        #get the next row and sort it into the station lists as [TIMEIN, TIMEOUT]
        ind1 = a.fetchone()
        if ind1.stat_code == 'MILL1':
            MILL1.append([ind1.TIMEIN, ind1.TIMEOUT])
        elif ind1.stat_code == 'MILL2':
            MILL2.append([ind1.TIMEIN, ind1.TIMEOUT])

    transform = Transformations()
    procTime_MILL1 = []
    for elem in MILL1:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL1.append(dt[0])

    procTime_MILL2 = []
    for elem in MILL2:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL2.append(dt[0])

    b = cursors[1].execute("""
            select stat_code, MTTF_hour
            from failures
                    """)

    c = cursors[2].execute("""
            select stat_code, MTTR_hour
            from repairs
                    """)
    MTTF_MILL1 = []
    MTTF_MILL2 = []
    for j in range(b.rowcount):
        #get the next line
        ind2 = b.fetchone()
        if ind2.stat_code == 'MILL1':
            MTTF_MILL1.append(ind2.MTTF_hour)
        elif ind2.stat_code == 'MILL2':
            MTTF_MILL2.append(ind2.MTTF_hour)
        else:
            continue

    MTTR_MILL1 = []
    MTTR_MILL2 = []
    for j in range(c.rowcount):
        #get the next line
        ind3 = c.fetchone()
        if ind3.stat_code == 'MILL1':
            MTTR_MILL1.append(ind3.MTTR_hour)
        elif ind3.stat_code == 'MILL2':
            MTTR_MILL2.append(ind3.MTTR_hour)
        else:
            continue

    #======================= Fit data to statistical distributions ================================#
    dist_proctime = DistFittest()
    distProcTime_MILL1 = dist_proctime.ks_test(procTime_MILL1)
    distProcTime_MILL2 = dist_proctime.ks_test(procTime_MILL2)

    dist_MTTF = Distributions()
    dist_MTTR = Distributions()
    distMTTF_MILL1 = dist_MTTF.Weibull_distrfit(MTTF_MILL1)
    distMTTF_MILL2 = dist_MTTF.Weibull_distrfit(MTTF_MILL2)

    distMTTR_MILL1 = dist_MTTR.Poisson_distrfit(MTTR_MILL1)
    distMTTR_MILL2 = dist_MTTR.Poisson_distrfit(MTTR_MILL2)

    #======================== Output preparation: output the values prepared in the CMSD information model of this model ====================================================#
    if not cmsdFile:
        datafile = (
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         CMSDFileName)
        )  #It defines the name or the directory of the XML file that holds the manually written CMSD information model
        tree = et.parse(
            datafile
        )  #This file will be parsed using the xml.etree Python library
    else:
        tree = et.parse(cmsdFile)

    exportCMSD = CMSDOutput()
    stationId1 = 'M1'
    stationId2 = 'M2'
    procTime1 = exportCMSD.ProcessingTimes(tree, stationId1,
                                           distProcTime_MILL1)
    procTime2 = exportCMSD.ProcessingTimes(procTime1, stationId2,
                                           distProcTime_MILL2)

    TTF1 = exportCMSD.TTF(procTime2, stationId1, distMTTF_MILL1)
    TTR1 = exportCMSD.TTR(TTF1, stationId1, distMTTR_MILL1)

    TTF2 = exportCMSD.TTF(TTR1, stationId2, distMTTF_MILL2)
    TTR2 = exportCMSD.TTR(TTF2, stationId2, distMTTR_MILL2)

    TTR2.write(
        'CMSD_ParallelStations_Output.xml', encoding="utf8"
    )  #It writes the element tree to a specified file, using the 'utf8' output encoding

    #======================= Output preparation: output the updated values in the JSON file of this example ================================#
    if not jsonFile:
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')  #It opens the JSON file
        data = json.load(jsonFile)  #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    exportJSON = JSONOutput()
    stationId1 = 'M1'
    stationId2 = 'M2'
    data1 = exportJSON.ProcessingTimes(data, stationId1, distProcTime_MILL1)
    data2 = exportJSON.ProcessingTimes(data1, stationId2, distProcTime_MILL2)

    data3 = exportJSON.TTF(data2, stationId1, distMTTF_MILL1)
    data4 = exportJSON.TTR(data3, stationId1, distMTTR_MILL1)

    data5 = exportJSON.TTF(data4, stationId2, distMTTF_MILL2)
    data6 = exportJSON.TTR(data5, stationId2, distMTTR_MILL2)

    # if we run from test return the data6
    if test:
        return data6

    jsonFile = open('JSON_ParallelStations_Output.json',
                    "w")  #It opens the JSON file
    jsonFile.write(json.dumps(
        data6, indent=True))  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file

    #=================== Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files ==========================#
    export = ExcelOutput()

    export.PrintStatisticalMeasures(procTime_MILL1,
                                    'procTimeMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(procTime_MILL2,
                                    'procTimeMILL2_StatResults.xls')
    export.PrintStatisticalMeasures(MTTF_MILL1, 'MTTFMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(MTTF_MILL2, 'MTTFMILL2_StatResults.xls')
    export.PrintStatisticalMeasures(MTTR_MILL1, 'MTTRMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(MTTR_MILL2, 'MTTRMILL2_StatResults.xls')

    export.PrintDistributionFit(procTime_MILL1,
                                'procTimeMILL1_DistFitResults.xls')
    export.PrintDistributionFit(procTime_MILL2,
                                'procTimeMILL2_DistFitResults.xls')
    export.PrintDistributionFit(MTTF_MILL1, 'MTTFMILL1_DistFitResults.xls')
    export.PrintDistributionFit(MTTF_MILL2, 'MTTFMILL2_DistFitResults.xls')
    export.PrintDistributionFit(MTTR_MILL1, 'MTTRMILL1_DistFitResults.xls')
    export.PrintDistributionFit(MTTR_MILL2, 'MTTRMILL2_DistFitResults.xls')
Example #7
def main(test=0,
         ExcelFileName='inputData.xls',
         JSONFileName='JSON_ParallelStations.json',
         workbook=None,
         jsonFile=None):

    #Read from the given directory the Excel document with the input data
    if not workbook:
        workbook = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName))
    worksheets = workbook.sheet_names()
    worksheet_ProcessingTimes = worksheets[
        0]  #Define the worksheet with the Processing times data

    inputData = ImportExceldata()  #Call the ImportExceldata object
    ProcessingTimes = inputData.Input_data(
        worksheet_ProcessingTimes, workbook
    )  #Create the Processing Times dictionary with keys 'M1', 'M2' and values the processing time data

    ##Get from the above dictionaries the M1 key and define the following lists with data
    M1_ProcTime = ProcessingTimes.get('M1', [])
    M2_ProcTime = ProcessingTimes.get('M2', [])

    #Call the HandleMissingValues object and replace the missing values in the lists with the mean of the non-missing values
    misValues = ReplaceMissingValues()
    M1_ProcTime = misValues.ReplaceWithMean(M1_ProcTime)
    M2_ProcTime = misValues.ReplaceWithMean(M2_ProcTime)

    MLE = Distributions(
    )  #Call the Distributions object (Maximum Likelihood Estimation - MLE)
    KS = DistFittest(
    )  #Call the DistFittest object (Kolmogorov-Smirnov test)

    M1ProcTime_dist = KS.ks_test(M1_ProcTime)
    M2ProcTime_dist = MLE.Normal_distrfit(M2_ProcTime)

    #======================= Output preparation: output the updated values in the JSON file of this example ================================#
    if not jsonFile:
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')  #It opens the JSON file
        data = json.load(jsonFile)  #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    exportJSON = JSONOutput()
    stationId1 = 'St1'
    stationId2 = 'St2'
    data1 = exportJSON.ProcessingTimes(data, stationId1, M1ProcTime_dist)
    data2 = exportJSON.ProcessingTimes(data1, stationId2, M2ProcTime_dist)

    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput = ManPyMain.main(input_data=json.dumps(data2))

    # if we run from test return the ManPy result
    if test:
        return simulationOutput

    #=================== Output the JSON file ==========================#
    jsonFile = open('JSON_ParallelStations_Output.json',
                    "w")  #It opens the JSON file
    jsonFile.write(json.dumps(
        data2, indent=True))  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file

    #=================== Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files ==========================#
    export = ExcelOutput()

    export.PrintStatisticalMeasures(M1_ProcTime, 'M1_ProcTime_StatResults.xls')
    export.PrintStatisticalMeasures(M2_ProcTime, 'M2_ProcTime_StatResults.xls')

    export.PrintDistributionFit(M1_ProcTime, 'M1_ProcTime_DistFitResults.xls')
    export.PrintDistributionFit(M2_ProcTime, 'M2_ProcTime_DistFitResults.xls')

    # save the simulation output
    jsonFile = open('ManPyOutput.json', "w")  #It opens the JSON file
    jsonFile.write(
        simulationOutput)  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file
Example #8
def main(test=0,
         ExcelFileName1='InterarrivalsData.xls',
         ExcelFileName2='ProcData.xls',
         simul8XMLFileName='SingleServer.xml',
         workbook1=None,
         workbook2=None,
         simul8XMLFile=None):

    #Read from the given directory the Excel document with the processing times data
    if not workbook2:
        workbook2 = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName2))
    worksheets = workbook2.sheet_names()
    worksheet_Proc = worksheets[
        0]  #Define the worksheet with the Processing time data

    importData = ImportExceldata()  #Call the Python object Import_Excel
    procTimes = importData.Input_data(
        worksheet_Proc, workbook2
    )  #Create the Processing times dictionary with key the M1 and values the processing time data

    #Get the M1 key from the above dictionary and define the corresponding list with data
    M1 = procTimes.get('M1', [])

    distFitting = DistFittest()  #Call the DistFittest object
    M1 = distFitting.ks_test(M1)

    #Read from the given directory the Excel document with the inter-arrivals data
    if not workbook1:
        workbook1 = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName1))
    worksheets = workbook1.sheet_names()
    worksheet_Inter = worksheets[
        0]  #Define the worksheet with the Inter-arrivals time data

    data = ImportExceldata()
    interTimes = data.Input_data(
        worksheet_Inter, workbook1
    )  #Create the Inter-arrival times dictionary with key the Source and values the inter-arrival time data

    S1 = interTimes.get('Source', [])

    distMLE = Distributions()  #Call the Distributions object
    S1 = distMLE.Exponential_distrfit(S1)

    if not simul8XMLFile:
        datafile = (os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 simul8XMLFileName)
                    )  #It defines the name or the directory of the XML file
        tree = et.parse(datafile)
    else:
        datafile = simul8XMLFile
        tree = et.parse(datafile)

    simul8 = Simul8Output()  #Call the Simul8Output object
    title = 'KEtool_SingleServer'
    interTimes = simul8.InterArrivalTime(tree, 'Source', S1)
    procTimes = simul8.ProcTimes(interTimes, 'Activity 1', M1)
    title = simul8.Title(procTimes, title)
    #Output the XML file with the processed data
    output = title.write('KEtool_SingleServer.xml')

    if test:
        output = et.parse('KEtool_SingleServer.xml')
        return output
Example #9
def main(test=0,
         CSVFileName1='InterArrivalData.csv',
         CSVFileName2='DataSet.csv',
         JSONFileName='JSON_ConveyerLine.json',
         jsonFile=None,
         csvFile1=None,
         csvFile2=None):
    if csvFile2:
        CSVFileName2 = csvFile2.name
    if csvFile1:
        CSVFileName1 = csvFile1.name

    CSV = ImportCSVdata(
    )  #Call the ImportCSVdata object and use its Input_data method to import the data sets from the CSV files
    procData = CSV.Input_data(CSVFileName2)
    sourceData = CSV.Input_data(CSVFileName1)
    M1 = procData.get(
        'M1', [])  #get from the returned Python dictionary the data sets
    M2 = procData.get('M2', [])
    S1 = sourceData.get('S1', [])

    ################### Processing of the data sets calling the following objects ###################################
    #Replace missing values calling the corresponding object
    missingValues = ReplaceMissingValues()
    M1 = missingValues.DeleteMissingValue(M1)
    M2 = missingValues.DeleteMissingValue(M2)
    S1 = missingValues.ReplaceWithMean(S1)

    #Detect outliers calling the DetectOutliers object
    outliers = DetectOutliers()
    M1 = outliers.DeleteExtremeOutliers(M1)
    M2 = outliers.DeleteExtremeOutliers(M2)
    S1 = outliers.DeleteOutliers(S1)

    #Conduct distribution fitting calling the Distributions object and DistFittest object
    MLE = Distributions()
    KStest = DistFittest()
    M1 = KStest.ks_test(M1)
    M2 = KStest.ks_test(M2)
    S1 = MLE.Exponential_distrfit(S1)
    #================================= Output preparation: output the updated values in the JSON file of this example =========================================================#
    if not jsonFile:
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')  #It opens the JSON file
        data = json.load(jsonFile)  #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    exportJSON = JSONOutput()
    stationId1 = 'M1'
    stationId2 = 'M2'
    stationId3 = 'S1'

    data = exportJSON.ProcessingTimes(data, stationId1, M1)
    data1 = exportJSON.ProcessingTimes(data, stationId2, M2)
    data2 = exportJSON.InterarrivalTime(data1, stationId3, S1)

    # if we run from test return the data2
    if test:
        return data2

    jsonFile = open('JSON_ConveyerLine_Output.json',
                    "w")  #It opens the JSON file
    jsonFile.write(json.dumps(
        data2, indent=True))  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file