def main(test=0,
         ExcelFileName='inputData.xls',
         JSONFileName='JSON_AssembleDismantle.json',
         CMSDFileName='CMSD_AssemblyDismantle.xml',
         workbook=None,
         jsonFile=None,
         cmsdFile=None):
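    """KE tool example: read the M1 processing times, time-to-failure and
    time-to-repair data from Excel, clean and fit them to distributions, write
    the results into the CMSD and JSON models, and run the ManPy simulation."""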
    if not workbook:
        workbook = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName))
    #The Excel document read above holds the input data; select its worksheets
    worksheets = workbook.sheet_names()
    worksheet_ProcessingTimes = worksheets[0]  #Worksheet with the processing times data
    worksheet_MTTF = worksheets[1]  #Worksheet with the time-to-failure data
    worksheet_MTTR = worksheets[2]  #Worksheet with the time-to-repair data

    A = ImportExceldata()  #Instantiate the Import_Excel object
    #Build dictionaries keyed by machine M1, with the corresponding data lists as values
    ProcessingTimes = A.Input_data(worksheet_ProcessingTimes, workbook)  #processing time data
    MTTF = A.Input_data(worksheet_MTTF, workbook)  #time-to-failure data
    MTTR = A.Input_data(worksheet_MTTR, workbook)  #time-to-repair data

    #Extract the data lists for machine M1 from the dictionaries above
    ProcTime = ProcessingTimes.get('M1', [])
    MTTF = MTTF.get('M1', [])
    MTTR = MTTR.get('M1', [])

    #Use the ReplaceMissingValues object to replace the missing values in the lists with the mean of the non-missing values
    B = ReplaceMissingValues()
    ProcTime = B.ReplaceWithMean(ProcTime)
    MTTF = B.ReplaceWithMean(MTTF)
    MTTR = B.ReplaceWithMean(MTTR)

    C = Distributions()  #Instantiate the Distributions object (MLE distribution fitting)
    D = DistFittest()  #Instantiate the DistFittest object (Kolmogorov-Smirnov distribution fitting)

    ProcTime_dist = D.ks_test(ProcTime)
    MTTF_dist = C.Exponential_distrfit(MTTF)
    MTTR_dist = C.Exponential_distrfit(MTTR)
    #======================== Output preparation: write the fitted values into the CMSD information model of this example ========================#
    if not cmsdFile:
        #Path of the manually written XML file that holds the CMSD information model
        datafile = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                CMSDFileName)
    else:
        datafile = cmsdFile
    #Parse the file using the xml.etree Python library
    tree = et.parse(datafile)

    exportCMSD = CMSDOutput()
    stationId1 = 'M1'

    procTime = exportCMSD.ProcessingTimes(tree, stationId1, ProcTime_dist)
    TTF = exportCMSD.TTF(procTime, stationId1, MTTF_dist)
    TTR = exportCMSD.TTR(TTF, stationId1, MTTR_dist)

    #Write the element tree to the output file using the 'utf8' encoding
    TTR.write('CMSD_AssemblyDismantle_Output.xml', encoding="utf8")

    #================================= Output preparation: output the updated values in the JSON file of this example =========================================================#
    if not jsonFile:
        #Open and load the JSON file from the given directory
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')
        data = json.load(jsonFile)
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    exportJSON = JSONOutput()
    stationId = 'M1'

    data = exportJSON.ProcessingTimes(data, stationId, ProcTime_dist)
    data1 = exportJSON.TTF(data, stationId, MTTF_dist)
    data2 = exportJSON.TTR(data1, stationId, MTTR_dist)

    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput = ManPyMain.main(input_data=json.dumps(data2))
    # if we run from test return the ManPy result
    if test:
        return simulationOutput

    #===================== Output the JSON file ========================================#
    #Write the updated data to the output JSON file
    jsonFile = open('JSON_AssembleDismantle_Output.json', 'w')
    jsonFile.write(json.dumps(data2, indent=True))
    jsonFile.close()
    #================================ Call the ExcelOutput object to export the outcomes of the statistical analysis to xls files =============================================#
    C = ExcelOutput()
    C.PrintStatisticalMeasures(ProcTime, 'ProcTime_StatResults.xls')
    C.PrintStatisticalMeasures(MTTR, 'MTTR_StatResults.xls')
    C.PrintStatisticalMeasures(MTTF, 'MTTF_StatResults.xls')
    C.PrintDistributionFit(ProcTime, 'ProcTime_DistFitResults.xls')
    C.PrintDistributionFit(MTTR, 'MTTR_DistFitResults.xls')

    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the fully updated input
    simulationOutput = ManPyMain.main(input_data=json.dumps(data2))
    #save the simulation output
    jsonFile = open('ManPyOutput.json', 'w')
    jsonFile.write(simulationOutput)
    jsonFile.close()
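
# The Distributions / DistFittest helpers above hide the actual fitting. Purely as an
# illustration of the idea (a scipy-based sketch, not the KE tool's own implementation),
# an exponential MLE fit plus a Kolmogorov-Smirnov goodness-of-fit check can be done as:
import numpy as np
from scipy import stats

def fit_exponential_sketch(sample):
    mean = np.mean(sample)  # the MLE of the exponential mean is the sample mean
    d_stat, p_value = stats.kstest(sample, 'expon', args=(0, mean))  # loc=0, scale=mean
    return {'distribution': 'Exp', 'mean': float(mean), 'ks_p_value': p_value}

# e.g. fit_exponential_sketch([12.1, 9.8, 15.3, 11.0]) returns the fitted mean and a KS p-value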
# Example 2
def main(test=0,
         ExcelFileName='DataSet.xlsx',
         CSVFileName='ProcTimesData.csv',
         simul8XMLFileName='Topology1.xml',
         workbook=None,
         csvFile=None,
         simul8XMLFile=None):
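    """KE tool example: read the Activity 2/3 processing times from CSV and the
    inter-arrival and failure data from Excel, fit distributions to them, and
    write the results into the Simul8 topology XML."""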
    #================================= Extract the required data from the data files ==========================================#
    if csvFile:
        CSVFileName = csvFile.name
    filename = CSVFileName
    #Use the ImportCSVdata object's Input_data method to import the data set from the CSV file
    csv = ImportCSVdata()
    Data = csv.Input_data(filename)

    #Get the two processing-time data sets from the returned Python dictionary
    Activity2_Proc = Data.get('Activity 2', [])
    Activity3_Proc = Data.get('Activity 3', [])

    #Read from the given directory the Excel document with the data
    if not workbook:
        workbook = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName))
    worksheets = workbook.sheet_names()
    worksheet_Inter = worksheets[0]  #Worksheet with the inter-arrival time data

    data = ImportExceldata()
    #Inter-arrival times dictionary keyed by 'Source', values are the inter-arrival time data
    interTimes = data.Input_data(worksheet_Inter, workbook)

    S1 = interTimes.get('Source', [])

    #Select the worksheet with the failures data (MTTF, MTTR) from the same workbook
    worksheets = workbook.sheet_names()
    worksheet_Fail = worksheets[1]

    data = ImportExceldata()
    #Failures dictionary keyed by 'MTTF' and 'MTTR', values are the corresponding data points
    failures = data.Input_data(worksheet_Fail, workbook)

    MTTF = failures.get('MTTF', [])
    MTTR = failures.get('MTTR', [])

    #======================= Fit data to probability distributions ================================#
    #The Distributions and DistFittest objects are used to fit statistical distributions to the imported data
    dist = Distributions()
    act2Proc = dist.Weibull_distrfit(Activity2_Proc)
    act3Proc = dist.Weibull_distrfit(Activity3_Proc)

    s1Times = dist.Exponential_distrfit(S1)

    distFit = DistFittest()
    act1MTTF = distFit.ks_test(MTTF)
    act1MTTR = distFit.ks_test(MTTR)

    #======================= Output preparation: output the updated values in the XML file of this example ================================#

    if not simul8XMLFile:
        #Path of the Simul8 XML file next to this script
        datafile = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                simul8XMLFileName)
    else:
        datafile = simul8XMLFile
    tree = et.parse(datafile)

    simul8 = Simul8Output()  #Call the Simul8Output object
    #Assign the statistical distribution calculated above in the XML file using methods of the Simul8Output object
    interTimes = simul8.InterArrivalTime(tree, 'Source', s1Times)

    procTimes2 = simul8.ProcTimes(interTimes, 'Activity 2', act2Proc)
    procTimes3 = simul8.ProcTimes(procTimes2, 'Activity 3', act3Proc)

    #Again assign the MTTF and MTTR probability distributions calling the relevant methods from the Simul8Output object
    MTTF1 = simul8.MTBF(procTimes3, 'Activity 1', act1MTTF)
    MTTR1 = simul8.MTTR(MTTF1, 'Activity 1', act1MTTR)
    #Output the XML file with the processed data
    output = MTTR1.write('KEtool_Topology1.xml')

    if test:
        output = et.parse('KEtool_Topology1.xml')
        return output
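
# A minimal usage sketch for the example above (assuming it lives in its own module next
# to DataSet.xlsx and ProcTimesData.csv): an already-open workbook and CSV file can be
# passed in instead of relying on the default file names.
import xlrd

wb = xlrd.open_workbook('DataSet.xlsx')
with open('ProcTimesData.csv') as procCSV:
    main(workbook=wb, csvFile=procCSV)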
# Example 3
def main(test=0,
         ExcelFileName1='InterarrivalsData.xls',
         ExcelFileName2='ProcData.xls',
         simul8XMLFileName='SingleServer.xml',
         workbook1=None,
         workbook2=None,
         simul8XMLFile=None):
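    """KE tool example: read the M1 processing times and the Source inter-arrival
    times from two Excel workbooks, fit distributions to them, and write the
    results into the Simul8 single-server XML."""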

    #Read from the given directory the Excel document with the processing times data
    if not workbook2:
        workbook2 = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName2))
    worksheets = workbook2.sheet_names()
    worksheet_Proc = worksheets[0]  #Worksheet with the processing time data

    importData = ImportExceldata()  #Instantiate the Import_Excel object
    #Processing times dictionary keyed by 'M1', values are the processing time data
    procTimes = importData.Input_data(worksheet_Proc, workbook2)

    #Get the M1 data list from the processing times dictionary above
    M1 = procTimes.get('M1', [])

    distFitting = DistFittest()  #Call the DistFittest object
    M1 = distFitting.ks_test(M1)

    #Read from the given directory the Excel document with the inter-arrivals data
    if not workbook1:
        workbook1 = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName1))
    worksheets = workbook1.sheet_names()
    worksheet_Inter = worksheets[0]  #Worksheet with the inter-arrival time data

    data = ImportExceldata()
    #Inter-arrival times dictionary keyed by 'Source', values are the inter-arrival time data
    interTimes = data.Input_data(worksheet_Inter, workbook1)

    S1 = interTimes.get('Source', [])

    distMLE = Distributions()  #Call the Distributions object
    S1 = distMLE.Exponential_distrfit(S1)

    if not simul8XMLFile:
        #Path of the Simul8 XML file next to this script
        datafile = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                simul8XMLFileName)
    else:
        datafile = simul8XMLFile
    tree = et.parse(datafile)

    simul8 = Simul8Output()  #Call the Simul8Output object
    title = 'KEtool_SingleServer'
    interTimes = simul8.InterArrivalTime(tree, 'Source', S1)
    procTimes = simul8.ProcTimes(interTimes, 'Activity 1', M1)
    title = simul8.Title(procTimes, title)
    #Output the XML file with the processed data
    output = title.write('KEtool_SingleServer.xml')

    if test:
        output = et.parse('KEtool_SingleServer.xml')
        return output
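
# ImportExceldata belongs to the KE tool; as an assumption about what such a reader
# typically does (an illustrative sketch, not the tool's actual code), a worksheet can be
# turned into a {header: values} dictionary with plain xlrd like this:
import xlrd

def read_columns_sketch(workbook, sheet_name):
    sheet = workbook.sheet_by_name(sheet_name)
    data = {}
    for col in range(sheet.ncols):
        header = sheet.cell_value(0, col)  # first row holds the key, e.g. 'M1' or 'Source'
        values = sheet.col_values(col, start_rowx=1)  # remaining rows hold the data points
        data[header] = [v for v in values if v != '']  # drop empty cells
    return data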
def main(test=0,
         simul8XMLFileName='ParallelStations.xml',
         DBFilePath=r'C:\Users\Panos\Documents\KE tool_documentation',
         file_path=None,
         simul8XMLFile=None):
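    """KE tool example: query processing times, MTTF and MTTR data for MILL1 and
    MILL2 from a database, fit distributions to them, and write the results into
    the Simul8 parallel-stations XML."""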
    if not file_path:
        file_path = DBFilePath
    cnxn = ConnectionData(seekName='ServerData',
                          file_path=file_path,
                          implicitExt='txt',
                          number_of_cursors=3)
    cursors = cnxn.getCursors()

    #Database queries used to extract the required data; in this example the processing
    #times are obtained by subtracting the TIMEIN data point from the TIMEOUT data point
    a = cursors[0].execute("""
            select prod_code, stat_code, emp_no, TIMEIN, TIMEOUT
            from production_status
                    """)
    MILL1 = []  #Initialization of MILL1 list
    MILL2 = []  #Initialization of MILL2 list
    for j in range(a.rowcount):
        #get the next line
        ind1 = a.fetchone()
        if ind1.stat_code == 'MILL1':
            procTime = []
            procTime.insert(0, ind1.TIMEIN)
            procTime.insert(1, ind1.TIMEOUT)
            MILL1.append(procTime)
        elif ind1.stat_code == 'MILL2':
            procTime = []
            procTime.insert(0, ind1.TIMEIN)
            procTime.insert(1, ind1.TIMEOUT)
            MILL2.append(procTime)
        else:
            continue
    #The Transformations object is called to conduct some data transformations
    transform = Transformations()
    procTime_MILL1 = []
    for elem in MILL1:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL1.append(dt[0])

    procTime_MILL2 = []
    for elem in MILL2:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL2.append(dt[0])
    #Database queries used again to extract the MTTF and MTTR data points
    b = cursors[1].execute("""
            select stat_code, MTTF_hour
            from failures
                    """)

    c = cursors[2].execute("""
            select stat_code, MTTR_hour
            from repairs
                    """)
    MTTF_MILL1 = []  #List that will contain the MTTF data points for MILL1
    MTTF_MILL2 = []  #List that will contain the MTTF data points for MILL2
    for j in range(b.rowcount):
        #get the next line
        ind2 = b.fetchone()
        if ind2.stat_code == 'MILL1':
            MTTF_MILL1.append(ind2.MTTF_hour)
        elif ind2.stat_code == 'MILL2':
            MTTF_MILL2.append(ind2.MTTF_hour)
        else:
            continue

    MTTR_MILL1 = []  #List that will contain the MTTR data points for MILL1
    MTTR_MILL2 = []  #List that will contain the MTTR data points for MILL2
    for j in range(c.rowcount):
        #get the next line
        ind3 = c.fetchone()
        if ind3.stat_code == 'MILL1':
            MTTR_MILL1.append(ind3.MTTR_hour)
        elif ind3.stat_code == 'MILL2':
            MTTR_MILL2.append(ind3.MTTR_hour)
        else:
            continue

    #======================= Fit data to statistical distributions ================================#
    #The Distributions object is called to fit statistical distributions to the in scope data
    dist_proctime = Distributions()
    distProcTime_MILL1 = dist_proctime.Lognormal_distrfit(procTime_MILL1)
    distProcTime_MILL2 = dist_proctime.Weibull_distrfit(procTime_MILL2)

    dist_MTTF = Distributions()
    dist_MTTR = Distributions()
    distMTTF_MILL1 = dist_MTTF.Exponential_distrfit(MTTF_MILL1)
    distMTTF_MILL2 = dist_MTTF.Exponential_distrfit(MTTF_MILL2)

    distMTTR_MILL1 = dist_MTTR.Normal_distrfit(MTTR_MILL1)
    distMTTR_MILL2 = dist_MTTR.Normal_distrfit(MTTR_MILL2)

    #======================= Output preparation: output the updated values in the XML file of this example ================================#

    if not simul8XMLFile:
        #Path of the Simul8 XML file next to this script
        datafile = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                simul8XMLFileName)
    else:
        datafile = simul8XMLFile
    tree = et.parse(datafile)

    simul8 = Simul8Output()  #Call the Simul8Output object
    #Assign the statistical distribution found above in the XML file using methods of the Simul8Output object
    procTimes1 = simul8.ProcTimes(tree, 'MILL1', distProcTime_MILL1)
    procTimes2 = simul8.ProcTimes(procTimes1, 'MILL2', distProcTime_MILL2)
    #Again assign the MTTF and MTTR probability distributions calling the relevant methods from the Simul8Output object
    MTTF1 = simul8.MTBF(procTimes2, 'MILL1', distMTTF_MILL1)
    MTTR1 = simul8.MTTR(MTTF1, 'MILL1', distMTTR_MILL1)

    MTTF2 = simul8.MTBF(MTTR1, 'MILL2', distMTTF_MILL2)
    MTTR2 = simul8.MTTR(MTTF2, 'MILL2', distMTTR_MILL2)
    #Output the XML file with the processed data
    output = MTTR2.write('KEtool_ParallelStations.xml')

    if test:
        output = et.parse('KEtool_ParallelStations.xml')
        return output
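
# The loops above turn TIMEIN/TIMEOUT into seconds by hand. An equivalent stdlib sketch
# (assuming the database driver returns datetime.time objects, as the attribute access
# above suggests) is:
from datetime import datetime, date

def seconds_between_sketch(time_in, time_out):
    # combine the times with an arbitrary date so they can be subtracted directly
    start = datetime.combine(date.min, time_in)
    end = datetime.combine(date.min, time_out)
    return (end - start).total_seconds()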
def main(test=0,
         CSVFileName1='InterArrivalData.csv',
         CSVFileName2='DataSet.csv',
         JSONFileName='JSON_ConveyerLine.json',
         jsonFile=None,
         csvFile1=None,
         csvFile2=None):
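    """KE tool example: read the M1/M2 processing times and the S1 inter-arrival
    times from CSV files, clean the data, fit distributions, and write the results
    into the JSON model of the conveyor line."""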
    if csvFile2:
        CSVFileName2 = csvFile2.name
    if csvFile1:
        CSVFileName1 = csvFile1.name

    #Use the ImportCSVdata object's Input_data method to import the data sets from the CSV files
    CSV = ImportCSVdata()
    procData = CSV.Input_data(CSVFileName2)
    sourceData = CSV.Input_data(CSVFileName1)
    #Get the data sets from the returned Python dictionaries
    M1 = procData.get('M1', [])
    M2 = procData.get('M2', [])
    S1 = sourceData.get('S1', [])

    ################### Process the data sets using the following objects ###################################
    #Replace missing values using the ReplaceMissingValues object
    missingValues = ReplaceMissingValues()
    M1 = missingValues.DeleteMissingValue(M1)
    M2 = missingValues.DeleteMissingValue(M2)
    S1 = missingValues.ReplaceWithMean(S1)

    #Detect outliers calling the DetectOutliers object
    outliers = DetectOutliers()
    M1 = outliers.DeleteExtremeOutliers(M1)
    M2 = outliers.DeleteExtremeOutliers(M2)
    S1 = outliers.DeleteOutliers(S1)

    #Conduct distribution fitting calling the Distributions object and DistFittest object
    MLE = Distributions()
    KStest = DistFittest()
    M1 = KStest.ks_test(M1)
    M2 = KStest.ks_test(M2)
    S1 = MLE.Exponential_distrfit(S1)
    #================================= Output preparation: output the updated values in the JSON file of this example =========================================================#
    if not jsonFile:
        #Open and load the JSON file from the given directory
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')
        data = json.load(jsonFile)
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    exportJSON = JSONOutput()
    stationId1 = 'M1'
    stationId2 = 'M2'
    stationId3 = 'S1'

    data = exportJSON.ProcessingTimes(data, stationId1, M1)
    data1 = exportJSON.ProcessingTimes(data, stationId2, M2)
    data2 = exportJSON.InterarrivalTime(data1, stationId3, S1)

    # if we run from test return the data2
    if test:
        return data2

    #Write the updated data to the output JSON file
    jsonFile = open('JSON_ConveyerLine_Output.json', 'w')
    jsonFile.write(json.dumps(data2, indent=True))
    jsonFile.close()
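
# DeleteOutliers / DeleteExtremeOutliers come from the KE tool; a common rule they may
# resemble (an assumption, not the tool's documented behaviour) is the interquartile-range
# filter sketched below with numpy:
import numpy as np

def drop_outliers_sketch(sample, factor=1.5):
    q1, q3 = np.percentile(sample, [25, 75])
    iqr = q3 - q1
    low, high = q1 - factor * iqr, q3 + factor * iqr
    return [x for x in sample if low <= x <= high]

# factor=1.5 drops ordinary outliers; factor=3 would keep all but the extreme ones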