Example #1
def _runKnowledgeExtraction(parameter_dict):
  try:
    workbook = xlrd.open_workbook(
        file_contents=urllib.urlopen(parameter_dict['general']['ke_url']).read())
    worksheets = workbook.sheet_names()
    worksheet_ProcessingTimes = worksheets[0]   #It defines the worksheet_ProcessingTimes as the first sheet of the Excel file

    A=ImportExceldata()            #Call the Import_Excel object 
    B=DistFittest()             #Call the Distribution Fitting object
    ProcessingTimes= A.Input_data(worksheet_ProcessingTimes, workbook)  #Create a dictionary with the imported data from the Excel file

    data = parameter_dict                            #It loads the file
#     nodes = data['nodes'] 
    nodes = data['graph']['node']

    for station, values in ProcessingTimes.items():             #This loop searches the elements of the Excel imported data and if these elements exist in json file append the distribution fitting results in a dictionary   
      if station in nodes: 
        temp= B.ks_test(values)
        dist=temp['distributionType']
        del temp['distributionType']
        temp={dist:temp, "distribution": dist}
        from pprint import pprint
        pprint(temp)
        parameter_dict['graph']['node'][station]['processingTime'] = temp
    return dict(success=True, data=parameter_dict)
  except Exception, e:
    tb = traceback.format_exc()
    app.logger.error(tb)
    return dict(error=tb)
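Example #1 reshapes the result of ks_test before storing it under processingTime. The following is a minimal, self-contained sketch of that reshaping step with a hypothetical ks_test result; the parameter names ('mean', 'stdev') are illustrative assumptions, not the actual DistFittest output.

# Hypothetical ks_test result used only to illustrate the reshaping above.
temp = {'distributionType': 'Normal', 'mean': 5.3, 'stdev': 1.1}
dist = temp.pop('distributionType')              # e.g. 'Normal'
processingTime = {dist: temp, "distribution": dist}
print(processingTime)
# -> {'Normal': {'mean': 5.3, 'stdev': 1.1}, 'distribution': 'Normal'}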
Example #2
def _runKnowledgeExtraction(parameter_dict):
    try:
        workbook = xlrd.open_workbook(file_contents=urllib.urlopen(
            parameter_dict['general']['ke_url']).read())
        worksheets = workbook.sheet_names()
        worksheet_ProcessingTimes = worksheets[0]  #It defines the worksheet_ProcessingTimes as the first sheet of the Excel file

        A = Import_Excel()  #Call the Import_Excel object
        B = DistFittest()  #Call the Distribution Fitting object
        ProcessingTimes = A.Input_data(worksheet_ProcessingTimes, workbook)  #Create a dictionary with the imported data from the Excel file

        data = parameter_dict  #Use the parameter dictionary passed to the function
        nodes = data['nodes']

        for station, values in ProcessingTimes.items():  #Loop over the imported Excel data; if a station also exists in the JSON file, store its distribution-fitting result
            if station in nodes:
                parameter_dict['nodes'][station]['processingTime'] = B.ks_test(
                    values)
        return dict(success=True, data=parameter_dict)
    except Exception, e:
        tb = traceback.format_exc()
        app.logger.error(tb)
        return dict(error=tb)
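ks_test itself is not listed on this page. As a rough sketch of the idea only (the candidate list, the return keys and the use of scipy are assumptions, not the DistFittest internals), a Kolmogorov-Smirnov based selection could look like this:

from scipy import stats

def ks_best_fit(data):
    # Sketch: fit a few candidate distributions by MLE and keep the one
    # with the highest Kolmogorov-Smirnov p-value.
    candidates = {'Normal': stats.norm, 'Exp': stats.expon, 'Gamma': stats.gamma}
    best = None
    for name, dist in candidates.items():
        params = dist.fit(data)
        d_stat, p_value = stats.kstest(data, dist.name, args=params)
        if best is None or p_value > best[2]:
            best = (name, params, p_value)
    return {'distributionType': best[0], 'parameters': best[1], 'pvalue': best[2]}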
Example #3
def main(test=0, CSVFileName1='InterArrivalData.csv',
                CSVFileName2='DataSet.csv',
                JSONFileName='JSON_ConveyerLine.json',
                jsonFile=None, csvFile1=None, csvFile2=None):
    if csvFile2:
        CSVFileName2 = csvFile2.name
    if csvFile1:
        CSVFileName1 = csvFile1.name
    
    CSV=ImportCSVdata()   #Instantiate the CSV import object; its Input_data method imports the data sets from the CSV files into the tool
    procData=CSV.Input_data(CSVFileName2)
    sourceData=CSV.Input_data(CSVFileName1)
    M1=procData.get('M1',[])       #get from the returned Python dictionary the data sets
    M2=procData.get('M2',[])
    S1=sourceData.get('S1',[])
    
    ################### Processing of the data sets calling the following objects ###################################
    #Replace missing values calling the corresponding object
    missingValues=ReplaceMissingValues()
    M1=missingValues.DeleteMissingValue(M1)
    M2=missingValues.DeleteMissingValue(M2)
    S1=missingValues.ReplaceWithMean(S1)
    
    #Detect outliers calling the DetectOutliers object
    outliers=DetectOutliers()
    M1=outliers.DeleteExtremeOutliers(M1)
    M2=outliers.DeleteExtremeOutliers(M2)
    S1=outliers.DeleteOutliers(S1)
    
    #Conduct distribution fitting calling the Distributions object and DistFittest object
    MLE=Distributions()
    KStest=DistFittest()
    M1=KStest.ks_test(M1)
    M2=KStest.ks_test(M2)
    S1=MLE.Exponential_distrfit(S1)
    #================================= Output preparation: output the updated values in the JSON file of this example =========================================================#
    if not jsonFile:
        jsonFile = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), JSONFileName),'r')      #It opens the JSON file 
        data = json.load(jsonFile)             #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile) 
    
    exportJSON=JSONOutput()
    stationId1='M1'
    stationId2='M2'
    stationId3='S1'
    
    data=exportJSON.ProcessingTimes(data, stationId1, M1)                           
    data1=exportJSON.ProcessingTimes(data, stationId2, M2)
    data2=exportJSON.InterarrivalTime(data1, stationId3, S1)
    
    # if we run from test return the data2
    if test:
        return data2
    
    jsonFile = open('JSON_ConveyerLine_Output.json',"w")     #It opens the JSON file
    jsonFile.write(json.dumps(data2, indent=True))           #It writes the updated data to the JSON file 
    jsonFile.close()                                         #It closes the file
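The test, jsonFile and csvFile parameters let the function run without touching its default files on disk. A hedged sketch of such a call (it assumes the function above is importable and that the three input files exist next to the caller; the file names are the defaults used above):

# Hypothetical driver for the main() above; paths are assumptions.
import json

with open('JSON_ConveyerLine.json') as jsonFile, \
        open('InterArrivalData.csv') as csvFile1, \
        open('DataSet.csv') as csvFile2:
    result = main(test=1, jsonFile=jsonFile, csvFile1=csvFile1, csvFile2=csvFile2)

print(json.dumps(result, indent=True))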
Example #4
def main(test=0, ExcelFileName1='InterarrivalsData.xls',
                ExcelFileName2='ProcData.xls',
                simul8XMLFileName='SingleServer.xml',
                workbook1=None, workbook2=None, simul8XMLFile=None):
    
    #Read from the given directory the Excel document with the processing times data
    if not workbook2:
        workbook2 = xlrd.open_workbook(os.path.join(os.path.dirname(os.path.realpath(__file__)), ExcelFileName2)) 
    worksheets = workbook2.sheet_names()
    worksheet_Proc = worksheets[0]     #Define the worksheet with the Processing time data
    
    importData = ImportExceldata()   #Call the Python object Import_Excel
    procTimes = importData.Input_data(worksheet_Proc, workbook2)   #Create the Processing times dictionary with key the M1 and values the processing time data
    
    #Get from the above dictionaries the M1 key and the Source key and define the following lists with data
    M1 = procTimes.get('M1',[])
          
    distFitting = DistFittest()  #Call the DistFittest object
    M1 = distFitting.ks_test(M1)
    
    #Read from the given directory the Excel document with the inter-arrivals data
    if not workbook1:
        workbook1 = xlrd.open_workbook(os.path.join(os.path.dirname(os.path.realpath(__file__)), ExcelFileName1))
    worksheets = workbook1.sheet_names()
    worksheet_Inter = worksheets[0]     #Define the worksheet with the Inter-arrivals time data
    
    data = ImportExceldata()
    interTimes = data.Input_data(worksheet_Inter, workbook1) #Create the Inter-arrival times dictionary with key the Source and values the inter-arrival time data
    
    S1 = interTimes.get('Source',[])  
    
    distMLE = Distributions() #Call the Distributions object
    S1 = distMLE.Exponential_distrfit(S1)
    
    if not simul8XMLFile:
        datafile=(os.path.join(os.path.dirname(os.path.realpath(__file__)), simul8XMLFileName))       #It defines the name or the directory of the XML file 
        tree = et.parse(datafile)    
    else:
        datafile=simul8XMLFile
        tree = et.parse(datafile)
        
    simul8 = Simul8Output()    #Call the Simul8Output object
    title = 'KEtool_SingleServer'
    interTimes = simul8.InterArrivalTime(tree,'Source', S1)
    procTimes = simul8.ProcTimes(interTimes,'Activity 1', M1)
    title = simul8.Title(procTimes,title)
    #Output the XML file with the processed data
    output= title.write('KEtool_SingleServer.xml')
    
    if test:
        output=et.parse('KEtool_SingleServer.xml')
        return output    
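Input_data belongs to the KE tool's Import_Excel helper and is not reproduced on this page. As a hedged sketch of what such a column reader could look like with xlrd (the first-row-as-header layout and the name read_columns are assumptions, not the actual implementation):

import xlrd

def read_columns(sheet_name, workbook):
    # Hypothetical stand-in for Input_data: the first row holds the keys
    # (e.g. 'M1', 'Source'), the rows below hold the data values.
    sheet = workbook.sheet_by_name(sheet_name)
    data = {}
    for col in range(sheet.ncols):
        header = sheet.cell_value(0, col)
        data[header] = [sheet.cell_value(row, col)
                        for row in range(1, sheet.nrows)
                        if sheet.cell_value(row, col) != '']
    return data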
M1A_Proc = C.DeleteOutliers(M1A_Proc)
M1B_Proc = C.DeleteOutliers(M1B_Proc)
M2A_Proc = C.DeleteOutliers(M2A_Proc)
M2B_Proc = C.DeleteOutliers(M2B_Proc)
M3A_Proc = C.DeleteOutliers(M3A_Proc)
M3B_Proc = C.DeleteOutliers(M3B_Proc)
CB_Proc = C.DeleteOutliers(CB_Proc)
FL_Proc = C.DeleteOutliers(FL_Proc)
M3B_Proc = C.DeleteOutliers(M3B_Proc)
PrA_Proc = C.DeleteOutliers(PrA_Proc)
PrB_Proc = C.DeleteOutliers(PrB_Proc)
PaA_Proc = C.DeleteOutliers(PaA_Proc)
Pb_Proc = C.DeleteOutliers(Pb_Proc)

# Call the DistFittest object and conduct Kolmogorov-Smirnov distribution fitting test in the processing times lists of each station
D = DistFittest()
dictProc = {}  #Create a dictionary that holds the statistical distributions of the processing times of each station
dictProc['MA'] = D.ks_test(MA_Proc)
dictProc['M1A'] = D.ks_test(M1A_Proc)
dictProc['M1B'] = D.ks_test(M1B_Proc)
dictProc['M2A'] = D.ks_test(M2A_Proc)
dictProc['M2B'] = D.ks_test(M2B_Proc)
dictProc['M3A'] = D.ks_test(M3A_Proc)
dictProc['M3B'] = D.ks_test(M3B_Proc)
dictProc['CB'] = D.ks_test(CB_Proc)
dictProc['MM'] = D.ks_test(MM_Proc)
dictProc['FL'] = D.ks_test(FL_Proc)
dictProc['PrA'] = D.ks_test(PrA_Proc)
dictProc['PrB'] = D.ks_test(PrB_Proc)
dictProc['PaA'] = D.ks_test(PaA_Proc)
Example #6
def main(test=0,
         ExcelFileName='DataSet.xlsx',
         CSVFileName='ProcTimesData.csv',
         simul8XMLFileName='Topology1.xml',
         workbook=None,
         csvFile=None,
         simul8XMLFile=None):
    #================================= Extract the required data from the data files ==========================================#
    if csvFile:
        CSVFileName = csvFile.name
    filename = CSVFileName
    csv = ImportCSVdata()  #Instantiate the CSV import object; its Input_data method imports the data set from the CSV file into the tool
    Data = csv.Input_data(filename)

    Activity2_Proc = Data.get('Activity 2', [])  #get from the returned Python dictionary the two data sets
    Activity3_Proc = Data.get('Activity 3', [])

    #Read from the given directory the Excel document with the data
    if not workbook:
        workbook = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName))
    worksheets = workbook.sheet_names()
    worksheet_Inter = worksheets[0]  #Define the worksheet with the Inter-arrivals time data

    data = ImportExceldata()
    interTimes = data.Input_data(worksheet_Inter, workbook)  #Create the Inter-arrival times dictionary with key the Source and values the inter-arrival time data

    S1 = interTimes.get('Source', [])

    #Read from the given directory the Excel document with the data
    worksheets = workbook.sheet_names()
    worksheet_Fail = worksheets[1]  #Define the worksheet with the failures data (MTTF,MTTR)

    data = ImportExceldata()
    failures = data.Input_data(worksheet_Fail, workbook)  #Create the failures dictionary with key the MTTF and MTTR data points

    MTTF = failures.get('MTTF', [])
    MTTR = failures.get('MTTR', [])

    #======================= Fit data to probability distributions ================================#
    #The Distributions and DistFittest objects are called to fit statistical distributions to the in scope data
    dist = Distributions()
    act2Proc = dist.Weibull_distrfit(Activity2_Proc)
    act3Proc = dist.Weibull_distrfit(Activity3_Proc)

    s1Times = dist.Exponential_distrfit(S1)

    distFit = DistFittest()
    act1MTTF = distFit.ks_test(MTTF)
    act1MTTR = distFit.ks_test(MTTR)

    #======================= Output preparation: output the updated values in the XML file of this example ================================#

    if not simul8XMLFile:
        datafile = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                simul8XMLFileName)  #It defines the name or the directory of the XML file
        tree = et.parse(datafile)
    else:
        datafile = simul8XMLFile
        tree = et.parse(datafile)

    simul8 = Simul8Output()  #Call the Simul8Output object
    #Assign the statistical distribution calculated above in the XML file using methods of the Simul8Output object
    interTimes = simul8.InterArrivalTime(tree, 'Source', s1Times)

    procTimes2 = simul8.ProcTimes(interTimes, 'Activity 2', act2Proc)
    procTimes3 = simul8.ProcTimes(procTimes2, 'Activity 3', act3Proc)

    #Again assign the MTTF and MTTR probability distributions calling the relevant methods from the Simul8Output object
    MTTF1 = simul8.MTBF(procTimes3, 'Activity 1', act1MTTF)
    MTTR1 = simul8.MTTR(MTTF1, 'Activity 1', act1MTTR)
    #Output the XML file with the processed data
    output = MTTR1.write('KEtool_Topology1.xml')

    if test:
        output = et.parse('KEtool_Topology1.xml')
        return output
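Weibull_distrfit and Exponential_distrfit are maximum-likelihood fits provided by the Distributions object and are not shown here. A hedged, roughly equivalent sketch with scipy.stats (fixing the location at zero and the returned key names are assumptions, not the tool's actual output):

from scipy import stats

def weibull_mle_fit(data):
    # Sketch of a two-parameter Weibull fit by maximum likelihood.
    shape, loc, scale = stats.weibull_min.fit(data, floc=0)
    return {'distribution': 'Weibull', 'shape': shape, 'scale': scale}

def exponential_mle_fit(data):
    # Sketch of an exponential fit; the MLE of the scale is the sample mean.
    loc, scale = stats.expon.fit(data, floc=0)
    return {'distribution': 'Exp', 'mean': scale}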
Example #7
def main(test=0, ExcelFileName='inputsTwoServers.xls',
                JSONFileName='JSON_TwoServers.json',
                CMSDFileName='CMSD_TwoServers.xml',
                workbook=None,
                jsonFile=None, cmsdFile=None):
    if not workbook:
        workbook = xlrd.open_workbook(os.path.join(os.path.dirname(os.path.realpath(__file__)), ExcelFileName))      #Using xlrd library opens the Excel document with the input data      
    worksheets = workbook.sheet_names()
    worksheet_OperationTime = worksheets[0]             #It creates a variable that holds the first Excel worksheet 
     
    X=ImportExceldata()                                    #Call the import_Excel object
    OperationTimes= X.Input_data(worksheet_OperationTime,workbook)      #Define a Python dictionary named OperationTimes that holds the dictionary returned by the Excel import object
    Machine1_OpearationTimes = OperationTimes.get('Machine1',[])        #Two lists are defined (Machine1_OpearationTimes, Machine2_OpearationTimes) with the operation times data of each machine
    Machine2_OpearationTimes = OperationTimes.get('Machine2',[])
    
    A=ReplaceMissingValues()                                     #Call the HandleMissingValues object
    Machine1_OpearationTimes= A.DeleteMissingValue(Machine1_OpearationTimes)        #It deletes the missing values in the lists with the operation times data
    Machine2_OpearationTimes= A.DeleteMissingValue(Machine2_OpearationTimes)
    
    Dict={}
    B=DistFittest()                                     #It calls the DistFittest object
    Dict['M1']=B.ks_test(Machine1_OpearationTimes)                 #It conducts the Kolmogorov-Smirnov test in the list with the operation times data 
    Dict['M2']=B.ks_test(Machine2_OpearationTimes)
    M1=Dict.get('M1')
    M2=Dict.get('M2')
        
    #==================================== Output preparation: output the updated values in the CMSD information model ====================================================#
    if not cmsdFile:
        datafile=(os.path.join(os.path.dirname(os.path.realpath(__file__)), CMSDFileName))       #It defines the path of the XML file in which the CMSD information model is manually written
        tree = et.parse(datafile)                                               #This file will be parsed using the XML.ETREE Python library
    else:
        tree = et.parse(cmsdFile)
    
    exportCMSD=CMSDOutput()
    stationId1='A020'
    stationId2='A040'
    procTime1=exportCMSD.ProcessingTimes(tree, stationId1, M1) 
    procTime2=exportCMSD.ProcessingTimes(procTime1, stationId2, M2)
    
    procTime2.write('CMSD_TwoServers_Output.xml',encoding="utf8")                         #It writes the element tree to a specified file, using the 'utf8' output encoding
    #================================= Output preparation: output the updated values in the JSON file of Topology10 =========================================================#
    if not jsonFile:
        jsonFile = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), JSONFileName),'r')      #It opens the JSON file 
        data = json.load(jsonFile)             #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile) 
    
    exportJSON=JSONOutput()
    stationId1='M1'
    stationId2='M2'
    data=exportJSON.ProcessingTimes(data, stationId1, M1)                           
    data1=exportJSON.ProcessingTimes(data, stationId2, M2)         
    
    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput=ManPyMain.main(input_data=json.dumps(data1))
    
    # if we run from test return the ManPy result
    if test:
        return simulationOutput
    
    #=================== Output the JSON file ==========================#
    jsonFile = open('JSON_TwoServers_Output.json',"w")     #It opens the JSON file
    jsonFile.write(json.dumps(data1, indent=True))                                           #It writes the updated data to the JSON file 
    jsonFile.close()                                                                        #It closes the file
        
    #================================ Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in Excel files =============================================#
    C=ExcelOutput()
    C.PrintDistributionFit(Machine1_OpearationTimes,'Machine1_DistFitResults.xls')   
    C.PrintStatisticalMeasures(Machine1_OpearationTimes,'Machine1_StatResults.xls')
    C.PrintDistributionFit(Machine2_OpearationTimes,'Machine2_DistFitResults.xls')   
    C.PrintStatisticalMeasures(Machine2_OpearationTimes,'Machine2_StatResults.xls')
    
    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput=ManPyMain.main(input_data=json.dumps(data))
    # save the simulation output
    jsonFile = open('ManPyOutput.json',"w")     #It opens the JSON file
    jsonFile.write(simulationOutput)                                           #It writes the updated data to the JSON file 
    jsonFile.close()                                                                        #It closes the file
Example #8
def main(test=0,
         JSONFileName='JSON_example.json',
         CMSDFileName='CMSD_ParallelStations.xml',
         DBFilePath=r'C:\Users\Panos\Documents\KE tool_documentation',
         file_path=None,
         jsonFile=None,
         cmsdFile=None):
    if not file_path:
        cnxn = ConnectionData(seekName='ServerData',
                              file_path=DBFilePath,
                              implicitExt='txt',
                              number_of_cursors=3)
        cursors = cnxn.getCursors()

    a = cursors[0].execute("""
            select prod_code, stat_code,emp_no, TIMEIN, TIMEOUT
            from production_status
                    """)
    MILL1 = []
    MILL2 = []
    for j in range(a.rowcount):
        #get the next line
        ind1 = a.fetchone()
        if ind1.stat_code == 'MILL1':
            procTime = []
            procTime.insert(0, ind1.TIMEIN)
            procTime.insert(1, ind1.TIMEOUT)
            MILL1.append(procTime)
        elif ind1.stat_code == 'MILL2':
            procTime = []
            procTime.insert(0, ind1.TIMEIN)
            procTime.insert(1, ind1.TIMEOUT)
            MILL2.append(procTime)
        else:
            continue

    transform = Transformations()
    procTime_MILL1 = []
    for elem in MILL1:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL1.append(dt[0])

    procTime_MILL2 = []
    for elem in MILL2:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL2.append(dt[0])

    b = cursors[1].execute("""
            select stat_code, MTTF_hour
            from failures
                    """)

    c = cursors[2].execute("""
            select stat_code, MTTR_hour
            from repairs
                    """)
    MTTF_MILL1 = []
    MTTF_MILL2 = []
    for j in range(b.rowcount):
        #get the next line
        ind2 = b.fetchone()
        if ind2.stat_code == 'MILL1':
            MTTF_MILL1.append(ind2.MTTF_hour)
        elif ind2.stat_code == 'MILL2':
            MTTF_MILL2.append(ind2.MTTF_hour)
        else:
            continue

    MTTR_MILL1 = []
    MTTR_MILL2 = []
    for j in range(c.rowcount):
        #get the next line
        ind3 = c.fetchone()
        if ind3.stat_code == 'MILL1':
            MTTR_MILL1.append(ind3.MTTR_hour)
        elif ind3.stat_code == 'MILL2':
            MTTR_MILL2.append(ind3.MTTR_hour)
        else:
            continue

    #======================= Fit data to statistical distributions ================================#
    dist_proctime = DistFittest()
    distProcTime_MILL1 = dist_proctime.ks_test(procTime_MILL1)
    distProcTime_MILL2 = dist_proctime.ks_test(procTime_MILL2)

    dist_MTTF = Distributions()
    dist_MTTR = Distributions()
    distMTTF_MILL1 = dist_MTTF.Weibull_distrfit(MTTF_MILL1)
    distMTTF_MILL2 = dist_MTTF.Weibull_distrfit(MTTF_MILL2)

    distMTTR_MILL1 = dist_MTTR.Poisson_distrfit(MTTR_MILL1)
    distMTTR_MILL2 = dist_MTTR.Poisson_distrfit(MTTR_MILL2)

    #======================== Output preparation: output the values prepared in the CMSD information model of this model ====================================================#
    if not cmsdFile:
        datafile = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                CMSDFileName)  #It defines the path of the XML file in which the CMSD information model is manually written
        tree = et.parse(datafile)  #This file will be parsed using the XML.ETREE Python library
    else:
        tree = et.parse(cmsdFile)

    exportCMSD = CMSDOutput()
    stationId1 = 'M1'
    stationId2 = 'M2'
    procTime1 = exportCMSD.ProcessingTimes(tree, stationId1,
                                           distProcTime_MILL1)
    procTime2 = exportCMSD.ProcessingTimes(procTime1, stationId2,
                                           distProcTime_MILL2)

    TTF1 = exportCMSD.TTF(procTime2, stationId1, distMTTF_MILL1)
    TTR1 = exportCMSD.TTR(TTF1, stationId1, distMTTR_MILL1)

    TTF2 = exportCMSD.TTF(TTR1, stationId2, distMTTF_MILL2)
    TTR2 = exportCMSD.TTR(TTF2, stationId2, distMTTR_MILL2)

    TTR2.write(
        'CMSD_ParallelStations_Output.xml', encoding="utf8"
    )  #It writes the element tree to a specified file, using the 'utf8' output encoding

    #======================= Output preparation: output the updated values in the JSON file of this example ================================#
    if not jsonFile:
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')  #It opens the JSON file
        data = json.load(jsonFile)  #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    exportJSON = JSONOutput()
    stationId1 = 'M1'
    stationId2 = 'M2'
    data1 = exportJSON.ProcessingTimes(data, stationId1, distProcTime_MILL1)
    data2 = exportJSON.ProcessingTimes(data1, stationId2, distProcTime_MILL2)

    data3 = exportJSON.TTF(data2, stationId1, distMTTF_MILL1)
    data4 = exportJSON.TTR(data3, stationId1, distMTTR_MILL1)

    data5 = exportJSON.TTF(data4, stationId2, distMTTF_MILL2)
    data6 = exportJSON.TTR(data5, stationId2, distMTTR_MILL2)

    # if we run from test return the data6
    if test:
        return data6

    jsonFile = open('JSON_ParallelStations_Output.json',
                    "w")  #It opens the JSON file
    jsonFile.write(json.dumps(
        data6, indent=True))  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file

    #=================== Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files ==========================#
    export = ExcelOutput()

    export.PrintStatisticalMeasures(procTime_MILL1,
                                    'procTimeMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(procTime_MILL2,
                                    'procTimeMILL2_StatResults.xls')
    export.PrintStatisticalMeasures(MTTF_MILL1, 'MTTFMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(MTTF_MILL2, 'MTTFMILL2_StatResults.xls')
    export.PrintStatisticalMeasures(MTTR_MILL1, 'MTTRMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(MTTR_MILL2, 'MTTRMILL2_StatResults.xls')

    export.PrintDistributionFit(procTime_MILL1,
                                'procTimeMILL1_DistFitResults.xls')
    export.PrintDistributionFit(procTime_MILL2,
                                'procTimeMILL2_DistFitResults.xls')
    export.PrintDistributionFit(MTTF_MILL1, 'MTTFMILL1_DistFitResults.xls')
    export.PrintDistributionFit(MTTF_MILL2, 'MTTFMILL2_DistFitResults.xls')
    export.PrintDistributionFit(MTTR_MILL1, 'MTTRMILL1_DistFitResults.xls')
    export.PrintDistributionFit(MTTR_MILL2, 'MTTRMILL2_DistFitResults.xls')
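The hour/minute/second arithmetic in the MILL1/MILL2 loops converts the TIMEIN and TIMEOUT values to seconds before subtracting them. A hedged, equivalent sketch using datetime directly (it assumes both values are datetime.time objects on the same day, which matches how they are used above):

from datetime import datetime, date, time

def processing_seconds(time_in, time_out):
    # Combine the clock times with an arbitrary date and subtract.
    start = datetime.combine(date.min, time_in)
    end = datetime.combine(date.min, time_out)
    return (end - start).total_seconds()

# Illustrative values: in at 14:05:30, out at 14:12:00 -> 390.0 seconds
print(processing_seconds(time(14, 5, 30), time(14, 12, 0)))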
Example #9
listScrap=D.round(listScrap)       #Round the mean values of the list so as to get integers

dictScrap={}
dictScrap['P1']= listScrap[0]
dictScrap['P2']= listScrap[1]
dictScrap['P3']= listScrap[2]
dictScrap['P4']= listScrap[3]
dictScrap['P5']= listScrap[4]
dictScrap['P6']= listScrap[5]
dictScrap['P7']= listScrap[6]
dictScrap['P8']= listScrap[7]
dictScrap['P9']= listScrap[8]
dictScrap['P10']= listScrap[9]
dictScrap['P11']= listScrap[10]

E= DistFittest()
dictProc={}
dictProc['P1']= E.ks_test(P1_Proc)
dictProc['P2']= E.ks_test(P2_Proc)
dictProc['P3']= E.ks_test(P3_Proc)
dictProc['P4']= E.ks_test(P4_Proc)
dictProc['P5']= E.ks_test(P5_Proc)
dictProc['P6']= E.ks_test(P6_Proc)
dictProc['P7']= E.ks_test(P7_Proc)
dictProc['P8']= E.ks_test(P8_Proc)
dictProc['P9']= E.ks_test(P9_Proc)
dictProc['P10']= E.ks_test(P10_Proc)
dictProc['P11']= E.ks_test(P11_Proc) 

F= ExcelOutput()
F.PrintDistributionFit(P2_Proc,"DistributionFittingResults_P2Proc.xls")
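The P1..P11 assignments above can also be built in one comprehension. A small sketch (the placeholder values only stand in for the rounded scrap means computed earlier):

listScrap = list(range(1, 12))   # placeholder values standing in for the rounded means
dictScrap = {'P%d' % (i + 1): value for i, value in enumerate(listScrap)}
print(dictScrap['P11'])          # -> 11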
Example #10
def main(test=0,
         ExcelFileName1='InterarrivalsData.xls',
         ExcelFileName2='ProcData.xls',
         simul8XMLFileName='SingleServer.xml',
         workbook1=None,
         workbook2=None,
         simul8XMLFile=None):

    #Read from the given directory the Excel document with the processing times data
    if not workbook2:
        workbook2 = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName2))
    worksheets = workbook2.sheet_names()
    worksheet_Proc = worksheets[0]  #Define the worksheet with the Processing time data

    importData = ImportExceldata()  #Call the Python object Import_Excel
    procTimes = importData.Input_data(worksheet_Proc, workbook2)  #Create the Processing times dictionary with key the M1 and values the processing time data

    #Get from the above dictionaries the M1 key and the Source key and define the following lists with data
    M1 = procTimes.get('M1', [])

    distFitting = DistFittest()  #Call the DistFittest object
    M1 = distFitting.ks_test(M1)

    #Read from the given directory the Excel document with the inter-arrivals data
    if not workbook1:
        workbook1 = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName1))
    worksheets = workbook1.sheet_names()
    worksheet_Inter = worksheets[0]  #Define the worksheet with the Inter-arrivals time data

    data = ImportExceldata()
    interTimes = data.Input_data(worksheet_Inter, workbook1)  #Create the Inter-arrival times dictionary with key the Source and values the inter-arrival time data

    S1 = interTimes.get('Source', [])

    distMLE = Distributions()  #Call the Distributions object
    S1 = distMLE.Exponential_distrfit(S1)

    if not simul8XMLFile:
        datafile = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                simul8XMLFileName)  #It defines the name or the directory of the XML file
        tree = et.parse(datafile)
    else:
        datafile = simul8XMLFile
        tree = et.parse(datafile)

    simul8 = Simul8Output()  #Call the Simul8Output object
    title = 'KEtool_SingleServer'
    interTimes = simul8.InterArrivalTime(tree, 'Source', S1)
    procTimes = simul8.ProcTimes(interTimes, 'Activity 1', M1)
    title = simul8.Title(procTimes, title)
    #Output the XML file with the processed data
    output = title.write('KEtool_SingleServer.xml')

    if test:
        output = et.parse('KEtool_SingleServer.xml')
        return output
def main(test=0,
         CSVFileName1='InterArrivalData.csv',
         CSVFileName2='DataSet.csv',
         JSONFileName='JSON_ConveyerLine.json',
         jsonFile=None,
         csvFile1=None,
         csvFile2=None):
    if csvFile2:
        CSVFileName2 = csvFile2.name
    if csvFile1:
        CSVFileName1 = csvFile1.name

    CSV = ImportCSVdata()  #Instantiate the CSV import object; its Input_data method imports the data sets from the CSV files into the tool
    procData = CSV.Input_data(CSVFileName2)
    sourceData = CSV.Input_data(CSVFileName1)
    M1 = procData.get('M1', [])  #get from the returned Python dictionary the data sets
    M2 = procData.get('M2', [])
    S1 = sourceData.get('S1', [])

    ################### Processing of the data sets calling the following objects ###################################
    #Replace missing values calling the corresponding object
    missingValues = ReplaceMissingValues()
    M1 = missingValues.DeleteMissingValue(M1)
    M2 = missingValues.DeleteMissingValue(M2)
    S1 = missingValues.ReplaceWithMean(S1)

    #Detect outliers calling the DetectOutliers object
    outliers = DetectOutliers()
    M1 = outliers.DeleteExtremeOutliers(M1)
    M2 = outliers.DeleteExtremeOutliers(M2)
    S1 = outliers.DeleteOutliers(S1)

    #Conduct distribution fitting calling the Distributions object and DistFittest object
    MLE = Distributions()
    KStest = DistFittest()
    M1 = KStest.ks_test(M1)
    M2 = KStest.ks_test(M2)
    S1 = MLE.Exponential_distrfit(S1)
    #================================= Output preparation: output the updated values in the JSON file of this example =========================================================#
    if not jsonFile:
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')  #It opens the JSON file
        data = json.load(jsonFile)  #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    exportJSON = JSONOutput()
    stationId1 = 'M1'
    stationId2 = 'M2'
    stationId3 = 'S1'

    data = exportJSON.ProcessingTimes(data, stationId1, M1)
    data1 = exportJSON.ProcessingTimes(data, stationId2, M2)
    data2 = exportJSON.InterarrivalTime(data1, stationId3, S1)

    # if we run from test return the data2
    if test:
        return data2

    jsonFile = open('JSON_ConveyerLine_Output.json',
                    "w")  #It opens the JSON file
    jsonFile.write(json.dumps(
        data2, indent=True))  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file
Example #12
def main(test=0, ExcelFileName='DataSet.xlsx',
                CSVFileName='ProcTimesData.csv',
                simul8XMLFileName='Topology1.xml',
                workbook=None, csvFile=None, simul8XMLFile=None):
#================================= Extract the required data from the data files ==========================================#
    if csvFile:
        CSVFileName = csvFile.name    
    filename = CSVFileName
    csv = ImportCSVdata()   #Instantiate the CSV import object; its Input_data method imports the data set from the CSV file into the tool
    Data = csv.Input_data(filename)
    
    Activity2_Proc = Data.get('Activity 2',[])       #get from the returned Python dictionary the two data sets
    Activity3_Proc = Data.get('Activity 3',[])
    
    #Read from the given directory the Excel document with the data
    if not workbook:
        workbook = xlrd.open_workbook(os.path.join(os.path.dirname(os.path.realpath(__file__)), ExcelFileName))
    worksheets = workbook.sheet_names()
    worksheet_Inter = worksheets[0]     #Define the worksheet with the Inter-arrivals time data
    
    data = ImportExceldata()
    interTimes = data.Input_data(worksheet_Inter, workbook) #Create the Inter-arrival times dictionary with key the Source and values the inter-arrival time data
    
    S1 = interTimes.get('Source',[])  
    
    #Read from the given directory the Excel document with the data
    worksheets = workbook.sheet_names()
    worksheet_Fail = worksheets[1]     #Define the worksheet with the failures data (MTTF,MTTR)
    
    data = ImportExceldata()
    failures = data.Input_data(worksheet_Fail, workbook) #Create the failures dictionary with key the MTTF and MTTR data points
    
    MTTF = failures.get('MTTF',[])  
    MTTR = failures.get('MTTR',[])
    
    #======================= Fit data to probability distributions ================================#
    #The Distributions and DistFittest objects are called to fit statistical distributions to the in scope data 
    dist = Distributions()
    act2Proc = dist.Weibull_distrfit(Activity2_Proc)
    act3Proc = dist.Weibull_distrfit(Activity3_Proc)
    
    s1Times = dist.Exponential_distrfit(S1)
    
    distFit = DistFittest()
    act1MTTF = distFit.ks_test(MTTF)
    act1MTTR = distFit.ks_test(MTTR)
    
    #======================= Output preparation: output the updated values in the XML file of this example ================================#
    
    if not simul8XMLFile:
        datafile=(os.path.join(os.path.dirname(os.path.realpath(__file__)), simul8XMLFileName))       #It defines the name or the directory of the XML file 
        tree = et.parse(datafile)    
    else:
        datafile=simul8XMLFile
        tree = et.parse(datafile)
        
    simul8 = Simul8Output()    #Call the Simul8Output object
    #Assign the statistical distribution calculated above in the XML file using methods of the Simul8Output object
    interTimes = simul8.InterArrivalTime(tree,'Source', s1Times)
    
    procTimes2 = simul8.ProcTimes(interTimes,'Activity 2', act2Proc)
    procTimes3 = simul8.ProcTimes(procTimes2,'Activity 3', act3Proc)
    
    #Again assign the MTTF and MTTR probability distributions calling the relevant methods from the Simul8Output object
    MTTF1 = simul8.MTBF(procTimes3,'Activity 1', act1MTTF)
    MTTR1 = simul8.MTTR(MTTF1,'Activity 1', act1MTTR)
    #Output the XML file with the processed data 
    output= MTTR1.write('KEtool_Topology1.xml')
    
    if test:
        output=et.parse('KEtool_Topology1.xml')
        return output 
def main(test=0, ExcelFileName='inputData.xls',
                JSONFileName='JSON_AssembleDismantle.json',
                CMSDFileName='CMSD_AssemblyDismantle.xml',
                workbook=None,
                jsonFile=None, cmsdFile=None):
    if not workbook:
        workbook = xlrd.open_workbook(os.path.join(os.path.dirname(os.path.realpath(__file__)), ExcelFileName))
    #Read from the given directory the Excel document with the input data
    worksheets = workbook.sheet_names()
    worksheet_ProcessingTimes = worksheets[0]     #Define the worksheet with the Processing times data
    worksheet_MTTF = worksheets[1]       #Define the worksheet with Time-to-Failure data
    worksheet_MTTR = worksheets[2]       #Define the worksheet with Time-to-Repair data
    
    A = ImportExceldata()                              #Call the Python object Import_Excel
    ProcessingTimes = A.Input_data(worksheet_ProcessingTimes, workbook)   #Create the Processing Times dictionary with key the Machine 1 and values the processing time data
    MTTF=A.Input_data(worksheet_MTTF, workbook)        #Create the MTTF dictionary with key the Machine 1 and time-to-failure data 
    MTTR=A.Input_data(worksheet_MTTR, workbook)        #Create the MTTR Quantity dictionary with key the Machine 1 and time-to-repair data 
    
    ##Get from the above dictionaries the M1 key and define the following lists with data 
    ProcTime = ProcessingTimes.get('M1',[])         
    MTTF = MTTF.get('M1',[])
    MTTR = MTTR.get('M1',[])
    
    #Call the HandleMissingValues object and replace the missing values in the lists with the mean of the non-missing values
    B = ReplaceMissingValues()
    ProcTime = B.ReplaceWithMean(ProcTime)
    MTTF = B.ReplaceWithMean(MTTF)
    MTTR = B.ReplaceWithMean(MTTR)
    
    C = Distributions()      #Call the Distributions object
    D = DistFittest()      #Call the DistFittest object
    
    ProcTime_dist = D.ks_test(ProcTime)
    MTTF_dist = C.Exponential_distrfit(MTTF)
    MTTR_dist = C.Exponential_distrfit(MTTR)
    #======================== Output preparation: output the values prepared in the CMSD information model of this model ====================================================#
    if not cmsdFile:
        datafile=(os.path.join(os.path.dirname(os.path.realpath(__file__)), CMSDFileName))       #It defines the path of the XML file in which the CMSD information model is manually written
        tree = et.parse(datafile)                                               #This file will be parsed using the XML.ETREE Python library
    else:
        tree = et.parse(cmsdFile)
    
    exportCMSD=CMSDOutput()
    stationId1='M1'
    
    procTime=exportCMSD.ProcessingTimes(tree, stationId1, ProcTime_dist) 
    TTF=exportCMSD.TTF(procTime, stationId1, MTTF_dist)
    TTR=exportCMSD.TTR(TTF, stationId1, MTTR_dist)
    
    TTR.write('CMSD_AssemblyDismantle_Output.xml',encoding="utf8")                         #It writes the element tree to a specified file, using the 'utf8' output encoding
    
    #================================= Output preparation: output the updated values in the JSON file of this example =========================================================#
    if not jsonFile:
        jsonFile = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), JSONFileName),'r')      #It opens the JSON file 
        data = json.load(jsonFile)             #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile) 
    
    exportJSON=JSONOutput()
    stationId='M1'
    
    data=exportJSON.ProcessingTimes(data, stationId, ProcTime_dist)
    data1=exportJSON.TTF(data, stationId, MTTF_dist)
    data2=exportJSON.TTR(data1, stationId, MTTR_dist)
    
    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput=ManPyMain.main(input_data=json.dumps(data2))
    # if we run from test return the ManPy result
    if test:
        return simulationOutput
    
    #===================== Output the JSON file ========================================#
    jsonFile = open('JSON_AssembleDismantle_Output.json',"w")     #It opens the JSON file
    jsonFile.write(json.dumps(data2, indent=True))                                           #It writes the updated data to the JSON file 
    jsonFile.close()                                                                        #It closes the file
    #================================ Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files =============================================#
    C=ExcelOutput()
    C.PrintStatisticalMeasures(ProcTime,'ProcTime_StatResults.xls')   
    C.PrintStatisticalMeasures(MTTR,'MTTR_StatResults.xls')
    C.PrintStatisticalMeasures(MTTF,'MTTF_StatResults.xls')   
    C.PrintDistributionFit(ProcTime,'ProcTime_DistFitResults.xls')
    C.PrintDistributionFit(MTTR,'MTTR_DistFitResults.xls')
    
    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput=ManPyMain.main(input_data=json.dumps(data))
    # save the simulation output
    jsonFile = open('ManPyOutput.json',"w")     #It opens the JSON file
    jsonFile.write(simulationOutput)           #It writes the updated data to the JSON file 
    jsonFile.close()                         #It closes the file
Example #14
            WIP[key].append(unitsToProcess)
        elif WIP[key][0]=='PaA':
            secs = WIP[key][1].total_seconds()
            minutes= int(secs / 60)
            unitsToProcess= round(batchSize - (minutes / meanPaA_Proc))
            WIP[key].append(unitsToProcess)
        elif WIP[key][0]=='PaB':
            secs = WIP[key][1].total_seconds()
            minutes= int(secs / 60)
            unitsToProcess= round(batchSize - (minutes / meanPaB_Proc))
            WIP[key].append(unitsToProcess)
    except IndexError:
        continue

# Call the DistFittest object and conduct Kolmogorov-Smirnov distribution fitting test in the processing times lists of each station
D=DistFittest()
dictProc={} #Create a dictionary that holds the statistical distributions of the processing times of each station
dictProc['MA']= D.ks_test(MA_Proc)
dictProc['M1A']= D.ks_test(M1A_Proc)
dictProc['M1B']= D.ks_test(M1B_Proc)
dictProc['M2A']= D.ks_test(M2A_Proc)
dictProc['M2B']= D.ks_test(M2B_Proc)
dictProc['M3A']= D.ks_test(M3A_Proc)
dictProc['M3B']= D.ks_test(M3B_Proc)
dictProc['MM']= D.ks_test(MM_Proc)
dictProc['PrA']= D.ks_test(PrA_Proc)
dictProc['PrB']= D.ks_test(PrB_Proc)
dictProc['PaA']= D.ks_test(PaA_Proc)
dictProc['PaB']= D.ks_test(PaB_Proc)
#Call the Distributions object and fit (using the Maximum Likelihood Estimation) the lists with the scrap quantity into a discrete statistical distribution, i.e. Geometric distribution 
D=Distributions()
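For reference, a worked instance of the WIP arithmetic in the loop above (all numbers are illustrative assumptions, not values from the data set):

from datetime import timedelta

batchSize = 100
meanPaA_Proc = 2.0                           # minutes per unit (assumed)
elapsed = timedelta(minutes=30)              # stands in for WIP[key][1]
secs = elapsed.total_seconds()
minutes = int(secs / 60)
unitsToProcess = round(batchSize - (minutes / meanPaA_Proc))
print(unitsToProcess)                        # -> 85 units still to process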
Example #15
def main(test=0,
         ExcelFileName='inputData.xls',
         JSONFileName='JSON_ParallelStations.json',
         workbook=None,
         jsonFile=None):

    #Read from the given directory the Excel document with the input data
    if not workbook:
        workbook = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName))
    worksheets = workbook.sheet_names()
    worksheet_ProcessingTimes = worksheets[0]  #Define the worksheet with the Processing times data

    inputData = ImportExceldata()  #Call the Python object Import_Excel
    ProcessingTimes = inputData.Input_data(worksheet_ProcessingTimes, workbook)  #Create the Processing Times dictionary with key Machines 1,2 and values the processing time data

    ##Get from the above dictionaries the M1 key and define the following lists with data
    M1_ProcTime = ProcessingTimes.get('M1', [])
    M2_ProcTime = ProcessingTimes.get('M2', [])

    #Call the HandleMissingValues object and replace the missing values in the lists with the mean of the non-missing values
    misValues = ReplaceMissingValues()
    M1_ProcTime = misValues.ReplaceWithMean(M1_ProcTime)
    M2_ProcTime = misValues.ReplaceWithMean(M2_ProcTime)

    MLE = Distributions()  #Call the Distributions object (Maximum Likelihood Estimation - MLE)
    KS = DistFittest()  #Call the DistFittest object (Kolmogorov-Smirnov test)

    M1ProcTime_dist = KS.ks_test(M1_ProcTime)
    M2ProcTime_dist = MLE.Normal_distrfit(M2_ProcTime)

    #======================= Output preparation: output the updated values in the JSON file of this example ================================#
    if not jsonFile:
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')  #It opens the JSON file
        data = json.load(jsonFile)  #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    exportJSON = JSONOutput()
    stationId1 = 'St1'
    stationId2 = 'St2'
    data1 = exportJSON.ProcessingTimes(data, stationId1, M1ProcTime_dist)
    data2 = exportJSON.ProcessingTimes(data1, stationId2, M2ProcTime_dist)

    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput = ManPyMain.main(input_data=json.dumps(data2))

    # if we run from test return the ManPy result
    if test:
        return simulationOutput

    #=================== Output the JSON file ==========================#
    jsonFile = open('JSON_ParallelStations_Output.json',
                    "w")  #It opens the JSON file
    jsonFile.write(json.dumps(
        data2, indent=True))  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file

    #=================== Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files ==========================#
    export = ExcelOutput()

    export.PrintStatisticalMeasures(M1_ProcTime, 'M1_ProcTime_StatResults.xls')
    export.PrintStatisticalMeasures(M2_ProcTime, 'M2_ProcTime_StatResults.xls')

    export.PrintDistributionFit(M1_ProcTime, 'M1_ProcTime_DistFitResults.xls')
    export.PrintDistributionFit(M2_ProcTime, 'M2_ProcTime_DistFitResults.xls')

    # save the simulation output
    jsonFile = open('ManPyOutput.json', "w")  #It opens the JSON file
    jsonFile.write(
        simulationOutput)  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file
def main(test=0,
         ExcelFileName='inputData.xls',
         JSONFileName='JSON_AssembleDismantle.json',
         CMSDFileName='CMSD_AssemblyDismantle.xml',
         workbook=None,
         jsonFile=None,
         cmsdFile=None):
    if not workbook:
        workbook = xlrd.open_workbook(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         ExcelFileName))
    #Read from the given directory the Excel document with the input data
    worksheets = workbook.sheet_names()
    worksheet_ProcessingTimes = worksheets[0]  #Define the worksheet with the Processing times data
    worksheet_MTTF = worksheets[1]  #Define the worksheet with Time-to-Failure data
    worksheet_MTTR = worksheets[2]  #Define the worksheet with Time-to-Repair data

    A = ImportExceldata()  #Call the Python object Import_Excel
    ProcessingTimes = A.Input_data(worksheet_ProcessingTimes, workbook)  #Create the Processing Times dictionary with key the Machine 1 and values the processing time data
    MTTF = A.Input_data(worksheet_MTTF, workbook)  #Create the MTTF dictionary with key the Machine 1 and time-to-failure data
    MTTR = A.Input_data(worksheet_MTTR, workbook)  #Create the MTTR Quantity dictionary with key the Machine 1 and time-to-repair data

    ##Get from the above dictionaries the M1 key and define the following lists with data
    ProcTime = ProcessingTimes.get('M1', [])
    MTTF = MTTF.get('M1', [])
    MTTR = MTTR.get('M1', [])

    #Call the HandleMissingValues object and replace the missing values in the lists with the mean of the non-missing values
    B = ReplaceMissingValues()
    ProcTime = B.ReplaceWithMean(ProcTime)
    MTTF = B.ReplaceWithMean(MTTF)
    MTTR = B.ReplaceWithMean(MTTR)

    C = Distributions()  #Call the Distributions object
    D = DistFittest()  #Call the DistFittest object

    ProcTime_dist = D.ks_test(ProcTime)
    MTTF_dist = C.Exponential_distrfit(MTTF)
    MTTR_dist = C.Exponential_distrfit(MTTR)
    #======================== Output preparation: output the values prepared in the CMSD information model of this model ====================================================#
    if not cmsdFile:
        datafile = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                CMSDFileName)  #It defines the path of the XML file in which the CMSD information model is manually written
        tree = et.parse(datafile)  #This file will be parsed using the XML.ETREE Python library
    else:
        tree = et.parse(cmsdFile)

    exportCMSD = CMSDOutput()
    stationId1 = 'M1'

    procTime = exportCMSD.ProcessingTimes(tree, stationId1, ProcTime_dist)
    TTF = exportCMSD.TTF(procTime, stationId1, MTTF_dist)
    TTR = exportCMSD.TTR(TTF, stationId1, MTTR_dist)

    TTR.write(
        'CMSD_AssemblyDismantle_Output.xml', encoding="utf8"
    )  #It writes the element tree to a specified file, using the 'utf8' output encoding

    #================================= Output preparation: output the updated values in the JSON file of this example =========================================================#
    if not jsonFile:
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')  #It opens the JSON file
        data = json.load(jsonFile)  #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    exportJSON = JSONOutput()
    stationId = 'M1'

    data = exportJSON.ProcessingTimes(data, stationId, ProcTime_dist)
    data1 = exportJSON.TTF(data, stationId, MTTF_dist)
    data2 = exportJSON.TTR(data1, stationId, MTTR_dist)

    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput = ManPyMain.main(input_data=json.dumps(data2))
    # if we run from test return the ManPy result
    if test:
        return simulationOutput

    #===================== Output the JSON file ========================================#
    jsonFile = open('JSON_AssembleDismantle_Output.json',
                    "w")  #It opens the JSON file
    jsonFile.write(json.dumps(
        data2, indent=True))  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file
    #================================ Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files =============================================#
    C = ExcelOutput()
    C.PrintStatisticalMeasures(ProcTime, 'ProcTime_StatResults.xls')
    C.PrintStatisticalMeasures(MTTR, 'MTTR_StatResults.xls')
    C.PrintStatisticalMeasures(MTTF, 'MTTF_StatResults.xls')
    C.PrintDistributionFit(ProcTime, 'ProcTime_DistFitResults.xls')
    C.PrintDistributionFit(MTTR, 'MTTR_DistFitResults.xls')

    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput = ManPyMain.main(input_data=json.dumps(data))
    # save the simulation output
    jsonFile = open('ManPyOutput.json', "w")  #It opens the JSON file
    jsonFile.write(
        simulationOutput)  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file
M1A_Proc= C.DeleteOutliers(M1A_Proc)
M1B_Proc= C.DeleteOutliers(M1B_Proc)
M2A_Proc= C.DeleteOutliers(M2A_Proc)
M2B_Proc= C.DeleteOutliers(M2B_Proc)
M3A_Proc= C.DeleteOutliers(M3A_Proc)
M3B_Proc= C.DeleteOutliers(M3B_Proc)
CB_Proc= C.DeleteOutliers(CB_Proc)
FL_Proc= C.DeleteOutliers(FL_Proc)
M3B_Proc= C.DeleteOutliers(M3B_Proc)
PrA_Proc= C.DeleteOutliers(PrA_Proc)
PrB_Proc= C.DeleteOutliers(PrB_Proc)
PaA_Proc= C.DeleteOutliers(PaA_Proc)
Pb_Proc= C.DeleteOutliers(Pb_Proc)
    
# Call the DistFittest object and conduct Kolmogorov-Smirnov distribution fitting test in the processing times lists of each station
D=DistFittest()
dictProc={} #Create a dictionary that holds the statistical distributions of the processing times of each station
dictProc['MA']= D.ks_test(MA_Proc)
dictProc['M1A']= D.ks_test(M1A_Proc)
dictProc['M1B']= D.ks_test(M1B_Proc)
dictProc['M2A']= D.ks_test(M2A_Proc)
dictProc['M2B']= D.ks_test(M2B_Proc)
dictProc['M3A']= D.ks_test(M3A_Proc)
dictProc['M3B']= D.ks_test(M3B_Proc)
dictProc['CB']= D.ks_test(CB_Proc)
dictProc['MM']= D.ks_test(MM_Proc)
dictProc['FL']= D.ks_test(FL_Proc)
dictProc['PrA']= D.ks_test(PrA_Proc)
dictProc['PrB']= D.ks_test(PrB_Proc)
dictProc['PaA']= D.ks_test(PaA_Proc)
dictProc['Pb']= D.ks_test(Pb_Proc)
def main(test=0, JSONFileName='JSON_example.json',
                CMSDFileName='CMSD_ParallelStations.xml',
                DBFilePath = r'C:\Users\Panos\Documents\KE tool_documentation',
                file_path=None,
                jsonFile=None, cmsdFile=None):
    if not file_path:
        cnxn=ConnectionData(seekName='ServerData', file_path=DBFilePath, implicitExt='txt', number_of_cursors=3)
        cursors=cnxn.getCursors()
    
    a = cursors[0].execute("""
            select prod_code, stat_code,emp_no, TIMEIN, TIMEOUT
            from production_status
                    """)
    MILL1=[]
    MILL2=[]
    for j in range(a.rowcount):
        #get the next line
        ind1=a.fetchone() 
        if ind1.stat_code == 'MILL1':
            procTime=[]
            procTime.insert(0,ind1.TIMEIN)
            procTime.insert(1,ind1.TIMEOUT)
            MILL1.append(procTime)
        elif ind1.stat_code == 'MILL2':
            procTime=[]
            procTime.insert(0,ind1.TIMEIN)
            procTime.insert(1,ind1.TIMEOUT)
            MILL2.append(procTime)
        else:
            continue
        
    transform = Transformations()
    procTime_MILL1=[]
    for elem in MILL1:
        t1=[]
        t2=[]
        t1.append(((elem[0].hour)*60)*60 + (elem[0].minute)*60 + elem[0].second)
        t2.append(((elem[1].hour)*60)*60 + (elem[1].minute)*60 + elem[1].second)
        dt=transform.subtraction(t2, t1)
        procTime_MILL1.append(dt[0])
    
    procTime_MILL2=[]
    for elem in MILL2:
        t1=[]
        t2=[]
        t1.append(((elem[0].hour)*60)*60 + (elem[0].minute)*60 + elem[0].second)
        t2.append(((elem[1].hour)*60)*60 + (elem[1].minute)*60 + elem[1].second)
        dt=transform.subtraction(t2, t1)
        procTime_MILL2.append(dt[0])
    
    
    b = cursors[1].execute("""
            select stat_code, MTTF_hour
            from failures
                    """)
    
    c = cursors[2].execute("""
            select stat_code, MTTR_hour
            from repairs
                    """)         
    MTTF_MILL1=[]
    MTTF_MILL2=[]
    for j in range(b.rowcount):
        #get the next line
        ind2=b.fetchone() 
        if ind2.stat_code == 'MILL1':
            MTTF_MILL1.append(ind2.MTTF_hour)
        elif ind2.stat_code == 'MILL2':
            MTTF_MILL2.append(ind2.MTTF_hour)
        else:
            continue
    
    MTTR_MILL1=[]
    MTTR_MILL2=[]
    for j in range(c.rowcount):
        #get the next line
        ind3=c.fetchone() 
        if ind3.stat_code == 'MILL1':
            MTTR_MILL1.append(ind3.MTTR_hour)
        elif ind3.stat_code == 'MILL2':
            MTTR_MILL2.append(ind3.MTTR_hour)
        else:
            continue
    
    #======================= Fit data to statistical distributions ================================#
    dist_proctime = DistFittest()
    distProcTime_MILL1 = dist_proctime.ks_test(procTime_MILL1)
    distProcTime_MILL2 = dist_proctime.ks_test(procTime_MILL2)
    
    dist_MTTF = Distributions()
    dist_MTTR = Distributions()
    distMTTF_MILL1 = dist_MTTF.Weibull_distrfit(MTTF_MILL1)
    distMTTF_MILL2 = dist_MTTF.Weibull_distrfit(MTTF_MILL2)
    
    distMTTR_MILL1 = dist_MTTR.Poisson_distrfit(MTTR_MILL1)
    distMTTR_MILL2 = dist_MTTR.Poisson_distrfit(MTTR_MILL2)
    
    #======================== Output preparation: output the values prepared in the CMSD information model of this model ====================================================#
    if not cmsdFile:
        datafile=(os.path.join(os.path.dirname(os.path.realpath(__file__)), CMSDFileName))       #It defines the path of the XML file in which the CMSD information model is manually written
        tree = et.parse(datafile)                                               #This file will be parsed using the XML.ETREE Python library
    else:
        tree = et.parse(cmsdFile)
    
    exportCMSD=CMSDOutput()
    stationId1='M1'
    stationId2='M2'
    procTime1=exportCMSD.ProcessingTimes(tree, stationId1, distProcTime_MILL1) 
    procTime2=exportCMSD.ProcessingTimes(procTime1, stationId2, distProcTime_MILL2)
    
    TTF1=exportCMSD.TTF(procTime2, stationId1, distMTTF_MILL1)
    TTR1=exportCMSD.TTR(TTF1, stationId1, distMTTR_MILL1)
    
    TTF2=exportCMSD.TTF(TTR1, stationId2, distMTTF_MILL2)
    TTR2=exportCMSD.TTR(TTF2, stationId2, distMTTR_MILL2)
    
    TTR2.write('CMSD_ParallelStations_Output.xml',encoding="utf8")                         #It writes the element tree to a specified file, using the 'utf8' output encoding
    
    #======================= Output preparation: output the updated values in the JSON file of this example ================================#
    if not jsonFile:
        jsonFile = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), JSONFileName),'r')      #It opens the JSON file 
        data = json.load(jsonFile)             #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile) 
    
    exportJSON=JSONOutput()
    stationId1='M1'
    stationId2='M2'
    data1=exportJSON.ProcessingTimes(data, stationId1, distProcTime_MILL1)
    data2=exportJSON.ProcessingTimes(data1, stationId2, distProcTime_MILL2)
    
    data3=exportJSON.TTF(data2, stationId1, distMTTF_MILL1)
    data4=exportJSON.TTR(data3, stationId1, distMTTR_MILL1)
    
    data5=exportJSON.TTF(data4, stationId2, distMTTF_MILL2)
    data6=exportJSON.TTR(data5, stationId2, distMTTR_MILL2)
    
    # if we run from test return the data6
    if test:
        return data6
        
    jsonFile = open('JSON_ParallelStations_Output.json',"w")     #It opens the JSON file
    jsonFile.write(json.dumps(data6, indent=True))               #It writes the updated data to the JSON file 
    jsonFile.close()                                             #It closes the file
    
    #=================== Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files ==========================#
    export=ExcelOutput()
    
    export.PrintStatisticalMeasures(procTime_MILL1,'procTimeMILL1_StatResults.xls')   
    export.PrintStatisticalMeasures(procTime_MILL2,'procTimeMILL2_StatResults.xls')
    export.PrintStatisticalMeasures(MTTF_MILL1,'MTTFMILL1_StatResults.xls')   
    export.PrintStatisticalMeasures(MTTF_MILL2,'MTTFMILL2_StatResults.xls')
    export.PrintStatisticalMeasures(MTTR_MILL1,'MTTRMILL1_StatResults.xls')   
    export.PrintStatisticalMeasures(MTTR_MILL2,'MTTRMILL2_StatResults.xls')
    
    export.PrintDistributionFit(procTime_MILL1,'procTimeMILL1_DistFitResults.xls')
    export.PrintDistributionFit(procTime_MILL2,'procTimeMILL2_DistFitResults.xls')
    export.PrintDistributionFit(MTTF_MILL1,'MTTFMILL1_DistFitResults.xls')
    export.PrintDistributionFit(MTTF_MILL2,'MTTFMILL2_DistFitResults.xls')
    export.PrintDistributionFit(MTTR_MILL1,'MTTRMILL1_DistFitResults.xls')
    export.PrintDistributionFit(MTTR_MILL2,'MTTRMILL2_DistFitResults.xls')
def main(test=0, ExcelFileName='inputData.xls',
                JSONFileName='JSON_ParallelStations.json',
                workbook=None,
                jsonFile=None):
    
    #Read from the given directory the Excel document with the input data
    if not workbook:
        workbook = xlrd.open_workbook(os.path.join(os.path.dirname(os.path.realpath(__file__)), ExcelFileName))
    worksheets = workbook.sheet_names()
    worksheet_ProcessingTimes = worksheets[0]     #Define the worksheet with the Processing times data
    
    inputData = ImportExceldata()                              #Call the Python object Import_Excel
    ProcessingTimes = inputData.Input_data(worksheet_ProcessingTimes, workbook)   #Create the Processing Times dictionary with key Machines 1,2 and values the processing time data
    
    ##Get from the above dictionaries the M1 key and define the following lists with data 
    M1_ProcTime = ProcessingTimes.get('M1',[])         
    M2_ProcTime = ProcessingTimes.get('M2',[])  
    
    #Call the HandleMissingValues object and replace the missing values in the lists with the mean of the non-missing values
    misValues = ReplaceMissingValues()
    M1_ProcTime = misValues.ReplaceWithMean(M1_ProcTime)
    M2_ProcTime = misValues.ReplaceWithMean(M2_ProcTime)
    
    MLE = Distributions()      #Call the Distributions object (Maximum Likelihood Estimation - MLE)
    KS = DistFittest()      #Call the DistFittest object  (Kolmogorov-Smirnov test)
    
    M1ProcTime_dist = KS.ks_test(M1_ProcTime)
    M2ProcTime_dist = MLE.Normal_distrfit(M2_ProcTime)
    
    
    #======================= Output preparation: output the updated values in the JSON file of this example ================================#
    if not jsonFile:
        jsonFile = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), JSONFileName),'r')      #It opens the JSON file 
        data = json.load(jsonFile)                                                              #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile) 
    
    exportJSON=JSONOutput()
    stationId1='St1'
    stationId2='St2'
    data1=exportJSON.ProcessingTimes(data, stationId1, M1ProcTime_dist)
    data2=exportJSON.ProcessingTimes(data1, stationId2, M2ProcTime_dist)
        
    #================================ Call ManPy and run the simulation model =============================================#
    #calls ManPy main script with the input
    simulationOutput=ManPyMain.main(input_data=json.dumps(data2))
    
    # if we run from test return the ManPy result
    if test:
        return simulationOutput

    #=================== Output the JSON file ==========================#
    jsonFile = open('JSON_ParallelStations_Output.json',"w")     #It opens the JSON file
    jsonFile.write(json.dumps(data2, indent=True))               #It writes the updated data to the JSON file 
    jsonFile.close()                                             #It closes the file
    
    #=================== Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files ==========================#
    export=ExcelOutput()
    
    export.PrintStatisticalMeasures(M1_ProcTime,'M1_ProcTime_StatResults.xls')   
    export.PrintStatisticalMeasures(M2_ProcTime,'M2_ProcTime_StatResults.xls')
    
    export.PrintDistributionFit(M1_ProcTime,'M1_ProcTime_DistFitResults.xls')
    export.PrintDistributionFit(M2_ProcTime,'M2_ProcTime_DistFitResults.xls')


    # save the simulation output
    jsonFile = open('ManPyOutput.json',"w")     #It opens the JSON file
    jsonFile.write(simulationOutput)                                           #It writes the updated data to the JSON file 
    jsonFile.close()                                                           #It closes the file