Example #1
0
    def alreadyInsertedWP(self):
        """Collect the WP_id values already present in production_status.

        Returns:
            list: the non-empty WP_id values found in the table.

        Fix: the original built the list and then discarded it with a bare
        ``return``; the list is now returned so callers can actually use it
        (callers that ignored the previous ``None`` result are unaffected).
        """
        cnxn = ConnectionData(
            seekName='ServerData',
            file_path='C:\Users\Panos\Documents\DB_Approach\CapacityStations',
            implicitExt='txt',
            number_of_cursors=6)
        cursor = cnxn.getCursors()

        c = cursor[0].execute("""
                select WP_id, END_DATE
                from production_status
                        """)
        listb = []
        for _ in range(c.rowcount):
            row = c.fetchone()
            # Skip rows whose WP_id is NULL/empty.
            if row.WP_id:
                listb.append(row.WP_id)
        return listb
Example #2
0
    def checkInsertedProject(self):
        """Return the Order_id of every order whose Status is either
        'in progress' or 'accepted' (i.e. the orders still available)."""
        cnxn = ConnectionData(
            seekName='ServerData',
            file_path='C:\Users\Panos\Documents\DB_Approach\CapacityStations',
            implicitExt='txt',
            number_of_cursors=6)
        cursor = cnxn.getCursors()

        result = cursor[0].execute("""
                select Order_id, ProjectName, Status
                from orders
                        """)

        # Fetch every row of the result set, then keep the ids of the
        # orders that are still open.
        rows = [result.fetchone() for _ in range(result.rowcount)]
        return [row.Order_id for row in rows
                if row.Status in ('in progress', 'accepted')]
    def alreadyInsertedWP(self):
        """Collect the WP_id values already present in production_status.

        Returns:
            list: the non-empty WP_id values found in the table.

        Fix: the original built the list and then discarded it with a bare
        ``return``; the list is now returned so callers can actually use it
        (callers that ignored the previous ``None`` result are unaffected).
        """
        cnxn = ConnectionData(
            seekName="ServerData",
            file_path="C:\Users\Panos\Documents\DB_Approach\CapacityStations",
            implicitExt="txt",
            number_of_cursors=6,
        )
        cursor = cnxn.getCursors()

        c = cursor[0].execute(
            """
                select WP_id, END_DATE
                from production_status
                        """
        )
        listb = []
        for _ in range(c.rowcount):
            row = c.fetchone()
            # Skip rows whose WP_id is NULL/empty.
            if row.WP_id:
                listb.append(row.WP_id)
        return listb
    def checkInsertedProject(self):
        """Return the Order_id of every order whose Status is either
        'in progress' or 'accepted' (i.e. the orders still available)."""
        cnxn = ConnectionData(
            seekName="ServerData",
            file_path="C:\Users\Panos\Documents\DB_Approach\CapacityStations",
            implicitExt="txt",
            number_of_cursors=6,
        )
        cursor = cnxn.getCursors()

        result = cursor[0].execute(
            """
                select Order_id, ProjectName, Status
                from orders
                        """
        )

        # Fetch every row of the result set, then keep the ids of the
        # orders that are still open.
        rows = [result.fetchone() for _ in range(result.rowcount)]
        return [row.Order_id for row in rows
                if row.Status in ("in progress", "accepted")]
Example #5
0
    def updateDatabase(self):
        """Insert the current GUI form values as a production_status row and,
        where appropriate, update the order's Status; commits and closes the
        window.

        Reads GUI state from self.checkBoxVal2 (WP finished?), self.OrderOption,
        self.operationOption, self.capacity and self.comments.
        """
        cnxn = ConnectionData(
            seekName='ServerData',
            file_path='C:\Users\Panos\Documents\DB_Approach\CapacityStations',
            implicitExt='txt',
            number_of_cursors=13)
        cursor = cnxn.getCursors()

        # Checkbox ticked: the work package is finished, so END_DATE is also
        # recorded (seven placeholders instead of six in the else branch).
        if self.checkBoxVal2.get():
            update_order = (
                "INSERT INTO production_status(`status_id`, `WP_id`, `Operation_Name`, `START_DATE`, `Capacity_left`, `Remarks`,`END_DATE`)  VALUES ( ?, ?, ?, ?, ?, ?, ?)"
            )
            # Ask the server for the last identity value; fetched further down
            # from the same cursor and used as status_id.
            cursor[0].execute("SELECT @@IDENTITY AS ID")
            order = self.OrderOption.get()
            a = cursor[1].execute(
                """
                        select WP_id, Order_id
                        from sequence where Order_id=?
                                """, order)
            # Walk the whole result set; after the loop ind2 holds the LAST
            # row, i.e. the final work package of the order's sequence.
            # NOTE(review): if the query returns no rows, ind2 is undefined.
            for j in range(a.rowcount):
                ind2 = a.fetchone()
            lastWP = ind2.WP_id
            b = cursor[2].execute(
                """
                                select WP_id
                                 from sequence where Operation_Name=? and Order_id=?
                                """, self.operationOption.get(), order)
            ind4 = b.fetchone()
            status2 = 'finished'
            row = cursor[0].fetchone()
            WP = ind4[0]
            order_ref = row.ID
            status1 = 'in progress'  # NOTE(review): unused in this branch
            cursor[4].execute(update_order,
                              (order_ref, WP, self.operationOption.get(),
                               str(datetime.now()), self.capacity.get(),
                               self.comments.get(), str(datetime.now())))
            # Finishing the last WP of the sequence completes the whole order.
            if WP == lastWP:
                cursor[5].execute(
                    "UPDATE orders SET `Status`=? WHERE Order_id=? ", status2,
                    order)
            cursor[6].commit()
            self.close_window()
        else:
            # Checkbox clear: the work package is still open, so the row is
            # inserted without an END_DATE and the order stays 'in progress'.
            update_order = (
                "INSERT INTO production_status(`status_id`, `WP_id`, `Operation_Name`, `START_DATE`, `Capacity_left`, `Remarks`)  VALUES ( ?, ?, ?, ?, ?, ?)"
            )

            cursor[7].execute("SELECT @@IDENTITY AS ID")
            order = self.OrderOption.get()
            a = cursor[8].execute(
                """
                                select sequence.WP_id, sequence.Order_id, sequence.Operation_Name
                                 from sequence where Order_id=? and Operation_Name=?
                                """, order, self.operationOption.get())
            ind3 = a.fetchone()
            WP = ind3.WP_id
            row = cursor[7].fetchone()
            print row
            order_ref = row.ID
            status1 = 'in progress'
            cursor[10].execute(
                update_order,
                (order_ref, WP, self.operationOption.get(), str(
                    datetime.now()), self.capacity.get(), self.comments.get()))
            cursor[11].execute(
                "UPDATE orders SET `Status`=? WHERE Order_id=? ", status1,
                order)
            cursor[12].commit()
            self.close_window()
        return
def main(test=0, JSONFileName='JSON_example.json',
                CMSDFileName='CMSD_ParallelStations.xml',
                DBFilePath = 'C:\Users\Panos\Documents\KE tool_documentation',
                file_path=None,
                jsonFile=None, cmsdFile=None):
    """Knowledge-extraction example for two parallel stations (MILL1/MILL2).

    Reads processing times (TIMEOUT - TIMEIN), MTTF and MTTR samples from the
    database, fits statistical distributions, then writes the results into a
    CMSD XML file, a JSON file and several xls reports.  When `test` is
    truthy, the updated JSON data is returned instead of being written out.

    NOTE(review): if `file_path` is supplied, `cursors` is never assigned and
    the queries below raise NameError; likewise `tree` when `cmsdFile` is
    supplied — confirm the intended call pattern for those arguments.
    """
    if not file_path:
        cnxn=ConnectionData(seekName='ServerData', file_path=DBFilePath, implicitExt='txt', number_of_cursors=3)
        cursors=cnxn.getCursors()

    # Processing-time raw data: one TIMEIN/TIMEOUT pair per row, keyed by
    # station code.
    a = cursors[0].execute("""
            select prod_code, stat_code,emp_no, TIMEIN, TIMEOUT
            from production_status
                    """)
    MILL1=[]
    MILL2=[]
    for j in range(a.rowcount):
        #get the next line
        ind1=a.fetchone()
        if ind1.stat_code == 'MILL1':
            procTime=[]
            procTime.insert(0,ind1.TIMEIN)
            procTime.insert(1,ind1.TIMEOUT)
            MILL1.append(procTime)
        elif ind1.stat_code == 'MILL2':
            procTime=[]
            procTime.insert(0,ind1.TIMEIN)
            procTime.insert(1,ind1.TIMEOUT)
            MILL2.append(procTime)
        else:
            continue

    # Convert each [TIMEIN, TIMEOUT] pair to elapsed seconds via the
    # Transformations helper.
    transform = Transformations()
    procTime_MILL1=[]
    for elem in MILL1:
        t1=[]
        t2=[]
        t1.append(((elem[0].hour)*60)*60 + (elem[0].minute)*60 + elem[0].second)
        t2.append(((elem[1].hour)*60)*60 + (elem[1].minute)*60 + elem[1].second)
        dt=transform.subtraction(t2, t1)
        procTime_MILL1.append(dt[0])

    procTime_MILL2=[]
    for elem in MILL2:
        t1=[]
        t2=[]
        t1.append(((elem[0].hour)*60)*60 + (elem[0].minute)*60 + elem[0].second)
        t2.append(((elem[1].hour)*60)*60 + (elem[1].minute)*60 + elem[1].second)
        dt=transform.subtraction(t2, t1)
        procTime_MILL2.append(dt[0])

    # MTTF and MTTR samples, again split per station code.
    b = cursors[1].execute("""
            select stat_code, MTTF_hour
            from failures
                    """)

    c = cursors[2].execute("""
            select stat_code, MTTR_hour
            from repairs
                    """)
    MTTF_MILL1=[]
    MTTF_MILL2=[]
    for j in range(b.rowcount):
        #get the next line
        ind2=b.fetchone()
        if ind2.stat_code == 'MILL1':
            MTTF_MILL1.append(ind2.MTTF_hour)
        elif ind2.stat_code == 'MILL2':
            MTTF_MILL2.append(ind2.MTTF_hour)
        else:
            continue

    MTTR_MILL1=[]
    MTTR_MILL2=[]
    for j in range(c.rowcount):
        #get the next line
        ind3=c.fetchone()
        if ind3.stat_code == 'MILL1':
            MTTR_MILL1.append(ind3.MTTR_hour)
        elif ind3.stat_code == 'MILL2':
            MTTR_MILL2.append(ind3.MTTR_hour)
        else:
            continue

    #======================= Fit data to statistical distributions ================================#
    # Processing times: best fit chosen by Kolmogorov-Smirnov test; MTTF is
    # fitted as Weibull and MTTR as Poisson.
    dist_proctime = DistFittest()
    distProcTime_MILL1 = dist_proctime.ks_test(procTime_MILL1)
    distProcTime_MILL2 = dist_proctime.ks_test(procTime_MILL2)

    dist_MTTF = Distributions()
    dist_MTTR = Distributions()
    distMTTF_MILL1 = dist_MTTF.Weibull_distrfit(MTTF_MILL1)
    distMTTF_MILL2 = dist_MTTF.Weibull_distrfit(MTTF_MILL2)

    distMTTR_MILL1 = dist_MTTR.Poisson_distrfit(MTTR_MILL1)
    distMTTR_MILL2 = dist_MTTR.Poisson_distrfit(MTTR_MILL2)

    #======================== Output preparation: output the values prepared in the CMSD information model of this model ====================================================#
    if not cmsdFile:
        datafile=(os.path.join(os.path.dirname(os.path.realpath(__file__)), CMSDFileName))       #It defines the name or the directory of the XML file that is manually written the CMSD information model
        tree = et.parse(datafile)                                               #This file will be parsed using the XML.ETREE Python library

    # Each CMSDOutput call returns the updated tree, which is threaded into
    # the next call.
    exportCMSD=CMSDOutput()
    stationId1='M1'
    stationId2='M2'
    procTime1=exportCMSD.ProcessingTimes(tree, stationId1, distProcTime_MILL1)
    procTime2=exportCMSD.ProcessingTimes(procTime1, stationId2, distProcTime_MILL2)

    TTF1=exportCMSD.TTF(procTime2, stationId1, distMTTF_MILL1)
    TTR1=exportCMSD.TTR(TTF1, stationId1, distMTTR_MILL1)

    TTF2=exportCMSD.TTF(TTR1, stationId2, distMTTF_MILL2)
    TTR2=exportCMSD.TTR(TTF2, stationId2, distMTTR_MILL2)

    TTR2.write('CMSD_ParallelStations_Output.xml',encoding="utf8")                         #It writes the element tree to a specified file, using the 'utf8' output encoding

    #======================= Output preparation: output the updated values in the JSON file of this example ================================#
    if not jsonFile:
        jsonFile = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), JSONFileName),'r')      #It opens the JSON file
        data = json.load(jsonFile)             #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    # Same threading pattern for the JSON output: each call returns the
    # updated data structure.
    exportJSON=JSONOutput()
    stationId1='M1'
    stationId2='M2'
    data1=exportJSON.ProcessingTimes(data, stationId1, distProcTime_MILL1)
    data2=exportJSON.ProcessingTimes(data1, stationId2, distProcTime_MILL2)

    data3=exportJSON.TTF(data2, stationId1, distMTTF_MILL1)
    data4=exportJSON.TTR(data3, stationId1, distMTTR_MILL1)

    data5=exportJSON.TTF(data4, stationId2, distMTTF_MILL2)
    data6=exportJSON.TTR(data5, stationId2, distMTTR_MILL2)

    # if we run from test return the data6
    if test:
        return data6

    jsonFile = open('JSON_ParallelStations_Output.json',"w")     #It opens the JSON file
    jsonFile.write(json.dumps(data6, indent=True))               #It writes the updated data to the JSON file
    jsonFile.close()                                             #It closes the file

    #=================== Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files ==========================#
    export=ExcelOutput()

    export.PrintStatisticalMeasures(procTime_MILL1,'procTimeMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(procTime_MILL2,'procTimeMILL2_StatResults.xls')
    export.PrintStatisticalMeasures(MTTF_MILL1,'MTTFMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(MTTF_MILL2,'MTTFMILL2_StatResults.xls')
    export.PrintStatisticalMeasures(MTTR_MILL1,'MTTRMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(MTTR_MILL2,'MTTRMILL2_StatResults.xls')

    export.PrintDistributionFit(procTime_MILL1,'procTimeMILL1_DistFitResults.xls')
    export.PrintDistributionFit(procTime_MILL2,'procTimeMILL2_DistFitResults.xls')
    export.PrintDistributionFit(MTTF_MILL1,'MTTFMILL1_DistFitResults.xls')
    export.PrintDistributionFit(MTTF_MILL2,'MTTFMILL2_DistFitResults.xls')
    export.PrintDistributionFit(MTTR_MILL1,'MTTRMILL1_DistFitResults.xls')
    export.PrintDistributionFit(MTTR_MILL2,'MTTRMILL2_DistFitResults.xls')
def main(test=0, simul8XMLFileName='ParallelStations.xml',
                DBFilePath = 'C:\Users\Panos\Documents\KE tool_documentation',
                file_path=None, simul8XMLFile=None):
    """Knowledge-extraction example for two parallel stations (MILL1/MILL2),
    Simul8 variant.

    Reads processing times (TIMEOUT - TIMEIN), MTTF and MTTR samples from the
    database, fits fixed distribution families, and writes the results into a
    Simul8 XML model.  When `test` is truthy the written XML is re-parsed and
    returned.

    NOTE(review): if `file_path` is supplied, `cursors` is never assigned and
    the queries below raise NameError — confirm the intended call pattern.
    """
    if not file_path:
        cnxn=ConnectionData(seekName='ServerData', file_path=DBFilePath, implicitExt='txt', number_of_cursors=3)
        cursors=cnxn.getCursors()
#Database queries used to extract the required data, in this example the processing times are given subtracting the TIME IN data point from the TIME OUT data point
    a = cursors[0].execute("""
            select prod_code, stat_code,emp_no, TIMEIN, TIMEOUT
            from production_status
                    """)
    MILL1=[] #Initialization of MILL1 list
    MILL2=[] #Initialization of MILL2 list
    for j in range(a.rowcount):
        #get the next line
        ind1=a.fetchone()
        if ind1.stat_code == 'MILL1':
            procTime=[]
            procTime.insert(0,ind1.TIMEIN)
            procTime.insert(1,ind1.TIMEOUT)
            MILL1.append(procTime)
        elif ind1.stat_code == 'MILL2':
            procTime=[]
            procTime.insert(0,ind1.TIMEIN)
            procTime.insert(1,ind1.TIMEOUT)
            MILL2.append(procTime)
        else:
            continue
    #The  BasicTransformations object is called to conduct some data transformations
    # (each [TIMEIN, TIMEOUT] pair becomes elapsed seconds)
    transform = Transformations()
    procTime_MILL1=[]
    for elem in MILL1:
        t1=[]
        t2=[]
        t1.append(((elem[0].hour)*60)*60 + (elem[0].minute)*60 + elem[0].second)
        t2.append(((elem[1].hour)*60)*60 + (elem[1].minute)*60 + elem[1].second)
        dt=transform.subtraction(t2, t1)
        procTime_MILL1.append(dt[0])

    procTime_MILL2=[]
    for elem in MILL2:
        t1=[]
        t2=[]
        t1.append(((elem[0].hour)*60)*60 + (elem[0].minute)*60 + elem[0].second)
        t2.append(((elem[1].hour)*60)*60 + (elem[1].minute)*60 + elem[1].second)
        dt=transform.subtraction(t2, t1)
        procTime_MILL2.append(dt[0])
    #Database queries used again to extract the MTTF and MTTR data points
    b = cursors[1].execute("""
            select stat_code, MTTF_hour
            from failures
                    """)

    c = cursors[2].execute("""
            select stat_code, MTTR_hour
            from repairs
                    """)
    MTTF_MILL1=[] #Initialization of the list that will contain the MTTF data points for MILL1
    MTTF_MILL2=[] #Initialization of the list that will contain the MTTF data points for MILL2
    for j in range(b.rowcount):
        #get the next line
        ind2=b.fetchone()
        if ind2.stat_code == 'MILL1':
            MTTF_MILL1.append(ind2.MTTF_hour)
        elif ind2.stat_code == 'MILL2':
            MTTF_MILL2.append(ind2.MTTF_hour)
        else:
            continue

    MTTR_MILL1=[] #Initialization of the list that will contain the MTTR data points for MILL1
    MTTR_MILL2=[] #Initialization of the list that will contain the MTTR data points for MILL1
    for j in range(c.rowcount):
        #get the next line
        ind3=c.fetchone()
        if ind3.stat_code == 'MILL1':
            MTTR_MILL1.append(ind3.MTTR_hour)
        elif ind3.stat_code == 'MILL2':
            MTTR_MILL2.append(ind3.MTTR_hour)
        else:
            continue

    #======================= Fit data to statistical distributions ================================#
    #The Distributions object is called to fit statistical distributions to the in scope data
    dist_proctime = Distributions()
    distProcTime_MILL1 = dist_proctime.Lognormal_distrfit(procTime_MILL1)
    distProcTime_MILL2 = dist_proctime.Weibull_distrfit(procTime_MILL2)

    dist_MTTF = Distributions()
    dist_MTTR = Distributions()
    distMTTF_MILL1 = dist_MTTF.Exponential_distrfit(MTTF_MILL1)
    distMTTF_MILL2 = dist_MTTF.Exponential_distrfit(MTTF_MILL2)

    distMTTR_MILL1 = dist_MTTR.Normal_distrfit(MTTR_MILL1)
    distMTTR_MILL2 = dist_MTTR.Normal_distrfit(MTTR_MILL2)

    #======================= Output preparation: output the updated values in the XML file of this example ================================#

    if not simul8XMLFile:
        datafile=(os.path.join(os.path.dirname(os.path.realpath(__file__)), simul8XMLFileName))       #It defines the name or the directory of the XML file
        tree = et.parse(datafile)
    else:
        datafile=simul8XMLFile
        tree = et.parse(datafile)

    simul8 = Simul8Output()    #Call the Simul8Output object
    #Assign the statistical distribution found above in the XML file using methods of the Simul8Output object
    # (each call returns the updated tree, threaded into the next call)
    procTimes1 = simul8.ProcTimes(tree,'MILL1',distProcTime_MILL1)
    procTimes2 = simul8.ProcTimes(procTimes1,'MILL2',distProcTime_MILL2)
    #Again assign the MTTF and MTTR probability distributions calling the relevant methods from the Simul8Output object
    MTTF1 = simul8.MTBF(procTimes2,'MILL1',distMTTF_MILL1)
    MTTR1 = simul8.MTTR(MTTF1,'MILL1',distMTTR_MILL1)

    MTTF2 = simul8.MTBF(MTTR1,'MILL2',distMTTF_MILL2)
    MTTR2 = simul8.MTTR(MTTF2,'MILL2',distMTTR_MILL2)
    #Output the XML file with the processed data
    output= MTTR2.write('KEtool_ParallelStations.xml')

    if test:
        output=et.parse('KEtool_ParallelStations.xml')
        return output
Example #8
0
from dream.KnowledgeExtraction.DistributionFitting import DistFittest
from dream.KnowledgeExtraction.ReplaceMissingValues import HandleMissingValues
from dream.KnowledgeExtraction.ImportDatabase import ConnectionData
from dream.KnowledgeExtraction.DetectOutliers import HandleOutliers
from JSONOutput import JSONOutput
from dream.KnowledgeExtraction.CMSDOutput import CMSDOutput
from xml.etree import ElementTree as et
# from WIP_Identifier import currentWIP
import xlrd
from dateutil.parser import *
import datetime
from time import mktime

# Open the database connection described by the 'ServerData' settings file
# and grab three cursors for the queries in this module.
cnxn = ConnectionData(
    seekName='ServerData',
    file_path='C:\Users\Panos\Documents\DB_Approach\BatchModel',
    implicitExt='txt',
    number_of_cursors=3)
cursors = cnxn.getCursors()

# Full MES extract: one row per container transaction.
mesExtract = cursors[0].execute("""
            select CONTAINERNAME, PRODUCTNAME, PRODUCTDESCRIPTION, TASKDATE, TASKTYPENAME, STATIONNAME, CONTAINERQTYATTXN, EMPLOYEENAME
            from mes2
                    """)


# method that returns the processStory dictionary, which contains the production steps of the container ids
def contProcessStory(contId):
    processStory[contId] = {}
    mesExtract = cursors[0].execute("""
            select CONTAINERNAME, PRODUCTNAME, PRODUCTDESCRIPTION, TASKDATE, TASKTYPENAME, STATIONNAME, CONTAINERQTYATTXN, EMPLOYEENAME
def main(test=0,
         simul8XMLFileName='ParallelStations.xml',
         DBFilePath='C:\Users\Panos\Documents\KE tool_documentation',
         file_path=None,
         simul8XMLFile=None):
    """Knowledge-extraction example for two parallel stations (MILL1/MILL2),
    Simul8 variant.

    Reads processing times (TIMEOUT - TIMEIN), MTTF and MTTR samples from the
    database, fits fixed distribution families, and writes the results into a
    Simul8 XML model.  When `test` is truthy the written XML is re-parsed and
    returned.

    NOTE(review): if `file_path` is supplied, `cursors` is never assigned and
    the queries below raise NameError — confirm the intended call pattern.
    """
    if not file_path:
        cnxn = ConnectionData(seekName='ServerData',
                              file_path=DBFilePath,
                              implicitExt='txt',
                              number_of_cursors=3)
        cursors = cnxn.getCursors()


#Database queries used to extract the required data, in this example the processing times are given subtracting the TIME IN data point from the TIME OUT data point
    a = cursors[0].execute("""
            select prod_code, stat_code,emp_no, TIMEIN, TIMEOUT
            from production_status
                    """)
    MILL1 = []  #Initialization of MILL1 list
    MILL2 = []  #Initialization of MILL2 list
    for j in range(a.rowcount):
        #get the next line
        ind1 = a.fetchone()
        if ind1.stat_code == 'MILL1':
            procTime = []
            procTime.insert(0, ind1.TIMEIN)
            procTime.insert(1, ind1.TIMEOUT)
            MILL1.append(procTime)
        elif ind1.stat_code == 'MILL2':
            procTime = []
            procTime.insert(0, ind1.TIMEIN)
            procTime.insert(1, ind1.TIMEOUT)
            MILL2.append(procTime)
        else:
            continue
    #The  BasicTransformations object is called to conduct some data transformations
    # (each [TIMEIN, TIMEOUT] pair becomes elapsed seconds)
    transform = Transformations()
    procTime_MILL1 = []
    for elem in MILL1:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL1.append(dt[0])

    procTime_MILL2 = []
    for elem in MILL2:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL2.append(dt[0])
    #Database queries used again to extract the MTTF and MTTR data points
    b = cursors[1].execute("""
            select stat_code, MTTF_hour
            from failures
                    """)

    c = cursors[2].execute("""
            select stat_code, MTTR_hour
            from repairs
                    """)
    MTTF_MILL1 = [
    ]  #Initialization of the list that will contain the MTTF data points for MILL1
    MTTF_MILL2 = [
    ]  #Initialization of the list that will contain the MTTF data points for MILL2
    for j in range(b.rowcount):
        #get the next line
        ind2 = b.fetchone()
        if ind2.stat_code == 'MILL1':
            MTTF_MILL1.append(ind2.MTTF_hour)
        elif ind2.stat_code == 'MILL2':
            MTTF_MILL2.append(ind2.MTTF_hour)
        else:
            continue

    MTTR_MILL1 = [
    ]  #Initialization of the list that will contain the MTTR data points for MILL1
    MTTR_MILL2 = [
    ]  #Initialization of the list that will contain the MTTR data points for MILL1
    for j in range(c.rowcount):
        #get the next line
        ind3 = c.fetchone()
        if ind3.stat_code == 'MILL1':
            MTTR_MILL1.append(ind3.MTTR_hour)
        elif ind3.stat_code == 'MILL2':
            MTTR_MILL2.append(ind3.MTTR_hour)
        else:
            continue

    #======================= Fit data to statistical distributions ================================#
    #The Distributions object is called to fit statistical distributions to the in scope data
    dist_proctime = Distributions()
    distProcTime_MILL1 = dist_proctime.Lognormal_distrfit(procTime_MILL1)
    distProcTime_MILL2 = dist_proctime.Weibull_distrfit(procTime_MILL2)

    dist_MTTF = Distributions()
    dist_MTTR = Distributions()
    distMTTF_MILL1 = dist_MTTF.Exponential_distrfit(MTTF_MILL1)
    distMTTF_MILL2 = dist_MTTF.Exponential_distrfit(MTTF_MILL2)

    distMTTR_MILL1 = dist_MTTR.Normal_distrfit(MTTR_MILL1)
    distMTTR_MILL2 = dist_MTTR.Normal_distrfit(MTTR_MILL2)

    #======================= Output preparation: output the updated values in the XML file of this example ================================#

    if not simul8XMLFile:
        datafile = (os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 simul8XMLFileName)
                    )  #It defines the name or the directory of the XML file
        tree = et.parse(datafile)
    else:
        datafile = simul8XMLFile
        tree = et.parse(datafile)

    simul8 = Simul8Output()  #Call the Simul8Output object
    #Assign the statistical distribution found above in the XML file using methods of the Simul8Output object
    # (each call returns the updated tree, threaded into the next call)
    procTimes1 = simul8.ProcTimes(tree, 'MILL1', distProcTime_MILL1)
    procTimes2 = simul8.ProcTimes(procTimes1, 'MILL2', distProcTime_MILL2)
    #Again assign the MTTF and MTTR probability distributions calling the relevant methods from the Simul8Output object
    MTTF1 = simul8.MTBF(procTimes2, 'MILL1', distMTTF_MILL1)
    MTTR1 = simul8.MTTR(MTTF1, 'MILL1', distMTTR_MILL1)

    MTTF2 = simul8.MTBF(MTTR1, 'MILL2', distMTTF_MILL2)
    MTTR2 = simul8.MTTR(MTTF2, 'MILL2', distMTTR_MILL2)
    #Output the XML file with the processed data
    output = MTTR2.write('KEtool_ParallelStations.xml')

    if test:
        output = et.parse('KEtool_ParallelStations.xml')
        return output
Example #10
0
def main(test=0,
         JSONFileName='JSON_example.json',
         CMSDFileName='CMSD_ParallelStations.xml',
         DBFilePath='C:\Users\Panos\Documents\KE tool_documentation',
         file_path=None,
         jsonFile=None,
         cmsdFile=None):
    """Knowledge-extraction example for two parallel stations (MILL1/MILL2).

    Reads processing times (TIMEOUT - TIMEIN), MTTF and MTTR samples from the
    database, fits statistical distributions, then writes the results into a
    CMSD XML file, a JSON file and several xls reports.  When `test` is
    truthy, the updated JSON data is returned instead of being written out.

    NOTE(review): if `file_path` is supplied, `cursors` is never assigned and
    the queries below raise NameError; likewise `tree` when `cmsdFile` is
    supplied — confirm the intended call pattern for those arguments.
    """
    if not file_path:
        cnxn = ConnectionData(seekName='ServerData',
                              file_path=DBFilePath,
                              implicitExt='txt',
                              number_of_cursors=3)
        cursors = cnxn.getCursors()

    # Processing-time raw data: one TIMEIN/TIMEOUT pair per row, keyed by
    # station code.
    a = cursors[0].execute("""
            select prod_code, stat_code,emp_no, TIMEIN, TIMEOUT
            from production_status
                    """)
    MILL1 = []
    MILL2 = []
    for j in range(a.rowcount):
        #get the next line
        ind1 = a.fetchone()
        if ind1.stat_code == 'MILL1':
            procTime = []
            procTime.insert(0, ind1.TIMEIN)
            procTime.insert(1, ind1.TIMEOUT)
            MILL1.append(procTime)
        elif ind1.stat_code == 'MILL2':
            procTime = []
            procTime.insert(0, ind1.TIMEIN)
            procTime.insert(1, ind1.TIMEOUT)
            MILL2.append(procTime)
        else:
            continue

    # Convert each [TIMEIN, TIMEOUT] pair to elapsed seconds via the
    # Transformations helper.
    transform = Transformations()
    procTime_MILL1 = []
    for elem in MILL1:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL1.append(dt[0])

    procTime_MILL2 = []
    for elem in MILL2:
        t1 = []
        t2 = []
        t1.append(((elem[0].hour) * 60) * 60 + (elem[0].minute) * 60 +
                  elem[0].second)
        t2.append(((elem[1].hour) * 60) * 60 + (elem[1].minute) * 60 +
                  elem[1].second)
        dt = transform.subtraction(t2, t1)
        procTime_MILL2.append(dt[0])

    # MTTF and MTTR samples, again split per station code.
    b = cursors[1].execute("""
            select stat_code, MTTF_hour
            from failures
                    """)

    c = cursors[2].execute("""
            select stat_code, MTTR_hour
            from repairs
                    """)
    MTTF_MILL1 = []
    MTTF_MILL2 = []
    for j in range(b.rowcount):
        #get the next line
        ind2 = b.fetchone()
        if ind2.stat_code == 'MILL1':
            MTTF_MILL1.append(ind2.MTTF_hour)
        elif ind2.stat_code == 'MILL2':
            MTTF_MILL2.append(ind2.MTTF_hour)
        else:
            continue

    MTTR_MILL1 = []
    MTTR_MILL2 = []
    for j in range(c.rowcount):
        #get the next line
        ind3 = c.fetchone()
        if ind3.stat_code == 'MILL1':
            MTTR_MILL1.append(ind3.MTTR_hour)
        elif ind3.stat_code == 'MILL2':
            MTTR_MILL2.append(ind3.MTTR_hour)
        else:
            continue

    #======================= Fit data to statistical distributions ================================#
    # Processing times: best fit chosen by Kolmogorov-Smirnov test; MTTF is
    # fitted as Weibull and MTTR as Poisson.
    dist_proctime = DistFittest()
    distProcTime_MILL1 = dist_proctime.ks_test(procTime_MILL1)
    distProcTime_MILL2 = dist_proctime.ks_test(procTime_MILL2)

    dist_MTTF = Distributions()
    dist_MTTR = Distributions()
    distMTTF_MILL1 = dist_MTTF.Weibull_distrfit(MTTF_MILL1)
    distMTTF_MILL2 = dist_MTTF.Weibull_distrfit(MTTF_MILL2)

    distMTTR_MILL1 = dist_MTTR.Poisson_distrfit(MTTR_MILL1)
    distMTTR_MILL2 = dist_MTTR.Poisson_distrfit(MTTR_MILL2)

    #======================== Output preparation: output the values prepared in the CMSD information model of this model ====================================================#
    if not cmsdFile:
        datafile = (
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         CMSDFileName)
        )  #It defines the name or the directory of the XML file that is manually written the CMSD information model
        tree = et.parse(
            datafile
        )  #This file will be parsed using the XML.ETREE Python library

    # Each CMSDOutput call returns the updated tree, which is threaded into
    # the next call.
    exportCMSD = CMSDOutput()
    stationId1 = 'M1'
    stationId2 = 'M2'
    procTime1 = exportCMSD.ProcessingTimes(tree, stationId1,
                                           distProcTime_MILL1)
    procTime2 = exportCMSD.ProcessingTimes(procTime1, stationId2,
                                           distProcTime_MILL2)

    TTF1 = exportCMSD.TTF(procTime2, stationId1, distMTTF_MILL1)
    TTR1 = exportCMSD.TTR(TTF1, stationId1, distMTTR_MILL1)

    TTF2 = exportCMSD.TTF(TTR1, stationId2, distMTTF_MILL2)
    TTR2 = exportCMSD.TTR(TTF2, stationId2, distMTTR_MILL2)

    TTR2.write(
        'CMSD_ParallelStations_Output.xml', encoding="utf8"
    )  #It writes the element tree to a specified file, using the 'utf8' output encoding

    #======================= Output preparation: output the updated values in the JSON file of this example ================================#
    if not jsonFile:
        jsonFile = open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         JSONFileName), 'r')  #It opens the JSON file
        data = json.load(jsonFile)  #It loads the file
        jsonFile.close()
    else:
        data = json.load(jsonFile)

    # Same threading pattern for the JSON output: each call returns the
    # updated data structure.
    exportJSON = JSONOutput()
    stationId1 = 'M1'
    stationId2 = 'M2'
    data1 = exportJSON.ProcessingTimes(data, stationId1, distProcTime_MILL1)
    data2 = exportJSON.ProcessingTimes(data1, stationId2, distProcTime_MILL2)

    data3 = exportJSON.TTF(data2, stationId1, distMTTF_MILL1)
    data4 = exportJSON.TTR(data3, stationId1, distMTTR_MILL1)

    data5 = exportJSON.TTF(data4, stationId2, distMTTF_MILL2)
    data6 = exportJSON.TTR(data5, stationId2, distMTTR_MILL2)

    # if we run from test return the data6
    if test:
        return data6

    jsonFile = open('JSON_ParallelStations_Output.json',
                    "w")  #It opens the JSON file
    jsonFile.write(json.dumps(
        data6, indent=True))  #It writes the updated data to the JSON file
    jsonFile.close()  #It closes the file

    #=================== Calling the ExcelOutput object, outputs the outcomes of the statistical analysis in xls files ==========================#
    export = ExcelOutput()

    export.PrintStatisticalMeasures(procTime_MILL1,
                                    'procTimeMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(procTime_MILL2,
                                    'procTimeMILL2_StatResults.xls')
    export.PrintStatisticalMeasures(MTTF_MILL1, 'MTTFMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(MTTF_MILL2, 'MTTFMILL2_StatResults.xls')
    export.PrintStatisticalMeasures(MTTR_MILL1, 'MTTRMILL1_StatResults.xls')
    export.PrintStatisticalMeasures(MTTR_MILL2, 'MTTRMILL2_StatResults.xls')

    export.PrintDistributionFit(procTime_MILL1,
                                'procTimeMILL1_DistFitResults.xls')
    export.PrintDistributionFit(procTime_MILL2,
                                'procTimeMILL2_DistFitResults.xls')
    export.PrintDistributionFit(MTTF_MILL1, 'MTTFMILL1_DistFitResults.xls')
    export.PrintDistributionFit(MTTF_MILL2, 'MTTFMILL2_DistFitResults.xls')
    export.PrintDistributionFit(MTTR_MILL1, 'MTTRMILL1_DistFitResults.xls')
    export.PrintDistributionFit(MTTR_MILL2, 'MTTRMILL2_DistFitResults.xls')
Example #11
0
from dream.KnowledgeExtraction.StatisticalMeasures import BasicStatisticalMeasures
from dream.KnowledgeExtraction.DistributionFitting import Distributions
from dream.KnowledgeExtraction.DistributionFitting import DistFittest
from dream.KnowledgeExtraction.ReplaceMissingValues import HandleMissingValues
from dream.KnowledgeExtraction.ImportDatabase import ConnectionData
from dream.KnowledgeExtraction.DetectOutliers import HandleOutliers
from JSONOutput import JSONOutput
from dream.KnowledgeExtraction.CMSDOutput import CMSDOutput
from xml.etree import ElementTree as et
# from WIP_Identifier import currentWIP
import xlrd
from dateutil.parser import *
import datetime
from time import mktime

# Open the database connection described by the 'ServerData' seek file and
# reserve 3 cursors for the queries in this module.
# NOTE(review): hard-coded absolute Windows path — breaks on any other machine;
# should come from configuration.
cnxn=ConnectionData(seekName='ServerData', file_path='C:\Users\Panos\Documents\DB_Approach\BatchModel', implicitExt='txt', number_of_cursors=3)
cursors=cnxn.getCursors()

# Full MES extract: one row per production transaction (container, product,
# timestamp, task type, station, quantity, operator).
mesExtract=cursors[0].execute("""
            select CONTAINERNAME, PRODUCTNAME, PRODUCTDESCRIPTION, TASKDATE, TASKTYPENAME, STATIONNAME, CONTAINERQTYATTXN, EMPLOYEENAME
            from mes2
                    """)

# method that returns the processStory dictionary, which contains the production steps of the container ids
def contProcessStory(contId):
    # NOTE(review): 'processStory' is not defined anywhere in this chunk —
    # presumably a module-level dict created elsewhere; confirm before reuse.
    processStory[contId]={}
    # Re-issue the same MES extract so this cursor starts at the first row.
    mesExtract=cursors[0].execute("""
            select CONTAINERNAME, PRODUCTNAME, PRODUCTDESCRIPTION, TASKDATE, TASKTYPENAME, STATIONNAME, CONTAINERQTYATTXN, EMPLOYEENAME
            from mes2
                    """)
    for i in range(mesExtract.rowcount):
        # NOTE(review): loop body is MISSING — the source was truncated
        # mid-function here; the per-row production-step logic that should
        # populate processStory[contId] must be restored from the original.
    def updateDatabase(self):
        """Persist the work package selected in the GUI into production_status.

        Branches on the 'operation finished' checkbox (self.checkBoxVal2):

        * checked  -- the operation is complete: the inserted row carries an
          END_DATE, and if this work package is the last one in the order's
          sequence the whole order is flagged 'finished' in the orders table.
        * unchecked -- the operation is still running: the row is inserted
          without an END_DATE and the order is flagged 'in progress'.

        Reads GUI state from self.OrderOption, self.operationOption,
        self.capacity and self.comments; closes the dialog via
        self.close_window() after committing. Returns None.
        """
        # Fresh connection per call; the seek file resolves server details.
        # NOTE(review): hard-coded absolute Windows path — breaks elsewhere.
        cnxn = ConnectionData(
            seekName="ServerData",
            file_path="C:\Users\Panos\Documents\DB_Approach\CapacityStations",
            implicitExt="txt",
            number_of_cursors=13,
        )
        cursor = cnxn.getCursors()

        if self.checkBoxVal2.get():
            # Operation finished: insert with an explicit END_DATE column.
            update_order = "INSERT INTO production_status(`status_id`, `WP_id`, `Operation_Name`, `START_DATE`, `Capacity_left`, `Remarks`,`END_DATE`)  VALUES ( ?, ?, ?, ?, ?, ?, ?)"
            # @@IDENTITY supplies the new status_id; the row is fetched below.
            cursor[0].execute("SELECT @@IDENTITY AS ID")
            order = self.OrderOption.get()
            a = cursor[1].execute(
                """
                        select WP_id, Order_id
                        from sequence where Order_id=?
                                """,
                order,
            )
            # Walk all sequence rows so that the last fetched row is the
            # final work package of the order.
            # NOTE(review): raises NameError if the order has no sequence
            # rows (ind2 never bound) — confirm that cannot occur upstream.
            for j in range(a.rowcount):
                ind2 = a.fetchone()
            lastWP = ind2.WP_id
            b = cursor[2].execute(
                """
                                select WP_id
                                 from sequence where Operation_Name=? and Order_id=?
                                """,
                self.operationOption.get(),
                order,
            )
            ind4 = b.fetchone()
            status2 = "finished"
            row = cursor[0].fetchone()
            WP = ind4[0]
            order_ref = row.ID
            # NOTE(review): assumes `datetime` is the class (i.e. the module
            # does `from datetime import datetime`) — confirm; with
            # `import datetime` this call would fail.
            cursor[4].execute(
                update_order,
                (
                    order_ref,
                    WP,
                    self.operationOption.get(),
                    str(datetime.now()),
                    self.capacity.get(),
                    self.comments.get(),
                    str(datetime.now()),
                ),
            )
            # Last WP of the sequence done -> the whole order is finished.
            if WP == lastWP:
                cursor[5].execute("UPDATE orders SET `Status`=? WHERE Order_id=? ", status2, order)
            cursor[6].commit()
            self.close_window()
        else:
            # Operation still in progress: no END_DATE in the insert.
            update_order = "INSERT INTO production_status(`status_id`, `WP_id`, `Operation_Name`, `START_DATE`, `Capacity_left`, `Remarks`)  VALUES ( ?, ?, ?, ?, ?, ?)"

            cursor[7].execute("SELECT @@IDENTITY AS ID")
            order = self.OrderOption.get()
            a = cursor[8].execute(
                """
                                select sequence.WP_id, sequence.Order_id, sequence.Operation_Name
                                 from sequence where Order_id=? and Operation_Name=?
                                """,
                order,
                self.operationOption.get(),
            )
            ind3 = a.fetchone()
            WP = ind3.WP_id
            row = cursor[7].fetchone()
            order_ref = row.ID
            status1 = "in progress"
            cursor[10].execute(
                update_order,
                (
                    order_ref,
                    WP,
                    self.operationOption.get(),
                    str(datetime.now()),
                    self.capacity.get(),
                    self.comments.get(),
                ),
            )
            cursor[11].execute("UPDATE orders SET `Status`=? WHERE Order_id=? ", status1, order)
            cursor[12].commit()
            self.close_window()
        return