Example #1
0
# Example 1: read the failure data set (MTTF/MTTR samples) from Excel.
workbook = xlrd.open_workbook('DataSet.xlsx')
worksheets = workbook.sheet_names()
# The second sheet holds the failures data (MTTF, MTTR).
worksheet_Fail = worksheets[1]

# Import the sheet into a dictionary keyed by column header ('MTTF'/'MTTR').
data = Import_Excel()
failures = data.Input_data(worksheet_Fail, workbook)

# Pull the two sample lists out of the dictionary (empty list if absent).
MTTF, MTTR = (failures.get(key, []) for key in ('MTTF', 'MTTR'))

#======================= Fit data to probability distributions ================================#
#The Distributions and DistFittest objects are called to fit statistical distributions to the in scope data
# NOTE(review): Activity2_Proc, Activity3_Proc and S1 are not defined in this
# snippet -- presumably built in the omitted part of the example; confirm.
dist = Distributions()
# Weibull fits (MLE) for the two activities' processing-time samples.
act2Proc = dist.Weibull_distrfit(Activity2_Proc)
act3Proc = dist.Weibull_distrfit(Activity3_Proc)

# Exponential fit (MLE) for the source inter-arrival times.
s1Times = dist.Exponential_distrfit(S1)

# DistFittest selects the best-fitting distribution via a Kolmogorov-Smirnov test.
distFit = DistFittest()
act1MTTF = distFit.ks_test(MTTF)
act1MTTR = distFit.ks_test(MTTR)

#======================= Output preparation: output the updated values in the XML file of this example ================================#

datafile = ('Topology1.xml')  #define the input xml file
tree = et.parse(datafile)
simul8 = Simul8Output()  #Call the Simul8Output object
#Assign the statistical distribution calculated above in the XML file using methods of the Simul8Output object
# NOTE(review): 'A' and the worksheet_* handles are not defined in this
# snippet -- presumably an Import_Excel instance and sheet names created in
# the omitted part of the example; confirm against the full source.
ProcessingTimes = A.Input_data(worksheet_ProcessingTimes, workbook)   #Create the Processing Times dictionary with key the Machine 1 and values the processing time data
MTTF=A.Input_data(worksheet_MTTF, workbook)        #Create the MTTF dictionary with key the Machine 1 and time-to-failure data 
MTTR=A.Input_data(worksheet_MTTR, workbook)        #Create the MTTR Quantity dictionary with key the Machine 1 and time-to-repair data 

## Pull the Machine-1 ('M1') series out of the dictionaries built above.
ProcTime = ProcessingTimes.get('M1', [])
MTTF = MTTF.get('M1', [])
MTTR = MTTR.get('M1', [])

# Patch missing points with the mean of the observed values so the fitting
# steps below always receive complete samples.
B = HandleMissingValues()
ProcTime, MTTF, MTTR = (B.ReplaceWithMean(series)
                        for series in (ProcTime, MTTF, MTTR))

# Distributions fits a chosen family by Maximum Likelihood Estimation;
# DistFittest selects the best family via the Kolmogorov-Smirnov test.
C = Distributions()      #Call the Distributions object
D = DistFittest()      #Call the DistFittest object

ProcTime_dist = D.ks_test(ProcTime)  # best KS fit for the processing times
MTTF_dist = C.Exponential_distrfit(MTTF)  # Exponential fit for time-to-failure
MTTR_dist = C.Exponential_distrfit(MTTR)  # Exponential fit for time-to-repair

#================================= Output preparation: output the updated values in the JSON file of this example =========================================================#
# Load the model description; the context manager guarantees the file handle
# is closed even if json.load raises (the original left it open on error).
with open('JSON_AssembleDismantle.json', 'r') as jsonFile:
    data = json.load(jsonFile)
nodes = data.get('nodes', [])  # the 'nodes' dictionary of the model

# NOTE(review): only the last node's values survive this loop; the snippet
# appears truncated here and the loop body presumably continues elsewhere.
for element in nodes:
    processingTime = nodes[element].get('processingTime', {})  # node attribute 'processingTime'
    MTTF_Nodes = nodes[element].get('MTTF', {})  # node attribute 'MTTF'
# Walk every fetched row and bucket the repair times (hours) by station code;
# rows for any other station are ignored.
for _ in range(c.rowcount):
    record = c.fetchone()
    if record.stat_code == 'MILL1':
        MTTR_MILL1.append(record.MTTR_hour)
    elif record.stat_code == 'MILL2':
        MTTR_MILL2.append(record.MTTR_hour)

#======================= Fit data to statistical distributions ================================#
# Kolmogorov-Smirnov based selection of the best distribution for each
# mill's processing-time sample.
dist_proctime = DistFittest()
distProcTime_MILL1 = dist_proctime.ks_test(procTime_MILL1)
distProcTime_MILL2 = dist_proctime.ks_test(procTime_MILL2)

# MLE fits: Weibull for the times-to-failure, Poisson for the times-to-repair.
dist_MTTF = Distributions()
dist_MTTR = Distributions()
distMTTF_MILL1 = dist_MTTF.Weibull_distrfit(MTTF_MILL1)
distMTTF_MILL2 = dist_MTTF.Weibull_distrfit(MTTF_MILL2)

distMTTR_MILL1 = dist_MTTR.Poisson_distrfit(MTTR_MILL1)
distMTTR_MILL2 = dist_MTTR.Poisson_distrfit(MTTR_MILL2) 

#======================= Output preparation: output the updated values in the JSON file of this example ================================#
# Load the model description; the context manager guarantees the file handle
# is closed even if json.load raises (the original left it open on error).
with open('JSON_example.json', 'r') as jsonFile:
    data = json.load(jsonFile)
nodes = data.get('nodes', [])  # the 'nodes' dictionary of the model

# NOTE(review): the snippet appears truncated -- only the last node's value
# survives this loop as written.
for element in nodes:
    processingTime = nodes[element].get('processingTime', {})  # node attribute 'processingTime'
Example #4
0
# Map every station to the distribution selected by the Kolmogorov-Smirnov
# test on its processing-time sample.
dictProc = {}
for station, sample in (('MA', MA_Proc), ('M1A', M1A_Proc), ('M1B', M1B_Proc),
                        ('M2A', M2A_Proc), ('M2B', M2B_Proc),
                        ('M3A', M3A_Proc), ('M3B', M3B_Proc), ('MM', MM_Proc),
                        ('PrA', PrA_Proc), ('PrB', PrB_Proc),
                        ('PaA', PaA_Proc), ('PaB', PaB_Proc)):
    dictProc[station] = D.ks_test(sample)
# Fit (via Maximum Likelihood Estimation) each station's scrap-quantity list
# to a discrete Geometric distribution.
D = Distributions()
dictScrap = {}  # {station id: fitted Geometric distribution}
for station, scrap in (('MA', MA_Scrap), ('M1A', M1A_Scrap),
                       ('M1B', M1B_Scrap), ('M2A', M2A_Scrap),
                       ('M2B', M2B_Scrap), ('M3A', M3A_Scrap),
                       ('M3B', M3B_Scrap), ('MM', MM_Scrap),
                       ('PrA', PrA_Scrap), ('PrB', PrB_Scrap),
                       ('PaA', PaA_Scrap), ('PaB', PaB_Scrap)):
    dictScrap[station] = D.Geometric_distrfit(scrap)
# Two-parallel-stations example: import the processing-time samples from the
# Excel workbook via the KE tool's Import_Excel object.
inputData = Import_Excel()
ProcessingTimes = inputData.Input_data(worksheet_ProcessingTimes,
                                       workbook)  # {machine id: samples}

# Machine-specific sample lists for machines 'M1' and 'M2'.
M1_ProcTime = ProcessingTimes.get('M1', [])
M2_ProcTime = ProcessingTimes.get('M2', [])

# Patch missing points with the mean of the observed values so the fitting
# steps below always receive complete samples.
misValues = HandleMissingValues()
M1_ProcTime, M2_ProcTime = (misValues.ReplaceWithMean(series)
                            for series in (M1_ProcTime, M2_ProcTime))

MLE = Distributions()  # Maximum Likelihood Estimation fits
KS = DistFittest()  # Kolmogorov-Smirnov distribution selection

M1ProcTime_dist = KS.ks_test(M1_ProcTime)
M2ProcTime_dist = MLE.Normal_distrfit(M2_ProcTime)

#======================= Output preparation: output the updated values in the JSON file of this example ================================#
# Load the model description; the context manager guarantees the file handle
# is closed even if json.load raises (the original left it open on error).
with open('JSON_TwoParallelStations.json', 'r') as jsonFile:
    data = json.load(jsonFile)
nodes = data.get('nodes', [])  # the 'nodes' dictionary of the model

# NOTE(review): this loop was truncated when the examples were captured --
# the .get( call below is never closed and the loop body is incomplete.
for element in nodes:
    processingTime = nodes[element].get(
        'processingTime', {}
Example #6
0
# NOTE(review): the opening of the first list literal ("MTTR_MILL1 = [") was
# lost when the examples were captured; only its closing bracket survives.
]  #Initialization of the list that will contain the MTTR data points for MILL1
MTTR_MILL2 = [
]  #Initialization of the list that will contain the MTTR data points for MILL2
# Walk every fetched row and bucket the repair times (hours) by station code;
# rows for any other station are ignored.
for _ in range(c.rowcount):
    record = c.fetchone()
    if record.stat_code == 'MILL1':
        MTTR_MILL1.append(record.MTTR_hour)
    elif record.stat_code == 'MILL2':
        MTTR_MILL2.append(record.MTTR_hour)

#======================= Fit data to statistical distributions ================================#
#The Distributions object is called to fit statistical distributions to the in scope data
# MLE fits for the processing times: Lognormal for MILL1, Weibull for MILL2.
dist_proctime = Distributions()
distProcTime_MILL1 = dist_proctime.Lognormal_distrfit(procTime_MILL1)
distProcTime_MILL2 = dist_proctime.Weibull_distrfit(procTime_MILL2)

# Exponential fits for the times-to-failure, Normal fits for the
# times-to-repair of each mill.
dist_MTTF = Distributions()
dist_MTTR = Distributions()
distMTTF_MILL1 = dist_MTTF.Exponential_distrfit(MTTF_MILL1)
distMTTF_MILL2 = dist_MTTF.Exponential_distrfit(MTTF_MILL2)

distMTTR_MILL1 = dist_MTTR.Normal_distrfit(MTTR_MILL1)
distMTTR_MILL2 = dist_MTTR.Normal_distrfit(MTTR_MILL2)

#======================= Output preparation: output the updated values in the XML file of this example ================================#

datafile = ('ParallelStations.xml')  #define the input xml file
tree = et.parse(datafile)
Example #7
0
# Select the best-fitting distribution (Kolmogorov-Smirnov test) for the M1
# processing-time sample; M1 is rebound to the fitted distribution.
distFitting = DistFittest()  #Call the DistFittest object
M1 = distFitting.ks_test(M1)

#Read from the given directory the Excel document with the inter-arrivals data
workbook = xlrd.open_workbook('InterarrivalsData.xls')
worksheets = workbook.sheet_names()
worksheet_Inter = worksheets[
    0]  #Define the worksheet with the Inter-arrivals time data

data = Import_Excel()
interTimes = data.Input_data(
    worksheet_Inter, workbook
)  #Create the Inter-arrival times dictionary with key the Source and values the inter-arrival time data

S1 = interTimes.get('Source', [])

# Exponential MLE fit for the source inter-arrival times; S1 is rebound to
# the fitted distribution.
distMLE = Distributions()  #Call the Distributions object
S1 = distMLE.Exponential_distrfit(S1)

datafile = ('SingleServer.xml')  #define the input xml file
tree = et.parse(datafile)
simul8 = Simul8Output()  #Call the Simul8Output object

# Write the fitted distributions into the Simul8 XML model and retitle it.
title = 'KEtool_SingleServer'
interTimes = simul8.InterArrivalTime(tree, 'Source', S1)
procTimes = simul8.ProcTimes(interTimes, 'Activity 1', M1)
title = simul8.Title(procTimes, title)
#Output the XML file with the processed data
output = procTimes.write('KEtool_SingleServer.xml')
def generateDemandPlanning(input_url,
                           PPOSQuantity=1000,
                           PlannedWeek=1,
                           PPOSToBeDisaggregated='PPOS1',
                           MinPackagingSize=10,
                           planningHorizon=10):
    """Generate random demand from the spreadsheet at *input_url*.

    The exported RapidMiner Excel file is downloaded and parsed, a Normal
    distribution is fitted to its 'Global demand' column, and two outputs
    are produced (sheets of one xlwt workbook plus two JSON side files):

    * sheet 'Future1' / futureDemandProfile.json -- a random weekly demand
      profile per MA over *planningHorizon* weeks;
    * sheet 'PPOS' / PPOSProfile.json -- *PPOSQuantity* units of the PPOS
      *PPOSToBeDisaggregated* randomly split over that PPOS's MAs.

    :param input_url: URL of the exported Excel file to download.
    :param PPOSQuantity: total units of the PPOS to disaggregate.
    :param PlannedWeek: week number stamped on every disaggregated row.
    :param PPOSToBeDisaggregated: identifier of the PPOS to disaggregate.
    :param MinPackagingSize: order lines smaller than this are zeroed out.
    :param planningHorizon: number of weeks in the demand profile.
    :return: the workbook serialized as a byte string (also saved to DP.xls).
    """
    # ToDo we have to standardize data
    #     PPOSToBeDisaggregated='PPOS'+str(PPOSToBeDisaggregated+'1')

    # Download the RapidMiner export and import it with the KE tool.
    demand_data = urllib.urlopen(input_url).read()
    workbook = xlrd.open_workbook(file_contents=demand_data)
    worksheet_RapidMiner = workbook.sheet_names()[0]

    A = Import_Excel()
    Turnovers = A.Input_data(worksheet_RapidMiner,
                             workbook)  # {column header: column values}

    # Columns of interest from the export.
    PPOS = Turnovers.get('Ppos', [])
    SP = Turnovers.get('SP', [])
    MA = Turnovers.get('FP Material No PGS+', [])
    GlobalDemand = Turnovers.get('Global demand', [])

    # Fit the global demand to a Normal distribution (mean / stdev).
    D = Distributions()
    E = HandleMissingValues()
    MA = E.DeleteMissingValue(MA)
    fit = D.Normal_distrfit(GlobalDemand)
    avg = fit.get('mean')
    stdev = fit.get('stdev')

    def constrained_sum_sample_pos(n, total):
        """Return a randomly chosen list of n positive integers summing to total.
        Each such list is equally likely to occur."""
        dividers = sorted(random.sample(xrange(1, total), n - 1))
        return [a - b for a, b in zip(dividers + [total], [0] + dividers)]

    def constrained_sum_sample_nonneg(n, total):
        """Return a randomly chosen list of n nonnegative integers summing to total.
        Each such list is equally likely to occur."""
        return [x - 1 for x in constrained_sum_sample_pos(n, total + n)]

    # ---- random weekly demand profile over the planning horizon ----------
    DemandProfile = {}  # {week: {MA id: [TotalUnits, MinUnits]}}
    for week in range(1, int(planningHorizon) + 1):
        # Non-negative integer demand drawn from the fitted Normal.
        Demand = int(abs(random.normalvariate(avg, stdev)))
        # Allocated share starts at 80% and shrinks by 5% per week.
        AllocatedPercent = 0.8 - (0.05 * week)
        Remaining_Demand = int((1 - AllocatedPercent) * Demand)
        # Random split of the remaining demand over the MAs.  (The original
        # also built an unused rpy2 percentage vector here; removed.)
        split = constrained_sum_sample_nonneg(len(MA), Remaining_Demand)
        weekOrders = {}
        for index in range(len(MA)):
            MinUnits = round(split[index] * (random.uniform(0, 0.2)), 0)
            TotalUnits = split[index]
            # Lines below the minimum packaging size are zeroed out.
            if TotalUnits < MinPackagingSize:
                TotalUnits = 0
            if MinUnits < MinPackagingSize:
                MinUnits = 0
            weekOrders[MA[index]] = [TotalUnits, MinUnits]
        DemandProfile[week] = weekOrders

    # Unique, non-empty PPOS identifiers in first-seen order.
    uniquePPOS = []
    for ppos in PPOS:
        if ppos not in uniquePPOS and ppos != '':
            uniquePPOS.append(ppos)

    book = Workbook()
    sheet1 = book.add_sheet('Future1', cell_overwrite_ok=True)

    # Flatten the profile to [MA, total, min, week] rows, skipping zero totals.
    aggrTable = []
    for week in DemandProfile:
        for ma_id in DemandProfile[week]:
            totalUnits, minUnits = DemandProfile[week][ma_id]
            if totalUnits > 0:
                aggrTable.append([ma_id, totalUnits, minUnits, week])
    # Pre-sort by unit count so the stable sort by week below keeps the unit
    # count as a secondary ordering.
    aggrTable.sort(key=lambda row: row[1], reverse=False)

    header = ['Order ID', 'MA ID', 'Total # Units', 'Min # Units',
              'Planned Week']
    # Header row once, outside the loop (original rewrote it per row).
    for col, caption in enumerate(header):
        sheet1.write(0, col, caption)

    futureDemandProfile = {}
    t = 1
    for row in sorted(aggrTable, key=lambda row: int(row[3])):
        sheet1.write(t, 0, t)
        # XXX the MA id should not have MA prefix...
        sheet1.write(t, 1, row[0].replace('MA', '', 1))
        sheet1.write(t, 2, row[1])
        sheet1.write(t, 3, row[2])
        sheet1.write(t, 4, row[3])
        futureDemandProfile[t] = {
            'MAID': row[0],
            'TotalUnits': row[1],
            'MinUnits': row[2],
            'PlannedWeek': row[3]
        }
        t += 1

    # Serialize once, after the loop ('with' closes the handle; the original
    # leaked it, re-dumped per row, and raised NameError on an empty table).
    with open('futureDemandProfile.json', mode='w') as profileFile:
        profileFile.write(json.dumps(futureDemandProfile, indent=5))

    ###==================================================================================================###
    sheet2 = book.add_sheet('PPOS', cell_overwrite_ok=True)

    # Group the MAs that belong to each unique PPOS.
    dictPPOSMA = {}
    for ind in uniquePPOS:
        indices = [i for i, j in enumerate(PPOS) if j == ind]
        # NOTE(review): membership goes through MA.index(ma), i.e. each MA's
        # *first* position; duplicate MA names would be mis-grouped --
        # confirm MA entries are unique.
        dictPPOSMA[ind] = [ma for ma in MA if MA.index(ma) in indices]

    # Random disaggregation of PPOSQuantity over the selected PPOS's MAs.
    # (The original redrew the split once per MA -- keeping only the last
    # draw -- and built an unused rpy2 percentage vector; both removed.)
    dictPPOS = {}
    targetMAs = dictPPOSMA.get(PPOSToBeDisaggregated, [])
    if targetMAs:
        split = constrained_sum_sample_nonneg(len(targetMAs), PPOSQuantity)
        for i in range(len(targetMAs)):
            MinUnits = round(split[i] * (random.uniform(0, 0.2)), 0)
            TotalUnits = split[i]
            if TotalUnits < MinPackagingSize:
                TotalUnits = 0
            if MinUnits < MinPackagingSize:
                MinUnits = 0
            dictPPOS[targetMAs[i]] = [TotalUnits, MinUnits]

    for col, caption in enumerate(header):
        sheet2.write(0, col, caption)

    PPOSProfile = {}
    t = 1
    for i in range(len(dictPPOS)):
        ma_id = dictPPOSMA[PPOSToBeDisaggregated][i]
        sheet2.write(t, 0, t)
        # XXX the MA id should not have MA prefix...
        sheet2.write(t, 1, ma_id.replace('MA', '', 1))
        sheet2.write(t, 2, dictPPOS[ma_id][0])
        sheet2.write(t, 3, dictPPOS[ma_id][1])
        sheet2.write(t, 4, PlannedWeek)
        PPOSProfile[t] = {
            'MAID': ma_id,
            'TotalUnits': dictPPOS[ma_id][0],
            'MinUnits': dictPPOS[ma_id][1],
            'PlannedWeek': PlannedWeek
        }
        t += 1

    # Serialize once, after the loop (same leak/NameError fix as above).
    with open('PPOSProfile.json', mode='w') as PPOSProfileFile:
        PPOSProfileFile.write(json.dumps(PPOSProfile, indent=5))

    # Return the workbook bytes; also persist a copy to disk.
    import StringIO
    out = StringIO.StringIO()
    book.save(out)
    book.save('DP.xls')
    return out.getvalue()