Example #1
def oldmethod():
    #Compare this method with the CIRP method.
    if scenario == INSPECT:  #Scrap
        #ropt = x[0:m]
        #kopt = x[m:]
        #sigmaopt = hp.sigma(E,F,ropt)
        sigmacompare = np.array([0.09, 0.06, 0.1])
        sigmaY_Taylorcompare = hp.sigmaY(sigmacompare, D, scenario, k)
        rcompare = hp.sigmator(sigmacompare, E, F)
        costcompare = hp.C(A, B, rcompare)
        kcompare = np.array([2.47, 2.34,
                             2.83])  #np.array([2.450709, 1.9927, 3.1678])
        #Update Lambda by simulation
        #lamada = hp.updateLambda(D,sigmacompare,kcompare,miu,NSample)
        #lamada = 0.876
        U_compare = hp.U_scrap(costcompare, USY, miuY, sigmaY_Taylorcompare,
                               kcompare, Sp, Sc)
        print('Old Method minimum value = ', U_compare)
    elif scenario == NOINSPECT:
        #ropt = x
        #sigmaopt = hp.sigma(E,F,ropt)
        sigmacompare = np.array([0.09, 0.06, 0.1])
        sigmaY_Taylorcompare = hp.sigmaY(sigmacompare, D, scenario, k)
        rcompare = hp.sigmator(sigmacompare, E, F)
        costcompare = hp.C(A, B, rcompare)
        U_compare = hp.U_noscrap(costcompare, USY, miuY, sigmaY_Taylorcompare,
                                 Sp)
        print('Old Method minimum value = ', U_compare)
Example #2
def obj_nlopt_inspect(x, grad):
    #retrieve r and k
    num_m = int(x.size / 2)
    r = x[0:num_m]
    k = x[num_m:]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    sigmaY_Taylor = lamada * sigmaY_Taylor
    #Compute Unit Cost
    C = hp.C(A, B, r)
    U = hp.U_scrap(C, USY, miuY, sigmaY_Taylor, k)

    for i in range(0, m):  # Change this for loop to vectorization
        dCi_dri_v = hp.dCi_dri(B[i], r[i])
        dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i])
        dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v)
        grad_r[i] = hp.dU_dri_scrap(USY, miuY, sigmaY_Taylor, C, k, i, lamada,
                                    dsigmaY_dri_v, dCi_dri_v)

        grad_k[i] = hp.dU_dki_scrap(USY, miuY, sigmaY_Taylor, k[i], C[i])
    grad_combine = np.concatenate((grad_r, grad_k), axis=0)
    if grad.size > 0:
        grad[:] = grad_combine
    return U
Example #3
def obj_nlopt_inspect(x, grad):
    #retrieve r and k
    num_m = int(x.size / 2)
    r = x[0:num_m]
    k = x[num_m:]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    #Update Lambda by simulation
    global lamada
    #lamada = hp.updateLambda(D,sigmaX,k,miu,NSample)
    sigmaY_Taylor = lamada * sigmaY_Taylor
    #Compute Unit Cost
    C = hp.C(A, B, r)
    U = hp.U_scrap(C, USY, miuY, sigmaY_Taylor, k, Sp, Sc)

    for i in range(0, m):  # Change this for loop to vectorization
        dCi_dri_v = hp.dCi_dri(B[i], r[i])
        dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i])
        dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v)
        grad_r[i] = hp.dU_dri_scrap(USY, miuY, sigmaY_Taylor, C, k, i, lamada,
                                    dsigmaY_dri_v, dCi_dri_v, Sp, Sc)

        grad_k[i] = hp.dU_dki_scrap(USY, miuY, sigmaY_Taylor, k[i], C[i],
                                    Sc[i])
    grad_combine = np.concatenate((grad_r, grad_k), axis=0)
    if grad.size > 0:
        grad[:] = grad_combine  #Make sure to assign value using [:]
    print(U)
    print(lamada)
    return U
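The "Make sure to assign value using [:]" comment above deserves a quick illustration: NLopt hands the objective a preallocated gradient array, so the callback must write into that buffer in place; rebinding the local name would be invisible to the optimizer. A minimal stand-alone demo (not part of the original code, names are hypothetical):

import numpy as np

def demo_objective(x, grad):
    #NLopt-style callback: grad is a buffer owned by the caller
    if grad.size > 0:
        grad[:] = 2.0 * x  #in-place write updates the caller's buffer
        #grad = 2.0 * x    #would only rebind the local name; the caller would still see zeros
    return float(np.dot(x, x))

buf = np.zeros(3)
val = demo_objective(np.array([1.0, 2.0, 3.0]), buf)
print(val, buf)  #14.0 [2. 4. 6.]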
Example #4
def obj_nlopt_inspect(x, grad, para):
    #retrieve r and k
    A = para[0]
    B = para[1]
    E = para[2]
    F = para[3]
    num_m = int(x.size / 2)
    r = x[0:num_m]
    k = x[num_m:]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D, scenario, k)

    C = hp.C(A, B, r)
    U = hp.U_scrap(C, USY, miuY, sigmaY_Taylor, k, Sp, Sc)

    for i in range(0, m):  # Change this for loop to vectorization
        dCi_dri_v = hp.dCi_dri(B[i], r[i])
        dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i])
        dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v,
                                       scenario, k)
        grad_r[i] = hp.dU_dri_scrap(USY, miuY, sigmaY_Taylor, C, k, i,
                                    dsigmaY_dri_v, dCi_dri_v, Sp, Sc)
        dsigmaY_dki = hp.dsigmaY_dki(D, sigmaX, r, i, k)
        grad_k[i] = hp.dU_dki_scrap(USY, miuY, sigmaY_Taylor, k, i, C, Sc,
                                    dsigmaY_dki, Sp)
    grad_combine = np.concatenate((grad_r, grad_k), axis=0)

    if grad.size > 0:
        grad[:] = grad_combine  #Make sure to assign value using [:]
    #print(U)
    #print(lamada)
    return U
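For reference, a minimal sketch of how an objective with this (x, grad, para) signature could be handed to NLopt. The bounds smallvalue/largevalue, the dimension m, the parameter arrays A, B, E, F, and the initial r and k are assumed to come from the surrounding script; LD_MMA is just one of NLopt's gradient-based algorithms:

import nlopt
import numpy as np

para = np.array([A, B, E, F])  #same packing used by casestudy_U below
n = 2 * m                      #x stacks r (first m entries) and k (last m entries)
opt = nlopt.opt(nlopt.LD_MMA, n)
opt.set_lower_bounds(smallvalue * np.ones(n))
opt.set_upper_bounds(largevalue * np.ones(n))
opt.set_min_objective(lambda x, grad: obj_nlopt_inspect(x, grad, para))
opt.set_xtol_rel(1e-6)
x_opt = opt.optimize(np.concatenate((r, k)))  #initial guess from the surrounding script
U_min = opt.last_optimum_value()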
Example #5
def obj_nlopt_inspect_fixk(x, grad, para):
    A = para[0]
    B = para[1]
    E = para[2]
    F = para[3]
    r = x[0:m]
    k = 3.0 * np.ones_like(r)
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D, scenario, k)
    #Compute Unit Cost
    C = hp.C(A, B, r)
    U = hp.U_scrap(C, USY, miuY, sigmaY_Taylor, k, Sp, Sc)

    for i in range(0, m):  # Change this for loop to vectorization
        dCi_dri_v = hp.dCi_dri(B[i], r[i])
        dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i])
        dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v,
                                       scenario, k)
        grad_r[i] = hp.dU_dri_scrap(USY, miuY, sigmaY_Taylor, C, k, i,
                                    dsigmaY_dri_v, dCi_dri_v, Sp, Sc)

    if grad.size > 0:
        grad[:] = grad_r  #Make sure to assign value using [:]
    #print(U)
    #print(lamada)

    return U
Example #6
def obj_nlopt_noinspect(x, grad):
    #retrieve r as the optimization variable x (k is not optimized, so a constant is used)
    r = x[0:x.size]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    #Compute Unit Cost
    C = hp.C(A, B, r)
    U = hp.U_noscrap(C, USY, miuY, sigmaY_Taylor)

    for i in range(0, m):  # Change this for loop to vectorization
        dCi_dri_v = hp.dCi_dri(B[i], r[i])
        dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i])
        dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v)
        grad_r[i] = hp.dU_dri_noscrap(USY, miuY, sigmaY_Taylor, C, k, i,
                                      dsigmaY_dri_v, dCi_dri_v)
    if grad.size > 0:
        grad[:] = grad_r
    return U
Example #7
def obj_scipy_noinspect(x):
    #retrieve r
    num_m = int(x.size / 2)
    r = x[0:num_m]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    #Compute Unit Cost
    C = hp.C(A, B, r)
    U = hp.U_noscrap(C, USY, miuY, sigmaY_Taylor)
    print(U)
    return U
Example #8
def obj_scipy_inspect(x):
    #retrieve r and k
    num_m = int(x.size / 2)
    r = x[0:num_m]
    k = x[num_m:]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    sigmaY_Taylor = lamada * sigmaY_Taylor
    #Compute Unit Cost
    C = hp.C(A, B, r)
    U = hp.U_scrap(C, USY, miuY, sigmaY_Taylor, k)
    print(U)
    return U
Example #9
def obj_scipy_inspect(x):
    #retrieve r and k
    num_m = int(x.size / 2)
    r = x[0:num_m]
    k = x[num_m:]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    #Update Lambda by simulation
    #lamada = hp.updateLambda(D,sigmaX,k,miu,NSample)
    sigmaY_Taylor = lamada * sigmaY_Taylor
    #Compute Unit Cost
    C = hp.C(A, B, r)
    U = hp.U_scrap(C, USY, miuY, sigmaY_Taylor, k, Sp, Sc)
    print(U)
    return U
Example #10
def obj_grad_scipy_noinspect(x):
    #retrieve r and k
    grad = np.zeros(m)
    r = x[0:m]
    k = x[m:]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    #Compute Unit Cost
    C = hp.C(A, B, r)
    for i in range(0, m):  # Change this for loop to vectorization
        dCi_dri_v = hp.dCi_dri(B[i], r[i])
        dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i])
        dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v)
        grad_r[i] = hp.dU_dri_noscrap(USY, miuY, sigmaY_Taylor, C, k, i,
                                      dsigmaY_dri_v, dCi_dri_v)
    grad[:] = grad_r
    return grad
Example #11
def output(x):
    #Retrieve r and k
    global ite
    r = x[0:m]
    #k = x[m:]
    cost = hp.C(A, B, r)
    for i in range(m):
        print(ite,
              'r' + str(i + 1) + ' ',
              r[i],
              'k' + str(i + 1) + ' ',
              k[i],
              end='')
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    U = hp.U_scrap(cost, USY, miuY, sigmaY_Taylor, k)
    print(ite, ' U=', U)
    ite += 1
Example #12
def obj_grad_scipy_inspect(x):
    #retrieve r and k
    grad = np.zeros_like(x)
    r = x[0:m]
    k = x[m:]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    sigmaY_Taylor = lamada * sigmaY_Taylor
    #Compute Unit Cost
    C = hp.C(A, B, r)
    for i in range(0, m):  # Change this for loop to vectorization
        dCi_dri_v = hp.dCi_dri(B[i], r[i])
        dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i])
        dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v)
        grad_r[i] = hp.dU_dri_scrap(USY, miuY, sigmaY_Taylor, C, k, i, lamada,
                                    dsigmaY_dri_v, dCi_dri_v)

        grad_k[i] = hp.dU_dki_scrap(USY, miuY, sigmaY_Taylor, k[i], C[i])
    grad_combine = np.concatenate((grad_r, grad_k), axis=0)
    grad[:] = grad_combine
    return grad
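As a usage note, the SciPy-style objective and gradient above can be passed directly to scipy.optimize.minimize. The sketch below is an assumption about the surrounding script (it reuses m, smallvalue, largevalue, the initial r and k, and the output callback from Example #11), not code taken from it:

import numpy as np
from scipy.optimize import minimize, Bounds

x0 = np.concatenate((r, k))
mbounds = Bounds(smallvalue * np.ones(2 * m), largevalue * np.ones(2 * m))
res = minimize(obj_scipy_inspect, x0,
               jac=obj_grad_scipy_inspect,
               method='L-BFGS-B',
               bounds=mbounds,
               callback=output)  #optional per-iteration printing
r_opt, k_opt = res.x[:m], res.x[m:]
print('U_min =', res.fun)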
Example #13
def casestudy_U():
    para = np.array([A, B, E, F])
    result = optimize(True, para)
    U_equation = result['U']
    r_opt = result['r']
    if scenario == INSPECT:
        k_opt = result['k']
    else:
        k_opt = 3 * np.ones_like(r_opt)
    sigma_opt = hp.sigma(E, F, r_opt)
    [N, M] = hp.estimateNandM(miu, E, F, r_opt, k_opt, NSample, USY, miuY,
                              scenario)
    U_simulation = hp.U_inspect_simulation(NSample, r_opt, A, B, E, F, k_opt,
                                           miu, USY, miuY, Sp, Sc)
    print('U Equation: ', U_equation)
    print('U Simulation: ', U_simulation)
    satisfactionrate = hp.satisfactionrate_component_product(
        miu, E, F, r_opt, k_opt, NSample, USY, miuY, scenario)
    print('beta: ', satisfactionrate['beta'])
    print('sigmaY: ', hp.sigmaY(sigma_opt, D, scenario, k_opt))
    print('opt cost:', hp.C(A, B, r_opt))
    print('N: ', N)
    print('M: ', M)
    print('Gamma: ', satisfactionrate['gammas'])
Example #14
def obj_nlopt_noinspect(x, grad, para):
    #retrieve r as the optimization variable x (k is not optimized, so a constant is used)
    A = para[0]
    B = para[1]
    E = para[2]
    F = para[3]
    r = x[0:m]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D, scenario, k)
    #Compute Unit Cost
    C = hp.C(A, B, r)
    U = hp.U_noscrap(C, USY, miuY, sigmaY_Taylor, Sp)

    for i in range(0, m):  # Change this for loop to vectorization
        dCi_dri_v = hp.dCi_dri(B[i], r[i])
        dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i])
        dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v,
                                       scenario, k)
        grad_r[i] = hp.dU_dri_noscrap(USY, miuY, sigmaY_Taylor, C, k, i,
                                      dsigmaY_dri_v, dCi_dri_v, Sp)
    if grad.size > 0:
        grad[:] = grad_r  #Make sure to assign value using [:]
    #print(U)
    return U
Example #15
def gradientcheck(x, case):
    if case == SCRAP:
        grad_equation = obj_grad_scipy_inspect(x)

        #retrieve grad of r and k
        grad_equation_r = grad_equation[0:m]
        grad_equation_k = grad_equation[m:]

        grad_numerical_k = np.zeros(m)
        grad_numerical_r = np.zeros(m)
    elif case == NOSCRAP:
        grad_equation_r = obj_grad_scipy_noinspect(x)
        grad_numerical_r = np.zeros(m)

    C = hp.C(A, B, r)

    for i in range(0, m):
        ri_add_epsilon = np.copy(r)
        ri_minus_epsilon = np.copy(r)
        ri_add_epsilon[i] += epsilon
        ri_minus_epsilon[i] -= epsilon
        ki_add_epsilon = np.copy(k)
        ki_minus_epsilon = np.copy(k)
        ki_add_epsilon[i] += epsilon
        ki_minus_epsilon[i] -= epsilon
        sigmaX_plus = hp.sigma(E, F, ri_add_epsilon)
        sigmaX_minus = hp.sigma(E, F, ri_minus_epsilon)
        C_plus = hp.C(A, B, ri_add_epsilon)
        C_minus = hp.C(A, B, ri_minus_epsilon)
        sigmaY_Taylor_plus = hp.sigmaY(sigmaX_plus, D)
        sigmaY_Taylor_minus = hp.sigmaY(sigmaX_minus, D)

        if case == SCRAP:
            sigmaY_Taylor_p = lamada * sigmaY_Taylor
            #Verify dr
            sigmaY_Taylor_plus *= lamada
            sigmaY_Taylor_minus *= lamada
            #gradient computed by numerical estimation
            grad_numerical_r[i] = (
                hp.U_scrap(C_plus, USY, miuY, sigmaY_Taylor_plus, k) -
                hp.U_scrap(C_minus, USY, miuY, sigmaY_Taylor_minus, k)) / (
                    2 * epsilon)
            #Verify dk
            grad_numerical_k[i] = (
                hp.U_scrap(C, USY, miuY, sigmaY_Taylor_p, ki_add_epsilon) -
                hp.U_scrap(C, USY, miuY, sigmaY_Taylor_p,
                           ki_minus_epsilon)) / (2 * epsilon)
            print('Numerical_scrap_' + 'dr' + str(i), '=', grad_numerical_r[i])
            print('Equation_scrap_' + 'dr' + str(i), '=', grad_equation_r[i])
            print('Numerical_scrap_' + 'dk' + str(i), '=', grad_numerical_k[i])
            print('Equation_scrap_' + 'dk' + str(i), '=', grad_equation_k[i])

        elif case == NOSCRAP:
            #gradient computed by numerical estimation
            grad_numerical_r[i] = (hp.U_noscrap(
                C_plus, USY, miuY, sigmaY_Taylor_plus) - hp.U_noscrap(
                    C_minus, USY, miuY, sigmaY_Taylor_minus)) / (2 * epsilon)
            print('Numerical_No scrap_' + 'dr' + str(i), '=',
                  grad_numerical_r[i])
            print('Equation_No scrap_' + 'dr' + str(i), '=',
                  grad_equation_r[i])

    distance12_r = distance.euclidean(grad_equation_r, grad_numerical_r)
    length1_r = distance.euclidean(grad_equation_r,
                                   np.zeros_like(grad_equation_r))
    length2_r = distance.euclidean(grad_numerical_r,
                                   np.zeros_like(grad_numerical_r))
    graderror_r = distance12_r / (length1_r + length2_r)
    print('error of dr=', graderror_r)

    if case == SCRAP:
        distance12_k = distance.euclidean(grad_equation_k, grad_numerical_k)
        length1_k = distance.euclidean(grad_equation_k,
                                       np.zeros_like(grad_equation_k))
        length2_k = distance.euclidean(grad_numerical_k,
                                       np.zeros_like(grad_numerical_k))
        graderror_k = distance12_k / (length1_k + length2_k)
        print('error of dk=', graderror_k)
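A lighter alternative to the hand-rolled comparison above is scipy.optimize.check_grad, which measures the distance between the analytic gradient and a forward-difference estimate at a single point. A small sketch, assuming the same stacked r and k used elsewhere in this script:

import numpy as np
from scipy.optimize import check_grad

x0 = np.concatenate((r, k))
err = check_grad(obj_scipy_inspect, obj_grad_scipy_inspect, x0)
print('check_grad error =', err)  #should be small relative to the gradient norm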
Example #16
        print(ite,
              'r' + str(i + 1) + ' ',
              r[i],
              'k' + str(i + 1) + ' ',
              k[i],
              end='')
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    U = hp.U_scrap(cost, USY, miuY, sigmaY_Taylor, k)
    print(ite, ' U=', U)
    ite += 1


#Unit cost of initial values
sigmaX = hp.sigma(E, F, r)
sigmaY_Taylor = hp.sigmaY(sigmaX, D)
cost = hp.C(A, B, r)

SCIPY = 0
NLOPT = 1
opt_lib = NLOPT

if opt_lib == SCIPY:
    if case == SCRAP:  #Scrap
        #Define Upper and Lower boundaries
        #The order is ([lower bnd for x1, lower bnd for x2], [Higher bnd for x1, Higher bnd for x2])
        mbounds = Bounds([
            smallvalue, smallvalue, smallvalue, smallvalue, smallvalue,
            smallvalue
        ], [
            largevalue, largevalue, largevalue, largevalue, largevalue,
Example #17

miuX = np.array([miuX1, miuX2, miuX3])

D1 = hp.dy_dx1(miuX[0], miuX[1], miuX[2])
D2 = hp.dy_dx2(miuX[0], miuX[1], miuX[2])
D3 = hp.dy_dx3(miuX[0], miuX[1], miuX[2])

D = np.array([D1, D2, D3])

sigma = np.array([sigmax1, sigmax2, sigmax3])
sigmaY = hp.sigmaY(sigma, D)

#Sigma estimated by simulation
X1 = np.random.normal(miuX[0], sigmax1, nsample)
X2 = np.random.normal(miuX[1], sigmax2, nsample)
X3 = np.random.normal(miuX[2], sigmax3, nsample)
X = np.array([X1, X2, X3])
products_simulation = hp.assembly(X)
sigmaY_simulation = np.std(products_simulation)

#Sigma estimated by simulation - with scrap
(X1_satis, N1) = hp.produce_satisfactory_output(miuX[0], sigmax1, nsample, TX1)
(X2_satis, N2) = hp.produce_satisfactory_output(miuX[1], sigmax2, nsample, TX2)
(X3_satis, N3) = hp.produce_satisfactory_output(miuX[2], sigmax3, nsample, TX3)
X_satis = np.array([X1_satis, X2_satis, X3_satis])
Example #18
miuX2 = 22.86
miuX3 = 101.6

sigmaX = np.array([0.11, 0.1, 0.15])
Kcompare = np.array([2.05, 1.385, 3.478])
#sigmaX = np.array([1.117320573685910284e-01,1.044633649141074733e-01,1.526516278137779736e-01])
#k = np.array([6.341588383548683, 6.412625456198882, 7.431651149276303])
tol = np.multiply(sigmaX, Kcompare)
#kcompare = np.array([3, 3, 3])

D1 = hp.dy_dx1(miuX1, miuX2, miuX3)
D2 = hp.dy_dx2(miuX1, miuX2, miuX3)
D3 = hp.dy_dx3(miuX1, miuX2, miuX3)

D = np.array([D1, D2, D3])

sigmaY_equation = hp.sigmaY(sigmaX, D)

#Sigma estimated by simulation - with scrap
(X1_satis, N1) = hp.produce_satisfactory_output(miuX1, sigmaX[0], nsample,
                                                tol[0])
(X2_satis, N2) = hp.produce_satisfactory_output(miuX2, sigmaX[1], nsample,
                                                tol[1])
(X3_satis, N3) = hp.produce_satisfactory_output(miuX3, sigmaX[2], nsample,
                                                tol[2])
X = np.array([X1_satis, X2_satis, X3_satis])
products_simulation_satis = hp.assembly(X)
sigmaY_simulation_satis = np.std(products_simulation_satis)

lambdav = sigmaY_simulation_satis / sigmaY_equation
print('lambda=', lambdav)
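Stated compactly, and assuming hp.sigmaY implements first-order (Taylor) error propagation with D holding the partial derivatives at the mean, this script estimates the correction factor lambda as the ratio of the simulated post-scrap output spread to the Taylor estimate. A minimal restatement using the arrays defined above:

import numpy as np

#Assumed first-order propagation: sigmaY ~ sqrt(sum_i D_i^2 * sigmaX_i^2)
sigmaY_taylor = np.sqrt(np.sum((D * sigmaX) ** 2))
lambda_correction = sigmaY_simulation_satis / sigmaY_taylor
print('lambda (restated) =', lambda_correction)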
Example #19
D1 = hp.dy_dx1(miu[0], miu[1], miu[2])
D2 = hp.dy_dx2(miu[0], miu[1], miu[2])
D3 = hp.dy_dx3(miu[0], miu[1], miu[2])

D = np.array([D1, D2, D3])

#r=hp.sigmator(sigmaX,E,F)
r = 10 * np.random.rand(3)
k = 5 * np.random.rand(3)  # alternative: 3 * np.ones_like(r)

sigmaX = hp.sigma(E, F, r)
#Compute Unit Cost of initial value
C = hp.C(A, B, r)

sigmaY_Taylor = hp.sigmaY(sigmaX, D, scenario, k)

#Nominal value of Y
miuY = np.radians(7.0124)
##Upper specification limit
USY = miuY + np.radians(2.0)

#U = hp.U_scrap(C,USY,sigmaY,k)

grad_numerical_r = np.zeros(m)
grad_equation_r = np.zeros(m)
grad_numerical_k = np.zeros(m)
grad_equation_k = np.zeros(m)
for i in range(0, m):
    ri_add_epsilon = np.copy(r)
    ri_minus_epsilon = np.copy(r)
Example #20
        print(ite,
              'r' + str(i + 1) + ' ',
              r[i],
              'k' + str(i + 1) + ' ',
              k[i],
              end='')
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    U = hp.U_scrap(cost, USY, miuY, sigmaY_Taylor, k, Sp, Sc)
    print(ite, ' U=', U)
    ite += 1


#Unit cost of initial values
sigmaX = hp.sigma(E, F, r)
sigmaY_Taylor = hp.sigmaY(sigmaX, D)
cost = hp.C(A, B, r)

SCIPY = 0
NLOPT = 1
opt_lib = NLOPT

if opt_lib == SCIPY:
    if scenario == INSPECT:  #Scrap
        #Define Upper and Lower boundaries
        #The order is ([lower bnd for x1, lower bnd for x2], [Higher bnd for x1, Higher bnd for x2])
        mbounds = Bounds([
            smallvalue, smallvalue, smallvalue, smallvalue, smallvalue,
            smallvalue
        ], [
            largevalue, largevalue, largevalue, largevalue, largevalue,