def oldmethod():
    """Evaluate the fixed CIRP reference design and print its unit cost.

    Compares this optimization method against the CIRP method by
    evaluating the unit cost U at a hard-coded reference design
    (sigmacompare, and kcompare for the inspection scenario).

    Side effects:
        Prints the reference unit cost. Returns nothing.

    NOTE(review): relies on module-level globals -- scenario, INSPECT,
    NOINSPECT, D, k, E, F, A, B, USY, miuY, Sp, Sc -- and the helper
    module hp; confirm they are initialized before calling.
    """
    # Reference tolerance design; this computation was duplicated
    # verbatim in both branches, so it is hoisted out.
    sigmacompare = np.array([0.09, 0.06, 0.1])
    sigmaY_Taylorcompare = hp.sigmaY(sigmacompare, D, scenario, k)
    rcompare = hp.sigmator(sigmacompare, E, F)
    costcompare = hp.C(A, B, rcompare)
    if scenario == INSPECT:
        # Scrap scenario: the inspection limits kcompare are also fixed.
        kcompare = np.array([2.47, 2.34, 2.83])
        U_compare = hp.U_scrap(costcompare, USY, miuY, sigmaY_Taylorcompare,
                               kcompare, Sp, Sc)
        print('Old Method minimum value = ', U_compare)
    elif scenario == NOINSPECT:
        U_compare = hp.U_noscrap(costcompare, USY, miuY, sigmaY_Taylorcompare,
                                 Sp)
        print('Old Method minimum value = ', U_compare)
def obj_scipy_noinspect(x):
    """SciPy objective (unit cost U) for the no-inspection scenario.

    Args:
        x: design vector holding ONLY the tolerance ratios r (k is not
           optimized in this scenario), so the whole vector is r.

    Returns:
        U: the unit cost to be minimized.

    Bug fix: the previous code did ``r = x[0:int(x.size / 2)]``, silently
    discarding half of the design variables -- a copy-paste from the
    inspection objective where x concatenates r and k.  The no-inspection
    caller passes x = np.copy(r), so all of x is r.
    """
    r = x
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    # Compute unit cost
    C = hp.C(A, B, r)
    U = hp.U_noscrap(C, USY, miuY, sigmaY_Taylor)
    print(U)
    return U
def obj_nlopt_noinspect(x, grad):
    """NLopt objective (unit cost U) for the no-inspection scenario.

    Args:
        x: design vector holding only the tolerance ratios r; the
           inspection limits k are held constant and not optimized.
        grad: NLopt-supplied gradient array; filled in place when its
              size is nonzero (gradient-based algorithms).

    Returns:
        U: the unit cost to be minimized.

    Fixes: grad_r was written without ever being allocated (NameError
    unless it leaked in from module scope), and sigmaX / sigmaY_Taylor
    were recomputed a second time with identical inputs.
    """
    # x carries only r in this scenario.
    r = x[0:x.size]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D)
    # Compute unit cost
    C = hp.C(A, B, r)
    U = hp.U_noscrap(C, USY, miuY, sigmaY_Taylor)
    # Analytical gradient of U w.r.t. each r[i].
    grad_r = np.zeros(m)
    for i in range(0, m):  # Change this for loop to vectorization
        dCi_dri_v = hp.dCi_dri(B[i], r[i])
        dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i])
        dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v)
        grad_r[i] = hp.dU_dri_noscrap(USY, miuY, sigmaY_Taylor, C, k, i,
                                      dsigmaY_dri_v, dCi_dri_v)
    if grad.size > 0:
        # NLopt requires assigning into the provided array, not rebinding.
        grad[:] = grad_r
    return U
def obj_nlopt_noinspect(x, grad, para):
    """NLopt objective (unit cost U) for the no-inspection scenario,
    parameterized form.

    Args:
        x: design vector; its first m entries are the tolerance ratios r
           (k is held constant and not optimized).
        grad: NLopt-supplied gradient array; filled in place when its
              size is nonzero (gradient-based algorithms).
        para: sequence (A, B, E, F) of cost/tolerance model coefficients.

    Returns:
        U: the unit cost to be minimized.

    Fix: grad_r was written element-by-element without ever being
    allocated; it is now initialized locally.
    """
    # Unpack the cost/tolerance model parameters.
    A = para[0]
    B = para[1]
    E = para[2]
    F = para[3]
    r = x[0:m]
    sigmaX = hp.sigma(E, F, r)
    sigmaY_Taylor = hp.sigmaY(sigmaX, D, scenario, k)
    # Compute unit cost
    C = hp.C(A, B, r)
    U = hp.U_noscrap(C, USY, miuY, sigmaY_Taylor, Sp)
    # Analytical gradient of U w.r.t. each r[i].
    grad_r = np.zeros(m)
    for i in range(0, m):  # Change this for loop to vectorization
        dCi_dri_v = hp.dCi_dri(B[i], r[i])
        dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i])
        dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v,
                                       scenario, k)
        grad_r[i] = hp.dU_dri_noscrap(USY, miuY, sigmaY_Taylor, C, k, i,
                                      dsigmaY_dri_v, dCi_dri_v, Sp)
    if grad.size > 0:
        grad[:] = grad_r  # Make sure to assign value using [:]
    #print(U)
    return U
grad_equation_k = np.zeros(m) for i in range(0, m): ri_add_epsilon = np.copy(r) ri_minus_epsilon = np.copy(r) ri_add_epsilon[i] += epsilon ri_minus_epsilon[i] -= epsilon sigmaX_plus = hp.sigma(E, F, ri_add_epsilon) sigmaX_minus = hp.sigma(E, F, ri_minus_epsilon) C_plus = hp.C(A, B, ri_add_epsilon) C_minus = hp.C(A, B, ri_minus_epsilon) sigmaY_Taylor_plus = hp.sigmaY(sigmaX_plus, D, scenario, k) sigmaY_Taylor_minus = hp.sigmaY(sigmaX_minus, D, scenario, k) if scenario == 1: #NO INSPECT #gradient computed by numerical estimation grad_numerical_r[i] = (hp.U_noscrap( C_plus, USY, miuY, sigmaY_Taylor_plus, Sp) - hp.U_noscrap( C_minus, USY, miuY, sigmaY_Taylor_minus, Sp)) / (2 * epsilon) print('Numerical_No scrap_' + 'dr' + str(i), '=', grad_numerical_r[i]) #gradient computed by equation dCi_dri_v = hp.dCi_dri(B[i], r[i]) dsigmai_dri_v = hp.dsigmai_dri(F[i], r[i]) dsigmaY_dri_v = hp.dsigmaY_dri(D, sigmaX, r, i, dsigmai_dri_v, scenario, k) grad_equation_r[i] = hp.dU_dri_noscrap(USY, miuY, sigmaY_Taylor, C, k, i, dsigmaY_dri_v, dCi_dri_v, Sp) print('Equation_No scrap_' + 'dr' + str(i), '=', grad_equation_r[i]) elif scenario == 2: #Inspection FIX k #Varify dr #gradient computed by numerical estimation grad_numerical_r[i] = (
def gradientcheck(x, case):
    """Verify the analytical gradients against central finite differences.

    For the design vector x, compares the equation-based gradient
    (obj_grad_scipy_inspect / obj_grad_scipy_noinspect) with a numerical
    estimate perturbed by +/- epsilon, element by element, and prints a
    normalized error for dr (and dk in the SCRAP case).

    NOTE(review): reads many module-level globals -- r, k, m, epsilon,
    A, B, D, E, F, USY, miuY, lamada, sigmaY_Taylor, SCRAP, NOSCRAP --
    and perturbs the global r/k copies, not x itself; confirm x and the
    global r/k are kept in sync by the caller.
    """
    if case == SCRAP:
        grad_equation = obj_grad_scipy_inspect(x)
        #retrieve grad of r and k: first m entries are dU/dr, rest dU/dk
        grad_equation_r = grad_equation[0:m]
        grad_equation_k = grad_equation[m:]
        grad_numerical_k = np.zeros(m)
        grad_numerical_r = np.zeros(m)
    elif case == NOSCRAP:
        # No inspection limits to check, only dU/dr.
        grad_equation_r = obj_grad_scipy_noinspect(x)
        grad_numerical_r = np.zeros(m)
    # Baseline cost at the unperturbed design (used for the dk check).
    C = hp.C(A, B, r)
    for i in range(0, m):
        # Central-difference stencils: perturb only component i.
        ri_add_epsilon = np.copy(r)
        ri_minus_epsilon = np.copy(r)
        ri_add_epsilon[i] += epsilon
        ri_minus_epsilon[i] -= epsilon
        ki_add_epsilon = np.copy(k)
        ki_minus_epsilon = np.copy(k)
        ki_add_epsilon[i] += epsilon
        ki_minus_epsilon[i] -= epsilon
        sigmaX_plus = hp.sigma(E, F, ri_add_epsilon)
        sigmaX_minus = hp.sigma(E, F, ri_minus_epsilon)
        C_plus = hp.C(A, B, ri_add_epsilon)
        C_minus = hp.C(A, B, ri_minus_epsilon)
        sigmaY_Taylor_plus = hp.sigmaY(sigmaX_plus, D)
        sigmaY_Taylor_minus = hp.sigmaY(sigmaX_minus, D)
        if case == SCRAP:
            # Inspection rescales sigmaY by lamada (truncation factor).
            sigmaY_Taylor_p = lamada * sigmaY_Taylor
            #Varify dr
            sigmaY_Taylor_plus *= lamada
            sigmaY_Taylor_minus *= lamada
            #gradient computed by numerical estimation
            grad_numerical_r[i] = (
                hp.U_scrap(C_plus, USY, miuY, sigmaY_Taylor_plus, k) -
                hp.U_scrap(C_minus, USY, miuY, sigmaY_Taylor_minus, k)) / (
                    2 * epsilon)
            #varify dk: perturb k at fixed cost C and fixed sigmaY
            grad_numerical_k[i] = (
                hp.U_scrap(C, USY, miuY, sigmaY_Taylor_p, ki_add_epsilon) -
                hp.U_scrap(C, USY, miuY, sigmaY_Taylor_p,
                           ki_minus_epsilon)) / (2 * epsilon)
            print('Numerical_scrap_' + 'dr' + str(i), '=',
                  grad_numerical_r[i])
            print('Equation_scrap_' + 'dr' + str(i), '=',
                  grad_equation_r[i])
            print('Numerical_scrap_' + 'dk' + str(i), '=',
                  grad_numerical_k[i])
            print('Equation_scrap_' + 'dk' + str(i), '=',
                  grad_equation_k[i])
        elif case == NOSCRAP:
            #gradient computed by numerical estimation
            grad_numerical_r[i] = (hp.U_noscrap(
                C_plus, USY, miuY, sigmaY_Taylor_plus) - hp.U_noscrap(
                    C_minus, USY, miuY, sigmaY_Taylor_minus)) / (2 * epsilon)
            print('Numerical_No scrap_' + 'dr' + str(i), '=',
                  grad_numerical_r[i])
            print('Equation_No scrap_' + 'dr' + str(i), '=',
                  grad_equation_r[i])
    # Normalized distance between the two gradients:
    # ||g_eq - g_num|| / (||g_eq|| + ||g_num||), small when they agree.
    distance12_r = distance.euclidean(grad_equation_r, grad_numerical_r)
    length1_r = distance.euclidean(grad_equation_r,
                                   np.zeros_like(grad_equation_r))
    length2_r = distance.euclidean(grad_numerical_r,
                                   np.zeros_like(grad_numerical_r))
    graderror_r = distance12_r / (length1_r + length2_r)
    print('error of dr=', graderror_r)
    if case == SCRAP:
        # Same normalized-error check for the dk gradient.
        distance12_k = distance.euclidean(grad_equation_k, grad_numerical_k)
        length1_k = distance.euclidean(grad_equation_k,
                                       np.zeros_like(grad_equation_k))
        length2_k = distance.euclidean(grad_numerical_k,
                                       np.zeros_like(grad_numerical_k))
        graderror_k = distance12_k / (length1_k + length2_k)
        print('error of dk=', graderror_k)
obj_scipy_inspect, x, method='SLSQP', jac=obj_grad_scipy_inspect, #Nelder-Mead #SLSQP options={ 'ftol': 1e-9, 'maxiter': 1000, 'disp': True }, bounds=mbounds) #constraints=ineq_cons, #,callback=output elif case == NOSCRAP: #Define Upper and Lower boundaries #The order is ([lower bnd for x1, lower bnd for x2], [Higher bnd for x1, Higher bnd for x2]) mbounds = Bounds([smallvalue, smallvalue, smallvalue], [largevalue, largevalue, largevalue]) U_init = hp.U_noscrap(cost, USY, miuY, sigmaY_Taylor) x = np.copy(r) res = minimize( obj_scipy_noinspect, x, method='SLSQP', jac=obj_grad_scipy_noinspect, #Nelder-Mead #SLSQP options={ 'ftol': 1e-9, 'maxiter': 1000, 'disp': True }, bounds=mbounds) #constraints=ineq_cons, #,callback=output elif opt_lib == NLOPT: if case == SCRAP: #Scrap opt = nlopt.opt(nlopt.LD_MMA, 2 * m)
obj_scipy_inspect, x, method='SLSQP', jac=obj_grad_scipy_inspect, #Nelder-Mead #SLSQP options={ 'ftol': 1e-9, 'maxiter': 1000, 'disp': True }, bounds=mbounds) #constraints=ineq_cons, #,callback=output elif scenario == NOINSPECT: #Define Upper and Lower boundaries #The order is ([lower bnd for x1, lower bnd for x2], [Higher bnd for x1, Higher bnd for x2]) mbounds = Bounds([smallvalue, smallvalue, smallvalue], [largevalue, largevalue, largevalue]) U_init = hp.U_noscrap(cost, USY, miuY, sigmaY_Taylor, Sp) x = np.copy(r) res = minimize( obj_scipy_noinspect, x, method='SLSQP', jac=obj_grad_scipy_noinspect, #Nelder-Mead #SLSQP options={ 'ftol': 1e-9, 'maxiter': 1000, 'disp': True }, bounds=mbounds) #constraints=ineq_cons, #,callback=output elif opt_lib == NLOPT: if scenario == INSPECT: #Scrap opt = nlopt.opt(nlopt.LD_MMA,