def n_samp_wrap(n, individual, propDB, Kinp):
    """Evaluate the blended cost of ``individual`` for one random sample of
    the property database.

    Parameters
    ----------
    n : int
        Sample index, used to seed numpy's RNG so that each sample draw is
        reproducible (e.g. across parallel workers).
    individual : sequence of float
        Blend (mole/volume) fractions of the mixture components.
    propDB : dict
        Property database handed to the sampling routine.
    Kinp : float
        Merit-function K parameter. Kept for interface compatibility with the
        other evaluators; the cost evaluation itself does not use it.

    Returns
    -------
    float
        Blended cost of the sampled property vector.
    """
    # Seed per-sample so every call with the same n draws the same realization.
    numpy.random.seed(n)
    # Draw one random realization of the component property vectors.
    ncomp, spc_names, propvec = make_property_vector_all_sample_cost(propDB)
    # BUG FIX: the original also evaluated the full merit function here
    # (blending RON/S/HoV/AFR/PMI and calling mmf_single) and then discarded
    # the result — dead computation. Only the cost is returned.
    cost_vec = blend(individual, propvec, 'COST')
    return cost_vec
def eval_mo(individual, propvec, Kinp):
    """Multi-objective evaluation of a candidate blend.

    Parameters
    ----------
    individual : sequence of float
        Blend fractions of the mixture components; must sum to 1.
    propvec : dict
        Per-component property vectors consumed by ``blend``.
    Kinp : float
        K parameter of the merit function.

    Returns
    -------
    tuple(float, float)
        ``(merit, cost)`` — merit (with penalty, currently always zero) and
        blended cost. Objective weights (minimize/maximize) are assigned by
        the caller's DEAP setup.

    Raises
    ------
    ValueError
        If the fractions in ``individual`` do not sum to 1 (within 1e-3).
    """
    # Blend the properties the merit function consumes.
    this_ron = blend(individual, propvec, 'RON')
    this_s = blend(individual, propvec, 'S')
    this_HoV = blend(individual, propvec, 'HoV')
    this_AFR = blend(individual, propvec, 'AFR_STOICH')
    this_PMI = blend(individual, propvec, 'PMI')
    cost_f = blend(individual, propvec, 'COST')
    merit_f = mmf_single(RON=this_ron, S=this_s, HoV=this_HoV,
                         AFR=this_AFR, PMI=this_PMI, K=Kinp)

    # Sum-to-one constraint check; individuals are constructed so this always
    # holds, hence the penalty terms below stay zero.
    g_val = fraction_constraint(individual)
    if numpy.abs(g_val - 1) <= 1.0e-3:
        penalty_mmf = 0
        penalty_cost = 0
    else:
        # Theoretically unreachable given how individuals are created.
        # BUG FIX: the original raised a plain string, which is itself a
        # TypeError in Python 3; raise a proper exception instead.
        raise ValueError('Your parameters dont add up to 1 ')

    # penalty_cost / penalty_mmf are 0 unless individual creation changes.
    c_p_p = cost_f + penalty_cost
    mmf_p_p = (merit_f + penalty_mmf)
    # Add more objective functions here as needed (and matching weights,
    # -1 = minimize / 1 = maximize, in the DEAP creator setup).
    return mmf_p_p, c_p_p
def obj_fun(model):
    """Pyomo objective: merit function of the linearly blended properties.

    Uses the module-level K parameter ``KK``. LFV150 and PMI are blended for
    parity with the sibling evaluators even though the merit call here does
    not consume them.
    """
    props = {name: blend_linear_pyomo(model, name)
             for name in ('RON', 'S', 'HoV', 'SL', 'LFV150', 'PMI')}
    return mmf_single(RON=props['RON'], S=props['S'],
                      HoV=props['HoV'], SL=props['SL'], K=KK)
def obj_fun(model):
    """Pyomo objective: merit of the blended properties.

    Falls back to the plain merit function when the module-level reference
    point ``ref`` is None; otherwise evaluates the parameterized variant with
    ``ref`` and sensitivity ``sen``. K comes from the module-level ``KK``.
    """
    props = {name: blend_linear_pyomo(model, name)
             for name in ('RON', 'S', 'HoV', 'SL', 'LFV150', 'PMI')}
    merit_kwargs = dict(RON=props['RON'], S=props['S'],
                        HoV=props['HoV'], SL=props['SL'], K=KK)
    if ref is None:
        return mmf_single(**merit_kwargs)
    return mmf_single_param(ref, sen, **merit_kwargs)
def objective(individual, propvec, Kinp):
    """Scalar objective for a minimizer: the negated merit function.

    Maximizing merit is expressed as minimizing -merit. SL, AFR, LFV150, PMI
    and COST are blended for parity with the sibling evaluators even where
    the merit call does not consume them.
    """
    props = {key: blend(individual, propvec, key)
             for key in ('RON', 'S', 'HoV', 'SL', 'AFR_STOICH',
                         'LFV150', 'PMI', 'COST')}
    merit = mmf_single(RON=props['RON'], S=props['S'], HoV=props['HoV'],
                       AFR=props['AFR_STOICH'], PMI=props['PMI'], K=Kinp)
    return -merit
def comp_to_cost_mmf(comp, propDB, k):
    """Evaluate a composition against the property database.

    Parameters
    ----------
    comp : mapping
        Composition passed to ``blend_linear_propDB``.
    propDB : dict
        Property database.
    k : float
        Merit-function K parameter.

    Returns
    -------
    tuple(float, float)
        ``(cost, merit)`` of the blended composition.
    """
    names = ('RON', 'S', 'HoV', 'SL', 'LFV150', 'PMI')
    blended = {name: blend_linear_propDB(name, propDB, comp) for name in names}
    mmf = mmf_single(K=k, **blended)
    cost = blend_linear_propDB('COST', propDB, comp)
    return cost, mmf
def comp_to_mmf(comp, propDB, k, ref=None, sen=None):
    """Evaluate the merit of a composition against the property database.

    Uses the plain merit function when both ``ref`` and ``sen`` are None,
    otherwise the parameterized variant ``mmf_single_param(ref, sen, ...)``.

    Returns
    -------
    float
        The merit value.
    """
    names = ('RON', 'S', 'HoV', 'SL', 'LFV150', 'PMI')
    blended = {name: blend_linear_propDB(name, propDB, comp) for name in names}
    if ref is None and sen is None:
        mmf = mmf_single(K=k, **blended)
    else:
        mmf = mmf_single_param(ref, sen, K=k, **blended)
    # Cost is blended for parity with comp_to_cost_mmf but not returned here.
    cost = blend_linear_propDB('COST', propDB, comp)
    return mmf
def objective_function(x, propvec, Kinp):
    """Negated merit for an (n-1)-dimensional composition vector.

    The final blend fraction is implied: it is set to ``1 - sum(x)`` so the
    full composition sums to one. Returns -merit so a minimizer maximizes
    the merit function.
    """
    # Append the closing component so fractions sum to 1.
    w = np.append(np.asarray(x, dtype=float), 1.0 - np.sum(x))
    props = {key: blend(w, propvec, key)
             for key in ('RON', 'S', 'HoV', 'SL', 'AFR_STOICH',
                         'LFV150', 'PMI', 'COST')}
    f0 = -mmf_single(RON=props['RON'], S=props['S'], HoV=props['HoV'],
                     AFR=props['AFR_STOICH'], PMI=props['PMI'], K=Kinp)
    return f0
def objective_function_exp(x, propvec, Kinp):
    """Negated merit for a composition vector that may omit its last entry.

    When ``x`` has exactly 21 entries, a 22nd closing component equal to
    ``1 - sum(x)`` is appended so the fractions sum to one; otherwise ``x``
    is used as-is. (TODO: the hard-coded length 21 should be generalized.)
    """
    if len(x) == 21:
        w = np.append(np.asarray(x, dtype=float), 1.0 - np.sum(x))
    else:
        w = x
    props = {key: blend(w, propvec, key)
             for key in ('RON', 'S', 'HoV', 'SL', 'AFR_STOICH',
                         'LFV150', 'PMI', 'COST')}
    merit = mmf_single(RON=props['RON'], S=props['S'], HoV=props['HoV'],
                       AFR=props['AFR_STOICH'], PMI=props['PMI'], K=Kinp)
    return -merit  # minimize negative merit
def sample_mmf_k(bplist, kmean, kvar, nsamples):
    """Sample the merit function over a random K parameter.

    For each blend point in ``bplist``, draws ``nsamples`` K values from a
    normal distribution N(kmean, kvar) and evaluates the merit at each draw.

    Returns
    -------
    tuple(list, list)
        ``(mlist, klist)`` — per-blend-point arrays of merit samples and the
        K samples that produced them, in matching order.
    """
    mlist, klist = [], []
    for bp in bplist:
        ksamp = np.random.normal(kmean, kvar, nsamples)
        msamp = np.array([mmf_single(RON=bp['RON'], S=bp['S'], HoV=bp['HoV'],
                                     SL=bp['SL'], LFV150=bp['LFV150'],
                                     PMI=bp['PMI'], K=kval)
                          for kval in ksamp])
        klist.append(ksamp)
        mlist.append(msamp)
    return mlist, klist
def eval_MMF_gp(individual, propvec, Kinp, GP, scal):
    """Evaluate (merit, GP prediction) for an extended individual.

    The individual carries 22 blend fractions in positions 0..21 plus three
    normalized engine parameters in positions 22..24, which are rescaled to
    physical CA50 / IAT / KI values before being fed to the Gaussian-process
    surrogate.

    NOTE(review): Kinp is accepted but the merit call below hard-codes
    K=-1.25 — confirm whether Kinp should be used instead.
    """
    # print(individual)
    # individual = [RON, S, HOV, SL, LFV150, PMI, CA50, IAT, KI]
    # OG bounds CA50, IAT, KI, RON, S, HOV (lower)
    low = numpy.array([6.7, 35., 2., 99.1, 0., 303.])
    # OG bounds CA50, IAT, KI, RON, S, HOV (upper)
    up = numpy.array([23.8, 90., 10.5, 105.6, 12.2, 595.])
    # Rescale normalized genes [0,1] at positions 22-24 to physical units.
    CA50 = low[0] + (up[0] - low[0]) * individual[22]
    IAT = low[1] + (up[1] - low[1]) * individual[23]
    KI = low[2] + (up[2] - low[2]) * individual[24]
    # print(propvec)
    # Blend properties from the first 22 genes (the composition part).
    this_ron = blend(individual[0:22], propvec, 'RON')
    this_s = blend(individual[0:22], propvec, 'S')
    this_HoV = blend(individual[0:22], propvec, 'HoV')
    this_SL = blend(individual[0:22], propvec, 'SL')
    # this_AFR = blend(individual, propvec, 'AFR_STOICH')
    this_LFV150 = blend(individual[0:22], propvec, 'LFV150')
    this_PMI = blend(individual[0:22], propvec, 'PMI')
    cost_f = blend(individual[0:22], propvec, 'COST')
    # Merit with hard-coded K=-1.25 (see NOTE in docstring).
    merit_f = mmf_single(RON=this_ron, S=this_s, HoV=this_HoV,
                         SL=this_SL, K=-1.25)
    RON = this_ron
    S = this_s
    HOV = this_HoV
    # GP input ordering: CA50, IAT, KI, RON, S, HOV
    gp_in = numpy.array([[CA50, IAT, KI, RON, S, HOV]]).reshape(1, -1)
    pred_mean, pred_std = predict_GP(GP, scal, gp_in)
    # Return the merit and the GP's mean prediction for this point.
    return merit_f, pred_mean[0]
def eval_MMF_gp_opt(individual, propvec, Kinp, GP, scal):
    """Evaluate (merit, best GP prediction) for a composition-only individual.

    Unlike eval_MMF_gp, the engine parameters CA50/IAT/KI are not taken from
    the individual; instead, an inner local optimization (scipy ``minimize``
    over f_gp) searches for the best engine settings given the blended
    RON/S/HOV. Compositions whose blended RON/S/HOV fall outside the GP
    training domain receive a fixed bad fitness of (-100, -100).

    NOTE(review): Kinp is accepted but the merit call below hard-codes
    K=-1.25 — confirm whether Kinp should be used instead.
    """
    # print(individual)
    # individual = [RON, S, HOV, SL, LFV150, PMI, CA50, IAT, KI]
    # OG bounds CA50, IAT, KI, RON, S, HOV (lower)
    low = numpy.array([6.7, 35., 2., 99.1, 0., 303.])
    # OG bounds CA50, IAT, KI, RON, S, HOV (upper)
    up = numpy.array([23.8, 90., 10.5, 105.6, 12.2, 595.])
    # CA50 = low[0]+(up[0]-low[0])*individual[22]
    # IAT = low[1]+(up[1]-low[1])*individual[23]
    # KI = low[2]+(up[2]-low[2])*individual[24]
    # print(propvec)
    # Blend properties from the first 22 genes (the composition part).
    this_ron = blend(individual[0:22], propvec, 'RON')
    this_s = blend(individual[0:22], propvec, 'S')
    this_HoV = blend(individual[0:22], propvec, 'HoV')
    this_SL = blend(individual[0:22], propvec, 'SL')
    # this_AFR = blend(individual, propvec, 'AFR_STOICH')
    this_LFV150 = blend(individual[0:22], propvec, 'LFV150')
    this_PMI = blend(individual[0:22], propvec, 'PMI')
    cost_f = blend(individual[0:22], propvec, 'COST')
    # Merit with hard-coded K=-1.25 (see NOTE in docstring).
    merit_f = mmf_single(RON=this_ron, S=this_s, HoV=this_HoV,
                         SL=this_SL, K=-1.25)
    RON = this_ron
    S = this_s
    HOV = this_HoV
    # If the values for RON, S, HOV are outside the
    # GP training domain, assign bad fitness values.
    if ((RON < 99.1) or (RON > 105.6) or (S < 0) or
            (S > 12.2) or (HOV < 303) or (HOV > 595)):
        merit_f = -100
        mean_out = -100
    else:
        # print(RON, S, HOV)
        # RON, S, HOV fixed, now optimize over IAT, CA50, KI
        bound_list = [(6.7, 23.8), (35., 90.), (2., 10.5)]
        # fbest = numpy.inf
        # xnew = None
        # opti_exit = None
        # number of local searches that we want to do -- bump this up
        # if you want to do several
        # local searches (may lead to different local optima)
        # n_t = 5
        # for jj in range(n_t):
        # Start the local search from the center of the CA50/IAT/KI box.
        x0 = (numpy.array([23.8, 90., 10.5]) +
              numpy.array([6.7, 35., 2.])) / 2.
        # numpy.asarray([6.7, 35., 2.]) + numpy.asarray(numpy.array([23.8, 90., 10.5])-numpy.array([6.7, 35., 2.])) * numpy.asarray(numpy.random.rand(1,3)) #random starting point
        res = minimize(f_gp, x0, bounds=bound_list,
                       args=(RON, S, HOV, GP, scal, ))
        # f_gp is minimized, so negate to report the best (maximal) GP mean.
        mean_out = -f_gp(res.x, RON, S, HOV, GP, scal)
    return merit_f, mean_out