def run_one_big_IPSAA(numforecasts, mC, tsteps, ipoutageattr, samplesize):
    """Control benchmark: solve one large IP SAA over all numforecasts*samplesize outage scenarios at the final time step."""
    IPSAAModel = exeADP.create_models_temp_framework(tsteps, mC)
    nx.set_edge_attributes(mC["netobj"].dinet, "t"+str(tsteps-1)+"_capacity", ipoutageattr)
    exeADP.update_models_orig_scens(IPSAAModel, mC, numscens=numforecasts*samplesize, mrange=[tsteps-1], reset=True,\
                                    updateConstr=True)
    # single budget row: total installs over the whole horizon may not exceed tsteps * per-time-step budget
    IPSAAModel[tsteps-1].linear_constraints.add( \
        lin_expr = [cplex.SparsePair(ind = mC["temp_var_insts"], \
                                     val = [1]*len(mC["temp_var_insts"])) ], \
        senses = ["L"], rhs = [tsteps*mC["pertinstbudget"]], names = ["TInstConstr"] )
    try:
        IPSAAModel[tsteps-1].solve()
        vcontr = IPSAAModel[tsteps-1].solution.get_objective_value()
    except:
        print "testADP: control IP SAA model infeasible"
        __dbg = raw_input("execution halted, press ENTER to exit")
        sys.exit()
    exeADP.trash_model(IPSAAModel)
    sys.stdout.write("Done.\n")
    return vcontr
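# Illustrative only: the "TInstConstr" row above follows the standard CPLEX Python API
# pattern for a single knapsack-style budget constraint sum(x_i) <= b. The toy model
# below is not part of this test harness; it is a self-contained sketch of the same
# linear_constraints.add / SparsePair call on three hypothetical binary variables.
def _sketch_budget_row():
    toy = cplex.Cplex()
    toy.variables.add(names=["x0", "x1", "x2"], types=["B"]*3, obj=[1.0, 2.0, 3.0])
    toy.objective.set_sense(toy.objective.sense.maximize)
    # at most two of the three items may be chosen
    toy.linear_constraints.add(lin_expr=[cplex.SparsePair(ind=["x0", "x1", "x2"], val=[1.0]*3)],
                               senses=["L"], rhs=[2.0], names=["ToyBudget"])
    toy.solve()
    return toy.solution.get_objective_value()   # picks the two largest-profit items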
def run_one_big_LPSAA(numforecasts, mC, tsteps, adpftdmgattr, adpoutageattr, features, lpoutageattr):
    """Per forecast, run the ADP policy search, fix the chosen installs, and score them with an LP SAA at the final time step."""
    policyEvalModels = [None for nf in xrange(numforecasts)]
    polpicks = [None for nf in xrange(numforecasts)]
    lpobjval = [None for nf in xrange(numforecasts)]
    LPSAAModel = exeADP.create_models_temp_framework(tsteps, mC)
    nx.set_edge_attributes(mC["netobj"].dinet, "t"+str(tsteps-1)+"_capacity", lpoutageattr)
    exeADP.update_models_orig_scens(LPSAAModel, mC, numscens=len(lpoutageattr.values()[0]), mrange=[tsteps-1], \
                                    reset=True, updateConstr=True)
    # relax the install variables to continuous so the SAA model solves as an LP
    LPSAAModel[tsteps-1].variables.set_types([ (mC["temp_var_insts"][j], \
                                                LPSAAModel[tsteps-1].variables.type.continuous) \
                                               for j in xrange(len(mC["temp_var_insts"])) ])
    LPSAAModel[tsteps-1].set_problem_type(LPSAAModel[tsteps-1].problem_type.LP)

    # find multiple policy evaluations for robustness
    for nf in xrange(numforecasts):
        # step 5: evaluation of policy
        policyEvalModels[nf] = exeADP.create_models_temp_framework(tsteps, mC)
        # override netobj's info with the stored scenario data
        for t in xrange(tsteps):
            nx.set_edge_attributes(mC["netobj"].dinet, "t"+str(t)+"_dmg_pct", adpftdmgattr[nf][t])
            nx.set_edge_attributes(mC["netobj"].dinet, "t"+str(t)+"_capacity", adpoutageattr[nf][t])
        mC["mcsim"].eval_adp_basis_functions(mC["netobj"], tsteps, mC)
        featurelist = mC["mcsim"].calc_feature_ctrmassquad(mC["netobj"], tsteps, features)
        exeADP.update_models_orig_scens(policyEvalModels[nf], mC, mrange=xrange(len(policyEvalModels[nf])), \
                                        reset=True, updateConstr=True)
        # solve policy search problem to find install arcs
        mC["instchoices"] = [ [0]*len(mC["temp_var_insts"]) for i in xrange(tsteps)]
        mC["previnsts"] = [[] for i in xrange(tsteps)]
        for t in xrange(tsteps):
            #TODO6: this may be semantically out of date
            optpolicy,tflows,slks,bov,vfav = exeADP.adp_policy_search(t, featurelist, policyEvalModels[nf], mC, \
                                                                      randpol=False)
        # this is all we care about -- the policy evaluation at the last time step (i.e. all installed arcs)
        polpicks[nf] = optpolicy

        # step 6: fix the temp arcs to the decisions made in 5 and run an LP SAA over these samples at time T
        LPSAAModel[tsteps-1].variables.set_lower_bounds( \
            [ (mC["temp_var_insts"][i], polpicks[nf][i]) for i in xrange(len(mC["temp_var_insts"])) ])
        LPSAAModel[tsteps-1].variables.set_upper_bounds( \
            [ (mC["temp_var_insts"][i], polpicks[nf][i]) for i in xrange(len(mC["temp_var_insts"])) ])
        try:
            LPSAAModel[tsteps-1].solve()
            lpobjval[nf] = LPSAAModel[tsteps-1].solution.get_objective_value()
        except:
            print "testADP: test LP SAA model infeasible"
            # diagnose the infeasibility: refine a conflict, then split it by bound/constraint type
            LPSAAModel[tsteps-1].conflict.refine(LPSAAModel[tsteps-1].conflict.all_constraints())
            conflicts = LPSAAModel[tsteps-1].conflict.get()
            conflicts = [i for i in xrange(len(conflicts)) if conflicts[i] != -1]
            cgs = LPSAAModel[tsteps-1].conflict.get_groups(conflicts)
            ubcs = [j[0][1] for i,j in cgs if j[0][0] == 2]   # conflicting upper bounds
            lbcs = [j[0][1] for i,j in cgs if j[0][0] == 1]   # conflicting lower bounds
            lccs = [j[0][1] for i,j in cgs if j[0][0] == 3]   # conflicting linear constraints
            constrByVar = exeADP.find_constr_4_vars(LPSAAModel[tsteps-1], \
                                                    LPSAAModel[tsteps-1].variables.get_names())
            conflConstrs = exeADP.find_constr_4_vars(LPSAAModel[tsteps-1], \
                                                     LPSAAModel[tsteps-1].linear_constraints.get_names(lccs), \
                                                     vartype="constraint")
            __dbg = raw_input("execution halted, press ENTER to exit")
            sys.exit()
    exeADP.trash_model(LPSAAModel)
    exeADP.trash_model([k for l in policyEvalModels for k in l])
    return polpicks, lpobjval
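# Illustrative only: a rough sketch of how the two benchmarks above might be compared.
# It assumes the caller has already assembled mC, tsteps, the per-forecast ADP scenario
# data (adpftdmgattr, adpoutageattr), the feature list, and the outage attribute dicts;
# no driver like this exists in this module, and the averaging/reporting is just one
# plausible way to summarize the per-forecast results.
def _sketch_compare_policies(numforecasts, mC, tsteps, adpftdmgattr, adpoutageattr, features,
                             ipoutageattr, lpoutageattr, samplesize):
    vcontr = run_one_big_IPSAA(numforecasts, mC, tsteps, ipoutageattr, samplesize)
    polpicks, lpobjval = run_one_big_LPSAA(numforecasts, mC, tsteps, adpftdmgattr, adpoutageattr,
                                           features, lpoutageattr)
    # average the per-forecast policy evaluations and report the difference from the IP control
    avglp = sum(lpobjval)/float(len(lpobjval))
    print "IP SAA control: %g  |  mean LP SAA under ADP policy: %g  |  difference: %g" % \
          (vcontr, avglp, avglp - vcontr)
    return polpicks, avglp - vcontr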