def run_LPs_per_forecast(numforecasts, mC, tsteps, adpftdmgattr, adpoutageattr, features, lpoutageattr, \
                         poleval="adp"):
    """For each forecast, evaluate the chosen policy ("adp" or "alt") to fix the temporary-arc
    installation decisions, then solve a separate LP SAA at the final time step with those arcs
    fixed. Returns (polpicks, lpobjval), one entry per forecast."""
    policyEvalModels = [None for nf in xrange(numforecasts)]
    LPSAAModel = [None for nf in xrange(numforecasts)]
    lpobjval = [None for nf in xrange(numforecasts)]
    polpicks = [None for nf in xrange(numforecasts)]
    # find multiple policy evaluations for robustness
    for nf in xrange(numforecasts):
        # step 1: evaluation of policy
        if poleval == "alt":
            netobj = copy.deepcopy(mC["netobj"])
            samplesize = len(lpoutageattr[0].values()[0])
            rankedarcs = exeADP.alt_vfa_calculation(tsteps, mC, netobj, features, numforecasts, adpftdmgattr)
            policies = exeADP.alt_control_step(tsteps, mC, netobj, samplesize, rankedarcs)
            polpicks[nf] = policies[-1]
        elif poleval == "adp":
            policyEvalModels[nf] = exeADP.create_models_temp_framework(tsteps, mC)
            # override netobj's info with the stored scenario data
            for t in xrange(tsteps):
                nx.set_edge_attributes(mC["netobj"].dinet, "t"+str(t)+"_dmg_pct", adpftdmgattr[nf][t])
                nx.set_edge_attributes(mC["netobj"].dinet, "t"+str(t)+"_capacity", adpoutageattr[nf][t])
            # solve policy search problem to find install arcs
            mC["mcsim"].eval_adp_basis_functions(mC["netobj"], tsteps, mC)
            featurelist = mC["mcsim"].calc_feature_ctrmassquad(mC["netobj"], tsteps, features)
            if "ud_vfa_combined" in mC["polmethod"] or "node_constr" in mC["polmethod"]:
                exeADP.update_models_orig_scens(policyEvalModels[nf], mC, mrange=xrange(len(policyEvalModels[nf])), \
                                                reset=True, updateConstr=True)
            elif "ud_vfa_separate" in mC["polmethod"]:
                exeADP.update_models_orig_scens(policyEvalModels[nf], mC, mrange=xrange(len(policyEvalModels[nf])), \
                                                reset=True, updateConstr=False)
            else:
                print "run_LPs_per_forecast: invalid policy method specified."
                __dbg = raw_input("execution halted, press ENTER to exit")
                sys.exit()
            mC["instchoices"] = [ [0]*len(mC["temp_var_insts"]) for i in xrange(tsteps)]
            mC["previnsts"] = [[] for i in xrange(tsteps)]
            for t in xrange(tsteps):
                optpolicy,tflows,slks,bov,vfav = exeADP.adp_policy_search(t, featurelist, policyEvalModels[nf], mC, \
                                                                          randpol=False)
            # this is all we care about -- the policy evaluation at the last time step (i.e. all installed arcs)
            polpicks[nf] = optpolicy
            #DEBUG begin
##            idx = [ i for i in xrange(len(optpolicy)) if int(round(optpolicy[i]))]
##            instnames = [mC["temp_var_insts"][i] for i in idx]
##            ltdict = dict()
##            for ins in instnames:
##                crds = exeADP.convert_cplex_structs(cpobj=ins)
##                if mC["netobj"].tnet.edge[crds[0]][crds[1]]["linetype"] not in ltdict:
##                    ltdict[mC["netobj"].tnet.edge[crds[0]][crds[1]]["linetype"]] = 1
##                else:
##                    ltdict[mC["netobj"].tnet.edge[crds[0]][crds[1]]["linetype"]] += 1
##            print "\nLinetype counts for forecast "+str(nf)
##            for k,v in ltdict.iteritems():
##                print k+": "+str(v)
##            print "policy eval slacks: "+str(len([i for i in slks if int(round(i,4)) != 0]))
            #DEBUG end

        # step 2: fix the temp arcs to the decisions and run an LP SAA over these samples at time T
        LPSAAModel[nf] = exeADP.create_models_temp_framework(tsteps, mC)
        nx.set_edge_attributes(mC["netobj"].dinet, "t"+str(tsteps-1)+"_capacity", lpoutageattr[nf])
        exeADP.update_models_orig_scens(LPSAAModel[nf], mC, numscens=len(lpoutageattr[nf].values()[0]), \
                                        mrange=[tsteps-1], reset=True, updateConstr=True)
        LPSAAModel[nf][tsteps-1].variables.set_types([ (mC["temp_var_insts"][j], \
                                                        LPSAAModel[nf][tsteps-1].variables.type.continuous) \
                                                       for j in xrange(len(mC["temp_var_insts"])) ])
        LPSAAModel[nf][tsteps-1].variables.set_lower_bounds( \
            zip(mC["temp_var_insts"], polpicks[nf]) )
        LPSAAModel[nf][tsteps-1].variables.set_upper_bounds( \
            zip(mC["temp_var_insts"], polpicks[nf]) )
        LPSAAModel[nf][tsteps-1].set_problem_type(LPSAAModel[nf][tsteps-1].problem_type.LP)
        try:
            LPSAAModel[nf][tsteps-1].solve()
            lpobjval[nf] = LPSAAModel[nf][tsteps-1].solution.get_objective_value()
            #DEBUG begin
##            slaks = LPSAAModel[nf][tsteps-1].solution.get_values(mC["slak_var_names"])
##            print "avg # of slacks: "+str(len([i for i in slaks if int(round(i,4)) != 0]) / \
##                                          float(len(lpoutageattr[nf].values()[0])) )
            #DEBUG end
        except:
            print "testADP: test LP SAA model infeasible"
            LPSAAModel[nf][tsteps-1].conflict.refine(LPSAAModel[nf][tsteps-1].conflict.all_constraints())
            conflicts = LPSAAModel[nf][tsteps-1].conflict.get()
            conflicts = [i for i in xrange(len(conflicts)) if conflicts[i] != -1]
            cgs = LPSAAModel[nf][tsteps-1].conflict.get_groups(conflicts)
            # split conflict members by type: 1 = lower bound, 2 = upper bound, 3 = linear constraint
            ubcs = [j[0][1] for i,j in cgs if j[0][0] == 2]
            lbcs = [j[0][1] for i,j in cgs if j[0][0] == 1]
            lccs = [j[0][1] for i,j in cgs if j[0][0] == 3]
            constrByVar = exeADP.find_constr_4_vars(LPSAAModel[nf][tsteps-1], \
                                                    LPSAAModel[nf][tsteps-1].variables.get_names())
            conflConstrs = exeADP.find_constr_4_vars(LPSAAModel[nf][tsteps-1], \
                                                     LPSAAModel[nf][tsteps-1].linear_constraints.get_names(lccs), \
                                                     vartype="constraint")
            __dbg = raw_input("execution halted, press ENTER to exit")
            sys.exit()

    # this is INCREDIBLY slow, and there's tsteps*numforecasts models, so we'll sacrifice memory for now
##    exeADP.trash_model([k for l in LPSAAModel for k in l])
##    exeADP.trash_model([k for l in policyEvalModels for k in l])
    return polpicks, lpobjval
def run_one_big_LPSAA(numforecasts, mC, tsteps, adpftdmgattr, adpoutageattr, features, lpoutageattr):
    """Same policy evaluation as run_LPs_per_forecast, but all forecasts share a single LP SAA
    model at the final time step; only the bounds fixing the temporary install variables change
    between forecasts. Returns (polpicks, lpobjval), one entry per forecast."""
    policyEvalModels = [None for nf in xrange(numforecasts)]
    polpicks = [None for nf in xrange(numforecasts)]
    lpobjval = [None for nf in xrange(numforecasts)]
    LPSAAModel = exeADP.create_models_temp_framework(tsteps, mC)
    nx.set_edge_attributes(mC["netobj"].dinet, "t"+str(tsteps-1)+"_capacity", lpoutageattr)
    exeADP.update_models_orig_scens(LPSAAModel, mC, numscens=len(lpoutageattr.values()[0]), mrange=[tsteps-1], \
                                    reset=True, updateConstr=True)
    LPSAAModel[tsteps-1].variables.set_types([ (mC["temp_var_insts"][j], \
                                                LPSAAModel[tsteps-1].variables.type.continuous) \
                                               for j in xrange(len(mC["temp_var_insts"])) ])
    LPSAAModel[tsteps-1].set_problem_type(LPSAAModel[tsteps-1].problem_type.LP)

    # find multiple policy evaluations for robustness
    for nf in xrange(numforecasts):
        # step 5: evaluation of policy
        policyEvalModels[nf] = exeADP.create_models_temp_framework(tsteps, mC)
        # override netobj's info with the stored scenario data
        for t in xrange(tsteps):
            nx.set_edge_attributes(mC["netobj"].dinet, "t"+str(t)+"_dmg_pct", adpftdmgattr[nf][t])
            nx.set_edge_attributes(mC["netobj"].dinet, "t"+str(t)+"_capacity", adpoutageattr[nf][t])
        mC["mcsim"].eval_adp_basis_functions(mC["netobj"], tsteps, mC)
        featurelist = mC["mcsim"].calc_feature_ctrmassquad(mC["netobj"], tsteps, features)
        exeADP.update_models_orig_scens(policyEvalModels[nf], mC, mrange=xrange(len(policyEvalModels[nf])), \
                                        reset=True, updateConstr=True)
        # solve policy search problem to find install arcs
        mC["instchoices"] = [ [0]*len(mC["temp_var_insts"]) for i in xrange(tsteps)]
        mC["previnsts"] = [[] for i in xrange(tsteps)]
        for t in xrange(tsteps):
            #TODO6: this may be semantically out of date
            optpolicy,tflows,slks,bov,vfav = exeADP.adp_policy_search(t, featurelist, policyEvalModels[nf], mC, \
                                                                      randpol=False)
        # this is all we care about -- the policy evaluation at the last time step (i.e. all installed arcs)
        polpicks[nf] = optpolicy

        # step 6: fix the temp arcs to the decisions made in step 5 and run an LP SAA over these samples at time T
        LPSAAModel[tsteps-1].variables.set_lower_bounds( \
            [ (mC["temp_var_insts"][i], polpicks[nf][i]) for i in xrange(len(mC["temp_var_insts"])) ])
        LPSAAModel[tsteps-1].variables.set_upper_bounds( \
            [ (mC["temp_var_insts"][i], polpicks[nf][i]) for i in xrange(len(mC["temp_var_insts"])) ])
        try:
            LPSAAModel[tsteps-1].solve()
            lpobjval[nf] = LPSAAModel[tsteps-1].solution.get_objective_value()
        except:
            print "testADP: test LP SAA model infeasible"
            LPSAAModel[tsteps-1].conflict.refine(LPSAAModel[tsteps-1].conflict.all_constraints())
            conflicts = LPSAAModel[tsteps-1].conflict.get()
            conflicts = [i for i in xrange(len(conflicts)) if conflicts[i] != -1]
            cgs = LPSAAModel[tsteps-1].conflict.get_groups(conflicts)
            # split conflict members by type: 1 = lower bound, 2 = upper bound, 3 = linear constraint
            ubcs = [j[0][1] for i,j in cgs if j[0][0] == 2]
            lbcs = [j[0][1] for i,j in cgs if j[0][0] == 1]
            lccs = [j[0][1] for i,j in cgs if j[0][0] == 3]
            constrByVar = exeADP.find_constr_4_vars(LPSAAModel[tsteps-1], \
                                                    LPSAAModel[tsteps-1].variables.get_names())
            conflConstrs = exeADP.find_constr_4_vars(LPSAAModel[tsteps-1], \
                                                     LPSAAModel[tsteps-1].linear_constraints.get_names(lccs), \
                                                     vartype="constraint")
            __dbg = raw_input("execution halted, press ENTER to exit")
            sys.exit()

    exeADP.trash_model(LPSAAModel)
    exeADP.trash_model([k for l in policyEvalModels for k in l])
    return polpicks, lpobjval
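# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original test driver). It assumes
# a caller has already built the model container mC, the per-forecast damage
# and outage attribute dicts, the feature list, and the time-T outage samples
# via the exeADP setup routines; the values below are illustrative placeholders.
# Note that run_LPs_per_forecast expects lpoutageattr indexed per forecast
# (lpoutageattr[nf]), whereas run_one_big_LPSAA takes a single attribute dict.
##numforecasts, tsteps = 5, 3
##polpicks, lpobjval = run_LPs_per_forecast(numforecasts, mC, tsteps, adpftdmgattr, adpoutageattr, \
##                                          features, lpoutageattr, poleval="adp")
##bigpicks, bigobjval = run_one_big_LPSAA(numforecasts, mC, tsteps, adpftdmgattr, adpoutageattr, \
##                                        features, lpoutageattr[0])
# ---------------------------------------------------------------------------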