def execute(self, *args, **kwargs):
    """Compose the unique test-report filename, record it in the master
    table, and write the report — unless no tests are queued.

    The report name is ``[<target>-]<date>-<os_mach><ext>`` inside
    ``testReportDir``; the directory is created on demand.
    """
    master_tbl = MasterTbl()
    target = master_tbl['target']
    stamp = full_date_string(master_tbl['origEpoch'])

    # Prefix the name with the build target, when one is set.
    name_prefix = target + '-' if target != "" else ""
    report_uuid = name_prefix + stamp + '-' + master_tbl['os_mach']
    report_fn = os.path.join(master_tbl['testReportDir'],
                             report_uuid + master_tbl['testRptExt'])
    master_tbl['tstReportFn'] = report_fn

    #--------------------------------------------------------
    # Do not create a report when there are no tests to run
    if not master_tbl['tstT']:
        return

    report_table = build_test_reportT('', master_tbl)
    report_dir = os.path.dirname(report_fn)
    if not os.path.exists(report_dir):
        os.makedirs(report_dir)
    write_table(report_fn, report_table)
def run_test(self, masterTbl, tst, iTest, num_tests):
    """Materialize and run one test.

    Builds the environment table for the test, expands its run script via
    the job-submission backend, writes the script into the test's output
    directory, marks it executable, and runs it (possibly in the
    background for BATCH submissions).

    Parameters:
        masterTbl  -- global master table (provides 'projectDir' etc.)
        tst        -- test object (``.get``/``.set`` accessors)
        iTest      -- 1-based index of this test, for progress messages
        num_tests  -- total number of tests, for progress messages

    Raises: re-raises any exception from ``expand_run_script`` after
    reporting the failing test file on stderr.
    """
    # Keys whose values are file paths and must be resolved against projectDir.
    fn_envA = ['testDir', 'outputDir', 'resultFn', 'testdescriptFn',
               'cmdResultFn', 'messageFn', 'runtimeFn']
    # Keys copied through verbatim.
    envA = ('idtag', 'test_name', 'packageName', 'packageDir',
            'TARGET', 'target', 'tag')
    envTbl = {}
    projectDir = masterTbl['projectDir']
    for v in fn_envA:
        envTbl[v] = fullFn(projectDir, tst.get(v))
    for v in envA:
        envTbl[v] = tst.get(v)
    envTbl['projectDir'] = masterTbl['projectDir']

    job_submit_method = tst.get('job_submit_method')
    job = JobSubmitBase.build(job_submit_method, masterTbl)
    try:
        run_script = tst.expand_run_script(envTbl, job)
    except Exception:
        to_stderr("Problem: ", "Failed to create job script for test file: ", tst.get('fn'), "\n")
        raise

    cwd = os.getcwd()
    os.chdir(envTbl['outputDir'])
    try:
        # Record "started" status and the start time before launching.
        resultFn = fullFn(projectDir, tst.get('resultFn'))
        write_table(resultFn, resultTbl['started'])
        stime = {'T0': time.time(), 'T1': -1}  # T1 filled in at completion
        runtimeFn = fullFn(projectDir, tst.get('runtimeFn'))
        write_table(runtimeFn, stime)

        # Write the generated script and make it executable.
        idtag = tst.get('idtag')
        scriptFn = idtag + ".script"
        with open(scriptFn, "w") as f:
            f.write(tst.top_of_script())
            f.write(run_script)
        st = os.stat(scriptFn)
        os.chmod(scriptFn, st.st_mode | stat.S_IEXEC)

        ident = tst.get('id')
        # BATCH submissions always run in the background.
        background = tst.get('background') or (job_submit_method == "BATCH")
        tst.set('runInBackground', background)
        job.msg('Started', iTest, num_tests, ident, envTbl['resultFn'], background)
        job.runtest(scriptFn=scriptFn, idtag=idtag, background=background)
        job.msg('Finished', iTest, num_tests, ident, envTbl['resultFn'], background)
    finally:
        # Restore the caller's working directory even if the run fails.
        os.chdir(cwd)
def old_post_ph_execution(self, ph):
    # Legacy ("old_") post-progressive-hedging hook: dumps build-variable
    # values from an arbitrary solved scenario instance to a .tsv file.
    # NOTE: when running in serial mode (without pyro),
    # ph.get_scenario_tree().get_arbitrary_scenario()._instance
    # contains a solved instance at this point, which can be used
    # to report results, as done below. However, on parallel systems,
    # this instance doesn't exist yet. So we have to use the variable
    # ids and names from the RootNode as shown above.
    # NOTE(review): the pdb trace below drops into an interactive debugger
    # every time this runs — presumably deliberate in this "old_" method,
    # but confirm before calling it in automated runs.
    import pdb
    pdb.Pdb(stdout=sys.__stdout__).set_trace( ) # have to grab original stdout, or this crashes
    # note: it is not clear whether all scenarios in this node have been
    # pushed to the same solution at this point, but they should at least
    # be close. The progressive hedging code sometimes uses a function
    # ExtractInternalNodeSolutionsforInner to choose the scenario which is
    # closest to the average, which would be great to use here. But it's
    # not clear how the result of this function should be used. One would
    # hope it was used to provide data for the
    # ph._scenario_tree.findRootNode().get_variable_value(name, index) function, but that
    # function says no data are available at this point
    # (and ph._scenario_tree.findRootNode()._solution is indeed empty at this point).
    # So, lacking any better option, we pull values from an arbitrary instance.
    # (This could just as well be ph._scenario_tree._scenarios[0]._instance, and in fact it is.)
    m = ph.get_scenario_tree().get_arbitrary_scenario()._instance
    # collect the model components named in build_vars (shadows builtin 'vars')
    vars = [getattr(m, v) for v in build_vars if hasattr(m, v)]
    # flatten to the individual indexed variable-data objects
    vardata = [v[k] for v in vars for k in v]
    def safe_value(v):
        # Return the numeric value of v, or a placeholder string when unset.
        try:
            return value( v) # note: using v.value returns None, with no error
        except ValueError:
            # for some reason, some variable values are uninitialized,
            # which gives errors when they're accessed
            print "No value found for {v}.".format(v=v.cname())
            return "*** None ***"
    # Tag the output file with the SLURM job id, or a timestamp when not
    # running under SLURM ( ":" replaced so the tag is filename-safe).
    jobid = os.environ.get('SLURM_JOBID')
    if jobid is None:
        jobid = datetime.datetime.now().isoformat("_").replace(":", ".")
    print "writing results for job {}...".format(jobid)
    print "variables to write:"
    print ", ".join(["{v}[{k}]".format(v=v, k=k) for v in vars for k in v])
    util.write_table(m, vardata,
        output_file=os.path.join( "outputs", "build_{}.tsv".format(jobid)),
        headings=("variable", "value"),
        values=lambda m, v: (v.cname(), safe_value(v)))
def create_output_dirs(self, projectDir, tstT):
    """Create each test's output directory and seed its status files.

    Pass 1 creates any missing output directories; pass 2 writes the
    initial 'notrun' result table and the default runtime table for
    every test in *tstT*.
    """
    # Pass 1: make sure every test's output directory exists.
    for ident in tstT:
        test = tstT[ident]
        out_dir = fullFn(projectDir, test.get('outputDir'))
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)

    # Pass 2: seed the per-test result and runtime files.
    for ident in tstT:
        test = tstT[ident]
        result_path = fullFn(projectDir, test.get('resultFn'))
        runtime_path = fullFn(projectDir, test.get('runtimeFn'))
        write_table(result_path, resultTbl['notrun'])
        write_table(runtime_path, runtimeT)
def write_results(m):
    # Write the per-zone, per-timepoint energy-sources report
    # (energy_sources<tag>.tsv) for a solved model m.
    outputs_dir = m.options.outputs_dir
    tag = filename_tag(m)
    # Mean annual weight across all timeseries; used below to label a
    # timepoint's day as 'peak' (weight < 0.5*average) vs 'typical'.
    avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES)
    last_bid = m.DR_BID_LIST.last()
    # One row per (load_zone, timepoint); the headings tuple and the
    # values lambda are built by parallel concatenations, so their
    # segments must stay in the same order.
    util.write_table(
        m, m.LOAD_ZONES, m.TIMEPOINTS,
        output_file=os.path.join(outputs_dir, "energy_sources{t}.tsv".format(t=tag)),
        headings=
            ("load_zone", "period", "timepoint_label")
            +tuple(m.FUELS)
            +tuple(m.NON_FUEL_ENERGY_SOURCES)
            +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES)
            +tuple(m.Zone_Power_Injections)
            +tuple(m.Zone_Power_Withdrawals)
            +("marginal_cost","final_marginal_cost","price","bid_load","peak_day","base_load","base_price"),
        values=lambda m, z, t:
            (z, m.tp_period[t], m.tp_timestamp[t])
            # total dispatch by fuel (0.0 when a gen has no dispatch entry)
            +tuple(
                sum(get(m.DispatchGenByFuel, (p, t, f), 0.0) for p in m.GENS_BY_FUEL[f])
                for f in m.FUELS
            )
            # total dispatch by non-fuel energy source
            +tuple(
                sum(get(m.DispatchGen, (p, t), 0.0) for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s])
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            # curtailment = upper limit minus actual dispatch, per source
            +tuple(
                sum(
                    get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0)
                    for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s]
                )
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections)
            +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals)
            +(
                m.prev_marginal_cost[z, t],
                electricity_marginal_cost(m, z, t),
                # demand-response price/quantity from the most recent bid
                m.dr_price[last_bid, z, t],
                m.dr_bid[last_bid, z, t],
                'peak' if m.ts_scale_to_year[m.tp_ts[t]] < 0.5*avg_ts_scale else 'typical',
                m.base_data_dict[z, t][0],
                m.base_data_dict[z, t][1],
            )
    )
def write_results(m):
    # Write the per-zone, per-timepoint energy-sources report
    # (energy_sources<tag>.tsv) for a solved model m.
    # NOTE(review): near-duplicate of another write_results in this file,
    # differing only in formatting — presumably from a parallel module.
    outputs_dir = m.options.outputs_dir
    tag = filename_tag(m)
    # Mean annual weight across timeseries; timepoints on days weighted
    # below half of this are labeled 'peak' in the output.
    avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES)) / len(m.TIMESERIES)
    last_bid = m.DR_BID_LIST.last()
    # Headings and the values lambda are built from parallel tuple
    # concatenations; segment order must match between the two.
    util.write_table(
        m,
        m.LOAD_ZONES,
        m.TIMEPOINTS,
        output_file=os.path.join(outputs_dir, "energy_sources{t}.tsv".format(t=tag)),
        headings=("load_zone", "period", "timepoint_label")
        + tuple(m.FUELS)
        + tuple(m.NON_FUEL_ENERGY_SOURCES)
        + tuple("curtail_" + s for s in m.NON_FUEL_ENERGY_SOURCES)
        + tuple(m.Zone_Power_Injections)
        + tuple(m.Zone_Power_Withdrawals)
        + ("marginal_cost", "final_marginal_cost", "price", "bid_load",
           "peak_day", "base_load", "base_price"),
        values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t])
        # dispatch by fuel (0.0 default when no dispatch entry exists)
        + tuple(
            sum(
                get(m.DispatchGenByFuel, (p, t, f), 0.0)
                for p in m.GENS_BY_FUEL[f])
            for f in m.FUELS)
        # dispatch by non-fuel energy source
        + tuple(
            sum(
                get(m.DispatchGen, (p, t), 0.0)
                for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s])
            for s in m.NON_FUEL_ENERGY_SOURCES)
        # curtailment = upper limit minus dispatch, per source
        + tuple(
            sum(
                get(m.DispatchUpperLimit, (p, t), 0.0)
                - get(m.DispatchGen, (p, t), 0.0)
                for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s])
            for s in m.NON_FUEL_ENERGY_SOURCES)
        + tuple(
            getattr(m, component)[z, t]
            for component in m.Zone_Power_Injections)
        + tuple(
            getattr(m, component)[z, t]
            for component in m.Zone_Power_Withdrawals)
        + (
            m.prev_marginal_cost[z, t],
            electricity_marginal_cost(m, z, t),
            # demand-response price/quantity from the latest bid
            m.dr_price[last_bid, z, t],
            m.dr_bid[last_bid, z, t],
            'peak' if m.ts_scale_to_year[m.tp_ts[t]] < 0.5 * avg_ts_scale else 'typical',
            m.base_data_dict[z, t][0],
            m.base_data_dict[z, t][1],
        ))
def write_results(m, tag=None):
    # Write dispatch, energy-sources and capacity reports for solved model
    # m into output_dir, with filenames suffixed by "_<tag>" when given.
    # format the tag to append to file names (if any)
    if tag is not None:
        t = "_"+str(tag)
    else:
        t = ""
    # write out results
    # Dispatch of every project at every timepoint (0.0 when absent).
    util.write_table(m, m.TIMEPOINTS,
        output_file=os.path.join(output_dir, "dispatch{t}.txt".format(t=t)),
        headings=("timepoint_label",)+tuple(m.PROJECTS),
        values=lambda m, t: (m.tp_timestamp[t],) + tuple(
            get(m.DispatchProj, (p, t), 0.0)
            for p in m.PROJECTS
        )
    )
    # Energy sources per (load_zone, timepoint): headings and values are
    # parallel tuple concatenations and must stay in the same order.
    util.write_table(
        m, m.LOAD_ZONES, m.TIMEPOINTS,
        output_file=os.path.join(output_dir, "energy_sources{t}.txt".format(t=t)),
        headings=
            ("load_zone", "timepoint_label")
            +tuple(m.FUELS)
            +tuple(m.NON_FUEL_ENERGY_SOURCES)
            +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES)
            +tuple(m.LZ_Energy_Components_Produce)
            +tuple(m.LZ_Energy_Components_Consume)
            +("marginal_cost",),
        values=lambda m, z, t:
            (z, m.tp_timestamp[t])
            # dispatch by fuel
            +tuple(
                sum(get(m.DispatchProj, (p, t), 0.0) for p in m.PROJECTS_BY_FUEL[f])
                for f in m.FUELS
            )
            # dispatch by non-fuel energy source
            +tuple(
                sum(get(m.DispatchProj, (p, t), 0.0) for p in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s])
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            # curtailment = upper limit minus dispatch, per source
            +tuple(
                sum(
                    get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchProj, (p, t), 0.0)
                    for p in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]
                )
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            # NOTE: these two sum across *all* load zones, not just z —
            # presumably intentional (zone-aggregate columns); verify.
            +tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
                   for component in m.LZ_Energy_Components_Produce)
            +tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
                   for component in m.LZ_Energy_Components_Consume)
            # dual of the energy-balance constraint, discounted to base year
            +(m.dual[m.Energy_Balance[z, t]]/m.bring_timepoint_costs_to_base_year[t],)
    )
    # Projects that were ever built to more than 0.001 capacity in any period.
    built_proj = tuple(set(
        pr for pe in m.PERIODS for pr in m.PROJECTS
        if value(m.ProjCapacity[pr, pe]) > 0.001
    ))
    util.write_table(m, m.PERIODS,
        output_file=os.path.join(output_dir, "capacity{t}.txt".format(t=t)),
        headings=("period",)+built_proj,
        values=lambda m, pe: (pe,) + tuple(m.ProjCapacity[pr, pe] for pr in built_proj)
    )
def write_results(tag=None):
    # Write dispatch, load-balance, energy-sources and marginal-cost
    # reports for the module-level `switch_instance` model into "outputs",
    # with filenames suffixed by "_<tag>" when given.
    if tag is not None:
        t = "_"+str(tag)
    else:
        t = ""
    # write out results
    # Dispatch of every project at every timepoint.
    util.write_table(switch_instance, switch_instance.TIMEPOINTS,
        output_file=os.path.join("outputs", "dispatch{t}.txt".format(t=t)),
        headings=("timepoint_label",)+tuple(switch_instance.PROJECTS),
        values=lambda m, t: (m.tp_timestamp[t],) + tuple(
            m.DispatchProj_AllTimePoints[p, t]
            for p in m.PROJECTS
        )
    )
    # System-wide load balance plus the average marginal cost across zones.
    util.write_table(switch_instance, switch_instance.TIMEPOINTS,
        output_file=os.path.join("outputs", "load_balance{t}.txt".format(t=t)),
        headings=
            ("timepoint_label",)
            +tuple(switch_instance.LZ_Energy_Components_Produce)
            +tuple(switch_instance.LZ_Energy_Components_Consume)
            +("marginal_cost",),
        values=lambda m, t:
            (m.tp_timestamp[t],)
            +tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
                   for component in m.LZ_Energy_Components_Produce)
            +tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
                   for component in m.LZ_Energy_Components_Consume)
            # mean (over zones) of the energy-balance dual, discounted to base year
            +(sum(m.dual[m.Energy_Balance[lz, t]]/m.bring_timepoint_costs_to_base_year[t]
                  for lz in m.LOAD_ZONES)
              /len(m.LOAD_ZONES),)
    )
    # note: the queries below could be sped up slightly by defining an indexed set
    # of projects that use each particular energy source
    util.write_table(
        switch_instance, switch_instance.LOAD_ZONES, switch_instance.TIMEPOINTS,
        output_file=os.path.join("outputs", "energy_sources{t}.txt".format(t=t)),
        headings=
            ("load_zone", "timepoint_label")
            +tuple(switch_instance.FUELS)
            +tuple(switch_instance.NON_FUEL_ENERGY_SOURCES)
            +tuple("curtail_"+s for s in switch_instance.NON_FUEL_ENERGY_SOURCES)
            +tuple(switch_instance.LZ_Energy_Components_Produce)
            +tuple(switch_instance.LZ_Energy_Components_Consume)
            +("marginal_cost",),
        values=lambda m, z, t:
            (z, m.tp_timestamp[t])
            # dispatch by fuel
            +tuple(
                sum(m.DispatchProj_AllTimePoints[p, t] for p in m.PROJECTS_BY_FUEL[f])
                for f in m.FUELS
            )
            # dispatch by non-fuel energy source
            +tuple(
                sum(m.DispatchProj_AllTimePoints[p, t] for p in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s])
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            # curtailment = upper limit minus dispatch, per source
            +tuple(
                sum(
                    m.DispatchUpperLimit_AllTimePoints[p, t] - m.DispatchProj_AllTimePoints[p, t]
                    for p in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]
                )
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            # NOTE: these two sum across *all* load zones, not just z —
            # presumably intentional (zone-aggregate columns); verify.
            +tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
                   for component in m.LZ_Energy_Components_Produce)
            +tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
                   for component in m.LZ_Energy_Components_Consume)
            +(m.dual[m.Energy_Balance[z, t]]/m.bring_timepoint_costs_to_base_year[t],)
    )
    # Per-zone marginal cost at every timepoint.
    util.write_table(switch_instance, switch_instance.LOAD_ZONES, switch_instance.TIMEPOINTS,
        output_file=os.path.join("outputs", "marginal_cost{t}.txt".format(t=t)),
        headings=("timepoint_label", "load_zone", "marginal_cost"),
        values=lambda m, lz, tp: (m.tp_timestamp[tp], lz,
            m.dual[m.Energy_Balance[lz, tp]]/m.bring_timepoint_costs_to_base_year[tp])
    )
def execute(self, *args, **kwargs):
    # Build the human-readable test-results report (header, summary,
    # per-test result table, and — when anything failed — a table of
    # failing output directories), print it when tests ran, and write
    # the machine-readable report table to tstReportFn.
    #
    # humanDataA is a flat mark-up list consumed by format_human_data:
    # 0 resets, a positive int N opens an N-column table, -N closes it,
    # strings are literal lines, and lists are table rows.
    masterTbl = MasterTbl()
    rows, width = getTerminalSize()
    projectDir = masterTbl['projectDir']
    rptT = masterTbl['rptT']
    humanDataA = []
    tstSummaryT = masterTbl['tstSummaryT']
    # Total time as HH:MM:SS plus fractional seconds (".NN").
    totalTime = time.strftime("%T", time.gmtime(masterTbl['totalTestTime']))
    totalTime += ("%.2f" % (masterTbl['totalTestTime'] - int(masterTbl['totalTestTime'])))[1:]
    testresultT = Tst.test_result_values()
    tstSummaryT = masterTbl['tstSummaryT']  # (redundant re-assignment kept as-is)
    # Banner lines padded with '*' to the terminal width.
    HDR = "*"*width
    TR = "*** Test Results"
    TS = "*** Test Summary"
    TRl = width - len(TR) - 3
    TR = TR + " "*TRl + "***"
    TS = TS + " "*TRl + "***"
    humanDataA.append(0)
    humanDataA.append(HDR)
    humanDataA.append(TR)
    humanDataA.append(HDR)
    humanDataA.append(" ")
    humanDataA.append(0)
    humanDataA.append(2)
    humanDataA.append(["Date:", masterTbl['date']])
    humanDataA.append(["TARGET:", masterTbl['target']])
    humanDataA.append(["Themis Version:", masterTbl['ThemisVersion']])
    humanDataA.append(["Total Test Time:", totalTime])
    humanDataA.append(-2)
    humanDataA.append(0)
    humanDataA.append(HDR)
    humanDataA.append(TS)
    humanDataA.append(HDR)
    humanDataA.append(" ")
    humanDataA.append(0)
    humanDataA.append(2)
    humanDataA.append(["Total: ", tstSummaryT['total']])
    # One summary row per non-zero result category.
    for k in tstSummaryT:
        count = tstSummaryT[k]
        if (k != "total" and count > 0):
            humanDataA.append([k+":", count])
    humanDataA.append(-2)
    humanDataA.append(0)
    humanDataA.append(" ")
    humanDataA.append(0)
    humanDataA.append(5)
    humanDataA.append(["*******","*","****","*********","***************"])
    humanDataA.append(["Results","R","Time","Test Name","version/message"])
    humanDataA.append(["*******","*","****","*********","***************"])
    resultA = []
    for ident in rptT:
        tst = rptT[ident]
        aFlag = " "
        if (tst.get("active")):
            aFlag = "R"  # mark tests that actually ran
        result = tst.get('result')
        runtime = tst.get('strRuntime')
        # Sort key: result severity (higher testresultT value sorts first)
        # then test name; only known result values are reported.
        rIdx = str(10 - testresultT.get(result,0)) + "_" + ident
        txt = " "
        if (result in testresultT):
            resultA.append((rIdx, result, aFlag, runtime, ident, txt))
    resultA = sorted(resultA, key = lambda result: result[0])
    for v in resultA:
        humanDataA.append(v[1:])  # drop the sort key column
    humanDataA.append(-5)
    humanDataA.append(0)
    humanDataA.append(" ")
    humanDataA.append(0)
    # Extra section listing output dirs of non-passing tests, only when
    # something did not pass.
    if(tstSummaryT['total'] != tstSummaryT['passed']):
        humanDataA.append(2)
        humanDataA.append(["*******", "****************"])
        humanDataA.append(["Results", "Output Directory"])
        humanDataA.append(["*******", "****************"])
        resultA = []
        for ident in rptT:
            tst = rptT[ident]
            result = tst.get('result')
            if (result != "passed" and result in testresultT):
                resultA.append((result, fullFn(projectDir, tst.get('outputDir'))))
        resultA = sorted(resultA, key = lambda result: result[0] + "-" + result[1])
        for v in resultA:
            humanDataA.append(v)
        humanDataA.append(-2)
    humanData = self.format_human_data(humanDataA)
    if (tstSummaryT['total'] > 0):
        print(humanData)
    testreportT = build_test_reportT(humanData, masterTbl)
    write_table(masterTbl['tstReportFn'], testreportT)
def write_results(tag=None):
    # Write dispatch, load-balance, energy-sources and marginal-cost
    # reports for the module-level `switch_instance` model into "outputs",
    # with filenames suffixed by "_<tag>" when given. This variant filters
    # through PROJ_DISPATCH_POINTS (not every (project, timepoint) pair
    # is dispatchable).
    if tag is not None:
        t = "_"+str(tag)
    else:
        t = ""
    # write out results
    # Dispatch of every project at every timepoint; 0.0 for pairs that
    # are not valid dispatch points.
    util.write_table(switch_instance, switch_instance.TIMEPOINTS,
        output_file=os.path.join("outputs", "dispatch{t}.txt".format(t=t)),
        headings=("timepoint_label",)+tuple(switch_instance.PROJECTS),
        values=lambda m, t: (m.tp_timestamp[t],) + tuple(
            m.DispatchProj[p, t] if (p, t) in m.PROJ_DISPATCH_POINTS else 0.0
            for p in m.PROJECTS
        )
    )
    # System-wide load balance plus the zone-averaged marginal cost.
    util.write_table(switch_instance, switch_instance.TIMEPOINTS,
        output_file=os.path.join("outputs", "load_balance{t}.txt".format(t=t)),
        headings=
            ("timepoint_label",)
            +tuple(switch_instance.LZ_Energy_Balance_components)
            +("marginal_cost",),
        values=lambda m, t:
            (m.tp_timestamp[t],)
            +tuple(sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES)
                   for component in m.LZ_Energy_Balance_components)
            +(sum(m.dual[m.Energy_Balance[lz, t]]/m.bring_timepoint_costs_to_base_year[t]
                  for lz in m.LOAD_ZONES)
              /len(m.LOAD_ZONES),)
    )
    # Prepare tables of output data once, indexed by load_zone and timepoint,
    # so they can be spooled out row-by-row during the write_table operation.
    # Otherwise, there's a lot of redundant list scanning and filtering.
    # (An earlier commented-out draft of the energy-source sums, identical
    # to the generator expressions inside the call below, was removed.)
    util.write_table(
        switch_instance, switch_instance.LOAD_ZONES, switch_instance.TIMEPOINTS,
        output_file=os.path.join("outputs", "energy_sources{t}.txt".format(t=t)),
        headings=
            ("load_zone", "timepoint_label")
            +tuple(switch_instance.FUELS)
            +tuple(switch_instance.NON_FUEL_ENERGY_SOURCES)
            +tuple("curtail_"+s for s in switch_instance.NON_FUEL_ENERGY_SOURCES)
            +tuple(switch_instance.LZ_Energy_Balance_components)
            +("marginal_cost",),
        values=lambda m, z, t:
            (z, m.tp_timestamp[t])
            # dispatch by fuel: scan all dispatch points for this timepoint
            +tuple(
                sum(
                    m.DispatchProj[p, t]
                    for p, t_ in m.PROJ_DISPATCH_POINTS
                    if t_ == t and p in m.FUEL_BASED_PROJECTS and m.proj_fuel[p] == f
                )
                for f in m.FUELS
            )
            # dispatch by non-fuel energy source
            +tuple(
                sum(
                    m.DispatchProj[p, t]
                    for p, t_ in m.PROJ_DISPATCH_POINTS
                    if t_ == t and p in m.NON_FUEL_BASED_PROJECTS and m.proj_non_fuel_energy_source[p] == s
                )
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            # curtailment = upper limit minus dispatch, per source
            +tuple(
                sum(
                    m.DispatchUpperLimit[p, t] - m.DispatchProj[p, t]
                    for p, t_ in m.PROJ_DISPATCH_POINTS
                    if t_ == t and p in m.NON_FUEL_BASED_PROJECTS and m.proj_non_fuel_energy_source[p] == s
                )
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            +tuple(getattr(m, component)[z, t] for component in m.LZ_Energy_Balance_components)
            +(m.dual[m.Energy_Balance[z, t]]/m.bring_timepoint_costs_to_base_year[t],)
    )
    # Per-zone marginal cost at every timepoint.
    util.write_table(switch_instance, switch_instance.LOAD_ZONES, switch_instance.TIMEPOINTS,
        output_file=os.path.join("outputs", "marginal_cost{t}.txt".format(t=t)),
        headings=("timepoint_label", "load_zone", "marginal_cost"),
        values=lambda m, lz, tp: (m.tp_timestamp[tp], lz,
            m.dual[m.Energy_Balance[lz, tp]]/m.bring_timepoint_costs_to_base_year[tp])
    )
print "Model was infeasible; no results will be stored." print "Irreducible Infeasible Set (IIS) returned by solver:" print "\n".join(c.cname() for c in switch_instance.iis) if util.interactive_session: print "Unsolved model is available as switch_instance." else: # something other than infeasible... if util.interactive_session: print "Model solved successfully." # write out results try: util.write_table(switch_instance, switch_instance.TIMEPOINTS, output_file=os.path.join("outputs", "dispatch.txt"), headings=("timepoint_label",)+tuple(switch_instance.PROJECTS), values=lambda m, t: (m.tp_timestamp[t],) + tuple( m.DispatchProj[p, t] if (p, t) in m.PROJ_DISPATCH_POINTS else 0.0 for p in m.PROJECTS ) ) util.write_table(switch_instance, switch_instance.TIMEPOINTS, output_file=os.path.join("outputs", "load_balance.txt"), headings=("timepoint_label",)+tuple(switch_instance.LZ_Energy_Balance_components), values=lambda m, t: (m.tp_timestamp[t],) + tuple( sum(getattr(m, component)[lz, t] for lz in m.LOAD_ZONES) for component in m.LZ_Energy_Balance_components ) ) except Exception, e: print "An error occurred while writing results:" print "ERROR:", e if util.interactive_session:
def output(table, filename):
    # Thin convenience wrapper: write *table* to *filename* via util.write_table.
    util.write_table(table, filename)
def write_results(m, tag=None):
    """Write the full suite of scenario reports for a solved model *m*.

    Outputs (all .tsv, in ``output_dir``, suffixed with "_<tag>"):
      summary, energy_sources, capacity_by_technology,
      capacity_by_energy_source, cost_breakdown, and (when the fuel-markets
      module is active) rfm_activate.

    ``tag`` doubles as the scenario name passed to summary_headers /
    summary_values.
    """
    scenario = tag
    # format the tag to append to file names (if any)
    if tag is not None and tag != "":
        tag = "_"+str(tag)
    else:
        tag = ""
    util.write_table(m,
        output_file=os.path.join(output_dir, "summary{t}.tsv".format(t=tag)),
        headings=summary_headers(m, scenario),
        values=lambda m: summary_values(m, scenario)
    )
    # Mean annual timeseries weight; timepoints on days weighted below
    # average are labeled 'peak' in the energy_sources output.
    avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES)
    # One row per (load_zone, timepoint); headings and values are parallel
    # tuple concatenations and must stay in the same order.
    util.write_table(
        m, m.LOAD_ZONES, m.TIMEPOINTS,
        output_file=os.path.join(output_dir, "energy_sources{t}.tsv".format(t=tag)),
        headings=
            ("load_zone", "period", "timepoint_label")
            +tuple(m.FUELS)
            +tuple(m.NON_FUEL_ENERGY_SOURCES)
            +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES)
            +tuple(m.LZ_Energy_Components_Produce)
            +tuple(m.LZ_Energy_Components_Consume)
            +("marginal_cost","peak_day"),
        values=lambda m, z, t:
            (z, m.tp_period[t], m.tp_timestamp[t])
            # dispatch by fuel (0.0 default when no dispatch entry)
            +tuple(
                sum(get(m.DispatchProjByFuel, (p, t, f), 0.0) for p in m.PROJECTS_BY_FUEL[f])
                for f in m.FUELS
            )
            # dispatch by non-fuel energy source
            +tuple(
                sum(get(m.DispatchProj, (p, t), 0.0) for p in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s])
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            # curtailment = upper limit minus dispatch, per source
            +tuple(
                sum(
                    get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchProj, (p, t), 0.0)
                    for p in m.PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]
                )
                for s in m.NON_FUEL_ENERGY_SOURCES
            )
            +tuple(getattr(m, component)[z, t] for component in m.LZ_Energy_Components_Produce)
            +tuple(getattr(m, component)[z, t] for component in m.LZ_Energy_Components_Consume)
            +(get(m.dual, m.Energy_Balance[z, t], 0.0)/m.bring_timepoint_costs_to_base_year[t],
                # note: get() uses 0.0 if no dual available, i.e., with glpk solver
              'peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical')
    )

    # installed capacity information
    # Display name for a tech's energy source: fuel list for fuel-using
    # techs, otherwise its single non-fuel source.
    g_energy_source = lambda t: '/'.join(sorted(m.G_FUELS[t])) if m.g_uses_fuel[t] else m.g_energy_source[t]
    # Projects ever built above a 0.001 threshold, and their techs/sources.
    built_proj = tuple(set(
        pr for pe in m.PERIODS for pr in m.PROJECTS
        if value(m.ProjCapacity[pr, pe]) > 0.001
    ))
    built_tech = tuple(set(m.proj_gen_tech[p] for p in built_proj))
    built_energy_source = tuple(set(g_energy_source(t) for t in built_tech))
    # Battery power rating implied by energy capacity; 0.0 when the
    # batteries module is not loaded.
    battery_capacity_mw = lambda m, z, pe: (
        (m.Battery_Capacity[z, pe] * m.battery_max_discharge / m.battery_min_discharge_time)
        if hasattr(m, "Battery_Capacity") else 0.0
    )
    util.write_table(m, m.LOAD_ZONES, m.PERIODS,
        output_file=os.path.join(output_dir, "capacity_by_technology{t}.tsv".format(t=tag)),
        headings=("load_zone", "period") + built_tech + ("hydro", "batteries"),
        values=lambda m, z, pe: (z, pe,) + tuple(
            sum(m.ProjCapacity[pr, pe] for pr in built_proj
                if m.proj_gen_tech[pr] == t and m.proj_load_zone[pr] == z)
            for t in built_tech
        ) + (
            m.Pumped_Hydro_Capacity_MW[z, pe] if hasattr(m, "Pumped_Hydro_Capacity_MW") else 0,
            battery_capacity_mw(m, z, pe)
        )
    )
    util.write_table(m, m.LOAD_ZONES, m.PERIODS,
        output_file=os.path.join(output_dir, "capacity_by_energy_source{t}.tsv".format(t=tag)),
        headings=("load_zone", "period") + built_energy_source + ("hydro", "batteries"),
        values=lambda m, z, pe: (z, pe,) + tuple(
            sum(m.ProjCapacity[pr, pe] for pr in built_proj
                if g_energy_source(m.proj_gen_tech[pr]) == s and m.proj_load_zone[pr] == z)
            for s in built_energy_source
        ) + (
            m.Pumped_Hydro_Capacity_MW[z, pe] if hasattr(m, "Pumped_Hydro_Capacity_MW") else 0,
            battery_capacity_mw(m, z, pe)
        )
    )

    def cost_breakdown_details(m, z, pe):
        # One cost_breakdown row for (load_zone, period): capacity added
        # and overnight costs per tech, plus battery / hydro / hydrogen /
        # fuel-market columns. Column order must match the headings below.
        values = [z, pe]
        # capacity built, conventional plants
        values += [
            sum(
                m.BuildProj[pr, pe]
                for pr in built_proj
                if m.proj_gen_tech[pr] == t and m.proj_load_zone[pr] == z and (pr, pe) in m.BuildProj
            )
            for t in built_tech
        ]
        # capacity built, batteries, MW and MWh
        if hasattr(m, "BuildBattery"):
            values.extend([
                m.BuildBattery[z, pe]/m.battery_min_discharge_time,
                m.BuildBattery[z, pe]
            ])
        else:
            # BUG FIX: was values.append([0.0, 0.0]), which nested a list
            # into a single cell and misaligned all later columns.
            values.extend([0.0, 0.0])
        # capacity built, hydro
        values.append(
            sum(
                m.BuildPumpedHydroMW[pr, pe]
                for pr in m.PH_PROJECTS if m.ph_load_zone[pr]==z
            )
            if hasattr(m, "BuildPumpedHydroMW") else 0.0,
        )
        # capacity built, hydrogen
        if hasattr(m, "BuildElectrolyzerMW"):
            values.extend([
                m.BuildElectrolyzerMW[z, pe],
                m.BuildLiquifierKgPerHour[z, pe],
                m.BuildLiquidHydrogenTankKg[z, pe],
                m.BuildFuelCellMW[z, pe]
            ])
        else:
            values.extend([0.0, 0.0, 0.0, 0.0])
        # capital investments
        # regular projects
        values += [
            sum(
                m.BuildProj[pr, pe] * (m.proj_overnight_cost[pr, pe] + m.proj_connect_cost_per_mw[pr])
                for pr in built_proj
                if m.proj_gen_tech[pr] == t and m.proj_load_zone[pr] == z and (pr, pe) in m.PROJECT_BUILDYEARS
            )
            for t in built_tech
        ]
        # batteries
        values.append(
            m.BuildBattery[z, pe] * m.battery_capital_cost_per_mwh_capacity
            if hasattr(m, "BuildBattery") else 0.0
        )
        # hydro
        values.append(
            sum(
                m.BuildPumpedHydroMW[pr, pe] * m.ph_capital_cost_per_mw[pr]
                for pr in m.PH_PROJECTS if m.ph_load_zone[pr]==z
            )
            if hasattr(m, "BuildPumpedHydroMW") else 0.0,
        )
        # hydrogen
        if hasattr(m, "BuildElectrolyzerMW"):
            values.extend([
                m.BuildElectrolyzerMW[z, pe] * m.hydrogen_electrolyzer_capital_cost_per_mw,
                m.BuildLiquifierKgPerHour[z, pe] * m.hydrogen_liquifier_capital_cost_per_kg_per_hour,
                m.BuildLiquidHydrogenTankKg[z, pe] * m.liquid_hydrogen_tank_capital_cost_per_kg,
                m.BuildFuelCellMW[z, pe] * m.hydrogen_fuel_cell_capital_cost_per_mw
            ])
        else:
            values.extend([0.0, 0.0, 0.0, 0.0])
        # _annual_ fuel expenditures
        if hasattr(m, "REGIONAL_FUEL_MARKET"):
            values.extend([
                sum(m.FuelConsumptionByTier[rfm_st] * m.rfm_supply_tier_cost[rfm_st]
                    for rfm_st in m.RFM_P_SUPPLY_TIERS[rfm, pe])
                for rfm in m.REGIONAL_FUEL_MARKET
            ])
        # costs to expand fuel markets (this could later be disaggregated by market and tier)
        if hasattr(m, "RFM_Fixed_Costs_Annual"):
            values.append(m.RFM_Fixed_Costs_Annual[pe])
        # TODO: add similar code for fuel_costs module instead of fuel_markets module
        return values

    util.write_table(m, m.LOAD_ZONES, m.PERIODS,
        output_file=os.path.join(output_dir, "cost_breakdown{t}.tsv".format(t=tag)),
        headings=("load_zone", "period")
            + tuple(t+"_mw_added" for t in built_tech)
            + ("batteries_mw_added", "batteries_mwh_added", "hydro_mw_added")
            + ("h2_electrolyzer_mw_added", "h2_liquifier_kg_per_hour_added",
               "liquid_h2_tank_kg_added", "fuel_cell_mw_added")
            + tuple(t+"_overnight_cost" for t in built_tech)
            + ("batteries_overnight_cost", "hydro_overnight_cost")
            + ("h2_electrolyzer_overnight_cost", "h2_liquifier_overnight_cost",
               "liquid_h2_tank_overnight_cost", "fuel_cell_overnight_cost")
            + (tuple(rfm+"_annual_cost" for rfm in m.REGIONAL_FUEL_MARKET)
               if hasattr(m, "REGIONAL_FUEL_MARKET") else ())
            + (("fuel_market_expansion_annual_cost",)
               if hasattr(m, "RFM_Fixed_Costs_Annual") else ()),
        values=cost_breakdown_details
    )

    # Which regional fuel-market supply tiers were activated (only when
    # the fuel-markets module with tier activation is loaded).
    if hasattr(m, 'RFMSupplyTierActivate'):
        util.write_table(m, m.RFM_SUPPLY_TIERS,
            output_file=os.path.join(output_dir, "rfm_activate{t}.tsv".format(t=tag)),
            headings=("market", "period", "tier", "activate"),
            values=lambda m, r, p, st: (r, p, st, m.RFMSupplyTierActivate[r, p, st])
        )