def _get_material_bioflows_for_bev(self):
    """
    Obtain bioflow ids for *interesting* materials.

    These are the top bioflows in the ILCD materials characterization
    method for a BEV activity.
    """
    method = ('ILCD 2.0 2018 midpoint', 'resources', 'minerals and metals')
    year = self.years[0]
    act_str = "transport, passenger car, fleet average, battery electric, {}".format(year)

    # upstream material demands are the same for all regions,
    # so any single region (here EUR) can be used
    act = Activity(
        Act.get((Act.name == act_str)
                & (Act.database == eidb_label(
                    self.model, self.scenario, year))
                & (Act.location == "EUR")))
    lca = bw.LCA({act: 1}, method=method)
    lca.lci()
    lca.lcia()

    inv_bio = {value: key for key, value in lca.biosphere_dict.items()}

    ca = ContributionAnalysis()
    ef_contrib = ca.top_emissions(lca.characterized_inventory)
    return [inv_bio[int(el[1])] for el in ef_contrib]
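# The `inv_bio` inversion above turns brightway's `biosphere_dict`
# (flow key -> matrix row index) into a row -> key lookup, so the row indices
# returned by the contribution analysis can be mapped back to flow keys.
# Library-free illustration of that pattern with toy keys (this helper is not
# used by the reporting class):
def _invert_biosphere_dict_example():
    biosphere_dict = {("biosphere3", "flow-a"): 0, ("biosphere3", "flow-b"): 1}
    inv_bio = {row: key for key, row in biosphere_dict.items()}
    assert inv_bio[1] == ("biosphere3", "flow-b")
    return inv_bio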
def report_direct_emissions(self):
    """
    Report the direct (exhaust) emissions of the LDV fleet.
    """
    df = self.data[self.data.Variable.isin(self.variables)]
    df.set_index(["Year", "Region", "Variable"], inplace=True)

    start = time.time()

    result = {}

    # calc score
    for year in self.years:
        db = bw.Database(eidb_label(self.model, self.scenario, year))
        for region in self.regions:
            for var in (df.loc[(year, region)]
                          .index.get_level_values(0)
                          .unique()):
                for act, share in self._act_from_variable(
                        var, db, year, region).items():
                    for ex in act.biosphere():
                        result[(year, region, ex["name"])] = (
                            result.get((year, region, ex["name"]), 0)
                            + ex["amount"] * share
                            * df.loc[(year, region, var), "value"])

    df_result = pd.Series(result)
    print("Calculation took {} seconds.".format(time.time() - start))
    return df_result * 1e9  # kg
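# Unit check for the `* 1e9  # kg` scaling above: judging from the sibling
# reports, `value` holds demand in billion pkm and the summed exchange
# amounts are per pkm, so the factor 1e9 converts the series to kilograms
# for the whole fleet. Toy numbers only, helper not used by the class:
def _direct_emissions_unit_example():
    value_billion_pkm = 1.2   # hypothetical fleet demand
    kg_per_pkm = 5e-5         # hypothetical exhaust emission per pkm
    total_kg = value_billion_pkm * kg_per_pkm * 1e9
    assert abs(total_kg - 6e4) < 1e-6
    return total_kg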
def _sum_variables_and_add_scores(self, market, variables):
    """
    Sum the variables that belong to the market and calculate
    the LCA scores for all years, regions and methods.
    """
    df = self.data[self.data.Variable.isin(variables)]\
        .groupby(["Region", "Year"])\
        .sum()
    df.reset_index(inplace=True)
    df["Market"] = market

    # add methods dimension & score column
    methods_df = pd.DataFrame({"Method": self.methods, "Market": market})
    df = df.merge(methods_df)
    df.loc[:, "score"] = 0.
    df.loc[:, "score_direct"] = 0.
    df.set_index(["Year", "Region", "Method"], inplace=True)

    # calc score
    for year in self.years:
        db = bw.Database(eidb_label(self.model, self.scenario, year))
        # database indexes of powerplants
        pps = [
            pp for pp in db
            if pp["unit"] == "kilowatt hour" and "market" not in pp["name"]
        ]
        lca = bw.LCA({pps[0]: 1})
        lca.lci()
        pp_idxs = [lca.activity_dict[pp.key] for pp in pps]

        for region in self.regions:
            # find activity
            act = [
                a for a in db
                if a["name"] == market and a["location"] == region
            ][0]
            # create first lca object
            lca = bw.LCA({act: 1}, method=self.methods[0])
            # build inventories
            lca.lci()

            for method in self.methods:
                lca.switch_method(method)
                lca.lcia()
                df.at[(year, region, method), "score"] = lca.score
                res_vec = np.squeeze(
                    np.asarray(lca.characterized_inventory.sum(axis=0)))
                df.at[(year, region, method),
                      "score_direct"] = np.sum(res_vec[pp_idxs])

    df["total_score"] = df["score"] * df["value"] * 2.8e11  # EJ -> kWh
    df["total_score_direct"] = df["score_direct"] * df["value"] * 2.8e11  # EJ -> kWh
    return df
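# Sanity check for the `2.8e11  # EJ -> kWh` factor used above: the `value`
# column is in EJ while LCA scores are per kWh, and
# 1 EJ = 1e18 J / 3.6e6 J/kWh ≈ 2.78e11 kWh, which the code rounds to 2.8e11.
# Illustrative helper, not used by the reporting class:
def _ej_to_kwh(value_ej):
    """Convert energy from exajoules to kilowatt hours."""
    kwh = value_ej * 1e18 / 3.6e6
    # _ej_to_kwh(1) -> approx. 2.78e11
    return kwh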
def test_electricity_tech_reporting():
    rep = ElectricityLCAReporting(scenario, years)
    yr = random.choice(years)
    region = random.choice(remind_regions)
    db = bw.Database(eidb_label(model, scenario, yr))
    fltrs = InventorySet(db).powerplant_filters
    tech = random.choice(list(fltrs.keys()))
    test = rep.report_tech_LCA(yr)
    assert len(test) > 0
    assert len(test.loc[(region, tech)]) > 0
def report_endpoint(self):
    """
    *DEPRECATED*
    Report the surplus extraction costs for the scenario.

    :return: A `pandas.Series` containing extraction costs
        with index `year` and `region`.
    """
    indicatorgroup = 'ReCiPe Endpoint (H,A) (obsolete)'
    endpoint_methods = [
        m for m in bw.methods
        if m[0] == indicatorgroup
        and m[2] == "total"
        and not m[1] == "total"
    ]

    df = self.data[self.data.Variable.isin(self.variables)]
    df.set_index(["Year", "Region", "Variable"], inplace=True)
    start = time.time()

    result = {}

    # calc score
    for year in self.years:
        db = bw.Database(eidb_label(self.model, self.scenario, year))
        for region in self.regions:
            # create large lca demand object
            demand = [
                self._act_from_variable(
                    var, db, year, region,
                    scale=df.loc[(year, region, var), "value"])
                for var in (df.loc[(year, region)]
                            .index.get_level_values(0).unique())
            ]
            # flatten dictionaries
            demand = {k: v for item in demand for k, v in item.items()}
            lca = bw.LCA(demand, method=endpoint_methods[0])
            # build inventories
            lca.lci()

            for method in endpoint_methods:
                lca.switch_method(method)
                lca.lcia()
                # 6% discount for monetary endpoint
                factor = 1e9 * 1.06 ** (year - 2013) \
                    if "resources" == method[1] else 1e9
                result[(year, region, method)] = lca.score * factor

    df_result = pd.Series(result)
    print("Calculation took {} seconds.".format(time.time() - start))
    # demand values are given in billion pkm, hence the 1e9 scaling above
    return df_result
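# The "resources" (surplus cost) endpoint above gets an extra
# 1.06 ** (year - 2013) on top of the billion-pkm factor of 1e9, i.e. the 6%
# per-year adjustment relative to 2013 referred to as a discount in the
# comment; the other endpoints only get the 1e9 factor. Standalone sketch of
# that scaling, no brightway objects involved:
def _endpoint_scaling_example(year, is_resources):
    base = 1e9  # demand values are in billion pkm
    factor = base * 1.06 ** (year - 2013) if is_resources else base
    # _endpoint_scaling_example(2013, True) == 1e9 (base year, no adjustment)
    return factor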
def test_electricity_supplier_shares_random():
    rep = ElectricityLCAReporting(scenario, years)
    yr = random.choice(years)
    region = random.choice(remind_regions)
    db = bw.Database(eidb_label(model, scenario, yr))
    shares = rep.supplier_shares(db, region)
    fltrs = InventorySet(db).powerplant_filters
    tech = random.choice(list(fltrs.keys()))
    assert len(shares[tech]) > 0
    assert math.isclose(sum(shares[tech].values()), 1)
def report_LDV_LCA(self):
    """
    Report per-drivetrain impacts along the given dimension.
    Both per-pkm as well as total numbers are given.

    :return: a dataframe with impacts for the REMIND EDGE-T
        transport sector model. Levelized impacts (per pkm) are
        found in the column `score_pkm`, total impacts in `total_score`.
    :rtype: pandas.DataFrame
    """
    df = self.data[self.data.Variable.isin(self.variables)]
    df.loc[:, "score_pkm"] = 0.
    # add methods dimension & score column
    methods_df = pd.DataFrame({"Method": self.methods, "score_pkm": 0.})
    df = df.merge(methods_df, "outer")  # on "score_pkm"
    df.set_index(["Year", "Region", "Variable", "Method"], inplace=True)

    start = time.time()

    # calc score
    for year in self.years:
        # find activities which at the moment do not depend
        # on regions
        db = bw.Database(eidb_label(self.model, self.scenario, year))
        for region in self.regions:
            for var in (df.loc[(year, region)]
                          .index.get_level_values(0)
                          .unique()):
                demand = self._act_from_variable(var, db, year, region)
                lca = bw.LCA(demand, method=self.methods[0])
                # build inventories
                lca.lci()

                # workaround to correct for the higher load factors
                # in the LowD scenarios
                if "_LowD" in self.scenario:
                    fct = max(1 - (year - 2020) / 15 * 0.15, 0.85)
                else:
                    fct = 1.

                for method in self.methods:
                    lca.switch_method(method)
                    lca.lcia()
                    df.loc[(year, region, var, method),
                           "score_pkm"] = lca.score * fct

    print("Calculation took {} seconds.".format(time.time() - start))
    df["total_score"] = df["value"] * df["score_pkm"] * 1e9
    return df[["total_score", "score_pkm"]]
def report_materials(self):
    """
    Report the material demand of the LDV fleet for all regions and years.

    :return: A `pandas.Series` with index `year`, `region` and `material`.
    """
    # materials
    bioflows = self._get_material_bioflows_for_bev()

    df = self.data[self.data.Variable.isin(self.variables)]
    df.set_index(["Year", "Region", "Variable"], inplace=True)
    start = time.time()

    result = {}

    # calc score
    for year in self.years:
        db = bw.Database(eidb_label(self.model, self.scenario, year))
        for region in self.regions:
            # create large lca demand object
            demand = [
                self._act_from_variable(
                    var, db, year, region,
                    scale=df.loc[(year, region, var), "value"])
                for var in (df.loc[(year, region)]
                            .index.get_level_values(0)
                            .unique())]

            # flatten dictionaries, summing shares for activities
            # that appear under more than one variable
            demand_flat = {}
            for item in demand:
                for act, val in item.items():
                    demand_flat[act] = val + demand_flat.get(act, 0)

            lca = bw.LCA(demand_flat)
            # build inventories
            lca.lci()

            for code in bioflows:
                result[(
                    year, region,
                    bw.get_activity(code)["name"].split(",")[0]
                )] = (
                    lca.inventory.sum(axis=1)[
                        lca.biosphere_dict[code], 0]
                )

    df_result = pd.Series(result)
    print("Calculation took {} seconds.".format(time.time() - start))
    return df_result * 1e9  # kg
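# The flattening loop above sums shares when the same activity shows up under
# several variables, whereas the plain dict comprehension in the endpoint
# reports would overwrite earlier entries. Minimal illustration with toy keys
# (helper not used by the reporting class):
def _flatten_demand_example():
    demand = [{"act_a": 0.4}, {"act_a": 0.1, "act_b": 0.5}]
    demand_flat = {}
    for item in demand:
        for act, val in item.items():
            demand_flat[act] = val + demand_flat.get(act, 0)
    assert demand_flat == {"act_a": 0.5, "act_b": 0.5}
    return demand_flat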
def report_midpoint_to_endpoint(self):
    """
    *DEPRECATED*
    Report ReCiPe endpoint impacts (per endpoint category, excluding
    the totals) for the full fleet of each scenario.

    :return: A `pandas.Series` containing impacts with index
        `year`, `region` and `method`.
    """
    methods = [
        m for m in bw.methods
        if m[0] == "ReCiPe Endpoint (H,A) (obsolete)"
        and m[2] != "total"
    ]

    df = self.data[self.data.Variable.isin(self.variables)]
    df.set_index(["Year", "Region", "Variable"], inplace=True)
    start = time.time()

    result = {}

    # calc score
    for year in self.years:
        db = bw.Database(eidb_label(self.model, self.scenario, year))
        for region in self.regions:
            # create large lca demand object
            demand = [
                self._act_from_variable(
                    var, db, year, region,
                    scale=df.loc[(year, region, var), "value"])
                for var in (df.loc[(year, region)]
                            .index.get_level_values(0).unique())
            ]
            # flatten dictionaries
            demand = {k: v for item in demand for k, v in item.items()}
            lca = bw.LCA(demand, method=self.methods[0])
            # build inventories
            lca.lci()

            for method in methods:
                lca.switch_method(method)
                lca.lcia()
                # demand values are given in billion pkm
                factor = 1e9
                result[(year, region, method)] = lca.score * factor

    df_result = pd.Series(result)
    print("Calculation took {} seconds.".format(time.time() - start))
    return df_result
def _sum_variables_and_add_scores(self, market, variables):
    """
    Sum the variables that belong to the market and calculate
    the LCA scores for all years, regions and methods.
    """
    df = self.data[self.data.Variable.isin(variables)]\
        .groupby(["Region", "Year"])\
        .sum()
    df.reset_index(inplace=True)
    df["market"] = market

    # add methods dimension & score column
    methods_df = pd.DataFrame({"method": self.methods, "market": market})
    df = df.merge(methods_df)
    df.loc[:, "score"] = 0.

    # calc score
    for year in self.years:
        db = bw.Database(eidb_label(self.model, self.scenario, year))
        for region in self.regions:
            # find activity
            act = [a for a in db if a["name"] == market
                   and a["location"] == region][0]
            # create first lca object
            lca = bw.LCA({act: 1}, method=self.methods[0])
            # build inventories
            lca.lci()

            df_slice = df[(df.Year == year) & (df.Region == region)]

            def get_score(method):
                lca.switch_method(method)
                lca.lcia()
                return lca.score

            df_slice.loc[:, "score"] = df_slice.apply(
                lambda row: get_score(row["method"]), axis=1)
            df.update(df_slice)

    df["total_score"] = df["score"] * df["value"] * 2.8e11  # EJ -> kWh
    return df
def report_tech_LCA(self, year):
    """
    For each REMIND technology, find a set of activities in the region.
    Use the ecoinvent tech share file to determine the shares of
    technologies within the REMIND proxies.
    """
    tecf = pd.read_csv(DATA_DIR / "powertechs.csv", index_col="tech")
    tecdict = tecf.to_dict()["mif_entry"]

    db = bw.Database(eidb_label(self.model, self.scenario, year))

    result = self._cartesian_product({
        "region": self.regions,
        "tech": list(tecdict.keys()),
        "method": self.methods
    }).sort_index()

    for region in self.regions:
        # read the ecoinvent techs for the entries
        shares = self.supplier_shares(db, region)

        for tech, acts in shares.items():
            # calc LCA
            lca = bw.LCA(acts, self.methods[0])
            lca.lci()
            # matrix indexes of the supplier activities themselves,
            # used to split off the direct share of the score
            pp_idxs = [lca.activity_dict[a.key] for a in acts]

            for method in self.methods:
                lca.switch_method(method)
                lca.lcia()
                result.at[(region, tech, method), "score"] = lca.score
                res_vec = np.squeeze(
                    np.asarray(lca.characterized_inventory.sum(axis=0)))
                result.at[(region, tech, method),
                          "score_direct"] = np.sum(res_vec[pp_idxs])

    return result
def report_LDV_LCA(self):
    """
    Report per-drivetrain impacts along the given dimension.
    Both per-pkm as well as total numbers are given.

    :return: a dataframe with impacts for the REMIND EDGE-T
        transport sector model. Levelized impacts (per pkm) are
        found in the column `score_pkm`, total impacts in `total_score`.
    :rtype: pandas.DataFrame
    """
    df = self.data[self.data.Variable.isin(self.variables)]
    df.loc[:, "score_pkm"] = 0.
    df.loc[:, "score_pkm_direct"] = 0.
    # add methods dimension & score column
    methods_df = pd.DataFrame({"Method": self.methods, "score_pkm": 0.})
    df = df.merge(methods_df, "outer")  # on "score_pkm"
    df.set_index(["Year", "Region", "Variable", "Method"], inplace=True)

    start = time.time()

    # calc score
    for year in self.years:
        # find activities which at the moment do not depend
        # on regions
        db = bw.Database(eidb_label(self.model, self.scenario, year))
        fleet_acts = [
            a for a in db
            if a["name"].startswith("transport, passenger car, fleet average")
        ]
        lca = bw.LCA({fleet_acts[0]: 1})
        lca.lci()
        fleet_idxs = [lca.activity_dict[a.key] for a in fleet_acts]

        for region in self.regions:
            for var in (df.loc[(year, region)]
                        .index.get_level_values(0).unique()):
                demand = self._act_from_variable(var, db, year, region)
                if not demand:
                    continue
                lca = bw.LCA(demand, method=self.methods[0])
                # build inventories
                lca.lci()

                # workaround to correct for the higher load factors
                # in the LowD scenarios
                if "_LowD" in self.scenario:
                    fct = max(1 - (year - 2020) / 15 * 0.15, 0.85)
                else:
                    fct = 1.

                for method in self.methods:
                    lca.switch_method(method)
                    lca.lcia()
                    df.at[(year, region, var, method),
                          "score_pkm"] = lca.score * fct
                    res_vec = np.squeeze(
                        np.asarray(
                            lca.characterized_inventory.sum(axis=0)))
                    df.at[(year, region, var, method),
                          "score_pkm_direct"] = \
                        np.sum(res_vec[fleet_idxs]) * fct

    print("Calculation took {} seconds.".format(time.time() - start))
    df["total_score"] = df["value"] * df["score_pkm"] * 1e9
    df["total_score_direct"] = df["value"] * df["score_pkm_direct"] * 1e9
    return df[[
        "total_score", "total_score_direct", "score_pkm", "score_pkm_direct"
    ]]
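# Standalone sketch of the `_LowD` load-factor workaround above: per-pkm
# scores ramp down linearly from 1.0 in 2020 to 0.85 in 2035 and stay at
# 0.85 afterwards. Illustrative only, not called by the reporting class:
def _lowd_load_factor_correction(year):
    fct = max(1 - (year - 2020) / 15 * 0.15, 0.85)
    # 2020 -> 1.0, 2035 -> 0.85, 2050 -> 0.85
    return fct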