def add_unit_score_exchange_and_cf(method, biosphere='biosphere3'):
    """Add unit score biosphere exchanges and CFs to the biosphere and methods.

    Allows the storing of LCIA results in the B matrix for LCI datasets.
    Makes changes in place and does not return anything.

    Parameters
    ----------
    method: tuple
        Identification of the LCIA method, using Brightway2 tuple identifiers
    biosphere: str, default `biosphere3`
        Name of the biosphere database where biosphere exchanges are stored

    Note
    ----
    This function is invoked directly by the DatabaseAggregator
    """
    if method not in bw.methods:
        raise ValueError("Method {} not in registered methods".format(method))
    if biosphere not in bw.databases:
        raise ValueError(
            "Database {} not in registered databases".format(biosphere))
    m = bw.Method(method)
    ef_code = m.get_abbreviation()
    ef_name = 'Unit impact for {}'.format(method)

    # Add to biosphere database, skip if already present
    try:
        ef = bw.get_activity((biosphere, ef_code))
        assert ef['name'] == ef_name
    except Exception:
        ef = bw.Database(biosphere).new_activity(code=ef_code)
        ef['name'] = ef_name
        ef['unit'] = m.metadata['unit']
        ef['categories'] = ('undefined',)
        ef['exchanges'] = []  # was `ef['exchanges']: []`, a no-op annotation
        ef['type'] = 'unit impact exchange'
        ef.save()
    try:
        bw.mapping[(biosphere, ef_code)]
    except KeyError:
        print("Manually added {} to mapping".format(ef_code))
        bw.mapping.add((biosphere, ef_code))

    # Add to associated method, skip if already present
    loaded_method = m.load()
    try:
        existing_cf = [
            cf_tuple for cf_tuple in loaded_method
            if cf_tuple[0] == (biosphere, ef_code)
        ][0]
        assert existing_cf[1] == 1
    except (IndexError, AssertionError):
        loaded_method.append(((biosphere, m.get_abbreviation()), 1))
        bw.Method(method).write(loaded_method)

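# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes an open Brightway2 project with the 'biosphere3' database and a
# registered LCIA method; the method tuple below is just an example entry.
def _demo_add_unit_score_exchange_and_cf():
    method = ('IPCC 2013', 'climate change', 'GWP 100a')  # assumed registered
    add_unit_score_exchange_and_cf(method)
    ef_code = bw.Method(method).get_abbreviation()
    # The unit-impact flow should now carry a CF of 1 in the method
    assert (('biosphere3', ef_code), 1) in bw.Method(method).load()
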
def add_non_fossil_co2_flows_to_storage():
    """Add a new flow to the biosphere: non-fossil CO2 to storage.

    Add this biosphere flow to the LCIA methods where it is suitable.
    """
    from peewee import IntegrityError

    biosphere = bw.Database('biosphere3')
    new_flow = biosphere.new_activity(
        'CO2 to geological storage, non-fossil', **{
            'name': 'CO2 to geological storage, non-fossil',
            'unit': 'kilogram',
            'type': 'storage',
            'categories': ('geological storage', )
        })
    try:
        new_flow.save()
    except IntegrityError as e:
        print("Database Error (flow is likely to be present already): {}".format(e))
    print("Added new flow: {}".format(new_flow))

    co2_to_soil = [
        x for x in bw.Database("biosphere3")
        if ("Carbon dioxide, to soil or biomass stock" in str(x)
            and "('soil',)" in str(x))
    ][0]
    print("Use {} as a template for the characterization factors.".format(
        co2_to_soil))

    for cat in lcia_methods:
        method = bw.Method(lcia_methods[cat])
        method_data = method.load()
        # First make sure we don't already have the flow included:
        if [x for x in method_data if new_flow.key[1] in x[0][1]]:
            print('Flow already present - you must have run this code already.')
            continue
        try:
            characterized_flow = [
                x for x in method_data if co2_to_soil.key[1] in x[0][1]
            ][0]
        except IndexError:
            continue

        method_data.extend([(new_flow.key, characterized_flow[1])])

        print('Flow added to method: {}'.format(method.name))
        print('Characterisation factor: {}'.format(characterized_flow[1]))

        orig_name = [x for x in method.name]
        new_method = bw.Method(tuple(orig_name + ['CO2 storage']))
        new_method.register()
        new_method.write(method_data)
        new_method.process()

def _get_impacts(self, biosphere='biosphere3'):
    if len(self.C_matrices) == 0:
        self._create_C_matrices()
    return [{
        'input': (biosphere, bw.Method(method).get_abbreviation()),
        'amount': (C_matrix * self.lca.inventory).sum(),
        'type': 'biosphere',
        'name': 'Unit impact for {}'.format(method),
        'unit': bw.Method(method).metadata['unit']
    } for method, C_matrix in self.C_matrices.items()]

def get_multilca_to_dataframe(MultiLCA):
    """Return a long dataframe with the LCA scores of a MultiLCA.

    Input arguments:
        * ``MultiLCA``: a MultiLCA object, already calculated

    Returns:
        * A long dataframe with columns: ('Database', 'Code', 'Name',
          'Location', 'Unit', 'Amount_fu', 'Method_name', 'Midpoint',
          'Midpoint_abb', 'Midpoint_unit', 'Score')
    """
    as_activities = [(bw.get_activity(key), amount)
                     for dct in MultiLCA.func_units
                     for key, amount in dct.items()]
    scores = pd.DataFrame(data=MultiLCA.results,
                          columns=[method[1] for method in MultiLCA.methods],
                          index=[act[0]['code'] for act in as_activities])
    nicer_fu = pd.DataFrame(
        [(x['database'], x['code'], x['name'], x['location'], x['unit'], y,
          method[0], method[1], method[2], bw.Method(method).metadata['unit'],
          scores.loc[x['code'], method[1]])
         for x, y in as_activities for method in MultiLCA.methods],
        columns=('Database', 'Code', 'Name', 'Location', 'Unit', 'Amount_fu',
                 'Method_name', 'Midpoint', 'Midpoint_abb', 'Midpoint_unit',
                 'Score'))
    return nicer_fu

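# --- Usage sketch (illustrative): building the MultiLCA input ---
# bw.calculation_setups and bw.MultiLCA are standard Brightway2; the
# database name and method tuple are placeholders for your own project.
def _demo_get_multilca_to_dataframe():
    act = bw.Database('ecoinvent').random()  # any activity
    methods = [('IPCC 2013', 'climate change', 'GWP 100a')]
    bw.calculation_setups['demo_setup'] = {'inv': [{act: 1}], 'ia': methods}
    mlca = bw.MultiLCA('demo_setup')
    return get_multilca_to_dataframe(mlca)
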
def get_biosphere_factors(flows, lca, cats=['GWP', 'R_Total']):
    """Calculate an LCA for a dict of biosphere flows for a list of methods
    and return a pandas dataframe.
    """
    results = {}
    for cat in cats:
        lca.switch_method(lcia_methods[cat])
        cf_dict = dict(bw.Method(lcia_methods[cat]).load())
        results[cat] = {}
        for name, exc in flows.items():
            # Not all flows are characterized
            if ('biosphere3', exc['code']) in cf_dict:
                results[cat][name] = cf_dict[('biosphere3', exc['code'])]
    # We usually prefer to group fossil and nuclear non-renewable energy
    # into one category called non-renewable energy:
    if 'CEDF' in results.keys():
        results['CED'] = results['CEDF']
        del results['CEDF']
    for cat in category_group:
        if cat in results.keys():
            for key in results[cat].keys():
                if 'CED' not in results.keys():
                    results['CED'] = {}
                try:
                    results['CED'][key] += results[cat][key]
                except KeyError:
                    results['CED'][key] = results[cat][key]
            del results[cat]
    return pd.DataFrame(results)

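# --- Usage sketch (illustrative) ---
# Assumes the module-level `lcia_methods` dict maps short category names
# (e.g. 'GWP') to registered method tuples, as elsewhere in this module;
# the database names are placeholders.
def _demo_get_biosphere_factors():
    flows = {'some flow': bw.Database('biosphere3').random()}
    lca = bw.LCA({bw.Database('ecoinvent').random(): 1},
                 method=lcia_methods['GWP'])
    lca.lci()
    lca.lcia()
    return get_biosphere_factors(flows, lca, cats=['GWP'])
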
def get_json_data(data) -> str:
    """Transform bw.GraphTraversal() output to JSON data."""
    lca = data["lca"]
    lca_score = lca.score
    lcia_unit = bw.Method(lca.method).metadata["unit"]
    demand = list(lca.demand.items())[0]
    reverse_activity_dict = {v: k for k, v in lca.activity_dict.items()}

    build_json_node = Graph.compose_node_builder(lca_score, lcia_unit, demand[0])
    build_json_edge = Graph.compose_edge_builder(reverse_activity_dict,
                                                 lca_score, lcia_unit)

    valid_nodes = (
        (bw.get_activity(reverse_activity_dict[idx]), v)
        for idx, v in data["nodes"].items() if idx != -1
    )
    valid_edges = (
        edge for edge in data["edges"]
        if all(i != -1 for i in (edge["from"], edge["to"]))
    )

    json_data = {
        "nodes": [build_json_node(act, v) for act, v in valid_nodes],
        "edges": [build_json_edge(edge) for edge in valid_edges],
        "title": Graph.build_title(demand, lca_score, lcia_unit),
        "max_impact": max(abs(n["cum"]) for n in data["nodes"].values()),
    }
    # print("JSON DATA (Nodes/Edges):", len(nodes), len(edges))
    # print(json_data)
    return json.dumps(json_data)

def sync(self, method: Optional[tuple] = None) -> None:
    if self.method and self.method.name != method:
        return
    if method:
        self.method = bw.Method(method)
    assert self.method is not None, "A method must be set."
    self._dataframe = pd.DataFrame(
        [self.build_row(obj) for obj in self.method.load()],
        columns=self.HEADERS + self.UNCERTAINTY)
    self.cf_column = self._dataframe.columns.get_loc("cf")
    self.updated.emit()

def adjust_table_unit(df: pd.DataFrame, method: Optional[tuple]) -> pd.DataFrame:
    """Given a dataframe, adjust the unit of the table to either match the
    given method, or not exist.
    """
    if "unit" not in df.columns:
        return df
    keys = df.index[~df["index"].isin({"Total", "Rest"})]
    unit = bw.Method(method).metadata.get("unit") if method else "unit"
    df.loc[keys, "unit"] = unit
    return df

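# --- Usage sketch (illustrative) ---
# adjust_table_unit expects an 'index' column holding row labels such as
# 'Total' and 'Rest'; the method tuple is a placeholder.
def _demo_adjust_table_unit():
    df = pd.DataFrame({"index": ["Total", "Rest", "transport"],
                       "unit": ["", "", ""]})
    method = ('IPCC 2013', 'climate change', 'GWP 100a')  # assumed registered
    return adjust_table_unit(df, method)  # 'transport' row gets the method unit
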
def __init__(self,
             up_db_name,
             agg_db_name,
             database_type='LCIA',
             method_list=None,
             biosphere='biosphere3',
             overwrite=False):
    assert up_db_name in bw.databases, "Source database does not exist"
    if agg_db_name in bw.databases and not overwrite:
        warnings.warn(
            "A database named {} already exists, set `overwrite` to True "
            "to overwrite".format(agg_db_name))
        return
    self.source = bw.Database(up_db_name)
    self.new_name = agg_db_name
    self.biosphere = biosphere
    self.lca = bw.LCA({self.source.random(): 1})
    self.lca.lci(factorize=True)
    self.database_type = database_type
    self.methods = method_list or []  # avoid a mutable default argument
    if self.database_type not in ['LCI', 'LCIA']:
        raise ValueError(
            '{} is not a valid database type, should be "LCI" or "LCIA"'.format(
                self.database_type))
    if self.database_type == "LCIA":
        if not self.methods:
            raise ValueError(
                "Need to pass a list of method identifiers to create an "
                "LCIA score database, none passed")
        for m in self.methods:
            if any([(self.biosphere, bw.Method(m).get_abbreviation())
                    not in [cf[0] for cf in bw.Method(m).load()],
                    (self.biosphere, bw.Method(m).get_abbreviation())
                    not in bw.Database(self.biosphere)]):
                add_unit_score_exchange_and_cf(m, biosphere)
    self.C_matrices = {}

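# --- Usage sketch (illustrative) ---
# Assumes this __init__ belongs to the DatabaseAggregator class referenced in
# add_unit_score_exchange_and_cf's docstring; the database names and method
# tuple are placeholders.
def _demo_database_aggregator():
    return DatabaseAggregator(
        up_db_name='ecoinvent',
        agg_db_name='ecoinvent_agg',
        database_type='LCIA',
        method_list=[('IPCC 2013', 'climate change', 'GWP 100a')],
    )
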
def add_non_fossil_co2_flows_to_ipcc_method():
    """Add non-fossil CO2 flows to the IPCC 2013 GWP 100a method."""
    ipcc = bw.Method(('IPCC 2013', 'climate change', 'GWP 100a'))
    gwp_data = ipcc.load()

    non_fossil = [
        x for x in ws.get_many(
            bw.Database("biosphere3"),
            ws.equals("name", "Carbon dioxide, non-fossil"))
    ]
    print("Adding the following flows:")
    pprint(non_fossil)
    gwp_data.extend([(x.key, 1.) for x in non_fossil])

    co2_in_air = ws.get_one(bw.Database("biosphere3"),
                            ws.equals("name", 'Carbon dioxide, in air'))
    print("Adding {}.".format(co2_in_air))
    gwp_data.append((co2_in_air.key, -1.))

    method = bw.Method(('IPCC 2013', 'climate change', 'GWP 100a', 'Complete'))
    method.register()
    method.write(gwp_data)
    method.process()

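# --- Usage sketch (illustrative) ---
# After running add_non_fossil_co2_flows_to_ipcc_method() once, the extended
# method can be used like any other; the database name is a placeholder.
def _demo_complete_ipcc_method():
    complete = ('IPCC 2013', 'climate change', 'GWP 100a', 'Complete')
    lca = bw.LCA({bw.Database('ecoinvent').random(): 1}, method=complete)
    lca.lci()
    lca.lcia()
    return lca.score
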
def copy_method(self) -> None:
    """Call copy on the (first) selected method and present rename dialog."""
    method = bw.Method(self.get_method(next(p for p in self.selectedIndexes())))
    dialog = TupleNameDialog.get_combined_name(
        self, "Impact category name", "Combined name:", method.name, "Copy"
    )
    if dialog.exec_() == TupleNameDialog.Accepted:
        new_name = dialog.result_tuple
        if new_name in bw.methods:
            warn = "Impact Category with name '{}' already exists!".format(new_name)
            QtWidgets.QMessageBox.warning(self, "Copy failed", warn)
            return
        method.copy(new_name)
        print("Copied method {} into {}".format(str(method.name), str(new_name)))
        self.new_method.emit(new_name)

def modify_method_with_cf(self, cf: tuple, method: tuple) -> None:
    """Take the given CF tuple, add it to the method object stored in
    `self.method` and call .write() & .process() to finalize.

    NOTE: if the flow key matches one of the CFs in method, that CF
    will be edited; if not, a new CF will be added to the method.
    """
    method = bw.Method(method)
    cfs = method.load()
    idx = next((i for i, c in enumerate(cfs) if c[0] == cf[0]), None)
    if idx is None:
        cfs.append(cf)
    else:
        cfs[idx] = cf
    method.write(cfs)
    signals.method_modified.emit(method.name)

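# --- Note on accepted CF shapes (illustrative) ---
# modify_method_with_cf takes either a plain (flow_key, amount) tuple or,
# with uncertainty, a (flow_key, dict) tuple; the flow code here is
# hypothetical and the uncertainty id follows stats_arrays (4 = uniform).
def _demo_cf_shapes():
    cf_plain = (('biosphere3', 'some_flow_code'), 1.0)
    cf_uncertain = (('biosphere3', 'some_flow_code'),
                    {"amount": 1.0, "uncertainty type": 4,
                     "minimum": 0.5, "maximum": 1.5})
    return cf_plain, cf_uncertain
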
def get_cf_info(m):
    """Extract info on the characterisation factors of a method, given its name.

    Currently only supports methods without uncertainty, where each CF is a
    simple (key, amount) tuple."""
    assert m in bw.methods, f"{m} not in bw.methods"
    assert is_method_uncertain(m) is False, \
        f"{m} has uncertain CFs. Not yet supported"
    M = bw.Method(m)
    cfs = M.load()
    info = []
    for cf in cfs:
        key, value = cf
        flow = bw.get_activity(key)
        compartments = flow["categories"]
        compartment = compartments[0]
        try:
            subcompartment = compartments[1]
        except IndexError:
            subcompartment = None
        info.append((
            flow["database"],
            flow["code"],
            flow["name"],
            value,
            flow["unit"],
            flow["type"],
            compartment,
            subcompartment,
        ))
    df = pd.DataFrame(
        info,
        columns=[
            "database",
            "code",
            "name",
            "amount",
            "unit",
            "type",
            "compartment",
            "subcompartment",
        ],
    )
    return df

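# --- Usage sketch (illustrative) ---
# Works with any registered method whose CFs carry no uncertainty;
# is_method_uncertain is defined further down in this collection.
def _demo_get_cf_info():
    m = next(iter(bw.methods))  # any registered method tuple
    if not is_method_uncertain(m):
        return get_cf_info(m)
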
def sync(self, method):
    self.setHorizontalHeaderLabels(self.HEADERS)
    method = bw.Method(method)
    data = method.load()
    self.setRowCount(len(data))
    for row, obj in enumerate(data):
        key, amount = obj[:2]
        flow = bw.get_activity(key)
        if isinstance(amount, numbers.Number):
            uncertain = "False"
        else:
            uncertain = "True"
            amount = amount['amount']
        self.setItem(row, 0, ABTableItem(flow['name'], key=key))
        self.setItem(row, 1, ABTableItem("{:.6g}".format(amount), key=key))
        self.setItem(row, 2, ABTableItem(flow.get('unit', 'Unknown'), key=key))
        self.setItem(row, 3, ABTableItem(str(uncertain), key=key))

def remove_uncertainty(self, removed: list, method: tuple) -> None:
    """Remove all uncertainty information from the selected CFs.

    NOTE: Does not affect any selected CF that does not have uncertainty
    information.
    """
    def unset(cf: tuple) -> tuple:
        data = [*cf]
        data[1] = data[1].get("amount")
        return tuple(data)

    method = bw.Method(method)
    modified_cfs = (unset(cf) for cf in removed if isinstance(cf[1], dict))
    cfs = method.load()
    for cf in modified_cfs:
        idx = next(i for i, c in enumerate(cfs) if c[0] == cf[0])
        cfs[idx] = cf
    method.write(cfs)
    signals.method_modified.emit(method.name)

def test_cf_interface(qtbot, ab_app):
    key = bw.methods.random()
    method = bw.Method(key).load()
    cf = next(f for f in method)
    assert isinstance(cf, tuple)
    if isinstance(cf[-1], dict):
        cf = method[1]
    assert isinstance(cf[-1], float)

    amount = cf[-1]  # The last value in the CF should be the amount.
    interface = get_uncertainty_interface(cf)
    assert isinstance(interface, CFUncertaintyInterface)
    assert not interface.is_uncertain  # CF should not be uncertain.
    assert interface.amount == amount
    assert interface.uncertainty_type == UndefinedUncertainty
    assert interface.uncertainty == {}

    # Now add uncertainty.
    uncertainty = {
        "minimum": 1,
        "maximum": 18,
        "uncertainty type": UniformUncertainty.id,
    }
    uncertainty["amount"] = amount
    cf = (cf[0], uncertainty)
    interface = get_uncertainty_interface(cf)
    assert isinstance(interface, CFUncertaintyInterface)
    assert interface.is_uncertain  # It is uncertain now!
    assert interface.amount == amount
    assert interface.uncertainty_type == UniformUncertainty
    assert interface.uncertainty == {
        "uncertainty type": UniformUncertainty.id,
        "minimum": 1,
        "maximum": 18,
    }

def is_method_uncertain(method):
    """Check whether any CF in the method carries uncertainty information."""
    cfs = bw.Method(method).load()
    cf_values = [cf_value for flow, cf_value in cfs]
    return any(isinstance(x, dict) for x in cf_values)

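# --- Usage sketch (illustrative) ---
# Quick census of uncertain vs. certain methods in the current project.
def _demo_count_uncertain_methods():
    return sum(is_method_uncertain(m) for m in bw.methods)
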
def sync(self, method: tuple) -> None:
    self.method = bw.Method(method)
    self.dataframe = DataFrame(
        [self.build_row(obj) for obj in self.method.load()],
        columns=self.HEADERS + self.UNCERTAINTY)
    self.cf_column = self.dataframe.columns.get_loc("cf")

def compareStaticLCA_interactive(dScores1,
                                 dScores2,
                                 sFileName="static_LCA_comparison.html",
                                 legend1=None,
                                 legend2=None):
    # Check that the same methods have been used; abort if not
    if not dScores1.keys() == dScores2.keys():
        print("Methods are different. Comparison invalid.")
        return

    # Plot layout options
    bar_width = 0.5

    # Extract method names and units
    ltMethods = [m for m in dScores1.keys()]
    lsUnits = [bw.Method(m).metadata["unit"] for m in dScores1.keys()]
    lsNames = [", ".join(bw.Method(m).name) for m in dScores1.keys()]
    lsMethodLabels = lsNames

    # Normalized values
    normalized_v1 = np.array([np.sign(v) for v in dScores1.values()])
    normalized_v2 = np.array([
        np.sign(v2) * np.abs(v2 / v1)
        for v1, v2 in zip(dScores1.values(), dScores2.values())
    ])

    # Bar labels = actual values
    lsBarLabels1 = [
        "%.2e " % v + unit for v, unit in zip(dScores1.values(), lsUnits)
    ]
    lsBarLabels2 = [
        "%.2e " % v + unit for v, unit in zip(dScores2.values(), lsUnits)
    ]

    # Putting it all together
    source1 = bokeh.models.ColumnDataSource(data=dict(y=normalized_v1,
                                                      method=lsMethodLabels,
                                                      value=lsBarLabels1,
                                                      methods=ltMethods))
    source2 = bokeh.models.ColumnDataSource(data=dict(y=normalized_v2,
                                                      method=lsMethodLabels,
                                                      value=lsBarLabels2,
                                                      methods=ltMethods))
    TOOLTIPS = [("method", "@method"), ("value", "@value")]

    # Plot
    f = bokeh.plotting.figure(x_axis_label="indicator",
                              y_axis_label='normalized impact [-]',
                              plot_width=width,
                              plot_height=height * 3,
                              tooltips=TOOLTIPS,
                              x_range=bokeh.models.FactorRange(*ltMethods))
    p1 = f.vbar(x="methods",
                top="y",
                width=bar_width,
                color=palette[0],
                alpha=0.6,
                source=source1)
    p2 = f.vbar(x=dodge('methods', bar_width / 2, range=f.x_range),
                top="y",
                width=bar_width,
                color=palette[1],
                alpha=0.6,
                source=source2)

    # Build legend
    legend = bokeh.models.Legend(items=[
        (legend1, [p1]),
        (legend2, [p2]),
    ],
                                 location="center",
                                 orientation="horizontal",
                                 label_width=75)
    f.add_layout(legend, 'above')

    # Font sizes
    font_size = "15pt"
    f.xaxis.axis_label_text_font_size = \
        f.yaxis.axis_label_text_font_size = \
        f.xaxis.major_label_text_font_size = \
        f.yaxis.major_label_text_font_size = \
        f.legend.label_text_font_size = font_size

    # x label rotation
    f.xaxis.major_label_orientation = np.pi / 2

    # Add tool for strict y-axis zoom
    f.add_tools(bokeh.models.WheelZoomTool(dimensions="height"))

    # Show the results
    bokeh.io.output_notebook()
    bokeh.io.show(f)

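# --- Usage sketch (illustrative) ---
# The scores are made up; requires the module-level `width`, `height`,
# `palette` and `dodge` plotting settings this function relies on.
def _demo_compare_static_lca():
    m = ('IPCC 2013', 'climate change', 'GWP 100a')  # assumed registered
    compareStaticLCA_interactive({m: 1.0}, {m: 0.8},
                                 legend1="baseline", legend2="variant")
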
def run_analyses(self,
                 demand_item,
                 demand_item_code,
                 amount=1,
                 methods=[('IPCC 2013', 'climate change', 'GWP 100a')],
                 top_processes=10,
                 gt_cutoff=0.01,
                 pie_cutoff=0.05):
    ready = self.setup_bw2()
    name = self.bw2_database_name
    if ready:
        if name in bw2.databases:
            del bw2.databases[name]
            print('Rewriting database ({}) ...'.format(name))
        else:
            print('Writing database ({})...'.format(name))  # pragma: no cover
        new_db = bw2.Database(name)
        new_db.write(self.bw2_database)
        new_db.process()

        #print('trying to get {}'.format(demand_item_code))
        product_demand = new_db.get(demand_item_code)

        if product_demand is not False:
            fu = {product_demand: amount}
            parameter_sets = self.modelInstance.evaluated_parameter_sets

            ts = time.time()
            ts_format = datetime.datetime.fromtimestamp(ts).strftime(
                '%Y-%m-%d %H:%M:%S')

            result_dict = {
                'settings': {
                    'pie_cutoff': pie_cutoff,
                    'methods': [str(method) for method in methods],
                    'method_names': [', '.join(method[1:]) for method in methods],
                    'method_units': [bw2.methods[method]['unit']
                                     for method in methods],
                    'item': demand_item,
                    'item_code': demand_item_code,
                    'amount': amount,
                    'ps_names': [name for name in parameter_sets.keys()],
                    'item_unit': product_demand['unit'],
                    'timestamp': ts_format,
                }
            }

            result_sets = []

            # For each parameter set in the model, run the analysis
            for n, (parameter_set_name,
                    parameter_set) in enumerate(parameter_sets.items()):
                # Update the parameter set values
                print('\nAnalysis {}\n'.format(n + 1))
                self.update_exchange_amounts(new_db, parameter_set)

                initial_method = methods[0]

                # Run the LCA
                lca = bw2.LCA(fu, initial_method)
                lca.lci(factorize=True)
                lca.lcia()

                ps_results = []

                for method in methods:
                    lca.switch_method(method)
                    lca.redo_lcia(fu)

                    unit = bw2.methods[method]['unit']
                    score = lca.score
                    #print('Analysis for {} {} of {}, using {}'.format(amount, product_demand['unit'], product_demand['name'], method))
                    #print('{:.3g} {}'.format(score, unit))

                    method_dict = {o[0]: o[1] for o in bw2.Method(method).load()}

                    default_tag = "other"

                    label = "lcopt_type"
                    type_graph = [
                        recurse_tagged_database(key, amount, method_dict, lca,
                                                label, default_tag)
                        for key, amount in fu.items()
                    ]
                    # type_result = aggregate_tagged_graph(type_graph)
                    # for k, v in type_result.items():
                    #     print('{}\t\t{}'.format(k, v))

                    label = "name"
                    foreground_graph = [
                        recurse_tagged_database(key, amount, method_dict, lca,
                                                label, default_tag)
                        for key, amount in fu.items()
                    ]
                    foreground_result = aggregate_tagged_graph(foreground_graph)
                    # for k, v in foreground_result.items():
                    #     print('{}\t\t{}'.format(k, v))

                    recursed_graph = self.multi_recurse(deepcopy(type_graph[0]))
                    dropped_graph = self.drop_level_recurse(deepcopy(type_graph[0]))

                    result_set = {
                        'ps_name': parameter_set_name,
                        'method': str(method),
                        'unit': unit,
                        'score': score,
                        'foreground_results': foreground_result,
                        'graph': recursed_graph,
                        'dropped_graph': dropped_graph,
                        'original_graph': str(type_graph[0])
                    }

                    ps_results.append(result_set)

                result_sets.append(ps_results)

            result_dict['results'] = result_sets

            return result_dict

def _method_unit(method):
    return bw.Method(method).metadata['unit']

def get_JSON_from_graph_traversal_data(self, data):
    """Transform bw.GraphTraversal() output to JSON data."""

    def get_activity_by_index(ind):
        if ind != -1:
            return bw.get_activity(reverse_activity_dict[ind])
        else:
            return False

    def get_max_impact(nodes):
        return max([abs(n["cum"]) for n in nodes.values()])

    gnodes = data["nodes"]
    gedges = data["edges"]
    lca = data["lca"]
    lca_score = lca.score  # abs(lca.score)
    max_impact = get_max_impact(gnodes)
    # print("Max impact:", max_impact)
    LCIA_unit = bw.Method(lca.method).metadata["unit"]
    demand = list(lca.demand.items())[0]
    reverse_activity_dict = {v: k for k, v in lca.activity_dict.items()}

    nodes, edges = [], []
    for node_index, values in gnodes.items():
        act = get_activity_by_index(node_index)
        if not act:
            continue
        nodes.append({
            # "key": act.key,
            "db": act.key[0],
            "id": act.key[1],
            "product": act.get("reference product") or act.get("name"),
            "name": act.get("name"),
            "location": act.get("location"),
            "amount": values.get("amount"),
            "LCIA_unit": LCIA_unit,
            "ind": values.get("ind"),
            "ind_norm": values.get("ind") / lca_score,
            "cum": values.get("cum"),
            "cum_norm": values.get("cum") / lca_score,
            "class": ("demand" if act == demand[0]
                      else identify_activity_type(act)),
        })

    for gedge in gedges:
        if gedge["from"] == -1 or gedge["to"] == -1:
            continue
        product = (get_activity_by_index(gedge["from"]).get("reference product")
                   or get_activity_by_index(gedge["from"]).get("name"))
        from_key = reverse_activity_dict[gedge["from"]]
        to_key = reverse_activity_dict[gedge["to"]]
        edges.append({
            "source_id": from_key[1],
            "target_id": to_key[1],
            "amount": gedge["amount"],
            "product": product,
            "impact": gedge["impact"],
            "ind_norm": gedge["impact"] / lca_score,
            "unit": LCIA_unit,
            "tooltip": '<b>{}</b> ({:.2g} {})'
                       '<br>{:.3g} {} ({:.2g}%) '.format(
                           product,
                           gedge["amount"],
                           bw.get_activity(from_key).get("unit"),
                           gedge["impact"],
                           LCIA_unit,
                           gedge["impact"] / lca.score * 100,
                       )
        })

    def get_title():
        act, amount = demand[0], demand[1]
        m = bw.Method(lca.method)
        # 'LCIA method: {} [{}] <br>' \
        return 'Functional unit: {:.2g} {} {} | {} | {} <br>' \
               'Total impact: {:.2g} {}'.format(
                   amount,
                   act.get("unit"),
                   act.get("reference product") or act.get("name"),
                   act.get("name"),
                   act.get("location"),
                   # m.name, # m.metadata.get("unit"),
                   lca.score,
                   m.metadata.get("unit"),
               )

    json_data = {
        "nodes": nodes,
        "edges": edges,
        "title": get_title(),
        "max_impact": max_impact,
    }
    # print("JSON DATA (Nodes/Edges):", len(nodes), len(edges))
    # print(json_data)
    return json.dumps(json_data)

def add_impact_scores_to_act(act_code,
                             agg_db,
                             up_db,
                             selected_methods,
                             overwrite=False,
                             create_ef_on_the_fly=False,
                             biosphere='biosphere3',
                             create_agg_database_on_fly=False):
    """Add unit impact scores to biosphere exchanges of an activity in an
    aggregated database.

    The up_db is the unit process level database used for the calculations.
    The elementary flow code is Method(method).get_abbreviation().
    The elementary flow unit is Method(method).metadata['unit'].
    The elementary flow name is 'Unit impact for {}'.format(method).
    """
    # Make sure the unit process dataset exists
    assert (up_db, act_code) in bw.Database(up_db), \
        "Activity missing from unit process database"
    up_act = bw.get_activity((up_db, act_code))

    # Create the aggregated dataset if required
    if not (agg_db, act_code) in bw.Database(agg_db):
        agg_act = copy_stripped_activity_to_other_db(
            data=copy.deepcopy(up_act._data),
            target_db=agg_db,
            create_database_on_fly=create_agg_database_on_fly)
    else:
        agg_act = bw.get_activity((agg_db, act_code))
    existing_biosphere_in_agg = [exc.input.key for exc in agg_act.biosphere()]

    up_production_amount = up_act['production amount']
    lca = bw.LCA({up_act: up_production_amount})
    lca.lci()

    for method in selected_methods:
        m = bw.Method(method)
        ef_code = m.get_abbreviation()
        ef_name = 'Unit impact for {}'.format(method)
        result_already_in_act = (biosphere, ef_code) in existing_biosphere_in_agg
        if result_already_in_act:
            print("Results already exist for activity {}, category {}".format(
                agg_act, selected_methods))
            if not overwrite:
                # Nothing to update for this method; skip it (the original
                # fell through here and hit `exc` before it was assigned).
                print("Set overwrite=True to replace value")
                continue
            potential_exc = [
                exc for exc in agg_act.biosphere()
                if exc.input.key == (biosphere, ef_code)
            ]
            if len(potential_exc) > 1:
                raise ValueError(
                    "More than one corresponding exchange found. "
                    "activity: {}, exchange: {}".format(agg_act, method))
            exc = potential_exc[0]
        if not (biosphere, ef_code) in bw.Database(biosphere):
            if not create_ef_on_the_fly:
                raise ValueError(
                    '{} needs to be added to biosphere database'.format(ef_name))
            else:
                add_unit_score_exchange_and_cf(method, biosphere=biosphere)
        lca.switch_method(method)
        lca.lcia()
        if not result_already_in_act:
            exc = agg_act.new_exchange(
                input=(biosphere, ef_code),
                output=agg_act.key,
                amount=lca.score,
                unit=m.metadata['unit'],
            )
            exc['type'] = 'biosphere'
        else:
            exc['amount'] = lca.score
        exc.save()

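# --- Usage sketch (illustrative, not part of the original module) ---
# All database names and the activity code are placeholders; assumes the
# databases exist and copy_stripped_activity_to_other_db is importable.
def _demo_add_impact_scores_to_act():
    add_impact_scores_to_act(
        act_code='some_activity_code',  # hypothetical code
        agg_db='my_agg_db',             # hypothetical target database
        up_db='ecoinvent',              # hypothetical source database
        selected_methods=[('IPCC 2013', 'climate change', 'GWP 100a')],
        create_ef_on_the_fly=True,
        create_agg_database_on_fly=True,
    )
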
# def calculate_LCIA_array_from_LCI_array(LCI_array, method, ref_bio_dict, result_precision='float32'):
#     """Calculate a 1xn array of LCIA results from an existing mxn LCI results array.
#
#     The reference biosphere dictionary (ref_bio_dict) provides a mapping between
#     biosphere exchange keys and their corresponding rows in the LCI array,
#     i.e. its values are (bio_db_name, code): row_number_in_LCI_array
#     """
#     # Get a list of elementary flows that are characterized in the given method
#     loaded_method = bw.Method(method).load()
#     method_ordered_exchanges = [exc[0] for exc in loaded_method]
#
#     # Collectors for the LCI array indices and characterization factors that
#     # are relevant for the impact assessment (i.e. those that have
#     # characterization factors for the given method)
#     lca_specific_biosphere_indices = []
#     cfs = []
#     for exc in method_ordered_exchanges:  # For every exchange that has a CF
#         try:
#             # Check to see if it is in the bio_dict.
#             # If it is, it is in the inventory, and its index is bio_dict[exc]
#             lca_specific_biosphere_indices.append(ref_bio_dict[exc])
#             # If it is in bio_dict, we need its characterization factor
#             cfs.append(dict(loaded_method)[exc])
#         except KeyError:  # Exchange was not in bio_dict
#             pass
#
#     # Extract elements of the LCI array that are characterized,
#     # in the correct order
#     filtered_LCI_array = LCI_array[lca_specific_biosphere_indices][:]
#     # Convert the CF list to a CF array
#     cf_array = np.reshape(np.array(cfs), (-1, 1))
#     # LCIA score = sum of multiplication of inventory result and CF
#     LCIA_array = (np.array(filtered_LCI_array) * cf_array).sum(axis=0)
#     # Change result precision if needed
#     if LCIA_array.dtype != result_precision:
#         LCIA_array = LCIA_array.astype(result_precision, copy=False)
#     return LCIA_array
#
#
# class CharacterizedBiosphereDatabaseGenerator(object):
#     def __init__(self,
#                  up_db_name,
#                  score_db_name,
#                  method_list=list(bw.methods),
#                  biosphere='biosphere3',
#                  overwrite=False):
#         """Generate an LCI database where biosphere exchanges are replaced
#         with gate-to-gate single scores.
#
#         #TODO: Refactor to speed up: currently runs unbearably slowly.
#         Instantiating the class will generate the data for the new database.
#         The `generate` method will write the database.
#
#         Parameters
#         ----------
#         up_db_name: str
#             Name of the unit process database from which results will be
#             calculated. Must be a registered database name.
#         score_db_name: str
#             Name of the new aggregated database.
#         method_list: list, default list(bw.methods)
#             List of method ids (tuples) for which to generate LCIA scores.
#             Default is all methods.
#         biosphere: str, default 'biosphere3'
#             Name of the biosphere database
#         overwrite: bool, default False
#             Determines whether an existing database with name `score_db_name`
#             will be overwritten.
#         """
#         print("WARNING: WIP. Runs at very slow speeds, needs to be "
#               "refactored. We suggest halting unless you really need this.")
#         assert up_db_name in bw.databases, "Source database does not exist"
#         if score_db_name in bw.databases and not overwrite:
#             print("A database named {} already exists, set `overwrite` "
#                   "to True to overwrite".format(score_db_name))
#             return
#         self.source = bw.Database(up_db_name)
#         self.new_name = score_db_name
#         self.biosphere = biosphere
#         self.lca = bw.LCA({self.source.random(): 1})
#         self.lca.lci()
#         self.methods = method_list
#         self._get_impacts()
#
#     def check_methods(self):
#         for method in self.methods:
#             m = bw.Method(method)
#             ef_code = m.get_abbreviation()
#             if (self.biosphere, ef_code) not in bw.Database(self.biosphere):
#                 raise ValueError("Unit biosphere exchange for {} not in {} "
#                                  "database".format(method, self.biosphere))
#             if (self.biosphere, ef_code) not in [cf[0] for cf in m.load()]:
#                 raise ValueError("Unit impact characterization factor "
#                                  "doesn't exist for {}".format(method))
#
#     def __len__(self):
#         return len(self.source)
#
#     def __iter__(self):
#         # Data for this line:
#         # wrong_database = {key[0] for key in data}.difference({self.name})
#         yield ((self.new_name,))
#
#     def _get_techno_exchanges(self, act):
#         excs = []
#         for techno in act.technosphere():
#             data = techno.as_dict()
#             data['input'] = (self.new_name, data['input'][1])
#             data['output'] = (self.new_name, data['output'][1])
#             excs.append(data)
#         for prod in act.production():
#             data = prod.as_dict()
#             data['input'] = (self.new_name, data['input'][1])
#             data['output'] = (self.new_name, data['output'][1])
#             excs.append(data)
#         return excs
#
#     def _get_impacts(self):
#         self.impact_dict = {}
#         for method in self.methods:
#             self.lca.switch_method(method)
#             self.impact_dict[method] = (
#                 self.lca.characterization_matrix
#                 * self.lca.biosphere_matrix.toarray()
#             ).sum(axis=0)
#
#     def _get_scores(self, act):
#         return [
#             {
#                 'input': (self.biosphere, bw.Method(method).get_abbreviation()),
#                 'output': (self.new_name, act['code']),
#                 'amount': self.impact_dict[method][self.lca.activity_dict[act.key]],
#                 'name': 'Unit impact for {}'.format(method),
#                 'unit': bw.Method(method).metadata['unit'],
#                 'comment': "Aggregated gate-to-gate impact for {}".format(method),
#                 'type': 'biosphere',
#                 'uncertainty type': 0,
#             } for method in self.methods
#         ]
#
#     def keys(self):
#         # Data for this line:
#         # mapping.add(data.keys())
#         for act in self.source:
#             yield (self.new_name, act['code'])
#
#     def values(self):
#         # Data for this line:
#         # geomapping.add({x["location"] for x in data.values() if x.get("location")})
#         for act in self.source:
#             yield act
#
#     def items(self):
#         # Actual data which is consumed by the function writing to the database
#         for i, act in enumerate(self.source):
#             self.lca.redo_lci({act: act['production amount']})
#             obj = copy.deepcopy(act._data)
#             obj['database'] = self.new_name
#             techno = self._get_techno_exchanges(act)
#             scores = self._get_scores(act)
#             print(i, act, len(techno), len(scores))
#
#             obj['exchanges'] = techno + scores
#             yield ((self.new_name, obj['code']), obj)
#
#     def generate(self):
#         bw.Database(self.new_name).write(self)

def contribution_LCA_to_df(datasets,
                           cats=['CC', 'R_Total'],
                           amount=1,
                           names=['name', 'location']):
    """Calculate a foreground contribution LCA of a list of datasets and
    return a multi-index dataframe.
    """
    results = {}
    codes = {}
    index_dict = {}
    if datasets and cats:
        lca = bw.LCA({datasets[0]: 1}, method=lcia_methods[cats[0]])
        lca.lci()
        lca.lcia()
    else:
        raise ValueError(
            "No datasets or impact categories found. "
            "Provide at least one dataset and one impact category.")
    for ds in datasets:
        index_dict[ds['code']] = tuple(ds[i] for i in names)
    for cat in cats:
        lca.switch_method(lcia_methods[cat])
        cf_dict = dict(bw.Method(lcia_methods[cat]).load())
        codes[cat] = {}
        results[cat] = {}
        for dataset in datasets:
            for exc in dataset.technosphere():
                existing_value = 0
                if (dataset['code'], exc.input['name']) in results[cat].keys():
                    existing_value = results[cat][(dataset['code'],
                                                   exc.input['name'])]
                if exc['amount'] == 0:
                    continue
                if exc['input'] in codes[cat]:
                    results[cat][(dataset['code'], exc.input['name'])] = \
                        amount * codes[cat][exc['input']] * exc['amount'] \
                        + existing_value
                else:
                    lca.redo_lcia({exc.input: exc['amount']})
                    results[cat][(dataset['code'], exc.input['name'])] = \
                        lca.score * amount + existing_value
                    codes[cat][exc['input']] = (lca.score / exc['amount'])
            for exc in dataset.biosphere():
                # Not all flows are characterized
                if exc.input in cf_dict:
                    existing_value = 0
                    if (dataset['code'], exc.input['name']) in results[cat].keys():
                        existing_value = results[cat][(dataset['code'],
                                                       exc.input['name'])]
                    results[cat][(dataset['code'], exc.input['name'])] = \
                        amount * exc['amount'] * cf_dict[exc.input] \
                        + existing_value
    for cat in category_group:
        if cat in results.keys():
            for key in results[cat].keys():
                if 'CED' not in results.keys():
                    results['CED'] = {}
                try:
                    results['CED'][key] += results[cat][key]
                except KeyError:
                    results['CED'][key] = results[cat][key]
            del results[cat]
    return pd.DataFrame(results).unstack().sort_index(axis=1).rename(
        index=index_dict)

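# --- Usage sketch (illustrative) ---
# Assumes `lcia_methods` maps 'CC' to a registered method tuple and that
# `category_group` is defined; the database name is a placeholder.
def _demo_contribution_lca_to_df():
    datasets = [bw.Database('ecoinvent').random()]
    return contribution_LCA_to_df(datasets, cats=['CC'])
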
def _method_unit(method):
    if method in UNIT_OVERRIDE:
        return UNIT_OVERRIDE[method]
    return bw.Method(method).metadata['unit']

product_system_depth = 4

# Output
contribution_df = pd.DataFrame()

for year in year_list:
    print(year)
    utils_update.dbUpdate_ElecAluLiq(bw_db_name=bw_db_name, year=year)
    utils_update.dbUpdate_EnerAlumina(bw_db_name=bw_db_name, year=year)
    utils_update.dbUpdate_cons_mix(bw_db_name=bw_db_name, year=year,
                                   mineral='bauxite')
    utils_update.dbUpdate_cons_mix(bw_db_name=bw_db_name, year=year,
                                   mineral='alumina')
    for act in act_list:
        for method in mining_recipe_method:
            Method = bw.Method(method)
            functional_unit = {act: 1}
            # Get the spatial contribution
            temp_dtf = utils_bw.traverse_tagged_databases_to_dataframe(
                functional_unit,
                method,
                label="location_tag",
                default_tag='GLO',
                secondary_tag=(None, None),
                product_system_depth=product_system_depth)
            temp_dtf = temp_dtf.rename(
                columns={'location_tag': 'Spatial_contribution'})
            temp_dtf = temp_dtf.join(
                pd.DataFrame(
                    {
                        'Year': year,