def _retrieve_activities(data: Union[tuple, Iterator[tuple]]) -> list:
    """Resolve one key-tuple or an iterable of key-tuples to activities.

    Parameters
    ----------
    data : tuple or iterable of tuples
        A single brightway activity key, or an iterable of keys.

    Returns
    -------
    list
        A list of activity objects — always a list, even for a single key.
    """
    # BUG FIX: the annotation claimed `Iterator[Activity]`, but both branches
    # return a plain list; the annotation now matches the actual return type.
    if isinstance(data, tuple):
        return [bw.get_activity(data)]
    return [bw.get_activity(k) for k in data]
def __init__(self, cs_name: str):
    """Prepare a multi-LCA helper for the named brightway calculation setup.

    Builds forward/reverse indexes for the setup's reference flows and
    impact-assessment methods, runs an initial (factorized) LCI, caches one
    characterization matrix per method, and allocates the result arrays
    that later calculation steps fill in.

    Raises
    ------
    ValueError
        If `cs_name` is not a registered `calculation_setup`.
    """
    try:
        cs = bw.calculation_setups[cs_name]
    except KeyError:
        raise ValueError(
            "{} is not a known `calculation_setup`.".format(cs_name)
        )
    # reference flows and related indexes
    self.func_units = cs['inv']
    self.fu_activity_keys = [list(fu.keys())[0] for fu in self.func_units]
    self.fu_index = {k: i for i, k in enumerate(self.fu_activity_keys)}
    self.rev_fu_index = {v: k for k, v in self.fu_index.items()}
    # Methods and related indexes
    self.methods = cs['ia']
    self.method_index = {m: i for i, m in enumerate(self.methods)}
    self.rev_method_index = {v: k for k, v in self.method_index.items()}
    # initial LCA and prepare method matrices
    self.lca = self._construct_lca()
    # factorize=True caches the technosphere factorization for reuse.
    self.lca.lci(factorize=True)
    self.method_matrices = []
    for method in self.methods:
        self.lca.switch_method(method)
        self.method_matrices.append(self.lca.characterization_matrix)
    # One score cell per (reference flow, method) pair.
    self.lca_scores = np.zeros((len(self.func_units), len(self.methods)))
    # data to be stored
    (self.rev_activity_dict, self.rev_product_dict,
     self.rev_biosphere_dict) = self.lca.reverse_dict()
    # Scaling
    self.scaling_factors = dict()
    # Technosphere product flows for a given reference flow
    self.technosphere_flows = dict()
    # Life cycle inventory (biosphere flows) by reference flow
    self.inventory = dict()
    # Inventory (biosphere flows) for specific reference flow (e.g. 2000x15000) and impact category.
    self.inventories = dict()
    # Inventory multiplied by scaling (relative impact on environment) per impact category.
    self.characterized_inventories = dict()
    # Summarized contributions for EF and processes.
    self.elementary_flow_contributions = np.zeros(
        (len(self.func_units), len(self.methods),
         self.lca.biosphere_matrix.shape[0]))
    self.process_contributions = np.zeros(
        (len(self.func_units), len(self.methods),
         self.lca.technosphere_matrix.shape[0]))
    # TODO: get rid of the below
    self.func_unit_translation_dict = {
        str(bw.get_activity(list(func_unit.keys())[0])): func_unit
        for func_unit in self.func_units
    }
    # If two activities stringify identically the dict collapses; rebuild
    # with "<activity> <key>" labels to keep one entry per reference flow.
    if len(self.func_unit_translation_dict) != len(self.func_units):
        self.func_unit_translation_dict = {}
        for fu in self.func_units:
            act = bw.get_activity(next(iter(fu)))
            self.func_unit_translation_dict["{} {}".format(act, act[0])] = fu
    self.func_key_dict = {m: i for i, m in enumerate(self.func_unit_translation_dict.keys())}
    self.func_key_list = list(self.func_key_dict.keys())
def compound_tflow(act, tflow_name, literal=False):
    '''Aggregate same-name technosphere exchanges of an activity.

    For activities with several technosphere exchanges sharing one name,
    sum their amounts into a single exchange whose metadata is taken from
    the provider with the largest amount (the "most important provider").
    Uncertainty information of the merged flows is lost.

    parameters:
    ----------
    act: brightway2 activity
        activity to modify
    tflow_name: string
        name that identifies the flow to be aggregated
    literal: bool
        if true, tflow_name should exactly match the name of the
        identified flow. This is used for unambiguous identification

    returns:
        brightway2 activity
        activity with some technosphere flows aggregated.

    raises:
        ValueError if the matched exchanges do not share exactly one name.
    '''
    tf = find_tflow(act, tflow_name, literal=literal)
    # The name of the matched fuel flows must be unique.
    if len({t['name'] for t in tf}) != 1:
        raise ValueError('incorrect fuel identification')
    # A single matching exchange needs no aggregation.
    if len(tf) > 1:
        f_amount = 0
        for f in tf:
            print(f['name'], f['amount'],
                  bw.get_activity(f['input'])['name'],
                  bw.get_activity(f['input'])['location'])
            f_amount += f['amount']
        # Base the new exchange on the flow with the highest amount;
        # max() keeps the first maximum on ties, as the old scan loop did.
        selected_flow = max(tf, key=lambda f: f['amount'])
        newflow = act.new_exchange(
            flow=selected_flow['flow'],
            unit=selected_flow['unit'],
            type=selected_flow['type'],
            name=selected_flow['name'],
            input=selected_flow['input'],
            comment='aggregation of fuels, uncertainty lost',
            amount=f_amount)
        newflow.save()
        for f in tf:
            f.delete()
    return act
def sa_pandas_init(self):
    """
    Initialize a dataframe to store sensitivity indices later on.

    Returns
    -------
    A GSAinLCA object that contains self.sensitivity_indices_df dataframe
        with columns: 'Products or flows' and 'Activities'
        corresponding to inputs and outputs of exchanges resp.
        For parameters these values coincide.
        index: consecutive numbers of the varied exchanges/parameters.
    """
    lca = self.lca
    ind_activity = 0
    ind_product = 1
    ind_biosphere = 2
    # Hoisted: reverse_dict() was recomputed for every parameter row.
    reverse = lca.reverse_dict()

    cols, rows, inputs = [], [], []
    # All exchanges in inputs
    for input_ in self.inputs:
        if input_ == 'biosphere':
            continue
        for i in self.inputs_dict[input_]['tech_params']:
            act = reverse[ind_activity][i['col']]
            prod = reverse[ind_product][i['row']]
            cols.append(bw.get_activity(act)['name'])
            rows.append(bw.get_activity(prod)['name'])
            inputs.append(input_)
        for j in self.inputs_dict[input_]['bio_params']:
            act = reverse[ind_activity][j['col']]
            bio = reverse[ind_biosphere][j['row']]
            cols.append(bw.get_activity(act)['name'])
            # BUG FIX: previously looked up `prod` (stale value from the
            # tech_params loop) instead of the biosphere flow `bio`.
            rows.append(bw.get_activity(bio)['name'])
            inputs.append(input_)

    if self.parameters is not None:
        # All parameters: inputs and outputs coincide.
        parameters_names_list = list(self.parameters_array['name'])
        cols += parameters_names_list
        rows += parameters_names_list
        inputs += ['Parameters'] * len(parameters_names_list)

    df = pd.DataFrame([inputs, rows, cols],
                      index=['Inputs', 'Products or flows', 'Activities'])
    self.sensitivity_indices_df = df.transpose()
def build_exchanges(cls, act_param, parent: TreeItem) -> None:
    """ Take the given activity parameter, retrieve the matching activity
    and construct tree-items for each exchange with a `formula` field.
    """
    activity = bw.get_activity((act_param.database, act_param.code))
    formula_excs = (e for e in activity.exchanges() if "formula" in e)
    for exchange in formula_excs:
        provider = bw.get_activity(exchange.input)
        row = [
            provider.get("name"),
            parent.data(1),
            exchange.amount,
            exchange.get("formula"),
        ]
        parent.appendChild(cls(row, parent))
def add_exchanges(self, from_keys, to_key):
    """Create one unit exchange from each `from_keys` activity into `to_key`,
    typed by the source activity (production / technosphere / biosphere).
    """
    target = bw.get_activity(to_key)
    for source_key in from_keys:
        source = bw.get_activity(source_key)
        exchange = target.new_exchange(input=source_key, amount=1)
        if source_key == to_key:
            exc_type = 'production'
        elif source.get('type', 'process') == 'process':
            exc_type = 'technosphere'
        elif source.get('type') == 'emission':
            exc_type = 'biosphere'
        else:
            exc_type = 'unknown'
        exchange['type'] = exc_type
        exchange.save()
    signals.database_changed.emit(to_key[0])
def copy_to_db(self, activity_key):
    """Interactively copy an activity into another (non-biosphere) database."""
    origin_db = activity_key[0]
    activity = bw.get_activity(activity_key)
    # TODO: Exclude read-only dbs from target_dbs as soon as they are implemented
    available_target_dbs = sorted(
        set(bw.databases).difference({'biosphere3', origin_db}))
    if not available_target_dbs:
        QtWidgets.QMessageBox.information(
            None,
            "No target database",
            "No valid target databases available. Create a new database first."
        )
        return
    target_db, ok = QtWidgets.QInputDialog.getItem(
        None,
        "Copy activity to database",
        "Target database:",
        available_target_dbs,
        0,
        False
    )
    if not ok:
        return
    new_code = self.generate_copy_code((target_db, activity['code']))
    activity.copy(code=new_code, database=target_db)
    # Only process the database immediately if it is small.
    if len(bw.Database(target_db)) < 200:
        bw.databases.clean()
    signals.database_changed.emit(target_db)
    signals.databases_changed.emit()
def replace(parameters, gt_model):
    """Re-run the conventional geothermal model and rebuild the exchanges of
    the conventional electricity-production activity from the model output.

    NOTE(review): the exchange-insertion step looks like a work-in-progress —
    it only prints the would-be exchanges; the real `new_exchange` call is
    commented out below. Confirm before relying on this function.
    """
    # CONVENTIONAL GEOTHERMAL
    parameters.static()
    gt_model.run(parameters)
    params_sta_conv = gt_model.array_io
    # Lookup activities; only the conventional electricity key is used here.
    _, _, _, _, _, _, _, _, _, _, _, _, _, _, electricity_prod_conventional, _, = lookup_geothermal()
    act = bw.get_activity(electricity_prod_conventional)
    # NOTE(review): the search text (" zeros") does not match the copy name
    # (" (zeros)"), and the copy's return value is discarded while the
    # ORIGINAL activity's exchanges are deleted below — confirm this is the
    # intended behavior.
    if not bw.Database("geothermal energy").search(act["name"] + " zeros"):
        act.copy(name=act["name"] + " (zeros)")
    # Delete all exchanges
    for exc in act.exchanges():
        exc.delete()
    # Insert new exchanges
    for inp in params_sta_conv:
        if inp['input_db'] != "biosphere3":
            print(inp)
            # act.new_exchange(input = (inp['input_db'],inp['input_code']), amount = float(inp['amount']), type= "technosphere").save()
        else:
            print(type(tuple((str(inp['input_db']), str(inp['input_code'])))))
            print(float(inp['amount']))
def get_multilca_to_dataframe(MultiLCA):
    '''Return a long dataframe with the LCA scores of the multi LCA.

    Input arguments:
        *``MultiLCA``: a MultiLCA object already calculated

    Returns:
        *Return a long dataframe. Columns:
        ('Database', 'Code', 'Name', 'Location', 'Unit', 'Amount_fu',
         'Method_name', 'Midpoint', 'Midpoint_abb', 'Midpoint_unit', 'Score')
    '''
    # One (activity, amount) pair per functional unit.
    as_activities = [
        (bw.get_activity(key), amount)
        for dct in MultiLCA.func_units
        for key, amount in dct.items()
    ]
    # Wide score matrix: rows = activity codes, columns = method midpoints.
    scores = pd.DataFrame(
        data=MultiLCA.results,
        columns=[method[1] for method in MultiLCA.methods],
        index=[act[0]['code'] for act in as_activities])
    # Flatten to long form: one row per (functional unit, method).
    nicer_fu = pd.DataFrame(
        [(x['database'], x['code'], x['name'], x['location'], x['unit'], y,
          method[0], method[1], method[2],
          bw.Method(method).metadata['unit'],
          scores.loc[x['code'], method[1]])
         for x, y in as_activities
         for method in MultiLCA.methods],
        columns=('Database', 'Code', 'Name', 'Location', 'Unit', 'Amount_fu',
                 'Method_name', 'Midpoint', 'Midpoint_abb', 'Midpoint_unit',
                 'Score'))
    return nicer_fu
def dropEvent(self, event):
    """Append a reference-flow row for each dropped process-type activity."""
    dropped_keys = [item.key for item in event.source().selectedItems()]
    for key in dropped_keys:
        act = bw.get_activity(key)
        # Only process-type activities can serve as reference flows.
        if act.get('type', 'process') != "process":
            continue
        row = self.rowCount()
        self.insertRow(row)
        self.setItem(row, 0, ABTableItem(act['name'], key=key, color="name"))
        amount_item = ABTableItem(
            "1.0", key=key,
            set_flags=[QtCore.Qt.ItemIsEditable], color="amount")
        self.setItem(row, 1, amount_item)
        self.setItem(row, 2, ABTableItem(
            act.get('unit', 'Unknown'), key=key, color="unit"))
    event.accept()
    signals.calculation_setup_changed.emit()
    self.resizeColumnsToContents()
    self.resizeRowsToContents()
def delete_parameter(self, proxy) -> None:
    """ Override the base method to include additional logic.

    If there are multiple `ActivityParameters` for a single activity, only
    delete the selected instance, otherwise use
    `bw.parameters.remove_from_group` to clear out the
    `ParameterizedExchanges` as well.
    """
    key = self.get_key(proxy)
    # All parameters registered for this (database, code) pair.
    query = (ActivityParameter.select().where(
        ActivityParameter.database == key[0],
        ActivityParameter.code == key[1]))
    if query.count() > 1:
        # Other parameters remain on this activity: drop only the selection.
        super().delete_parameter(proxy)
    else:
        # Last parameter of the activity: detach it from its group, which
        # also clears the associated ParameterizedExchanges.
        act = bw.get_activity(key)
        group = self.get_current_group(proxy)
        bw.parameters.remove_from_group(group, act)
        # Also clear the group if there are no more parameters in it
        if ActivityParameter.get_or_none(group=group) is None:
            with bw.parameters.db.atomic():
                Group.get(name=group).delete_instance()
    bw.parameters.recalculate()
    signals.parameters_changed.emit()
def get_json_data(data) -> str:
    """Transform bw.Graphtraversal() output to JSON data."""
    lca = data["lca"]
    total_score = lca.score
    unit = bw.Method(lca.method).metadata["unit"]
    demand = list(lca.demand.items())[0]
    rev_act_dict = {v: k for k, v in lca.activity_dict.items()}

    make_node = Graph.compose_node_builder(total_score, unit, demand[0])
    make_edge = Graph.compose_edge_builder(rev_act_dict, total_score, unit)

    # Index -1 marks the virtual root node; skip it on both ends.
    nodes = [
        make_node(bw.get_activity(rev_act_dict[idx]), value)
        for idx, value in data["nodes"].items()
        if idx != -1
    ]
    edges = [
        make_edge(edge)
        for edge in data["edges"]
        if edge["from"] != -1 and edge["to"] != -1
    ]
    json_data = {
        "nodes": nodes,
        "edges": edges,
        "title": Graph.build_title(demand, total_score, unit),
        "max_impact": max(abs(n["cum"]) for n in data["nodes"].values()),
    }
    return json.dumps(json_data)
def delete_activity(self, key):
    """Delete the activity behind `key`, unless downstream consumers exist.

    An activity whose reference product is consumed elsewhere cannot be
    removed; the user is told to modify/delete those exchanges first.
    Any activity parameters attached to the activity are removed with it.
    """
    act = bw.get_activity(key)
    # `upstream()` yields the exchanges that CONSUME this activity.
    nu = len(act.upstream())
    if nu:
        text = "activities consume" if nu > 1 else "activity consumes"
        QtWidgets.QMessageBox.information(
            None,
            "Not possible.",
            """Can't delete {}. {} upstream {} its reference product. Upstream exchanges must be modified or deleted.""".format(act, nu, text)
        )
    else:
        # Check if the activity is parameterized:
        # NOTE(review): `act[0]` / `act[1]` index the Activity object, not
        # its key tuple; sibling code writes this as `key[0]` / `key[1]` —
        # confirm this actually resolves to (database, code).
        query = ActivityParameter.select().where(
            ActivityParameter.database == act[0],
            ActivityParameter.code == act[1]
        )
        if query.exists():
            # Remove all activity parameters
            Controller.delete_activity_parameter(act.key)
        act.delete()
        bw.databases.set_modified(act["database"])
        signals.metadata_changed.emit(act.key)
        signals.database_changed.emit(act["database"])
        signals.databases_changed.emit()
        signals.calculation_setup_changed.emit()
def format_activity_label(act, style='pnl', max_length=40):
    """Return a wrapped text label for the activity behind key `act`.

    Styles: 'pnl' (product/name/location on separate lines), 'pl'
    (product-or-name, location), 'key' (the activity key), 'bio'
    (name + categories); any other value falls back to 'pnl'. On any
    lookup failure the raw key is returned as text.
    """
    try:
        a = bw.get_activity(act)
        if style == 'pnl':
            label = wrap_text(
                '\n'.join([a.get('reference product', ''),
                           a.get('name', ''),
                           a.get('location', '')]),
                max_length=max_length)
        elif style == 'pl':
            # BUG FIX: this branch hard-coded max_length=40, silently
            # ignoring the caller-supplied max_length parameter.
            label = wrap_text(', '.join(
                [a.get('reference product', '') or a.get('name', ''),
                 a.get('location', ''), ]),
                max_length=max_length)
        elif style == 'key':
            label = wrap_text(str(a.key))  # safer to use key, code does not always exist
        elif style == 'bio':
            label = wrap_text(',\n'.join(
                [a.get('name', ''), str(a.get('categories', ''))]),
                max_length=30)
        else:
            label = wrap_text(
                '\n'.join([a.get('reference product', ''),
                           a.get('name', ''),
                           a.get('location', '')]))
    # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # narrowed to Exception.
    except Exception:
        if isinstance(act, tuple):
            return wrap_text(str(''.join(act)))
        return wrap_text(str(act))
    return label
def test_succceed_open_activity(ab_app):
    """ Create a tiny test database with a production activity """
    assert bw.projects.current == "pytest_project"
    database = bw.Database("testdb")
    act_key = ("testdb", "act1")
    production = {"input": act_key, "amount": 1, "type": "production"}
    database.write({
        act_key: {
            "name": "act1",
            "unit": "kilogram",
            "exchanges": [production],
        }
    })
    details_tab = ab_app.main_window.right_panel.tabs["Activity Details"]
    # Select the activity and emit signal to trigger opening the tab
    act = bw.get_activity(act_key)
    signals.open_activity_tab.emit(act_key)
    assert len(details_tab.tabs) == 1
    assert act_key in details_tab.tabs
    # Opening the tab changes the QTabWidget's current index.
    current = details_tab.currentIndex()
    assert act.get("name") == details_tab.tabText(current)
def update_calculation_setup(self, cs_name=None):
    """Update Calculation Setup, functional units and methods, and dropdown menus."""
    # Block signals while the dropdowns are rebuilt.
    self.func_unit_cb.blockSignals(True)
    self.method_cb.blockSignals(True)

    if not cs_name:
        cs_name = self.cs
    self.cs = cs_name
    setup = bw.calculation_setups[cs_name]

    self.func_unit_cb.clear()
    self.func_units = [
        {bw.get_activity(key): amount for key, amount in fu.items()}
        for fu in setup['inv']
    ]
    self.func_unit_cb.addItems(
        [repr(next(iter(fu.keys()))) for fu in self.func_units])

    self.method_cb.clear()
    self.methods = setup['ia']
    self.method_cb.addItems([repr(m) for m in self.methods])

    self.func_unit_cb.blockSignals(False)
    self.method_cb.blockSignals(False)
def delete_parameter(self, parameter: ParameterBase) -> None:
    """ Remove the given parameter from the project.

    If there are multiple `ActivityParameters` for a single activity, only
    delete the selected instance, otherwise use
    `bw.parameters.remove_from_group` to clear out the
    `ParameterizedExchanges` as well.
    """
    if not isinstance(parameter, ActivityParameter):
        # Project/database parameters can simply be dropped.
        with bw.parameters.db.atomic():
            parameter.delete_instance()
    else:
        db = parameter.database
        code = parameter.code
        siblings = (ActivityParameter.select()
                    .where((ActivityParameter.database == db)
                           & (ActivityParameter.code == code))
                    .count())
        if siblings > 1:
            # Other parameters remain on this activity: drop only this one.
            with bw.parameters.db.atomic():
                parameter.delete_instance()
        else:
            # Last parameter: detach the activity from its group, clearing
            # the ParameterizedExchanges as well.
            group = parameter.group
            act = bw.get_activity((db, code))
            bw.parameters.remove_from_group(group, act)
            # Also clear the group if there are no more parameters in it
            group_in_use = (ActivityParameter.select()
                            .where(ActivityParameter.group == group).exists())
            if not group_in_use:
                with bw.parameters.db.atomic():
                    Group.delete().where(Group.name == group).execute()
    # After deleting things, recalculate and signal changes
    bw.parameters.recalculate()
    signals.parameters_changed.emit()
def append_row(self, key, amount='1.0'):
    """Append one reference-flow row for `key`, with an editable amount cell.

    A key that cannot be resolved is reported to stdout instead of
    aborting the whole table refresh.
    """
    try:
        act = bw.get_activity(key)
        row = self.rowCount()
        self.insertRow(row)
        self.setItem(row, 0, ABTableItem(
            amount, key=key,
            set_flags=[QtCore.Qt.ItemIsEditable], color="amount"))
        self.setItem(row, 1, ABTableItem(act.get('unit'), key=key, color="unit"))
        self.setItem(row, 2, ABTableItem(
            act.get('reference product'), key=key, color="product"))
        self.setItem(row, 3, ABTableItem(act.get('name'), key=key, color="name"))
        self.setItem(row, 4, ABTableItem(
            str(act.get('location')), key=key, color="location"))
        self.setItem(row, 5, ABTableItem(
            act.get('database'), key=key, color="database"))
    # BUG FIX: narrowed the bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to `except Exception`.
    except Exception:
        print("Could not load key in Calculation Setup: ", key)
def build_json_edge(edge: dict) -> dict:
    """Build the JSON-serializable dict for one traversal-graph edge."""
    # Hoisted: the "from" key was looked up twice in the original.
    from_key = reverse_dict[edge["from"]]
    to_key = reverse_dict[edge["to"]]
    producer = bw.get_activity(from_key)
    share = edge["impact"] / lca_score
    tooltip = ('<b>{}</b> ({:.2g} {})'
               '<br>{:.3g} {} ({:.2g}%) '.format(
                   lcia_unit, edge["amount"], producer.get("unit"),
                   edge["impact"], lcia_unit, share * 100,
               ))
    return {
        "source_id": from_key[1],
        "target_id": to_key[1],
        "amount": edge["amount"],
        "product": producer.get("reference product") or producer.get("name"),
        "impact": edge["impact"],
        "ind_norm": share,
        "unit": lcia_unit,
        "tooltip": tooltip,
    }
def modify_activity(self, key, field, value):
    """Set `field` on the activity behind `key`, save it, and notify listeners."""
    act = bw.get_activity(key)
    act[field] = value
    act.save()
    database = key[0]
    bw.databases.set_modified(database)
    signals.metadata_changed.emit(key)
    signals.database_changed.emit(database)
def test_exchange_interface(qtbot, ab_app):
    """A biosphere exchange without uncertainty data wraps into an
    ExchangeUncertaintyInterface with undefined uncertainty."""
    flow = bw.Database(bw.config.biosphere).random()
    database = bw.Database("testdb")
    act_key = ("testdb", "act_unc")
    exchanges = [
        {"input": act_key, "amount": 1, "type": "production"},
        {"input": flow.key, "amount": 2, "type": "biosphere"},
    ]
    database.write({
        act_key: {
            "name": "act_unc",
            "unit": "kilogram",
            "exchanges": exchanges,
        }
    })
    act = bw.get_activity(act_key)
    exc = next(e for e in act.biosphere())
    interface = get_uncertainty_interface(exc)
    assert isinstance(interface, ExchangeUncertaintyInterface)
    assert interface.amount == 2
    assert interface.uncertainty_type == UndefinedUncertainty
    assert interface.uncertainty == {}
def sync(self, name):
    """Rebuild the table from the named calculation setup's reference flows."""
    # Disconnect while rebuilding so programmatic edits don't fire the handler.
    self.cellChanged.disconnect(self.filter_amount_change)
    self.clear()
    self.setRowCount(0)
    self.setHorizontalHeaderLabels(self.HEADERS)
    for func_unit in bw.calculation_setups[name]['inv']:
        for key, amount in func_unit.items():
            act = bw.get_activity(key)
            row = self.rowCount()
            self.insertRow(row)
            self.setItem(row, 0, ABTableItem(act['name'], key=key, color="name"))
            amount_item = ABTableItem(
                amount, key=key,
                set_flags=[QtCore.Qt.ItemIsEditable], color="amount")
            self.setItem(row, 1, amount_item)
            self.setItem(row, 2, ABTableItem(
                act.get('unit', 'Unknown'), key=key, color="unit"))
    self.resizeColumnsToContents()
    self.resizeRowsToContents()
    self.cellChanged.connect(self.filter_amount_change)
def modify_activity(key: tuple, field: str, value: object) -> None:
    """Update one field of an activity, persist it, and broadcast the change."""
    act = bw.get_activity(key)
    act[field] = value
    act.save()
    database = key[0]
    bw.databases.set_modified(database)
    AB_metadata.update_metadata(key)
    signals.database_changed.emit(database)
def test_add_impact_scores_to_act_non_existing_db(data_for_testing):
    """Test adding agg dataset to non-existing database"""
    assert 'agg' not in databases
    methods = [data_for_testing['m1_name'], data_for_testing['m2_name']]
    shared_kwargs = dict(
        act_code='A',
        agg_db='agg',
        up_db='techno_UP',
        selected_methods=methods,
        biosphere='biosphere',
        overwrite=False,
        create_ef_on_the_fly=True,
    )
    # Refusing to create the target database must raise.
    with pytest.raises(ValueError):
        add_impact_scores_to_act(create_agg_database_on_fly=False,
                                 **shared_kwargs)
    # Allowing on-the-fly creation must succeed and write one dataset.
    add_impact_scores_to_act(create_agg_database_on_fly=True, **shared_kwargs)
    assert 'agg' in databases
    assert len(Database('agg')) == 1
    assert ('agg', 'A') in Database('agg')
    act = get_activity(('agg', 'A'))
    assert len(list(act.biosphere())) == 2
def show_duplicate_to_db_interface(self, activity_key):
    """Ask the user for a writable target database and duplicate the activity there."""
    origin_db = activity_key[0]
    activity = bw.get_activity(activity_key)
    available_target_dbs = [
        db for db in project_settings.get_editable_databases()
        if db != origin_db
    ]
    if not available_target_dbs:
        QtWidgets.QMessageBox.information(
            None,
            "No target database",
            "No valid target databases available. Create a new database or set one to writable (not read-only)."
        )
        return
    target_db, ok = QtWidgets.QInputDialog.getItem(
        None,
        "Copy activity to database",
        "Target database:",
        available_target_dbs,
        0,
        False
    )
    if ok:
        self.duplicate_activity_to_db(target_db, activity)
def test_add_impact_scores_to_act_existing_db(data_for_testing):
    """Test adding agg dataset to existing database"""
    Database('agg').register()
    assert 'agg' in databases
    assert len(Database('agg')) == 0
    m1 = data_for_testing['m1_name']
    m2 = data_for_testing['m2_name']
    add_impact_scores_to_act(
        act_code='A',
        agg_db='agg',
        up_db='techno_UP',
        selected_methods=[m1, m2],
        biosphere='biosphere',
        overwrite=False,
        create_ef_on_the_fly=True,
        create_agg_database_on_fly=False,
    )
    assert 'agg' in databases
    assert len(Database('agg')) == 1
    assert ('agg', 'A') in Database('agg')
    act = get_activity(('agg', 'A'))
    score_by_flow = {exc.input.key: exc['amount'] for exc in act.biosphere()}
    assert len(score_by_flow) == 2
    # Aggregated amounts must equal freshly computed LCIA scores.
    lca = LCA({('techno_UP', 'A'): 1}, method=m1)
    lca.lci()
    lca.lcia()
    assert lca.score == score_by_flow[
        ('biosphere', Method(m1).get_abbreviation())]
    lca.switch_method(method=m2)
    lca.lcia()
    assert lca.score == score_by_flow[
        ('biosphere', Method(m2).get_abbreviation())]
def dropEvent(self, event: QDropEvent) -> None:
    """ If the user drops an activity into the activity parameters table
    read the relevant data from the database and generate a new row.

    Also, create a warning if the activity is from a read-only database
    """
    db_table = event.source()
    read_only = project_settings.settings["read-only-databases"].get(
        db_table.database_name, True)
    if read_only:
        simple_warning_box(
            self, "Not allowed",
            "Cannot set activity parameters on read-only databases")
        return
    keys = [db_table.get_key(i) for i in db_table.selectedIndexes()]
    event.accept()
    # Block signals from `signals` while iterating through dropped keys.
    signals.blockSignals(True)
    for key in keys:
        act = bw.get_activity(key)
        if act.get("type", "process") != "process":
            simple_warning_box(
                self, "Not allowed",
                "Activity must be 'process' type, '{}' is type '{}'.".
                format(act.get("name"), act.get("type")))
            continue
        self.add_parameter(key)
    signals.blockSignals(False)
    signals.parameters_changed.emit()
def format_activity_label(key, style='pnl', max_length=40):
    """Return a wrapped text label for the activity behind `key`.

    Styles: 'pnl' (product/name/location on separate lines), 'pnl_'
    (pipe-separated), 'pnld' (pipe-separated, with database), 'pl'
    (product-or-name, location), 'key' (the activity key), 'bio'
    (name + categories); any other value falls back to 'pnl'. On any
    lookup failure the raw key is returned as wrapped text.
    """
    try:
        act = bw.get_activity(key)
        if style == 'pnl':
            label = '\n'.join([act.get('reference product', ''),
                               act.get('name', ''),
                               str(act.get('location', ''))])
        elif style == 'pnl_':
            label = ' | '.join([act.get('reference product', ''),
                                act.get('name', ''),
                                str(act.get('location', ''))])
        elif style == 'pnld':
            label = ' | '.join([act.get('reference product', ''),
                                act.get('name', ''),
                                str(act.get('location', '')),
                                act.get('database', ''), ])
        elif style == 'pl':
            label = ', '.join([act.get('reference product', '') or act.get('name', ''),
                               str(act.get('location', '')), ])
        elif style == 'key':
            label = str(act.key)  # safer to use key, code does not always exist
        elif style == 'bio':
            label = ',\n'.join([act.get('name', ''),
                                str(act.get('categories', ''))])
        else:
            label = '\n'.join([act.get('reference product', ''),
                               act.get('name', ''),
                               str(act.get('location', ''))])
    # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # narrowed to Exception.
    except Exception:
        if isinstance(key, tuple):
            return wrap_text(str(''.join(key)))
        return wrap_text(str(key))
    return wrap_text(label, max_length=max_length)
def get_CF_dataframe(lca, only_uncertain_CFs=True):
    """Returns a dataframe with the metadata for the characterization
    factors (in the biosphere matrix). Filters non-stochastic CFs
    if desired (default).

    Parameters
    ----------
    lca : LCA object with a populated `cf_params` structured array.
    only_uncertain_CFs : bool
        If True (default), skip CFs whose `uncertainty_type` is <= 1
        (presumably undefined / no-uncertainty entries — confirm against
        the stats_arrays uncertainty-type IDs).

    Returns
    -------
    pd.DataFrame
        One row per kept CF, holding the biosphere activity metadata, all
        `cf_params` fields, the matrix row 'index' and a 'GSA name'.
    """
    data = dict()
    for params_index, row in enumerate(lca.cf_params):
        if only_uncertain_CFs and row['uncertainty_type'] <= 1:
            continue
        cf_index = row['row']
        bio_act = bw.get_activity(lca.biosphere_dict_rev[cf_index])
        data[params_index] = bio_act.as_dict()
        # Copy every structured-array field onto the row.
        for name in row.dtype.names:
            data[params_index][name] = row[name]
        data[params_index]['index'] = cf_index
        data[params_index]['GSA name'] = (
            "CF: " + bio_act['name'] + str(bio_act['categories']))
    # BUG FIX: corrected typo "characteriation" -> "characterization"
    # in the user-facing message.
    print('CF filtering resulted in including {} of {} characterization factors.'.format(
        len(data),
        len(lca.cf_params),
    ))
    df = pd.DataFrame(data).T
    df.rename(columns={'uncertainty_type': 'uncertainty type'}, inplace=True)
    return df
def upstream_and_downstream_exchanges(key: tuple) -> tuple:
    """Returns the upstream and downstream Exchange objects for a key.

    (act.upstream refers to downstream exchanges; brightway is confused here.)

    Returns
    -------
    tuple
        (technosphere/input exchanges, consuming/"upstream" exchanges).
    """
    # BUG FIX: the previous annotation `-> (list, list)` was a tuple
    # literal, which is not a valid type annotation; replaced with `tuple`.
    activity = bw.get_activity(key)
    return list(activity.technosphere()), list(activity.upstream())