def compare_activities_by_lcia_score(activities, lcia_method, band=0.1):
    """Compare selected activities to see if they are substantially different.

    Activities are considered similar if all their LCIA scores lie within a band of
    ``band * max_lcia_score``; otherwise they are reported as substantially different.

    Inputs:

        ``activities``: List of ``Activity`` objects.
        ``lcia_method``: Tuple identifying a ``Method``.
        ``band``: Float. Width of the similarity band as a fraction of the largest LCIA score. Default is 0.1.

    Returns:

        Nothing, but prints to stdout.

    """
    import bw2calc as bc

    lca = bc.LCA({a: 1 for a in activities}, lcia_method)
    lca.lci()
    lca.lcia()

    # First pass: Are all scores close?
    scores = []

    for a in activities:
        lca.redo_lcia({a: 1})
        scores.append(lca.score)

    if abs(max(scores) - min(scores)) < band * abs(max(scores)):
        print("All activities similar")
        return
    else:
        print("Differences observed. LCA scores:")
        for x, y in zip(scores, activities):
            print("\t{:5.3f} -> {}".format(x, y))
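# Usage sketch (not part of the original source): the project name, database name,
# activity filter, and method tuple below are illustrative assumptions.
import bw2data as bd

bd.projects.set_current("my project")                      # assumed project
db = bd.Database("ecoinvent 3.7.1")                        # assumed database
candidates = [act for act in db if act["name"].startswith("electricity production")][:5]
ipcc = ("IPCC 2013", "climate change", "GWP 100a")
compare_activities_by_lcia_score(candidates, ipcc, band=0.1)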
def get_archetypes_scores_per_sector(co_name, year_habe, method, write_dir):
    """Get total LCIA scores for all archetypes for one year of consumption split over sectors."""

    co = bd.Database(co_name)
    archetypes = sorted([
        act for act in co
        if "archetype" in act['name'].lower() and str(year_habe) in act['name']
    ])
    # a1 = [act for act in co if f"archetype_z_years_{year_habe}" in act['name'].lower()]
    # a2 = [act for act in co if f"archetype_ob_years_{year_habe}" in act['name'].lower()]
    # archetypes = a1 + a2
    sectors = sorted([
        act for act in co
        if f"sector, years {year_habe}" in act['name'].lower()
    ])

    scores = {}
    for archetype in archetypes:
        print("--> {}".format(archetype['name']))
        fp_archetype_scores = write_dir / f"monthly_{archetype['name']}.pickle"
        if fp_archetype_scores.exists():
            scores_per_sector = read_pickle(fp_archetype_scores)
        else:
            scores_per_sector = {}
            for sector in sectors:
                demand_sector = get_demand_per_sector(archetype, sector)
                lca = bc.LCA(demand_sector, method)
                lca.lci()
                lca.lcia()
                # print("{:8.3f}  {}".format(lca.score, sector['name']))
                scores_per_sector[sector['name']] = lca.score
            write_pickle(scores_per_sector, fp_archetype_scores)
        scores[archetype['name']] = scores_per_sector
        # print("\n")
    return scores
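# Usage sketch (not part of the original source): assumes the helper functions used above
# (get_demand_per_sector, read_pickle, write_pickle) are importable, and that the project,
# database name, year, and method tuple exist.
from pathlib import Path

import bw2data as bd

bd.projects.set_current("GSA for archetypes")              # assumed project
write_dir = Path("write_files/archetype_scores")
write_dir.mkdir(parents=True, exist_ok=True)
method = ("IPCC 2013", "climate change", "GWP 100a")
scores = get_archetypes_scores_per_sector("swiss consumption 1.0", 2017, method, write_dir)
for archetype_name, per_sector in scores.items():
    print(archetype_name, sum(per_sector.values()))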
    def __init__(
        self,
        project,
        demand,
        exiobase_scores_precomputed,
        exiobase_name='EXIOBASE 2.2',
        ecoinvent_name='ecoinvent 3.6 cutoff',
        agribalyse_name='Agribalyse 1.3 - ecoinvent 3.6 cutoff',
        ch_consumption_name='CH consumption 1.0',
    ):
        # BW / LCA setup
        self.project = project
        bd.projects.set_current(self.project)
        self.demand = demand
        self.exiobase_scores_precomputed = exiobase_scores_precomputed
        self.methods = list(list(exiobase_scores_precomputed.values())[0].keys())
        self.lca = bc.LCA(self.demand, self.methods[0])
        self.lca.lci()
        self.lca.lcia()
        self.lca.build_demand_array()
        # Database names
        self.exiobase_name = exiobase_name
        self.ecoinvent_name = ecoinvent_name
        self.agribalyse_name = agribalyse_name
        self.ch_consumption_name = ch_consumption_name
        # Find database indices in matrices
        self.biosphere_without_exiobase, self.d_exiobase_adjusted = self.precompute()
        self.weights = self.compute_exiobase_weights()
def find_leaves(
    activity,
    lcia_method,
    results=None,
    lca_obj=None,
    amount=1,
    total_score=None,
    level=0,
    max_level=3,
    cutoff=2.5e-2,
):
    """Traverse the supply chain of an activity to find leaves - places where the impact of that
    component falls below a threshold value.

    Returns a list of ``(impact of this activity, amount consumed, Activity instance)`` tuples."""
    first_level = results is None

    if first_level:
        level = 0
        results = []

        lca_obj = bc.LCA({activity: amount}, lcia_method)
        lca_obj.lci()
        lca_obj.lcia()
        total_score = lca_obj.score
    else:
        lca_obj.redo_lcia({activity: amount})

        # If this is a leaf, add the leaf and return
        if abs(lca_obj.score) <= abs(
                total_score * cutoff) or level >= max_level:

            # Only add leaves with scores that matter
            if abs(lca_obj.score) > abs(total_score * 1e-4):
                results.append((lca_obj.score, amount, activity))
            return results

        else:
            # Add direct emissions from this CPC product
            direct = (lca_obj.characterization_matrix *
                      lca_obj.biosphere_matrix * lca_obj.demand_array).sum()
            if abs(direct) >= abs(total_score * 1e-4):
                results.append((direct, amount, activity))

    for exc in activity.technosphere():
        find_leaves(
            activity=exc.input,
            lcia_method=lcia_method,
            results=results,
            lca_obj=lca_obj,
            amount=amount * exc["amount"],
            total_score=total_score,
            level=level + 1,
            max_level=max_level,
            cutoff=cutoff,
        )

    return sorted(results, reverse=True)
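# Usage sketch (not part of the original source): the project and activity key are reused
# from the ecoinvent examples further down and are assumptions, as is the method tuple.
import bw2data as bd

bd.projects.set_current("ecoinvent 3.7.1 bw2")             # assumed project
act = bd.get_activity(("ecoinvent 3.7.1", "f57568b2e553864152a6ac920595216f"))
ipcc = ("IPCC 2013", "climate change", "GWP 100a")
leaves = find_leaves(act, ipcc, max_level=3, cutoff=2.5e-2)
for score, amount, leaf in leaves[:10]:
    print("{:8.3f}  {:10.4f}  {}".format(score, amount, leaf))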
Example #5
    def __init__(
        self,
        func_unit,
        method,
        uncertain_params,
        uncertain_params_selected_where_dict=None,
    ):
        self.func_unit = func_unit
        self.method = method
        self.lca = bc.LCA(self.func_unit, self.method)
        self.lca.lci()
        self.lca.lcia()
        self.uncertain_params = uncertain_params
        self.uncertain_exchanges_types = list(self.uncertain_params.keys())
        if uncertain_params_selected_where_dict is None:
            self.uncertain_params_selected_where_dict = {}
            for uncertain_exchange_type in self.uncertain_exchanges_types:
                params = self.get_params(uncertain_exchange_type)
                params_temp = []
                for p in self.uncertain_params[uncertain_exchange_type]:
                    where = np.where(
                        np.logical_and(
                            params["amount"] == p["amount"],
                            np.logical_and(
                                params["col"] == p["col"],
                                params["row"] == p["row"],
                            ),
                        )
                    )
                    assert len(where[0]) == 1
                    params_temp.append(where[0][0])
                self.uncertain_params_selected_where_dict[
                    uncertain_exchange_type
                ] = params_temp
        else:
            self.uncertain_params_selected_where_dict = (
                uncertain_params_selected_where_dict
            )
        self.num_params, self.uncertain_exchange_lengths = self.initialize(
            self.uncertain_params_selected_where_dict,
        )
        self.choices = uncertainty_choices
        method_unit = bd.Method(self.method).metadata["unit"]
        self.output_name = "LCIA scores, [{}]".format(method_unit)

        self.default_uncertain_amounts = get_amounts_shift(
            self.uncertain_params, shift_median=False
        )
        self.static_output = get_lca_score_shift(
            self.default_uncertain_amounts,
            self.uncertain_params_selected_where_dict,
            self.lca,
        )
        self.adjusted_score = self.static_output - self.lca.score
def setup_bw_project_archetypes(project="GSA for archetypes"):
    bd.projects.set_current(project)
    co = bd.Database("swiss consumption 1.0")
    demand_act = [act for act in co if "Food" in act["name"]]
    assert len(demand_act) == 1
    demand_act = demand_act[0]
    demand = {demand_act: 1}
    uncertain_method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
    lca = bc.LCA(demand, uncertain_method, use_distributions=False)
    lca.lci()
    lca.lcia()
    return lca
    def __init__(
        self,
        demand,
        method,
        write_dir,
    ):
        self.demand = demand
        self.method = method
        self.lca = bc.LCA(demand, method, use_distributions=True)
        self.lca.lci()
        self.lca.lcia()
        self.write_dir = Path(write_dir)
Example #8
    def __init__(
        self,
        func_unit,
        method,
        write_dir,
        num_params=None,
        uncertain_exchanges_types=("tech", "bio", "cf"),
    ):
        self.lca = bc.LCA(func_unit, method)
        self.lca.lci()
        self.lca.lcia()
        # for uncertain_exchange_type in uncertain_exchanges_types:
        #     uncertain_params[uncertain_exchange_type] = self.get_uncertain_params_all(uncertain_exchange_type)

        self.write_dir = Path(write_dir)
        self.make_dirs()
        if num_params is not None:
            self.scores_dict_raw = self.get_lsa_scores_pickle(
                self.write_dir / "LSA_scores", uncertain_exchanges_types
            )
            self.scores_dict = {}
            for exchanges_type in uncertain_exchanges_types:
                self.scores_dict[exchanges_type] = self.scores_dict_raw[exchanges_type]
            self.uncertain_params_selected_where_dict = (
                self.get_nonzero_params_from_num_params(self.scores_dict, num_params)
            )
        else:
            self.uncertain_params_selected_where_dict = {}
            for exchanges_type in uncertain_exchanges_types:
                self.uncertain_params_selected_where_dict[exchanges_type] = np.where(
                    self.get_params(exchanges_type)["uncertainty_type"] > 1
                )[0]
        uncertain_params = {}
        for uncertain_exchange_type in uncertain_exchanges_types:
            uncertain_params[uncertain_exchange_type] = self.get_params(
                uncertain_exchange_type
            )[self.uncertain_params_selected_where_dict[uncertain_exchange_type]]

        super().__init__(
            func_unit,
            method,
            uncertain_params,
            self.uncertain_params_selected_where_dict,
        )
        self.num_params, self.uncertain_exchange_lengths = self.initialize(
            self.uncertain_params_selected_where_dict,
        )
def get_archetypes_scores_per_month(co_name, year_habe, method,
                                    fp_archetypes_scores):
    """Get total LCIA scores for all archetypes for one month of consumption."""
    co = bd.Database(co_name)
    archetypes = sorted([
        act for act in co
        if "archetype" in act['name'].lower() and str(year_habe) in act['name']
    ])
    if fp_archetypes_scores.exists():
        archetypes_scores = read_pickle(fp_archetypes_scores)
    else:
        archetypes_scores = {}
        for demand_act in archetypes:
            lca = bc.LCA({demand_act: 1}, method)
            lca.lci()
            lca.lcia()
            archetypes_scores[demand_act['name']] = lca.score
        write_pickle(archetypes_scores, fp_archetypes_scores)
    return archetypes_scores
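# Usage sketch (not part of the original source): database name, year, method, and
# pickle path are illustrative assumptions.
from pathlib import Path

method = ("IPCC 2013", "climate change", "GWP 100a")
fp_scores = Path("write_files") / "archetypes_scores_monthly.pickle"
fp_scores.parent.mkdir(parents=True, exist_ok=True)
scores = get_archetypes_scores_per_month("swiss consumption 1.0", 2017, method, fp_scores)
for name, score in sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:5]:
    print("{:10.3f}  {}".format(score, name))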
def setup_lca_model_protocol_narrow_bio(path_base,
                                        num_params=None,
                                        write_dir=None):
    # LCA model
    bd.projects.set_current("GSA for protocol narrow bio")
    co = bd.Database("CH consumption 1.0")
    demand_act = [act for act in co if "Food" in act["name"]]
    assert len(demand_act) == 1
    demand_act = demand_act[0]
    demand = {demand_act: 1}
    method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
    # num_params
    if num_params is None:
        lca = bc.LCA(demand, method)
        lca.lci()
        lca.lcia()
        print("LCA score is {}".format(lca.score))
        n_uncertain_tech = len(
            lca.tech_params[lca.tech_params["uncertainty_type"] > 1])
        n_uncertain_bio = len(
            lca.bio_params[lca.bio_params["uncertainty_type"] > 1])
        n_uncertain_cf = len(
            lca.cf_params[lca.cf_params["uncertainty_type"] > 1])
        num_params_stats = n_uncertain_tech + n_uncertain_bio + n_uncertain_cf
        print("Total number of uncertain exchanges is {}".format(
            num_params_stats))
        print("   tech={}, bio={}, cf={}".format(n_uncertain_tech,
                                                 n_uncertain_bio,
                                                 n_uncertain_cf))
    # Define some variables
    if write_dir is None:
        write_dir = path_base / "protocol_gsa_narrow_bio"
    model = LCAModel(
        demand,
        method,
        write_dir,
        num_params=num_params,
        uncertain_exchanges_types=["tech", "bio", "cf"],
    )
    gsa_seed = 4000238
    return model, write_dir, gsa_seed
Example #11
import bw2data as bd, bw2calc as bc
bd.projects.set_current("ecoinvent 3.7.1 bw2")
bd.databases
a = bd.get_activity(('ecoinvent 3.7.1', 'f57568b2e553864152a6ac920595216f'))
ipcc = ('IPCC 2013', 'climate change', 'GWP 100a')

lca = bc.LCA(demand={a: 1}, method=ipcc)
lca.lci()
lca.lcia()

Example #12
from time import time

import numpy as np
import pyprind
import bw2calc as bc
from bw2data import Database, databases, mapping, methods


def contribution_for_all_datasets_one_method(database, method, progress=True):
    """Calculate contribution analysis (for technosphere processes) for all inventory datasets in one database for one LCIA method.

    Args:
        *database* (str): Name of database
        *method* (tuple): Method tuple

    Returns:
        NumPy array of relative contributions. Each column sums to one.
        Lookup dictionary, dataset keys to row/column indices
        Total elapsed time in seconds

    """
    def get_normalized_scores(lca, kind):
        if kind == "activities":
            data = lca.characterized_inventory.sum(axis=0)
        elif kind == "flows":
            data = lca.characterized_inventory.sum(axis=1)
        elif kind == "all":
            data = lca.characterized_inventory.data
        scores = np.abs(np.array(data).ravel())
        summed = scores.sum()
        if summed == 0:
            return np.zeros(scores.shape)
        else:
            return scores / summed

    start = time()
    assert database in databases, "Can't find database %s" % database
    assert method in methods, "Can't find method %s" % method
    keys = list(Database(database).load().keys())
    assert keys, "Database %s appears to have no datasets" % database

    # Instantiate LCA object
    lca = bc.LCA({keys[0]: 1}, method=method)
    lca.lci()
    lca.decompose_technosphere()
    lca.lcia()

    rows = lca.characterized_inventory.shape[0]
    cols = lca.characterized_inventory.shape[1]
    all_cutoff = cols * 4

    results = {
        "activities": np.zeros((cols, cols), dtype=np.float32),
        "flows": np.zeros((rows, cols), dtype=np.float32),
        "all": np.zeros((all_cutoff, cols), dtype=np.float32),
    }

    pbar = pyprind.ProgBar(len(keys), title="Activities:")

    # Actual calculations
    for key in keys:
        lca.redo_lcia({key: 1})
        if lca.score == 0.0:
            continue

        col = lca.activity_dict[mapping[key]]
        results["activities"][:,
                              col] = get_normalized_scores(lca, "activities")
        results["flows"][:, col] = get_normalized_scores(lca, "flows")
        results_all = get_normalized_scores(lca, "all")
        results_all.sort()
        results_all = results_all[::-1]
        fill_number = results_all.shape[0]
        assert fill_number < all_cutoff, "Too many values in 'all'"
        results["all"][:fill_number, col] = results_all

        pbar.update()

    print(pbar)

    return results, lca.activity_dict, time() - start
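# Usage sketch (not part of the original source): database and method names are
# assumptions; looping over every dataset can take a long time on large databases.
results, activity_dict, elapsed = contribution_for_all_datasets_one_method(
    "ecoinvent 3.7.1",
    ("IPCC 2013", "climate change", "GWP 100a"),
)
print("Finished in {:.1f} seconds".format(elapsed))
# Each column of results["activities"] sums to one (or is all zeros for zero-score datasets).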
Example #13
def print_recursive_calculation(
    activity,
    lcia_method,
    amount=1,
    max_level=3,
    cutoff=1e-2,
    file_obj=None,
    tab_character="  ",
    level=0,
    lca_obj=None,
    total_score=None,
    first=True,
):
    """Traverse a supply chain graph, and calculate the LCA scores of each component. Prints the result with the format:

    {tab_character * level }{fraction of total score} ({absolute LCA score for this input} | {amount of input}) {input activity}

    Args:
        activity: ``Activity``. The starting point of the supply chain graph.
        lcia_method: tuple. LCIA method to use when traversing supply chain graph.
        amount: int. Amount of ``activity`` to assess.
        max_level: int. Maximum depth to traverse.
        cutoff: float. Fraction of total score to use as cutoff when deciding whether to traverse deeper.
        file_obj: File-like object (supports ``.write``), optional. Output will be written to this object if provided.
        tab_character: str. Character to use to indicate indentation.

    Internal args (used during recursion, do not touch):
        level: int.
        lca_obj: ``LCA``.
        total_score: float.
        first: bool.

    Returns:
        Nothing. Prints to ``sys.stdout`` or ``file_obj``

    """

    if lca_obj is None:
        lca_obj = bc.LCA({activity: amount}, lcia_method)
        lca_obj.lci()
        lca_obj.lcia()
        total_score = lca_obj.score
    elif total_score is None:
        raise ValueError
    else:
        lca_obj.redo_lcia({activity: amount})
        if abs(lca_obj.score) <= abs(total_score * cutoff):
            return
    if first:
        message = "Fraction of score | Absolute score | Amount | Activity"
        if file_obj is not None:
            file_obj.write(message + "\n")
        else:
            print(message)
    message = "{}{:04.3g} | {:5.4n} | {:5.4n} | {:.70}".format(
        tab_character * level,
        lca_obj.score / total_score,
        lca_obj.score,
        float(amount),
        str(activity),
    )
    if file_obj is not None:
        file_obj.write(message + "\n")
    else:
        print(message)
    if level < max_level:
        for exc in activity.technosphere():
            print_recursive_calculation(
                activity=exc.input,
                lcia_method=lcia_method,
                amount=amount * exc["amount"],
                max_level=max_level,
                cutoff=cutoff,
                first=False,
                file_obj=file_obj,
                tab_character=tab_character,
                lca_obj=lca_obj,
                total_score=total_score,
                level=level + 1,
            )
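# Usage sketch (not part of the original source): the activity key and method tuple are
# assumptions; pass ``file_obj`` to write the table to a file instead of stdout.
import bw2data as bd

act = bd.get_activity(("ecoinvent 3.7.1", "f57568b2e553864152a6ac920595216f"))
ipcc = ("IPCC 2013", "climate change", "GWP 100a")
print_recursive_calculation(act, ipcc, amount=1, max_level=2, cutoff=5e-2)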
Example #14
import bw2calc as bc
import bw2data as bd
import numpy as np
from gsa_framework.models.life_cycle_assessment import LCAModelBase, LCAModel

write_dir = (
    "/Users/akim/PycharmProjects/gsa-framework-master/dev/write_files/protocol_gsa"
)

bd.projects.set_current("GSA for protocol")
co = bd.Database("CH consumption 1.0")
demand_act = [act for act in co if "Food" in act["name"]]
assert len(demand_act) == 1
demand_act = demand_act[0]
demand = {demand_act: 1}
method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
lca = bc.LCA(demand, method)
lca.lci()
lca.lcia()
iterations = 10

model2 = LCAModel(
    demand,
    method,
    write_dir,
    num_params=None,
    uncertain_exchanges_types=("bio", "cf"),
)
num_params2 = len(model2)
np.random.seed(234333)
X2 = np.random.rand(iterations, num_params2)
Xr2 = model2.rescale(X2)
Example #15
from pathlib import Path

import bw2calc as bc
import bw2data as bd
import numpy as np
import stats_arrays as sa

# path_base = Path('/data/user/kim_a')
path_base = Path(
    "/Users/akim/PycharmProjects/gsa-framework-master/dev/write_files/")
write_dir = path_base / "protocol_gsa"
write_dir_fig = write_dir / "figures"

fig_format = ["pdf", "png"]

bd.projects.set_current("GSA for protocol")
co = bd.Database("CH consumption 1.0")
demand_act = [act for act in co if "Food" in act["name"]]
assert len(demand_act) == 1
demand_act = demand_act[0]
demand = {demand_act: 1}
uncertain_method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
lca = bc.LCA(demand, uncertain_method)
lca.lci()
lca.lcia()

uncertain_tech_params = lca.tech_params[
    lca.tech_params["uncertainty_type"] > 1]
uncertain_bio_params = lca.bio_params[lca.bio_params["uncertainty_type"] > 1]
uncertain_cf_params = lca.cf_params[lca.cf_params["uncertainty_type"] > 1]

where_utech_params_lognormal = np.where(
    uncertain_tech_params["uncertainty_type"] == sa.LognormalUncertainty.id)[0]

where_ubio_params_lognormal = np.where(
    uncertain_bio_params["uncertainty_type"] == sa.LognormalUncertainty.id)[0]

where_ucf_params_lognormal = np.where(
    uncertain_cf_params["uncertainty_type"] == sa.LognormalUncertainty.id)[0]
Example #16
        option=option,
    )
    add_consumption_categories(co_name)
    add_consumption_sectors(co_name)

# Add uncertainties to GWP values
method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
if method not in bd.methods:
    add_bw_method_with_gwp_uncertainties()

# LCA
co_average_act_name = 'ch hh average consumption {}'.format(option)
hh_average = [act for act in co if co_average_act_name == act['name']]
assert len(hh_average) == 1
demand_act = hh_average[0]
lca = bc.LCA({demand_act: 1}, method)
lca.lci()
lca.lcia()
print(demand_act['name'], lca.score)

# food = [act for act in co if "Food" in act['name']]
# assert len(food) == 1
# demand_act = food[0]

# transport = [act for act in co if "Transport" in act['name']]
# assert len(transport) == 1
# demand_act = transport[0]

sectors = [act for act in co if "sector" in act['name'].lower()]
sum_ = 0
for demand_act in sectors:
Example #17

from pathlib import Path

import bw2calc as bc
import bw2data as bd

if __name__ == "__main__":

    path_base = Path(
        "/Users/akim/PycharmProjects/gsa-framework-master/dev/write_files/")
    write_dir = path_base / "protocol_gsa_food_bw2"
    bd.projects.set_current("GSA for archetypes")
    co = bd.Database("swiss consumption 1.0")
    demand_act = [act for act in co if "Food" in act["name"]]
    assert len(demand_act) == 1
    demand_act = demand_act[0]
    demand = {demand_act: 1}
    uncertain_method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
    static_lca = bc.LCA(demand, uncertain_method, use_distributions=False)
    static_lca.lci()
    static_lca.lcia()

    # Biosphere, remove non-influential inputs (Step 1)
    lca = static_lca
    cutoff = 1e-3
    inv = lca.characterized_inventory
    finv = inv.multiply(abs(inv) > abs(lca.score * cutoff))
    biosphere_exchange_indices = list(zip(*finv.nonzero()))
    explained_fraction = finv.sum() / lca.score
    # print('Explained fraction of LCA score:', explained_fraction)
    print(
        "BIOSPHERE {} filtering resulted in {} of {} exchanges ({}% of total impact)"
        .format(
            inv.shape,
def compare_activities_by_grouped_leaves(
    activities,
    lcia_method,
    mode="relative",
    max_level=4,
    cutoff=7.5e-3,
    output_format="list",
    str_length=50,
):
    """Compare activities by the impact of their different inputs, aggregated by the product classification of those inputs.

    Args:
        activities: list of ``Activity`` instances.
        lcia_method: tuple. LCIA method to use when traversing supply chain graph.
        mode: str. If "relative" (default), results are returned as a fraction of total input. Otherwise, results are absolute impact per input exchange.
        max_level: int. Maximum level in supply chain to examine.
        cutoff: float. Fraction of total impact to cutoff supply chain graph traversal at.
        output_format: str. See below.
        str_length: int. If ``output_format`` is ``html``, this controls how many characters each column label can have.

    Raises:
        ValueError: ``activities`` is malformed.

    Returns:
        Depends on ``output_format``:

        * ``list``: Tuple of ``(column labels, data)``
        * ``html``: HTML string that will print nicely in Jupyter notebooks.
        * ``pandas``: a pandas ``DataFrame``.

    """
    for act in activities:
        if not isinstance(act, bd.backends.peewee.proxies.Activity):
            raise ValueError(
                "`activities` must be an iterable of `Activity` instances")

    objs = [
        group_leaves(
            find_leaves(act, lcia_method, max_level=max_level, cutoff=cutoff))
        for act in activities
    ]
    sorted_keys = sorted(
        [(max([el[0] for obj in objs for el in obj if el[2] == key]), key)
         for key in {el[2]
                     for obj in objs for el in obj}],
        reverse=True,
    )
    name_common = commonprefix([act["name"] for act in activities])

    if " " not in name_common:
        name_common = ""
    else:
        last_space = len(name_common) - operator.indexOf(
            reversed(name_common), " ")
        name_common = name_common[:last_space]
        print("Omitting activity name common prefix: '{}'".format(name_common))

    product_common = commonprefix(
        [act.get("reference product", "") for act in activities])

    lca = bc.LCA({act: 1 for act in activities}, lcia_method)
    lca.lci()
    lca.lcia()

    labels = [
        "activity",
        "product",
        "location",
        "unit",
        "total",
        "direct emissions",
    ] + [key for _, key in sorted_keys]
    data = []
    for act, lst in zip(activities, objs):
        lca.redo_lcia({act: 1})
        data.append([
            act["name"].replace(name_common, ""),
            act.get("reference product", "").replace(product_common, ""),
            act.get("location", "")[:25],
            act.get("unit", ""),
            lca.score,
        ] + [(lca.characterization_matrix * lca.biosphere_matrix *
              lca.demand_array).sum()] +
                    [get_value_for_cpc(lst, key) for _, key in sorted_keys])

    data.sort(key=lambda x: x[4], reverse=True)

    if mode == "relative":
        for row in data:
            for index, point in enumerate(row[5:]):
                row[index + 5] = point / row[4]

    if output_format == "list":
        return labels, data
    elif output_format == "pandas":
        return pd.DataFrame(data, columns=labels)
    elif output_format == "html":
        return tabulate.tabulate(
            data,
            [x[:str_length] for x in labels],
            tablefmt="html",
            floatfmt=".3f",
        )
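# Usage sketch (not part of the original source): the database, activity filter, and
# method tuple are assumptions; ``output_format="pandas"`` requires pandas to be installed.
import bw2data as bd

db = bd.Database("ecoinvent 3.7.1")                        # assumed database
acts = [act for act in db if act["name"].startswith("market for electricity")][:3]
ipcc = ("IPCC 2013", "climate change", "GWP 100a")
df = compare_activities_by_grouped_leaves(acts, ipcc, output_format="pandas")
print(df.head())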
Example #19
import bw2data as bd, bw2calc as bc
bd.projects.set_current("ecoinvent 3.7.1")
bd.databases
a = bd.get_activity(('ecoinvent 3.7.1', 'f57568b2e553864152a6ac920595216f'))
ipcc = ('IPCC 2013', 'climate change', 'GWP 100a')

fu, data_objs, _ = bd.prepare_lca_inputs({a: 1}, method=ipcc)
lca = bc.LCA(demand=fu, data_objs=data_objs)
lca.lci()
lca.lcia()