def get_archetypes_scores_per_sector(co_name, year_habe, method, write_dir):
    """Get total LCIA scores for all archetypes for one year of consumption split over sectors.

    Results are cached per archetype in ``write_dir / "monthly_<archetype>.pickle"``;
    an existing pickle is reused instead of recomputing.

    Parameters
    ----------
    co_name : str
        Name of the registered consumption database.
    year_habe : int or str
        HABE survey years used to select archetype and sector activities.
    method : tuple
        LCIA method identifier.
    write_dir : pathlib.Path
        Directory where per-archetype score pickles are cached.

    Returns
    -------
    dict
        ``{archetype name: {sector name: LCIA score}}``
    """
    co = bd.Database(co_name)
    # Sort by name for deterministic iteration order; sorting Activity objects
    # directly may not define a stable (or any) ordering.
    archetypes = sorted(
        (
            act for act in co
            if "archetype" in act['name'].lower() and str(year_habe) in act['name']
        ),
        key=lambda act: act['name'],
    )
    sectors = sorted(
        (
            act for act in co
            if f"sector, years {year_habe}" in act['name'].lower()
        ),
        key=lambda act: act['name'],
    )

    scores = {}
    for archetype in archetypes:
        print("--> {}".format(archetype['name']))
        fp_archetype_scores = write_dir / f"monthly_{archetype['name']}.pickle"
        if fp_archetype_scores.exists():
            scores_per_sector = read_pickle(fp_archetype_scores)
        else:
            scores_per_sector = {}
            for sector in sectors:
                demand_sector = get_demand_per_sector(archetype, sector)
                lca = bc.LCA(demand_sector, method)
                lca.lci()
                lca.lcia()
                scores_per_sector[sector['name']] = lca.score
            write_pickle(scores_per_sector, fp_archetype_scores)
        scores[archetype['name']] = scores_per_sector
    return scores
Example #2
0
    def __init__(self, database):
        """Create a ``Rower`` for consistent 'Rest-of-World' labelling of *database*.

        ``database`` must be the name of a registered ``bw2data`` database.

        Main operations exposed by this class:

        * ``define_RoWs``: define RoWs in the given database, reusing the RoW
          labels from the master data or creating new user RoWs.
        * ``read_datapackage``: load previously saved RoW definitions.
        * ``label_RoWs``: relabel activity locations with the generated RoW labels.
        * ``write_datapackage``: persist user RoW definitions for reuse in a
          standard format.
        * Import of a ``geocollection``/``topocollection`` into bw2regional
          (bw2regional must be installed) (not implemented).

        Internal state:

        * ``self.db``: the ``bw2data.Database`` instance
        * ``self.existing``: ``{"RoW label": ["list of excluded locations"]}`` —
          load from a previous saved result via ``self.load_existing``
        * ``self.user_rows``: same shape, but only new RoWs absent from
          ``self.existing``
        * ``self.labelled``: ``{"RoW label": ["list of activity codes"]}``

        When saving to a data package, only ``self.user_rows`` and
        ``self.labelled`` are written out.
        """
        assert database in bw2data.databases, f"Database {database} not registered"
        self.db = bw2data.Database(database)
        self.existing, self.user_rows, self.labelled = {}, {}, {}
Example #3
0
def test_compare_activities_by_lcia_score_similar(capsys):
    """Scores within the default tolerance should report all activities as similar."""
    lcia = bd.Method(("method",))
    lcia.register()
    lcia.write(method_fixture)

    # Two processes emitting 1 and 1.1 units of the same flow.
    fixture = {("c", "flow"): {"name": "flow", "type": "biosphere"}}
    for idx, amount in ((1, 1), (2, 1.1)):
        fixture[("c", str(idx))] = {
            "name": f"process {idx}",
            "exchanges": [
                {"input": ("c", "flow"), "type": "biosphere", "amount": amount}
            ],
        }
    bd.Database("c").write(fixture)

    capsys.readouterr()  # discard anything printed during setup
    compare_activities_by_lcia_score([("c", "1"), ("c", "2")], ("method",))

    assert capsys.readouterr().out == "All activities similar\n"
 def map_by_production_volume(cls, db):
     """Link food exchanges in ``db`` to specific ecoinvent activities.

     Builds a mapping from ``(name, location)`` keys to lists of ecoinvent
     activity codes and applies it with ``modify_exchanges``. The repeated
     filter logic is factored into a local helper.

     Parameters
     ----------
     db : consumption database whose exchanges are relinked; returned after
         modification.
     """
     ei_name = cls.determine_ecoinvent_db_name()
     ei = bd.Database(ei_name)

     def _codes(name, location=None, exclude_name=(), exclude_reference_product=()):
         # Codes of ecoinvent activities whose name contains `name`, optionally
         # restricted to `location` and filtered by name / reference-product
         # exclusion terms.
         return [
             act['code'] for act in ei
             if name in act['name']
             and (location is None or act['location'] == location)
             and all(term not in act['name'] for term in exclude_name)
             and all(
                 term not in act['reference product']
                 for term in exclude_reference_product
             )
         ]

     mapping = [
         {('market for rice', 'GLO'): _codes(
             'market for rice', 'GLO', exclude_name=('seed',))},
         {('rice production', 'RoW'): _codes(
             'rice production', 'RoW', exclude_reference_product=('straw',))},
         {('rice production', 'IN'): _codes(
             'rice production', 'IN', exclude_reference_product=('straw',))},
         # NOTE(review): the wheat/maize filters deliberately do not restrict
         # the location even though the key says GLO — confirm this is intended.
         {('market for wheat grain', 'GLO'): _codes(
             'market for wheat grain', exclude_name=('feed',))},
         {('market for maize grain', 'GLO'): _codes(
             'market for maize grain', exclude_name=('feed',))},
         {('market for mandarin', 'GLO'): _codes('market for mandarin')},
         {('market for soybean', 'GLO'): _codes(
             'market for soybean',
             exclude_name=('meal', 'beverage', 'seed', 'feed', 'oil'))},
     ]
     db = modify_exchanges(db, mapping, ei_name)
     return db
Example #5
0
def process_delta_database(name, tech, bio, dependents):
    """A modification of ``bw2data.backends.base.SQLiteBackend.process`` to skip retrieving data from the database.

    Writes the processed datapackage for database ``name`` directly from the
    supplied exchange iterators instead of querying exchange data from SQLite.

    Parameters
    ----------
    name : str
        Name of a registered ``bw2data`` database.
    tech : iterable of dict
        Technosphere matrix entries; assumed already in the
        ``{"row": ..., "col": ..., "amount": ...}`` datapackage format —
        TODO confirm against the caller.
    bio : iterable of dict
        Biosphere matrix entries, same assumed format as ``tech``.
    dependents : set of str
        Database names this database depends on; ``name`` itself is excluded
        from the stored ``depends`` metadata.
    """
    print("Tech:", tech)
    print("Bio:", bio)

    db = bd.Database(name)
    db.metadata["processed"] = datetime.datetime.now().isoformat()

    # Create geomapping array, from dataset integer ids to locations
    inv_mapping_qs = ActivityDataset.select(
        ActivityDataset.id, ActivityDataset.location
    ).where(ActivityDataset.database == name, ActivityDataset.type == "process")

    # self.filepath_processed checks if data is dirty,
    # and processes if it is. This causes an infinite loop.
    # So we construct the filepath ourselves.
    fp = str(db.dirpath_processed() / db.filename_processed())

    dp = bwp.create_datapackage(
        fs=ZipFS(fp, write=True),
        name=bwp.clean_datapackage_name(name),
        sum_intra_duplicates=True,
        sum_inter_duplicates=False,
    )
    # Geomapping rows: activity id -> geomapping id of its location,
    # falling back to the global location when the activity has none.
    dp.add_persistent_vector_from_iterator(
        matrix="inv_geomapping_matrix",
        name=bwp.clean_datapackage_name(name + " inventory geomapping matrix"),
        dict_iterator=(
            {
                "row": row[0],
                "col": bd.geomapping[
                    bd.backends.utils.retupleize_geo_strings(row[1])
                    or bd.config.global_location
                ],
                "amount": 1,
            }
            for row in inv_mapping_qs.tuples()
        ),
        nrows=inv_mapping_qs.count(),
    )

    # Biosphere and technosphere vectors come straight from the caller.
    dp.add_persistent_vector_from_iterator(
        matrix="biosphere_matrix",
        name=bwp.clean_datapackage_name(name + " biosphere matrix"),
        dict_iterator=bio,
    )
    dp.add_persistent_vector_from_iterator(
        matrix="technosphere_matrix",
        name=bwp.clean_datapackage_name(name + " technosphere matrix"),
        dict_iterator=tech,
    )
    dp.finalize_serialization()

    # Mark the database clean so bw2data does not reprocess it.
    db.metadata["depends"] = sorted(dependents.difference({name}))
    db.metadata["dirty"] = False
    db._metadata.flush()
def setup_bw_project_archetypes(project="GSA for archetypes"):
    """Activate the archetypes project and return a solved LCA for one unit of the Food activity."""
    bd.projects.set_current(project)
    consumption_db = bd.Database("swiss consumption 1.0")
    food_candidates = [act for act in consumption_db if "Food" in act["name"]]
    assert len(food_candidates) == 1
    functional_unit = {food_candidates[0]: 1}
    uncertain_method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
    lca = bc.LCA(functional_unit, uncertain_method, use_distributions=False)
    lca.lci()
    lca.lcia()
    return lca
Example #7
0
def create_example_database():
    """Build the bw2io Mobility example in a temporary project and save its processed arrays as fixtures."""
    with temporary_project_dir() as td:
        bw2io.add_example_database()
        example_db = bw2data.Database("Mobility example")
        ipcc = bw2data.Method(("IPCC", "simple"))

        fixture_dir = this_dir / "example_db"
        fixture_dir.mkdir(exist_ok=True)
        example_db.filepath_processed().rename(fixture_dir / "example_db.zip")
        ipcc.filepath_processed().rename(fixture_dir / "ipcc.zip")
        mapping_entries = list(bw2data.mapping.items())
        with open(fixture_dir / "mapping.json", "w") as f:
            json.dump(mapping_entries, f)
Example #8
0
def create_mc_single_activity_only_production():
    """Fixture: one process with uniformly-distributed biosphere and production exchanges."""
    with temporary_project_dir() as td:
        biosphere = bw2data.Database("biosphere")
        biosphere.write({("biosphere", "1"): {"type": "emission"}})

        def _uniform(amount, minimum, maximum, input_key, exc_type):
            # uncertainty type 4 == uniform distribution
            return {
                "amount": amount,
                "minimum": minimum,
                "maximum": maximum,
                "input": input_key,
                "type": exc_type,
                "uncertainty type": 4,
            }

        saop = bw2data.Database("saop")
        saop.write({
            ("saop", "1"): {
                "exchanges": [
                    _uniform(0.5, 0.2, 0.8, ("biosphere", "1"), "biosphere"),
                    _uniform(1, 0.5, 1.5, ("saop", "1"), "production"),
                ],
                "type": "process",
            },
        })

        fixture_dir = this_dir / "mc_saop"
        fixture_dir.mkdir(exist_ok=True)
        biosphere.filepath_processed().rename(fixture_dir / "biosphere.zip")
        saop.filepath_processed().rename(fixture_dir / "saop.zip")
        with open(fixture_dir / "mapping.json", "w") as f:
            json.dump(list(bw2data.mapping.items()), f)
Example #9
0
def create_empty_biosphere():
    """Fixture: a biosphere flow, a technosphere with no biosphere exchanges, and a method for the flow."""
    with temporary_project_dir() as td:
        biosphere = bw2data.Database("biosphere")
        biosphere.write({
            ("biosphere", "1"): {
                "categories": ["things"],
                "exchanges": [],
                "name": "an emission",
                "type": "emission",
                "unit": "kg",
            }
        })

        # Two technosphere activities; neither emits anything.
        test_db = bw2data.Database("t")
        test_db.write({
            ("t", "1"): {
                "exchanges": [
                    {"amount": 1, "input": ("t", "2"), "type": "technosphere"}
                ],
            },
            ("t", "2"): {"exchanges": []},
        })

        method = bw2data.Method(("a method", ))
        method.write([(("biosphere", "1"), 42)])

        fixture_dir = this_dir / "empty_biosphere"
        fixture_dir.mkdir(exist_ok=True)
        for obj, target in (
            (biosphere, "biosphere.zip"),
            (test_db, "test_db.zip"),
            (method, "method.zip"),
        ):
            obj.filepath_processed().rename(fixture_dir / target)
        with open(fixture_dir / "mapping.json", "w") as f:
            json.dump(list(bw2data.mapping.items()), f)
Example #10
0
def fix_acrolein():
    """Add the missing CAS number to the ecoinvent acrolein flow (idempotent)."""
    assert "biosphere3" in bd.databases, (
        "ReCiPe 2016 only tested for ecoinvent biosphere flows; install base ecoinvent data"
    )

    bio = bd.Database("biosphere3")
    if bio.metadata.get("acrolein fixed"):
        return  # already patched

    acrolein = bd.get_activity(
        ("biosphere3", "fa8bd05b-015d-5a82-878c-bde991551695"))
    acrolein["CAS number"] = "107-02-8"
    acrolein.save()

    # Remember the patch so repeated calls are no-ops.
    bio.metadata["acrolein fixed"] = True
    bd.databases.flush()
def add_archetypes_consumption(co_name,
                               year_habe,
                               fp_archetypes=None,
                               fp_habe_clustering=None):
    """Create archetype functional-unit activities in the consumption database.

    Reads the weighted archetype table and creates one activity per archetype
    row (with household metadata), adding one technosphere exchange per
    consumption code that exists in the database.

    Parameters
    ----------
    co_name : str
        Name of the registered consumption database.
    year_habe : int or str
        HABE survey years, used in file and activity names.
    fp_archetypes : path, optional
        CSV with archetype demands; defaults to the standard location.
    fp_habe_clustering : path, optional
        CSV mapping households to archetype clusters; defaults likewise.
    """
    print("Creating archetypes functional units")
    if fp_archetypes is None:
        fp_archetypes = dirpath / "functional_units" / f"hh_archetypes_weighted_working_tables_{year_habe}.csv"
    if fp_habe_clustering is None:
        fp_habe_clustering = dirpath / "functional_units" / "habe_clustering.csv"
    co = bd.Database(co_name)
    df = pd.read_csv(fp_archetypes)
    all_consumption_codes = [act['code'] for act in co]
    # Columns of the archetype table that have no matching consumption activity.
    codes_to_ignore = [
        code for code in df.iloc[0].index if code not in all_consumption_codes
    ]
    ppl_per_archetype_dict = get_people_per_archetypes(co.metadata['dir_habe'],
                                                       fp_habe_clustering,
                                                       year_habe)
    income_per_archetype_dict = get_income_per_archetypes(
        co.metadata['dir_habe'], fp_habe_clustering, year_habe)
    for i, df_row in df.iterrows():
        archetype_label = df_row['cluster_label_name']
        # Create new activity, first deleting any previous version of it.
        act_name = f"archetype {archetype_label.upper()} consumption, years {year_habe}"
        try:
            co.get(act_name).delete()
        except Exception:
            # Narrowed from a bare `except:`; the activity simply did not exist yet.
            pass
        archetype_act = co.new_activity(
            act_name,
            name=act_name,
            location='CH',
            unit='1 month of consumption',
            cluster_label_def=df_row['cluster_label_def'],
            ppl_per_household=ppl_per_archetype_dict[archetype_label],
            income_per_household=income_per_archetype_dict[archetype_label],
        )
        archetype_act.save()
        # Add exchanges to this activity
        for code in df_row.index:
            if ("cluster" not in code) and (code not in codes_to_ignore):
                archetype_act.new_exchange(
                    input=(co.name, code),
                    amount=df_row[code],
                    type='technosphere',
                ).save()
    return
Example #12
0
def fdii():
    """Write fixture database ``c``: one biosphere flow and six processes with varying names, locations, and emissions."""
    def _bio(amount):
        # Biosphere exchange of the shared flow.
        return {"input": ("c", "flow"), "type": "biosphere", "amount": amount}

    def _tech(code, amount):
        # Technosphere exchange pointing at another activity in "c".
        return {"input": ("c", code), "type": "technosphere", "amount": amount}

    data = {
        ("c", "flow"): {"name": "flow", "type": "biosphere"},
        ("c", "1"): {
            "name": "yes",
            "reference product": "bar",
            "exchanges": [_bio(0.1)],
        },
        ("c", "2"): {
            "name": "no",
            "reference product": "foo",
            "exchanges": [_bio(10)],
        },
        ("c", "3"): {
            "name": "yes",
            "reference product": "foo",
            "location": "here",
            "exchanges": [_bio(1), _tech("1", 10)],
        },
        ("c", "4"): {
            "name": "yes",
            "reference product": "foo",
            "location": "here",
            "exchanges": [_bio(0.6), _bio(0.5), _tech("1", 10)],
        },
        ("c", "5"): {
            "name": "yes",
            "reference product": "foo",
            "location": "there",
            "exchanges": [_bio(0.95), _tech("1", 10)],
        },
        ("c", "6"): {
            "reference product": "bar",
            "exchanges": [_bio(1)],
        },
    }
    bd.Database("c").write(data)
        def create_location_mapping(agribalyse_13_db, ecoinvent_name):
            """Map unlinked (name, location) pairs to matching ecoinvent activity codes.

            Indexes the ecoinvent database by name once instead of rescanning
            it for every unlinked exchange (the original was O(n*m)).

            Parameters
            ----------
            agribalyse_13_db : importer whose ``unlinked`` exchanges need codes.
            ecoinvent_name : str
                Name of the registered ecoinvent database.

            Returns
            -------
            list of dict
                One ``{(name, location): [codes]}`` entry per unlinked exchange.
            """
            ecoinvent_db = bd.Database(ecoinvent_name)

            # Build the name -> [codes] index once, preserving database order.
            codes_by_name = {}
            for act in ecoinvent_db:
                codes_by_name.setdefault(act['name'], []).append(act['code'])

            return [
                {
                    (unlinked['name'], unlinked['location']):
                        list(codes_by_name.get(unlinked['name'], []))
                }
                for unlinked in agribalyse_13_db.unlinked
            ]
def add_consumption_average_hh(consumption_all):
    """Add consumption activity for an average household.

    Copies ``consumption_all`` into a new activity and divides every
    non-production exchange amount by the number of households, so the copy
    represents a single average household.

    Parameters
    ----------
    consumption_all : activity
        Total-consumption activity carrying ``database``, ``agg_option``,
        ``year_habe`` and ``n_households`` attributes.
    """
    co_name = consumption_all.get('database')
    option = consumption_all.get('agg_option')
    year_habe = consumption_all.get('year_habe')
    n_households = consumption_all.get('n_households')
    co = bd.Database(co_name)
    co_average_act_name = f'ch hh average consumption {option}, years {year_habe}'
    # Best-effort removal of a stale copy (narrowed from a bare `except:`).
    try:
        co.get(co_average_act_name).delete()
    except Exception:
        pass
    consumption_average = consumption_all.copy(co_average_act_name,
                                               name=co_average_act_name)
    for exc in consumption_average.exchanges():
        if exc['type'] != 'production':
            exc['amount'] /= n_households
            exc.save()
def get_archetypes_scores_per_month(co_name, year_habe, method,
                                    fp_archetypes_scores):
    """Get total LCIA scores for all archetypes for one month of consumption.

    Scores are cached in ``fp_archetypes_scores``; an existing pickle is
    reused instead of recomputing.

    Returns
    -------
    dict
        ``{archetype activity name: LCIA score}``
    """
    co = bd.Database(co_name)
    # Sort by name for a deterministic computation order; sorting Activity
    # objects directly may not define a stable (or any) ordering.
    archetypes = sorted(
        (
            act for act in co
            if "archetype" in act['name'].lower() and str(year_habe) in act['name']
        ),
        key=lambda act: act['name'],
    )
    if fp_archetypes_scores.exists():
        archetypes_scores = read_pickle(fp_archetypes_scores)
    else:
        archetypes_scores = {}
        for demand_act in archetypes:
            lca = bc.LCA({demand_act: 1}, method)
            lca.lci()
            lca.lcia()
            archetypes_scores[demand_act['name']] = lca.score
        write_pickle(archetypes_scores, fp_archetypes_scores)
    return archetypes_scores
Example #16
0
def cabls(capsys):
    """Fixture: a registered method plus two processes whose emissions differ by 25%."""
    lcia = bd.Method(("method",))
    lcia.register()
    lcia.write(method_fixture)

    def _process(label, amount):
        return {
            "name": f"process {label}",
            "exchanges": [
                {"input": ("c", "flow"), "type": "biosphere", "amount": amount}
            ],
        }

    bd.Database("c").write({
        ("c", "flow"): {"name": "flow", "type": "biosphere"},
        ("c", "1"): _process(1, 1),
        ("c", "2"): _process(2, 1.25),
    })
    capsys.readouterr()  # flush captured output before the test proper
def setup_lca_model_protocol_narrow_bio(path_base,
                                        num_params=None,
                                        write_dir=None):
    """Build the LCAModel for the 'protocol narrow bio' GSA project and return it with its write dir and seed."""
    # LCA model
    bd.projects.set_current("GSA for protocol narrow bio")
    consumption_db = bd.Database("CH consumption 1.0")
    food_candidates = [act for act in consumption_db if "Food" in act["name"]]
    assert len(food_candidates) == 1
    demand = {food_candidates[0]: 1}
    method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")

    if num_params is None:
        # No cutoff given: solve once and report how many exchanges are uncertain.
        lca = bc.LCA(demand, method)
        lca.lci()
        lca.lcia()
        print("LCA score is {}".format(lca.score))
        uncertain_counts = {
            label: len(params[params["uncertainty_type"] > 1])
            for label, params in (
                ("tech", lca.tech_params),
                ("bio", lca.bio_params),
                ("cf", lca.cf_params),
            )
        }
        print("Total number of uncertain exchanges is {}".format(
            sum(uncertain_counts.values())))
        print("   tech={}, bio={}, cf={}".format(uncertain_counts["tech"],
                                                 uncertain_counts["bio"],
                                                 uncertain_counts["cf"]))

    if write_dir is None:
        write_dir = path_base / "protocol_gsa_narrow_bio"
    model = LCAModel(
        demand,
        method,
        write_dir,
        num_params=num_params,
        uncertain_exchanges_types=["tech", "bio", "cf"],
    )
    gsa_seed = 4000238
    return model, write_dir, gsa_seed
def add_consumption_categories(co_name):
    """Attach coarse/middle/fine category labels to consumption activities.

    Reads the category translation table from the supplementary-information
    spreadsheet and, for each activity, derives category labels from the
    leading characters of its (letter-stripped) code.
    """
    co = bd.Database(co_name)

    sheet_name = 'Overview & LCA-Modeling'
    co_path = dirpath / "es8b01452_si_002.xlsx"
    df_raw = pd.read_excel(co_path, sheet_name=sheet_name, header=2)

    categories_col_de = 'Original name in Swiss household budget survey'
    categories_col_en = 'Translated name'
    categories_raw = df_raw[[categories_col_de, categories_col_en]]

    categories = {}
    for v in categories_raw.values:
        v_list_de = v[0].split(':')
        v_list_en = v[1].split(':')
        # Keep only "<code>: <name>" rows without sub-numbering in the code.
        if len(v_list_de) > 1 and len(v_list_de[0].split('.')) == 1:
            categories[v_list_de[0]] = v_list_en[0]
    max_code_len = max({len(k) for k in categories.keys()})

    category_names_dict = {
        2: 'coarse',
        3: 'middle',
        4: 'fine',
    }
    for act in co:
        # BUG FIX: `re.I` used to be passed as the positional `count` argument
        # of `re.sub` (limiting replacements to 2) instead of as `flags`, so
        # the match was never case-insensitive as intended.
        code = re.sub(r'[a-z]+', '', act['code'], flags=re.I)[:max_code_len]

        for i in range(2, max_code_len + 1):
            try:
                category_name = 'category_' + category_names_dict[i]
                act[category_name] = categories[code[:i]]
                act.save()
            except KeyError:
                # Missing granularity level or unknown code prefix: skip.
                pass
        # Manual override: computing equipment belongs to durable goods.
        if act['name'] in {
            "Desktop computers",
            "Portable computers",
            "Printers (incl. multifunctional printers)",
        }:
            act["category_coarse"] = "Durable goods"
            act.save()
Example #19
0
def bw2io_example_database():
    """Create the bw2io Mobility example fixtures inside a throwaway project."""
    try:
        import bw2data as bd
        import bw2io as bi
        from bw2data.backends.schema import ActivityDataset as AD

        scratch = "__fixture_creation__"
        if scratch in bd.projects:
            # Remove leftovers from a previous (possibly failed) run.
            bd.projects.delete_project(scratch, delete_dir=True)
        bd.projects.set_current(scratch)

        bi.add_example_database()
        example_db = bd.Database("Mobility example")
        ipcc = bd.Method(("IPCC", "simple"))

        example_db.filepath_processed().rename(fixture_dir / "bw2io_example_db.zip")
        ipcc.filepath_processed().rename(fixture_dir / "ipcc_simple.zip")
        with open(fixture_dir / "bw2io_example_db_mapping.json", "w") as f:
            json.dump([(obj.name, obj.id) for obj in AD.select()], f)

        # Deletes the current (scratch) project.
        bd.projects.delete_project(delete_dir=True)
    except ImportError:
        print(
            "Can't import libraries for bw2io example database fixture creation"
        )
Example #20
0
import bw2data as bd
import pandas as pd
from consumption_model_ch.utils import get_habe_filepath
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
from pathlib import Path
from dev.utils_paper_plotting import *

bd.projects.set_current("GSA for archetypes")
co_name = "swiss consumption 1.0"
co = bd.Database(co_name)
year_habe = co.metadata["year_habe"]
dir_habe = co.metadata["dir_habe"]

# 2. Extract total demand from HABE
path_beschrei = get_habe_filepath(dir_habe, year_habe, "Datenbeschreibung")
path_ausgaben = get_habe_filepath(dir_habe, year_habe, "Ausgaben")
path_mengen = get_habe_filepath(dir_habe, year_habe, "Mengen")

# change codes to be consistent with consumption database and Andi's codes
ausgaben = pd.read_csv(path_ausgaben, sep="\t")
mengen = pd.read_csv(path_mengen, sep="\t")
ausgaben.columns = [col.lower() for col in ausgaben.columns]
mengen.columns = [col.lower() for col in mengen.columns]
codes_co_db = sorted([act["code"] for act in co])
columns_a = ausgaben.columns.values
columns_m = [columns_a[0]]
for code_a in columns_a[1:]:
    code_m = code_a.replace("a", "m")
    if code_m in codes_co_db:
Example #21
0
def cabgl():
    """Write fixture database ``c``: one biosphere flow plus seven processes
    linked into a small supply chain, each carrying CPC classifications, and
    register the ``("method",)`` LCIA method with ``method_fixture`` CFs.

    Relies on module-level ``bd`` (bw2data) and ``method_fixture``.
    """
    # Processes 1-5 form the upstream chain; 6 and 7 are two alternative
    # top-level producers of "product E" consuming 4 and 5.
    data = {
        ("c", "flow"): {"name": "flow", "type": "biosphere"},
        ("c", "1"): {
            "name": "1",
            "reference product": "bar",
            "exchanges": [{"input": ("c", "flow"), "type": "biosphere", "amount": 1}],
            "classifications": [
                ("foo", "bar"),
                ("CPC", "product A"),
            ],
        },
        ("c", "2"): {
            "name": "2",
            "reference product": "foo",
            "exchanges": [
                {"input": ("c", "flow"), "type": "biosphere", "amount": 2},
                {"input": ("c", "1"), "type": "technosphere", "amount": 2},
            ],
            "classifications": [
                ("CPC", "product B"),
            ],
        },
        ("c", "3"): {
            "name": "3",
            "reference product": "foo",
            "location": "here",
            "exchanges": [
                {"input": ("c", "1"), "type": "technosphere", "amount": 1},
            ],
            "classifications": [
                ("CPC", "product B"),
            ],
        },
        ("c", "4"): {
            "name": "4",
            "reference product": "foo",
            "location": "here",
            "exchanges": [
                {"input": ("c", "flow"), "type": "biosphere", "amount": 4},
                {"input": ("c", "3"), "type": "technosphere", "amount": 3},
            ],
            "classifications": [
                ("CPC", "product C"),
            ],
        },
        ("c", "5"): {
            "name": "5",
            "reference product": "foo",
            "location": "there",
            "exchanges": [
                {"input": ("c", "flow"), "type": "biosphere", "amount": 5},
                {"input": ("c", "3"), "type": "technosphere", "amount": 5},
                {"input": ("c", "2"), "type": "technosphere", "amount": 4},
            ],
            "classifications": [
                ("CPC", "product D"),
            ],
        },
        ("c", "6"): {
            "name": "6",
            "reference product": "bar",
            "exchanges": [
                {"input": ("c", "flow"), "type": "biosphere", "amount": 6},
                {"input": ("c", "5"), "type": "technosphere", "amount": 7},
                {"input": ("c", "4"), "type": "technosphere", "amount": 9},
            ],
            "classifications": [
                ("CPC", "product E"),
            ],
        },
        ("c", "7"): {
            "name": "7",
            "reference product": "bar",
            "exchanges": [
                {"input": ("c", "flow"), "type": "biosphere", "amount": 0},
                {"input": ("c", "5"), "type": "technosphere", "amount": 6},
                {"input": ("c", "4"), "type": "technosphere", "amount": 8},
            ],
            "classifications": [
                ("CPC", "product E"),
            ],
        },
    }
    db = bd.Database("c")
    db.write(data)

    # Characterize the single biosphere flow so LCIA scores are defined.
    method = bd.Method(("method",))
    method.register()
    method.write(method_fixture)
def add_consumption_sectors(co_name, year_habe):
    """Add consumption sectors as separate activities in the consumption database.

    One new activity is created per distinct ``category_coarse`` value; it
    aggregates (as technosphere inputs) the exchanges of the average-household
    consumption activity whose inputs belong to that category.
    """
    co = bd.Database(co_name)
    demand_act = co.search(f"ch hh average consumption {year_habe}")[0]

    cat_option = 'category_coarse'

    # Distinct category values; activities without one contribute a 0 sentinel
    # that (matching nothing by equality) ends up with no members below.
    cat_values = {act.get(cat_option) or 0 for act in co}

    # Group activities per category; keep only non-empty groups.
    category_activities = {}
    for cat_of_interest in cat_values:
        members = [act for act in co if act.get(cat_option) == cat_of_interest]
        if members:
            category_activities[cat_of_interest] = members

    # NOTE(review): the original version also tallied per-category exchange
    # counts by source database into unused locals; that dead code is removed.

    for cat_of_interest in category_activities:
        # Recreate the sector activity from scratch (narrowed from bare `except:`).
        try:
            co.get(cat_of_interest).delete()
        except Exception:
            pass
        new_act = co.new_activity(
            cat_of_interest,
            name=f"{cat_of_interest} sector, years {year_habe}",
            location='CH',
            unit='1 month of consumption',
            comment=
            f'Average consumption of one household for the years {year_habe}',
        )
        new_act.save()

        # Add production exchange
        new_act.new_exchange(input=(new_act['database'], new_act['code']),
                             amount=1,
                             type='production').save()

        # Pull in the average household's exchanges belonging to this category.
        for exc in demand_act.exchanges():
            if exc.input.get('category_coarse') == cat_of_interest:
                new_act.new_exchange(input=(exc.input['database'],
                                            exc.input['code']),
                                     amount=exc.amount,
                                     type='technosphere').save()
Example #23
0
 def _load_groups_other_backend(self):
     """Return dictionary of ``{(name, product): [(location, code)]`` from non-SQLite3 database"""
     data = defaultdict(list)
     for obj in bw2data.Database(database):
         data[(obj['name'], obj['product'])].append((obj['location'], obj['code']))
     return data
def add_consumption_activities(
    co_name,
    year_habe,
    dir_habe=None,
    option='disaggregated',
):
    """(Re)create the total household consumption activity for ``year_habe``.

    Builds both the 'disaggregated' (Froemelt's model) and 'aggregated'
    (raw HABE sums) variants, compares their technosphere exchange amounts,
    then rebuilds the final activity for ``option`` with amounts corrected in
    favour of the disaggregated values (scaled to the new household count for
    inputs only present in the disaggregated model). A comparison table is
    written to ``write_files/comparison_total_demands_{year_habe}.xlsx``.

    Args:
        co_name: str. Name of the registered consumption database.
        year_habe: HABE survey years identifier, e.g. ``091011``.
        dir_habe: str or Path, optional. Directory with raw HABE files;
            defaults to the value stored in the database metadata.
        option: str. Variant kept as the final consumption activity,
            'disaggregated' or 'aggregated'.
    """
    co = bd.Database(co_name)

    def _single_activity(pattern):
        """Return the unique activity of ``co`` whose name contains ``pattern``."""
        matches = [act for act in co if pattern in act['name']]
        assert len(matches) == 1
        return matches[0]

    # Delete all existing consumption activities for this year.
    # (Plain loops: list comprehensions should not be used for side effects.)
    for act in co:
        if f"consumption disaggregated, years {year_habe}" in act['name']:
            act.delete()
    for act in co:
        if f"consumption aggregated, years {year_habe}" in act['name']:
            act.delete()

    # Add new consumption activities, one per variant.
    dir_habe = dir_habe or co.metadata['dir_habe']
    add_consumption_all_hh(co_name, year_habe, dir_habe, option='disaggregated')
    add_consumption_all_hh(co_name, year_habe, dir_habe, option='aggregated')

    demand_act_dis = _single_activity(
        f"consumption disaggregated, years {year_habe}")
    demand_act_agg = _single_activity(
        f"consumption aggregated, years {year_habe}")
    dict_dis = {
        exc.input: exc.amount
        for exc in demand_act_dis.exchanges() if exc['type'] == 'technosphere'
    }
    dict_agg = {
        exc.input: exc.amount
        for exc in demand_act_agg.exchanges() if exc['type'] == 'technosphere'
    }

    demand_act_dis_dict = {k['name']: v for k, v in dict_dis.items()}
    demand_act_agg_dict = {k['name']: v for k, v in dict_agg.items()}

    # Inputs present only in the disaggregated model must be added manually.
    add_inputs = list(set(dict_dis) - set(dict_agg))
    # Amounts that disagree between the variants are fixed to the
    # disaggregated values (np.nan makes allclose fail for missing inputs).
    fix_amounts = {}
    for input_, amount_dis in dict_dis.items():
        amount_agg = dict_agg.get(input_, np.nan)
        if not np.allclose(amount_dis, amount_agg):
            fix_amounts[input_] = amount_dis
    n_households_old = demand_act_dis['n_households']
    demand_act_dis.delete()
    demand_act_agg.delete()

    # Rebuild only the requested variant and fetch it.
    add_consumption_all_hh(co_name, year_habe, dir_habe, option=option)
    # Fix: look up by `option` -- the previous hard-coded
    # "consumption aggregated" pattern found nothing (and failed the
    # uniqueness assert) whenever option='disaggregated'.
    demand = _single_activity(f"consumption {option}, years {year_habe}")
    n_households_new = demand['n_households']

    for exc in demand.exchanges():
        amount = fix_amounts.get(exc.input)
        # `is not None` instead of truthiness: a corrected amount of 0.0
        # must not be silently skipped.
        if amount is not None:
            exc['amount'] = amount
            exc.save()
    for input_ in add_inputs:
        # Scale the disaggregated amount to the new number of households.
        amount = fix_amounts.get(input_) / n_households_old * n_households_new
        demand.new_exchange(
            input=input_,
            amount=amount,
            type='technosphere',
        ).save()

    add_consumption_average_hh(demand)

    # Write a comparison of the different total-demand vectors to Excel.
    write_dir = Path("write_files")
    path_demand_comparison = write_dir / f"comparison_total_demands_{year_habe}.xlsx"
    demand_new_dict = {
        exc.input['name']: exc.amount
        for exc in demand.exchanges() if exc['type'] == 'technosphere'
    }
    dis_agg_ratio = {
        k: demand_act_agg_dict.get(k, 0) / v
        for k, v in demand_act_dis_dict.items()
    }
    dis_new_ratio = {
        k: demand_new_dict.get(k, 0) / v
        for k, v in demand_act_dis_dict.items()
    }

    df = pd.DataFrame.from_dict({
        'Froemelt': demand_act_dis_dict,
        'HABE 091011': demand_act_agg_dict,
        'HABE 091011 / Froemelt': dis_agg_ratio,
        f'HABE {year_habe}': demand_new_dict,
        f'HABE {year_habe} / Froemelt': dis_new_ratio,
    })
    df.to_excel(path_demand_comparison)
def find_differences_in_inputs(activity,
                               rel_tol=1e-4,
                               abs_tol=1e-9,
                               locations=None,
                               as_dataframe=False):
    """Given an ``Activity``, try to see if other activities in the same database (with the same name and
    reference product) have the same input levels.

    Tolerance values are inputs to `math.isclose <https://docs.python.org/3/library/math.html#math.isclose>`__.

    If differences are present, a difference dictionary is constructed, with the form:

    .. code-block:: python

        {Activity instance: {name of input flow (str): amount}}

    Note that this doesn't reference a specific exchange, but rather sums **all exchanges with the same input reference product**.

    Assumes that all similar activities produce the same amount of reference product.

    Args:
        activity: ``Activity``. Activity to analyze.
        rel_tol: float. Relative tolerance to decide if two inputs are the same. See above.
        abs_tol: float. Absolute tolerance to decide if two inputs are the same. See above.
        locations: list, optional. Locations to restrict comparison to, if present.
        as_dataframe: bool. Return results as pandas DataFrame.

    Returns:
        dict of ``{Activity: {flow name: amount}}`` (empty if no differences
        are found), or a ``pandas.DataFrame`` indexed by location when
        ``as_dataframe`` is true.

    Raises:
        ValueError: if ``activity`` has no ``name`` (or ``database``) key.
    """
    # NOTE: the original runtime probe for math.isclose (Python >= 3.5) was
    # dead code -- this module already requires >= 3.6 (f-strings) -- and has
    # been removed.
    assert isinstance(activity, bd.backends.peewee.proxies.Activity)

    try:
        # Same name + reference product, optionally restricted to locations.
        similar = [
            obj for obj in bd.Database(activity["database"])
            if obj != activity
            and obj.get("reference product") == activity.get("reference product")
            and obj.get("name") == activity["name"]
            and (not locations or obj.get("location") in locations)
        ]
    except KeyError:
        raise ValueError("Given activity has no `name`")

    result = {}

    origin_dict = aggregated_dict(activity)

    for target in similar:
        target_dict = aggregated_dict(target)
        difference = compare_dictionaries(origin_dict, target_dict, rel_tol,
                                          abs_tol)
        if difference:
            # Record the differing flows for both the origin and the target.
            result.setdefault(activity, {}).update({
                key: value
                for key, value in origin_dict.items() if key in difference
            })
            result[target] = {
                key: value
                for key, value in target_dict.items() if key in difference
            }

    if as_dataframe:
        df = DataFrame([{
            "location": obj.get("location"),
            **result[obj]
        } for obj in result])
        df.set_index("location", inplace=True)
        return df
    return result
def add_consumption_all_hh(
    co_name,
    year_habe,
    dir_habe=None,
    option='disaggregated',
    write_dir="write_files",
):
    """Create the total Swiss household consumption activity from HABE data.

    Sums expenditures ('Ausgaben') and quantities ('Mengen') over all survey
    households for ``year_habe``, writes the total-demand vector to
    ``{write_dir}/habe_totaldemands_{year_habe}.xlsx``, and registers an
    activity named ``'ch hh all consumption {option}, years {year_habe}'``
    in the database ``co_name`` whose technosphere exchanges are the totals.

    Args:
        co_name: str. Name of the registered consumption database.
        year_habe: HABE survey years identifier, e.g. ``091011``.
        dir_habe: str or Path, optional. Directory with the raw HABE files;
            defaults to the value stored in the database metadata.
        option: str. 'aggregated' (raw HABE sums, per month) or
            'disaggregated' (Froemelt's per-household model output, per year,
            including imputed 'mx' categories).
        write_dir: str or Path. Directory for the intermediate Excel file.

    Raises:
        ValueError: if ``option`` is neither 'aggregated' nor 'disaggregated'.
    """
    # 1. Get some metadata from the consumption database
    co = bd.Database(co_name)
    dir_habe = dir_habe or co.metadata['dir_habe']

    # 2. Extract total demand from HABE
    path_beschrei = get_habe_filepath(dir_habe, year_habe, 'Datenbeschreibung')
    path_ausgaben = get_habe_filepath(dir_habe, year_habe, 'Ausgaben')
    path_mengen = get_habe_filepath(dir_habe, year_habe, 'Mengen')

    # Rename expenditure columns ('a...') to quantity codes ('m...') where
    # the consumption database uses the latter, so columns match activities.
    ausgaben = pd.read_csv(path_ausgaben, sep='\t')
    mengen = pd.read_csv(path_mengen, sep='\t')
    ausgaben.columns = [col.lower() for col in ausgaben.columns]
    mengen.columns = [col.lower() for col in mengen.columns]
    codes_co_db = {act['code'] for act in co}  # set for O(1) membership tests
    columns_a = ausgaben.columns.values
    columns_m = [columns_a[0]]
    for code_a in columns_a[1:]:
        code_m = code_a.replace('a', 'm')
        columns_m.append(code_m if code_m in codes_co_db else code_a)
    ausgaben.columns = columns_m

    # Compute total consumption; quantity ('Mengen') totals overwrite the
    # corresponding expenditure totals where both exist.
    total_consumption = ausgaben.sum().drop('haushaltid')
    mengen = mengen.sum().drop('haushaltid')
    for code, value in mengen.items():
        try:
            total_consumption[code] = value
        except KeyError:
            print(code)

    # Add other useful info, eg number of households and number of people
    meta = pd.read_excel(path_beschrei,
                         sheet_name='Tabellen',
                         skiprows=8,
                         usecols=[0, 1, 3, 4])
    meta.columns = ['category1', 'category2', 'n_rows', 'n_cols']
    meta.dropna(subset=['n_rows'], inplace=True)

    # Merge the two category columns into a single lookup index.
    temp1 = meta[meta['category1'].notnull()][[
        'category1', 'n_rows', 'n_cols'
    ]]
    temp1.columns = ['category2', 'n_rows', 'n_cols']
    temp2 = meta[meta['category2'].notnull()][[
        'category2', 'n_rows', 'n_cols'
    ]]
    meta = pd.concat([temp1, temp2])
    meta.set_index('category2', inplace=True)

    total_consumption['n_households'] = meta.loc[f'HABE{year_habe}_Ausgaben'][
        'n_rows']
    total_consumption['n_people'] = meta.loc[f'HABE{year_habe}_Personen'][
        'n_rows']

    # Save total demand
    write_dir = Path(write_dir)
    path_demand = write_dir / f"habe_totaldemands_{year_habe}.xlsx"
    total_consumption.to_excel(path_demand)

    # 3. Options
    # OPTION 'aggregated': totals extracted directly from the HABE raw files;
    # units are the same as in the HBS, the vector is in "per month".
    # OPTION 'disaggregated': Andi's totals from his Swiss consumption model;
    # same units as the HBS but "per year", plus imputed categories whose
    # codenames start with "mx", with units:
    #   - kWh per year for electricity
    #   - MJ per year for heating
    #   - cubic meters per year for water supply and wastewater collection
    #   - number of waste bags per year for refuse collection
    if option == 'aggregated':
        df = pd.read_excel(path_demand)
        df.columns = ['code', 'amount']
        df.set_index('code', inplace=True)
        n_households = int(df.loc['n_households', 'amount'])
        df = df.drop(['n_households', 'n_people'])
        df = df.reset_index()
    elif option == 'disaggregated':
        # NOTE(review): `dirpath` is a free variable presumably defined at
        # module level elsewhere in this file -- verify.
        path = dirpath / "functional_units" / 'habe20092011_hh_prepared_imputed.csv'
        df = pd.read_csv(path, low_memory=False)
        n_households = df.shape[0]
        df = df.drop('haushaltid', axis=1).sum()
        df = df.reset_index()
        df.columns = ['code', 'amount']
    else:
        # Fail fast: previously `df` was left undefined here, causing an
        # obscure NameError further down.
        raise ValueError(f"Unknown option: {option}")

    # 4. Add total inputs from Andi's model as swiss consumption activity
    co_act_name = f'ch hh all consumption {option}, years {year_habe}'
    try:
        co.get(co_act_name).delete()
    except Exception:  # narrowed from bare `except:`; activity may not exist yet
        pass
    consumption_all = co.new_activity(co_act_name,
                                      name=co_act_name,
                                      location='CH',
                                      unit='1 month of consumption')
    consumption_all.save()
    # Add production exchange for the activity `consumption`
    consumption_all.new_exchange(
        input=(consumption_all['database'], consumption_all['code']),
        amount=1,
        type='production',
    ).save()
    consumption_all['agg_option'] = option
    consumption_all['n_households'] = n_households
    consumption_all['year_habe'] = year_habe
    consumption_all.save()

    # Link each demand row to the activity with the matching code; codes
    # without a counterpart in the database are collected as unlinked.
    codes = {act['code'] for act in co}
    unlinked_codes = []
    for row in df.itertuples(index=False):
        factor = 1
        # if "mx" in row.code:
        #     factor = 12 # TODO?? divide by number of months
        if row.code in codes:
            consumption_all.new_exchange(
                input=(co.name, row.code),
                amount=row.amount / factor,
                type='technosphere',
                has_uncertainty=True,
            ).save()
        else:
            unlinked_codes.append(row.code)
Example #27
0
#     print(
#         "TECHNOSPHERE filtering resulted in {} exchanges and took {} iterations in {} seconds.".format(
#             len(technosphere_exchange_indices),
#             res["counter"],
#             np.round(time() - start, 2),
#         )
#     )
#     return technosphere_exchange_indices

if __name__ == "__main__":
    # Locate the unique "Food" consumption activity and score one unit of it
    # with the uncertainty-enabled IPCC 2013 GWP100 method (static run).
    path_base = Path(
        "/Users/akim/PycharmProjects/gsa-framework-master/dev/write_files/")
    write_dir = path_base / "protocol_gsa_food_bw2"
    bd.projects.set_current("GSA for archetypes")
    co = bd.Database("swiss consumption 1.0")
    food_candidates = [act for act in co if "Food" in act["name"]]
    assert len(food_candidates) == 1
    demand_act = food_candidates[0]
    demand = {demand_act: 1}
    uncertain_method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
    static_lca = bc.LCA(demand, uncertain_method, use_distributions=False)
    static_lca.lci()
    static_lca.lcia()

    # Biosphere, remove non-influential inputs (Step 1): keep only entries
    # whose characterized value exceeds `cutoff` of the total score.
    lca = static_lca
    cutoff = 1e-3
    characterized = lca.characterized_inventory
    filtered = characterized.multiply(
        abs(characterized) > abs(lca.score * cutoff))
    biosphere_exchange_indices = list(zip(*filtered.nonzero()))
Example #28
0
def get_biosphere_database():
    """Return all flows of the ``biosphere3`` database as a list.

    Asserts that base ecoinvent biosphere data is installed, since
    ReCiPe 2016 has only been tested against ecoinvent biosphere flows.
    """
    assert "biosphere3" in bd.databases, (
        "ReCiPe 2016 only tested for ecoinvent biosphere flows; "
        "install base ecoinvent data"
    )
    return [flow for flow in bd.Database("biosphere3")]
Example #29
0
from gsa_framework.models.life_cycle_assessment import LCAModelBase
from gsa_framework.utils import read_hdf5_array, write_hdf5_array
from dev.utils_paper_plotting import *

# Sampling configuration for the GSA runs.
iterations = 2000
seed = 349239
# path_base = Path('/data/user/kim_a')
path_base = Path(
    "/Users/akim/PycharmProjects/gsa-framework-master/dev/write_files/")
write_dir = path_base / "protocol_gsa"
write_dir_fig = write_dir / "figures"

fig_format = ["pdf", "png"]

# Static LCA of one unit of the (unique) "Food" consumption activity,
# scored with the uncertainty-enabled IPCC 2013 GWP100 method.
bd.projects.set_current("GSA for protocol")
co = bd.Database("CH consumption 1.0")
demand_act = [act for act in co if "Food" in act["name"]]
assert len(demand_act) == 1
demand_act = demand_act[0]
demand = {demand_act: 1}
uncertain_method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
lca = bc.LCA(demand, uncertain_method)
lca.lci()
lca.lcia()

# Keep only parameters that carry a real uncertainty distribution
# (uncertainty_type > 1; presumably the stats_arrays convention where
# 0/1 mean undefined/no uncertainty -- verify).
uncertain_tech_params = lca.tech_params[
    lca.tech_params["uncertainty_type"] > 1]
uncertain_bio_params = lca.bio_params[lca.bio_params["uncertainty_type"] > 1]
uncertain_cf_params = lca.cf_params[lca.cf_params["uncertainty_type"] > 1]
where_utech_params_lognormal = np.where(
Example #30
0
def create_mc_basic():
    """Write the ``mc_basic`` Monte-Carlo fixture (processed arrays + mapping).

    Inside a temporary project, builds a two-activity technosphere with
    uniform (type 4) uncertainties on one technosphere and two biosphere
    exchanges, plus a two-flow LCIA method, then moves the processed files
    into ``this_dir / "mc_basic"`` and dumps the mapping as JSON.
    """
    with temporary_project_dir() as td:
        bio = bw2data.Database("biosphere")
        bio.write({
            ("biosphere", "1"): {"type": "emission"},
            ("biosphere", "2"): {"type": "emission"},
        })
        tech = bw2data.Database("test")
        tech.write({
            ("test", "1"): {
                "exchanges": [
                    {
                        "amount": 0.5,
                        "minimum": 0.2,
                        "maximum": 0.8,
                        "input": ("test", "2"),
                        "type": "technosphere",
                        "uncertainty type": 4,
                    },
                    {
                        "amount": 1,
                        "minimum": 0.5,
                        "maximum": 1.5,
                        "input": ("biosphere", "1"),
                        "type": "biosphere",
                        "uncertainty type": 4,
                    },
                ],
                "type": "process",
            },
            ("test", "2"): {
                "exchanges": [
                    {
                        "amount": 0.1,
                        "minimum": 0,
                        "maximum": 0.2,
                        "input": ("biosphere", "2"),
                        "type": "biosphere",
                        "uncertainty type": 4,
                    },
                ],
                "type": "process",
                "unit": "kg",
            },
        })
        lcia = bw2data.Method(("a", "method"))
        lcia.write([
            (("biosphere", "1"), 1),
            (("biosphere", "2"), 2),
        ])
        fixture_dir = this_dir / "mc_basic"
        fixture_dir.mkdir(exist_ok=True)
        bio.filepath_processed().rename(fixture_dir / "biosphere.zip")
        tech.filepath_processed().rename(fixture_dir / "test_db.zip")
        lcia.filepath_processed().rename(fixture_dir / "method.zip")
        with open(fixture_dir / "mapping.json", "w") as fh:
            json.dump(list(bw2data.mapping.items()), fh)