    def test_fix_spatial_dictionaries(self):
        # TODO: fix this test and remove the skip
        self.skipTest("fix_spatial_dictionaries test needs updating")

        empty = Database("empty")
        empty.register(depends=[], geocollections=["foo"])
        method = Method(("a", "name"))
        method.register(geocollections=["foo"])
        rlca = RegionalizationBase({("empty", "nothing"): 1},
                                   method=("a", "name"))

        # No-op - `inv_spatial_dict` not yet set...
        rlca.fix_spatial_dictionaries()
        self.assertFalse(getattr(rlca, "_mapped_spatial_dict", None))
        self.assertFalse(hasattr(rlca, "inv_spatial_dict"))

        geomapping.data = {"a": 1, "b": 2}
        rlca.inv_spatial_dict = {"a": "foo"}
        # Now it does something...
        rlca.fix_spatial_dictionaries()
        self.assertFalse(hasattr(rlca, "ia_spatial_dict"))
        rlca.inv_spatial_dict = {1: "foo"}
        rlca.ia_spatial_dict = {2: "bar"}
        rlca.fix_spatial_dictionaries()
        self.assertEqual(rlca.inv_spatial_dict, {"a": "foo"})
        self.assertEqual(rlca.ia_spatial_dict, {"b": "bar"})

    def test_geocollections_mismatch(self):
        inventory = Database("inventory")
        inventory.register(geocollections=["places"])
        method = Method(("a", "method"))
        method.register(geocollections=['regions'])

        with self.assertRaises(GeocollectionsMismatch):
            LCA({("inventory", "foo"): 1}, method=("a", "method"))
    def test_site_generic_method_error(self):
        empty = Database("empty")
        empty.register(depends=[], geocollections=[])
        method = Method(("a", "name"))
        method.register()
        with self.assertRaises(SiteGenericMethod):
            rlca = RegionalizationBase({("empty", "nothing"): 1},
                                       method=("a", "name"))
            rlca.get_ia_geocollections()
Example #4
def test_method_process_adds_correct_geo(add_method):
    method = Method(("test method", ))
    # load the processed parameter array written by ``method.process()``
    pickled = np.load(
        os.path.join(projects.dir, "processed",
                     method.get_abbreviation() + ".npy"))
    mapped = {row['flow']: row['geo'] for row in pickled}
    assert geomapping["foo"] == mapped[mapping[("biosphere", 1)]]
    assert geomapping["bar"] == mapped[mapping[("biosphere", 2)]]
    assert pickled.shape == (2, )
Example #5
    def find_characterization(self, row):
        # Doesn't work for regionalized LCIA methods
        flow = self.rb[row]
        flow_data = Database(flow[0]).load()[flow]
        method = Method(self.lca.method)
        try:
            cf = [x for x in method.load() if x[0] == flow][0][1]
        except IndexError:
            raise ValueError("Can't find a CF for flow {}".format(flow))
        return {"flow": {"key": flow, "data": flow_data}, "cf": cf}
    def test_missing_intersection_error(self):
        empty = Database("empty")
        empty.register(depends=[], geocollections=["foo"])
        method = Method(("a", "name"))
        method.register(geocollections=["bar"])
        with self.assertRaises(MissingIntersection):
            rlca = RegionalizationBase({("empty", "nothing"): 1},
                                       method=("a", "name"))
            rlca.inventory_geocollections = rlca.get_inventory_geocollections()
            rlca.ia_geocollections = rlca.get_ia_geocollections()
            rlca.needed_intersections()
Example #7
def test_method_process_adds_correct_geo(add_method):
    method = Method(("test method",))
    package = load_datapackage(ZipFS(method.filepath_processed()))
    print(package.resources)

    mapped = {
        row["row"]: row["col"]
        for row in package.get_resource("test_method_matrix_data.indices")[0]
    }
    assert geomapping["foo"] == mapped[get_id(("biosphere", 1))]
    assert geomapping["bar"] == mapped[get_id(("biosphere", 2))]
    assert package.get_resource("test_method_matrix_data.data")[0].shape == (2,)
Example #8
    def write_methods(self, overwrite=False, verbose=True):
        num_methods, num_cfs, num_unlinked = self.statistics(False)
        if num_unlinked:
            raise ValueError((u"Can't write unlinked methods ({} unlinked cfs)"
                              ).format(num_unlinked))
        for ds in self.data:
            if ds["name"] in methods:
                if overwrite:
                    del methods[ds["name"]]
                else:
                    raise ValueError(
                        (u"Method {} already exists. Use "
                         u"``overwrite=True`` to overwrite existing methods"
                         ).format(ds["name"]))

            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                method = Method(ds["name"])
                method.register(
                    description=ds["description"],
                    filename=ds["filename"],
                    unit=ds["unit"],
                )
                method.write(self._reformat_cfs(ds["exchanges"]))
                method.process()
        if verbose:
            print(u"Wrote {} LCIA methods with {} characterization factors".
                  format(num_methods, num_cfs))
Example #9
def test_excel_lcia_integration():
    Database("biosphere").write({
        ("biosphere", "a"): {
            "name": "aluminium",
            "categories": ("Resource", "in ground"),
        },
        ("biosphere", "b"): {
            "name": "Uranium ore, 1.11 GJ per kg",
            "categories": ("Resource", ),
        },
    })
    config.p["biosphere_database"] = "biosphere"
    fp = os.path.join(EXCEL_FIXTURES_DIR, "lcia.xlsx")
    eli = ExcelLCIAImporter(fp, ("foo", ), "d", "bar")
    eli.apply_strategies()
    eli.write_methods()

    expected = {
        "abbreviation": "foo.acbd18db4cc2f85cedef654fccc4a4d8",
        "description": "d",
        "num_cfs": 2,
        "filename": "lcia.xlsx",
        "unit": "bar",
    }
    assert methods[("foo", )] == expected

    expected = [(("biosphere", "a"), 42), (("biosphere", "b"), 1000000)]
    assert Method(("foo", )).load() == expected
Example #10
    def characterize_static(self,
                            method,
                            data=None,
                            cumulative=True,
                            stepped=False):
        """Characterize a Timeline object with a static impact assessment method.
        
        Args:
            * *method* (tuple): The static impact assessment method.
            * *data* (Timeline object; default=None): ....
            * *cumulative* (bool; default=True): when True return cumulative impact over time.
            * *stepped* (bool; default=True):...
        """
        if method not in methods:
            raise ValueError(u"LCIA static method %s not found" % method)
        if data is None and not self.raw:
            raise EmptyTimeline("No data to characterize")
        self.method_data = {x[0]: x[1] for x in Method(method).load()}
        self.dp_groups = self._groupby_sum_by_flow(
            self.raw if data is None else data)

        self.characterized = [
            grouped_dp(nt.dt, nt.flow,
                       nt.amount * self.method_data.get(nt.flow, 0))
            for nt in self.dp_groups
        ]
        self.characterized.sort(key=lambda x: x.dt)
        return self._summer(self.characterized, cumulative, stepped)
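A hedged usage sketch: `tl` stands in for an already-populated Timeline instance, and the method tuple is a placeholder for any installed static method.

# hedged sketch: `tl` (a populated Timeline) and the method tuple are assumptions
static_method = ("IPCC 2013", "GWP 100a")
series = tl.characterize_static(static_method, cumulative=True, stepped=False)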
Example #11
def multi_traverse_tagged_databases(functional_unit,
                                    methods,
                                    label="tag",
                                    default_tag="other",
                                    secondary_tags=[]):
    """Traverse a functional unit throughout its foreground database(s), and
    group impacts (for multiple methods) by tag label.

    Input arguments:
        * ``functional_unit``: A functional unit dictionary, e.g. ``{("foo", "bar"): 42}``.
        * ``methods``: A list of method names, e.g. ``[("foo", "bar"), ("baz", "qux"), ...]``
        * ``label``: The label of the tag classifier. Default is ``"tag"``
        * ``default_tag``: The tag classifier to use if none was given. Default is ``"other"``
        * ``secondary_tags``: List of tuples in the format (secondary_label, secondary_default_tag). Default is empty list.

    Returns:

        Aggregated tags dictionary from ``aggregate_tagged_graph``, and tagged supply chain graph from ``recurse_tagged_database``.

    """

    lca = LCA(functional_unit, methods[0])
    lca.lci()  # factorize=True would speed up repeated solves
    lca.lcia()

    method_dicts = [{o[0]: o[1]
                     for o in Method(method).load()} for method in methods]

    graph = [
        multi_recurse_tagged_database(key, amount, methods, method_dicts, lca,
                                      label, default_tag, secondary_tags)
        for key, amount in functional_unit.items()
    ]

    return multi_aggregate_tagged_graph(graph), graph
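A hedged call sketch; the functional unit and method tuples below are placeholders, and `aggregated` is expected to map each tag to one score per method.

# hedged sketch: functional unit and method names are hypothetical
fu = {("foo", "bar"): 1}
method_list = [("IPCC 2013", "GWP 100a"), ("ReCiPe", "water use")]
aggregated, graph = multi_traverse_tagged_databases(fu, method_list, label="tag")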
Example #12
    def patch_lcia_methods(self, new_biosphere):
        flows = ["PFC (CO2-eq)", "HFC (CO2-eq)"]

        for flow in flows:
            assert get_activity((new_biosphere, flow))

        new_data = [((new_biosphere, flow), 1) for flow in flows]
        count = 0

        for name, metadata in methods.items():
            if metadata.get("unit") == "kg CO2-Eq":
                count += 1
                obj = Method(name)
                data = obj.load()
                data.extend(new_data)
                obj.write(data)

        print(f"Patched {count} LCIA methods with unit 'kg CO2-Eq'")
Example #13
    def create_methods(self):
        gw = [
            [("b", "bad"), 1],
        ]
        method = Method(("foo", ))
        method.register()
        method.write(gw)
        method.process()

        fake_dynamic_method = DynamicIAMethod("Dynamic foo")
        fake_dynamic_method.register()
        fake_dynamic_method.write({x[0]: x[1] for x in gw})
Example #14
def multi_traverse_tagged_databases(functional_unit, methods, label="tag", default_tag="other", secondary_tags=[]):

    lca = LCA(functional_unit, methods[0])
    lca.lci()  # factorize=True would speed up repeated solves
    lca.lcia()

    method_dicts = [{o[0]: o[1] for o in Method(method).load()} for method in methods]

    graph = [multi_recurse_tagged_database(key, amount, methods, method_dicts, lca, label, default_tag, secondary_tags)
             for key, amount in functional_unit.items()]

    return multi_aggregate_tagged_graph(graph), graph
    def install_fixtures(self):
        db = Database("a")
        db.write(lci_fixture)
        method = Method(("method", ))
        method.register()
        method.write(method_fixture)
        return db, method
Example #16
    def test_grouping_separate_unit(self):
        biosphere_data = {
            ("biosphere", "1"): {
                "categories": ["foo", "this"],
                "exchanges": [],
                "name": "some bad stuff",
                "type": "emission",
                "unit": "kg",
            },
            ("biosphere", "2"): {
                "categories": ["foo", "that"],
                "exchanges": [],
                "name": "some bad stuff",
                "type": "emission",
                "unit": "tonne",
            },
        }

        biosphere = Database("biosphere")
        biosphere.register(name="Tests", depends=[])
        biosphere.write(biosphere_data)

        method = Method(("test", "LCIA", "method"))
        method.register(unit="points")
        method.write([(("biosphere", "1"), 1.0, "GLO"),
                      (("biosphere", "2"), 2.0, "GLO")])

        answer = {
            ("some bad stuff", "foo", "kg"): [1.0],
            ("some bad stuff", "foo", "tonne"): [2.0],
        }
        self.assertEqual(group_by_emissions(method), answer)
    def test_health_check(self):
        db = Database("a")
        db.write(lci_fixture)
        method = Method(("method",))
        method.register()
        method.write(method_fixture)
        dhc = DHCMock("a").check()

def activity_and_method():
    database = DatabaseChooser("db")
    database.write({
        ("db", "a"): {
            'exchanges': [{
                'input': ("db", "a"),
                'amount': 2,
                'type': 'production',
            }, {
                'input': ("db", "b"),
                'amount': 3,
                'type': 'technosphere',
            }, {
                'input': ("db", "c"),
                'amount': 4,
                'type': 'biosphere',
            }],
            'name': 'a'
        },
        ("db", "b"): {'name': 'b'},
        ("db", "c"): {'name': 'c', 'type': 'biosphere'},
        ("db", "d"): {
            'name': 'd',
            'exchanges': [{
                'input': ("db", "a"),
                'amount': 5,
                'type': 'technosphere'
            }]
        },
    })
    cfs = [(("db", "c"), 42)]
    method = Method(("a method",))
    method.register()
    method.write(cfs)
    return database.get("a"), method
Example #19
def add_example_database(overwrite=True):
    from ..importers.excel import (
        assign_only_product_as_production,
        convert_activity_parameters_to_list,
        convert_uncertainty_types_to_integers,
        csv_add_missing_exchanges_section,
        csv_drop_unknown,
        csv_numerize,
        csv_restore_booleans,
        csv_restore_tuples,
        drop_falsey_uncertainty_fields_but_keep_zeros,
        ExcelImporter,
        set_code_by_activity_hash,
        strip_biosphere_exc_locations,
    )

    if "Mobility example" in databases:
        if not overwrite:
            print("Example already imported, use `overwrite=True` to delete")
            return
        else:
            del databases["Mobility example"]
            if ("IPCC", "simple") in methods:
                del methods[("IPCC", "simple")]

    importer = ExcelImporter(
        os.path.join(dirpath, "examples",
                     "sample_parameterized_database.xlsx"))
    importer.strategies = [
        csv_restore_tuples,
        csv_restore_booleans,
        csv_numerize,
        csv_drop_unknown,
        csv_add_missing_exchanges_section,
        strip_biosphere_exc_locations,
        set_code_by_activity_hash,
        assign_only_product_as_production,
        drop_falsey_uncertainty_fields_but_keep_zeros,
        convert_uncertainty_types_to_integers,
        convert_activity_parameters_to_list,
    ]
    importer.apply_strategies()
    importer.match_database(fields=['name'])
    importer.write_database(activate_parameters=True)

    group = "Mobility exchanges"
    Group.delete().where(Group.name == group).execute()
    group = Group.create(name=group)

    for ds in Database("Mobility example"):
        parameters.add_exchanges_to_group(group, ds)

    parameters.recalculate()

    ipcc = Method(("IPCC", "simple"))
    ipcc.register()
    ipcc.write([(("Mobility example", "CO2"), 1)])
Example #20
def traverse_tagged_databases_to_dataframe(functional_unit,
                                           method,
                                           label="tag",
                                           default_tag="other",
                                           secondary_tag=(None, None),
                                           product_system_depth=5):
    """Traverse a functional unit throughout its foreground database(s), and
    group impacts by tag label.
    
    Input arguments:
        * ``functional_unit``: A functional unit dictionary, e.g. ``{("foo", "bar"): 42}``.
        * ``method``: A method name, e.g. ``("foo", "bar")``
        * ``label``: The label of the tag classifier. Default is ``"tag"``
        * ``default_tag``: The tag classifier to use if none was given. Default is ``"other"``
        * ``secondary_tag``: Tuple in the format ``(secondary_label, secondary_default_tag)``. Default is ``(None, None)``.
        * ``product_system_depth``: Maximum depth to recurse into the foreground database(s). Default is ``5``.

    Returns:

        A ``pandas.DataFrame`` with one row per tag, containing the tag label, its ``Score``, and its ``Rel_Score`` (score relative to the total LCA score).

    """
    lca = LCA(functional_unit, method)
    lca.lci(factorize=True)
    lca.lcia()
    method_dict = {o[0]: o[1] for o in Method(method).load()}
    graph = [
        recurse_tagged_database(key, amount, method_dict, lca, label,
                                default_tag, secondary_tag,
                                product_system_depth)
        for key, amount in functional_unit.items()
    ]
    agg_graph = aggregate_tagged_graph(graph)
    if secondary_tag == (None, None):
        dtf = pd.Series(agg_graph, name='Score')
        dtf.index.name = label
        dtf = dtf.reset_index()
    else:
        dtf = pd.DataFrame(agg_graph)
        dtf[secondary_tag[0]] = dtf.index
        dtf = dtf.reset_index(drop=True)
        dtf = dtf.melt(id_vars=[secondary_tag[0]],
                       value_vars=[key for key in agg_graph.keys()])
        dtf = dtf.rename({"variable": label, 'value': 'Score'}, axis="columns")
        dtf = dtf.dropna()

    redo_lca_score(lca, functional_unit)
    dtf['Rel_Score'] = [imp / lca.score for imp in dtf.Score]
    return dtf
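A hedged call sketch; the functional unit and method tuple are placeholders. Without a secondary tag, the resulting frame has the tag label column plus ``Score`` and ``Rel_Score``.

# hedged sketch: names below are hypothetical
fu = {("foo", "bar"): 1}
df = traverse_tagged_databases_to_dataframe(fu, ("IPCC 2013", "GWP 100a"))
print(df[["tag", "Score", "Rel_Score"]])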
Example #21
    def from_static_method(self, name):
        """Turn a static LCIA method into a dynamic one.

        The dynamic method should not be registered yet.

        `name` is the name (tuple) of an existing static method."""
        assert name in methods, "Method {} not found".format(name)
        cfs = {
            obj[0]: obj[1]
            for obj in Method(name).load()
            if (len(obj) == 2 or obj[2] == 'GLO')
        }
        metadata = copy.deepcopy(methods[name])
        metadata['from_static_method'] = name
        self.register(**metadata)
        self.write(cfs)
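A hedged usage sketch: per the docstring, the dynamic method must not be registered beforehand, since ``from_static_method`` registers it with the copied metadata. The names below are placeholders.

# hedged sketch: method names are hypothetical
dynamic = DynamicIAMethod("GWP dynamic")  # not registered yet
dynamic.from_static_method(("IPCC 2013", "GWP 100a"))  # copies GLO CFs, then registers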
Example #22
    def to_worst_case_method(self,
                             name,
                             lower=None,
                             upper=None,
                             dynamic=True,
                             register=True):
        """Create a static LCA method using the worst case for each dynamic CF function.
        Default time interval over which to test for maximum CF is `datetime.now()` to `datetime.now()+relativedelta(years=100)`.
        
Args:
    * *name* (string): method name.
    * *lower* (datetime, default=datetime(2010, 1, 1): lower bound of the interval to consider.
    * *upper* (datetime, default=lower + relativedelta(years=100): upper bound of the interval to consider.
    * *dynamic* (bool, default=True): If total CF function of time of emission 
    * *register* (bool, default=True): If to register the method   

        """
        kwargs = {'dynamic': dynamic}
        if lower is not None:
            kwargs['lower'] = lower
        if upper is not None:
            kwargs['upper'] = upper
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            worst_case_method = Method(tuple(name))
            if worst_case_method.name not in methods:
                worst_case_method.register(dynamic_method=self.name)
        data = self.load()
        data.update(self.create_functions())
        # for now just characterize all the 'Carbon dioxide, in air' to be sure they are not skipped
        # should think better on how to deal with this
        method = [[('biosphere3', 'cc6a1abb-b123-4ca6-8f16-38209df609be'),
                   abs(get_maximum_value(value, **kwargs))]
                  if key == ('static_forest', "C_biogenic") else
                  [key, abs(get_maximum_value(value, **kwargs))]
                  for key, value in data.items()]
        # needed for the GWP function, to avoid registering the method every time
        if not register:
            return method
        worst_case_method.write(method)
        worst_case_method.process()  #GIU: guess not needed anymore right?
        return worst_case_method
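A hedged usage sketch; the method names and the interval bound below are placeholders.

# hedged sketch: names and dates are hypothetical
from datetime import datetime

dynamic = DynamicIAMethod("Dynamic foo")
worst = dynamic.to_worst_case_method(("Dynamic foo", "worst case"),
                                     lower=datetime(2020, 1, 1),
                                     dynamic=True)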
Example #23
    def __init__(self,
                 demand,
                 worst_case_method,
                 t0=None,
                 max_calc_number=1e4,
                 cutoff=0.001,
                 loop_cutoff=10,
                 group=False,
                 grouping_field="tempo_group",
                 log=False,
                 lca_object=None):
        self.demand = demand
        self.worst_case_method = worst_case_method
        self.t0 = (np.datetime64('now', 's') if t0 is None
                   else np.datetime64(t0).astype("datetime64[s]"))
        self.max_calc_number = max_calc_number
        self.cutoff_value = cutoff
        self.loop_cutoff_value = loop_cutoff
        self.log = get_logger("dynamic-lca.log") if log else FakeLog()
        self.lca_object = lca_object
        self.group = group
        self.grouping_field = grouping_field
        self.stat_for_keys = get_static_forest_keys()  # static forest processes
        self.loops = collections.Counter()  # count loop iterations

        # find the static databases; nodes and edges are added to the sets below as traversed
        all_databases = set.union(
            *[Database(key[0]).find_graph_dependents() for key in self.demand])
        self.static_databases = {
            name
            for name in all_databases if databases[name].get('static')
        }
        self.product_amount = collections.defaultdict(
            int)  # track the supply amount calculated for each product
        self.nodes = set()
        self.edges = set()

        # keep only the biosphere flows present in the CFs, so that only they
        # are added to the timeline
        self._flows_in_CF = [
            x[0] for x in Method(self.worst_case_method).load()
        ] + [('static_forest', 'C_biogenic')]

def build_databases():
    Database("biosphere").write({
        ("biosphere", "1"): {'type': 'emission'},
        ("biosphere", "2"): {'type': 'emission'},
    })
    Database("test").write({
        ("test", "1"): {
            'exchanges': [{
                'amount': 0.5,
                'minimum': 0.2,
                'maximum': 0.8,
                'input': ('test', "2"),
                'type': 'technosphere',
                'uncertainty type': 4  # Uniform
            }, {
                'amount': 100,
                'minimum': 50,
                'maximum': 500,
                'input': ('biosphere', "1"),
                'type': 'biosphere',
                'loc': 100,
                'scale': 20,
                'uncertainty type': 3  # Normal
            }],
            'type': 'process',
        },
        ("test", "2"): {
            'exchanges': [{
                'amount': -0.42,
                'input': ('biosphere', "2"),
                'type': 'biosphere',
            }],
            'type': 'process',
            'unit': 'kg'
        },
    })
    method = Method(("a", "method"))
    method.register()
    method.write([
        (("biosphere", "1"), 1),
        (("biosphere", "2"), {
            'amount': 10,
            'uncertainty type': 5,  # Triangular
            'loc': 10,
            'minimum': 8,
            'maximum': 15
        }),
    ])
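Every exchange and CF above carries an uncertainty distribution, so these fixtures suit stochastic LCA; a minimal sketch, assuming ``bw2calc``'s ``MonteCarloLCA`` is available in this setup:

# hedged sketch: assumes MonteCarloLCA from bw2calc
from bw2calc import MonteCarloLCA

build_databases()
mc = MonteCarloLCA({("test", "1"): 1}, ("a", "method"))
scores = [next(mc) for _ in range(10)]  # ten sampled LCIA scores
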
    def get_regionalized_characterization_matrix(self, builder=MatrixBuilder):
        """Get regionalized characterization matrix, **R**, which gives location- and biosphere flow-specific characterization factors. Rows are impact assessment spatial units, and columns are biosphere flows.

        Uses ``self._biosphere_dict`` and ``self.method``.

        Returns:
            * ``reg_cf_params``: Parameter array with row/col of IA locations/biosphere flows
            * ``ia_spatial_dict``: Dictionary linking impact assessment locations to matrix rows
            * ``reg_cf_matrix``: The matrix **R**

        """
        reg_cf_params, ia_spatial_dict, _, reg_cf_matrix = builder.build(
            paths=[Method(self.method).filepath_processed()],
            data_label="amount",
            row_id_label="geo",
            row_index_label="row",
            col_id_label="flow",
            col_index_label="col",
            col_dict=self._biosphere_dict,
        )
        return (reg_cf_params, ia_spatial_dict, reg_cf_matrix)
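A hedged sketch of consuming the return values; `rlca` is a stand-in for a regionalized LCA object whose `_biosphere_dict` has already been built.

# hedged sketch: `rlca` is a hypothetical regionalized LCA object
reg_cf_params, ia_spatial_dict, reg_cf_matrix = (
    rlca.get_regionalized_characterization_matrix())
# rows are IA spatial units, columns are biosphere flows
print(reg_cf_matrix.shape, len(ia_spatial_dict))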
Example #26
    def compare_to_previous(self):
        if not hasattr(self, "previous_reference"):
            raise ValueError("No previous reference method found")
        names_found_in_data = {
            get_activity(cf["input"])["name"].lower()
            for ds in self.data for cf in ds["exchanges"] if cf.get("input")
        }
        names_missing_in_data = {
            cf["name"].lower()
            for ds in self.data for cf in ds["exchanges"]
            if not cf.get("input")
        }
        names_in_reference = {
            get_activity(key)["name"].lower()
            for key, _ in Method(self.previous_reference).load()
        }
        return {
            "found": names_found_in_data,
            "missing": names_missing_in_data,
            "reference": names_in_reference,
        }

def group_by_emissions(method):
    """Group characterization factors by name, realm, and unit.

    **realm** is the general category, e.g. air, soil, water.

    Does not work on regionalized LCIA methods!

    Args:
        *method* (tuple or Method): LCIA method

    Returns:
        Dictionary: ``{(name, realm, unit): [cf, cf, ...]}``

    """
    if isinstance(method, Method):
        data = method.load()
    elif isinstance(method, tuple):
        data = Method(method).load()
    else:
        raise ValueError("Can't interpret %s as a LCIA method" % method)

    biosphere = Database(config.biosphere).load()
    grouped = {}

    for key, cf, geo in data:
        if geo != config.global_location:
            raise ValueError(
                "`group_by_emissions` doesn't work on regionalized methods")
        if key[0] != config.biosphere:
            # Alternative biosphere, e.g. Ecoinvent 3. Add new biosphere DB
            biosphere.update(Database(key[0]).load())
        flow = biosphere[key]
        label = (
            flow.get("name", "Unknown"),
            flow.get("categories", [""])[0],
            flow.get("unit", "Unknown"),
        )
        grouped.setdefault(label, []).append(cf)

    return grouped
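A hedged usage sketch (the method tuple is a placeholder); note this raises on regionalized methods, as documented above.

# hedged sketch: the method tuple is hypothetical
grouped = group_by_emissions(("IPCC 2013", "GWP 100a"))
for (name, realm, unit), cfs in sorted(grouped.items()):
    print(name, realm, unit, len(cfs))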
Example #28
def activity_and_method():
    database = DatabaseChooser("db")
    database.write({
        ("db", "a"): {
            "exchanges": [
                {
                    "input": ("db", "a"),
                    "amount": 2,
                    "type": "production",
                },
                {
                    "input": ("db", "b"),
                    "amount": 3,
                    "type": "technosphere",
                },
                {
                    "input": ("db", "c"),
                    "amount": 4,
                    "type": "biosphere",
                },
            ],
            "name":
            "a",
        },
        ("db", "b"): {
            "name": "b"
        },
        ("db", "c"): {
            "name": "c",
            "type": "biosphere"
        },
        ("db", "d"): {
            "name":
            "d",
            "exchanges": [{
                "input": ("db", "a"),
                "amount": 5,
                "type": "technosphere"
            }],
        },
    })
    cfs = [(("db", "c"), 42)]
    method = Method(("a method", ))
    method.register()
    method.write(cfs)
    return database.get("a"), method
Example #29
def build_databases():
    Database("biosphere").write({
        ("biosphere", "1"): {'type': 'emission'},
        ("biosphere", "2"): {'type': 'emission'},
    })
    Database("test").write({
        ("test", "1"): {
            'exchanges': [{
                'amount': 0.5,
                'minimum': 0.2,
                'maximum': 0.8,
                'input': ('test', "2"),
                'type': 'technosphere',
                'uncertainty type': 4  # Uniform
            }, {
                'amount': 1,
                'minimum': 0.5,
                'maximum': 1.5,
                'input': ('biosphere', "1"),
                'type': 'biosphere',
                'uncertainty type': 4  # Uniform
            }],
            'type': 'process',
        },
        ("test", "2"): {
            'exchanges': [{
                'amount': 0.1,
                'minimum': 0,
                'maximum': 0.2,
                'input': ('biosphere', "2"),
                'type': 'biosphere',
                'uncertainty type': 4  # Uniform
            }],
            'type': 'process',
            'unit': 'kg'
        },
    })
    method = Method(("a", "method"))
    method.register()
    method.write([
        (("biosphere", "1"), 1),
        (("biosphere", "2"), 2),
    ])
Example #30
def traverse_tagged_databases(functional_unit,
                              method,
                              label="tag",
                              default_tag="other",
                              secondary_tags=[]):
    """Traverse a functional unit throughout its foreground database(s), and

    group impacts by tag label.


    Contribution analysis work by linking impacts to individual activities.

    However, you also might want to group impacts in other ways. For example,

    give individual biosphere exchanges their own grouping, or aggregate two

    activities together.


    Consider this example system, where the letters are the tag labels, and the

    numbers are exchange amounts. The functional unit is one unit of the tree

    root.


    .. image:: images/tagged-traversal.png

       :alt: Example tagged supply chain


    In this supply chain, tags are applied to activities and biosphere exchanges.

    If a biosphere exchange is not tagged, it inherits the tag of its producing

    activity. Similarly, links to other databases are assessed with the usual

    LCA machinery, and the total LCA score is tagged according to its consuming

    activity. If an activity does not have a tag, a default tag is applied.


    We can change our visualization to show the use of the default tags:


    .. image:: images/tagged-traversal-2.png

       :alt: Example tagged supply chain


    And then we can manually calculate the tagged impacts. Normally we would

    need to know the actual biosphere flows and their respective

    characterization factors (CF), but in this example we assume that each

    CF is one. Our result, group by tags, would therefore be:


        * **A**: :math:`6 + 27 = 33`

        * **B**: :math:`30 + 44 = 74`

        * **C**: :math:`5 + 16 + 48 = 69`

        * **D**: :math:`14`


    This function will only traverse the foreground database, i.e. the

    database of the functional unit activity. A functional unit can have

    multiple starting nodes; in this case, all foreground databases are

    traversed.


    Input arguments:

        * ``functional_unit``: A functional unit dictionary, e.g. ``{("foo", "bar"): 42}``.

        * ``method``: A method name, e.g. ``("foo", "bar")``

        * ``label``: The label of the tag classifier. Default is ``"tag"``

        * ``default_tag``: The tag classifier to use if none was given. Default is ``"other"``

        * ``secondary_tags``: List of tuples in the format (secondary_label, secondary_default_tag). Default is empty list.


    Returns:


        Aggregated tags dictionary from ``aggregate_tagged_graph``, and tagged supply chain graph from ``recurse_tagged_database``.


    """

    lca = LCA(functional_unit, method)

    lca.lci(factorize=True)

    lca.lcia()

    method_dict = {o[0]: o[1] for o in Method(method).load()}

    graph = [
        recurse_tagged_database(key, amount, method_dict, lca, label,
                                default_tag, secondary_tags)
        for key, amount in functional_unit.items()
    ]

    return aggregate_tagged_graph(graph), graph
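Closing the loop on the worked example in the docstring, a hedged call sketch (the functional unit and method tuple are placeholders):

# hedged sketch: names are hypothetical
fu = {("foo", "bar"): 1}
aggregated, graph = traverse_tagged_databases(fu, ("IPCC 2013", "GWP 100a"))
# with every CF equal to one, as in the worked example above, `aggregated`
# would be {"A": 33, "B": 74, "C": 69, "D": 14}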