Example #1
def test_writing_test_fixture(basic):
    assert len(databases) == 2
    assert len(methods) == 1
    lca = LCA({("test", "1"): 1})
    lca.lci()
    expected = [
        (("test", "1"), ("test", "1"), 1),
        (("test", "3"), ("test", "1"), -0.1),
        (("test", "2"), ("test", "2"), 0.5),
        (("test", "1"), ("test", "2"), 2),
        (("test", "3"), ("test", "3"), 1),
        (("test", "1"), ("test", "3"), -3),
        (("test", "2"), ("test", "3"), -2),
    ]
    for x, y, z in expected:
        assert np.allclose(
            lca.technosphere_matrix[lca.product_dict[x], lca.activity_dict[y]],
            z)
    expected = [
        (("bio", "b"), ("test", "1"), 7),
        (("bio", "a"), ("test", "2"), 1),
        (("bio", "b"), ("test", "2"), 5),
        (("bio", "a"), ("test", "3"), 2),
    ]
    for x, y, z in expected:
        assert np.allclose(
            lca.biosphere_matrix[lca.biosphere_dict[x], lca.activity_dict[y]],
            z)
Example #2
def test_solver_cache_invalidated(basic):
    ss = os.path.join(basedir, "single-sample")
    lca = LCA({("test", "2"): 1}, method=("m", ), presamples=[ss])
    lca.lci(factorize=True)
    assert hasattr(lca, "solver")
    lca.presamples.update_matrices()
    assert not hasattr(lca, "solver")
Example #3
def test_excel_products_lca():
    ei = ExcelImporter(os.path.join(EXCEL_FIXTURES_DIR, "with_products.xlsx"))
    ei.strategies = [
        csv_restore_tuples,
        csv_restore_booleans,
        csv_numerize,
        csv_drop_unknown,
        csv_add_missing_exchanges_section,
        normalize_units,
        set_code_by_activity_hash,
        assign_only_product_as_production,
        link_technosphere_by_activity_hash,
        drop_falsey_uncertainty_fields_but_keep_zeros,
        convert_uncertainty_types_to_integers,
    ]
    ei.apply_strategies()
    ei.match_database()
    ei.write_database()
    lca = LCA({('Product example', 'B'): 1})
    lca.lci()
    keys = {('Product example', 'B'), ('Product example', 'C'),
            ('Product example', 'E')}
    for key in lca.product_dict:
        assert key in keys
    keys = {('Product example', 'A'), ('Product example', 'C'),
            ('Product example', 'D')}
    for key in lca.activity_dict:
        assert key in keys
    for value in lca.supply_array:
        assert np.allclose(value, 1) or np.allclose(
            value, 0.539) or np.allclose(value, 0.539 * 0.00805)
Example #4
    def calculate(self):
        self.lca = LCA({self.database.random(): 1})
        self.lca.lci()
        self.ra, _, _ = self.lca.reverse_dict()
        self.matrix = self.lca.technosphere_matrix.transpose()
        self.pr = [(x[0], self.ra[x[1]]) for x in self.page_rank(self.matrix)]
        return self.pr
Example #5
    def graph_technosphere(self, filename=None, **kwargs):
        from bw2analyzer.matrix_grapher import SparseMatrixGrapher
        from bw2calc import LCA
        lca = LCA({self.random(): 1})
        lca.lci()

        smg = SparseMatrixGrapher(lca.technosphere_matrix)
        return smg.ordered_graph(filename, **kwargs)
Example #6
def test_multi_sample_presamples_no_seed_different(basic):
    path = os.path.join(basedir, "unseeded")

    multi = []
    for _ in range(10):
        lca = LCA({("test", "2"): 1}, method=("m", ), presamples=[path])
        lca.lci()
        multi.append(lca.technosphere_matrix.data)

    assert not all(np.allclose(multi[i], multi[i + 1]) for i in range(9))
Example #7
    def make_graphs(self, graphs_dir=None):
        lca = LCA({self.db.random(): 1})
        lca.lci()
        tech_filename = self.db.filename + ".technosphere.png"
        tech_filepath = os.path.join(graphs_dir or projects.output_dir, tech_filename)
        SparseMatrixGrapher(lca.technosphere_matrix).graph(tech_filepath, dpi=600)
        bio_filename = self.db.filename + ".biosphere.png"
        bio_filepath = os.path.join(graphs_dir or projects.output_dir, bio_filename)
        SparseMatrixGrapher(lca.biosphere_matrix).graph(bio_filepath, dpi=600)
        return tech_filepath, tech_filename, bio_filepath, bio_filename
Example #8
def test_call_update_matrices_manually(basic):
    path = os.path.join(basedir, "multi")

    lca = LCA({("test", "2"): 1}, method=("m", ), presamples=[path])
    lca.lci()
    lca.lcia()

    results = set()
    for _ in range(100):
        lca.presamples.update_matrices()
        lca.redo_lci()
        lca.redo_lcia()
        results.add(lca.score)

    assert len(results) > 1
Example #9
def test_sequential_seed():
    path = os.path.join(basedir, "seq")
    lca = LCA({("test", "2"): 1}, method=("m", ), presamples=[path])
    lca.lci()
    assert lca.technosphere_matrix[lca.product_dict[("test", "1")],
                                   lca.activity_dict[("test", "2")]] == -1
    lca.presamples.update_matrices()
    assert lca.technosphere_matrix[lca.product_dict[("test", "1")],
                                   lca.activity_dict[("test", "2")]] == -2
    lca.presamples.update_matrices()
    assert lca.technosphere_matrix[lca.product_dict[("test", "1")],
                                   lca.activity_dict[("test", "2")]] == -3
    lca.presamples.update_matrices()
    assert lca.technosphere_matrix[lca.product_dict[("test", "1")],
                                   lca.activity_dict[("test", "2")]] == -1
Example #10
    def lca(self, method=None, amount=1.):
        """Shortcut to construct an LCA object for this activity."""
        from bw2calc import LCA

        lca = LCA({self: amount}, method=method)
        lca.lci()
        if method is not None:
            lca.lcia()
        lca.fix_dictionaries()
        return lca
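
A possible call to this shortcut is sketched below; the database and method names are placeholders, not taken from the surrounding examples.

# Hypothetical usage of the Activity.lca() shortcut defined above.
from bw2data import Database

act = Database("example_db").random()              # any Activity object
lca = act.lca(method=("example method",), amount=2.0)
print(lca.score)                                   # LCIA score for 2 units of act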
Example #11
    def test_d3_treemap_no_error(self):
        self.install_fixtures()
        lca = LCA({("a", "2"): 1}, ("method", ))
        lca.lci()
        lca.lcia()
        ra, rp, rb = lca.reverse_dict()
        CA().d3_treemap(lca.characterized_inventory, rb, ra)
Example #12
def multi_traverse_tagged_databases(functional_unit,
                                    methods,
                                    label="tag",
                                    default_tag="other",
                                    secondary_tags=[]):
    """Traverse a functional unit throughout its foreground database(s), and
    group impacts (for multiple methods) by tag label.

    Input arguments:
        * ``functional_unit``: A functional unit dictionary, e.g. ``{("foo", "bar"): 42}``.
        * ``methods``: A list of method names, e.g. ``[("foo", "bar"), ("baz", "qux"), ...]``
        * ``label``: The label of the tag classifier. Default is ``"tag"``
        * ``default_tag``: The tag classifier to use if none was given. Default is ``"other"``
        * ``secondary_tags``: List of tuples in the format (secondary_label, secondary_default_tag). Default is empty list.

    Returns:

        Aggregated tags dictionary from ``aggregate_tagged_graph``, and tagged supply chain graph from ``recurse_tagged_database``.

    """

    lca = LCA(functional_unit, methods[0])
    lca.lci()  # factorize=True
    lca.lcia()

    method_dicts = [{o[0]: o[1]
                     for o in Method(method).load()} for method in methods]

    graph = [
        multi_recurse_tagged_database(key, amount, methods, method_dicts, lca,
                                      label, default_tag, secondary_tags)
        for key, amount in functional_unit.items()
    ]

    return multi_aggregate_tagged_graph(graph), graph
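
A usage sketch for multi_traverse_tagged_databases; the functional unit and method tuples below are placeholders.

# Hypothetical call: group impacts for two LCIA methods by the "tag" label.
fu = {("foo", "bar"): 1}                           # placeholder functional unit
methods = [("method", "a"), ("method", "b")]       # placeholder method names
agg, graph = multi_traverse_tagged_databases(fu, methods, label="tag")
print(agg)  # presumably one score per method for each tag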
Example #13
def test_multi_mc_not_same_answer():
    k1, k2, packages = mc_fixture()
    activity_list = [{k1: 1}, {k2: 1}]
    mc = MultiMonteCarlo(activity_list, data_objs=packages, iterations=10)
    results = mc.calculate()
    assert len(results) == 2
    for _, lst in results:
        assert len(set(lst)) == len(lst)

    lca = LCA(activity_list[0], data_objs=packages)
    lca.lci()
    lca.lcia()

    def score(lca, func_unit):
        lca.redo_lcia(func_unit)
        return lca.score

    static = [score(lca, func_unit) for func_unit in activity_list]
    for a, b in zip(static, results):
        assert a not in b[1]
Example #14
def test_excel_products_lca():
    ei = ExcelImporter(os.path.join(EXCEL_FIXTURES_DIR, "with_products.xlsx"))
    ei.strategies = [
        csv_restore_tuples,
        csv_restore_booleans,
        csv_numerize,
        csv_drop_unknown,
        csv_add_missing_exchanges_section,
        normalize_units,
        set_code_by_activity_hash,
        assign_only_product_as_production,
        link_technosphere_by_activity_hash,
        drop_falsey_uncertainty_fields_but_keep_zeros,
        convert_uncertainty_types_to_integers,
    ]
    print(ei.data)
    ei.apply_strategies()
    ei.match_database()
    ei.write_database()
    fu, data_objs, _ = prepare_lca_inputs({("Product example", "B"): 1})
    lca = LCA(fu, data_objs=data_objs)
    lca.lci()
    keys = {
        get_id(("Product example", "B")),
        get_id(("Product example", "C")),
        get_id(("Product example", "E")),
    }
    for key in lca.dicts.product:
        assert key in keys
    keys = {
        get_id(("Product example", "A")),
        get_id(("Product example", "C")),
        get_id(("Product example", "D")),
    }
    for key in lca.dicts.activity:
        assert key in keys
    for value in lca.supply_array:
        assert (np.allclose(value, 1) or np.allclose(value, 0.539)
                or np.allclose(value, 0.539 * 0.00805))
Example #15
    def lca(self, method=None, amount=None):
        """Shortcut to construct an LCA object for this exchange **input**.

        Uses the exchange amount if no other amount is provided."""
        from bw2calc import LCA

        if amount is None:
            amount = self['amount']

        lca = LCA({self.input: amount}, method=method)
        lca.lci()
        if method is not None:
            lca.lcia()
        lca.fix_dictionaries()
        return lca
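
A matching sketch for this exchange variant, which falls back to the exchange's own amount; all names are placeholders.

# Hypothetical usage: score a single technosphere exchange of an activity.
from bw2data import Database

act = Database("example_db").random()
exc = next(iter(act.technosphere()))               # first technosphere exchange
lca = exc.lca(method=("example method",))          # uses exc['amount'] by default
print(lca.score)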
Example #16
def test_multi_sample_presamples(basic):
    path = os.path.join(basedir, "multi")

    lca = LCA({("test", "2"): 1}, method=("m", ))
    lca.lci()
    static = lca.technosphere_matrix.data

    multi = []
    for _ in range(10):
        lca = LCA({("test", "2"): 1}, method=("m", ), presamples=[path])
        lca.lci()
        multi.append(lca.technosphere_matrix.data)

    assert all(np.allclose(multi[i], multi[i + 1]) for i in range(9))
    for x in range(9):
        assert not np.allclose(multi[x], static)
Example #17
def test_basic_setup(bw25_setup):
    fu, data_objs, _ = prepare_lca_inputs({("c", "2"): 1}, method=("d",))
    lca = LCA(fu, data_objs=data_objs)
    lca.lci()
    lca.lcia()

    print(lca.technosphere_matrix.toarray())
    print(lca.biosphere_matrix.toarray())
    print(lca.supply_array)
    print(lca.score)

    assert np.allclose(lca.score, 44)
Example #18
def multi_traverse_tagged_databases(functional_unit, methods, label="tag", default_tag="other", secondary_tags=[]):

    lca = LCA(functional_unit, methods[0])
    lca.lci()  # factorize=True
    lca.lcia()

    method_dicts = [{o[0]: o[1] for o in Method(method).load()} for method in methods]

    graph = [multi_recurse_tagged_database(key, amount, methods, method_dicts, lca, label, default_tag, secondary_tags)
             for key, amount in functional_unit.items()]

    return multi_aggregate_tagged_graph(graph), graph
Example #19
def test_single_sample_presamples(basic):
    ss = os.path.join(basedir, "single-sample")

    lca = LCA({("test", "2"): 1}, method=("m", ))
    lca.lci()
    assert np.allclose(lca.supply_array,
                       np.array([-(2 + 2 / 3), 14 / 15, -4 / 15]))
    lca = LCA({("test", "2"): 1}, method=("m", ), presamples=[ss])
    lca.lci()
    assert np.allclose(lca.supply_array, np.array([2, 1.4, 0.2]))

    mc = MonteCarloLCA({("test", "2"): 1}, method=("m", ))
    next(mc)
    assert np.allclose(mc.supply_array,
                       np.array([-(2 + 2 / 3), 14 / 15, -4 / 15]))
    mc = MonteCarloLCA({("test", "2"): 1}, method=("m", ), presamples=[ss])
    next(mc)
    assert np.allclose(mc.supply_array, np.array([2, 1.4, 0.2]))

    mc = ParameterVectorLCA({("test", "2"): 1}, method=("m", ))
    next(mc)
    assert mc.technosphere_matrix[mc.product_dict[("test", "2")],
                                  mc.activity_dict[("test", "2")]] == 0.5
    mc = ParameterVectorLCA({("test", "2"): 1},
                            method=("m", ),
                            presamples=[ss])
    next(mc)
    assert mc.technosphere_matrix[mc.product_dict[("test", "2")],
                                  mc.activity_dict[("test", "2")]] == 1

    mc = ComparativeMonteCarlo([{("test", "2"): 1}], method=("m", ))
    next(mc)
    assert mc.technosphere_matrix[mc.product_dict[("test", "2")],
                                  mc.activity_dict[("test", "2")]] == 0.5
    mc = ComparativeMonteCarlo([{("test", "2"): 1}],
                               method=("m", ),
                               presamples=[ss])
    next(mc)
    assert mc.technosphere_matrix[mc.product_dict[("test", "2")],
                                  mc.activity_dict[("test", "2")]] == 1
Example #20
def traverse_tagged_databases_to_dataframe(functional_unit,
                                           method,
                                           label="tag",
                                           default_tag="other",
                                           secondary_tag=(None, None),
                                           product_system_depth=5):
    """Traverse a functional unit throughout its foreground database(s), and
    group impacts by tag label.
    
    Input arguments:
        * ``functional_unit``: A functional unit dictionary, e.g. ``{("foo", "bar"): 42}``.
        * ``method``: A method name, e.g. ``("foo", "bar")``
        * ``label``: The label of the tag classifier. Default is ``"tag"``
        * ``default_tag``: The tag classifier to use if none was given. Default is ``"other"``
        * ``secondary_tags``: List of tuples in the format (secondary_label, secondary_default_tag). Default is empty list.

    Returns:

        Aggregated tags dictionary from ``aggregate_tagged_graph``, and tagged supply chain graph from ``recurse_tagged_database``.

    """
    lca = LCA(functional_unit, method)
    lca.lci(factorize=True)
    lca.lcia()
    method_dict = {o[0]: o[1] for o in Method(method).load()}
    graph = [
        recurse_tagged_database(key, amount, method_dict, lca, label,
                                default_tag, secondary_tag,
                                product_system_depth)
        for key, amount in functional_unit.items()
    ]
    agg_graph = aggregate_tagged_graph(graph)
    if secondary_tag == (None, None):
        dtf = pd.Series(agg_graph, name='Score')
        dtf.index.name = label
        dtf = dtf.reset_index()
    else:
        dtf = pd.DataFrame(agg_graph)
        dtf[secondary_tag[0]] = dtf.index
        dtf = dtf.reset_index(drop=True)
        dtf = dtf.melt(id_vars=[secondary_tag[0]],
                       value_vars=[key for key in agg_graph.keys()])
        dtf = dtf.rename({"variable": label, 'value': 'Score'}, axis="columns")
        dtf = dtf.dropna()

    redo_lca_score(lca, functional_unit)
    dtf['Rel_Score'] = [imp / lca.score for imp in dtf.Score]
    return dtf
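
A usage sketch for the DataFrame variant; the functional unit and method are placeholders.

# Hypothetical call: tag-level scores as a pandas DataFrame.
fu = {("foo", "bar"): 1}                           # placeholder functional unit
dtf = traverse_tagged_databases_to_dataframe(fu, ("method", "a"), label="tag")
# Expected columns: the tag label, "Score", and "Rel_Score" (score / total score).
print(dtf.sort_values("Score", ascending=False))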
Example #21
    def calculate(self):
        """Calculate LCA report data"""
        lca = LCA(self.activity, self.method)
        lca.lci()
        lca.lcia()

        gt = GraphTraversal().calculate(self.activity, method=self.method)
        print("FD")
        force_directed = self.get_force_directed(gt["nodes"], gt["edges"], lca)
        print("CA")
        ca = ContributionAnalysis()
        print("hinton")
        hinton = ca.hinton_matrix(lca)
        print("treemap")
        treemap = self.get_treemap(gt["nodes"], gt["edges"], lca)
        print("herfindahl")
        herfindahl = herfindahl_index(lca.characterized_inventory.data)
        print("concentration")
        concentration = concentration_ratio(lca.characterized_inventory.data)
        print("MC:")
        monte_carlo = self.get_monte_carlo()

        activity_data = []
        for k, v in self.activity.items():
            obj = get_activity(k)
            activity_data.append((obj["name"], "%.2g" % v, obj["unit"]))

        self.report = {
            "activity": activity_data,
            "method": {
                "name": ": ".join(self.method),
                "unit": methods[self.method]["unit"],
            },
            "score": float(lca.score),
            "contribution": {
                "hinton": hinton,
                "treemap": treemap,
                "herfindahl": herfindahl,
                "concentration": concentration,
            },
            "force_directed": force_directed,
            "monte carlo": monte_carlo,
            "metadata": {
                "type": "Brightway2 serialized LCA report",
                "version": self.version,
                "uuid": self.uuid,
            },
        }
Example #22
def lci_matrices_to_excel(database_name, include_descendants=True):
    """Fake docstring"""

    from bw2calc import LCA
    print("Starting Excel export. This can be slow for large matrices!")
    safe_name = safe_filename(database_name, False)
    filepath = os.path.join(projects.output_dir, safe_name + ".xlsx")

    lca = LCA({Database(database_name).random(): 1})
    lca.load_lci_data()
    lca.fix_dictionaries()

    if not include_descendants:
        lca.activity_dict = {
            key: value
            for key, value in lca.activity_dict.items()
            if key[0] == database_name
        }

    # Drop biosphere flows with zero references
    # TODO: This will ignore (-1 + 1 = 0) references
    lca.biosphere_dict = {
        key: value
        for key, value in lca.biosphere_dict.items()
        if lca.biosphere_matrix[lca.biosphere_dict[key], :].sum() != 0
    }

    workbook = xlsxwriter.Workbook(filepath)
    bold = workbook.add_format({'bold': True})

    print("Sorting objects")

    sorted_activity_keys = sorted([(Database.get(key).get("name")
                                    or u"Unknown", key)
                                   for key in lca.activity_dict])
    sorted_product_keys = sorted([(Database.get(key).get("name")
                                   or u"Unknown", key)
                                  for key in lca.product_dict])
    sorted_bio_keys = sorted([(Database.get(key).get("name")
                               or u"Unknown", key)
                              for key in lca.biosphere_dict])

    tm_sheet = workbook.add_worksheet('technosphere')
    tm_sheet.set_column('A:A', 50)

    # Labels
    for index, data in enumerate(sorted_activity_keys):
        tm_sheet.write_string(0, index + 1, data[0])
    for index, data in enumerate(sorted_product_keys):
        tm_sheet.write_string(index + 1, 0, data[0])

    print("Entering technosphere matrix data")

    coo = lca.technosphere_matrix.tocoo()

    # Translate row index to sorted product index
    act_dict = {obj[1]: idx for idx, obj in enumerate(sorted_activity_keys)}
    pro_dict = {obj[1]: idx for idx, obj in enumerate(sorted_product_keys)}
    bio_dict = {obj[1]: idx for idx, obj in enumerate(sorted_bio_keys)}

    pro_lookup = {v: pro_dict[k] for k, v in lca.product_dict.items()}
    bio_lookup = {v: bio_dict[k] for k, v in lca.biosphere_dict.items()}
    act_lookup = {v: act_dict[k] for k, v in lca.activity_dict.items()}

    # Matrix values
    for row, col, value in zip(coo.row, coo.col, coo.data):
        tm_sheet.write_number(pro_lookup[row] + 1, act_lookup[col] + 1, value)

    bm_sheet = workbook.add_worksheet('biosphere')
    bm_sheet.set_column('A:A', 50)

    # Labels
    for index, data in enumerate(sorted_activity_keys):
        bm_sheet.write_string(0, index + 1, data[0])
    for index, data in enumerate(sorted_bio_keys):
        bm_sheet.write_string(index + 1, 0, data[0])

    print("Entering biosphere matrix data")

    coo = lca.biosphere_matrix.tocoo()

    # Matrix values
    for row, col, value in zip(coo.row, coo.col, coo.data):
        bm_sheet.write_number(bio_lookup[row] + 1, act_lookup[col] + 1, value)

    COLUMNS = (u"Index", u"Name", u"Reference product", u"Unit", u"Categories",
               u"Location")

    tech_sheet = workbook.add_worksheet('technosphere-labels')
    tech_sheet.set_column('B:B', 60)
    tech_sheet.set_column('C:C', 30)
    tech_sheet.set_column('D:D', 15)
    tech_sheet.set_column('E:E', 30)

    print("Writing metadata")

    # Header
    for index, col in enumerate(COLUMNS):
        tech_sheet.write_string(0, index, col, bold)

    tech_sheet.write_comment(
        'C1',
        "Only for ecoinvent 3, where names =/= products.",
    )

    for index, data in enumerate(sorted_activity_keys):
        obj = Database.get(data[1])

        tech_sheet.write_number(index + 1, 0, index + 1)
        tech_sheet.write_string(index + 1, 1, obj.get(u'name') or u'Unknown')
        tech_sheet.write_string(index + 1, 2,
                                obj.get(u'reference product') or u'')
        tech_sheet.write_string(index + 1, 3, obj.get(u'unit') or u'Unknown')
        tech_sheet.write_string(index + 1, 4,
                                u" - ".join(obj.get(u'categories') or []))
        tech_sheet.write_string(index + 1, 5,
                                obj.get(u'location') or u'Unknown')

    COLUMNS = (
        u"Index",
        u"Name",
        u"Unit",
        u"Categories",
    )

    bio_sheet = workbook.add_worksheet('biosphere-labels')
    bio_sheet.set_column('B:B', 60)
    bio_sheet.set_column('C:C', 15)
    bio_sheet.set_column('D:D', 30)

    # Header
    for index, col in enumerate(COLUMNS):
        bio_sheet.write_string(0, index, col, bold)

    for index, data in enumerate(sorted_bio_keys):
        obj = Database.get(data[1])

        bio_sheet.write_number(index + 1, 0, index + 1)
        bio_sheet.write_string(index + 1, 1, obj.get(u'name') or u'Unknown')
        bio_sheet.write_string(index + 1, 2, obj.get(u'unit') or u'Unknown')
        bio_sheet.write_string(index + 1, 3,
                               u" - ".join(obj.get(u'categories') or []))

    workbook.close()
    return filepath
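
A minimal usage sketch, assuming "example_db" is a registered database in the current project.

# Hypothetical usage: write the LCI matrices to an .xlsx workbook.
filepath = lci_matrices_to_excel("example_db")
print("Workbook written to", filepath)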
Example #23
def lci_matrices_to_matlab(database_name):
    from bw2calc import LCA

    lca = LCA({Database(database_name).random(): 1})
    lca.lci()
    lca.fix_dictionaries()
    ra, rp, rb = lca.reverse_dict()

    safe_name = safe_filename(database_name, False)
    dirpath = projects.output_dir
    scipy.io.savemat(
        os.path.join(dirpath, safe_name + ".mat"),
        {
            "technosphere": lca.technosphere_matrix,
            "biosphere": lca.biosphere_matrix
        },
    )

    workbook = xlsxwriter.Workbook(os.path.join(dirpath, safe_name + ".xlsx"))
    bold = workbook.add_format({"bold": True})

    COLUMNS = ("Index", "Name", "Reference product", "Unit", "Categories",
               "Location")

    tech_sheet = workbook.add_worksheet("technosphere")
    tech_sheet.set_column("B:B", 60)
    tech_sheet.set_column("C:C", 30)
    tech_sheet.set_column("D:D", 15)
    tech_sheet.set_column("E:E", 30)

    # Header
    for index, col in enumerate(COLUMNS):
        tech_sheet.write_string(0, index, col, bold)

    tech_sheet.write_comment(
        "C1",
        "Only for ecoinvent 3, where names =/= products.",
    )

    data = Database(database_name).load()

    for index, key in sorted(ra.items()):
        tech_sheet.write_number(index + 1, 0, index + 1)
        tech_sheet.write_string(index + 1, 1, data[key].get("name")
                                or "Unknown")
        tech_sheet.write_string(index + 1, 2,
                                data[key].get("reference product") or "")
        tech_sheet.write_string(index + 1, 3, data[key].get("unit")
                                or "Unknown")
        tech_sheet.write_string(index + 1, 4,
                                " - ".join(data[key].get("categories") or []))
        tech_sheet.write_string(index + 1, 5, data[key].get("location")
                                or "Unknown")

    COLUMNS = (
        "Index",
        "Name",
        "Unit",
        "Categories",
    )

    biosphere_dicts = {}
    bio_sheet = workbook.add_worksheet("biosphere")
    bio_sheet.set_column("B:B", 60)
    bio_sheet.set_column("C:C", 15)
    bio_sheet.set_column("D:D", 30)

    # Header
    for index, col in enumerate(COLUMNS):
        bio_sheet.write_string(0, index, col, bold)

    for index, key in sorted(rb.items()):
        if key[0] not in biosphere_dicts:
            biosphere_dicts[key[0]] = Database(key[0]).load()
        obj = biosphere_dicts[key[0]][key]

        bio_sheet.write_number(index + 1, 0, index + 1)
        bio_sheet.write_string(index + 1, 1, obj.get("name", "Unknown"))
        bio_sheet.write_string(index + 1, 2, obj.get("unit", "Unknown"))
        bio_sheet.write_string(index + 1, 3,
                               " - ".join(obj.get("categories", [])))

    workbook.close()
    return dirpath
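
A minimal usage sketch for the Matlab export, again with a placeholder database name.

# Hypothetical usage: writes <safe_name>.mat plus a labels workbook to the output dir.
dirpath = lci_matrices_to_matlab("example_db")
print("Files written to", dirpath)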
Example #24
    def test_hinton_matrix_no_error(self):
        self.install_fixtures()
        lca = LCA({("a", "2"): 1}, ("method", ))
        lca.lci()
        lca.lcia()
        CA().hinton_matrix(lca, 2, 2)
Example #25
    def calculate(self):
        """Calculate"""
        self.timeline = Timeline()
        self.heap = []  # heap with dynamic exchanges to loop over (impact, edge, datetime, TemporalDistribution)
        self.calc_number = 0

        # run worst-case LCA if lca_object was not passed; else redo it for this demand and worst-case method
        if self.lca_object:
            _redo_lcia(self, self.lca_object, self.demand,
                       self.worst_case_method)
        else:
            self.lca = LCA(self.demand, self.worst_case_method)
            self.lca.lci()
            self.lca.lcia()

        # reverse dictionaries and calculate cutoff
        self.reverse_activity_dict, self.reverse_prod_dict, self.reverse_bio_dict = \
            self.lca.reverse_dict()
        self.cutoff = abs(self.lca.score) * self.cutoff_value

        #logs
        self.log.info("Starting dynamic LCA")
        self.log.info("Demand: %s" % self.demand)
        self.log.info("Worst case method: %s" % str(self.worst_case_method))
        self.log.info("Start datetime: %s" % self.t0)
        self.log.info("Maximum calculations: %i" % self.max_calc_number)
        self.log.info("Worst case LCA score: %.4g." % self.lca.score)
        self.log.info("Cutoff value (fraction): %.4g." % self.cutoff_value)
        self.log.info("Cutoff score: %.4g." % self.cutoff)

        # Initialize heap
        #MAYBE NOT NECESSARY ANYMORE
        heappush(
            self.heap,
            (
                None,
                (
                    "Functional unit",
                    "Functional unit",
                ),
                self.t0,
                TemporalDistribution(
                    np.array([
                        0,
                    ], dtype='timedelta64[s]'),  # need int
                    np.array((1., )).astype(float)),
                'Functional unit'  #with tag
            )
        )  #if self.lca.score!=0 else self.timeline.add(self.t0.astype(datetime.datetime) , None, None,0) #deal with demand with no impact (doing so does not return error in LCIA)
        #TODO: the part commented out above was needed for `MultiDynamicLCA`, in commits `traverse also when total score is 0` this has been deleted, check if `MultiDynamicLCA` works fine or is affected

        while self.heap:
            if self.calc_number >= self.max_calc_number:
                warnings.warn("Stopping traversal due to calculation count.")
                break
            self._iterate()

        self.log.info("NODES: " + pprint.pformat(self.nodes))
        self.log.info("EDGES: " + pprint.pformat(self.edges))

        return self.timeline
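
The calculate method above belongs to DynamicLCA (shown in full in Example #30). A usage sketch with placeholder demand and method tuples:

# Hypothetical usage of DynamicLCA; all names are placeholders.
dlca = DynamicLCA(demand={("example_db", "some_code"): 1},
                  worst_case_method=("method", "worst"),
                  t0="2020-01-01")
timeline = dlca.calculate()    # Timeline of temporally distributed flows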
Example #26
class PageRank(object):
    def __init__(self, database):
        self.database = database

    def calculate(self):
        self.lca = LCA({self.database.random(): 1})
        self.lca.lci()
        self.ra, _, _ = self.lca.reverse_dict()
        self.matrix = self.lca.technosphere_matrix.transpose()
        self.pr = [(x[0], self.ra[x[1]]) for x in self.page_rank(self.matrix)]
        return self.pr

    def page_rank(self, technosphere, alpha=0.85, max_iter=100, tol=1e-6):
        """
        Return the PageRank of the nodes in the graph.

        Adapted from http://networkx.lanl.gov/svn/networkx/trunk/networkx/algorithms/link_analysis/pagerank_alg.py

        PageRank computes a ranking of the nodes in the graph G based on
        the structure of the incoming links. It was originally designed as
        an algorithm to rank web pages.

        The eigenvector calculation uses power iteration with a SciPy
        sparse matrix representation.

        Args:
            * *technosphere* (scipy sparse matrix): The technosphere matrix.
            * *alpha* (float, optional): Damping parameter for PageRank, default=0.85

        Returns:
            * Dictionary of nodes (activity codes) with value as PageRank


        References

        .. [1] A. Langville and C. Meyer,
           "A survey of eigenvector methods of web information retrieval."
           http://citeseer.ist.psu.edu/713792.html
        .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
           The PageRank citation ranking: Bringing order to the Web. 1999
           http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
        """
        mat = technosphere.copy()
        (n, m) = mat.shape
        assert n == m  # should be square
        nodelist = range(n)

        # Drop diagonals, and only indicate adjacency
        mat.data[:] = 1
        for x in range(n):
            mat[x, x] = 0

        column_sum = array(mat.sum(axis=1)).flatten()
        index = where(column_sum != 0)[0]
        mat = mat.tolil()
        for i in index:
            # Workaround for lack of fancy indexing in CSR matrices
            mat[i, :] *= 1.0 / column_sum[i]

        mat = mat.tocsc()
        x = ones((n)) / n  # initial guess
        dangle = array(where(mat.sum(axis=1) == 0, 1.0 / n, 0)).flatten()
        i = 0

        while True:  # power iteration: make up to max_iter iterations
            xlast = x
            x = alpha * (x * mat + dot(dangle, xlast)) + (1 - alpha) * xlast.sum() / n
            # check convergence, l1 norm
            err = absolute(x - xlast).sum()
            if err < n * tol:
                break
            if i > max_iter:
                raise ConvergenceError(
                    "pagerank: power iteration "
                    "failed to converge in %d iterations." % (i + 1)
                )
            i += 1

        return sorted(zip(x, nodelist), reverse=True)
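
A usage sketch for the PageRank helper, with a placeholder database name.

# Hypothetical usage: rank activities in a database by PageRank score.
from bw2data import Database

for score, key in PageRank(Database("example_db")).calculate()[:10]:
    print(score, key)    # ten highest-ranked activities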
Example #27
    def get_lca_score(self, fu, method):
        lca = LCA(fu, method)
        lca.lci()
        lca.lcia()
        return lca.score
Example #28
def contribution_for_all_datasets_one_method(database, method, progress=True):
    """Calculate contribution analysis (for technosphere processes) for all inventory datasets in one database for one LCIA method.

    Args:
        *database* (str): Name of database
        *method* (tuple): Method tuple

    Returns:
        Dictionary of NumPy arrays of relative contributions (keys ``"activities"``, ``"flows"``, and ``"all"``); each column sums to one
        Lookup dictionary, dataset keys to row/column indices
        Total elapsed time in seconds

    """
    def get_normalized_scores(lca, kind):
        if kind == "activities":
            data = lca.characterized_inventory.sum(axis=0)
        elif kind == "flows":
            data = lca.characterized_inventory.sum(axis=1)
        elif kind == "all":
            data = lca.characterized_inventory.data
        scores = np.abs(np.array(data).ravel())
        summed = scores.sum()
        if summed == 0:
            return np.zeros(scores.shape)
        else:
            return scores / summed

    start = time()
    assert database in databases, "Can't find database %s" % database
    assert method in methods, "Can't find method %s" % method
    keys = list(Database(database).load().keys())
    assert keys, "Database %s appears to have no datasets" % database

    # Instantiate LCA object
    lca = LCA({keys[0]: 1}, method=method)
    lca.lci()
    lca.decompose_technosphere()
    lca.lcia()

    rows = lca.characterized_inventory.shape[0]
    cols = lca.characterized_inventory.shape[1]
    all_cutoff = cols * 4

    results = {
        "activities": np.zeros((cols, cols), dtype=np.float32),
        "flows": np.zeros((rows, cols), dtype=np.float32),
        "all": np.zeros((all_cutoff, cols), dtype=np.float32),
    }

    pbar = pyprind.ProgBar(len(keys), title="Activities:")

    # Actual calculations
    for key in keys:
        lca.redo_lcia({key: 1})
        if lca.score == 0.0:
            continue

        col = lca.activity_dict[mapping[key]]
        results["activities"][:,
                              col] = get_normalized_scores(lca, "activities")
        results["flows"][:, col] = get_normalized_scores(lca, "flows")
        results_all = get_normalized_scores(lca, "all")
        results_all.sort()
        results_all = results_all[::-1]
        fill_number = results_all.shape[0]
        assert fill_number < all_cutoff, "Too many values in 'all'"
        results["all"][:fill_number, col] = results_all

        pbar.update()

    print(pbar)

    return results, lca.activity_dict, time() - start
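
A usage sketch; the database and method must already exist in the current project.

# Hypothetical call: contribution analysis for every dataset in a database.
results, activity_dict, elapsed = contribution_for_all_datasets_one_method(
    "example_db", ("method", "a"))
print("Finished in %.1f seconds" % elapsed)
# results["activities"][:, j] holds normalized per-activity contributions for
# the dataset in column j; column indices come from activity_dict.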
Example #29
def traverse_tagged_databases(functional_unit,
                              method,
                              label="tag",
                              default_tag="other",
                              secondary_tags=[]):
    """Traverse a functional unit throughout its foreground database(s), and
    group impacts by tag label.

    Contribution analysis works by linking impacts to individual activities.
    However, you might also want to group impacts in other ways. For example,
    give individual biosphere exchanges their own grouping, or aggregate two
    activities together.

    Consider this example system, where the letters are the tag labels, and the
    numbers are exchange amounts. The functional unit is one unit of the tree
    root.

    .. image:: images/tagged-traversal.png
       :alt: Example tagged supply chain

    In this supply chain, tags are applied to activities and biosphere exchanges.
    If a biosphere exchange is not tagged, it inherits the tag of its producing
    activity. Similarly, links to other databases are assessed with the usual
    LCA machinery, and the total LCA score is tagged according to its consuming
    activity. If an activity does not have a tag, a default tag is applied.

    We can change our visualization to show the use of the default tags:

    .. image:: images/tagged-traversal-2.png
       :alt: Example tagged supply chain

    And then we can manually calculate the tagged impacts. Normally we would
    need to know the actual biosphere flows and their respective
    characterization factors (CF), but in this example we assume that each
    CF is one. Our result, grouped by tags, would therefore be:

        * **A**: :math:`6 + 27 = 33`
        * **B**: :math:`30 + 44 = 74`
        * **C**: :math:`5 + 16 + 48 = 69`
        * **D**: :math:`14`

    This function will only traverse the foreground database, i.e. the
    database of the functional unit activity. A functional unit can have
    multiple starting nodes; in this case, all foreground databases are
    traversed.

    Input arguments:
        * ``functional_unit``: A functional unit dictionary, e.g. ``{("foo", "bar"): 42}``.
        * ``method``: A method name, e.g. ``("foo", "bar")``
        * ``label``: The label of the tag classifier. Default is ``"tag"``
        * ``default_tag``: The tag classifier to use if none was given. Default is ``"other"``
        * ``secondary_tags``: List of tuples in the format (secondary_label, secondary_default_tag). Default is empty list.

    Returns:

        Aggregated tags dictionary from ``aggregate_tagged_graph``, and tagged supply chain graph from ``recurse_tagged_database``.

    """
    lca = LCA(functional_unit, method)
    lca.lci(factorize=True)
    lca.lcia()
    method_dict = {o[0]: o[1] for o in Method(method).load()}
    graph = [
        recurse_tagged_database(key, amount, method_dict, lca, label,
                                default_tag, secondary_tags)
        for key, amount in functional_unit.items()
    ]
    return aggregate_tagged_graph(graph), graph
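
A usage sketch for the single-method traversal, mirroring the worked example in the docstring; names are placeholders.

# Hypothetical call: aggregate scores by tag for one LCIA method.
fu = {("foo", "bar"): 1}                           # placeholder functional unit
agg, graph = traverse_tagged_databases(fu, ("method", "a"), label="tag")
print(agg)  # e.g. {"A": 33, "B": 74, "C": 69, "D": 14} for the docstring example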
Example #30
class DynamicLCA(object):
    """Calculate a dynamic LCA, where processes, emissions, and CFs can vary throughout time.If an already (statically) characterized LCA object is passed calculate its dynamic LCA (useful when doing several dynamic LCA for same database but different the FUs).

Args:
    * *demand* (dict): The functional unit. Same format as in LCA class.
    * *worst_case_method* (tuple): LCIA method. Same format as in LCA class.
    * *cutoff* (float, default=0.005): Cutoff criteria to stop LCA calculations. Relative score of total, i.e. 0.005 will cutoff if a dataset has a score less than 0.5 percent of the total.
    * *max_calc_number* (int, default=10000): Maximum number of LCA calculations to perform.
    * *loop_cutoff* (int, default=10): Maximum number of times loops encountered will be traversed.
    * *t0* (datetime, default=np.datetime64('now')): `datetime` of the year zero (i.e. the one of the functional unit). 
    * *group* (Boolean, default=False): When 'True' groups the impact upstream for each of the processes based on the values of `grouping_field`
    * *grouping_field* (string, default='tempo_group': The bw2 field to look for when grouping impacts upstream. When ``group`==True and a process has `grouping_field==whatever` the impacts are grouped upstream with name ``whatever` untill another  process with `grouping_field==another name` is found. If `grouping_field==True` it simply uses the name of the process
    * *log* (int, default=False): If True to make log file
    * *lca_object* (LCA object,default=None): do dynamic LCA for the object passed (must have "characterized_inventory" i.e. LCA_object.lcia() has been called)
    """

    #* *group* (Boolean, default=False: When 'True' groups the impact upstream for each of the processes with the field`grouping_field`==True
    #* *grouping_field* (string, default='tempo_group': The bw2 field to look for when grouping impacts upstream. When ``group`==True and a process has `grouping_field`==True the impacts are grouped upstream for it untill another  process with `grouping_field`==True is found

    def __init__(self,
                 demand,
                 worst_case_method,
                 t0=None,
                 max_calc_number=1e4,
                 cutoff=0.001,
                 loop_cutoff=10,
                 group=False,
                 grouping_field="tempo_group",
                 log=False,
                 lca_object=None):
        self.demand = demand
        self.worst_case_method = worst_case_method
        self.t0 = np.datetime64(
            'now', dtype="datetime64[s]") if t0 is None else np.datetime64(
                t0).astype("datetime64[s]")
        self.max_calc_number = max_calc_number
        self.cutoff_value = cutoff
        self.loop_cutoff_value = loop_cutoff
        self.log = get_logger("dynamic-lca.log") if log else FakeLog()
        self.lca_object = lca_object
        self.group = group
        self.grouping_field = grouping_field
        self.stat_for_keys = get_static_forest_keys()  #return forest processes
        self.loops = collections.Counter()  #to count loops iterations

        # find the static databases, and create sets to hold nodes and edges as they are traversed
        all_databases = set.union(
            *[Database(key[0]).find_graph_dependents() for key in self.demand])
        self.static_databases = {
            name
            for name in all_databases if databases[name].get('static')
        }
        self.product_amount = collections.defaultdict(
            int)  #to check supply amount calculated for each product
        self.nodes = set()
        self.edges = set()

        # take the biosphere flows that are in the CFs, so that only they are added to the timeline
        self._flows_in_CF = [
            x[0] for x in Method(self.worst_case_method).load()
        ] + [('static_forest', 'C_biogenic')]

        #self.test_datetime={}    #test for using TD,left for future (potential) development (other parts are commented out below)

    ###########
    #Traversal#
    ###########

    def calculate(self):
        """Calculate"""
        self.timeline = Timeline()
        self.heap = []  # heap with dynamic exchanges to loop over (impact, edge, datetime, TemporalDistribution)
        self.calc_number = 0

        # run worst-case LCA if lca_object was not passed; else redo it for this demand and worst-case method
        if self.lca_object:
            _redo_lcia(self, self.lca_object, self.demand,
                       self.worst_case_method)
        else:
            self.lca = LCA(self.demand, self.worst_case_method)
            self.lca.lci()
            self.lca.lcia()

        # reverse dictionaries and calculate cutoff
        self.reverse_activity_dict, self.reverse_prod_dict, self.reverse_bio_dict = \
            self.lca.reverse_dict()
        self.cutoff = abs(self.lca.score) * self.cutoff_value

        #logs
        self.log.info("Starting dynamic LCA")
        self.log.info("Demand: %s" % self.demand)
        self.log.info("Worst case method: %s" % str(self.worst_case_method))
        self.log.info("Start datetime: %s" % self.t0)
        self.log.info("Maximum calculations: %i" % self.max_calc_number)
        self.log.info("Worst case LCA score: %.4g." % self.lca.score)
        self.log.info("Cutoff value (fraction): %.4g." % self.cutoff_value)
        self.log.info("Cutoff score: %.4g." % self.cutoff)

        # Initialize heap
        #MAYBE NOT NECESSARY ANYMORE
        heappush(
            self.heap,
            (
                None,
                (
                    "Functional unit",
                    "Functional unit",
                ),
                self.t0,
                TemporalDistribution(
                    np.array([
                        0,
                    ], dtype='timedelta64[s]'),  # need int
                    np.array((1., )).astype(float)),
                'Functional unit'  #with tag
            )
        )  #if self.lca.score!=0 else self.timeline.add(self.t0.astype(datetime.datetime) , None, None,0) #deal with demand with no impact (doing so does not return error in LCIA)
        #TODO: the part commented out above was needed for `MultiDynamicLCA`, in commits `traverse also when total score is 0` this has been deleted, check if `MultiDynamicLCA` works fine or is affected

        while self.heap:
            if self.calc_number >= self.max_calc_number:
                warnings.warn("Stopping traversal due to calculation count.")
                break
            self._iterate()

        self.log.info("NODES: " + pprint.pformat(self.nodes))
        self.log.info("EDGES: " + pprint.pformat(self.edges))

        return self.timeline

    ##############
    #INTERNAL USE#
    ##############

    def _iterate(self):
        """Iterate over the datasets starting from the FU"""
        # Ignore the calculated impact
        # `ed` is the edge, in the form of (keyto, keyfrom)
        # `dt` is the datetime; GIU: we can also avoid this and use self.t0
        # `td` is a TemporalDistribution instance, which gives
        # how much of the dataset is used over time at
        # this point in the graph traversal
        _, ed, dt, td, ups_tag = heappop(
            self.heap)  # Don't care about impact #with tag

        # don't remember what this is for; check
        if ed != (
                "Functional unit",
                "Functional unit",
        ):
            self.product_amount[ed[1]] += td.total
        self.scale_value = self._get_scale_value(ed[1])

        if self.log:
            self.log.info("._iterate(): %s, %s, %s" % (ed, dt, td))

        #get bw2 activity for node
        node = get_activity(ed[1]) if ed[1] != "Functional unit" else {
            'FU': False
        }  #trick to deal with FU in LCA with results==0

        #tag ds with label if present otherwise inherit upstream tag
        ed_tag = ed[1]
        if self.group == True:
            ed_tag = ups_tag if node.get(
                self.grouping_field,
                False) == False else ed[1]  #with tags ed[0]

        #add bio flows (both dynamic and static)
        self._add_biosphere_flows(ed, td, ed_tag)  #with tag

        #deal with functional unit
        if ed[1] == "Functional unit":
            dyn_edges = {}
            for key, value in self.demand.items():
                dyn_edges[key] = \
                    TemporalDistribution(
                        np.array([0,],dtype='timedelta64[s]'), # need int
                        np.array((value,)).astype(float)
                )
                new_td = self._calculate_new_td(dyn_edges[key], td)
                # Calculate lca and discard if node impact is lower than cutoff
                if self._discard_node(key, new_td.total):
                    continue

                #else add to the heap the ds of this exchange with the new TD
                heappush(
                    self.heap,
                    (
                        abs(1 / self.lca.score) if self.lca.score != 0 else
                        0,  #deal with situations where the overal LCA score of the FU assessed is 0
                        (ed[1], key),
                        dt,
                        new_td,
                        ed_tag  #with tag
                    ))
            self.calc_number += 1

        #for all the other datasets
        else:
            # skip the node if it is part of a static db, or when a loop has been traversed loop_cutoff times
            ###ALL THIS LEFT FOR FUTURE IMPROVEMENTS
            # if ed in self.edges or node['database'] in self.static_databases: #this do not loop
            # if node['database'] in self.static_databases: #this loop
            # if node['database'] in self.static_databases or self.loops[ed]>=15: #loop certain amount of time
            #~if (ed[1],ed[0],) in self.edges or node['database'] in self.static_databases: #this do not reloop
            #~if node['database'] in self.static_databases or self.loops[ed]>=15: #loop certain amount of time
            #~if node['database'] in self.static_databases or self.loops[ed]>=self.loop_cutoff_value or td.total>1: #do not remeber why did this
            if node['database'] in self.static_databases or self.loops[
                    ed] >= self.loop_cutoff_value or (
                        self.loops[ed] >= 1 and td.total >= 1
                    ):  # loop a certain number of times ONLY if exc amount <= 1
                return

            #add to nodes,edges and loops counter
            self.nodes.add(ed[1])
            self.edges.add(ed)
            self.loops[ed] += 1

            #defaultdict with all edges of this node (can have multiple exchanges with same input/output so use default dict with list TDs as values)
            dyn_edges = collections.defaultdict(list)
            #loop dynamic_technosphere edges for node
            for exc in node.exchanges():
                # deal with technosphere and substitution exchanges
                if exc.get("type") in ["technosphere", 'substitution']:
                    if self.log:
                        self.log.info("._iterate:edge: " + pprint.pformat(exc))
                    dyn_edges[exc['input']].append(
                        self._get_temporal_distribution(exc))

                #deal with coproducts
                if exc.get(
                        'type') == 'production' and exc.get('input') != ed[1]:
                    if self.log:
                        self.log.info("._iterate:edge: " + pprint.pformat(exc))
                    dyn_edges[exc['input']].append(
                        self._get_temporal_distribution(exc))

            # GIU: test whether all this is necessary, or whether we can just loop over all of them
            for edge, edge_exchanges in dyn_edges.items():
                # need the index to add duplicate exchanges with their index
                for i, edge_td in enumerate(edge_exchanges):

                    #Recalculate edge TD convoluting its TD with TD of the node consuming it (ds)
                    #return a new_td with timedelta as times
                    new_td = self._calculate_new_td(edge_td, td)

                    # Calculate lca and discard if node impact is lower than cutoff
                    if self._discard_node(edge, new_td.total):
                        continue

                    #else add to the heap the ds of this exchange with the new TD
                    heappush(
                        self.heap,
                        (
                            abs(1 / self.lca.score) if self.lca.score != 0 else
                            0,  #deal with exchanges with 0 impact
                            (ed[1], edge, i),
                            dt,
                            new_td,
                            ed_tag  #with tag          
                        ))

            self.calc_number += 1

    def _add_biosphere_flows(self, edge, tech_td, tag):  #with tag
        """add temporally distributed biosphere exchanges for this ds to timeline.raw both if ds is static or dynamic"""

        ds = edge[1]  # fix this (for now done just to avoid changing all the `ds` below)
        if ds == "Functional unit":
            return
        data = get_activity(ds)

        # add biosphere flows for the process passed
        # check whether new bw2 will need changes, since it will differentiate between imports of products and activities (i.e. processes)
        if not data.get('type', 'process') == "process":
            return

        #Add cumulated inventory for static database (to make faster calc) and loops (to avoid infinite loops)
        ###ALL THIS LEFT FOR FUTURE IMPROVEMENTS
        # if data['database'] in self.static_databases: #this loop without stoop
        # if data['database'] in self.static_databases or edge in self.edges: #do not loop
        #~if data['database'] in self.static_databases or (edge[1],edge[0],) in self.edges: #do not re-loop (new)
        #~if data['database'] in self.static_databases or self.loops[edge]>=15: #loop certain amount of time
        #~if data['database'] in self.static_databases or self.loops[edge]>=self.loop_cutoff_value or tech_td.total>1: #do not remeber why did this
        if data['database'] in self.static_databases or self.loops[
                edge] >= self.loop_cutoff_value or (
                    self.loops[edge] >= 1 and tech_td.total >= 1
                ):  # loop a certain number of times only if exc amount <= 1
            self.lca.redo_lci({data: 1})

            # #add product amount to product_amount (to be used when background dataset traversal will be implemented )
            # for i,am in np.ndenumerate(self.lca.supply_array):
            # product=self.reverse_prod_dict[i[0]]
            # if product!=ds:
            # self.product_amount[product] += am*tech_td.total

            # #this only foreground
            inventory_vector = np.array(self.lca.inventory.sum(axis=1)).ravel()
            for index, amount in enumerate(inventory_vector):
                if not amount or amount == 0:  # GIU: we can also skip 0 amounts that sometimes occur, right?
                    continue
                flow = self.reverse_bio_dict[index]

                ###benchmarked this below, takes the same time of the foreground the problem is the high memory usage that slow things down
                # #this also background
                # coo=self.lca.inventory.tocoo()
                # for i,j,amount in zip(coo.row, coo.col, coo.data):
                # flow = self.reverse_bio_dict[i]
                # pr = self.reverse_prod_dict[j]

                dt_bio = self._calculate_bio_td_datetime(amount, tech_td)
                for bio_dt, bio_amount_scaled in dt_bio:
                    #TODO: best to use a better container for timeline.
                    #maybe defaultdict with namedtuple as key to group amount when added
                    #fastest, see among others here https://gist.github.com/dpifke/2244911 (I also tested)
                    if bio_amount_scaled != 0 and flow in self._flows_in_CF:
                        self.timeline.add(
                            bio_dt, flow, tag,
                            bio_amount_scaled)  #only foreground with tag
                        # self.timeline.add(bio_dt, flow, pr,bio_amount_scaled) #with background

                #~#test for using TD
                #~dt_bio_test=self._calculate_bio_td_datetime_test_timeline(amount,tech_td)
                #~self.test_datetime[flow, ds] = dt_bio_test+self.test_datetime.get((flow, ds),0)

            ## deal with dynamic biogenic CO2 in installed (static) databases (maybe we can avoid this loop and do it directly above)
            if ('biosphere3', 'cc6a1abb-b123-4ca6-8f16-38209df609be'
                ) in self.lca.biosphere_dict:
                row_bioc = self.lca.biosphere_dict[(
                    'biosphere3', 'cc6a1abb-b123-4ca6-8f16-38209df609be')]
                col_cbio = self.lca.biosphere_matrix[row_bioc, :].tocoo(
                )  #get coordinates Carbon dioxide, in air

                ## find inventory values and sum
                ## in principle `CO2, in air` should have a negative
                ## but in ei it is positive so no need to change sign in bio_c
                bio_c = sum([
                    self.lca.inventory[row_bioc, index]
                    for index in col_cbio.col
                    if self.reverse_activity_dict[index] in self.stat_for_keys
                ])
                dt_bio_c = self._calculate_bio_td_datetime(bio_c, tech_td)
                for bio_dt, bio_amount_scaled in dt_bio_c:
                    if bio_amount_scaled != 0:
                        #~self.timeline.add(bio_dt, ('static_forest','C_biogenic'), ds, bio_amount_scaled)
                        self.timeline.add(bio_dt,
                                          ('static_forest', 'C_biogenic'), tag,
                                          bio_amount_scaled)  #with tag

                #~#test for using TD
                #~dt_bio_c_test=self._calculate_bio_td_datetime_test_timeline(bio_c,tech_td)
                #~self.test_datetime[('static_forest','C_biogenic'), ds] = dt_bio_c_test+self.test_datetime.get((('static_forest','C_biogenic'), ds),0)

            return

        #dynamic database
        #get TD of bio exc, spread, convert to datetime and append to timeline.raw
        for exc in data.biosphere():
            bio_td = self._get_temporal_distribution(exc)
            td_bio_new = self._calculate_bio_td_datetime(bio_td, tech_td)
            for bio_dt, bio_amount_scaled in td_bio_new:
                if bio_amount_scaled != 0:
                    #deal with forest biogenic C in dynamic db
                    if exc['input'] == ('biosphere3',
                                        'cc6a1abb-b123-4ca6-8f16-38209df609be'
                                        ) and ds in self.stat_for_keys:
                        self.timeline.add(bio_dt,
                                          ('static_forest', 'C_biogenic'), tag,
                                          bio_amount_scaled)  # with tag
                    elif exc['input'] in self._flows_in_CF:
                        self.timeline.add(bio_dt, exc['input'], tag,
                                          bio_amount_scaled)  # with tag
                    else:
                        continue

    def _calculate_bio_td_datetime(self, bio_flows, td_tech):
        """Recalculate bio, both if datetime or timedelta, and add to timedelta.
        td_tech is always timedelta64, bio_flows can be datetime64 or float for static db"""
        #dynamic db with dt for bio_flows, multiply by node total
        if isinstance(bio_flows, TemporalDistribution) and 'datetime64' in str(
                bio_flows.times.dtype):
            return (bio_flows * td_tech.total) / self.scale_value
        #both static db and dynamic with timedelta for bio_flows
        bio_td_delta = (td_tech * bio_flows) / self.scale_value
        return bio_td_delta.timedelta_to_datetime(self.t0)

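    # A minimal sketch of the static-db branch above (illustrative values;
    # assumes `dlca` is a calculated DynamicLCA instance with
    # `scale_value` == 1):
    #
    #   tech_td = TemporalDistribution(
    #       np.array([0, 5], dtype='timedelta64[Y]'), np.array([0.7, 0.3]))
    #   dt_bio = dlca._calculate_bio_td_datetime(2.0, tech_td)
    #   # -> datetime64 distribution anchored at t0 with amounts [1.4, 0.6]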

    def _calculate_new_td(self, edge_td, node_td):
        """Recalculate edge both if datetime or timedelta, return always timedelta.
        node_td is always timedelta64, edge_td can be datetime"""
        if 'datetime64' in str(edge_td.times.dtype):
            #multiply by node.total and convert to timedelta
            new_td = (edge_td * node_td.total) / self.scale_value
            return new_td.datetime_to_timedelta(self.t0)
        #else just convolute
        return (node_td * edge_td) / self.scale_value
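    # A minimal sketch of the timedelta branch above (illustrative values;
    # assumes `dlca` is a DynamicLCA instance with `scale_value` == 1):
    #
    #   node_td = TemporalDistribution(
    #       np.array([0, 1], dtype='timedelta64[Y]'), np.array([0.6, 0.4]))
    #   edge_td = TemporalDistribution(
    #       np.array([2], dtype='timedelta64[Y]'), np.array([1.0]))
    #   # plain convolution: same amounts, shifted two years
    #   new_td = dlca._calculate_new_td(edge_td, node_td)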

    ################
    #Data retrieval#
    ################

    def _get_temporal_distribution(self, exc):
        """get 'temporal distribution'and change sing in case of production or substitution exchange"""
        # sign = 1 if exc.get('type') != 'production' else -1
        #deal with exchanges of type production and substititution
        sign = -1 if exc.get('type') in ['production', 'substitution'] else 1

        td = exc.get(
            'temporal distribution',
            TemporalDistribution(
                np.array([0], dtype='timedelta64[s]'),  # timedelta64 needs an int
                np.array([exc['amount']]).astype(float)))
        if not isinstance(td, TemporalDistribution):
            # convert the old format (does not handle fractional years)
            if any(
                    isinstance(t_v, tuple) and len(t_v) == 2
                    and isinstance(t_v[0], int) for t_v in td):
                array = np.array(exc[u'temporal distribution'])
                td = TemporalDistribution(array[:, 0].astype('timedelta64[Y]'),
                                          array[:, 1])
                warnings.warn(
                    "The old format for `temporal distribution` is deprecated: it must now be a `TemporalDistribution` object instead of a nested list of tuples. The applied conversion may be incorrect in the exchange from {} to {}"
                    .format(exc['input'], exc['output']), DeprecationWarning)
            else:
                raise ValueError(
                    "Incorrect data format for `temporal distribution` from: {} to {}"
                    .format(exc['input'], exc['output']))
        if not np.isclose(td.total, exc['amount'], rtol=0.0001):
            raise ValueError(
                "Unbalanced exchanges from {} to {}. Make sure that the total of `temporal distribution` equals `amount`"
                .format(exc['input'], exc['output']))
        return td * sign
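    # A minimal sketch of attaching a `temporal distribution` to an exchange
    # (keys and values are illustrative). Its total must equal the exchange
    # `amount`, otherwise the check above raises a ValueError:
    #
    #   exc['temporal distribution'] = TemporalDistribution(
    #       np.array([0, 1, 2], dtype='timedelta64[Y]'),
    #       np.array([0.2, 0.3, 0.5]))  # totals 1.0 == exc['amount']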

    def _discard_node(self, node, amount):
        """Calculate lca for {node, amount} passed return True if lca.score lower than cutoff"""
        self.lca.redo_lcia({node: amount})
        discard = abs(self.lca.score) < self.cutoff
        if discard:
            self.log.info(u"Discarding node: %s of %s (score %.4g)" %
                          (amount, node, self.lca.score))
        return discard
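    # A minimal sketch of the pruning rule above (the key and amount are
    # illustrative; assumes `dlca` is a DynamicLCA instance mid-traversal):
    #
    #   dlca.lca.redo_lcia({('db', 'code'): 0.01})
    #   prune = abs(dlca.lca.score) < dlca.cutoff  # True -> node is skipped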

    def _get_scale_value(self, ds):
        """Get production amount (diagonal in matrix A) for the dataset (ds) passed.
        Normally scale_value is 1 but in the case of `non-unitary producitons <https://chris.mutel.org/non-unitary.html>`_ """
        # Each activity must produce its own reference product, but amount
        # can vary, or even be negative.
        # TODO: Do we need to look up the reference product?
        # It is not necessarily the same as the activity,
        # but maybe this breaks many things in the graph traversal
        if ds != "Functional unit":
            scale_value = float(
                self.lca.technosphere_matrix[self.lca.product_dict[ds],
                                             self.lca.activity_dict[ds]])
            if scale_value == 0:
                raise ValueError(u"Can't rescale activities that produce "
                                 u"zero reference product")
            return scale_value
        else:
            return 1
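    # A minimal sketch of reading the production amount off the technosphere
    # diagonal (illustrative key; assumes a calculated `lca` object):
    #
    #   key = ('db', 'code')
    #   scale = lca.technosphere_matrix[lca.product_dict[key],
    #                                   lca.activity_dict[key]]
    #   # usually 1.0; it differs only for non-unitary productions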

    def _redo_lcia(self, lca_obj, demand, method):
        """
        Redo LCA for the same inventory and different method and FU using redo_lcia().Decompose technosphere if it was not factorized in the LCA object passed. Useful when redoing many dynamic LCA for same database
        Args:
            * *demand* (dict): The functional unit. Same format as in LCA class.
            * *method* (tuple): LCIA method. Same format as in LCA class.
            * *LCA_object* for which self.characterized_inventory already exists (i.e. LCA_object.lcia() has been called) 
        """
        assert hasattr(lca_obj, "characterized_inventory"
                       ), "Must do LCIA first for the LCA object passed"
        self.lca = lca_obj
        self.lca.switch_method(method)
        self.lca.redo_lcia(demand)
        # assume the inventory will be reused several times, so it pays to factorize
        if not hasattr(self.lca, "solver"):
            self.lca.decompose_technosphere()
        return self.lca
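    # A minimal usage sketch (assumes `lca_obj.lcia()` was already called;
    # the demand key and method tuple are illustrative):
    #
    #   dlca._redo_lcia(lca_obj, {('db', 'code'): 1},
    #                   ('IPCC 2013', 'climate change', 'GWP 100a'))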

    def save_dLCI(self, folderpath=None):
        """Save the results of DynamicLCA to a (compressed) ``bw2lci`` file containing dictionary like 
        ``'timeline':timeline object,
        'demand':dLCI demand
        'wc_method':dLCI worst_case_method
        The file is saved to the current working directory by default with the filename=demandkey_worstcase_method. Restoration is done using 'bw2temporalis.timeline.load_LCI'. 
        Args:
            * *folderpath* (str, default=None): the filepath of the timeline (without file extension)
        """

        assert hasattr(self, "timeline"), "Must do calculate first"
        # create the folder if needed (see https://stackoverflow.com/a/12517490/4929813)
        # and name the file {demand}_{worst_case_method}.bw2lci
        os.makedirs(folderpath or '.', exist_ok=True)
        tl_path = os.path.join(
            folderpath or '.', '{}_{}.bw2lci'.format(self.demand,
                                                     self.worst_case_method))
        with gzip.open(tl_path, 'wb') as f:
            pickle.dump(
                {
                    'timeline': self.timeline,
                    'demand': self.demand,
                    'wc_method': self.worst_case_method
                }, f)
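    # A minimal restore sketch, mirroring the gzip/pickle format written
    # above (the packaged loader is `bw2temporalis.timeline.load_LCI`; the
    # manual path below is illustrative only):
    #
    #   with gzip.open(tl_path, 'rb') as f:
    #       saved = pickle.load(f)
    #   timeline = saved['timeline']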