Example #1
def sobol_pandas(gsa_in_lca, first, total):

    lca = gsa_in_lca.lca
    # lca.reverse_dict() returns three lookup dicts (activities, products,
    # biosphere flows) that map matrix indices to (database name, code) keys.
    rev_act, rev_prod, rev_bio = lca.reverse_dict()

    def get_name(reverse_dict, pos):
        db_name, code = reverse_dict[pos]
        return bw.Database(db_name).get(code)['name']

    sum_first = np.sum(first)
    sum_total = np.sum(total)
    normalized_first = np.array(first) / sum_first
    normalized_total = np.array(total) / sum_total

    activities, products_flows = [], []
    for input_ in gsa_in_lca.inputs:

        # Technosphere activities (columns) and products (rows)
        tech_mask = gsa_in_lca.inputs_dict[input_]['tech_params_where']
        act_tech = [get_name(rev_act, p) for p in lca.tech_params[tech_mask]['col']]
        products = [get_name(rev_prod, p) for p in lca.tech_params[tech_mask]['row']]

        # Activities linked to biosphere flows (columns) and the flows themselves (rows)
        bio_mask = gsa_in_lca.inputs_dict[input_]['bio_params_where']
        act_bio = [get_name(rev_act, p) for p in lca.bio_params[bio_mask]['col']]
        bio_flows = [get_name(rev_bio, p) for p in lca.bio_params[bio_mask]['row']]

        activities += act_tech + act_bio
        products_flows += products + bio_flows

    data = {
        'Products or flows': products_flows + list(gsa_in_lca.parameters),
        'Activities': activities + list(gsa_in_lca.parameters),
        'First': first,
        'Total': total,
        'Normalized first': normalized_first,
        'Normalized total': normalized_total
    }

    # Creates pandas DataFrame
    df = pd.DataFrame(data)

    gsa_in_lca.sobol_df = df
Example #2
def test_exchange_interface(qtbot, ab_app):
    flow = bw.Database(bw.config.biosphere).random()
    db = bw.Database("testdb")
    act_key = ("testdb", "act_unc")
    db.write({
        act_key: {
            "name": "act_unc",
            "unit": "kilogram",
            "exchanges": [
                {"input": act_key, "amount": 1, "type": "production"},
                {"input": flow.key, "amount": 2, "type": "biosphere"},
            ],
        }
    })

    act = bw.get_activity(act_key)
    exc = next(e for e in act.biosphere())
    interface = get_uncertainty_interface(exc)
    assert isinstance(interface, ExchangeUncertaintyInterface)
    assert interface.amount == 2
    assert interface.uncertainty_type == UndefinedUncertainty
    assert interface.uncertainty == {}
Example #3
def add_non_fossil_co2_flows_to_storage():
    """Add a new flow to the biosphere: Non-fossil CO2 to storage.
    Add this biosphere flow to LCIA methods where it is suitable.
    """
    from peewee import IntegrityError

    biosphere = bw.Database('biosphere3')
    new_flow = biosphere.new_activity(
        'CO2 to geological storage, non-fossil', **{
            'name': 'CO2 to geological storage, non-fossil',
            'unit': 'kilogram',
            'type': 'storage',
            'categories': ('geological storage', )
        })
    try:
        new_flow.save()
    except IntegrityError as e:
        print(
            "Database Error (flow is likely to be present already): {}".format(
                e))

    print("Added new flow: {}".format(new_flow))

    co2_to_soil = [
        x for x in bw.Database("biosphere3")
        if ("Carbon dioxide, to soil or biomass stock" in str(x)
            and "('soil',)" in str(x))
    ][0]
    print("Use {} as a template for the characterization factors.".format(
        co2_to_soil))

    for cat in lcia_methods:
        method = bw.Method(lcia_methods[cat])
        method_data = method.load()
        # first make sure we don't already have the flow included:
        if [x for x in method_data if new_flow.key[1] in x[0][1]]:
            print('Flow already present - you must have run this code already.')
            continue
        else:
            try:
                characterized_flow = [
                    x for x in method_data if co2_to_soil.key[1] in x[0][1]
                ][0]
            except IndexError:
                # The template flow is not characterized in this method.
                continue

            method_data.extend([(new_flow.key, characterized_flow[1])])

            print('Flow added to method: {}'.format(method.name))
            print('Characterisation factor: {}'.format(characterized_flow[1]))

            orig_name = list(method.name)
            new_method = bw.Method(tuple(orig_name + ['CO2 storage']))
            new_method.register()
            new_method.write(method_data)
            new_method.process()
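The function above relies on a module-level lcia_methods mapping that is not shown in the snippet. A minimal, purely illustrative sketch of what such a mapping could look like (the label and the chosen method tuple are assumptions, not part of the original code):

# Hypothetical mapping from a short category label to a registered
# Brightway2 LCIA method tuple, as iterated over by the loop above.
lcia_methods = {
    'GWP100': ('IPCC 2013', 'climate change', 'GWP 100a'),
}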
Example #4
 def relink_database(self, db_name: str, parent: QObject) -> None:
     """Relink technosphere exchanges within the given database."""
     db = bw.Database(db_name)
     depends = db.find_dependents()
     options = [(depend, bw.databases.list) for depend in depends]
     dialog = DatabaseLinkingDialog.relink_sqlite(db_name, options, parent)
     if dialog.exec_() == DatabaseLinkingDialog.Accepted:
         # Now, start relinking.
         for old, new in dialog.relink.items():
             other = bw.Database(new)
             relink_exchanges_existing_db(db, old, other)
         signals.database_changed.emit(db_name)
         signals.databases_changed.emit()
Example #5
def lookup_geothermal():

    db_geothe = bw.Database("geothermal energy")
    db_ecoinv = bw.Database("ecoinvent 3.5 cutoff")
    db_biosph = bw.Database("biosphere3")

    # suppress print output from the search function
    with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):

        wellhead = db_geothe.search("geothermal wellhead")[0].key
        diesel = db_ecoinv.search(
            "market diesel, burned diesel-electric generating set")[1].key
        steel = db_ecoinv.search("market steel low alloyed")[0].key
        cement = db_ecoinv.search("market cement portland",
                                  filter={"location": "ROW"})[0].key
        water = db_ecoinv.search("market tap water",
                                 filter={"location": "ROW"})[0].key
        drilling_mud = db_geothe.search("drilling mud")[0].key
        drill_wst = db_ecoinv.search("market drilling waste")[0].key
        wells_closr = db_ecoinv.search("deep well closure")[0].key
        coll_pipe = db_geothe.search("collection pipelines")[0].key
        plant = db_geothe.search(
            "geothermal plant, double flash (electricity)")[0].key
        ORC_fluid = db_ecoinv.search("market perfluoropentane")[0].key
        ORC_fluid_wst = db_ecoinv.search("treatment perfluoropentane")[0].key
        diesel_stim = db_ecoinv.search(
            "market diesel, burned diesel-electric generating set")[0].key
        co2 = db_biosph.search("Carbon dioxide, fossil")[0].key
        electricity_prod_conventional = db_geothe.search(
            "electricity production, geothermal, conventional")[0].key
        electricity_prod_enhanced = db_geothe.search(
            "electricity production, geothermal, enhanced")[0].key

    return (wellhead, diesel, steel, cement, water, drilling_mud, drill_wst,
            wells_closr, coll_pipe, plant, ORC_fluid, ORC_fluid_wst,
            diesel_stim, co2, electricity_prod_conventional,
            electricity_prod_enhanced)
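A minimal usage sketch, assuming the "geothermal energy", "ecoinvent 3.5 cutoff" and "biosphere3" databases referenced above exist in the current project; each returned value is a (database, code) key:

# Illustrative only: resolve the first returned key to an activity.
keys = lookup_geothermal()
wellhead_act = bw.get_activity(keys[0])
print(wellhead_act)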
Example #6
def store_database_as_package(db_name: str, directory: str = None) -> bool:
    """ Attempt to use `bw.BW2Package` to save the given database as an
    isolated package that can be shared with others.
    Returns a boolean signifying success or failure.
    """
    if db_name not in bw.databases:
        return False
    metadata = bw.databases[db_name]
    db = bw.Database(db_name)
    directory = directory or bw.projects.output_dir
    output_dir = Path(directory)
    if output_dir.suffix == ".bw2package":
        out_file = output_dir
    else:
        out_file = output_dir / "{}.bw2package".format(db.filename)
    # First, ensure the metadata on the database is up-to-date.
    modified = dt.strptime(metadata["modified"], "%Y-%m-%dT%H:%M:%S.%f")
    if "processed" in metadata:
        processed = dt.strptime(metadata["processed"], "%Y-%m-%dT%H:%M:%S.%f")
        if processed < modified:
            db.process()
    else:
        db.process()
    # Now that processing is done, perform the export.
    ABPackage.unrestricted_export(db, out_file)
    return True
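A minimal usage sketch, assuming a registered database called "my_db" (the name is hypothetical); when no directory is given, the file lands in the project's output directory:

# Illustrative only: export a database to a .bw2package file.
if store_database_as_package("my_db"):
    print("Export finished")
else:
    print("Database 'my_db' is not registered in this project")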
Example #7
    def sync(self):
        # code below is based on the assumption that bw uses utc timestamps
        tz = datetime.datetime.now(datetime.timezone.utc).astimezone()
        time_shift = -tz.utcoffset().total_seconds()

        data = []
        for name in natural_sort(bw.databases):
            dt = bw.databases[name].get("modified", "")
            if dt:
                dt = arrow.get(dt).shift(seconds=time_shift).humanize()
            # final column includes interactive checkbox which shows read-only state of db
            database_read_only = project_settings.db_is_readonly(name)
            data.append({
                "Name": name,
                "Depends": ", ".join(bw.databases[name].get("depends", [])),
                "Modified": dt,
                "Records": len(bw.Database(name)),
                "Read-only": database_read_only,
            })

        self.dataframe = pd.DataFrame(data, columns=self.HEADERS)
Example #8
def conveniently_tag_database(
    fg_db_to_tag='',
    label='',
):
    '''
    ESA: LEGACY, use not recommended; use instead: conveniently_tag_database_v2
    Auxiliary function to conveniently assign new tag labels to a foreground database, for group analysis.
    Select a foreground database to tag, via fg_db_to_tag;
    then define the label name, via label.
    Returns the tagged database as a new dictionary, to be checked and then re-written to the database.

    Usage: new_data = conveniently_tag_database('fg_database', 'label_name')
    '''
    db = bw2.Database(fg_db_to_tag)
    data = db.load()
    new_data = {}
    print('There are %i items to be tagged, one by one' % (len(data)))
    val = input("Do you want to proceed (Y/N) ? ")
    if val != 'Y':
        print('Okay, we stop here')
    else:
        print(
            "Let's proceed! Type 'skip' in order to not tag the given activity")
        for pro_tpl, pro in data.items():
            val = input(pro['name'] + "... to be in the group called... ? ")

            if val == 'skip':
                # pop the key in case it was defined previously
                pro.pop(label, 'label was not present')
            else:
                pro[label] = val

            new_data[pro_tpl] = pro
    return new_data
Example #9
 def delete_database(self, name):
     ok = QtWidgets.QMessageBox.question(None, "Delete database?", (
         "Are you sure you want to delete database '{}'? It has {} activity datasets"
     ).format(name, len(bw.Database(name))))
     # QMessageBox.question returns the clicked button, not a boolean
     if ok == QtWidgets.QMessageBox.Yes:
         del bw.databases[name]
         self.change_project(bw.projects.current, reload=True)
Example #10
 def sync(self, name, data=None):
     self.database_name = name
     if not data:
         self.database = bw.Database(name)
         self.database.order_by = 'name'
         self.database.filters = {'type': 'process'}
         data = itertools.islice(self.database, 0, self.MAX_LENGTH)
         self.setRowCount(min(len(self.database), self.MAX_LENGTH))
     self.setHorizontalHeaderLabels(self.HEADERS)
     for row, ds in enumerate(data):
         for col, value in self.COLUMNS.items():
             if value == "key":
                 self.setItem(
                     row, col,
                     ABTableItem(str(ds.key), key=ds.key, color=value))
             elif value == "location":
                 self.setItem(
                     row, col,
                     ABTableItem(str(ds.get(value, '')),
                                 key=ds.key,
                                 color=value))
             else:
                 self.setItem(
                     row, col,
                     ABTableItem(ds.get(value, ''), key=ds.key,
                                 color=value))
Example #11
    def report_tech_LCA(self, year):
        """
        For each REMIND technology, find a set of activities in the region.
        Use ecoinvent tech share file to determine the shares of technologies
        within the REMIND proxies.
        """

        tecf = pd.read_csv(DATA_DIR/"powertechs.csv", index_col="tech")
        tecdict = tecf.to_dict()["mif_entry"]

        db = bw.Database("_".join(["ecoinvent", self.scenario, str(year)]))

        result = self._cartesian_product({
            "region": self.regions,
            "tech": list(tecdict.keys()),
            "method": self.methods
        }).sort_index()

        for region in self.regions:
            # read the ecoinvent techs for the entries
            shares = self.supplier_shares(db, region)

            for tech, acts in shares.items():
                # calc LCA
                lca = bw.LCA(acts, self.methods[0])
                lca.lci()

                for method in self.methods:
                    lca.switch_method(method)
                    lca.lcia()
                    result.at[(region, tech, method), "score"] = lca.score

        return result
Example #12
    def report_direct_emissions(self):
        """
        Report the direct (exhaust) emissions of the LDV fleet.
        """

        df = self.data[self.data.Variable.isin(self.variables)]

        df.set_index(["Year", "Region", "Variable"], inplace=True)

        start = time.time()
        result = {}
        # calc score
        for year in self.years:
            db = bw.Database(eidb_label(self.model, self.scenario, year))
            for region in self.regions:
                for var in (df.loc[(year, region)]
                            .index.get_level_values(0)
                            .unique()):
                    for act, share in self._act_from_variable(
                            var, db, year, region).items():
                        for ex in act.biosphere():
                            result[(year, region, ex["name"])] = (
                                result.get((year, region, ex["name"]), 0)
                                + ex["amount"] * share * df.loc[(year, region, var), "value"])

        df_result = pd.Series(result)
        print("Calculation took {} seconds.".format(time.time() - start))
        return df_result * 1e9  # kg
Example #13
 def copy_to_db(self, activity_key):
     origin_db = activity_key[0]
     activity = bw.get_activity(activity_key)
     # TODO: Exclude read-only dbs from target_dbs as soon as they are implemented
     available_target_dbs = sorted(set(bw.databases).difference(
         {'biosphere3', origin_db}
     ))
     if not available_target_dbs:
         QtWidgets.QMessageBox.information(
             None,
             "No target database",
             "No valid target databases available. Create a new database first."
         )
     else:
         target_db, ok = QtWidgets.QInputDialog.getItem(
             None,
             "Copy activity to database",
             "Target database:",
             available_target_dbs,
             0,
             False
         )
         if ok:
             new_code = self.generate_copy_code((target_db, activity['code']))
             activity.copy(code=new_code, database=target_db)
             # only process the database immediately if it is small
             if len(bw.Database(target_db)) < 200:
                 bw.databases.clean()
             signals.database_changed.emit(target_db)
             signals.databases_changed.emit()
Example #14
 def random_graph(self) -> None:
     """ Show graph for a random activity in the currently loaded database."""
     if self.selected_db:
         self.new_graph(bw.Database(self.selected_db).random().key)
     else:
         QtWidgets.QMessageBox.information(None, "Not possible.",
                                           "Please load a database first.")
Example #15
 def new_activity(self, database_name: str) -> None:
     name, ok = QtWidgets.QInputDialog.getText(
         self.window,
         "Create new technosphere activity",
         "Please specify an activity name:" + " " * 10,
     )
     if ok and name:
         data = {
             "name": name,
             "reference product": name,
             "unit": "unit",
             "type": "process"
         }
         new_act = bw.Database(database_name).new_activity(
             code=uuid.uuid4().hex, **data)
         new_act.save()
         production_exchange = new_act.new_exchange(input=new_act,
                                                    amount=1,
                                                    type="production")
         production_exchange.save()
         bw.databases.set_modified(database_name)
         AB_metadata.update_metadata(new_act.key)
         signals.open_activity_tab.emit(new_act.key)
         signals.database_changed.emit(database_name)
         signals.databases_changed.emit()
Example #16
 def run_forwast(self):
     """
     adapted from pjamesjoyce/lcopt
     """
     response = requests.get(self.forwast_url)
     forwast_zip = zipfile.ZipFile(io.BytesIO(response.content))
     import_signals.download_complete.emit()
     with tempfile.TemporaryDirectory() as tempdir:
         if not import_signals.cancel_sentinel:
             forwast_zip.extractall(tempdir)
             import_signals.unarchive_finished.emit()
         if not import_signals.cancel_sentinel:
             import_signals.extraction_progress.emit(0, 0)
             import_signals.strategy_progress.emit(0, 0)
             import_signals.db_progress.emit(0, 0)
             bw.BW2Package.import_file(
                 os.path.join(tempdir, 'forwast.bw2package'))
         if self.db_name != 'forwast':
             bw.Database('forwast').rename(self.db_name)
         if not import_signals.cancel_sentinel:
             import_signals.extraction_progress.emit(1, 1)
             import_signals.strategy_progress.emit(1, 1)
             import_signals.db_progress.emit(1, 1)
             import_signals.finished.emit()
         else:
             self.delete_canceled_db()
Example #17
def freezeParams(db_name, **params):
    """
    Freeze parameters values in all exchanges amounts of a DB.
    The formulas are computed and the 'amount' attributes are set with the result.
    This enables parametric datasets to be used by standard, non parametric tools of Brightway2.
    """

    db = bw.Database(db_name)

    with DbContext(db):
        for act in db:
            for exc in act.exchanges():

                amount = _getAmountOrFormula(exc)

                # Is the amount a symbolic formula rather than a plain number?
                if isinstance(amount, Basic):

                    replace = [(name, value)
                               for name, value in _completeParamValues(
                                   params, setDefaults=True).items()]
                    val = amount.subs(replace).evalf()

                    with ExceptionContext(val):
                        val = float(val)

                    print("Freezing %s // %s : %s => %d" %
                          (act, exc['name'], amount, val))

                    # Update in DB
                    exc["amount"] = val
                    exc.save()
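A minimal usage sketch, assuming a parametrized database named "my_param_db" and a parameter p1 (both names are hypothetical):

# Illustrative only: freeze all parametric exchange amounts, fixing p1 at 2.5.
freezeParams("my_param_db", p1=2.5)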
Example #18
 def superstructure_from_databases(cls, databases: List[str],
                                   superstructure: Optional[str] = None) -> 'Builder':
     """Given a list of database names and the name of the superstructure,
     upgrade or create the superstructure database.
     """
     assert len(databases) >= 1, "At least one database should be included"
     assert len(databases) == len(set(databases)), "Duplicates are not allowed in the databases"
     assert all(db in bw.databases for db in databases), "All databases must exist in the project"
     if superstructure is None:
         # Default to first db in list if no name is given
         superstructure, databases = databases[0], databases[1:]
     elif superstructure not in bw.databases:
         db = bw.Database(superstructure)
         db.register()
     elif superstructure in databases:
         databases.remove(superstructure)
     print("Superstructure: {}, deltas: {}".format(superstructure, ", ".join(databases)))
     builder = cls.initialize(superstructure, databases)
     print("Amount of activities in superstructure: {}".format(len(builder.unique_codes)))
     builder.find_missing_activities()
     print("Total amount of activities in superstructure: {}".format(len(builder.unique_codes)))
     if builder.missing_activities:
         print("Storing {} new activities for superstructure.".format(len(builder.missing_activities)))
         builder.expand_superstructure_activities()
     print("Amount of exchanges in superstructure: {}".format(len(builder.unique_indexes)))
     builder.find_missing_exchanges()
     print("Total amount of exchanges in superstructure: {}".format(len(builder.unique_indexes)))
     if builder.missing_exchanges:
         print("Storing {} new exchanges for superstructure.".format(len(builder.missing_exchanges)))
         builder.expand_superstructure_exchanges()
     return builder
Example #19
    def uslci_db(self):
        """ 
        Orignal repository were downloaded from the U.S. Federal LCA Commons|Ag data Commons platform. 
        Note Biosteam_lca leverages ecospold2 importer and excel importer. For converting from json format to ecospold format, the Openlca Data Converter were used. Pre-processed database are saved under biosteam_lca-database ddirectory.
        
        **References**
        
            [1] USDA National Agricultural Library. (2015). LCA Commons. Ag Data Commons. https://doi.org/10.15482/USDA.ADC/1173236. Accessed 2020-03-23.
            
            [2] Michael Srocka, Juliane Franze, Andreas Ciroth, January 2010. Documentation openLCA format converter V2. Data Conversion from EcoSpold02 to ILCD. GreenDeltaTC GmbH Berlin 
        """
        db_name = 'us_lci'
        lci_import = importers.SingleOutputEcospold2Importer(
            os.path.join(self.dirpath, 'US_lci'), db_name)
        lci_import.apply_strategies()

        lci_import.migrate('unusual-units')
        lci_import.migrate('default-units')
        #linking the biosphere flows by their names, units, and categories
        link_iter = functools.partial(strategies.link_iterable_by_fields,
                                      other=bw2.Database(bw2.config.biosphere),
                                      kind='biosphere')
        lci_import.apply_strategy(link_iter)
        try:
            self.inspect(lci_import, db_name)
        except Exception:
            # inspection is optional; ignore failures here
            pass
        sp = lci_import
        return sp
Example #20
def test_succeed_open_activity(ab_app):
    """ Create a tiny test database with a production activity
    """
    assert bw.projects.current == "pytest_project"
    db = bw.Database("testdb")
    act_key = ("testdb", "act1")
    db.write({
        act_key: {
            "name": "act1",
            "unit": "kilogram",
            "exchanges": [{
                "input": act_key,
                "amount": 1,
                "type": "production"
            }]
        }
    })
    activities_tab = ab_app.main_window.right_panel.tabs["Activity Details"]
    # Select the activity and emit signal to trigger opening the tab
    act = bw.get_activity(act_key)
    signals.open_activity_tab.emit(act_key)
    assert len(activities_tab.tabs) == 1
    assert act_key in activities_tab.tabs
    # Current index of QTabWidget is changed by opening the tab
    index = activities_tab.currentIndex()
    assert act.get("name") == activities_tab.tabText(index)
Example #21
def replace(parameters, gt_model):

    # CONVENTIONAL GEOTHERMAL
    parameters.static()
    gt_model.run(parameters)
    params_sta_conv = gt_model.array_io

    # Lookup activities
    (_, _, _, _, _, _, _, _, _, _, _, _, _, _,
     electricity_prod_conventional, _) = lookup_geothermal()

    act = bw.get_activity(electricity_prod_conventional)

    if not bw.Database("geothermal energy").search(act["name"] + " zeros"):
        act.copy(name=act["name"] + " (zeros)")

    # Delete all exchanges
    for exc in act.exchanges():
        exc.delete()

    # Insert new exchanges
    for inp in params_sta_conv:
        if inp['input_db'] != "biosphere3":
            print(inp)
            # act.new_exchange(input = (inp['input_db'],inp['input_code']), amount = float(inp['amount']), type= "technosphere").save()
        else:
            print(type(tuple((str(inp['input_db']), str(inp['input_code'])))))
            print(float(inp['amount']))
Example #22
def setup_lca_model_oases(path_base,
                          num_params=None,
                          write_dir_name=None,
                          flag_generate_scores_dict=False):
    # LCA model
    bw.projects.set_current("GSA for oases")
    co = bw.Database("CH consumption 1.0")
    demand_act = [
        act for act in co if "ch hh average consumption" in act["name"]
    ]
    assert len(demand_act) == 1
    demand_act = demand_act[0]
    demand = {demand_act: 1}
    method = ("IPCC 2013", "climate change", "GWP 100a")
    # num_params
    if num_params is None:
        lca = bw.LCA(demand, method)
        lca.lci()
        lca.lcia()
        print("LCA score is {}".format(lca.score))
        all_uncertain_params = lca.tech_params[
            lca.tech_params["uncertainty_type"] > 1]
        num_params = len(all_uncertain_params)
        print("Total number of uncertain exchanges is {}".format(num_params))
    # Define some variables
    if write_dir_name is None:
        write_dir_name = "oases_gsa_gwp_{}".format(num_params)
    write_dir = path_base / write_dir_name
    if flag_generate_scores_dict:
        model = LCAModel(demand, method, write_dir)  # generate scores_dict
        del model
    model = LCAModel(demand, method, write_dir, num_params=num_params)
    gsa_seed = 92374523
    return model, write_dir, gsa_seed
Example #23
def add_unit_score_exchange_and_cf(method, biosphere='biosphere3'):
    """ Add unit score biosphere exchanges and cfs to biosphere and methods.

    Allows the storing of LCIA results in the B matrix for LCI datasets. Makes
    changes inplace and does not return anything.

    Parameters
    ----------

    method: tuple
        Identification of the LCIA method, using Brightway2 tuple identifiers

    biosphere: str, default `biosphere3`
        Name of the biosphere database where biosphere exchanges are stored

    Note
    ----

    This function is invoked directly by the DatabaseAggregator

    """
    if method not in bw.methods:
        raise ValueError("Method {} not in registered methods".format(method))
    if biosphere not in bw.databases:
        raise ValueError(
            "Database {} not in registered databases".format(biosphere))

    m = bw.Method(method)
    ef_code = m.get_abbreviation()
    ef_name = 'Unit impact for {}'.format(method)

    # Add to biosphere database, skip if already present
    try:
        ef = bw.get_activity((biosphere, ef_code))
        assert ef['name'] == ef_name
    except Exception:
        # The unit impact flow is not in the biosphere database yet: create it.
        ef = bw.Database(biosphere).new_activity(code=ef_code)
        ef['name'] = ef_name
        ef['unit'] = m.metadata['unit']
        ef['categories'] = ('undefined', )
        ef['exchanges'] = []
        ef['type'] = 'unit impact exchange'
        ef.save()
        try:
            bw.mapping[(biosphere, ef_code)]
        except KeyError:
            print("Manually added {} to mapping".format(ef_code))
            bw.mapping.add((biosphere, ef_code))
    # Add to associated method, skip if already present
    loaded_method = m.load()
    try:
        existing_cf = [
            cf_tuple for cf_tuple in loaded_method
            if cf_tuple[0] == (biosphere, ef_code)
        ][0]
        assert existing_cf[1] == 1
    except (IndexError, AssertionError):
        # The unit CF is not yet in the method (or differs from 1): add it.
        loaded_method.append(((biosphere, m.get_abbreviation()), 1))
        bw.Method(method).write(loaded_method)
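A minimal usage sketch for a single LCIA method; the tuple below is a standard Brightway2 method identifier and must already be registered in bw.methods:

# Illustrative only: add the unit-impact flow and its characterisation factor
# for the IPCC 2013 GWP 100a method.
ipcc_gwp = ('IPCC 2013', 'climate change', 'GWP 100a')
add_unit_score_exchange_and_cf(ipcc_gwp, biosphere='biosphere3')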
Example #24
def _relink_exchanges(data: list, other: str) -> list:
    other = bw.Database(other)
    if len(other) == 0:
        raise StrategyError("Cannot link to empty database")
    act = other.random()
    is_technosphere = act.get("type", "process") == "process"
    kind = TECHNOSPHERE_TYPES if is_technosphere else BIOSPHERE_TYPES
    return link_iterable_by_fields(data, other=other, kind=kind)
Example #25
 def delete_database(self, name):
     ok = self.window.confirm(
         ("Are you sure you want to delete database '{}'? "
          "It has {} activity datasets").format(name,
                                                len(bw.Database(name))))
     if ok:
         del bw.databases[name]
         self.change_project(bw.projects.current, reload=True)
Example #26
 def ensure_sqlite_indices(self):
     """
     - fix for https://github.com/LCA-ActivityBrowser/activity-browser/issues/189
     - also see bw2data issue: https://bitbucket.org/cmutel/brightway2-data/issues/60/massive-sqlite-query-performance-decrease
     """
     if bw.databases and not sqlite3_lci_db._database.get_indexes('activitydataset'):
         print('creating missing sqlite indices')
         bw.Database(list(bw.databases)[-1])._add_indices()
Example #27
def test_fail_open_biosphere(ab_app):
    """ Specifically fail to open an activity tab for a biosphere flow
    """
    assert bw.projects.current == "pytest_project"
    activities_tab = ab_app.main_window.right_panel.tabs["Activity Details"]
    # Select any biosphere activity and emit signal to trigger opening the tab
    biosphere_flow = bw.Database("biosphere3").random()
    signals.open_activity_tab.emit(biosphere_flow.key)
    assert len(activities_tab.tabs) == 0
Example #28
 def random_graph(self) -> None:
     """ Show graph for a random activity in the currently loaded database."""
     if self.selected_db:
         method = bw.methods.random()
         act = bw.Database(self.selected_db).random()
         demand = {act: 1.0}
         self.update_sankey(demand, method)
     else:
         QtWidgets.QMessageBox.information(None, "Not possible.", "Please load a database first.")
Example #29
def is_technosphere_db(db_name: str) -> bool:
    """Returns True if database describes the technosphere, False if it describes a biosphere."""
    if db_name not in bw.databases:
        raise KeyError("Not an existing database:", db_name)
    act = bw.Database(db_name).random()
    return act is None or act.get("type", "process") == "process"
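A minimal usage sketch; "biosphere3" is the standard biosphere database, while the technosphere database name below is hypothetical:

# Illustrative only: classify two databases.
print(is_technosphere_db("biosphere3"))             # expected: False
print(is_technosphere_db("ecoinvent 3.5 cutoff"))   # expected: True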
Example #30
def setup_fixtures(request):

    print('RUNNING SETUP FIXTURE')

    if FULL_SETUP:
        bw2.projects.purge_deleted_directories()
        if bw2_project_exists(IMPORT_PROJECT_NAME):
            bw2.projects.delete_project(name=IMPORT_PROJECT_NAME,
                                        delete_dir=True)
        if bw2_project_exists(TEST_BW_PROJECT_NAME):
            bw2.projects.delete_project(name=TEST_BW_PROJECT_NAME,
                                        delete_dir=True)

        if bw2_project_exists(DEFAULT_BIOSPHERE_PROJECT):
            bw2.projects.set_current(DEFAULT_BIOSPHERE_PROJECT)
            bw2.projects.copy_project(IMPORT_PROJECT_NAME, switch=True)
        else:
            bw2.projects.set_current(IMPORT_PROJECT_NAME)
            bw2.bw2setup()

        script_path = os.path.dirname(os.path.realpath(__file__))
        ecospold_folder = os.path.join("tests", "assets", "datasets")
        ecospold_path = os.path.join(script_path, ecospold_folder)
        print(ecospold_path)

        ei = bw2.SingleOutputEcospold2Importer(ecospold_path,
                                               "Ecoinvent3_3_cutoff")
        ei.apply_strategies()
        ei.statistics()
        ei.write_database()

        bw2.projects.copy_project(TEST_BW_PROJECT_NAME, switch=True)

        test_db = bw2.Database(TEST_BW_DB_NAME)
        test_db.write(TEST_MODEL_DATABASE)

        bw2.projects.set_current('default')

    def teardown_fixtures():
        print('TEAR IT DOWN!!')

        print('cleaning up brightway')

        bw2.projects.set_current('default')

        if bw2_project_exists(TEST_BW_PROJECT_NAME):
            bw2.projects.delete_project(
                name=TEST_BW_PROJECT_NAME)  #, delete_dir=True)
            #bw2.projects.purge_deleted_directories()

        if bw2_project_exists(IMPORT_PROJECT_NAME):
            bw2.projects.delete_project(name=IMPORT_PROJECT_NAME)

        # script_path is only defined inside the FULL_SETUP branch above,
        # so recompute it here to avoid a NameError during teardown.
        script_path = os.path.dirname(os.path.realpath(__file__))
        shutil.rmtree(os.path.join(script_path, "tests", TEST_FOLDER))

    request.addfinalizer(teardown_fixtures)