def test_grouping_separate_unit(self):
    """Flows sharing name and top-level category but differing in unit must
    be grouped separately by ``group_by_emissions``."""
    flows = {
        ("biosphere", "1"): {
            "categories": ["foo", "this"],
            "exchanges": [],
            "name": "some bad stuff",
            "type": "emission",
            "unit": "kg",
        },
        ("biosphere", "2"): {
            "categories": ["foo", "that"],
            "exchanges": [],
            "name": "some bad stuff",
            "type": "emission",
            "unit": "tonne",
        },
    }
    bio_db = Database("biosphere")
    bio_db.register(name="Tests", depends=[])
    bio_db.write(flows)
    method = Method(("test", "LCIA", "method"))
    method.register(unit="points")
    method.write(
        [(("biosphere", "1"), 1.0, "GLO"), (("biosphere", "2"), 2.0, "GLO")]
    )
    expected = {
        ("some bad stuff", "foo", "kg"): [1.0],
        ("some bad stuff", "foo", "tonne"): [2.0],
    }
    self.assertEqual(group_by_emissions(method), expected)
def test_fix_spatial_dictionaries(self):
    # TODO: Fix
    # NOTE(review): this test is disabled by the early return below;
    # everything after it is currently unreachable.
    return
    empty = Database("empty")
    empty.register(depends=[], geocollections=["foo"])
    method = Method(("a", "name"))
    method.register(geocollections=["foo"])
    rlca = RegionalizationBase({("empty", "nothing"): 1}, method=("a", "name"))
    # No-op - `inv_spatial_dict` not yet set...
    rlca.fix_spatial_dictionaries()
    assert not getattr(rlca, "_mapped_spatial_dict", None)
    self.assertFalse(hasattr(rlca, "inv_spatial_dict"))
    geomapping.data = {"a": 1, "b": 2}
    rlca.inv_spatial_dict = {"a": "foo"}
    # Now it does something...
    rlca.fix_spatial_dictionaries()
    self.assertFalse(hasattr(rlca, "ia_spatial_dict"))
    rlca.inv_spatial_dict = {1: "foo"}
    rlca.ia_spatial_dict = {2: "bar"}
    rlca.fix_spatial_dictionaries()
    # Keys are remapped through geomapping.data (1 -> "a", 2 -> "b")
    self.assertEqual(rlca.inv_spatial_dict, {"a": "foo"})
    self.assertEqual(rlca.ia_spatial_dict, {"b": "bar"})
def import_exiopol_IO_table(database_name, dir_path):
    """Import an EXIOBASE 2.2 multi-regional input-output table as an LCI database.

    ``dir_path`` must be a directory containing ``mrIot_version2.2.2.txt``
    (tab-delimited; first two rows are headers, first three columns are
    location/name/unit labels).

    Args:
        * *database_name* (str): Name for the new database; must not exist yet.
        * *dir_path* (str): Directory holding the EXIOBASE text file.

    Returns:
        The written ``Database`` instance.
    """
    assert os.path.exists(dir_path) and os.path.isdir(dir_path), "Problem with given directory path"
    assert database_name not in databases, "Database {} already exists".format(database_name)
    assert "mrIot_version2.2.2.txt" in os.listdir(dir_path), "Directory path must contain `mrIot_version2.2.2.txt` file."
    print("Loading and processing data")
    fp = os.path.join(dir_path, 'mrIot_version2.2.2.txt')
    # Fix: close the file handle deterministically instead of leaking it
    with open(fp, "r") as f:
        data = [line for line in csv.reader(f, delimiter="\t")]
    # Rows 0-1 are headers; columns 0-2 are (location, name, unit) labels
    labels = [tuple(x[:3]) for x in data[2:]]
    labels_dict = {i: obj for i, obj in enumerate(labels)}
    data = np.array([[float(x) for x in row[3:]] for row in data[2:]])

    def codify(x):
        # Unique dataset code built from (location, name)
        return ":".join(x[:2])

    def get_column_tech_exchanges(index, obj):
        """Build technosphere exchanges for the dataset in column ``index``."""
        excs = []
        for row_i, value in enumerate(data[:, index]):
            if not value:
                continue
            elif row_i == index:
                # Diagonal entry: production amount is 1 minus self-consumption
                excs.append({
                    'type': 'production',
                    'uncertainty_type': UndefinedUncertainty.id,
                    'amount': float(1 - value),
                    'loc': float(1 - value),
                    'input': (database_name, obj),
                    'output': (database_name, obj),
                })
            else:
                excs.append({
                    'type': 'technosphere',
                    'uncertainty_type': UndefinedUncertainty.id,
                    'amount': float(value),
                    'loc': float(value),
                    'input': (database_name, codify(labels_dict[row_i])),
                    'output': (database_name, obj),
                })
        return excs

    print("Creating LCA datasets")
    db = []
    pbar = pyprind.ProgBar(len(labels))
    for index, ds in enumerate(labels):
        db.append({
            'location': ds[0],
            'name': ds[1],
            'unit': ds[2],
            'exchanges': get_column_tech_exchanges(index, codify(ds)),
            'type': 'process',
            'database': database_name,
            'code': codify(ds),
        })
        pbar.update()
    print("Writing datasets")
    db_obj = Database(database_name)
    db_obj.register(directory=dir_path)
    # Fix: actually write the datasets. This call was commented out, leaving an
    # empty registered database despite the "Writing datasets" message above.
    db_obj.write({(ds['database'], ds['code']): ds for ds in db})
    return db_obj
def test_missing_in_row_dict_raise_valueerror(self):
    """``MatrixBuilder.build`` with ``drop_missing=False`` must raise when a
    key is absent from the row/column dictionaries."""
    db = Database("whoah")
    db.register()
    dtype = [
        (numpy_string('a'), np.uint32),
        (numpy_string('b'), np.uint32),
        (numpy_string('row'), np.uint32),
        (numpy_string('col'), np.uint32),
        (numpy_string('values'), np.float32),
    ]
    records = [
        (1, 2, MAX_INT_32, MAX_INT_32, 99),
        (1, 4, MAX_INT_32, MAX_INT_32, 99),  # 4 is missing from col_dict
    ]
    arr = np.array(records, dtype=dtype)
    np.save(db.filepath_processed(), arr, allow_pickle=False)
    with self.assertRaises(ValueError):
        MatrixBuilder.build(
            [db.filepath_processed()], "values",
            "a", "row", "b", "col",
            {1: 0}, {2: 0},
            drop_missing=False,
        )
def test_geocollections_mismatch(self):
    """LCA construction must fail when inventory and method geocollections differ."""
    inv = Database("inventory")
    inv.register(geocollections=["places"])
    mthd = Method(("a", "method"))
    mthd.register(geocollections=["regions"])
    with self.assertRaises(GeocollectionsMismatch):
        LCA({("inventory", "foo"): 1}, method=("a", "method"))
def test_site_generic_method_error(self):
    """A method registered without geocollections is site-generic and must raise."""
    db = Database("empty")
    db.register(depends=[], geocollections=[])
    mthd = Method(("a", "name"))
    mthd.register()
    with self.assertRaises(SiteGenericMethod):
        rb = RegionalizationBase({("empty", "nothing"): 1}, method=("a", "name"))
        rb.get_ia_geocollections()
def extra_setup(self):
    """Write a three-flow database ``A`` and confirm it is the only database."""
    flows = {
        ("A", code): {"name": code, "categories": [], "unit": "kilogram"}
        for code in ("a", "b", "c")
    }
    db = Database("A")
    db.register(name="Tests", depends=[])
    db.write(flows)
    self.assertEqual(len(databases), 1)
def test_missing_intersection_error(self):
    """Disjoint inventory/method geocollections must raise MissingIntersection."""
    db = Database("empty")
    db.register(depends=[], geocollections=["foo"])
    mthd = Method(("a", "name"))
    mthd.register(geocollections=["bar"])
    with self.assertRaises(MissingIntersection):
        rb = RegionalizationBase({("empty", "nothing"): 1}, method=("a", "name"))
        rb.inventory_geocollections = rb.get_inventory_geocollections()
        rb.ia_geocollections = rb.get_ia_geocollections()
        rb.needed_intersections()
def test_damage_category_import():
    """SimaPro LCIA damage-category import: strategies apply and statistics match."""
    def flow(code, category):
        # Both fixture flows differ only in code and top-level category
        return {
            "categories": (category,),
            "code": code,
            "database": "biosphere3",
            "exchanges": [],
            "name": "Lead-210",
            "type": "emission",
            "unit": "kilo Becquerel",
        }

    database = Database("biosphere3", backend="singlefile")
    database.register()
    database.write({
        ("biosphere3", "00e73fdb-98df-4a03-8290-79931cddfd12"): flow(
            "00e73fdb-98df-4a03-8290-79931cddfd12", "air"
        ),
        ("biosphere3", "2cfc5ba4-3db2-4193-9e81-b61e75ba1706"): flow(
            "2cfc5ba4-3db2-4193-9e81-b61e75ba1706", "water"
        ),
    })
    assert database
    # Migrations required by the biosphere 2 -> 3 strategies
    Migration("biosphere-2-3-categories").write(
        get_biosphere_2_3_category_migration_data(),
        "Change biosphere category and subcategory labels to ecoinvent version 3",
    )
    Migration("biosphere-2-3-names").write(
        get_biosphere_2_3_name_migration_data(),
        "Change biosphere flow names to ecoinvent version 3",
    )
    # The csv delimiter must be bytes on Python 2 and text on Python 3
    delimiter = b"\t" if sys.version_info[0] < 3 else "\t"
    sp = SimaProLCIACSVImporter(
        os.path.join(SP_FIXTURES_DIR, "damagecategory.txt"), delimiter=delimiter
    )
    assert len(sp.data)
    sp.apply_strategies()
    assert sp.statistics() == (6, 12, 0)
def setup():
    """Create the ``example`` database with two linked activities plus
    project, database, and activity parameters, then recalculate exchanges."""
    db = Database("example")
    db.register(extra='yes please')
    act_a = db.new_activity(code="A", name="An activity", unit='kg', foo='bar')
    act_a.save()
    act_a.new_exchange(amount=1, input=act_a, type="production").save()
    act_b = db.new_activity(code="B", name="Another activity", location='here', this='that')
    act_b.save()
    act_b.new_exchange(amount=10, input=act_b, type="production").save()
    act_a.new_exchange(amount=0, input=act_b, type="technosphere", formula="foo * bar + 4").save()
    parameters.new_project_parameters([
        {'name': 'foo', 'formula': 'green / 7'},
        {'name': 'green', 'amount': 7},
    ])
    parameters.new_database_parameters([
        {'name': 'red', 'formula': '(foo + blue ** 2) / 5'},
        {'name': 'blue', 'amount': 12},
    ], "example")
    parameters.new_activity_parameters([
        {'name': 'reference_me', 'formula': 'sqrt(red - 20)', 'database': 'example', 'code': "B"},
        {'name': 'bar', 'formula': 'reference_me + 2', 'database': 'example', 'code': "A"},
    ], "my group")
    parameters.add_exchanges_to_group("my group", act_a)
    ActivityParameter.recalculate_exchanges("my group")
def test_damage_category_import():
    """Damage-category SimaPro LCIA import runs its strategies cleanly."""
    codes_and_categories = [
        ('00e73fdb-98df-4a03-8290-79931cddfd12', 'air'),
        ('2cfc5ba4-3db2-4193-9e81-b61e75ba1706', 'water'),
    ]
    bio_data = {
        ('biosphere3', code): {
            'categories': (category,),
            'code': code,
            'database': 'biosphere3',
            'exchanges': [],
            'name': 'Lead-210',
            'type': 'emission',
            'unit': 'kilo Becquerel',
        }
        for code, category in codes_and_categories
    }
    database = Database("biosphere3", backend="singlefile")
    database.register()
    database.write(bio_data)
    assert database
    # Migrations needed by the biosphere 2 -> 3 strategies
    Migration("biosphere-2-3-categories").write(
        get_biosphere_2_3_category_migration_data(),
        "Change biosphere category and subcategory labels to ecoinvent version 3",
    )
    Migration("biosphere-2-3-names").write(
        get_biosphere_2_3_name_migration_data(),
        "Change biosphere flow names to ecoinvent version 3",
    )
    # bytes delimiter on Python 2, text on Python 3
    delimiter = b"\t" if sys.version_info[0] < 3 else "\t"
    sp = SimaProLCIACSVImporter(
        os.path.join(SP_FIXTURES_DIR, "damagecategory.txt"),
        delimiter=delimiter,
    )
    assert len(sp.data)
    sp.apply_strategies()
    assert sp.statistics() == (6, 12, 0)
def create_new_biosphere(self, biosphere_name, relink=True):
    """Create new biosphere database from biosphere flows in ``self.data``.

    Links all biosphere flows to new bio database if ``relink``."""
    assert biosphere_name not in databases, u"{} database already exists".format(
        biosphere_name
    )
    print(u"Creating new biosphere database: {}".format(biosphere_name))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        new_bio = Database(biosphere_name, backend="singlefile")
        new_bio.register(
            format=self.format, comment="New biosphere created by LCI import"
        )
    # Only these fields are carried from an exchange into the new flow dataset
    KEYS = {"name", "unit", "categories"}

    def reformat(exc):
        # Build a biosphere activity dataset from an exchange; the code is a
        # hash of the retained fields, so identical flows collapse to one entry
        dct = {key: value for key, value in list(exc.items()) if key in KEYS}
        dct.update(
            type="emission",
            exchanges=[],
            database=biosphere_name,
            code=activity_hash(dct),
        )
        return dct

    bio_data = [
        reformat(exc)
        for ds in self.data
        for exc in ds.get("exchanges", [])
        if exc["type"] == "biosphere"
    ]
    # Keying by (database, code) deduplicates identical flows
    bio_data = {(ds["database"], ds["code"]): ds for ds in bio_data}
    new_bio.write(bio_data)
    if relink:
        self.apply_strategies(
            [
                functools.partial(
                    link_iterable_by_fields,
                    other=list(bio_data.values()),
                    relink=True,
                ),
            ]
        )
def test_match_subcategories_makes_copies(self):
    """Should copy data instead of creating references, so that there are
    different amounts for different methods."""
    self.maxDiff = None
    flow = {
        "categories": ("air", "non-urban air or from high stacks"),
        "code": "first",
        "database": "b",
        "exchanges": [],
        "name": "Boron trifluoride",
        "type": "emission",
        "unit": "kilogram",
    }
    db = Database("b")
    db.register()
    db.write({(flow["database"], flow["code"]): flow})

    def make_method(name, amount):
        # Two methods differing only in name and CF amount
        return {
            "name": name,
            "exchanges": [
                {
                    "name": "Boron trifluoride",
                    "categories": ("air",),
                    "unit": "kilogram",
                    "amount": amount,
                    "input": ("foo", "bar"),
                }
            ],
        }

    result = match_subcategories(
        [make_method("Some LCIA method", 1), make_method("Another LCIA method", 2)],
        "b",
    )
    for cf in result[0]["exchanges"]:
        self.assertEqual(cf["amount"], 1)
    for cf in result[1]["exchanges"]:
        self.assertEqual(cf["amount"], 2)
def test_combine_methods(self):
    """``combine_methods`` sums characterization factors across methods and
    keeps the unit of the inputs."""
    db = Database("biosphere")
    db.register(depends=[])
    db.write(biosphere)
    first = Method(("test method 1",))
    first.register(unit="p")
    first.write([(("biosphere", 1), 1, "GLO"), (("biosphere", 2), 2, "GLO")])
    second = Method(("test method 2",))
    second.register(unit="p")
    second.write([(("biosphere", 2), 10, "GLO")])
    combine_methods(("test method 3",), ("test method 1",), ("test method 2",))
    combined = Method(("test method 3",))
    # Flow 2 appears in both inputs: 2 + 10 = 12
    self.assertEqual(
        sorted(combined.load()),
        [(("biosphere", 1), 1, "GLO"), (("biosphere", 2), 12, "GLO")],
    )
    # list key — presumably coerced to a tuple by the `methods` metadata store
    self.assertEqual(methods[["test method 3"]]["unit"], "p")
def create_biosphere(self):
    """Write a small ``biosphere`` test database (nitrogen has no 'type' key)."""
    flows = {}
    for flow_name in ("oxygen", "argon"):
        flows[("biosphere", flow_name)] = {
            "name": flow_name,
            "unit": "kilogram",
            "type": "emission",
        }
    flows[("biosphere", "nitrogen")] = {"name": "nitrogen", "unit": "kilogram"}
    db = Database("biosphere")
    db.register()
    db.write(flows)
def test_match_subcategories_makes_copies(self):
    """Should copy data instead of creating references, so that there are
    different amounts for different methods."""
    self.maxDiff = None
    flows = [{
        'categories': ('air', 'non-urban air or from high stacks'),
        'code': 'first',
        'database': 'b',
        'exchanges': [],
        'name': 'Boron trifluoride',
        'type': 'emission',
        'unit': 'kilogram',
    }]
    database = Database('b')
    database.register()
    database.write({(f['database'], f['code']): f for f in flows})
    methods_data = []
    for method_name, amount in (('Some LCIA method', 1), ('Another LCIA method', 2)):
        methods_data.append({
            'name': method_name,
            'exchanges': [{
                'name': 'Boron trifluoride',
                'categories': ('air', ),
                'unit': 'kilogram',
                'amount': amount,
                'input': ('foo', 'bar'),
            }],
        })
    first, second = match_subcategories(methods_data, 'b')
    # Each method's CFs must keep its own amount — no shared references
    for cf in first['exchanges']:
        self.assertEqual(cf['amount'], 1)
    for cf in second['exchanges']:
        self.assertEqual(cf['amount'], 2)
def setup():
    """Build the ``example`` database (activities A and B, with a
    formula-driven technosphere exchange) and the full parameter stack."""
    db = Database("example")
    db.register(extra="yes please")
    a = db.new_activity(code="A", name="An activity", unit="kg", foo="bar")
    a.save()
    a.new_exchange(amount=1, input=a, type="production").save()
    b = db.new_activity(code="B", name="Another activity", location="here", this="that")
    b.save()
    b.new_exchange(amount=10, input=b, type="production").save()
    exc = a.new_exchange(amount=0, input=b, type="technosphere", formula="foo * bar + 4")
    exc.save()
    parameters.new_project_parameters(
        [{"name": "foo", "formula": "green / 7"}, {"name": "green", "amount": 7}]
    )
    parameters.new_database_parameters(
        [{"name": "red", "formula": "(foo + blue ** 2) / 5"}, {"name": "blue", "amount": 12}],
        "example",
    )
    activity_params = [
        {"name": "reference_me", "formula": "sqrt(red - 20)", "database": "example", "code": "B"},
        {"name": "bar", "formula": "reference_me + 2", "database": "example", "code": "A"},
    ]
    parameters.new_activity_parameters(activity_params, "my group")
    parameters.add_exchanges_to_group("my group", a)
    ActivityParameter.recalculate_exchanges("my group")
def create_biosphere(self):
    """Write the ``biosphere`` test database (note: nitrogen lacks a 'type' entry)."""
    oxygen = {'name': 'oxygen', 'unit': 'kilogram', 'type': 'emission'}
    argon = {'name': 'argon', 'unit': 'kilogram', 'type': 'emission'}
    nitrogen = {'name': 'nitrogen', 'unit': 'kilogram'}
    db = Database("biosphere")
    db.register()
    db.write({
        ('biosphere', 'oxygen'): oxygen,
        ('biosphere', 'argon'): argon,
        ('biosphere', 'nitrogen'): nitrogen,
    })
def add_unlinked_flows_to_new_biosphere_database(self, biosphere_name=None):
    """Write the flows flagged ``new flow`` in ``self.biosphere_correspondence``
    to a (new) biosphere database and return that database's name.

    Defaults to ``"<self.db_name> biosphere"`` when ``biosphere_name`` is
    not given.
    """
    biosphere_name = biosphere_name or self.db_name + " biosphere"
    db = Database(biosphere_name)
    # One biosphere activity per new EXIOBASE flow, keyed by its EXIOBASE name
    data = {(biosphere_name, o["exiobase name"]): {
        "name": o["exiobase name"],
        "unit": o["exiobase unit"],
        "categories": (o["ecoinvent category"], ),
        "comment": o["comment"],
        "exchanges": [],
    } for o in self.biosphere_correspondence if o["new flow"]}
    if biosphere_name not in databases:
        # Register only on first creation; later calls just rewrite the data
        db.register(format="EXIOBASE 3 New Biosphere", filepath=str(self.dirpath))
    db.write(data)
    return biosphere_name
def test_build_one_d(self):
    """One-dimensional build places values on the matrix diagonal."""
    db = Database("sour")
    db.register()
    dtype = [
        (numpy_string('a'), np.uint32),
        (numpy_string('row'), np.uint32),
        (numpy_string('values'), np.float32),
    ]
    arr = np.array([(1, MAX_INT_32, 99), (2, MAX_INT_32, 100)], dtype=dtype)
    np.save(db.filepath_processed(), arr, allow_pickle=False)
    matrix = MatrixBuilder.build(
        [db.filepath_processed()], "values", "a", "row",
        row_dict={1: 0, 2: 1}, one_d=True,
    )[3]
    self.assertTrue(
        np.allclose(matrix.toarray(), np.array(((99, 0), (0, 100))))
    )
def test_build_one_d_drop_missing(self):
    """Entries whose key is absent from ``row_dict`` are dropped in 1-D builds."""
    db = Database("ghost")
    db.register()
    dtype = [
        (numpy_string('a'), np.uint32),
        (numpy_string('row'), np.uint32),
        (numpy_string('values'), np.float32),
    ]
    arr = np.array(
        [(1, MAX_INT_32, 99), (2, MAX_INT_32, 99), (3, MAX_INT_32, 99)],
        dtype=dtype,
    )
    np.save(db.filepath_processed(), arr, allow_pickle=False)
    values = MatrixBuilder.build(
        [db.filepath_processed()], "values", "a", "row",
        row_dict={1: 0, 2: 1}, one_d=True,
    )[0]
    # Key 3 is unmapped, so only two entries survive
    self.assertEqual(values.shape, (2, ))
def test_build_drop_missing(self):
    """Entries with an unmapped row or column key are dropped by default."""
    db = Database("boo")
    db.register()
    dtype = [
        (numpy_string('a'), np.uint32),
        (numpy_string('b'), np.uint32),
        (numpy_string('row'), np.uint32),
        (numpy_string('col'), np.uint32),
        (numpy_string('values'), np.float32),
    ]
    arr = np.array(
        [
            (1, 2, MAX_INT_32, MAX_INT_32, 99),
            (3, 4, MAX_INT_32, MAX_INT_32, 99),  # column 4 unmapped
            (3, 2, MAX_INT_32, MAX_INT_32, 99),
            (5, 6, MAX_INT_32, MAX_INT_32, 99),  # row 5 unmapped
        ],
        dtype=dtype,
    )
    np.save(db.filepath_processed(), arr, allow_pickle=False)
    values = MatrixBuilder.build(
        [db.filepath_processed()], "values",
        "a", "row", "b", "col",
        {1: 0, 3: 1}, {2: 0, 6: 1},
    )[0]
    self.assertEqual(values.shape, (2, ))
def write_database(self, data=None, delete_existing=True, backend=None,
                   activate_parameters=False, **kwargs):
    """
    Write data to a ``Database``.

    All arguments are optional, and are normally not specified.

    ``delete_existing`` effects both the existing database (it will be
    emptied prior to writing if True, which is the default), and, if
    ``activate_parameters`` is True, existing database and activity
    parameters. Database parameters will only be deleted if the import
    data specifies a new set of database parameters (i.e.
    ``database_parameters`` is not ``None``) - the same is true for
    activity parameters. If you need finer-grained control, please use
    the ``DatabaseParameter``, etc. objects directly.

    Args:
        * *data* (dict, optional): The data to write to the ``Database``.
          Default is ``self.data``.
        * *delete_existing* (bool, default ``True``): See above.
        * *activate_parameters* (bool, default ``False``). Instead of
          storing parameters in ``Activity`` and other proxy objects,
          create ``ActivityParameter`` and other parameter objects, and
          evaluate all variables and formulas.
        * *backend* (string, optional): Storage backend to use when
          creating ``Database``. Default is the default backend.

    Returns:
        ``Database`` instance.
    """
    data = self.data if data is None else data
    self.metadata.update(kwargs)
    if activate_parameters:
        # Comes before .write_database because we
        # need to remove `parameters` key
        activity_parameters = self._prepare_activity_parameters(
            data, delete_existing
        )
    # Every dataset must belong to this importer's database
    if {o['database'] for o in data} != {self.db_name}:
        error = "Activity database must be {}, but {} was also found".format(
            self.db_name,
            {o['database'] for o in data}.difference({self.db_name})
        )
        raise WrongDatabase(error)
    # Codes must be unique within the database; collect the offending names
    if len({o['code'] for o in data}) < len(data):
        seen, duplicates = set(), []
        for o in data:
            if o['code'] in seen:
                duplicates.append(o['name'])
            else:
                seen.add(o['code'])
        error = "The following activities have non-unique codes: {}"
        raise NonuniqueCode(error.format(duplicates))
    # Re-key datasets by (database, code) for writing
    data = {(ds['database'], ds['code']): ds for ds in data}
    if self.db_name in databases:
        # TODO: Raise error if unlinked exchanges?
        db = Database(self.db_name)
        if delete_existing:
            existing = {}
        else:
            existing = db.load(as_dict=True)
    else:
        existing = {}
        if 'format' not in self.metadata:
            self.metadata['format'] = self.format
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            db = Database(self.db_name, backend=backend)
            db.register(**self.metadata)
    self.write_database_parameters(activate_parameters, delete_existing)
    # Merge new data over whatever was kept of the existing database
    existing.update(data)
    db.write(existing)
    if activate_parameters:
        self._write_activity_parameters(activity_parameters)
    print(u"Created database: {}".format(self.db_name))
    return db
def test_waste_sign_changed():
    # flip_sign_on_waste should negate the amount (and the relevant
    # uncertainty parameters) of technosphere inputs that come from
    # waste-treatment activities in "other" (production amount < 0),
    # while leaving ordinary inputs and all production exchanges untouched.
    assert not len(Database("other"))
    other = Database("other")
    other.register()
    other.write({
        ("other", "non-waste"): {
            "name": "production activity",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "non-waste product",
            "production amount": 1,  # Positive, not a waste treatment
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "non-waste product",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("other", "non-waste"),
                    "type": "production",
                    "uncertainty type": 0,
                },
            ],
        },
        ("other", "waste-0"): {
            "name": "waste treatment activity",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "waste product",
            "production amount": -1,  # negative, waste treatment
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "waste treatment",
                    "unit": "kilogram",
                    "amount": -1.0,
                    "input": ("other", "waste-0"),
                    "type": "production",
                    "uncertainty type": 0,
                },
            ],
        },
        ("other", "waste-1"): {
            "name": "waste treatment activity",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "waste product",
            "production amount": -1,  # negative, waste treatment
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "waste treatment",
                    "unit": "kilogram",
                    "amount": -1.0,
                    "input": ("other", "waste-1"),
                    "type": "production",
                    "uncertainty type": 0,
                },
            ],
        },
        ("other", "waste-2"): {
            "name": "waste treatment activity",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "waste product",
            "production amount": -1,  # negative, waste treatment
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "waste treatment",
                    "unit": "kilogram",
                    "amount": -1.0,
                    "input": ("other", "waste-2"),
                    "type": "production",
                    "uncertainty type": 0,
                },
            ],
        },
        ("other", "waste-3"): {
            "name": "waste treatment activity",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "waste product",
            "production amount": -1,  # negative, waste treatment
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "waste treatment",
                    "unit": "kilogram",
                    "amount": -1.0,
                    "input": ("other", "waste-3"),
                    "type": "production",
                    "uncertainty type": 0,
                },
            ],
        },
        ("other", "waste-4"): {
            "name": "waste treatment activity",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "waste product",
            "production amount": -1,  # negative, waste treatment
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "waste treatment",
                    "unit": "kilogram",
                    "amount": -1.0,
                    "input": ("other", "waste-4"),
                    "type": "production",
                    "uncertainty type": 0,
                },
            ],
        },
        ("other", "waste-5"): {
            "name": "waste treatment activity",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "waste product",
            "production amount": -1,  # negative, waste treatment
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "waste treatment",
                    "unit": "kilogram",
                    "amount": -1.0,
                    "input": ("other", "waste-5"),
                    "type": "production",
                    "uncertainty type": 0,
                },
            ],
        },
    })
    assert "other" in databases
    # The imported "sp" datasets: one ordinary input, then one waste input
    # per uncertainty type 0-5
    db = [
        {
            "simapro metadata": dict(),
            "code": "test_non_waste",
            "database": "sp",
            "type": "process",
            "name": "test_non_waste",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "anything",
            "production amount": 1,
            "exchanges": [
                {
                    "name": "test_non_waste",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("sp", "test_non_waste"),
                    "type": "production",
                    "uncertainty type": 0,
                },
                {
                    "name": "some product",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("other", "non-waste"),
                    "type": "technosphere",
                    "uncertainty type": 0,
                },
            ],
        },
        {
            "simapro metadata": dict(),
            "code": "test_waste_0",
            "database": "sp",
            "type": "process",
            "name": "test_waste_0",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "anything else",
            "production amount": 1,
            "exchanges": [
                {
                    "name": "test_waste_0",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("sp", "test_waste_0"),
                    "type": "production",
                    "uncertainty type": 0,
                },
                {
                    "name": "waste product",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("other", "waste-0"),
                    "type": "technosphere",
                    "uncertainty type": 0,
                },
            ],
        },
        {
            "simapro metadata": dict(),
            "code": "test_waste_1",
            "database": "sp",
            "type": "process",
            "name": "test_waste_1",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "anything else",
            "production amount": 1,
            "exchanges": [
                {
                    "name": "test_waste_1",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("sp", "test_waste_1"),
                    "type": "production",
                    "uncertainty type": 0,
                },
                {
                    "name": "waste product",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("other", "waste-1"),
                    "type": "technosphere",
                    "uncertainty type": 1,
                    "loc": 1.0,
                },
            ],
        },
        {
            "simapro metadata": dict(),
            "code": "test_waste_2",
            "database": "sp",
            "type": "process",
            "name": "test_waste_2",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "anything else",
            "production amount": 1,
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "test_waste_2",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("sp", "test_waste_2"),
                    "type": "production",
                },
                {
                    "name": "waste product",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("other", "waste-0"),
                    "type": "technosphere",
                    "uncertainty type": 2,
                    "loc": 0,
                    "scale": 0.1,
                },
            ],
        },
        {
            "simapro metadata": dict(),
            "code": "test_waste_3",
            "database": "sp",
            "type": "process",
            "name": "test_waste_3",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "anything else",
            "production amount": 1,
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "test_waste_3",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("sp", "test_waste_3"),
                    "type": "production",
                    "uncertainty type": 0,
                },
                {
                    "name": "waste product",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("other", "waste-3"),
                    "type": "technosphere",
                    "uncertainty type": 3,
                    "loc": 1.0,
                    "scale": 0.1,
                },
            ],
        },
        {
            "simapro metadata": dict(),
            "code": "test_waste_4",
            "database": "sp",
            "type": "process",
            "name": "test_waste_4",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "anything else",
            "production amount": 1,
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "test_waste_4",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("sp", "test_waste_4"),
                    "type": "production",
                    "uncertainty type": 0,
                },
                {
                    "name": "waste product",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("other", "waste-4"),
                    "type": "technosphere",
                    "uncertainty type": 4,
                    "minimum": 0.5,
                    "maximum": 1.5,
                },
            ],
        },
        {
            "simapro metadata": dict(),
            "code": "test_waste_5",
            "database": "sp",
            "type": "process",
            "name": "test_waste_5",
            "unit": "kilogram",
            "location": "GLO",
            "reference product": "anything else",
            "production amount": 1,
            "activity type": "ordinary transforming activity",
            "exchanges": [
                {
                    "name": "test_waste_5",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("sp", "test_waste_5"),
                    "type": "production",
                    "uncertainty type": 0,
                },
                {
                    "name": "waste product",
                    "unit": "kilogram",
                    "amount": 1.0,
                    "input": ("other", "waste-5"),
                    "type": "technosphere",
                    "uncertainty type": 5,
                    "minimum": 0.5,
                    "maximum": 1.5,
                    "loc": 1.0,
                },
            ],
        },
    ]
    db_before = copy.deepcopy(db)
    db = flip_sign_on_waste(db, "other")
    # Check that things did not unexpectedly change
    expected_unchanged_keys_act = [
        "simapro metadata",
        "code",
        "database",
        "name",
        "unit",
        "location",
        "reference product",
        "type",
        "production amount",
    ]
    expected_unchanged_keys_exc = [
        "name",
        "unit",
        "input",
        "type",
        "uncertainty type",
    ]
    for old_act, new_act in zip(db_before, db):
        for act_k in expected_unchanged_keys_act:
            assert old_act[act_k] == new_act[act_k]
        for old_exc, new_exc in zip(old_act["exchanges"], new_act["exchanges"]):
            for exc_k in expected_unchanged_keys_exc:
                assert old_exc.get(exc_k, "key not found") == new_exc.get(
                    exc_k, "key not found")
    # Check that inputs of regular products have not changed
    assert db[0] == db_before[0]
    # Check uncertainty type 0 (undefined)
    for new_exc, old_exc in zip(db[1]["exchanges"], db_before[1]["exchanges"]):
        if new_exc["type"] == "production":
            assert new_exc == old_exc
        else:
            assert new_exc["amount"] == -1
    # Check uncertainty type 1 (no uncertainty)
    for new_exc, old_exc in zip(db[2]["exchanges"], db_before[2]["exchanges"]):
        if new_exc["type"] == "production":
            assert new_exc == old_exc
        else:
            assert new_exc["amount"] == -1
    # Check uncertainty type 2 (lognormal)
    for new_exc, old_exc in zip(db[3]["exchanges"], db_before[3]["exchanges"]):
        if new_exc["type"] == "production":
            assert new_exc == old_exc
        else:
            assert new_exc["amount"] == -1
            assert new_exc["loc"] == 0  # ln(1)
            assert new_exc["scale"] == old_exc["scale"]  # no change
            assert new_exc["negative"] == True
    # Check uncertainty type 3 (normal)
    for new_exc, old_exc in zip(db[4]["exchanges"], db_before[4]["exchanges"]):
        if new_exc["type"] == "production":
            assert new_exc == old_exc
        else:
            assert new_exc["amount"] == -1
            assert new_exc["loc"] == -1
            assert new_exc["scale"] == old_exc["scale"]  # no change
    # Check uncertainty type 4 (uniform)
    for new_exc, old_exc in zip(db[5]["exchanges"], db_before[5]["exchanges"]):
        if new_exc["type"] == "production":
            assert new_exc == old_exc
        else:
            assert new_exc["amount"] == -1
            assert new_exc["minimum"] == -1.5
            assert new_exc["maximum"] == -0.5
    # Check uncertainty type 5 (triangular)
    for new_exc, old_exc in zip(db[6]["exchanges"], db_before[6]["exchanges"]):
        if new_exc["type"] == "production":
            assert new_exc == old_exc
        else:
            assert new_exc["amount"] == -1
            assert new_exc["loc"] == -1
            assert new_exc["minimum"] == -1.5
            assert new_exc["maximum"] == -0.5
def test_match_subcategories(self):
    # A root-category CF should gain copies for each matching flow in
    # database `b` that has subcategories; existing CFs are left alone.
    self.maxDiff = None
    background = [
        {
            'categories': ('air', 'non-urban air or from high stacks'),
            'code': 'first',
            'database': 'b',
            'exchanges': [],
            'name': 'Boron trifluoride',
            'type': 'emission',
            'unit': 'kilogram'
        }, {
            'categories': ('air', 'low population density, long-term'),
            'code': 'second',
            'database': 'b',
            'exchanges': [],
            'name': 'Boron trifluoride',
            'type': 'emission',
            'unit': 'kilogram'
        }, {
            'categories': ('air', 'lower stratosphere + upper troposphere'),
            'code': 'third',
            'database': 'b',
            'exchanges': [],
            'name': 'Boron trifluoride',
            'type': 'emission',
            'unit': 'kilogram'
        }, {
            # Skip - root category
            'categories': ('air', ),
            'code': 'fourth',
            'database': 'b',
            'exchanges': [],
            'name': 'Boron trifluoride',
            'type': 'emission',
            'unit': 'kilogram'
        }, {
            # Should be skipped - wrong type
            'categories': ('air', 'skip me'),
            'code': 'Bill. My friends just call me Bill.',
            'database': 'b',
            'exchanges': [],
            'name': 'Boron trifluoride',
            'type': 'something else',
            'unit': 'kilogram'
        }
    ]
    db = Database('b')
    db.register()
    db.write({(obj['database'], obj['code']): obj for obj in background})
    data = [{
        'name': 'Some LCIA method',
        'exchanges': [
            {
                'name': 'Boron trifluoride',
                'categories': ('air', ),
                'unit': 'kilogram',
                # Only for CFs - no need for biosphere filter
                # 'type': 'biosphere',
                'amount': 1,
            }, {
                'name': 'Boron trifluoride',
                'categories': ('air', 'lower stratosphere + upper troposphere'),
                'unit': 'kilogram',
                'amount': 0,
            }
        ]
    }]
    expected = [{
        'name': 'Some LCIA method',
        'exchanges': [
            {
                'name': 'Boron trifluoride',
                'categories': ('air',),
                'unit': 'kilogram',
                'amount': 1,
            }, {
                # Not linked - already has subcategories
                'categories': ('air', 'lower stratosphere + upper troposphere'),
                'name': 'Boron trifluoride',
                'unit': 'kilogram',
                'amount': 0,
            }, {
                # Copied from the root CF and linked to flow 'second'
                'categories': ('air', 'low population density, long-term'),
                'database': 'b',
                'name': 'Boron trifluoride',
                'unit': 'kilogram',
                'input': ('b', 'second'),
                'amount': 1,
            }, {
                # Copied from the root CF and linked to flow 'first'
                'amount': 1,
                'categories': ('air', 'non-urban air or from high stacks'),
                'database': 'b',
                'input': ('b', 'first'),
                'name': 'Boron trifluoride',
                'unit': 'kilogram'
            }
        ]
    }]
    answer = match_subcategories(data, 'b', remove=False)
    self.assertEqual(expected, answer)
def create_database(self, name, data):
    """Register, write, and process a database called ``name`` holding ``data``."""
    new_db = Database(name)
    new_db.register()
    new_db.write(data)
    new_db.process()
def import_data(self):
    """Create the biosphere/inventory databases and the regionalized method
    fixture shared by these tests."""
    biosphere = Database("biosphere")
    biosphere.register(depends=[], geocollections=[])
    biosphere.write({
        ('biosphere', code): {'type': 'emission', 'exchanges': []}
        for code in ('F', 'G')
    })
    inventory_data = {
        ('inventory', 'U'): {
            'type': 'process',
            'location': "L",
            'exchanges': [
                {'input': ('biosphere', 'F'), 'type': 'biosphere', 'amount': 1},
                {'input': ('biosphere', 'G'), 'type': 'biosphere', 'amount': 1},
            ],
        },
    }
    # Remaining processes have no exchanges, only distinct locations
    for code, place in (('V', "M"), ('X', "N"), ('Y', "O"), ('Z', "O")):
        inventory_data[('inventory', code)] = {
            'type': 'process',
            'location': place,
            'exchanges': [],
        }
    inventory = Database("inventory")
    inventory.register(depends=["biosphere"], geocollections=["places"])
    inventory.write(inventory_data)
    method = Method(("a", "method"))
    method.register(geocollections=['places'])
    method.write([
        [('biosphere', 'F'), 1, "L"],
        [('biosphere', 'G'), 2, "L"],
    ])
def test_match_subcategories(self):
    # A root-category CF should gain copies for each matching flow in
    # database "b" that has subcategories; existing CFs are left alone.
    self.maxDiff = None
    background = [
        {
            "categories": ("air", "non-urban air or from high stacks"),
            "code": "first",
            "database": "b",
            "exchanges": [],
            "name": "Boron trifluoride",
            "type": "emission",
            "unit": "kilogram",
        },
        {
            "categories": ("air", "low population density, long-term"),
            "code": "second",
            "database": "b",
            "exchanges": [],
            "name": "Boron trifluoride",
            "type": "emission",
            "unit": "kilogram",
        },
        {
            "categories": ("air", "lower stratosphere + upper troposphere"),
            "code": "third",
            "database": "b",
            "exchanges": [],
            "name": "Boron trifluoride",
            "type": "emission",
            "unit": "kilogram",
        },
        {
            # Skip - root category
            "categories": ("air",),
            "code": "fourth",
            "database": "b",
            "exchanges": [],
            "name": "Boron trifluoride",
            "type": "emission",
            "unit": "kilogram",
        },
        {
            # Should be skipped - wrong type
            "categories": ("air", "skip me"),
            "code": "Bill. My friends just call me Bill.",
            "database": "b",
            "exchanges": [],
            "name": "Boron trifluoride",
            "type": "something else",
            "unit": "kilogram",
        },
    ]
    db = Database("b")
    db.register()
    db.write({(obj["database"], obj["code"]): obj for obj in background})
    data = [
        {
            "name": "Some LCIA method",
            "exchanges": [
                {
                    "name": "Boron trifluoride",
                    "categories": ("air",),
                    "unit": "kilogram",
                    # Only for CFs - no need for biosphere filter
                    # 'type': 'biosphere',
                    "amount": 1,
                },
                {
                    "name": "Boron trifluoride",
                    "categories": ("air", "lower stratosphere + upper troposphere"),
                    "unit": "kilogram",
                    "amount": 0,
                },
            ],
        }
    ]
    expected = [
        {
            "name": "Some LCIA method",
            "exchanges": [
                {
                    "name": "Boron trifluoride",
                    "categories": ("air",),
                    "unit": "kilogram",
                    "amount": 1,
                },
                {
                    # Not linked - already has subcategories
                    "categories": ("air", "lower stratosphere + upper troposphere"),
                    "name": "Boron trifluoride",
                    "unit": "kilogram",
                    "amount": 0,
                },
                {
                    # Copied from the root CF and linked to flow "second"
                    "categories": ("air", "low population density, long-term"),
                    "database": "b",
                    "name": "Boron trifluoride",
                    "unit": "kilogram",
                    "input": ("b", "second"),
                    "amount": 1,
                },
                {
                    # Copied from the root CF and linked to flow "first"
                    "amount": 1,
                    "categories": ("air", "non-urban air or from high stacks"),
                    "database": "b",
                    "input": ("b", "first"),
                    "name": "Boron trifluoride",
                    "unit": "kilogram",
                },
            ],
        }
    ]
    answer = match_subcategories(data, "b", remove=False)
    self.assertEqual(expected, answer)
def test_unprocessed_database_error(self):
    """Reading geocollections from an unprocessed database must raise."""
    db = Database("empty")
    db.register(depends=[])
    with self.assertRaises(UnprocessedDatabase):
        rb = RegionalizationBase({("empty", "nothing"): 1})
        rb.get_inventory_geocollections()
def test_value_error_no_method(self):
    """Constructing an LCA without specifying a method must raise ValueError."""
    db = Database("empty")
    db.register(depends=[])
    with self.assertRaises(ValueError):
        LCA({("empty", "nothing"): 1})