def test_units_are_olcaunits(self):
    """Test that every unit used by the flow list is an openLCA reference unit.

    Checks, for each flow class, the 'Unit' column of the Flowables file and
    (when present) both unit columns of the optional FlowableAltUnits file.
    Each unit must resolve via olca.units.unit_ref; unresolved units are
    logged at debug level before the assertion fails.
    """
    # Imported here so the olca dependency is only required when this test runs.
    import olca.units as olcaunits

    for c_ in flow_list_specs["flow_classes"]:
        flowables = read_in_flowclass_file(c_, "Flowables")
        # Every reference unit in the Flowables file must be an olca ref unit.
        for unt in pd.unique(flowables['Unit']):
            olcaref = olcaunits.unit_ref(unt)
            if olcaref is None:
                log.debug(unt + ' in Flowables for class ' + c_ +
                          ' is not an olca ref unit')
            self.assertIsNotNone(olcaref)

        # Alternate-unit files are optional; only the read itself may raise
        # FileNotFoundError, so keep the try block to that single call.
        try:
            altunits_for_class = read_in_flowclass_file(
                c_, 'FlowableAltUnits')
        except FileNotFoundError:
            continue

        # Check both the alternate and the reference unit columns, de-duplicated.
        alt_units_with_ref_unique = set(
            list(altunits_for_class['Alternate Unit']) +
            list(altunits_for_class['Reference Unit']))
        for unt in alt_units_with_ref_unique:
            olcaref = olcaunits.unit_ref(unt)
            if olcaref is None:
                log.debug(unt + ' in alt units for class ' + c_ +
                          ' is not an olca ref unit')
            self.assertIsNotNone(olcaref)
# Exemplo n.º 2 (snippet separator from the original examples listing; score: 0)
            c = {}
            c['Context'] = i
            for f in field_to_keep:
                c[f] = row[f]
            c_group.append(c)
        class_contexts_list.extend(c_group)
    class_contexts = pd.DataFrame(class_contexts_list)

    # Merge this table now with the flowables and primary contexts with the full contexts per class, creating flows for each compartment relevant for that flow type, using major
    flows = pd.merge(flowables_w_primary_contexts,
                     class_contexts,
                     on=['Class', 'Directionality', 'Environmental Media'])

    #Drop duplicate flows if they exist
    if len(flows[flows.duplicated(keep=False)]) > 0:
        log.debug("Duplicate flows exist. They will be removed.")
        flows = flows.drop_duplicates()

    #If both the flowable and context are preferred, make this a preferred flow
    flows['Preferred'] = 0
    flows.loc[(flows['Flowable Preferred'] == 1) &
              (flows['ContextPreferred'] == 1), 'Preferred'] = 1

    #Drop unneeded columns
    cols_to_drop = [
        'Flowable Preferred', 'ContextPreferred', 'Directionality',
        'Environmental Media'
    ]
    flows = flows.drop(columns=cols_to_drop)

    # Drop excluded flows based on CSV input file
# Exemplo n.º 3 (snippet separator from the original examples listing; score: 0)
import pandas as pd
import fedelemflowlist
from fedelemflowlist.globals import flowmappingpath, flowmapping_fields, log

# Name of the source list whose mapping file should be refreshed. The matching
# .csv mapping file must exist in the flowmapping directory.
# Use None to update UUIDs in every mapping file.
source = None

if __name__ == '__main__':
    # Load the mapping(s) to refresh and the current master flow list.
    mapping = fedelemflowlist.get_flowmapping(source)
    n_before_merge = len(mapping)
    flow_cols = fedelemflowlist.get_flows()[
        ['Flowable', 'Context', 'Flow UUID', 'Unit']]

    # Attach current UUIDs by matching target name/context/unit against the
    # flow list, then swap the freshly looked-up UUID in for the stale one.
    refreshed = mapping.merge(
        flow_cols,
        left_on=['TargetFlowName', 'TargetFlowContext', 'TargetUnit'],
        right_on=['Flowable', 'Context', 'Unit'])
    refreshed = refreshed.drop(columns=['TargetFlowUUID', 'Flowable',
                                        'Context', 'Unit'])
    refreshed = refreshed.rename(columns={'Flow UUID': 'TargetFlowUUID'})

    # An inner merge silently drops rows with no match; report the shortfall.
    if n_before_merge > len(refreshed):
        log.debug("Not all flows were mapped to flows in the list")

    # Restore the canonical column order, then write one file per source list.
    refreshed = refreshed[list(flowmapping_fields.keys())]
    for list_name in pd.unique(refreshed['SourceListName']):
        per_source = refreshed[refreshed['SourceListName'] == list_name]
        per_source.to_csv(flowmappingpath + list_name + '.csv', index=False)