def to_json(self) -> dict:
    """Serialize this flow as a dictionary for an openLCA JSON file.

    :return: a dictionary with a 'flow' entry (the FlowRef) plus optional
        'unit' and 'flowProperty' entries when a unit is set and known.
    """
    ref = olca.FlowRef()
    ref.name = self.name
    if self.category is not None:
        ref.category_path = self.category.split('/')
    # use the stored UUID when present, otherwise derive one from attributes
    ref.id = self.uid if self.uid is not None \
        else make_uuid("Flow", self.category, self.name)
    result = {'flow': ref.to_json()}
    if self.unit is not None:
        unit = units.unit_ref(self.unit)
        if unit is not None:
            result['unit'] = unit.to_json()
        prop = units.property_ref(self.unit)
        if prop is not None:
            result['flowProperty'] = prop.to_json()
    return result
def _write_categories(self, pw: pack.Writer):
    """Write the context/category tree of the flow list to the package.

    Creates the fixed "Elementary flows" root category, then one category
    per context path segment found in the 'Context' column, caching each
    created UUID in self._context_uids keyed by the lower-cased partial
    path so shared prefixes are written only once.

    :param pw: the pack writer that receives each created category
    """
    root = olca.Category()
    root.id = "f318fa60-bae9-361f-ad5a-5066a0e2a9d1"
    root.name = "Elementary flows"
    root.model_type = olca.ModelType.FLOW
    self._context_uids[root.name.lower()] = root.id
    pw.write(root)
    for _, row in self.flow_list.iterrows():
        context = row['Context']
        # skip missing (NaN) and blank contexts, and paths already written
        if not isinstance(context, str):
            continue
        context = context.strip()
        if context == '' or context.lower() in self._context_uids:
            continue
        segments = context.split("/")
        parent_id = root.id
        for depth, segment in enumerate(segments):
            partial = "/".join(segments[:depth + 1]).lower()
            known = self._context_uids.get(partial)
            if known is not None:
                # this prefix was created earlier; just descend into it
                parent_id = known
                continue
            uid = make_uuid("Flow", partial)
            log.info("create category %s", partial)
            cat = olca.Category()
            cat.id = uid
            cat.name = segment
            cat.category = olca.ref(olca.Category, parent_id)
            cat.model_type = olca.ModelType.FLOW
            pw.write(cat)
            self._context_uids[partial] = uid
            parent_id = uid
#Create combined context and drop individual context level columns flows['Context'] = flows['PrimaryContext'] + "/" + flows[ 'SecondaryContext'] + "/" + flows['ContextDetail'] flows = flows.drop( columns=['PrimaryContext', 'SecondaryContext', 'ContextDetail']) flows = flows.append(flowscategorycutoff, ignore_index=False) flows = flows.drop_duplicates() log.info('Total of ' + str(len(flows)) + ' flows with contexts created.') # Loop through flows generating UUID for each flowids = [] log.info('Generating unique UUIDs for each flow...') for index, row in flows.iterrows(): flowid = make_uuid(row['Flowable'], row['Context'], row['Unit']) flowids.append(flowid) flows['Flow UUID'] = flowids # Drop entries due to duplicate UUIDs flows['Duplicates'] = flows.duplicated(subset=['Flow UUID'], keep='first') if flows['Duplicates'].sum() > 0: log.info( str(flows['Duplicates'].sum()) + " flows with same UUID; these duplicates have been removed.") flows = flows.drop_duplicates(subset=['Flow UUID'], keep='first') flows.drop(columns='Duplicates') # Log unique entries contexts_in_flows = pd.unique(flows['Context']) log.info('Created ' + str(len(flows)) + ' flows with ' +