def _add_more_content(self, row_dict):
    """ Add taxon author and harmful flag to a report row (in place).

    Looks up 'scientific_name' in the preloaded species list and writes
    'scientific_authority' and 'harmful' into row_dict.
    """
    scientific_name = row_dict.get('scientific_name', '')
    # Reuse one Species() accessor instead of constructing it per lookup.
    species = plankton_core.Species()
    scientific_authority = species.get_taxon_value(scientific_name, 'author')
    row_dict['scientific_authority'] = scientific_authority
    # Any truthy value in the 'harmful' field is rendered as 'X'.
    harmful = species.get_taxon_value(scientific_name, 'harmful')
    row_dict['harmful'] = u'X' if harmful else u''
def _get_trophic_type(self, scientific_name, size_class, reported_trophic_type = ''):
    """ To be called from Excel-based parser.

    Resolution order for trophic type:
    1. BVOL list (scientific name + size class).
    2. Taxa list (scientific name only).
    3. The value reported in the file, as a last resort.
    """
    scientific_name = str(scientific_name)
    size_class = str(size_class)
    reported_trophic_type = str(reported_trophic_type)
    # Reuse one Species() accessor for both lookups.
    species = plankton_core.Species()
    value = species.get_bvol_value(scientific_name, size_class, 'trophic_type')
    if not value:
        value = species.get_taxon_value(scientific_name, 'trophic_type')
    if not value:
        value = reported_trophic_type
    #
    return value
def bvol_species_screening(self, datasets):
    """ Log a warning for each scientific_name/size_class pair that is
        not present in the BVOL list.
    """
    species = plankton_core.Species()
    #
    for dataset in datasets:
        #
        for visitnode in dataset.get_children():
            #
            for samplenode in visitnode.get_children():
                #
                for variablenode in samplenode.get_children():
                    #
                    data_dict = variablenode.get_data_dict()
                    if ('scientific_name' in data_dict) and ('size_class' in data_dict):
                        taxonname = data_dict['scientific_name']
                        sizeclass = data_dict['size_class']
                        # 'is None' per PEP 8 (was '== None'); also fixed
                        # the 'clas' typo in the warning message.
                        if species.get_bvol_value(
                                taxonname, sizeclass, 'bvol_size_class') is None:
                            toolbox_utils.Logging().warning(
                                'Taxon name/size class not in BVOL list. Taxon name: ' +
                                str(taxonname) + ' Size class: ' + str(sizeclass))
def _get_sizeclass_info_by_key(self, scientific_name, size_class, key):
    """ To be called from Excel-based parser.

    Return the requested BVOL field for the name/size-class pair,
    or an empty string when no value is available.
    """
    name = str(scientific_name)
    sizeclass = str(size_class)
    bvol_key = str(key)
    bvol_value = plankton_core.Species().get_bvol_value(name, sizeclass, bvol_key)
    return bvol_value if bvol_value else ''
def _loadResources(self):
    """ Load application resources (currently the species lists),
        showing progress in the status bar.
    """
    try:
        # Load resources here.
        self.statusBar().showMessage(self.tr('Loading species lists...'))
        # Instantiating Species() triggers loading of the species lists.
        plankton_core.Species()
    finally:
        # Always clear the status message, even if loading failed.
        self.statusBar().showMessage(self.tr(''))
def add_new_fields(self, row_dict):
    """ Add taxon class, rank, harmful flag, counted unit and BVOL size
        range to a row (in place), based on 'scientific_name' and
        'size_class'. No-op if 'scientific_name' is missing or empty.
    """
    scientificname = row_dict.get('scientific_name', '')
    sizeclass = row_dict.get('size_class', '')
    if scientificname:
        # Reuse one Species() accessor for all five lookups.
        species = plankton_core.Species()
        taxon_class = species.get_taxon_value(scientificname, u'taxon_class')
        taxon_rank = species.get_taxon_value(scientificname, u'rank')
        counted_unit = species.get_bvol_value(scientificname, sizeclass, u'bvol_unit')
        bvol_size_range = species.get_bvol_value(scientificname, sizeclass, u'bvol_size_range')
        harmful = species.get_taxon_value(scientificname, u'harmful')
        # Any non-blank value in the harmful field means potentially harmful.
        harmful = 'Y' if str(harmful).strip() else ''
        row_dict['taxon_class'] = taxon_class
        row_dict['rank'] = taxon_rank
        row_dict['potential_harmful'] = harmful
        row_dict['counted_unit'] = counted_unit
        row_dict['bvol_size_range'] = bvol_size_range
def species_screening(self, datasets):
    """ Log a warning for each scientific_name that is not in the
        preloaded species list.
    """
    species = plankton_core.Species()
    # Fetch the lookup dict once instead of once per variable node.
    taxa_lookup = species.get_taxa_lookup_dict()
    #
    for dataset in datasets:
        #
        for visitnode in dataset.get_children():
            #
            for samplenode in visitnode.get_children():
                #
                for variablenode in samplenode.get_children():
                    #
                    data_dict = variablenode.get_data_dict()
                    if 'scientific_name' in data_dict:
                        if data_dict['scientific_name'] not in taxa_lookup:
                            toolbox_utils.Logging().warning(
                                'Taxon name not in species list. Taxon name: ' +
                                str(data_dict['scientific_name']))
def get_counting_species_table(self, counting_species_file_name):
    """ Return (header, rows) for the named counting species list.

    '<valid taxa>' yields all preloaded taxa; otherwise the stored
    text file is read. Returns ([], []) when the file does not exist.
    """
    # Special case: use all preloaded species.
    if counting_species_file_name == '<valid taxa>':
        # if counting_species_file_name == '<all species>':
        # for key in sorted(plankton_core.Species().get_taxa_lookup_dict().keys()):
        taxa_keys = sorted(plankton_core.Species().get_taxa_dict().keys())
        return ['scientific_name'], [[key] for key in taxa_keys]
    # Read stored species file, if present.
    text_file_name = counting_species_file_name + '.txt'
    filepath = os.path.join(self._methods_species_lists_dir_path, text_file_name)
    if not os.path.isfile(filepath):
        return [], []
    tablefilereader = toolbox_utils.TableFileReader(
        file_path=self._methods_species_lists_dir_path,
        text_file_name=text_file_name,
    )
    return tablefilereader.header(), tablefilereader.rows()
def create_report(self, datasets, result_table,
                  aggregate_rows=False,
                  abundance_class_samples=False):
    """ Create a 'counted' or 'net' phytoplankton report.

    Note:
    - Datasets must be of the format used in the modules dataset_tree and
      datasets_tree.
    - The result_table object must contain self._header = [] and
      self._rows = [].

    Layout: 8 fixed columns (class, harmful, name, size class, sflag, cf,
    trophic type, unit type) followed by N columns per sample (2 for
    'counted': abundance + biovolume; 1 for 'net': abundance class).
    """
    # Check indata ('is None' per PEP 8; was '== None').
    if datasets is None:
        raise UserWarning('Datasets are missing.')
    if result_table is None:
        raise UserWarning('Result table is missing.')
    # Check number of samples.
    self._numberofsamples = 0
    for dataset in datasets:
        for visit in dataset.get_children():
            self._numberofsamples += len(visit.get_children())
    # Number of columns per sample.
    self._numberofcolumnspersample = None
    if self._reporttype == 'counted':
        self._numberofcolumnspersample = 2
    elif self._reporttype == 'net':
        self._numberofcolumnspersample = 1
    else:
        raise UserWarning('Unknown report type.')
    # Set header.
    self._numberofcolumns = 8 + (self._numberofsamples * self._numberofcolumnspersample)
    result_table.set_header([u''] * self._numberofcolumns)  # Note: Header is not used.
    #
    # Part 1: Create header rows with columns for sample related data.
    #
    header_row_1 = [''] * self._numberofcolumns
    header_row_2 = [''] * self._numberofcolumns
    header_row_3 = [''] * self._numberofcolumns
    header_row_4 = [''] * self._numberofcolumns
    header_row_5 = [''] * self._numberofcolumns
    header_row_6 = [''] * self._numberofcolumns
    header_row_1[7] = u'Station:'
    header_row_2[7] = u'Sampling date:'
    header_row_3[7] = u'Min. depth:'
    header_row_4[7] = u'Max. depth:'
    header_row_5[7] = u'Analysis date:'
    header_row_6[7] = u'Analysed by:'
    #
    # Iterate over file to create columns.
    sampleindex = 0
    for dataset in datasets:
        for visitnode in dataset.get_children():
            for samplenode in visitnode.get_children():
                # First column for this sample (loop-invariant index hoisted).
                col = 8 + (sampleindex * self._numberofcolumnspersample)
                header_row_1[col] = visitnode.get_data('station_name')
                header_row_2[col] = visitnode.get_data('sample_date')
                header_row_3[col] = samplenode.get_data('sample_min_depth_m')
                header_row_4[col] = samplenode.get_data('sample_max_depth_m')
                header_row_5[col] = samplenode.get_data('analysis_date')
                header_row_6[col] = samplenode.get_data('analysed_by')
                #
                sampleindex += 1
    #
    # Part 2: Iterate over all rows in all samples. Create a dictionaries
    # containing taxon/size as key and lists of abundances for each sample.
    # Size class included with ':' as delimiter.
    # Example: "Incertae sedis:1": [1234.5, 1234.5, 1234.5, 1234.5]
    parameter_values_dict = {}
    # Iterate through datasets.
    sampleindex = 0
    for dataset in datasets:
        for visitnode in dataset.get_children():
            for samplenode in visitnode.get_children():
                for variablenode in samplenode.get_children():
                    # Key: name:size:sflag:cf:trophic_type.
                    taxonandsize = variablenode.get_data('scientific_name') + ':' + \
                                   variablenode.get_data(u'size_class') + ':' + \
                                   variablenode.get_data(u'species_flag_code') + ':' + \
                                   variablenode.get_data(u'cf') + ':' + \
                                   variablenode.get_data(u'trophic_type')
                    #
                    parameter = variablenode.get_data('parameter')
                    value = variablenode.get_data('value')
                    #
                    if taxonandsize not in parameter_values_dict:
                        # Add new value list.
                        parameter_values_dict[taxonandsize] = \
                            [''] * self._numberofsamples * self._numberofcolumnspersample
                    #
                    if self._reporttype == 'counted':
                        # Counted, column 1.
                        if parameter == 'Abundance':
                            parameter_values_dict[taxonandsize][
                                sampleindex * self._numberofcolumnspersample] = value
                        # Counted, column 2.
                        if parameter == 'Biovolume concentration':
                            parameter_values_dict[taxonandsize][
                                sampleindex * self._numberofcolumnspersample + 1] = value
                    elif self._reporttype == 'net':
                        if parameter == 'Abundance class':
                            parameter_values_dict[taxonandsize][
                                sampleindex * self._numberofcolumnspersample] = value
                #
                sampleindex += 1
    #
    # Part 3: Create the species rows in the report.
    #
    species_rows = []
    species = plankton_core.Species()  # One accessor for all lookups below.
    # Iterate over species in the dictionary.
    for phytowinnameandsize in parameter_values_dict.keys():
        # Counted samples:
        taxonandsize = phytowinnameandsize.split(':')
        scientificname = taxonandsize[0]
        sizeclass = taxonandsize[1]
        sflag = taxonandsize[2]
        cf = taxonandsize[3]
        trophic_type = taxonandsize[4]
        # Get extra info.
        taxonclass = species.get_taxon_value(scientificname, u'taxon_class')
        harmful = species.get_taxon_value(scientificname, u'harmful')
        unit_type = species.get_bvol_value(scientificname, sizeclass, u'bvol_unit')
        # trophic_type = plankton_core.Species().get_bvol_value(scientificname, sizeclass, u'trophic_type')
        # if not trophic_type:
        #     trophic_type = plankton_core.Species().get_taxon_value(scientificname, u'trophic_type')
        #
        # Put the row together.
        # BUGFIX: was "8 + (self._numberofcolumns * self._numberofcolumnspersample)",
        # which over-allocated trailing empty cells; the row is exactly
        # self._numberofcolumns wide (8 fixed + samples * columns-per-sample).
        row = [''] * self._numberofcolumns
        row[0] = taxonclass
        row[1] = u'X' if harmful else u''
        row[2] = scientificname
        row[3] = sizeclass
        row[4] = sflag.lower() if sflag else ''  # Lowercase.
        row[5] = cf.lower() if cf else ''  # Lowercase.
        row[6] = trophic_type
        row[7] = unit_type
        #
        for index, value in enumerate(parameter_values_dict[phytowinnameandsize]):
            row[8 + (index)] = value
        # Add the row the report.
        species_rows.append(row)
    # Sort the outdata list before writing to file.
    # Sort order: Class, species, size and trophy.
    species_rows.sort(key=operator.itemgetter(0, 2, 3, 6))
    #
    # Aggregate values. Same species and trophy but different size classes will be aggregated.
    if aggregate_rows and (self._reporttype == 'counted'):
        oldrow = None
        for row in species_rows:
            row[3] = u''  # Size classes should be removed.
            if oldrow:
                if row[2]:  # Don't aggregate if species is missing.
                    # Iterate over samples.
                    if oldrow[2] == row[2]:  # Column 2: Species.
                        if oldrow[5] == row[5]:  # Column 5: Trophy may differ for Unicells etc.
                            sampleindex = 0
                            while sampleindex < self._numberofsamples:
                                abundcol = 8 + (sampleindex * self._numberofcolumnspersample)
                                volumecol = abundcol + 1
                                if row[abundcol] and oldrow[abundcol]:
                                    row[abundcol] = str(
                                        float(row[abundcol]) + float(oldrow[abundcol]))
                                    oldrow[0] = u'REMOVE AGGREGATED'
                                #
                                if row[volumecol] and oldrow[volumecol]:
                                    row[volumecol] = str(
                                        float(row[volumecol].replace(u',', u'.')) +
                                        float(oldrow[volumecol].replace(u',', u'.')))
                                    oldrow[0] = u'REMOVE AGGREGATED'
                                #
                                sampleindex += 1
            #
            oldrow = row
    #
    # Part 4: Put all parts together and add to result table.
    #
    result_table.append_row(header_row_1)
    result_table.append_row(header_row_2)
    result_table.append_row(header_row_3)
    result_table.append_row(header_row_4)
    result_table.append_row(header_row_5)
    result_table.append_row(header_row_6)
    # NET samples:
    # report_table.append_row([u'Klass', u'Pot. giftig', u'Art', u'Sflag'] + [u'Förekomst'] * numberofsamples)
    # Counted samples:
    if self._reporttype == 'counted':
        if aggregate_rows:
            result_table.append_row(
                [u'Class', u'Pot. toxic', u'Scientific name', u'',
                 u'Sflag', u'Cf', u'Trophic type', u'Unit type'] +
                [u'Units/L', u'Biovolume (mm3/L)'] * self._numberofsamples)  # Two columns per sample.
        else:
            result_table.append_row(
                [u'Class', u'Pot. toxic', u'Scientific name', u'Size class',
                 u'Sflag', u'Cf', u'Trophic type', u'Unit type'] +
                [u'Units/L', u'Biovolume (mm3/L)'] * self._numberofsamples)  # Two columns per sample.
    elif self._reporttype == 'net':
        result_table.append_row(
            [u'Class', u'Pot. toxic', u'Scientific name', u'',
             u'Sflag', u'Cf', u'Trophic type', u'Unit type'] +
            [u'Occurrence'] * self._numberofsamples)  # One column per sample.
    #
    for row in species_rows:
        if row[0] != u'REMOVE AGGREGATED':
            result_table.append_row(row)
def create_tree_dataset(self, dataset_top_node, update_trophic_type):
    """ Build the dataset/visit/sample/variable node tree from the
        parsed sample info and sample rows.

    Note: each file contains exactly one visit and one sample. Parsing
    is driven by self._parsing_info rows of the form
    (node_level, target_key, type, source_key, ...).
    If update_trophic_type is true, trophic type is re-resolved from
    the BVOL list instead of the reported value.
    """
    # Add data to dataset node.
    for parsinginforow in self._parsing_info:
        if parsinginforow[0] == 'dataset':
            if parsinginforow[3] in self._sample_info:
                dataset_top_node.add_data(
                    parsinginforow[1], self._sample_info[parsinginforow[3]])
    # Create visit node and add data. Note: Only one visit in each file.
    visitnode = plankton_core.VisitNode()
    dataset_top_node.add_child(visitnode)
    #
    for parsinginforow in self._parsing_info:
        if parsinginforow[0] == 'visit':
            if parsinginforow[3] in self._sample_info:
                visitnode.add_data(parsinginforow[1],
                                   self._sample_info[parsinginforow[3]])
    # Add visit_year and visit_month, sliced from an ISO-style date
    # string ('YYYY-MM-...'). Best effort: ignored if the date is
    # missing or not sliceable.
    sample_date = visitnode.get_data('sample_date', '')
    try:
        visitnode.add_data('visit_year', sample_date[0:4])
    except:
        pass
    try:
        visitnode.add_data('visit_month', sample_date[5:7])
    except:
        pass
    # Create sample node and add data. Note: Only one sample in each file.
    samplenode = plankton_core.SampleNode()
    visitnode.add_child(samplenode)
    #
    for parsinginforow in self._parsing_info:
        if parsinginforow[0] == 'sample':
            if parsinginforow[3] in self._sample_info:
                samplenode.add_data(parsinginforow[1],
                                    self._sample_info[parsinginforow[3]])
    # Create variable nodes, one per sample row.
    for row in self._sample_rows:
        variablenode = plankton_core.VariableNode()
        samplenode.add_child(variablenode)
        # Get info from sample_info.
        for parsinginforow in self._parsing_info:
            if parsinginforow[0] == 'variable':
                value = self._sample_info.get(parsinginforow[3], '')
                variablenode.add_data(parsinginforow[1], value)
        # Merge data header and row.
        row_dict = dict(zip(self._sample_header, row))
        # Get info from sample_method and add to row_dict.
        if 'method_step' in row_dict:
            if row_dict['method_step'] in self._sample_method_dict:
                method_dict = self._sample_method_dict[row_dict['method_step']]
                row_dict.update(method_dict)
            else:
                print('DEBUG: Key: "' + row_dict['method_step'] +
                      '" not in sample_method_dict.')
        # Get info from row. Row values may overwrite the sample_info
        # values added above (but empty row values don't).
        for parsinginforow in self._parsing_info:
            if parsinginforow[0] == 'variable':
                value = row_dict.get(parsinginforow[3], '')
                # Update trophic_type.
                if parsinginforow[1] == 'trophic_type':
                    if update_trophic_type:
                        scientific_name = row_dict.get('scientific_name', '')
                        size_class = row_dict.get('size_class', '')
                        trophic_type = plankton_core.Species().get_bvol_value(
                            scientific_name, size_class, 'trophic_type')
                        if trophic_type:
                            value = trophic_type  # Use existing if not in local list.
                    # Replace empty with NS=Not specified.
                    if not value:
                        value = 'NS'
                if len(value) > 0:  # Don't overwrite from previous step.
                    variablenode.add_data(parsinginforow[1], value)
        # Copy to new variable nodes for parameters.
        # 'copy_parameter' rows have target 'parameter:unit'.
        for parsinginforow in self._parsing_info:
            if parsinginforow[0] == 'copy_parameter':
                paramunit = parsinginforow[1].split(':')
                parameter = paramunit[0]
                unit = paramunit[1]
                value = row_dict.get(parsinginforow[3], '')
                if len(value.strip()) > 0:
                    self.copy_variable(variablenode, p=parameter, v=value, u=unit)
def _get_plankton_group(self, scientific_name):
    """ To be called from Excel-based parser.

    Return the plankton group for the given taxon name.
    """
    name = str(scientific_name)
    species = plankton_core.Species()
    return species.get_plankton_group_from_taxon_name(name)
def _get_taxon_info_by_key(self, scientific_name, key):
    """ To be called from Excel-based parser.

    Return the requested taxa-list field for the given taxon name.
    """
    name = str(scientific_name)
    taxon_key = str(key)
    return plankton_core.Species().get_taxon_value(name, taxon_key)
def _aggregate_data(self):
    """ Aggregate loaded analysis data to the taxonomic rank selected in
        the GUI, filtered on the selected trophic types and life stages.

    For each sample, variable values (floats only) are summed per
    (taxon, trophic type, stage, sex, parameter, unit) key; the original
    variable nodes are then replaced by one node per aggregated key.
    Errors are reported via the logger and a Qt warning dialog.
    """
    try:
        try:
            # if self._aggregate_rank_list.currentIndex() == 0:
            #     toolbox_utils.Logging().log('Taxon level is not selected. Please try again.')
            #     raise UserWarning('Taxon level is not selected. Please try again.')
            if not self._analysisdata.get_data():
                toolbox_utils.Logging().log(
                    'No data is loaded for analysis. Please try again.')
                raise UserWarning(
                    'No data is loaded for analysis. Please try again.')
            #
            toolbox_utils.Logging().log('Aggregating data...')
            toolbox_utils.Logging().start_accumulated_logging()
            try:
                # Read the aggregation settings from the GUI widgets.
                selected_taxon_rank = str(self._aggregate_rank_list.currentText())
                selected_trophic_type_list = \
                    self._trophic_type_listview.getSelectedDataList()
                selected_trophic_type_text = '-'.join(selected_trophic_type_list)
                selected_lifestage_list = \
                    self._lifestage_listview.getSelectedDataList()
                selected_lifestage_text = '-'.join(selected_lifestage_list)
                # Iterate over copies ([:]) since children are replaced below.
                for visitnode in self._analysisdata.get_data().get_children()[:]:
                    for samplenode in visitnode.get_children()[:]:
                        # Sums keyed on (taxon, trophic_type, stage, sex,
                        # parameter, unit).
                        aggregatedvariables = {}
                        trophic_type_set_dict = {}  ### TEST
                        for variablenode in samplenode.get_children()[:]:
                            newtaxon = None
                            value = variablenode.get_data('value')
                            try:
                                value = value.replace(',', '.').replace(
                                    ' ', '')  # Try/except if already float.
                            except:
                                pass
                            # Use values containing valid float data.
                            try:
                                value = float(value)
                                # Map the variable's taxon to the selected rank.
                                if selected_taxon_rank == 'Biota (all levels)':
                                    newtaxon = 'Biota'  # Biota is above kingdom in the taxonomic hierarchy.
                                elif selected_taxon_rank == 'Plankton group':
                                    newtaxon = plankton_core.Species(
                                    ).get_plankton_group_from_taxon_name(
                                        variablenode.get_data('scientific_name'))
                                elif selected_taxon_rank == 'Kingdom':
                                    newtaxon = plankton_core.Species().get_taxon_value(
                                        variablenode.get_data('scientific_name'),
                                        'taxon_kingdom')
                                elif selected_taxon_rank == 'Phylum':
                                    newtaxon = plankton_core.Species().get_taxon_value(
                                        variablenode.get_data('scientific_name'),
                                        'taxon_phylum')
                                elif selected_taxon_rank == 'Class':
                                    newtaxon = plankton_core.Species().get_taxon_value(
                                        variablenode.get_data('scientific_name'),
                                        'taxon_class')
                                elif selected_taxon_rank == 'Order':
                                    newtaxon = plankton_core.Species().get_taxon_value(
                                        variablenode.get_data('scientific_name'),
                                        'taxon_order')
                                elif selected_taxon_rank == 'Family':
                                    newtaxon = plankton_core.Species().get_taxon_value(
                                        variablenode.get_data('scientific_name'),
                                        'taxon_family')
                                elif selected_taxon_rank == 'Genus':
                                    newtaxon = plankton_core.Species().get_taxon_value(
                                        variablenode.get_data('scientific_name'),
                                        'taxon_genus')
                                elif selected_taxon_rank == 'Species':
                                    newtaxon = plankton_core.Species().get_taxon_value(
                                        variablenode.get_data('scientific_name'),
                                        'taxon_species')
                                elif selected_taxon_rank == 'Scientific name':
                                    newtaxon = variablenode.get_data('scientific_name')
                                # '(from dataset)' variants read the rank
                                # directly from the variable node instead of
                                # resolving via the species list.
                                elif selected_taxon_rank == 'Kingdom (from dataset)':
                                    newtaxon = variablenode.get_data('taxon_kingdom')
                                elif selected_taxon_rank == 'Phylum (from dataset)':
                                    newtaxon = variablenode.get_data('taxon_phylum')
                                elif selected_taxon_rank == 'Class (from dataset)':
                                    newtaxon = variablenode.get_data('taxon_class')
                                elif selected_taxon_rank == 'Order (from dataset)':
                                    newtaxon = variablenode.get_data('taxon_order')
                                elif selected_taxon_rank == 'Family (from dataset)':
                                    newtaxon = variablenode.get_data('taxon_family')
                                elif selected_taxon_rank == 'Genus (from dataset)':
                                    newtaxon = variablenode.get_data('taxon_genus')
                                elif selected_taxon_rank == 'Species (from dataset)':
                                    newtaxon = variablenode.get_data('taxon_species')
                                # If not found in classification, then use scientific_name.
                                # This is valid for taxon with rank above the selected rank.
                                if not newtaxon:
                                    newtaxon = variablenode.get_data('scientific_name')
                                #
                                if not newtaxon:
                                    toolbox_utils.Logging().warning(
                                        'Not match for selected rank. "not-designated" assigned for: ' +
                                        variablenode.get_data('scientific_name'))
                                    newtaxon = 'not-designated'  # Use this if empty.
                                #
                                taxontrophic_type = variablenode.get_data('trophic_type')
                                if taxontrophic_type in selected_trophic_type_list:
                                    taxontrophic_type = selected_trophic_type_text  # Concatenated string of ranks.
                                    ### TEST
                                    if newtaxon not in trophic_type_set_dict:  ### TEST
                                        trophic_type_set_dict[newtaxon] = set()  ### TEST
                                    trophic_type_set_dict[newtaxon].add(
                                        variablenode.get_data('trophic_type'))  ### TEST
                                else:
                                    continue  # Phytoplankton only: Use selected trophic_type only, don't use others.
                                # Collapse selected life stages into one label.
                                stage = variablenode.get_data('stage')
                                sex = variablenode.get_data('sex')
                                checkstage = stage
                                if sex:
                                    checkstage += '/' + sex
                                if checkstage in selected_lifestage_list:
                                    stage = selected_lifestage_text
                                    sex = ''
                                # else:
                                #     continue # Note: Don't skip for zooplankton.
                                # Sum values per aggregation key.
                                parameter = variablenode.get_data('parameter')
                                unit = variablenode.get_data('unit')
                                agg_tuple = (newtaxon, taxontrophic_type, stage,
                                             sex, parameter, unit)
                                if agg_tuple in aggregatedvariables:
                                    aggregatedvariables[agg_tuple] = \
                                        value + aggregatedvariables[agg_tuple]
                                else:
                                    aggregatedvariables[agg_tuple] = value
                            except:
                                # Non-float values are skipped; warn only when
                                # a (non-empty) value was actually present.
                                if variablenode.get_data('value'):
                                    toolbox_utils.Logging().warning(
                                        'Value is not a valid float: ' +
                                        str(variablenode.get_data('value')))
                        # Remove all variables for this sample.
                        samplenode.remove_all_children()
                        # Add the new aggregated variables instead.
                        for variablekeytuple in aggregatedvariables:
                            newtaxon, taxontrophic_type, stage, sex, parameter, unit = \
                                variablekeytuple
                            #
                            newvariable = plankton_core.VariableNode()
                            samplenode.add_child(newvariable)
                            #
                            newvariable.add_data('scientific_name', newtaxon)
                            ### TEST. newvariable.add_data('trophic_type', taxontrophic_type)
                            newvariable.add_data('trophic_type', '-'.join(
                                sorted(trophic_type_set_dict.get(newtaxon, []))))  ### TEST
                            newvariable.add_data('stage', stage)
                            newvariable.add_data('sex', sex)
                            newvariable.add_data('parameter', parameter)
                            newvariable.add_data('unit', unit)
                            newvariable.add_data(
                                'value', aggregatedvariables[variablekeytuple])
                            # Add taxon class, etc. based on taxon name.
                            newvariable.add_data(
                                'taxon_kingdom',
                                plankton_core.Species().get_taxon_value(
                                    newtaxon, 'taxon_kingdom'))
                            newvariable.add_data(
                                'taxon_phylum',
                                plankton_core.Species().get_taxon_value(
                                    newtaxon, 'taxon_phylum'))
                            newvariable.add_data(
                                'taxon_class',
                                plankton_core.Species().get_taxon_value(
                                    newtaxon, 'taxon_class'))
                            newvariable.add_data(
                                'taxon_order',
                                plankton_core.Species().get_taxon_value(
                                    newtaxon, 'taxon_order'))
                            newvariable.add_data(
                                'taxon_family',
                                plankton_core.Species().get_taxon_value(
                                    newtaxon, 'taxon_family'))
                            newvariable.add_data(
                                'taxon_genus',
                                plankton_core.Species().get_taxon_value(
                                    newtaxon, 'taxon_genus'))
                            newvariable.add_data(
                                'taxon_species',
                                plankton_core.Species().get_taxon_value(
                                    newtaxon, 'taxon_species'))
                # Refresh the GUI views.
                self._main_activity.update_viewed_data_and_tabs()
            except UserWarning as e:
                toolbox_utils.Logging().error(
                    'Failed to aggregate data. ' + str(e))
                QtWidgets.QMessageBox.warning(
                    self._main_activity, 'Warning',
                    'Failed to aggregate data. ' + str(e))
        finally:
            # Flush accumulated log rows even on failure.
            toolbox_utils.Logging().log_all_accumulated_rows()
            toolbox_utils.Logging().log('Aggregation of data is done.')
    #
    except Exception as e:
        # Catch-all boundary: log with class name and current line number.
        debug_info = self.__class__.__name__ + ', row ' + str(
            sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
def create_tree_dataset(self, dataset, update_trophic_type):
    """ Build the visit/sample/variable node tree from the imported table.

    Visits and samples are deduplicated via key strings built from
    self._visit_key_fields / self._sample_key_fields; one variable node
    is created per row. If update_trophic_type is true, trophic type is
    re-resolved from the BVOL list instead of the reported value.
    """
    try:
        # Base class must know header for _asText(), etc.
        # self._set_header(self._header)
        # Iterate over rows in imported_table.
        for row in self._rows:
            row_dict = dict(zip(self._header, row))
            # Get or create nodes.
            currentvisit = None
            currentsample = None
            currentvariable = None
            # Check if visit exists. Create or reuse.
            keystring = ''
            delimiter = ''
            for key_field in self._visit_key_fields:
                keystring += delimiter + row_dict.get(key_field, '')
                delimiter = '<+>'
            #
            currentvisit = dataset.get_visit_lookup(keystring)
            if not currentvisit:
                currentvisit = plankton_core.VisitNode()
                dataset.add_child(currentvisit)
                currentvisit.set_id_string(keystring)
            # Check if sample exists. Create or reuse.
            keystring = ''
            delimiter = ''
            for key_field in self._sample_key_fields:
                keystring += delimiter + row_dict.get(key_field, '')
                delimiter = '<+>'
            #
            currentsample = dataset.get_sample_lookup(keystring)
            if not currentsample:
                currentsample = plankton_core.SampleNode()
                currentvisit.add_child(currentsample)
                currentsample.set_id_string(keystring)
            # Add all variables in row.
            currentvariable = plankton_core.VariableNode()
            currentsample.add_child(currentvariable)
            # === Parse row and add fields on nodes. ===
            for parsinginforow in self._parsing_info:
                #
                value = row_dict.get(parsinginforow[3], '')
                # Fix float.
                if parsinginforow[2] == 'float':
                    value = value.replace(',', '.')
                # Calculate some values.
                if parsinginforow[1] == 'visit_month':
                    try:
                        value = row_dict.get('sample_date', '')
                        value = value[5:7]
                    except:
                        pass
                if parsinginforow[1] == 'plankton_group':
                    try:
                        value = row_dict.get('scientific_name', '')
                        value = plankton_core.Species(
                        ).get_plankton_group_from_taxon_name(value)
                    except:
                        pass
                if parsinginforow[1] == 'analysed_by':
                    try:
                        if not value:
                            value = row_dict.get('taxonomist', '')
                    except:
                        pass
                # Update trophic_type.
                # BUGFIX: removed a duplicated nested
                # "if parsinginforow[1] == 'trophic_type':" with the same
                # condition (redundant).
                if parsinginforow[1] == 'trophic_type':
                    if update_trophic_type:
                        scientific_name = row_dict.get('scientific_name', '')
                        size_class = row_dict.get('size_class', '')
                        trophic_type = plankton_core.Species().get_bvol_value(
                            scientific_name, size_class, 'trophic_type')
                        if trophic_type:
                            value = trophic_type  # Use existing if not in local list.
                    # Replace empty with NS=Not specified.
                    if not value:
                        value = 'NS'
                # Add at right level.
                if parsinginforow[0] == 'visit':
                    currentvisit.add_data(parsinginforow[1], value)
                #
                if parsinginforow[0] == 'sample':
                    currentsample.add_data(parsinginforow[1], value)
                #
                if parsinginforow[0] == 'variable':
                    currentvariable.add_data(parsinginforow[1], value)
    #
    except Exception as e:
        # BUGFIX: use the exception itself, not e.args[0], which raises
        # IndexError for exceptions constructed without arguments.
        toolbox_utils.Logging().warning('Failed to parse dataset: %s' % (e,))