def update_viewed_data(self):
    """Refresh the data table view according to the selected view mode.

    View modes (index of self._viewdata_list):
        0: analysis data, 1: filtered data, 2: statistical data,
        3: export/report data, anything else: hide data.
    Also updates the "Number of rows" label below the table.
    """
    try:
        # Clear table.
        self._tableview.setTableModel(self._empty_dataset_table)
        self._refresh_viewed_data_table()
        self._numberofrows_label.setText('Number of rows: 0')
        # Nothing to show if no analysis data is loaded.
        if not self._analysisdata.get_data():
            return
        #
        selectedviewindex = self._viewdata_list.currentIndex()
        if selectedviewindex == 0:
            # View analysis data.
            # Convert from tree model to table model.
            targetdataset = plankton_core.DatasetTable()
            self._analysisdata.get_data().convert_to_table_dataset(targetdataset)
            # View model.
            self._tableview.setTableModel(targetdataset)
            self._refresh_viewed_data_table()
        elif selectedviewindex == 1:
            # View filtered data only.
            self._tab4widget.update_filter()  # Must be done before create_filtered_dataset().
            filtereddataset = self._analysisdata.create_filtered_dataset()
            # Convert from tree model to table model.
            targetdataset = plankton_core.DatasetTable()
            filtereddataset.convert_to_table_dataset(targetdataset)
            # View model.
            self._tableview.setTableModel(targetdataset)
            self._refresh_viewed_data_table()
        elif selectedviewindex == 2:
            # Statistical data.
            self._tableview.setTableModel(self._statisticaldata.get_data())
            self._refresh_viewed_data_table()
        elif selectedviewindex == 3:
            # Export data.
            self._tableview.setTableModel(self._reportdata.get_data())
            self._refresh_viewed_data_table()
        else:
            # Hide data.
            self._tableview.setTableModel(self._empty_dataset_table)
            self._refresh_viewed_data_table()
        # Update the row counter from whatever model is now shown.
        if self._tableview.getTableModel():
            self._numberofrows_label.setText(
                'Number of rows: ' +
                str(self._tableview.getTableModel().get_row_count()))
        else:
            self._numberofrows_label.setText('Number of rows: 0')
        #
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
def _view_dataset(self, index):
    """Show the dataset at the given list index; index <= 0 clears the view.

    Tree datasets are converted to table datasets before being displayed.
    """
    try:
        if index <= 0:
            # Clear table.
            self._tableview.clearModel()
            self._refresh_result_table()
        else:
            # envmonlib:
            selected = app_framework.ToolboxDatasets().get_dataset_by_index(index - 1)
            if isinstance(selected, plankton_core.DatasetTable):
                self._tableview.setTableModel(selected)
                self._refresh_result_table()
            elif isinstance(selected, plankton_core.DatasetNode):
                # Tree dataset must be converted to table dataset before viewing.
                converted = plankton_core.DatasetTable()
                selected.convert_to_table_dataset(converted)
                self._tableview.setTableModel(converted)
                self._refresh_result_table()
            else:
                # TODO: Remove later. Default alternative used for non toolbox_utils.
                self._tableview.setTableModel(selected)
                self._refresh_result_table()
        # Update the row counter below the table.
        if self._tableview.getTableModel():
            self._numberofrows_label.setText(
                'Number of rows: ' +
                str(self._tableview.getTableModel().get_row_count()))
        else:
            self._numberofrows_label.setText('Number of rows: 0')
        #
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
def _create_and_view_report(self, report):
    """Run the given report generator over all checked datasets and preview it."""
    try:
        # Reset and redraw before loading new content.
        self._tableview.setTableModel(self._empty_dataset_table)
        self._tableview.resetModel()  # Model data has changed.
        self._tableview.resizeColumnsToContents()
        # Collect the datasets whose checkbox is checked in the loaded list.
        checked_datasets = []
        for rowindex, dataset in enumerate(
                app_framework.ToolboxDatasets().get_datasets()):
            item = self._loaded_datasets_model.item(rowindex, 0)
            if item.checkState() == QtCore.Qt.Checked:
                checked_datasets.append(dataset)
        # Generate the report into a fresh table dataset.
        result_table = plankton_core.DatasetTable()
        report.create_report(
            checked_datasets,
            result_table,
            # show_debug_info = self._debuginfo_checkbox.checkState(),
            aggregate_rows=self._aggregate_checkbox.isChecked())
        # Preview result.
        self._tableview.setTableModel(result_table)
        self._tableview.resetModel()  # Model data has changed.
        self._tableview.resizeColumnsToContents()
        #
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(
            sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
def __init__(self, name, parentwidget): """ """ # Create object containing analysis data. self._analysisdata = plankton_core.AnalysisData() self._statisticaldata = plankton_core.StatisticalData() self._reportdata = plankton_core.ReportData() # Filename used when saving data to file. self._lastuseddirectory = '.' # Create tab widgets. self._tab1widget = app_activities.analyse_datasets_tab1.AnalyseDatasetsTab1() self._tab2widget = app_activities.analyse_datasets_tab2.AnalyseDatasetsTab2() self._tab3widget = app_activities.analyse_datasets_tab3.AnalyseDatasetsTab3() self._tab4widget = app_activities.analyse_datasets_tab4.AnalyseDatasetsTab4() self._tab5widget = app_activities.analyse_datasets_tab5.AnalyseDatasetsTab5() self._tab6widget = app_activities.analyse_datasets_tab6.AnalyseDatasetsTab6() self._tab7widget = app_activities.analyse_datasets_tab7.AnalyseDatasetsTab7() self._tab8widget = app_activities.analyse_datasets_tab8.AnalyseDatasetsTab8() # self._tab1widget.set_main_activity(self) self._tab2widget.set_main_activity(self) self._tab3widget.set_main_activity(self) self._tab4widget.set_main_activity(self) self._tab5widget.set_main_activity(self) self._tab6widget.set_main_activity(self) self._tab7widget.set_main_activity(self) self._tab8widget.set_main_activity(self) # Initialize parent. super(AnalyseDatasetsActivity, self).__init__(name, parentwidget) # self._empty_dataset_table = plankton_core.DatasetTable()
def __init__(self, parent=None, filter_column_index=None):
    """Table view backed by a ToolboxTableModel.

    Parameters:
        parent: optional parent widget passed to QTableView.
        filter_column_index: when given, a QSortFilterProxyModel filtering
            on that column is inserted between the model and the view.
    """
    QtWidgets.QTableView.__init__(self, parent)
    # Underlying model starts out wrapping an empty table dataset.
    self._tablemodel = ToolboxTableModel(
        modeldata=plankton_core.DatasetTable())
    self._selectionmodel = None  # Created below.
    # Connect models.
    if filter_column_index is None:
        self.setModel(self._tablemodel)
        # Selection model must be installed after setModel().
        self._selectionmodel = QtCore.QItemSelectionModel(self._tablemodel)
        self.setSelectionModel(self._selectionmodel)
        self.resizeColumnsToContents()
    else:
        """ Use this method if the default model should be replaced by a filtered model. """
        # Filter proxy model.
        self.filterproxymodel = QtCore.QSortFilterProxyModel(self)
        self.filterproxymodel.setSourceModel(self._tablemodel)
        self.filterproxymodel.setFilterKeyColumn(filter_column_index)
        self.setModel(self.filterproxymodel)
        # Selection model tracks the proxy, not the source model.
        self._selectionmodel = QtCore.QItemSelectionModel(
            self.filterproxymodel)
        self.setSelectionModel(self._selectionmodel)
        self.resizeColumnsToContents()
    # Default setup for tables.
    self.setAlternatingRowColors(True)
    self.setHorizontalScrollMode(
        QtWidgets.QAbstractItemView.ScrollPerPixel)
    #self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
    self.setSelectionMode(QtWidgets.QAbstractItemView.ContiguousSelection)
def __init__(self, name, parentwidget):
    """Initialize the Create Reports activity."""
    self._parent = parentwidget
    # Last used file name/directory; start empty.
    self._lastusedphytowinfilename = ''
    self._lastuseddirectory = ''
    # Widgets and state created later by the content-setup methods.
    self._tableview = None
    self._tabledataset = None
    self._report_list = None
    # Initialize parent.
    super(CreateReportsActivity, self).__init__(name, parentwidget)
    # Listen for changes in the toolbox dataset list.
    app_framework.ToolboxDatasets().datasetListChanged.connect(self.update)
    # Placeholder model used when the table view should appear empty.
    self._empty_dataset_table = plankton_core.DatasetTable()
def _content_table(self):
    """Build and return the widget holding the editable sample table."""
    widget = QtWidgets.QWidget()
    # widget = QtWidgets.QGroupBox('Plankton counter - sample editor', self._parentwidget)
    # Editable table view backed by an (initially empty) table dataset.
    self._sampletable_editable = app_framework.ToolboxEditableQTableView()
    self._sampletable_table = plankton_core.DatasetTable()
    self._sampletable_editable.setTableModel(self._sampletable_table)
    # Let the table fill the whole widget.
    vbox = QtWidgets.QVBoxLayout()
    vbox.addWidget(self._sampletable_editable)
    widget.setLayout(vbox)
    #
    return widget
def import_text_file(self, filename, textfile_encoding):
    """Read a text file and return its content as a parsed tree dataset."""
    # Select import format.
    formatparser = plankton_core.FormatSingleFile()
    # Phase 1: Read file into a temporary table.
    sheetname = None  # NOTE(review): unused for text files (no sheets).
    header_row_index = 1
    first_data_row_index = 2
    # Let 'info' rows from the parser definition override the defaults.
    for rowdict in self._importrows:
        if rowdict['node'] != 'info':
            continue
        if rowdict['key'] == 'header_row':
            header_row_index = int(
                float(rowdict.get('command', '1').replace(',', '.')))
            if header_row_index:
                header_row_index -= 1  # 1-based -> 0-based.
        if rowdict['key'] == 'first_data_row':
            first_data_row_index = int(
                float(rowdict.get('command', '2').replace(',', '.')))
            if first_data_row_index:
                first_data_row_index -= 1  # 1-based -> 0-based.
    tablefilereader = toolbox_utils.TableFileReader(
        text_file_name=filename,
        encoding=textfile_encoding,
        header_row=header_row_index,
        data_rows_from=first_data_row_index)
    tabledataset = plankton_core.DatasetTable()
    tabledataset.set_header(tablefilereader.header())
    for row in tablefilereader.rows():
        tabledataset.append_row(row)
    #
    toolbox_utils.Logging().info(
        'Loading file. Header content: ' + str(tabledataset.get_header()))
    # Phase 2: Parse the table and create a corresponding tree structure.
    targetdataset = plankton_core.DatasetNode()
    targetdataset.set_dataset_parser_rows(self._importrows)
    targetdataset.set_export_table_columns(self._columnsinfo)
    formatparser.parse_table_dataset(targetdataset, tabledataset)
    # Phase 3: Reorganize between nodes in tree structure.
    formatparser.reorganize_dataset()
    # Phase 4: Reformat fields in tree structure.
    formatparser.reformat_dataset()
    # Phase 5: Perform basic screening.
    formatparser.basic_screening()
    #
    return targetdataset
def _load_parser_info(self):
    """Load import/export column definitions from the Excel parser file."""
    # Read dataset parser.
    tablefilereader = toolbox_utils.TableFileReader(
        excel_file_name=self._parser_file_path)
    tabledata = plankton_core.DatasetTable()
    tabledata.set_header(tablefilereader.header())
    for row in tablefilereader.rows():
        tabledata.append_row(row)
    # Create import info.
    if self._import_column:
        # self.addMetadata('Import column', self._import_column)
        self._importrows = []
        for rowindex in range(tabledata.get_row_count()):
            command = tabledata.get_data_item_by_column_name(
                rowindex, self._import_column)
            if command:
                # Columns 0-2 hold node level, key and view format.
                self._importrows.append({
                    'node': tabledata.get_data_item(rowindex, 0),
                    'key': tabledata.get_data_item(rowindex, 1),
                    'view_format': tabledata.get_data_item(rowindex, 2),
                    'command': command,
                })
        #
        self.set_dataset_parser_rows(self._importrows)
    # Create export info.
    if self._export_column:
        # self.addMetadata('Export column', self._export_column)
        self._columnsinfo = []
        for rowindex in range(tabledata.get_row_count()):
            exportheader = tabledata.get_data_item_by_column_name(
                rowindex, self._export_column)
            if exportheader:
                nodelevel = tabledata.get_data_item(rowindex, 0)
                # 'info' rows are parser directives, not exportable columns.
                if nodelevel != 'info':
                    self._columnsinfo.append({
                        'header': exportheader,
                        'node': nodelevel,
                        'key': tabledata.get_data_item(rowindex, 1),
                        'view_format': tabledata.get_data_item(rowindex, 2),
                    })
def _create_content_labels(self):
    """Build the widget with title/axis editors and the per-plot labels table."""
    widget = QtWidgets.QWidget()
    # Active widgets and connections.
    self._title_edit = QtWidgets.QLineEdit()
    self._xlabel_edit = QtWidgets.QLineEdit()
    self._xtype_edit = QtWidgets.QLineEdit()
    self._xformat_edit = QtWidgets.QLineEdit()
    self._ylabel_edit = QtWidgets.QLineEdit()
    self._ytype_edit = QtWidgets.QLineEdit()
    self._yformat_edit = QtWidgets.QLineEdit()
    self._zlabel_edit = QtWidgets.QLineEdit()
    self._ztype_edit = QtWidgets.QLineEdit()
    self._zformat_edit = QtWidgets.QLineEdit()
    # Editable per-plot labels table with one empty starter row.
    self._plotlabels_editable = app_framework.ToolboxEditableQTableView()
    self._plotlabels_table = plankton_core.DatasetTable()
    self._plotlabels_table.set_header(['Plot name ', 'X-label ', 'Y-label ', 'Z-label '])
    self._plotlabels_table.append_row(['', '', '', ''])
    self._plotlabels_editable.setTableModel(self._plotlabels_table)
    # Buttons.
    self._labelsreset_button = QtWidgets.QPushButton('Reset')
    self._labelsreset_button.clicked.connect(self._reset_labels)
    self._labelsapply_button = QtWidgets.QPushButton('Apply')
    self._labelsapply_button.clicked.connect(self._apply_labels)
    # Layout.
    title_form = QtWidgets.QFormLayout()
    title_form.addRow('Title: ', self._title_edit)
    label_form = QtWidgets.QFormLayout()
    label_form.addRow('X-label:', self._xlabel_edit)
    label_form.addRow('Y-label:', self._ylabel_edit)
    label_form.addRow('Z-label:', self._zlabel_edit)
    type_form = QtWidgets.QFormLayout()
    type_form.addRow('X-type:', self._xtype_edit)
    type_form.addRow('Y-type:', self._ytype_edit)
    type_form.addRow('Z-type:', self._ztype_edit)
    format_form = QtWidgets.QFormLayout()
    format_form.addRow('X-format:', self._xformat_edit)
    format_form.addRow('Y-format:', self._yformat_edit)
    format_form.addRow('Z-format:', self._zformat_edit)
    # Row 1: title only.
    title_row = QtWidgets.QHBoxLayout()
    title_row.addLayout(title_form)
    title_row.addStretch()
    # Row 2: labels, types and formats side by side.
    forms_row = QtWidgets.QHBoxLayout()
    forms_row.addLayout(label_form)
    forms_row.addLayout(type_form)
    forms_row.addLayout(format_form)
    forms_row.addStretch()
    # Row 3: the editable plots table.
    table_row = QtWidgets.QHBoxLayout()
    table_row.addWidget(QtWidgets.QLabel(' '))
    table_row.addWidget(self._plotlabels_editable)
    table_row.addStretch()
    # Row 4: buttons.
    button_row = QtWidgets.QHBoxLayout()
    # button_row.addStretch(5)
    button_row.addWidget(self._labelsreset_button)
    button_row.addWidget(self._labelsapply_button)
    button_row.addStretch(10)
    #
    layout = QtWidgets.QVBoxLayout()
    layout.addWidget(QtWidgets.QLabel('Graph:'))
    layout.addLayout(title_row)
    layout.addLayout(forms_row)
    layout.addWidget(QtWidgets.QLabel('Plots:'))
    layout.addLayout(table_row, 10)
    layout.addLayout(button_row)
    #
    widget.setLayout(layout)
    #
    return widget
def _extract_values(self, dataset, reportdata, selectedparameter,
                    split_on_year=False,
                    split_on_season=False,
                    split_on_month=False,
                    split_on_station=False,
                    split_on_visit=False,
                    split_on_depth=False,
                    split_on_taxon=False):
    """Extract raw values for one parameter into a column-per-split-key table.

    Walks the visit/sample/variable tree in *dataset*, keeps only variables
    whose "parameter (unit)" string equals *selectedparameter*, and groups
    their values under a colon-separated split key built from the enabled
    split_on_* flags. The result is written into *reportdata* as a table:
    8 header rows (parameter/year/season/month/station/visit/depth/taxon),
    a 'Values' marker row, then one value per row per key column.
    Errors are logged, not raised.
    """
    try:
        # Target list: split key -> list of values.
        data_dict = {}
        # Create a dataset (table, not tree).
        tabledata = plankton_core.DatasetTable()
        reportdata.set_data(tabledata)
        # Extract values.
        yearkey = ''
        monthkey = ''
        seasonkey = ''
        stationkey = ''
        visitkey = ''
        depthkey = ''
        taxonkey = ''
        #
        for visitnode in dataset.get_children():
            # Assumes sample_date is 'YYYY-MM-...' — slices below rely on it.
            visitdate = visitnode.get_data('sample_date')
            visitstation = visitnode.get_data('station_name')
            visitvisit = visitstation + ' ' + visitdate
            visityear = str(visitdate[0:4])
            visitmonth = str(visitdate[5:7])
            visitseason = ''
            if visitmonth in ['12', '01', '02']:
                visitseason = 'Dec-Jan-Feb'
            elif visitmonth in ['03', '04', '05']:
                visitseason = 'Mar-Apr-May'
            elif visitmonth in ['06', '07', '08']:
                visitseason = 'Jun-Jul-Aug'
            elif visitmonth in ['09', '10', '11']:
                visitseason = 'Sep-Oct-Nov'
            #
            for samplenode in visitnode.get_children():
                sample_min_depth_m = str(
                    samplenode.get_data('sample_min_depth_m'))
                sample_max_depth_m = str(
                    samplenode.get_data('sample_max_depth_m'))
                sampleminmaxdepth = sample_min_depth_m + '-' + sample_max_depth_m
                # Iterate over sample content.
                # Note: Create a level between sample and variabel.
                grouped_size_lifestages = {}
                for variablenode in samplenode.get_children():
                    group_key = variablenode.get_data('scientific_name')
                    group_key += ':' + variablenode.get_data(
                        'size_class')  # Specific for phytoplankton.
                    group_key += ':' + variablenode.get_data(
                        'stage')  # Specific for zooplankton.
                    group_key += ':' + variablenode.get_data(
                        'sex')  # Specific for zooplankton.
                    if group_key not in grouped_size_lifestages:
                        grouped_size_lifestages[group_key] = [
                        ]  # Starts a new group.
                    grouped_size_lifestages[group_key].append(variablenode)
                # Get variables from the new set of groups.
                for group_key in grouped_size_lifestages.keys():
                    #
                    for variablenode in grouped_size_lifestages[group_key]:
                        variabletaxon = variablenode.get_data(
                            'scientific_name')
                        # Parameters.
                        parameter = variablenode.get_data('parameter')
                        unit = variablenode.get_data('unit')
                        parameternadunit = parameter + ' (' + unit + ')'
                        if parameternadunit == selectedparameter:
                            # Build split key. A ':' is always appended for
                            # each position so split(':') yields 7 parts.
                            splitkey = ''
                            if split_on_year:
                                splitkey += visityear
                            splitkey += ':'
                            if split_on_season:
                                splitkey += visitseason
                            splitkey += ':'
                            if split_on_month:
                                splitkey += visitmonth
                            splitkey += ':'
                            if split_on_station:
                                splitkey += visitstation
                            splitkey += ':'
                            if split_on_visit:
                                splitkey += visitvisit
                            splitkey += ':'
                            if split_on_depth:
                                splitkey += sampleminmaxdepth
                            splitkey += ':'
                            if split_on_taxon:
                                splitkey += variabletaxon
                            # Add data.
                            if splitkey not in data_dict:
                                data_dict[splitkey] = []
                            data_dict[splitkey].append(
                                variablenode.get_data('value'))
        # Create empty result table: one column per key plus a label column.
        resulttable = []
        emptyrow = [''] * (1 + len(data_dict))  # Empty row.
        maxlength = 0
        for key in sorted(data_dict.keys()):
            datalistlength = len(data_dict[key])
            if datalistlength > maxlength:
                maxlength = datalistlength
        for index in range(maxlength + 9):  # Header rows and data rows.
            resulttable.append(emptyrow[:])  # Clone.
        # Headers, multiple rows.
        resulttable[0][0] = 'Parameter:'
        resulttable[1][0] = 'Year:'
        resulttable[2][0] = 'Season:'
        resulttable[3][0] = 'Month:'
        resulttable[4][0] = 'Station:'
        resulttable[5][0] = 'Sampling event:'
        resulttable[6][0] = 'Depth:'
        resulttable[7][0] = 'Scientific name:'
        # Calculate result
        for colindex, key in enumerate(sorted(data_dict.keys())):
            # Keys.
            keysplit = key.split(':')
            yearkey = keysplit[0]
            seasonkey = keysplit[1]
            monthkey = keysplit[2]
            stationkey = keysplit[3]
            visitkey = keysplit[4]
            depthkey = keysplit[5]
            taxonkey = keysplit[6]
            #
            resulttable[0][colindex + 1] = selectedparameter
            resulttable[1][colindex + 1] = yearkey
            resulttable[2][colindex + 1] = seasonkey
            resulttable[3][colindex + 1] = monthkey
            resulttable[4][colindex + 1] = stationkey
            resulttable[5][colindex + 1] = visitkey
            resulttable[6][colindex + 1] = depthkey
            resulttable[7][colindex + 1] = taxonkey
            # Data.
            resulttable[8][colindex + 1] = 'Values'
            for rowindex, value in enumerate(data_dict[key]):
                resulttable[rowindex + 9][colindex + 1] = value
        # Header. Intentionally empty: the real headers live in the rows above.
        header = emptyrow[:]
        tabledata.set_header(header)
        # Rows.
        for row in resulttable:
            tabledata.append_row(row)
        #
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(
            sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
def _calc_statistics(self, dataset, reportdata, selectedparameter,
                     split_on_year=False,
                     split_on_season=False,
                     split_on_month=False,
                     split_on_station=False,
                     split_on_visit=False,
                     split_on_depth=False,
                     split_on_taxon=False):
    """Compute per-split-key statistics for one parameter into *reportdata*.

    Walks the visit/sample/variable tree in *dataset*, keeps variables whose
    "parameter (unit)" string equals *selectedparameter*, groups their values
    by a colon-separated key built from the enabled split_on_* flags, and
    writes one row per key with mean/median/std/min/max/count (via numpy).
    Errors are logged, not raised.

    Fix: the inner except-handler previously read `len(data_list)`, but if
    the float conversion itself failed `data_list` was never bound, raising
    NameError and masking the real error. It now counts the raw value list.
    """
    try:
        # Target list: split key -> list of raw values.
        data_dict = {}
        # Create a dataset (table, not tree).
        tabledata = plankton_core.DatasetTable()
        reportdata.set_data(tabledata)
        # Header for result table.
        header_row = []
        header_row.append('Parameter')
        if split_on_year:
            header_row.append('Year')
        if split_on_season:
            header_row.append('Season')
        if split_on_month:
            header_row.append('Month')
        if split_on_station:
            header_row.append('Station')
        if split_on_visit:
            header_row.append('Sampling event')
        if split_on_depth:
            header_row.append('Depth')
        if split_on_taxon:
            header_row.append('Scientific name')
        header_row.append('Mean')
        header_row.append('Median')
        header_row.append('Std. dev.')
        header_row.append('Min')
        header_row.append('Max')
        header_row.append('Counted values')
        tabledata.set_header(header_row)
        # Extract values.
        yearkey = ''
        monthkey = ''
        seasonkey = ''
        stationkey = ''
        visitkey = ''
        depthkey = ''
        taxonkey = ''
        #
        for visitnode in dataset.get_children():
            # Assumes sample_date is 'YYYY-MM-...' — slices below rely on it.
            visitdate = visitnode.get_data('sample_date')
            visitstation = visitnode.get_data('station_name')
            visitvisit = visitstation + ' ' + visitdate
            visityear = str(visitdate[0:4])
            visitmonth = str(visitdate[5:7])
            visitseason = ''
            if visitmonth in ['12', '01', '02']:
                visitseason = 'Dec-Jan-Feb'
            elif visitmonth in ['03', '04', '05']:
                visitseason = 'Mar-Apr-May'
            elif visitmonth in ['06', '07', '08']:
                visitseason = 'Jun-Jul-Aug'
            elif visitmonth in ['09', '10', '11']:
                visitseason = 'Sep-Oct-Nov'
            #
            for samplenode in visitnode.get_children():
                sample_min_depth_m = str(
                    samplenode.get_data('sample_min_depth_m'))
                sample_max_depth_m = str(
                    samplenode.get_data('sample_max_depth_m'))
                sampleminmaxdepth = sample_min_depth_m + '-' + sample_max_depth_m
                # Iterate over sample content.
                # Note: Create a level between sample and variabel.
                grouped_size_lifestages = {}
                for variablenode in samplenode.get_children():
                    group_key = variablenode.get_data('scientific_name')
                    group_key += ':' + variablenode.get_data(
                        'size_class')  # Specific for phytoplankton.
                    group_key += ':' + variablenode.get_data(
                        'stage')  # Specific for zooplankton.
                    group_key += ':' + variablenode.get_data(
                        'sex')  # Specific for zooplankton.
                    if group_key not in grouped_size_lifestages:
                        grouped_size_lifestages[group_key] = [
                        ]  # Starts a new group.
                    grouped_size_lifestages[group_key].append(variablenode)
                # Get variables from the new set of groups.
                for group_key in grouped_size_lifestages.keys():
                    #
                    for variablenode in grouped_size_lifestages[group_key]:
                        variabletaxon = variablenode.get_data(
                            'scientific_name')
                        # Parameters.
                        parameter = variablenode.get_data('parameter')
                        unit = variablenode.get_data('unit')
                        parameternadunit = parameter + ' (' + unit + ')'
                        if parameternadunit == selectedparameter:
                            # Build split key. A ':' is always appended for
                            # each position so split(':') yields 7 parts.
                            splitkey = ''
                            if split_on_year:
                                splitkey += visityear
                            splitkey += ':'
                            if split_on_season:
                                splitkey += visitseason
                            splitkey += ':'
                            if split_on_month:
                                splitkey += visitmonth
                            splitkey += ':'
                            if split_on_station:
                                splitkey += visitstation
                            splitkey += ':'
                            if split_on_visit:
                                splitkey += visitvisit
                            splitkey += ':'
                            if split_on_depth:
                                splitkey += sampleminmaxdepth
                            splitkey += ':'
                            if split_on_taxon:
                                splitkey += variabletaxon
                            # Add data.
                            if splitkey not in data_dict:
                                data_dict[splitkey] = []
                            data_dict[splitkey].append(
                                variablenode.get_data('value'))
        # Calculate result
        for key in sorted(data_dict.keys()):
            # Keys.
            keysplit = key.split(':')
            yearkey = keysplit[0]
            seasonkey = keysplit[1]
            monthkey = keysplit[2]
            stationkey = keysplit[3]
            visitkey = keysplit[4]
            depthkey = keysplit[5]
            taxonkey = keysplit[6]
            #
            rawvalues = data_dict[key]
            try:
                # data_list = map(float, data_dict[key]) # Not working in Python3.
                # data_list = list(map(float, data_dict[key])) # ALT 1.
                data_list = [float(x) for x in rawvalues]  # ALT 2. list comprehensions.
                # Calculate result by use of numpy.
                # Use float64 since we are using both small and big unit.
                meanvalue = numpy.mean(data_list, dtype=numpy.float64)
                medianvalue = numpy.median(data_list)
                stddevvalue = numpy.std(data_list, dtype=numpy.float64)
                minvalue = numpy.nanmin(data_list)
                maxvalue = numpy.nanmax(data_list)
                countedvalues = len(data_list)
            except Exception as e:
                meanvalue = '<ERROR>'
                medianvalue = '<ERROR>'
                stddevvalue = '<ERROR>'
                minvalue = '<ERROR>'
                maxvalue = '<ERROR>'
                # BUG FIX: count the raw values, not data_list — data_list is
                # unbound if the float conversion above was what failed.
                countedvalues = len(rawvalues)
                # print(', '.join(map(str, rawvalues))) # Print data.
                print('Error in calc statistics: ' + str(e))  # Print exception.
            # Create row.
            report_row = []
            report_row.append(selectedparameter)
            if split_on_year:
                report_row.append(yearkey)
            if split_on_season:
                report_row.append(seasonkey)
            if split_on_month:
                report_row.append(monthkey)
            if split_on_station:
                report_row.append(stationkey)
            if split_on_visit:
                report_row.append(visitkey)
            if split_on_depth:
                report_row.append(depthkey)
            if split_on_taxon:
                report_row.append(taxonkey)
            report_row.append(meanvalue)
            report_row.append(medianvalue)
            report_row.append(stddevvalue)
            report_row.append(minvalue)
            report_row.append(maxvalue)
            report_row.append(countedvalues)
            #
            tabledata.append_row(report_row)
        #
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(
            sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
def _create_report_zooplankton_abundance_length_median_and_mean(self, dataset, reportdata):
    """Build a zooplankton report with one row per taxon/stage/sex group.

    Walks the visit/sample/variable tree in *dataset*, groups variables by
    scientific name + stage + sex, collects each group's abundance (ind/m2
    and ind/m3) and length (median/mean) parameters, and writes the rows
    into *reportdata* as a table. Errors are logged, not raised.
    """
    try:
        # Create a dataset (table, not tree).
        tabledata = plankton_core.DatasetTable()
        reportdata.set_data(tabledata)
        # Header.
        header_row = []
        header_row.append('Station name')
        header_row.append('Date')
        header_row.append('Sample min depth')
        header_row.append('Sample max depth')
        header_row.append('Scientific name')
        header_row.append('Stage')
        header_row.append('Sex')
        header_row.append('Abundance (ind/m2)')
        header_row.append('Abundance (ind/m3)')
        header_row.append('Length (median)')
        header_row.append('Length (mean)')
        tabledata.set_header(header_row)
        # Extract values for the plot.
        date = '-'
        station_name = '-'
        sample_min_depth_m = '-'
        sample_max_depth_m = '-'
        for visitnode in dataset.get_children():
            station_name = visitnode.get_data('station_name')
            date = visitnode.get_data('sample_date')
            for samplenode in visitnode.get_children():
                sample_min_depth_m = samplenode.get_data('sample_min_depth_m')
                sample_max_depth_m = samplenode.get_data('sample_max_depth_m')
                # Iterate over sample content.
                # Note: Create a level between sample and variabel.
                grouped_lifestages = {}
                for variablenode in samplenode.get_children():
                    group_key = variablenode.get_data('scientific_name')
                    group_key += ':' + variablenode.get_data('stage')  # Specific for zooplankton.
                    group_key += ':' + variablenode.get_data('sex')  # Specific for zooplankton.
                    if group_key not in grouped_lifestages:
                        grouped_lifestages[group_key] = []  # Starts a new group.
                    grouped_lifestages[group_key].append(variablenode)
                # Get variables from the new set of groups.
                for group_key in grouped_lifestages.keys():
                    # This should be available in each group.
                    scientific_name = '-'
                    stage = '-'
                    sex = '-'
                    abundance_ind_m2 = '-'
                    abundance_ind_m3 = '-'
                    length_median = '-'
                    length_mean = '-'
                    #
                    for variablenode in grouped_lifestages[group_key]:
                        # This should be same for all variables in the group.
                        scientific_name = variablenode.get_data('scientific_name')
                        stage = variablenode.get_data('stage')
                        sex = variablenode.get_data('sex')
                        # Parameters: pick out the four of interest; any
                        # other parameter in the group is ignored.
                        parameter = variablenode.get_data('parameter')
                        unit = variablenode.get_data('unit')
                        if (parameter == 'Abundance') and (unit == 'ind/m2'):
                            abundance_ind_m2 = variablenode.get_data('value')
                        if (parameter == 'Abundance') and (unit == 'ind/m3'):
                            abundance_ind_m3 = variablenode.get_data('value')
                        if parameter == 'Length (median)':
                            length_median = variablenode.get_data('value')
                        if parameter == 'Length (mean)':
                            length_mean = variablenode.get_data('value')
                    # Organism group is finished. Add row to report.
                    report_row = []
                    report_row.append(station_name)
                    report_row.append(date)
                    report_row.append(sample_min_depth_m)
                    report_row.append(sample_max_depth_m)
                    report_row.append(scientific_name)
                    report_row.append(stage)
                    report_row.append(sex)
                    report_row.append(abundance_ind_m2)
                    report_row.append(abundance_ind_m3)
                    report_row.append(length_median)
                    report_row.append(length_mean)
                    #
                    tabledata.append_row(report_row)
        #
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
def create_primer_report(self, parameters, datasets, reportdata):
    """Create a PRIMER-style report: taxa as rows, sample/parameter columns.

    Layout: columns 0-4 hold taxon info (name, size class, trophic type,
    stage, sex), column 5 is a label column, then one column per
    (sample, parameter) pair. Four metadata header rows come first, then a
    column-title row, then the sorted taxon rows. The result is written
    into *reportdata*. Errors are logged, not raised.

    Fixes: taxon rows were allocated with numberofcolumns * numberofparameters
    cells, making them longer than the header (filled cells only ever reach
    index numberofcolumns - 1); they now match the header width. None-checks
    use `is None`.
    """
    try:
        # Create a dataset (table, not tree).
        tabledata = plankton_core.DatasetTable()
        reportdata.set_data(tabledata)
        # Check indata.
        if parameters is None:
            raise UserWarning('Parameters are missing.')
        if datasets is None:
            raise UserWarning('Datasets are missing.')
        #
        # Calculate number of samples and columns.
        numberofsamples = 0
        for dataset in datasets:
            for visit in dataset.get_children():
                numberofsamples += len(visit.get_children())
        numberofparameters = len(parameters)
        numberofcolumns = 6 + (numberofsamples * numberofparameters)
        # Set header. Note: Normal header is not used.
        tabledata.set_header([''] * numberofcolumns)
        #
        # Part 1: Create header rows with columns for sample related data.
        #
        header_row_1 = [''] * numberofcolumns
        header_row_2 = [''] * numberofcolumns
        header_row_3 = [''] * numberofcolumns
        header_row_4 = [''] * numberofcolumns
        header_row_1[5] = 'Station name:'
        header_row_2[5] = 'Date:'
        header_row_3[5] = 'Sample min depth:'
        header_row_4[5] = 'Sample max depth:'
        #
        # Iterate over file to create column headers.
        sampleindex = 0
        for dataset in datasets:
            for visit in dataset.get_children():
                for sample in visit.get_children():
                    # First column of this sample's parameter group.
                    col = 6 + (sampleindex * numberofparameters)
                    header_row_1[col] = visit.get_data('station_name')
                    header_row_2[col] = visit.get_data('sample_date')
                    header_row_3[col] = sample.get_data('sample_min_depth_m')
                    header_row_4[col] = sample.get_data('sample_max_depth_m')
                    sampleindex += 1
        #
        # Part 2: Iterate over all rows in all samples. Create a dictionary with
        # species as keys and lists of abundances for each sample.
        # Size class included with ':' as delimiter.
        # Example: "Incertae sedis:1": [1234.5, 1234.5, 1234.5, 1234.5]
        taxon_values_dict = {}
        # Iterate through datasets.
        sampleindex = 0
        for dataset in datasets:
            for visit in dataset.get_children():
                for sample in visit.get_children():
                    for variable in sample.get_children():
                        scientific_name = variable.get_data('scientific_name')
                        size_class = variable.get_data('size_class')
                        trophic_type = variable.get_data('trophic_type')
                        stage = variable.get_data('stage')
                        sex = variable.get_data('sex')
                        #
                        taxon_key = (str(scientific_name) + ':' + str(size_class) + ':' +
                                     str(trophic_type) + ':' + str(stage) + ':' + str(sex))
                        if taxon_key not in taxon_values_dict:
                            # Add new value list, one slot per (sample, parameter).
                            taxon_values_dict[taxon_key] = [''] * (numberofsamples * numberofparameters)
                        #
                        for paramindex, param in enumerate(parameters):
                            parameter = variable.get_data('parameter')
                            unit = variable.get_data('unit')
                            value = variable.get_data('value')
                            #
                            if param == (parameter + ' (' + unit + ')'):
                                taxon_values_dict[taxon_key][sampleindex * numberofparameters + paramindex] = value
                    #
                    sampleindex += 1
        #
        # Part 3: Create the species rows in the report.
        #
        taxon_rows = []
        # Iterate over species in the dictionary.
        for taxon_key in taxon_values_dict.keys():
            #
            taxon_key_parts = taxon_key.split(':')
            scientific_name = taxon_key_parts[0]
            size_class = taxon_key_parts[1]
            trophic_type = taxon_key_parts[2]
            stage = taxon_key_parts[3]
            sex = taxon_key_parts[4]
            # BUG FIX: row width must equal the header width. It was
            # numberofcolumns * numberofparameters, which appended rows
            # longer than the header; cells past index numberofcolumns - 1
            # were never written.
            row = [''] * numberofcolumns
            row[0] = scientific_name
            row[1] = size_class
            row[2] = trophic_type
            row[3] = stage
            row[4] = sex
            for index, value in enumerate(taxon_values_dict[taxon_key]):
                row[6 + index] = value
            # row[6 + (index * numberofparameters) + 1] = values[1]
            # row[6 + (index * numberofparameters) + 1] = values[2]
            # Add the row the report.
            taxon_rows.append(row)
        # Sort the outdata list before writing to file.
        # Sort order:
        # - 0: scientific_name
        # - 1: size_class
        # - 2: trophic_type
        # - 3: stage
        # - 4: sex
        taxon_rows.sort(key=operator.itemgetter(0, 1, 2, 3, 4))
        #
        # Part 4: Put all parts together and add to result table.
        #
        tabledata.append_row(header_row_1)
        tabledata.append_row(header_row_2)
        tabledata.append_row(header_row_3)
        tabledata.append_row(header_row_4)
        tabledata.append_row(['Scientific name', 'Size class', 'Trophic type', 'Stage', 'Sex', ''] + \
                             parameters * numberofsamples)  # Multiple columns per sample.
        #
        for row in taxon_rows:
            tabledata.append_row(row)
        #
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))