def _create_story(self):
    """
    Assemble the 'References' section of the report.

    Returns a list of reportlab flowables: a portrait page break, a
    shaded section-title banner, and one paragraph per citation.
    """
    styles = report_styles.get_report_styles()

    # Start this section on a fresh portrait-oriented page
    story = self._make_page_break([], self.PORTRAIT)

    # Section banner: a single shaded table cell holding the title
    banner_para = p.Paragraph(
        '<strong>References</strong>', styles['section_style'])
    banner = p.Table([[banner_para]], colWidths=[7.5 * u.inch])
    banner.setStyle(p.TableStyle([
        ('TOPPADDING', (0, 0), (-1, -1), 6),
        ('BOTTOMPADDING', (0, 0), (-1, -1), 6),
        ('BACKGROUND', (0, 0), (-1, -1), '#957348'),
        ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
        ('VALIGN', (0, 0), (-1, -1), 'TOP'),
        ('GRID', (0, 0), (-1, -1), 0.25, colors.black),
    ]))
    story.append(banner)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Literature cited in this report
    references = [
        '''
        Cohen, J. 1960. "A coefficient of agreement for nominal scales."
        Educational and Psychological Measurement 20: 37-46.
        ''',

        '''
        Kennedy, RE, Z Yang and WB Cohen. 2010. "Detecting trends in
        forest disturbance and recovery using yearly Landsat time series:
        1. Landtrendr -- Temporal segmentation algorithms." Remote Sensing
        of Environment 114(2010): 2897-2910.
        ''',

        '''
        Ohmann, JL, MJ Gregory and HM Roberts. 2014 (in press). "Scale
        considerations for integrating forest inventory plot data and
        satellite image data for regional forest mapping." Remote Sensing
        of Environment.
        ''',

        '''
        O'Neil, TA, KA Bettinger, M Vander Heyden, BG Marcot, C Barrett,
        TK Mellen, WM Vanderhaegen, DH Johnson, PJ Doran, L Wunder, and
        KM Boula. 2001. "Structural conditions and habitat elements of
        Oregon and Washington. Pages 115-139 in: Johnson, DH and
        TA O'Neil, editors. 2001. "Wildlife-habitat relationships in
        Oregon and Washington." Oregon State University Press,
        Corvallis, OR.
        ''',
    ]

    # Emit each citation as a body paragraph with a trailing spacer
    for citation in references:
        story.append(p.Paragraph(citation, styles['body_style']))
        story.append(p.Spacer(0, 0.10 * u.inch))

    return story
def _create_story(self, scatter_files):
    """
    Assemble the local-scale accuracy (scatterplot) section.

    Parameters
    ----------
    scatter_files : sequence of str
        Paths to pre-rendered scatterplot images, one per continuous
        attribute.

    Returns a list of reportlab flowables: a page break, section banner,
    explanatory text, and a two-column grid of scatterplot images.
    """
    styles = report_styles.get_report_styles()

    # Begin on a fresh portrait page
    story = self._make_page_break([], self.PORTRAIT)

    # Section banner
    title_str = (
        '<strong>Local-Scale Accuracy Assessment: '
        'Scatterplots of Observed vs. Predicted '
        'Values for Continuous Variables at '
        'Plot Locations</strong>'
    )
    banner_para = p.Paragraph(title_str, styles['section_style'])
    banner = p.Table([[banner_para]], colWidths=[7.5 * u.inch])
    banner.setStyle(p.TableStyle([
        ('TOPPADDING', (0, 0), (-1, -1), 6),
        ('BOTTOMPADDING', (0, 0), (-1, -1), 6),
        ('BACKGROUND', (0, 0), (-1, -1), '#957348'),
        ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
        ('VALIGN', (0, 0), (-1, -1), 'TOP'),
        ('GRID', (0, 0), (-1, -1), 0.25, colors.black),
    ]))
    story.append(banner)
    story.append(p.Spacer(0, 0.2 * u.inch))

    # Explanation of the modified leave-one-out methodology
    scatter_str = '''
        These scatterplots compare the observed plot values against
        predicted (modeled) values for each plot used in the GNN model.
        We use a modified leave-one-out (LOO) approach. In traditional
        LOO accuracy assessment, a model is run with <i>n</i>-1 plots and
        then accuracy is determined at the plot left out of modeling, for
        all plots used in modeling. Because of computing limitations, we
        use a 'second-nearest-neighbor' approach. We develop our models
        with all plots, but in determining accuracy, we don't allow a
        plot to assign itself as a neighbor at the plot location. This
        yields similar accuracy assessment results as a true
        cross-validation approach, but probably slightly underestimates
        the true accuracy of the distributed (first-nearest-neighbor)
        map.<br/><br/>
        The observed value comes directly from the plot data, whereas the
        predicted value comes from the GNN prediction for the plot
        location. The GNN prediction is the mean of pixel values for a
        window that approximates the field plot
        configuration.<br/><br/>
        The correlation coefficients, normalized Root Mean Squared Errors
        (RMSE), and coefficients of determination (R-square) are given.
        The RMSE is normalized by dividing the RMSE by the observed mean
        value.
    '''
    story.append(p.Paragraph(scatter_str, styles['body_style']))
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Lay the scatterplot images out in a two-column grid; pad the final
    # row with empty paragraphs when the image count is not a multiple
    # of the column count
    table_cols = 2
    cells = [
        p.Image(fn, 3.4 * u.inch, 3.0 * u.inch) for fn in scatter_files
    ]
    while len(cells) % table_cols != 0:
        cells.append(p.Paragraph('', styles['body_style']))
    scatter_table = [
        cells[start:start + table_cols]
        for start in range(0, len(cells), table_cols)
    ]

    # Style the grid into a reportlab table and add it to the story
    width = 3.75 * u.inch
    grid = p.Table(scatter_table, colWidths=[width, width])
    grid.setStyle(p.TableStyle([
        ('ALIGNMENT', (0, 0), (-1, -1), 'CENTER'),
        ('VALIGN', (0, 0), (-1, -1), 'CENTER'),
        ('TOPPADDING', (0, 0), (-1, -1), 6.0),
        ('BOTTOMPADDING', (0, 0), (-1, -1), 6.0),
    ]))
    story.append(grid)

    return story
def _create_story(self):
    """
    Build the 'Data Dictionary' section of the report.

    Reads the stand attribute metadata, selects the non-species accuracy
    attributes flagged for this report, and renders each field's name,
    description (with units), and any code definitions into a two-column
    table.  Finishes with a note describing the species abundance fields
    attached to the distributed ArcInfo grids.
    """
    # Set up an empty list to hold the story
    story = []

    # Import the report styles
    styles = report_styles.get_report_styles()

    # Create a page break
    story = self._make_page_break(story, self.PORTRAIT)

    # Section title
    title_str = '<strong>Data Dictionary</strong>'
    para = p.Paragraph(title_str, styles['section_style'])
    t = p.Table([[para]], colWidths=[7.5 * u.inch])
    t.setStyle(
        p.TableStyle([
            ('TOPPADDING', (0, 0), (-1, -1), 6),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 6),
            ('BACKGROUND', (0, 0), (-1, -1), '#957348'),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('GRID', (0, 0), (-1, -1), 0.25, colors.black),
        ]))
    story.append(t)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Read in the stand attribute metadata
    mp = xsmp.XMLStandMetadataParser(self.stand_metadata_file)

    # Subset the attributes to those that are accuracy attributes, are
    # identified to go into the report, and are not species variables
    attrs = []
    for attr in mp.attributes:
        if attr.accuracy_attr == 1 and attr.project_attr == 1 and \
                attr.species_attr == 0:
            attrs.append(attr.field_name)

    # Set up the master dictionary table
    dictionary_table = []

    # Iterate through the attributes and print out the field information
    # and codes if present
    for attr in attrs:
        metadata = mp.get_attribute(attr)
        field_name = metadata.field_name
        units = metadata.units
        description = metadata.description
        field_para = p.Paragraph(field_name, styles['body_style_10'])
        # Append the units to the description when they are meaningful
        if units != 'none':
            description += ' (' + units + ')'
        field_desc_para = p.Paragraph(description, styles['body_style_10'])

        # If this field has codes, create a sub table underneath the
        # field description
        if metadata.codes:

            # Set up a container to hold the code rows
            code_table = []

            # Iterate over all code rows and append to the code_table
            for code in metadata.codes:
                code_para = \
                    p.Paragraph(code.code_value, styles['code_style'])
                description = self.txt_to_html(code.description)
                code_desc_para = \
                    p.Paragraph(description, styles['code_style'])
                code_table.append([code_para, code_desc_para])

            # Convert this to a reportlab table
            t = p.Table(
                code_table, colWidths=[0.75 * u.inch, 4.5 * u.inch])
            t.setStyle(
                p.TableStyle([
                    ('TOPPADDING', (0, 0), (-1, -1), 3),
                    ('BOTTOMPADDING', (0, 0), (-1, -1), 3),
                    ('BACKGROUND', (0, 0), (-1, -1), '#f7f7ea'),
                    ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
                    ('VALIGN', (0, 0), (-1, -1), 'TOP'),
                    ('GRID', (0, 0), (-1, -1), 0.25, colors.white),
                ]))

            # Create a stack of the field description and field codes
            elements = \
                [[field_desc_para], [t]]

        # If no codes exist, just add the field description
        else:
            elements = [[field_desc_para]]

        # Create a reportlab table of the field description and
        # (if present) field codes
        description_table = \
            p.Table(elements, colWidths=[5.25 * u.inch])
        description_table.setStyle(
            p.TableStyle([
                ('TOPPADDING', (0, 0), (-1, 0), 0),
                ('BOTTOMPADDING', (0, -1), (-1, -1), 0),
                ('LEFTPADDING', (0, 0), (-1, -1), 0),
                ('RIGHTPADDING', (0, 0), (-1, -1), 0),
                ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
                ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ]))
        dictionary_table.append([field_para, description_table])

    # Format the dictionary table into a reportlab table
    t = p.Table(dictionary_table, colWidths=[1.6 * u.inch, 5.4 * u.inch])
    t.setStyle(
        p.TableStyle([
            ('TOPPADDING', (0, 0), (0, -1), 5),
            ('BOTTOMPADDING', (0, 0), (0, -1), 5),
            ('GRID', (0, 0), (-1, -1), 1, colors.white),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('BACKGROUND', (0, 0), (-1, -1), '#f1efe4'),
        ]))
    story.append(t)

    # Description of the species information that is attached to ArcInfo
    # grids. We don't enumerate the codes here, but just give this
    # summary information
    spp_str = """
        Individual species abundances are attached to ArcInfo grids that
        LEMMA distributes. For this model, fields designate species codes
        based on the <link color="#0000ff"
        href="http://plants.usda.gov/">USDA PLANTS database</link> from
        the year 2000, and values represent species
    """
    # The units of the species values depend on the model type
    if self.model_type in ['sppsz', 'sppba']:
        spp_str += " basal area (m^2/ha)."
    elif self.model_type in ['trecov', 'wdycov']:
        spp_str += " percent cover."

    para = p.Paragraph(spp_str, styles['body_style'])
    story.append(p.Spacer(0, 0.1 * u.inch))
    story.append(para)

    # Return this story
    return story
def _create_story(self, histogram_files):
    """
    Assemble the regional-scale accuracy (histogram) section.

    Parameters
    ----------
    histogram_files : sequence of str
        Paths to pre-rendered histogram images comparing plot-based and
        GNN area distributions.

    Returns a list of reportlab flowables: a page break, section banner,
    explanatory text, and a two-column grid of histogram images.
    """
    styles = report_styles.get_report_styles()

    # Begin on a fresh portrait page
    story = self._make_page_break([], self.PORTRAIT)

    # Section banner
    title_str = (
        '<strong>Regional-Scale Accuracy Assessment:<br/> Area '
        'Distributions from Regional Inventory Plots vs. '
        'GNN</strong>'
    )
    banner_para = p.Paragraph(title_str, styles['section_style'])
    banner = p.Table([[banner_para]], colWidths=[7.5 * u.inch])
    banner.setStyle(p.TableStyle([
        ('TOPPADDING', (0, 0), (-1, -1), 6),
        ('BOTTOMPADDING', (0, 0), (-1, -1), 6),
        ('BACKGROUND', (0, 0), (-1, -1), '#957348'),
        ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
        ('VALIGN', (0, 0), (-1, -1), 'TOP'),
        ('GRID', (0, 0), (-1, -1), 0.25, colors.black),
    ]))
    story.append(banner)
    story.append(p.Spacer(0, 0.20 * u.inch))

    # Explanation of how the plot-based and GNN distributions compare
    histo_str = '''
        These histograms compare the distributions of land area in
        different vegetation conditions as estimated from a regional,
        sample- (plot-) based inventory (FIA Annual Plots) to model
        predictions from GNN (based on counts of 30-m
        pixels). <br/><br/>
        For the FIA annual plots, the distributions of forest area are
        determined by summing the 'area expansion factors' at the plot
        condition-class level. The plot-based estimates are subject to
        sampling error, but this is not shown in the graphs due to
        complexities involved. For more information about the FIA Annual
        inventory sample design, see the <link
        href="http://fia.fs.fed.us/library/database-documentation"
        color="blue">FIADB Users Manual</link>. <br/><br/>
        Some plots were not visited on the ground due to denied access or
        hazardous conditions, so the area these plots represent cannot be
        characterized and is included in the bar labeled
        'unsampled.' <br/><br/>
        The bars labeled 'nonforest' also require explanation. For GNN,
        this is the area of nonforest in the map, which is derived from
        ancillary (non-GNN) spatial data sources such as the National
        Land Cover Data (NLCD) or Ecological Systems maps from the Gap
        Analysis Program (GAP). This mapped nonforest is referred to as
        the GNN 'nonforest mask.' <br/><br/>
        For the plots, the 'nonforest' bar represents the nonforest area
        as estimated from the FIA Annual sample.
    '''
    story.append(p.Paragraph(histo_str, styles['body_style']))
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Lay the histogram images out in a two-column grid; pad the final
    # row with empty paragraphs when the image count is odd
    table_cols = 2
    cells = [
        p.Image(fn, 3.4 * u.inch, 3.0 * u.inch) for fn in histogram_files
    ]
    while len(cells) % table_cols != 0:
        cells.append(p.Paragraph('', styles['body_style']))
    histogram_table = [
        cells[start:start + table_cols]
        for start in range(0, len(cells), table_cols)
    ]

    # Style the grid into a reportlab table and add it to the story
    width = 3.75 * u.inch
    grid = p.Table(histogram_table, colWidths=[width, width])
    grid.setStyle(p.TableStyle([
        ('ALIGNMENT', (0, 0), (-1, -1), 'CENTER'),
        ('VALIGN', (0, 0), (-1, -1), 'CENTER'),
        ('TOPPADDING', (0, 0), (-1, -1), 6.0),
        ('BOTTOMPADDING', (0, 0), (-1, -1), 6.0),
    ]))
    story.append(grid)

    return story
def _create_story(self):
    """
    Build the vegetation-class error matrix section (landscape layout).

    Reads the vegclass error matrix CSV and the VEGCLASS metadata, then
    lays out an error matrix with observed classes as rows and predicted
    classes as columns.  Exact matches (the diagonal) are shaded dark
    gray; 'fuzzy correct' matches (within +/- one class) light gray.
    The table is followed by explanatory text and the vegetation class
    definitions.

    Returns the list of reportlab flowables for this section.
    """
    story = []
    styles = report_styles.get_report_styles()

    # This section is rendered on a landscape page
    story = self._make_page_break(story, self.LANDSCAPE)

    # This class is somewhat of a hack, in that it likely only works on
    # rotated paragraphs which fit into the desired cell area
    class RotatedParagraph(p.Paragraph):

        def wrap(self, availHeight, availWidth):
            # Wrap against the text's own width/leading, then swap the
            # returned dimensions so reportlab sizes the rotated cell
            h, w = p.Paragraph.wrap(
                self, self.canv.stringWidth(self.text),
                self.canv._leading)
            return w, h

        def draw(self):
            self.canv.rotate(90)
            self.canv.translate(0.0, -10.0)
            p.Paragraph.draw(self)

    # Section title
    title_str = '<strong>Local-Scale Accuracy Assessment: '
    title_str += 'Error Matrix for Vegetation Classes at Plot '
    title_str += 'Locations</strong>'

    para = p.Paragraph(title_str, styles['section_style'])
    t = p.Table([[para]], colWidths=[10.0 * u.inch])
    t.setStyle(
        p.TableStyle([
            ('TOPPADDING', (0, 0), (-1, -1), 3),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 3),
            ('BACKGROUND', (0, 0), (-1, -1), '#957348'),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('GRID', (0, 0), (-1, -1), 0.25, colors.black),
        ]))
    story.append(t)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Read in the vegclass error matrix
    # NOTE(review): mlab.csv2rec/rec_drop_fields were removed from
    # recent matplotlib releases -- confirm the pinned matplotlib
    # version still provides them
    names = ['P_' + str(x) for x in range(1, 12)]
    names.insert(0, 'OBSERVED')
    names.extend(['TOTAL', 'CORRECT', 'FUZZY_CORRECT'])
    vc_data = mlab.csv2rec(self.vc_errmatrix_file, skiprows=1, names=names)
    vc_data = mlab.rec_drop_fields(vc_data, ['OBSERVED'])

    # Read in the stand attribute metadata and get the VEGCLASS codes
    mp = xsmp.XMLStandMetadataParser(self.stand_metadata_file)
    vegclass_metadata = mp.get_attribute('VEGCLASS')
    vc_codes = vegclass_metadata.codes

    # Master container for all rows of the vegclass table
    vegclass_table = []

    # First header row: a cell spanned across all predicted-class
    # columns holding the 'Predicted Class' label
    prd_str = '<strong>Predicted Class</strong>'
    para = p.Paragraph(prd_str, styles['body_style_10_center'])
    header_row = ['', '', para]
    header_row.extend([''] * (len(vc_data) - 1))
    vegclass_table.append(header_row)

    # Second header row: predicted class labels plus summary columns
    summary_labels = ('Total', '% Correct', '% FCorrect')
    header_row = ['', '']
    for code in vc_codes:
        # Allow long labels to break after hyphens
        label = re.sub('-', '-<br/>', code.label)
        header_row.append(
            p.Paragraph(label, styles['body_style_10_right']))
    for label in summary_labels:
        label = re.sub(' ', '<br/>', label)
        header_row.append(
            p.Paragraph(label, styles['body_style_10_right']))
    vegclass_table.append(header_row)

    # Cells at or below this row/column index hold plot counts (printed
    # as integers); cells beyond it hold percentages (one decimal)
    format_break = 11

    # Cells in the summary block that should stay blank
    blank_cells = \
        [(11, 12), (11, 13), (12, 11), (12, 13), (13, 11), (13, 12)]

    # Add the data rows
    for (i, row) in enumerate(vc_data):
        vegclass_row = []
        for (j, elem) in enumerate(row):

            # Blank cells
            if (i, j) in blank_cells:
                elem_str = ''

            # Cells that represent plot counts
            elif i <= format_break and j <= format_break:
                elem_str = '%d' % int(elem)

            # Cells that represent percentages
            else:
                elem_str = '%.1f' % float(elem)
            para = p.Paragraph(elem_str, styles['body_style_10_right'])
            vegclass_row.append(para)

        # The rotated 'Observed Class' label spans all data rows, so
        # only the first row carries the actual paragraph
        if i == 0:
            obs_str = '<strong>Observed Class</strong>'
            para = \
                RotatedParagraph(obs_str, styles['body_style_10_center'])
        else:
            para = ''
        vegclass_row.insert(0, para)

        # Row label: the class label for data rows, then summary labels
        if i < len(vc_codes):
            label = vc_codes[i].label
        else:
            label = summary_labels[i - len(vc_codes)]
        para = p.Paragraph(label, styles['body_style_10_right'])
        vegclass_row.insert(1, para)

        # Add this row to the table
        vegclass_table.append(vegclass_row)

    # Column widths: rotated label, row labels, one column per class,
    # then the three summary columns
    widths = [0.3, 0.85]
    widths.extend([0.56] * len(vc_codes))
    widths.extend([0.66] * 3)
    widths = [x * u.inch for x in widths]

    # Convert the vegclass table into a reportlab table
    t = p.Table(vegclass_table, colWidths=widths)
    t.setStyle(
        p.TableStyle([
            ('SPAN', (0, 0), (1, 1)),
            ('SPAN', (0, 2), (0, -1)),
            ('SPAN', (2, 0), (-1, 0)),
            ('BACKGROUND', (0, 0), (-1, -1), '#f1efe4'),
            ('GRID', (0, 0), (-1, -1), 1, colors.white),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('VALIGN', (0, 2), (0, -1), 'MIDDLE'),
            ('VALIGN', (2, 1), (-1, 1), 'MIDDLE'),
            ('TOPPADDING', (0, 0), (-1, -1), 2),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 3),
        ]))

    # Shade the diagonal (truly correct) cells dark gray; the +1 offsets
    # skip the two label columns/rows
    for val in range(1, len(vc_codes) + 1):
        t.setStyle(
            p.TableStyle([
                ('BACKGROUND', (val + 1, val + 1),
                    (val + 1, val + 1), '#aaaaaa'),
            ]))

    # Shade the fuzzy correct cells light gray; keys are predicted
    # classes, values are the observed classes considered within
    # +/- one class
    fuzzy = {
        1: [2],
        2: [1, 3, 5, 8],
        3: [2, 4, 5],
        4: [3, 6, 7],
        5: [2, 3, 6, 8],
        6: [4, 5, 7, 9],
        7: [4, 6, 10, 11],
        8: [2, 5, 9],
        9: [6, 8, 10],
        10: [7, 9, 11],
        11: [7, 10],
    }
    for key in fuzzy:
        for elem in fuzzy[key]:
            t.setStyle(
                p.TableStyle([
                    ('BACKGROUND', (key + 1, elem + 1),
                        (key + 1, elem + 1), '#dddddd'),
                ]))

    # Add this table to the story
    story.append(t)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Explanation and definitions of vegetation class categories
    cell_str = """
        Cell values are model plot counts. Dark gray cells represent
        plots where the observed class matches the predicted class and
        are included in the percent correct. Light gray cells represent
        cases where the observed and predicted differ slightly (within
        +/- one class) based on canopy cover, hardwood proportion or
        average stand diameter, and are included in the percent fuzzy
        correct.
    """
    para = p.Paragraph(cell_str, styles['body_style_9'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    head_str = '''
        <strong>Vegetation Class (VEGCLASS) Definitions</strong> --
        CANCOV (canopy cover of all live trees), BAH_PROP (proportion of
        hardwood basal area), and QMD_DOM (quadratic mean diameter of
        all dominant and codominant trees).
    '''
    para = p.Paragraph(head_str, styles['body_style_9'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Print out the vegclass code definitions
    for code in vc_codes:
        label = code.label
        desc = self.txt_to_html(code.description)
        doc_str = '<strong>' + label + ':</strong> ' + desc
        para = p.Paragraph(doc_str, styles['body_style_9'])
        story.append(para)

    return story
def _create_story(self):
    """
    Build the 'Species Accuracy at Plot Locations' section.

    Explains Cohen's kappa statistic, then renders one row per modeled
    species showing its prevalence, presence/absence confusion counts
    (OP/PP, OP/PA, OA/PP, OA/PA), and kappa coefficient, read from the
    species accuracy CSV file.
    """
    # Set up an empty list to hold the story
    story = []

    # Import the report styles
    styles = report_styles.get_report_styles()

    # Create a page break
    story = self._make_page_break(story, self.PORTRAIT)

    # Section title
    title_str = '<strong>Local-Scale Accuracy Assessment:<br/>'
    title_str += 'Species Accuracy at Plot Locations'
    title_str += '</strong>'

    para = p.Paragraph(title_str, styles['section_style'])
    t = p.Table([[para]], colWidths=[7.5 * u.inch])
    t.setStyle(
        p.TableStyle([
            ('TOPPADDING', (0, 0), (-1, -1), 6),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 6),
            ('BACKGROUND', (0, 0), (-1, -1), '#957348'),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('GRID', (0, 0), (-1, -1), 0.25, colors.black),
        ]))
    story.append(t)
    story.append(p.Spacer(0, 0.2 * u.inch))

    # Kappa explanation
    kappa_str = '''
        Cohen's kappa coefficient (Cohen, 1960) is a statistical measure
        of reliability, accounting for agreement occurring by chance.
        The equation for kappa is:
    '''
    para = p.Paragraph(kappa_str, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.05 * u.inch))

    kappa_str = '''
        kappa = (Pr(a) - Pr(e)) / (1.0 - Pr(e))
    '''
    para = p.Paragraph(kappa_str, styles['indented'])
    story.append(para)
    story.append(p.Spacer(0, 0.05 * u.inch))

    kappa_str = '''
        where Pr(a) is the relative observed agreement among raters, and
        Pr(e) is the probability that agreement is due to
        chance.<br/><br/>
        <strong>Abbreviations Used:</strong><br/>
        OP/PP = Observed Present / Predicted Present<br/>
        OA/PP = Observed Absent / Predicted Present
        (errors of commission)<br/>
        OP/PA = Observed Present / Predicted Absent
        (errors of ommission)<br/>
        OA/PA = Observed Absent / Predicted Absent
    '''
    para = p.Paragraph(kappa_str, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.2 * u.inch))

    # Create a list of lists to hold the species accuracy information
    species_table = []

    # Header row
    header_row = []

    spp_str = '<strong>Species PLANTS Code<br/>'
    spp_str += 'Scientific Name / Common Name</strong>'
    para = p.Paragraph(spp_str, styles['body_style_10'])
    header_row.append(para)

    spp_str = '<strong>Species prevalence</strong>'
    para = p.Paragraph(spp_str, styles['body_style_10'])
    header_row.append(para)

    # 2x2 inner table of confusion-matrix column headings
    p1 = p.Paragraph('<strong>OP/PP</strong>',
                     styles['body_style_10_right'])
    p2 = p.Paragraph('<strong>OP/PA</strong>',
                     styles['body_style_10_right'])
    p3 = p.Paragraph('<strong>OA/PP</strong>',
                     styles['body_style_10_right'])
    p4 = p.Paragraph('<strong>OA/PA</strong>',
                     styles['body_style_10_right'])
    header_cells = [[p1, p2], [p3, p4]]
    t = p.Table(header_cells, colWidths=[0.75 * u.inch, 0.75 * u.inch])
    t.setStyle(
        p.TableStyle([
            ('GRID', (0, 0), (-1, -1), 1, colors.white),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('TOPPADDING', (0, 0), (-1, -1), 2),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 2),
        ]))
    header_row.append(t)

    kappa_str = '<strong>Kappa coefficient</strong>'
    para = p.Paragraph(kappa_str, styles['body_style_10'])
    header_row.append(para)
    species_table.append(header_row)

    # Open the species accuracy file into a recarray
    spp_data = utilities.csv2rec(self.species_accuracy_file)

    # Read in the stand attribute metadata
    mp = xsmp.XMLStandMetadataParser(self.stand_metadata_file)

    # Read in the report metadata if it exists
    if self.report_metadata_file:
        rmp = xrmp.XMLReportMetadataParser(self.report_metadata_file)
    else:
        rmp = None

    # Subset the attributes to just species
    attrs = []
    for attr in mp.attributes:
        if attr.species_attr == 1 and 'NOTALY' not in attr.field_name:
            attrs.append(attr.field_name)

    # Iterate over the species and print out the statistics
    for spp in attrs:

        # Empty row to hold the formatted output
        species_row = []

        # Get the scientific and common names from the report metadata
        # if it exists; otherwise, just use the species symbol
        if rmp is not None:
            # Strip off any suffix if it exists
            # NOTE(review): str.split always returns at least one
            # element, so the IndexError presumably comes from
            # rmp.get_species() on unknown species -- confirm the
            # exception type it actually raises
            try:
                spp_plain = spp.split('_')[0]
                spp_info = rmp.get_species(spp_plain)
                spp_str = spp_info.spp_symbol + '<br/>'
                spp_str += spp_info.scientific_name + ' / '
                spp_str += spp_info.common_name
            except IndexError:
                spp_str = spp
        else:
            spp_str = spp
        para = p.Paragraph(spp_str, styles['body_style_10'])
        species_row.append(para)

        # Get the statistical information
        data = spp_data[spp_data.SPECIES == spp][0]
        counts = [data.OP_PP, data.OP_PA, data.OA_PP, data.OA_PA]
        prevalence = data.PREVALENCE
        kappa = data.KAPPA

        # Species prevalence
        prevalence_str = '%.4f' % prevalence
        para = p.Paragraph(prevalence_str, styles['body_style_10_right'])
        species_row.append(para)

        # Capture the plot counts in an inner table (2x2 layout matching
        # the header cells)
        count_cells = []
        count_row = []
        for i in range(0, 4):
            para = p.Paragraph(
                '%d' % counts[i], styles['body_style_10_right'])
            count_row.append(para)
            if i % 2 == 1:
                count_cells.append(count_row)
                count_row = []
        t = p.Table(count_cells, colWidths=[0.75 * u.inch, 0.75 * u.inch])
        t.setStyle(
            p.TableStyle([
                ('GRID', (0, 0), (-1, -1), 1, colors.white),
                ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
                ('VALIGN', (0, 0), (-1, -1), 'TOP'),
                ('TOPPADDING', (0, 0), (-1, -1), 2),
                ('BOTTOMPADDING', (0, 0), (-1, -1), 2),
            ]))
        species_row.append(t)

        # Print out the kappa statistic
        kappa_str = '%.4f' % kappa
        para = p.Paragraph(kappa_str, styles['body_style_10_right'])
        species_row.append(para)

        # Push this row to the master species table
        species_table.append(species_row)

    # Style this into a reportlab table and add to the story
    col_widths = [(x * u.inch) for x in [4.0, 0.75, 1.5, 0.75]]
    t = p.Table(species_table, colWidths=col_widths)
    t.setStyle(
        p.TableStyle([
            ('BACKGROUND', (0, 0), (-1, -1), '#f1efe4'),
            ('GRID', (0, 0), (-1, -1), 2, colors.white),
            ('TOPPADDING', (0, 0), (0, -1), 2),
            ('BOTTOMPADDING', (0, 0), (0, -1), 2),
            ('LEFTPADDING', (0, 0), (0, -1), 6),
            ('RIGHTPADDING', (0, 0), (0, -1), 6),
            ('ALIGNMENT', (0, 0), (0, -1), 'LEFT'),
            ('VALIGN', (0, 0), (0, -1), 'TOP'),
            ('TOPPADDING', (1, 0), (1, -1), 2),
            ('BOTTOMPADDING', (1, 0), (1, -1), 2),
            ('LEFTPADDING', (1, 0), (1, -1), 6),
            ('RIGHTPADDING', (1, 0), (1, -1), 6),
            ('ALIGNMENT', (1, 0), (1, -1), 'RIGHT'),
            ('VALIGN', (1, 0), (1, 0), 'TOP'),
            ('VALIGN', (1, 1), (1, -1), 'MIDDLE'),
            ('TOPPADDING', (2, 0), (2, -1), 0),
            ('BOTTOMPADDING', (2, 0), (2, -1), 0),
            ('LEFTPADDING', (2, 0), (2, -1), 0),
            ('RIGHTPADDING', (2, 0), (2, -1), 0),
            ('ALIGNMENT', (2, 0), (2, -1), 'LEFT'),
            ('VALIGN', (2, 0), (2, -1), 'TOP'),
            ('TOPPADDING', (3, 0), (3, -1), 2),
            ('BOTTOMPADDING', (3, 0), (3, -1), 2),
            ('LEFTPADDING', (3, 0), (3, -1), 6),
            ('RIGHTPADDING', (3, 0), (3, -1), 6),
            ('ALIGNMENT', (3, 0), (3, -1), 'RIGHT'),
            ('VALIGN', (3, 0), (3, 0), 'TOP'),
            ('VALIGN', (3, 1), (3, -1), 'MIDDLE'),
        ]))
    story.append(t)
    story.append(p.Spacer(0, 0.1 * u.inch))

    rare_species_str = """
        Note that some very rare species do not appear in this accuracy
        report, because these species were not included when building
        the initial ordination model. The full set of species is
        available upon request.
    """
    para = p.Paragraph(rare_species_str, styles['body_style'])
    story.append(para)

    # Return this story
    return story
def _create_story(self, scatter_files):
    """
    Build the local-scale accuracy (scatterplot) section.

    Parameters
    ----------
    scatter_files : sequence of str
        Paths to pre-rendered scatterplot images, one per continuous
        attribute; rendered two per row.
    """
    # Set up an empty list to hold the story
    story = []

    # Import the report styles
    styles = report_styles.get_report_styles()

    # Create a page break
    story = self._make_page_break(story, self.PORTRAIT)

    # Section title
    title_str = "<strong>Local-Scale Accuracy Assessment: "
    title_str += "Scatterplots of Observed vs. Predicted "
    title_str += "Values for Continuous Variables at "
    title_str += "Plot Locations</strong>"

    para = p.Paragraph(title_str, styles["section_style"])
    t = p.Table([[para]], colWidths=[7.5 * u.inch])
    t.setStyle(
        p.TableStyle(
            [
                ("TOPPADDING", (0, 0), (-1, -1), 6),
                ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
                ("BACKGROUND", (0, 0), (-1, -1), "#957348"),
                ("ALIGNMENT", (0, 0), (-1, -1), "LEFT"),
                ("VALIGN", (0, 0), (-1, -1), "TOP"),
                ("GRID", (0, 0), (-1, -1), 0.25, colors.black),
            ]
        )
    )
    story.append(t)
    story.append(p.Spacer(0, 0.2 * u.inch))

    # Scatter explanation
    scatter_str = """
        These scatterplots compare the observed plot values against
        predicted (modeled) values for each plot used in the GNN model.
        We use a modified leave-one-out (LOO) approach. In traditional
        LOO accuracy assessment, a model is run with <i>n</i>-1 plots
        and then accuracy is determined at the plot left out of
        modeling, for all plots used in modeling. Because of computing
        limitations, we use a 'second-nearest-neighbor' approach. We
        develop our models with all plots, but in determining accuracy,
        we don't allow a plot to assign itself as a neighbor at the plot
        location. This yields similar accuracy assessment results as a
        true cross-validation approach, but probably slightly
        underestimates the true accuracy of the distributed
        (first-nearest-neighbor) map.<br/><br/>
        The observed value comes directly from the plot data, whereas
        the predicted value comes from the GNN prediction for the plot
        location. The GNN prediction is the mean of pixel values for a
        window that approximates the field plot
        configuration.<br/><br/>
        The correlation coefficients, normalized Root Mean Squared
        Errors (RMSE), and coefficients of determination (R-square) are
        given. The RMSE is normalized by dividing the RMSE by the
        observed mean value.
    """
    para = p.Paragraph(scatter_str, styles["body_style"])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Add the scatterplot images to a list of lists
    table_cols = 2
    scatter_table = []
    scatter_row = []
    for (i, fn) in enumerate(scatter_files):
        scatter_row.append(p.Image(fn, 3.4 * u.inch, 3.0 * u.inch))
        # Flush the row every time a full set of columns accumulates
        if (i % table_cols) == (table_cols - 1):
            scatter_table.append(scatter_row)
            scatter_row = []

    # Determine if there are any scatterplots left to print; pad the
    # partial row with empty paragraphs so the grid stays rectangular
    if len(scatter_row) != 0:
        for i in range(len(scatter_row), table_cols):
            scatter_row.append(p.Paragraph("", styles["body_style"]))
        scatter_table.append(scatter_row)

    # Style this into a reportlab table and add to the story
    width = 3.75 * u.inch
    t = p.Table(scatter_table, colWidths=[width, width])
    t.setStyle(
        p.TableStyle(
            [
                ("ALIGNMENT", (0, 0), (-1, -1), "CENTER"),
                ("VALIGN", (0, 0), (-1, -1), "CENTER"),
                ("TOPPADDING", (0, 0), (-1, -1), 6.0),
                ("BOTTOMPADDING", (0, 0), (-1, -1), 6.0),
            ]
        )
    )
    story.append(t)

    # Return this story
    return story
def _create_story(self):
    """
    Build the title page and general-information story of the report.

    Covers: report title / model region / model type, region overview
    with image, contact table, website link, report date, model region
    area, imagery date, plot-matching and nonforest-mask notes, the
    inventory-plot counts table, and the spatial predictor variable
    table.

    Fixes relative to the previous revision:
      * A trailing partial row of contacts (contact count not a
        multiple of three) was silently dropped from the contact
        table; it is now padded and appended.
      * ``locale.format`` (deprecated, removed in Python 3.12) is
        replaced by the equivalent ``locale.format_string``.

    Returns:
        list: reportlab flowables for this section.
    """
    story = []

    # Import the report styles
    styles = report_styles.get_report_styles()

    # Open the report metadata
    rmp = xrmp.XMLReportMetadataParser(self.report_metadata_file)

    # Offset on this first page
    story.append(p.Spacer(0.0, 0.43 * u.inch))

    # Report title
    title_str = 'GNN Accuracy Assessment Report'
    title = p.Paragraph(title_str, styles['title_style'])
    story.append(title)

    # Model region name and number
    mr_name = rmp.model_region_name
    subtitle_str = mr_name + ' (Modeling Region ' + str(self.mr) + ')'
    para = p.Paragraph(subtitle_str, styles['sub_title_style'])
    story.append(para)

    # Model type information (key is the internal model-type code)
    model_type_dict = {
        'sppsz': 'Basal-Area by Species-Size Combinations',
        'trecov': 'Tree Percent Cover by Species',
        'wdycov': 'Woody Percent Cover by Species',
        'sppba': 'Basal-Area by Species',
    }
    model_type_str = 'Model Type: '
    model_type_str += model_type_dict[self.model_type]
    para = p.Paragraph(model_type_str, styles['sub_title_style'])
    story.append(para)
    story.append(p.Spacer(0.0, 0.7 * u.inch))

    # Image and flowable to hold the MR image and region description
    substory = []
    mr_image_path = rmp.image_path
    image = p.Image(
        mr_image_path, 3.0 * u.inch, 3.86 * u.inch, mask='auto')
    para = p.Paragraph('Overview', styles['heading_style'])
    substory.append(para)
    overview = rmp.model_region_overview
    para = p.Paragraph(overview, styles['body_style'])
    substory.append(para)
    image_flowable = p.ImageAndFlowables(
        image, substory, imageSide='left', imageRightPadding=6)
    story.append(image_flowable)
    story.append(p.Spacer(0.0, 0.2 * u.inch))

    # Contact information, three contacts per table row
    para = p.Paragraph('Contact Information:', styles['heading_style'])
    story.append(para)
    story.append(p.Spacer(0.0, 0.1 * u.inch))
    contacts = rmp.contacts
    contact_table = []
    contact_row = []
    table_cols = 3
    for (i, contact) in enumerate(contacts):
        contact_str = '<b>' + contact.name + '</b><br/>'
        contact_str += contact.position_title + '<br/>'
        contact_str += contact.affiliation + '<br/>'
        contact_str += 'Phone: ' + contact.phone_number + '<br/>'
        contact_str += 'Email: ' + contact.email_address
        para = p.Paragraph(contact_str, styles['body_style_9'])
        contact_row.append(para)
        if (i % table_cols) == (table_cols - 1):
            contact_table.append(contact_row)
            contact_row = []

    # BUG FIX: a trailing partial row used to be dropped when the
    # contact count was not a multiple of table_cols.  Pad it with
    # empty cells so the table stays rectangular, then append it.
    if contact_row:
        while len(contact_row) < table_cols:
            contact_row.append('')
        contact_table.append(contact_row)

    t = p.Table(contact_table)
    t.setStyle(
        p.TableStyle([
            ('TOPPADDING', (0, 0), (-1, -1), 4),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 4),
            ('LEFTPADDING', (0, 0), (-1, -1), 6),
            ('RIGHTPADDING', (0, 0), (-1, -1), 6),
            ('BACKGROUND', (0, 0), (-1, -1), '#f1efe4'),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('GRID', (0, 0), (-1, -1), 1.0, colors.white),
        ]))
    story.append(t)
    story.append(p.Spacer(0, 0.15 * u.inch))

    # Website link
    web_str = '<strong>LEMMA Website:</strong> '
    web_str += '<link color="#0000ff" '
    web_str += 'href="http://lemma.forestry.oregonstate.edu/">'
    web_str += 'http://lemma.forestry.oregonstate.edu</link>'
    para = p.Paragraph(web_str, styles['body_style'])
    story.append(para)

    # Page break
    story = self._make_page_break(story, self.PORTRAIT)

    # General model information
    para = p.Paragraph('General Information', styles['heading_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Report date
    current_time = datetime.now()
    now = current_time.strftime("%Y.%m.%d")
    time_str = '<strong>Report Date:</strong> ' + now
    para = p.Paragraph(time_str, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Model region area, grouped per the user's locale.
    # locale.format() is deprecated and was removed in Python 3.12;
    # locale.format_string() is the drop-in supported equivalent.
    locale.setlocale(locale.LC_ALL, "")
    mr_area_ha = rmp.model_region_area
    mr_area_ac = mr_area_ha * self.ACRES_PER_HECTARE
    ha = locale.format_string('%d', mr_area_ha, True)
    ac = locale.format_string('%d', mr_area_ac, True)
    area_str = '<strong>Model Region Area:</strong> '
    area_str += str(ha) + ' hectares (' + str(ac) + ' acres)'
    para = p.Paragraph(area_str, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Model imagery date
    mr_imagery_str = '<strong>Model Imagery Date:</strong> '
    mr_imagery_str += str(self.model_year)
    para = p.Paragraph(mr_imagery_str, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Plot matching narrative - only relevant for species-size models
    if self.model_type == 'sppsz':
        plot_title = '''
            <strong>Matching Plots to Imagery for Model
            Development: </strong>
        '''
        para = p.Paragraph(plot_title, styles['body_style'])
        story.append(para)
        story.append(p.Spacer(0, 0.1 * u.inch))
        imagery_str = """
            The current versions of the GNN maps were developed using
            data from inventory plots that span a range of dates, and
            from a yearly time-series of Landsat imagery mosaics from
            1984 to 2012 developed with the LandTrendr algorithms
            (Kennedy et al., 2010).  For model development, plots were
            matched to spectral data for the same year as plot
            measurement.  In addition, because as many as four plots
            were measured at a given plot location, we constrained the
            imputation for a given map year to only one plot from each
            location -- the plot nearest in date to the imagery (map)
            year.  See Ohmann et al. (in press) for more detailed
            information about the GNN modeling process.
        """
        para = p.Paragraph(imagery_str, styles['body_style'])
        story.append(para)
        story.append(p.Spacer(0, 0.10 * u.inch))

    # Mask information
    mask_title = '<strong>Nonforest Mask Information:</strong>'
    para = p.Paragraph(mask_title, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))
    mask_str = '''
        An important limitation of the GNN map products is the
        separation of forest and nonforest lands.  The GNN modeling
        applies to forest areas only, where we have detailed field
        plot data.  Nonforest areas are 'masked' as such using an
        ancillary map.  In California, Oregon, Washington and parts of
        adjacent states, we are using maps of Ecological Systems
        developed for the Gap Analysis Program (GAP) as our nonforest
        mask.  There are 'unmasked' versions of our GNN maps available
        upon request, in case you have an alternative map of nonforest
        for your area of interest that you would like to apply to the
        GNN maps.
    '''
    para = p.Paragraph(mask_str, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Spatial uncertainty
    nn_dist_title = \
        '<strong>Spatial Depictions of GNN Map Uncertainty:</strong>'
    para = p.Paragraph(nn_dist_title, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))
    nn_dist_str = '''
        In addition to the map diagnostics provided in this report, we
        develop spatial depictions of map uncertainty (available upon
        request).  The value shown in the grid for a pixel is the
        distance from that pixel to the nearest-neighbor plot that was
        imputed to the pixel by GNN, and whose vegetation attributes
        are associated with the pixel.  'Distance' is Euclidean
        distance in multi-dimensional gradient space from the gradient
        model, where the axes are weighted by how much variation they
        explain.  The nearest-neighbor distance is in gradient (model)
        space, not geographic space.  The nearest-neighbor-distance
        grid can be interpreted as a map of potential map accuracy,
        although it is an indicator of accuracy rather than a direct
        measure.  In general, the user of a GNN map would have more
        confidence in map reliability for areas where nearest-neighbor
        distance is short, where a similar plot was available (nearby)
        for the model, and less confidence (more uncertainty) where
        nearest-neighbor distance is long.  Typically, high
        nearest-neighbor distances are seen in areas with lower
        sampling intensity of inventory plots.
    '''
    para = p.Paragraph(nn_dist_str, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Page break
    story = self._make_page_break(story, self.PORTRAIT)

    # Inventory plots by date
    plot_title = '<strong>Inventory Plots in Model Development</strong>'
    para = p.Paragraph(plot_title, styles['heading_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.10 * u.inch))

    # Get all the data sources from the report metadata file
    data_sources = rmp.plot_data_sources

    # Track the total number of plots across all data sources
    total_plots = 0

    # Master table: header row first
    plot_table = []
    p1 = p.Paragraph('<strong>Data Source</strong>',
                     styles['contact_style'])
    p2 = p.Paragraph('<strong>Description</strong>',
                     styles['contact_style'])
    p3 = p.Paragraph('<strong>Plot Count by Year</strong>',
                     styles['contact_style'])
    plot_table.append([p1, p2, p3])

    # Iterate over all data sources
    for ds in data_sources:

        # Inner table of plot counts by assessment year
        pc_table = []

        # Hack to avoid the table row being too long.  Should only
        # impact models that use R6_ECO plots: collapse a long list of
        # years into one 'min_year-max_year: count' summary row.
        if len(ds.assessment_years) > 30:
            # Increment the total plot count and track the number of
            # plots in this data source
            ds_count = 0
            for ay in ds.assessment_years:
                total_plots += ay.plot_count
                ds_count += ay.plot_count

            # Get the minimum and maximum years
            years = [x.assessment_year for x in ds.assessment_years]
            min_year = min(years)
            max_year = max(years)
            para_str = str(min_year) + '-' + str(max_year) + ': '
            para_str += str(ds_count)
            para = p.Paragraph(para_str, styles['contact_style_right'])
            pc_table.append([[para]])
        else:
            for ay in ds.assessment_years:
                year = ay.assessment_year
                plot_count = str(ay.plot_count)

                # Increment the plot count for total
                total_plots += ay.plot_count

                # Add the table row for this year's plot count
                pc_row = []
                para = p.Paragraph(year, styles['contact_style_right'])
                pc_row.append(para)
                para = p.Paragraph(plot_count,
                                   styles['contact_style_right'])
                pc_row.append(para)
                pc_table.append(pc_row)

        # Create the inner table
        t = p.Table(pc_table)
        t.setStyle(
            p.TableStyle([
                ('GRID', (0, 0), (-1, -1), 1, colors.white),
                ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
                ('VALIGN', (0, 0), (-1, -1), 'TOP'),
                ('TOPPADDING', (0, 0), (-1, -1), 2),
                ('BOTTOMPADDING', (0, 0), (-1, -1), 2),
            ]))

        # Add data_source and description, then append to the master
        p1 = p.Paragraph(ds.data_source, styles['contact_style'])
        p2 = p.Paragraph(ds.description, styles['contact_style'])
        plot_table.append([p1, p2, t])

    # Now append the plot count - columns 1 and 2 will be merged in
    # the table formatting upon return
    p1 = p.Paragraph('Total Plots', styles['contact_style_right_bold'])
    p2 = p.Paragraph(str(total_plots),
                     styles['contact_style_right_bold'])
    plot_table.append(['', p1, p2])

    # Format the table into reportlab
    t = p.Table(plot_table,
                colWidths=[1.3 * u.inch, 4.2 * u.inch, 1.3 * u.inch])
    t.hAlign = 'LEFT'
    t.setStyle(
        p.TableStyle([
            ('GRID', (0, 0), (-1, -2), 1.5, colors.white),
            ('BOX', (0, -1), (-1, -1), 1.5, colors.white),
            ('LINEAFTER', (1, -1), (1, -1), 1.5, colors.white),
            ('BACKGROUND', (0, 0), (-1, -1), '#f1efe4'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('TOPPADDING', (0, 0), (1, -2), 2),
            ('BOTTOMPADDING', (0, 0), (1, -2), 2),
            ('LEFTPADDING', (0, 0), (1, -2), 6),
            ('RIGHTPADDING', (0, 0), (1, -2), 6),
            ('ALIGNMENT', (0, 0), (1, -2), 'LEFT'),
            ('TOPPADDING', (2, 0), (2, 0), 2),
            ('BOTTOMPADDING', (2, 0), (2, 0), 2),
            ('LEFTPADDING', (2, 0), (2, 0), 6),
            ('RIGHTPADDING', (2, 0), (2, 0), 6),
            ('ALIGNMENT', (2, 0), (2, 0), 'LEFT'),
            ('TOPPADDING', (2, 1), (2, -2), 0),
            ('BOTTOMPADDING', (2, 1), (2, -2), 0),
            ('LEFTPADDING', (2, 1), (2, -2), 0),
            ('RIGHTPADDING', (2, 1), (2, -2), 0),
            ('ALIGNMENT', (2, 1), (2, -2), 'LEFT'),
            ('TOPPADDING', (0, -1), (2, -1), 4),
            ('BOTTOMPADDING', (0, -1), (2, -1), 4),
            ('LEFTPADDING', (0, -1), (2, -1), 6),
            ('RIGHTPADDING', (0, -1), (2, -1), 6),
            ('ALIGNMENT', (0, -1), (2, -1), 'RIGHT'),
        ]))

    # Append this table to the main story
    story.append(t)

    # Page break
    story = self._make_page_break(story, self.PORTRAIT)

    # Print out the spatial predictor variables that are in this model
    ord_title = 'Spatial Predictor Variables in Model Development'
    para = p.Paragraph(ord_title, styles['heading_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.10 * u.inch))

    ord_var_str = """
        The list below represents the spatial predictor (GIS/remote
        sensing) variables that were used in creating this model.
    """
    para = p.Paragraph(ord_var_str, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Header row for the ordination variable table
    ordination_table = []
    p1 = p.Paragraph('<strong>Variable</strong>',
                     styles['contact_style'])
    p2 = p.Paragraph('<strong>Description</strong>',
                     styles['contact_style'])
    p3 = p.Paragraph('<strong>Data Source</strong>',
                     styles['contact_style'])
    ordination_table.append([p1, p2, p3])

    # One row per ordination variable: name, description, source
    for var in rmp.ordination_variables:
        name = p.Paragraph(var.field_name, styles['contact_style'])
        desc = p.Paragraph(var.description, styles['contact_style'])
        source = p.Paragraph(var.source, styles['contact_style'])
        ordination_table.append([name, desc, source])

    # Create a reportlab table from this list
    t = p.Table(ordination_table,
                colWidths=[1.0 * u.inch, 2.3 * u.inch, 3.5 * u.inch])
    t.hAlign = 'LEFT'
    t.setStyle(
        p.TableStyle([
            ('GRID', (0, 0), (-1, -1), 1.5, colors.white),
            ('BACKGROUND', (0, 0), (-1, -1), '#f1efe4'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
        ]))
    story.append(t)

    # Return the story
    return story
def _create_story(self):
    """
    Build the story for the local-scale error matrix of observed vs.
    predicted vegetation classes at plot locations.

    Reads the vegclass error-matrix CSV (self.vc_errmatrix_file) and
    the stand attribute metadata (self.stand_metadata_file), renders
    the matrix as a landscape table with truly-correct cells shaded
    dark gray and fuzzy-correct cells shaded light gray, and appends
    the explanatory text and VEGCLASS code definitions.

    Fix: ``xrange`` (Python 2 only) replaced with ``range`` -- the
    semantics here are identical and it restores Python 3
    compatibility.

    Returns:
        list: reportlab flowables for this section.
    """
    story = []

    # Import the report styles
    styles = report_styles.get_report_styles()

    # Landscape page: the 11-class matrix is wide
    story = self._make_page_break(story, self.LANDSCAPE)

    # This class is somewhat of a hack, in that it likely only works
    # on rotated paragraphs which fit into the desired cell area
    class RotatedParagraph(p.Paragraph):

        def wrap(self, availHeight, availWidth):
            h, w = p.Paragraph.wrap(
                self, self.canv.stringWidth(self.text),
                self.canv._leading)
            return w, h

        def draw(self):
            self.canv.rotate(90)
            self.canv.translate(0.0, -10.0)
            p.Paragraph.draw(self)

    # Section title
    title_str = '<strong>Local-Scale Accuracy Assessment: '
    title_str += 'Error Matrix for Vegetation Classes at Plot '
    title_str += 'Locations</strong>'
    para = p.Paragraph(title_str, styles['section_style'])
    t = p.Table([[para]], colWidths=[10.0 * u.inch])
    t.setStyle(
        p.TableStyle([
            ('TOPPADDING', (0, 0), (-1, -1), 3),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 3),
            ('BACKGROUND', (0, 0), (-1, -1), '#957348'),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('GRID', (0, 0), (-1, -1), 0.25, colors.black),
        ]))
    story.append(t)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Read in the vegclass error matrix.
    # NOTE(review): mlab.csv2rec/rec_drop_fields were removed from
    # modern matplotlib; consider utilities.csv2rec (used elsewhere in
    # this file) when upgrading -- confirm before switching.
    names = ['P_' + str(x) for x in range(1, 12)]
    names.insert(0, 'OBSERVED')
    names.extend(['TOTAL', 'CORRECT', 'FUZZY_CORRECT'])
    vc_data = mlab.csv2rec(self.vc_errmatrix_file, skiprows=1,
                           names=names)
    vc_data = mlab.rec_drop_fields(vc_data, ['OBSERVED'])

    # Read in the stand attribute metadata
    mp = xsmp.XMLStandMetadataParser(self.stand_metadata_file)

    # Get the class names from the metadata
    vegclass_metadata = mp.get_attribute('VEGCLASS')
    vc_codes = vegclass_metadata.codes

    # Create a list of lists to hold the vegclass table
    vegclass_table = []

    # Add an empty row which will be a span row for the predicted
    # label
    header_row = []
    for i in range(2):
        header_row.append('')
    prd_str = '<strong>Predicted Class</strong>'
    para = p.Paragraph(prd_str, styles['body_style_10_center'])
    header_row.append(para)
    for i in range(len(vc_data) - 1):
        header_row.append('')
    vegclass_table.append(header_row)

    # Add the predicted labels
    summary_labels = ('Total', '% Correct', '% FCorrect')
    header_row = []
    for i in range(2):
        header_row.append('')
    for code in vc_codes:
        # Allow long labels to break at hyphens
        label = re.sub('-', '-<br/>', code.label)
        para = p.Paragraph(label, styles['body_style_10_right'])
        header_row.append(para)
    for label in summary_labels:
        label = re.sub(' ', '<br/>', label)
        para = p.Paragraph(label, styles['body_style_10_right'])
        header_row.append(para)
    vegclass_table.append(header_row)

    # Set a variable to distinguish between plot counts and percents
    # in order to format them differently
    format_break = 11

    # Set the cells which should be blank
    blank_cells = \
        [(11, 12), (11, 13), (12, 11), (12, 13), (13, 11), (13, 12)]

    # Add the data
    for (i, row) in enumerate(vc_data):
        vegclass_row = []
        for (j, elem) in enumerate(row):

            # Blank cells
            if (i, j) in blank_cells:
                elem_str = ''

            # Cells that represent plot counts
            elif i <= format_break and j <= format_break:
                elem_str = '%d' % int(elem)

            # Cells that represent percentages
            else:
                elem_str = '%.1f' % float(elem)
            para = p.Paragraph(elem_str, styles['body_style_10_right'])
            vegclass_row.append(para)

        # Add the rotated observed label at the beginning of the first
        # data row; empty filler (spanned) on subsequent rows
        if i == 0:
            obs_str = '<strong>Observed Class</strong>'
            para = \
                RotatedParagraph(obs_str,
                                 styles['body_style_10_center'])
        else:
            para = ''
        vegclass_row.insert(0, para)

        # Class label for matrix rows; summary label for the trailing
        # Total / % Correct / % FCorrect rows
        if i < len(vc_codes):
            label = vc_codes[i].label
        else:
            index = i - len(vc_codes)
            label = summary_labels[index]
        para = p.Paragraph(label, styles['body_style_10_right'])
        vegclass_row.insert(1, para)

        # Add this row to the table
        vegclass_table.append(vegclass_row)

    # Set up the widths for the table cells
    widths = []
    widths.append(0.3)
    widths.append(0.85)
    for i in range(len(vc_codes)):
        widths.append(0.56)
    for i in range(3):
        widths.append(0.66)
    widths = [x * u.inch for x in widths]

    # Convert the vegclass table into a reportlab table
    t = p.Table(vegclass_table, colWidths=widths)
    t.setStyle(
        p.TableStyle([
            ('SPAN', (0, 0), (1, 1)),
            ('SPAN', (0, 2), (0, -1)),
            ('SPAN', (2, 0), (-1, 0)),
            ('BACKGROUND', (0, 0), (-1, -1), '#f1efe4'),
            ('GRID', (0, 0), (-1, -1), 1, colors.white),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('VALIGN', (0, 2), (0, -1), 'MIDDLE'),
            ('VALIGN', (2, 1), (-1, 1), 'MIDDLE'),
            ('TOPPADDING', (0, 0), (-1, -1), 2),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 3),
        ]))

    # Shade the truly-correct (diagonal) cells dark gray
    correct = {}
    for i in range(len(vc_codes)):
        val = i + 1
        correct[val] = val
    for key in correct:
        val = correct[key]
        t.setStyle(
            p.TableStyle([
                ('BACKGROUND', (key + 1, val + 1),
                    (key + 1, val + 1), '#aaaaaa'),
            ]))

    # Shade the fuzzy-correct cells light gray; each predicted class
    # maps to the observed classes considered 'within one class'
    fuzzy = {
        1: [2],
        2: [1, 3, 5, 8],
        3: [2, 4, 5],
        4: [3, 6, 7],
        5: [2, 3, 6, 8],
        6: [4, 5, 7, 9],
        7: [4, 6, 10, 11],
        8: [2, 5, 9],
        9: [6, 8, 10],
        10: [7, 9, 11],
        11: [7, 10],
    }
    for key in fuzzy:
        for elem in fuzzy[key]:
            t.setStyle(
                p.TableStyle([
                    ('BACKGROUND', (key + 1, elem + 1),
                        (key + 1, elem + 1), '#dddddd'),
                ]))

    # Add this table to the story
    story.append(t)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Explanation and definitions of vegetation class categories
    cell_str = """
        Cell values are model plot counts.  Dark gray cells represent
        plots where the observed class matches the predicted class and
        are included in the percent correct.  Light gray cells
        represent cases where the observed and predicted differ
        slightly (within +/- one class) based on canopy cover,
        hardwood proportion or average stand diameter, and are
        included in the percent fuzzy correct.
    """
    para = p.Paragraph(cell_str, styles['body_style_9'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))
    head_str = '''
        <strong>Vegetation Class (VEGCLASS) Definitions</strong> --
        CANCOV (canopy cover of all live trees), BAH_PROP (proportion
        of hardwood basal area), and QMD_DOM (quadratic mean diameter
        of all dominant and codominant trees).
    '''
    para = p.Paragraph(head_str, styles['body_style_9'])
    story.append(para)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Print out the vegclass code definitions
    for code in vc_codes:
        label = code.label
        desc = self.txt_to_html(code.description)
        doc_str = '<strong>' + label + ':</strong> ' + desc
        para = p.Paragraph(doc_str, styles['body_style_9'])
        story.append(para)

    return story
def _create_story(self):
    """
    Build the data-dictionary section of the report.

    Lists every non-species accuracy attribute flagged for the report
    (from self.stand_metadata_file), with its description, units, and
    (when present) a nested table of code values, then closes with a
    summary note about the species layers attached to the distributed
    ArcInfo grids.

    Returns
    -------
    list
        Reportlab flowables for this section.
    """
    styles = report_styles.get_report_styles()

    # Start this section on a fresh portrait page
    story = self._make_page_break([], self.PORTRAIT)

    # Section title banner
    banner = p.Paragraph('<strong>Data Dictionary</strong>',
                         styles['section_style'])
    banner_table = p.Table([[banner]], colWidths=[7.5 * u.inch])
    banner_table.setStyle(
        p.TableStyle([
            ('TOPPADDING', (0, 0), (-1, -1), 6),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 6),
            ('BACKGROUND', (0, 0), (-1, -1), '#957348'),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('GRID', (0, 0), (-1, -1), 0.25, colors.black),
        ]))
    story.append(banner_table)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Read in the stand attribute metadata
    mp = xsmp.XMLStandMetadataParser(self.stand_metadata_file)

    # Keep only accuracy attributes flagged for this report that are
    # not species-level variables
    attr_names = [
        a.field_name for a in mp.attributes
        if a.accuracy_attr == 1 and a.project_attr == 1
        and a.species_attr == 0
    ]

    # One outer-table row per attribute: [field name, description
    # (+ optional nested code table)]
    dictionary_rows = []
    for attr_name in attr_names:
        metadata = mp.get_attribute(attr_name)
        name_para = p.Paragraph(metadata.field_name,
                                styles['body_style_10'])

        # Append units to the description when meaningful
        description = metadata.description
        if metadata.units != 'none':
            description += ' (' + metadata.units + ')'
        desc_para = p.Paragraph(description, styles['body_style_10'])

        if metadata.codes:
            # Nested table of code value / code description pairs
            code_rows = [
                [p.Paragraph(code.code_value, styles['code_style']),
                 p.Paragraph(self.txt_to_html(code.description),
                             styles['code_style'])]
                for code in metadata.codes
            ]
            code_tbl = p.Table(code_rows,
                               colWidths=[0.75 * u.inch, 4.5 * u.inch])
            code_tbl.setStyle(
                p.TableStyle([
                    ('TOPPADDING', (0, 0), (-1, -1), 3),
                    ('BOTTOMPADDING', (0, 0), (-1, -1), 3),
                    ('BACKGROUND', (0, 0), (-1, -1), '#f7f7ea'),
                    ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
                    ('VALIGN', (0, 0), (-1, -1), 'TOP'),
                    ('GRID', (0, 0), (-1, -1), 0.25, colors.white),
                ]))
            stacked = [[desc_para], [code_tbl]]
        else:
            stacked = [[desc_para]]

        # Stack the description above the (optional) code table
        desc_table = p.Table(stacked, colWidths=[5.25 * u.inch])
        desc_table.setStyle(
            p.TableStyle([
                ('TOPPADDING', (0, 0), (-1, 0), 0),
                ('BOTTOMPADDING', (0, -1), (-1, -1), 0),
                ('LEFTPADDING', (0, 0), (-1, -1), 0),
                ('RIGHTPADDING', (0, 0), (-1, -1), 0),
                ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
                ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ]))
        dictionary_rows.append([name_para, desc_table])

    # Format the dictionary into the outer reportlab table
    outer_table = p.Table(dictionary_rows,
                          colWidths=[1.6 * u.inch, 5.4 * u.inch])
    outer_table.setStyle(
        p.TableStyle([
            ('TOPPADDING', (0, 0), (0, -1), 5),
            ('BOTTOMPADDING', (0, 0), (0, -1), 5),
            ('GRID', (0, 0), (-1, -1), 1, colors.white),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('BACKGROUND', (0, 0), (-1, -1), '#f1efe4'),
        ]))
    story.append(outer_table)

    # Species layers on the distributed ArcInfo grids: summarized
    # here rather than enumerating every code
    spp_str = """
        Individual species abundances are attached to ArcInfo grids
        that LEMMA distributes.  For this model, fields designate
        species codes based on the <link color="#0000ff"
        href="http://plants.usda.gov/">USDA PLANTS database</link>
        from the year 2000, and values represent species
    """
    if self.model_type in ['sppsz', 'sppba']:
        spp_str += ' basal area (m^2/ha).'
    elif self.model_type in ['trecov', 'wdycov']:
        spp_str += ' percent cover.'
    spp_para = p.Paragraph(spp_str, styles['body_style'])
    story.append(p.Spacer(0, 0.1 * u.inch))
    story.append(spp_para)

    return story
def _create_story(self, histogram_files):
    """
    Build the report story for the regional-scale accuracy assessment
    histograms (plot-based inventory area estimates vs. GNN pixel
    counts).

    Parameters
    ----------
    histogram_files : list of str
        Paths to pre-rendered histogram images, laid out two per row.

    Returns
    -------
    list
        Reportlab flowables for this section.
    """
    styles = report_styles.get_report_styles()

    # Start the section on a fresh portrait page
    story = self._make_page_break([], self.PORTRAIT)

    # Section title banner
    title_str = (
        '<strong>Regional-Scale Accuracy Assessment:<br/> Area '
        'Distributions from Regional Inventory Plots vs. '
        'GNN</strong>'
    )
    title_para = p.Paragraph(title_str, styles['section_style'])
    title_table = p.Table([[title_para]], colWidths=[7.5 * u.inch])
    title_table.setStyle(
        p.TableStyle([
            ('TOPPADDING', (0, 0), (-1, -1), 6),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 6),
            ('BACKGROUND', (0, 0), (-1, -1), '#957348'),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('GRID', (0, 0), (-1, -1), 0.25, colors.black),
        ]))
    story.append(title_table)
    story.append(p.Spacer(0, 0.20 * u.inch))

    # Narrative explaining how the two area distributions are derived
    histo_str = '''
        These histograms compare the distributions of land area in
        different vegetation conditions as estimated from a regional,
        sample- (plot-) based inventory (FIA Annual Plots) to model
        predictions from GNN (based on counts of 30-m pixels).
        <br/><br/>
        For the FIA annual plots, the distributions of forest area are
        determined by summing the 'area expansion factors' at the plot
        condition-class level.  The plot-based estimates are subject
        to sampling error, but this is not shown in the graphs due to
        complexities involved.  For more information about the FIA
        Annual inventory sample design, see the <link
        href="http://fia.fs.fed.us/library/database-documentation"
        color="blue">FIADB Users Manual</link>.
        <br/><br/>
        Some plots were not visited on the ground due to denied access
        or hazardous conditions, so the area these plots represent
        cannot be characterized and is included in the bar labeled
        'unsampled.'
        <br/><br/>
        The bars labeled 'nonforest' also require explanation.  For
        GNN, this is the area of nonforest in the map, which is
        derived from ancillary (non-GNN) spatial data sources such as
        the National Land Cover Data (NLCD) or Ecological Systems
        maps from the Gap Analysis Program (GAP).  This mapped
        nonforest is referred to as the GNN 'nonforest mask.'
        <br/><br/>
        For the plots, the 'nonforest' bar represents the nonforest
        area as estimated from the FIA Annual sample.
    '''
    story.append(p.Paragraph(histo_str, styles['body_style']))
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Lay the histogram images out in a two-column grid
    n_cols = 2
    image_rows = []
    current_row = []
    for count, image_file in enumerate(histogram_files, start=1):
        current_row.append(
            p.Image(image_file, 3.4 * u.inch, 3.0 * u.inch))
        if count % n_cols == 0:
            image_rows.append(current_row)
            current_row = []

    # Pad out a trailing partial row with empty paragraphs so the
    # table stays rectangular
    if current_row:
        while len(current_row) < n_cols:
            current_row.append(p.Paragraph('', styles['body_style']))
        image_rows.append(current_row)

    # Style this into a reportlab table and add to the story
    cell_width = 3.75 * u.inch
    image_table = p.Table(image_rows, colWidths=[cell_width, cell_width])
    image_table.setStyle(
        p.TableStyle([
            ('ALIGNMENT', (0, 0), (-1, -1), 'CENTER'),
            ('VALIGN', (0, 0), (-1, -1), 'CENTER'),
            ('TOPPADDING', (0, 0), (-1, -1), 6.0),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 6.0),
        ]))
    story.append(image_table)

    return story
def _create_story(self):
    """Build the 'Species Accuracy at Plot Locations' report section.

    Creates a portrait page with a section banner, an explanation of
    Cohen's kappa, and a table of per-species accuracy statistics
    (prevalence, presence/absence plot counts, and kappa) read from
    ``self.species_accuracy_file``.  Species names are resolved through
    the report metadata file when one is available.

    Returns
    -------
    list
        The list of ReportLab flowables ("story") for this section.
    """
    # Set up an empty list to hold the story
    story = []

    # Import the report styles
    styles = report_styles.get_report_styles()

    # Create a page break
    story = self._make_page_break(story, self.PORTRAIT)

    # Section title banner
    title_str = '<strong>Local-Scale Accuracy Assessment:<br/>'
    title_str += 'Species Accuracy at Plot Locations'
    title_str += '</strong>'
    para = p.Paragraph(title_str, styles['section_style'])
    t = p.Table([[para]], colWidths=[7.5 * u.inch])
    t.setStyle(
        p.TableStyle([
            ('TOPPADDING', (0, 0), (-1, -1), 6),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 6),
            ('BACKGROUND', (0, 0), (-1, -1), '#957348'),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('GRID', (0, 0), (-1, -1), 0.25, colors.black),
        ]))
    story.append(t)
    story.append(p.Spacer(0, 0.2 * u.inch))

    # Kappa explanation: prose, then the indented equation, then the
    # term definitions and abbreviation key
    kappa_str = '''
        Cohen's kappa coefficient (Cohen, 1960) is a statistical measure
        of reliability, accounting for agreement occurring by chance.
        The equation for kappa is:
    '''
    para = p.Paragraph(kappa_str, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.05 * u.inch))

    kappa_str = '''
        kappa = (Pr(a) - Pr(e)) / (1.0 - Pr(e))
    '''
    para = p.Paragraph(kappa_str, styles['indented'])
    story.append(para)
    story.append(p.Spacer(0, 0.05 * u.inch))

    # BUGFIX: corrected user-visible typo 'ommission' -> 'omission'
    kappa_str = '''
        where Pr(a) is the relative observed agreement among raters, and
        Pr(e) is the probability that agreement is due to chance.<br/><br/>
        <strong>Abbreviations Used:</strong><br/>
        OP/PP = Observed Present / Predicted Present<br/>
        OA/PP = Observed Absent / Predicted Present
        (errors of commission)<br/>
        OP/PA = Observed Present / Predicted Absent
        (errors of omission)<br/>
        OA/PA = Observed Absent / Predicted Absent
    '''
    para = p.Paragraph(kappa_str, styles['body_style'])
    story.append(para)
    story.append(p.Spacer(0, 0.2 * u.inch))

    # Create a list of lists to hold the species accuracy information
    species_table = []

    # Header row: species name, prevalence, 2x2 count grid, kappa
    header_row = []

    spp_str = '<strong>Species PLANTS Code<br/>'
    spp_str += 'Scientific Name / Common Name</strong>'
    para = p.Paragraph(spp_str, styles['body_style_10'])
    header_row.append(para)

    spp_str = '<strong>Species prevalence</strong>'
    para = p.Paragraph(spp_str, styles['body_style_10'])
    header_row.append(para)

    # The four count labels are arranged as a nested 2x2 table so they
    # line up with the per-species count grids below
    p1 = p.Paragraph('<strong>OP/PP</strong>', styles['body_style_10_right'])
    p2 = p.Paragraph('<strong>OP/PA</strong>', styles['body_style_10_right'])
    p3 = p.Paragraph('<strong>OA/PP</strong>', styles['body_style_10_right'])
    p4 = p.Paragraph('<strong>OA/PA</strong>', styles['body_style_10_right'])
    header_cells = [[p1, p2], [p3, p4]]
    t = p.Table(header_cells, colWidths=[0.75 * u.inch, 0.75 * u.inch])
    t.setStyle(
        p.TableStyle([
            ('GRID', (0, 0), (-1, -1), 1, colors.white),
            ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('TOPPADDING', (0, 0), (-1, -1), 2),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 2),
        ]))
    header_row.append(t)

    kappa_str = '<strong>Kappa coefficient</strong>'
    para = p.Paragraph(kappa_str, styles['body_style_10'])
    header_row.append(para)
    species_table.append(header_row)

    # Open the species accuracy file into a recarray
    spp_data = utilities.csv2rec(self.species_accuracy_file)

    # Read in the stand attribute metadata
    mp = xsmp.XMLStandMetadataParser(self.stand_metadata_file)

    # Read in the report metadata if it exists
    if self.report_metadata_file:
        rmp = xrmp.XMLReportMetadataParser(self.report_metadata_file)
    else:
        rmp = None

    # Subset the attributes to just species (excluding non-tally fields)
    attrs = []
    for attr in mp.attributes:
        if attr.species_attr == 1 and 'NOTALY' not in attr.field_name:
            attrs.append(attr.field_name)

    # Iterate over the species and print out the statistics
    for spp in attrs:

        # Empty row to hold the formatted output
        species_row = []

        # Get the scientific and common names from the report metadata
        # if it exists; otherwise, just use the species symbol
        if rmp is not None:
            # Strip off any suffix if it exists
            # NOTE(review): str.split()[0] cannot raise IndexError;
            # presumably rmp.get_species raises it for unknown species
            # symbols -- confirm against XMLReportMetadataParser
            try:
                spp_plain = spp.split('_')[0]
                spp_info = rmp.get_species(spp_plain)
                spp_str = spp_info.spp_symbol + '<br/>'
                spp_str += spp_info.scientific_name + ' / '
                spp_str += spp_info.common_name
            except IndexError:
                spp_str = spp
        else:
            spp_str = spp
        para = p.Paragraph(spp_str, styles['body_style_10'])
        species_row.append(para)

        # Get the statistical information for this species
        data = spp_data[spp_data.SPECIES == spp][0]
        counts = [data.OP_PP, data.OP_PA, data.OA_PP, data.OA_PA]
        prevalence = data.PREVALENCE
        kappa = data.KAPPA

        # Species prevalence
        prevalence_str = '%.4f' % prevalence
        para = p.Paragraph(prevalence_str, styles['body_style_10_right'])
        species_row.append(para)

        # Capture the plot counts in an inner 2x2 table matching the
        # header layout
        count_cells = []
        count_row = []
        for i in range(0, 4):
            para = p.Paragraph(
                '%d' % counts[i], styles['body_style_10_right'])
            count_row.append(para)
            if i % 2 == 1:
                count_cells.append(count_row)
                count_row = []
        t = p.Table(count_cells, colWidths=[0.75 * u.inch, 0.75 * u.inch])
        t.setStyle(
            p.TableStyle([
                ('GRID', (0, 0), (-1, -1), 1, colors.white),
                ('ALIGNMENT', (0, 0), (-1, -1), 'LEFT'),
                ('VALIGN', (0, 0), (-1, -1), 'TOP'),
                ('TOPPADDING', (0, 0), (-1, -1), 2),
                ('BOTTOMPADDING', (0, 0), (-1, -1), 2),
            ]))
        species_row.append(t)

        # Print out the kappa statistic
        kappa_str = '%.4f' % kappa
        para = p.Paragraph(kappa_str, styles['body_style_10_right'])
        species_row.append(para)

        # Push this row to the master species table
        species_table.append(species_row)

    # Style this into a reportlab table and add to the story.
    # Column-by-column styling: col 0 = names (left/top), col 1 =
    # prevalence (right, middle below header), col 2 = nested count
    # tables (no padding), col 3 = kappa (right, middle below header).
    col_widths = [(x * u.inch) for x in [4.0, 0.75, 1.5, 0.75]]
    t = p.Table(species_table, colWidths=col_widths)
    t.setStyle(
        p.TableStyle([
            ('BACKGROUND', (0, 0), (-1, -1), '#f1efe4'),
            ('GRID', (0, 0), (-1, -1), 2, colors.white),
            ('TOPPADDING', (0, 0), (0, -1), 2),
            ('BOTTOMPADDING', (0, 0), (0, -1), 2),
            ('LEFTPADDING', (0, 0), (0, -1), 6),
            ('RIGHTPADDING', (0, 0), (0, -1), 6),
            ('ALIGNMENT', (0, 0), (0, -1), 'LEFT'),
            ('VALIGN', (0, 0), (0, -1), 'TOP'),
            ('TOPPADDING', (1, 0), (1, -1), 2),
            ('BOTTOMPADDING', (1, 0), (1, -1), 2),
            ('LEFTPADDING', (1, 0), (1, -1), 6),
            ('RIGHTPADDING', (1, 0), (1, -1), 6),
            ('ALIGNMENT', (1, 0), (1, -1), 'RIGHT'),
            ('VALIGN', (1, 0), (1, 0), 'TOP'),
            ('VALIGN', (1, 1), (1, -1), 'MIDDLE'),
            ('TOPPADDING', (2, 0), (2, -1), 0),
            ('BOTTOMPADDING', (2, 0), (2, -1), 0),
            ('LEFTPADDING', (2, 0), (2, -1), 0),
            ('RIGHTPADDING', (2, 0), (2, -1), 0),
            ('ALIGNMENT', (2, 0), (2, -1), 'LEFT'),
            ('VALIGN', (2, 0), (2, -1), 'TOP'),
            ('TOPPADDING', (3, 0), (3, -1), 2),
            ('BOTTOMPADDING', (3, 0), (3, -1), 2),
            ('LEFTPADDING', (3, 0), (3, -1), 6),
            ('RIGHTPADDING', (3, 0), (3, -1), 6),
            ('ALIGNMENT', (3, 0), (3, -1), 'RIGHT'),
            ('VALIGN', (3, 0), (3, 0), 'TOP'),
            ('VALIGN', (3, 1), (3, -1), 'MIDDLE'),
        ]))
    story.append(t)
    story.append(p.Spacer(0, 0.1 * u.inch))

    # Closing note about rare species excluded from the model
    rare_species_str = """
        Note that some very rare species do not appear in this accuracy
        report, because these species were not included when building
        the initial ordination model.  The full set of species is
        available upon request.
    """
    para = p.Paragraph(rare_species_str, styles['body_style'])
    story.append(para)

    # Return this story
    return story