def test_csi_from_sr_and_pod(self):
    """Ensures correct output from csi_from_sr_and_pod."""

    computed_csi_matrix = model_eval.csi_from_sr_and_pod(
        SUCCESS_RATIO_MATRIX, POD_MATRIX)

    # Element-wise comparison against the expected matrix, within TOLERANCE.
    self.assertTrue(numpy.allclose(
        computed_csi_matrix, CSI_MATRIX, atol=TOLERANCE
    ))
def plot_performance_diagram(
        axes_object, pod_by_threshold, success_ratio_by_threshold,
        line_colour=DEFAULT_PERFORMANCE_COLOUR,
        line_width=DEFAULT_PERFORMANCE_WIDTH,
        bias_line_colour=DEFAULT_FREQ_BIAS_COLOUR,
        bias_line_width=DEFAULT_FREQ_BIAS_WIDTH):
    """Plots performance diagram.

    T = number of binarization thresholds

    For the definition of a "binarization threshold" and the role they play in
    performance diagrams, see
    `model_evaluation.get_points_in_performance_diagram`.

    :param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
    :param pod_by_threshold: length-T numpy array of POD (probability of
        detection) values.
    :param success_ratio_by_threshold: length-T numpy array of success ratios.
    :param line_colour: Colour (in any format accepted by `matplotlib.colors`).
    :param line_width: Line width (real positive number).
    :param bias_line_colour: Colour of contour lines for frequency bias.
    :param bias_line_width: Width of contour lines for frequency bias.
    """

    # Input validation.  NaN entries are permitted, since some thresholds can
    # yield undefined POD or success ratio.
    error_checking.assert_is_numpy_array(pod_by_threshold, num_dimensions=1)
    error_checking.assert_is_geq_numpy_array(
        pod_by_threshold, 0., allow_nan=True)
    error_checking.assert_is_leq_numpy_array(
        pod_by_threshold, 1., allow_nan=True)

    num_thresholds = len(pod_by_threshold)
    error_checking.assert_is_numpy_array(
        success_ratio_by_threshold,
        exact_dimensions=numpy.array([num_thresholds]))
    error_checking.assert_is_geq_numpy_array(
        success_ratio_by_threshold, 0., allow_nan=True)
    error_checking.assert_is_leq_numpy_array(
        success_ratio_by_threshold, 1., allow_nan=True)

    # Background: filled CSI contours over the full success-ratio/POD grid.
    sr_grid, pod_grid = model_eval.get_sr_pod_grid()
    csi_grid = model_eval.csi_from_sr_and_pod(sr_grid, pod_grid)
    bias_grid = model_eval.frequency_bias_from_sr_and_pod(sr_grid, pod_grid)

    csi_colour_map, csi_colour_norm = _get_csi_colour_scheme()

    pyplot.contourf(
        sr_grid, pod_grid, csi_grid, LEVELS_FOR_CSI_CONTOURS,
        cmap=csi_colour_map, norm=csi_colour_norm, vmin=0., vmax=1.,
        axes=axes_object)

    colour_bar_object = plotting_utils.plot_colour_bar(
        axes_object_or_matrix=axes_object, data_matrix=csi_grid,
        colour_map_object=csi_colour_map,
        colour_norm_object=csi_colour_norm,
        orientation_string='vertical', extend_min=False, extend_max=False)
    colour_bar_object.set_label('CSI (critical success index)')

    # Dashed frequency-bias contours, all in one colour (one tuple entry per
    # contour level).
    bias_colour_tuple = plotting_utils.colour_from_numpy_to_tuple(
        bias_line_colour)
    bias_colours_per_level = (
        (bias_colour_tuple,) * len(LEVELS_FOR_FREQ_BIAS_CONTOURS)
    )

    bias_contour_object = pyplot.contour(
        sr_grid, pod_grid, bias_grid, LEVELS_FOR_FREQ_BIAS_CONTOURS,
        colors=bias_colours_per_level, linewidths=bias_line_width,
        linestyles='dashed', axes=axes_object)
    pyplot.clabel(
        bias_contour_object, inline=True,
        inline_spacing=PIXEL_PADDING_FOR_FREQ_BIAS_LABELS,
        fmt=STRING_FORMAT_FOR_FREQ_BIAS_LABELS, fontsize=FONT_SIZE)

    # Plot the actual performance curve, skipping NaN points.
    nan_flags = numpy.logical_or(
        numpy.isnan(success_ratio_by_threshold),
        numpy.isnan(pod_by_threshold)
    )

    if not numpy.all(nan_flags):
        real_indices = numpy.where(~nan_flags)[0]
        axes_object.plot(
            success_ratio_by_threshold[real_indices],
            pod_by_threshold[real_indices],
            color=plotting_utils.colour_from_numpy_to_tuple(line_colour),
            linestyle='solid', linewidth=line_width)

    axes_object.set_xlabel('Success ratio (1 - FAR)')
    axes_object.set_ylabel('POD (probability of detection)')
    axes_object.set_xlim(0., 1.)
    axes_object.set_ylim(0., 1.)
def plot_performance_diagram(
        axes_object, pod_by_threshold, success_ratio_by_threshold,
        line_colour=PERF_DIAGRAM_COLOUR, plot_background=True):
    """Plots performance diagram.

    T = number of binarization thresholds

    For the definition of a "binarization threshold" and the role they play in
    performance diagrams, see
    `model_evaluation.get_points_in_performance_diagram`.

    :param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
    :param pod_by_threshold: length-T numpy array of POD (probability of
        detection) values.
    :param success_ratio_by_threshold: length-T numpy array of success ratios.
    :param line_colour: Line colour.
    :param plot_background: Boolean flag.  If True, will plot background
        (frequency-bias and CSI contours).
    :return: line_handle: Line handle for performance curve (None if every
        point is NaN, in which case no curve is plotted).
    """

    # Input validation.  NaN entries are permitted, since some thresholds can
    # yield undefined POD or success ratio.
    error_checking.assert_is_numpy_array(pod_by_threshold, num_dimensions=1)
    error_checking.assert_is_geq_numpy_array(
        pod_by_threshold, 0., allow_nan=True)
    error_checking.assert_is_leq_numpy_array(
        pod_by_threshold, 1., allow_nan=True)

    num_thresholds = len(pod_by_threshold)
    expected_dim = numpy.array([num_thresholds], dtype=int)
    error_checking.assert_is_numpy_array(
        success_ratio_by_threshold, exact_dimensions=expected_dim)
    error_checking.assert_is_geq_numpy_array(
        success_ratio_by_threshold, 0., allow_nan=True)
    error_checking.assert_is_leq_numpy_array(
        success_ratio_by_threshold, 1., allow_nan=True)

    error_checking.assert_is_boolean(plot_background)

    if plot_background:
        # Background: filled CSI contours plus dashed frequency-bias contours
        # over the full success-ratio/POD grid.
        success_ratio_matrix, pod_matrix = model_eval.get_sr_pod_grid()
        csi_matrix = model_eval.csi_from_sr_and_pod(
            success_ratio_array=success_ratio_matrix, pod_array=pod_matrix)
        frequency_bias_matrix = model_eval.frequency_bias_from_sr_and_pod(
            success_ratio_array=success_ratio_matrix, pod_array=pod_matrix)

        this_colour_map_object, this_colour_norm_object = (
            _get_csi_colour_scheme()
        )

        pyplot.contourf(
            success_ratio_matrix, pod_matrix, csi_matrix, CSI_LEVELS,
            cmap=this_colour_map_object, norm=this_colour_norm_object,
            vmin=0., vmax=1., axes=axes_object)

        colour_bar_object = plotting_utils.plot_colour_bar(
            axes_object_or_matrix=axes_object, data_matrix=csi_matrix,
            colour_map_object=this_colour_map_object,
            colour_norm_object=this_colour_norm_object,
            orientation_string='vertical', extend_min=False,
            extend_max=False, fraction_of_axis_length=0.8)
        colour_bar_object.set_label('CSI (critical success index)')

        # One colour entry per contour level, all the same colour.
        bias_colour_tuple = plotting_utils.colour_from_numpy_to_tuple(
            FREQ_BIAS_COLOUR)
        bias_colours_2d_tuple = (bias_colour_tuple,) * len(FREQ_BIAS_LEVELS)

        bias_contour_object = pyplot.contour(
            success_ratio_matrix, pod_matrix, frequency_bias_matrix,
            FREQ_BIAS_LEVELS, colors=bias_colours_2d_tuple,
            linewidths=FREQ_BIAS_WIDTH, linestyles='dashed',
            axes=axes_object)
        pyplot.clabel(
            bias_contour_object, inline=True,
            inline_spacing=FREQ_BIAS_PADDING, fmt=FREQ_BIAS_STRING_FORMAT,
            fontsize=FONT_SIZE)

    # Plot the actual performance curve, skipping NaN points.  If every point
    # is NaN, return None instead of a line handle.
    nan_flags = numpy.logical_or(
        numpy.isnan(success_ratio_by_threshold),
        numpy.isnan(pod_by_threshold)
    )

    if numpy.all(nan_flags):
        line_handle = None
    else:
        real_indices = numpy.where(numpy.invert(nan_flags))[0]
        line_handle = axes_object.plot(
            success_ratio_by_threshold[real_indices],
            pod_by_threshold[real_indices],
            color=plotting_utils.colour_from_numpy_to_tuple(line_colour),
            linestyle='solid', linewidth=PERF_DIAGRAM_WIDTH
        )[0]

    axes_object.set_xlabel('Success ratio (1 - FAR)')
    axes_object.set_ylabel('POD (probability of detection)')
    axes_object.set_xlim(0., 1.)
    axes_object.set_ylim(0., 1.)

    return line_handle
def _plot_perf_diagrams(evaluation_tables, model_names, best_threshold_indices,
                        marker_indices_by_model, output_file_name,
                        plot_best_thresholds, confidence_level=None):
    """Plots performance diagrams (one for each model).

    :param evaluation_tables: See doc for `_plot_roc_curves`.
    :param model_names: Same.
    :param best_threshold_indices: Same.
    :param marker_indices_by_model: Same.
    :param output_file_name: Same.
    :param plot_best_thresholds: Same.
    :param confidence_level: Same.
    """

    num_models = len(evaluation_tables)

    # Per-model stacks of bootstrap replicates: each matrix is presumably
    # (num_bootstrap_reps x num_thresholds) -- TODO confirm against
    # `_plot_roc_curves` docs.
    pod_matrices = [None] * num_models
    success_ratio_matrices = [None] * num_models
    legend_strings = [None] * num_models
    num_bootstrap_reps = None

    for i in range(num_models):
        # Stack one row per bootstrap replicate.
        pod_matrices[i] = numpy.vstack(tuple(
            evaluation_tables[i][
                model_eval.POD_BY_THRESHOLD_KEY].values.tolist()
        ))
        success_ratio_matrices[i] = numpy.vstack(tuple(
            evaluation_tables[i][
                model_eval.SR_BY_THRESHOLD_KEY].values.tolist()
        ))

        # Number of bootstrap replicates is taken from the first model.
        if num_bootstrap_reps is None:
            num_bootstrap_reps = pod_matrices[i].shape[0]

        # NOTE(review): this value is computed but the cross-model consistency
        # check below is deliberately disabled -- models with differing
        # replicate counts will pass through silently.
        this_num_bootstrap_reps = pod_matrices[i].shape[0]
        # assert num_bootstrap_reps == this_num_bootstrap_reps

        if num_bootstrap_reps > 1:
            # Bootstrapped case: report AUPD as a confidence interval.
            this_min_aupd, this_max_aupd = (
                bootstrapping.get_confidence_interval(
                    stat_values=evaluation_tables[i][
                        model_eval.AUPD_KEY].values,
                    confidence_level=confidence_level)
            )

            legend_strings[i] = '{0:s}: AUPD = {1:.3f} to {2:.3f}'.format(
                model_names[i], this_min_aupd, this_max_aupd)
        else:
            # Single replicate: report the point estimate of AUPD.
            this_aupd = evaluation_tables[i][model_eval.AUPD_KEY].values[0]
            legend_strings[i] = '{0:s}: AUPD = {1:.3f}'.format(
                model_names[i], this_aupd)

        print(legend_strings[i])

    figure_object, axes_object = pyplot.subplots(
        1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES))

    legend_handles = [None] * num_models
    num_colours = COLOUR_MATRIX.shape[0]

    for i in range(num_models):
        # Cycle through the colour table if there are more models than
        # colours.
        this_colour = COLOUR_MATRIX[numpy.mod(i, num_colours), ...]

        if num_bootstrap_reps == 1:
            # Single-replicate case: plot the one curve directly.  Background
            # (CSI/frequency-bias contours) is drawn only for the first model.
            legend_handles[i] = model_eval_plotting.plot_performance_diagram(
                axes_object=axes_object,
                pod_by_threshold=pod_matrices[i][0, :],
                success_ratio_by_threshold=success_ratio_matrices[i][0, :],
                line_colour=this_colour, plot_background=i == 0)

            # Coordinates for the best-threshold marker and the optional
            # per-threshold markers.
            this_x = success_ratio_matrices[i][0, best_threshold_indices[i]]
            this_y = pod_matrices[i][0, best_threshold_indices[i]]
            these_x = success_ratio_matrices[i][0, marker_indices_by_model[i]]
            these_y = pod_matrices[i][0, marker_indices_by_model[i]]
        else:
            # Bootstrapped case: plot the confidence envelope; marker
            # coordinates come from the CI mean curve.
            this_ci_bottom_dict, this_ci_mean_dict, this_ci_top_dict = (
                _get_ci_one_model(
                    evaluation_table=evaluation_tables[i],
                    for_roc_curve=False, confidence_level=confidence_level)
            )

            legend_handles[i] = (
                model_eval_plotting.plot_bootstrapped_performance_diagram(
                    axes_object=axes_object,
                    ci_bottom_dict=this_ci_bottom_dict,
                    ci_mean_dict=this_ci_mean_dict,
                    ci_top_dict=this_ci_top_dict,
                    line_colour=this_colour, plot_background=i == 0)
            )

            this_x = this_ci_mean_dict[model_eval.SR_BY_THRESHOLD_KEY][
                best_threshold_indices[i]]
            this_y = this_ci_mean_dict[model_eval.POD_BY_THRESHOLD_KEY][
                best_threshold_indices[i]]
            these_x = this_ci_mean_dict[model_eval.SR_BY_THRESHOLD_KEY][
                marker_indices_by_model[i]]
            these_y = this_ci_mean_dict[model_eval.POD_BY_THRESHOLD_KEY][
                marker_indices_by_model[i]]

        # CSI at the best probability threshold, derived from (SR, POD).
        this_csi = model_eval.csi_from_sr_and_pod(
            success_ratio_array=numpy.array([this_x]),
            pod_array=numpy.array([this_y])
        )[0]

        print((
            'POD, success ratio, and CSI at best probability threshold = '
            '{0:.3f}, {1:.3f}, {2:.3f}'
        ).format(this_y, this_x, this_csi))

        if plot_best_thresholds:
            # Marker at the best probability threshold.
            axes_object.plot(
                this_x, this_y, linestyle='None', marker=MARKER_TYPE,
                markersize=MARKER_SIZE, markeredgewidth=MARKER_EDGE_WIDTH,
                markerfacecolor=this_colour, markeredgecolor=this_colour)

        # NOTE(review): disabled plotting of additional per-threshold markers
        # (`these_x`/`these_y` are only used here).
        # axes_object.plot(
        #     these_x, these_y, linestyle='None', marker='o',
        #     markersize=12, markeredgewidth=MARKER_EDGE_WIDTH,
        #     markerfacecolor=this_colour, markeredgecolor=this_colour
        # )

    main_legend_handle = axes_object.legend(
        legend_handles, legend_strings, loc='upper right',
        bbox_to_anchor=(1, 1), fancybox=True, shadow=False,
        framealpha=0.5, ncol=1)

    # Thicken legend lines for readability.
    for this_object in main_legend_handle.legendHandles:
        this_object.set_linewidth(5.)

    axes_object.set_title('Performance diagram')
    plotting_utils.label_axes(
        axes_object=axes_object, label_string='(b)',
        y_coord_normalized=1.025)
    axes_object.set_aspect('equal')

    print('Saving figure to: "{0:s}"...'.format(output_file_name))
    figure_object.savefig(
        output_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight')
    pyplot.close(figure_object)