Example #1
def _read_one_file(evaluation_file_name, heights_m_agl):
    """Reads one evaluation file.

    B = number of bootstrap replicates
    H = number of heights for heating rate

    :param evaluation_file_name: Path to input file (will be read by
        `evaluation.read_file`).
    :param heights_m_agl: See documentation at top of file.
    :return: net_flux_msess_values: length-B numpy array of MSE skill scores for
        net flux.
    :return: heating_rate_msess_matrix: H-by-B numpy array of MSE skill scores
        for heating rate.
    :return: net_flux_biases_w_m02: length-B numpy array of biases (W m^-2) for
        net flux.
    :return: heating_rate_bias_matrix_k_day01: H-by-B numpy array of biases
        (K day^-1) for heating rate.
    """

    print('Reading data from: "{0:s}"...'.format(evaluation_file_name))
    result_table_xarray = evaluation.read_file(evaluation_file_name)

    net_flux_index = numpy.where(
        result_table_xarray.coords[evaluation.AUX_TARGET_FIELD_DIM].values ==
        evaluation.NET_FLUX_NAME)[0][0]

    net_flux_msess_values = (result_table_xarray[
        evaluation.AUX_MSE_SKILL_KEY].values[net_flux_index, :])
    net_flux_biases_w_m02 = (
        result_table_xarray[evaluation.AUX_BIAS_KEY].values[net_flux_index, :])

    heating_rate_index = numpy.where(
        result_table_xarray.coords[evaluation.VECTOR_FIELD_DIM].values ==
        example_utils.SHORTWAVE_HEATING_RATE_NAME)[0][0]

    num_heights = len(heights_m_agl)
    num_bootstrap_reps = len(net_flux_biases_w_m02)
    heating_rate_msess_matrix = numpy.full((num_heights, num_bootstrap_reps),
                                           numpy.nan)
    heating_rate_bias_matrix_k_day01 = numpy.full(
        (num_heights, num_bootstrap_reps), numpy.nan)

    for k in range(num_heights):
        these_diffs = numpy.absolute(
            result_table_xarray.coords[evaluation.HEIGHT_DIM].values -
            heights_m_agl[k])
        this_height_index = numpy.where(these_diffs <= TOLERANCE)[0][0]

        heating_rate_msess_matrix[k, :] = (result_table_xarray[
            evaluation.VECTOR_MSE_SKILL_KEY].values[this_height_index,
                                                    heating_rate_index, :])

        heating_rate_bias_matrix_k_day01[k, :] = (result_table_xarray[
            evaluation.VECTOR_BIAS_KEY].values[this_height_index,
                                               heating_rate_index, :])

    return (net_flux_msess_values, heating_rate_msess_matrix,
            net_flux_biases_w_m02, heating_rate_bias_matrix_k_day01)
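The pattern to note here is that slices are pulled out of the evaluation table by matching coordinate values (field names and heights within a tolerance), not by assuming a fixed ordering. Below is a minimal, self-contained sketch of the same pattern on a toy xarray Dataset; the dimension and variable names (height_m_agl, field_name, mse_skill_score) are made up for illustration and are not the names produced by `evaluation.read_file`.

import numpy
import xarray

TOLERANCE = 1e-6

heights_m_agl = numpy.array([10., 500., 2000.])
field_names = ['shortwave_heating_rate_k_day01']
num_bootstrap_reps = 5

# Toy table with dimensions (height, field, bootstrap replicate).
toy_table = xarray.Dataset(
    data_vars={
        'mse_skill_score': (
            ('height_m_agl', 'field_name', 'bootstrap_replicate'),
            numpy.random.uniform(size=(3, 1, num_bootstrap_reps))
        )
    },
    coords={'height_m_agl': heights_m_agl, 'field_name': field_names}
)

# Find indices by matching coordinate values, as in `_read_one_file`.
field_index = numpy.where(
    toy_table.coords['field_name'].values ==
    'shortwave_heating_rate_k_day01'
)[0][0]
height_diffs = numpy.absolute(
    toy_table.coords['height_m_agl'].values - 500.)
height_index = numpy.where(height_diffs <= TOLERANCE)[0][0]

# Length-B vector of skill scores for one field at one height.
msess_values = toy_table['mse_skill_score'].values[
    height_index, field_index, :]
print(msess_values.shape)  # (5,)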
Example #2
def _read_files_one_split(evaluation_file_names):
    """Reads evaluation files for one time split.

    T = number of time chunks in this split

    :param evaluation_file_names: length-T list of paths to input files.
    :return: evaluation_tables_xarray: length-T list of xarray tables with
        results.
    """

    num_time_chunks = len(evaluation_file_names)
    evaluation_tables_xarray = []

    for i in range(num_time_chunks):
        print('Reading data from: "{0:s}"...'.format(evaluation_file_names[i]))

        this_table = _augment_eval_table(
            evaluation.read_file(evaluation_file_names[i]))
        evaluation_tables_xarray.append(this_table)

    return evaluation_tables_xarray
Example #3
def _run(input_file_names, output_file_name):
    """Concatenates evaluation files with different bootstrap replicates.

    This is effectively the main method.

    :param input_file_names: See documentation at top of file.
    :param output_file_name: Same.
    """

    result_tables_xarray = []
    num_bootstrap_reps_read = 0

    for this_file_name in input_file_names:
        print('Reading data from: "{0:s}"...'.format(this_file_name))
        this_result_table_xarray = evaluation.read_file(this_file_name)

        num_bootstrap_reps_new = len(this_result_table_xarray.coords[
            evaluation.BOOTSTRAP_REP_DIM].values)
        these_indices = numpy.linspace(num_bootstrap_reps_read,
                                       num_bootstrap_reps_read +
                                       num_bootstrap_reps_new - 1,
                                       num=num_bootstrap_reps_new,
                                       dtype=int)
        this_result_table_xarray = this_result_table_xarray.assign_coords(
            {evaluation.BOOTSTRAP_REP_DIM: these_indices})

        result_tables_xarray.append(this_result_table_xarray)
        num_bootstrap_reps_read += num_bootstrap_reps_new

    print(SEPARATOR_STRING)

    result_table_xarray = xarray.concat(objs=result_tables_xarray,
                                        dim=evaluation.BOOTSTRAP_REP_DIM,
                                        data_vars='minimal')
    print(result_table_xarray)

    print('Writing data to: "{0:s}"...'.format(output_file_name))
    evaluation.write_file(result_table_xarray=result_table_xarray,
                          netcdf_file_name=output_file_name)
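The key step in this script is renumbering the bootstrap-replicate coordinate in each table so the indices do not collide before concatenation. Below is a minimal, self-contained sketch of that step with toy xarray datasets; the dimension name bootstrap_replicate and variable name bias are made up for illustration.

import numpy
import xarray

# Two toy tables that both use bootstrap indices 0..2.
tables = [
    xarray.Dataset(
        data_vars={'bias': (('bootstrap_replicate',),
                            numpy.random.normal(size=3))},
        coords={'bootstrap_replicate': numpy.arange(3)}
    )
    for _ in range(2)
]

# Renumber the coordinate in each table so the indices do not collide,
# then concatenate along the bootstrap dimension, as in `_run` above.
num_reps_read = 0
for k in range(len(tables)):
    num_reps_new = len(tables[k].coords['bootstrap_replicate'].values)
    new_indices = numpy.arange(
        num_reps_read, num_reps_read + num_reps_new, dtype=int)
    tables[k] = tables[k].assign_coords(
        {'bootstrap_replicate': new_indices})
    num_reps_read += num_reps_new

combined_table = xarray.concat(
    objs=tables, dim='bootstrap_replicate', data_vars='minimal')
print(combined_table.coords['bootstrap_replicate'].values)  # [0 1 2 3 4 5]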
Example #4
def _run(evaluation_file_names, line_styles, line_colour_strings,
         set_descriptions_verbose, confidence_level, use_log_scale,
         plot_by_height, output_dir_name):
    """Plots model evaluation.

    This is effectively the main method.

    :param evaluation_file_names: See documentation at top of file.
    :param line_styles: Same.
    :param line_colour_strings: Same.
    :param set_descriptions_verbose: Same.
    :param confidence_level: Same.
    :param use_log_scale: Same.
    :param plot_by_height: Same.
    :param output_dir_name: Same.
    """

    # Check input args.
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)

    if confidence_level < 0:
        confidence_level = None

    if confidence_level is not None:
        error_checking.assert_is_geq(confidence_level, 0.9)
        error_checking.assert_is_less_than(confidence_level, 1.)

    num_evaluation_sets = len(evaluation_file_names)
    expected_dim = numpy.array([num_evaluation_sets], dtype=int)

    error_checking.assert_is_string_list(line_styles)
    error_checking.assert_is_numpy_array(numpy.array(line_styles),
                                         exact_dimensions=expected_dim)

    error_checking.assert_is_string_list(set_descriptions_verbose)
    error_checking.assert_is_numpy_array(numpy.array(set_descriptions_verbose),
                                         exact_dimensions=expected_dim)

    set_descriptions_verbose = [
        s.replace('_', ' ') for s in set_descriptions_verbose
    ]
    set_descriptions_abbrev = [
        s.lower().replace(' ', '-') for s in set_descriptions_verbose
    ]

    error_checking.assert_is_string_list(line_colour_strings)
    error_checking.assert_is_numpy_array(numpy.array(line_colour_strings),
                                         exact_dimensions=expected_dim)
    line_colours = [
        numpy.fromstring(s, dtype=float, sep='_') / 255
        for s in line_colour_strings
    ]

    for i in range(num_evaluation_sets):
        error_checking.assert_is_numpy_array(line_colours[i],
                                             exact_dimensions=numpy.array(
                                                 [3], dtype=int))
        error_checking.assert_is_geq_numpy_array(line_colours[i], 0.)
        error_checking.assert_is_leq_numpy_array(line_colours[i], 1.)

    # Read files.
    evaluation_tables_xarray = [xarray.Dataset()] * num_evaluation_sets
    prediction_dicts = [dict()] * num_evaluation_sets

    for i in range(num_evaluation_sets):
        print('Reading data from: "{0:s}"...'.format(evaluation_file_names[i]))
        evaluation_tables_xarray[i] = evaluation.read_file(
            evaluation_file_names[i])

        this_prediction_file_name = (
            evaluation_tables_xarray[i].attrs[evaluation.PREDICTION_FILE_KEY])

        print(
            'Reading data from: "{0:s}"...'.format(this_prediction_file_name))
        prediction_dicts[i] = prediction_io.read_file(
            this_prediction_file_name)

    model_file_name = (
        evaluation_tables_xarray[0].attrs[evaluation.MODEL_FILE_KEY])
    model_metafile_name = neural_net.find_metafile(
        model_dir_name=os.path.split(model_file_name)[0],
        raise_error_if_missing=True)

    print('Reading metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = neural_net.read_metafile(model_metafile_name)
    generator_option_dict = model_metadata_dict[
        neural_net.TRAINING_OPTIONS_KEY]

    scalar_target_names = (
        generator_option_dict[neural_net.SCALAR_TARGET_NAMES_KEY])
    vector_target_names = (
        generator_option_dict[neural_net.VECTOR_TARGET_NAMES_KEY])
    heights_m_agl = generator_option_dict[neural_net.HEIGHTS_KEY]

    try:
        t = evaluation_tables_xarray[0]
        aux_target_names = t.coords[evaluation.AUX_TARGET_FIELD_DIM].values
    except KeyError:
        aux_target_names = []

    num_scalar_targets = len(scalar_target_names)
    num_vector_targets = len(vector_target_names)
    num_heights = len(heights_m_agl)
    num_aux_targets = len(aux_target_names)

    example_dict = {
        example_utils.SCALAR_TARGET_NAMES_KEY:
        scalar_target_names,
        example_utils.VECTOR_TARGET_NAMES_KEY:
        vector_target_names,
        example_utils.HEIGHTS_KEY:
        heights_m_agl,
        example_utils.SCALAR_PREDICTOR_NAMES_KEY:
        generator_option_dict[neural_net.SCALAR_PREDICTOR_NAMES_KEY],
        example_utils.VECTOR_PREDICTOR_NAMES_KEY:
        generator_option_dict[neural_net.VECTOR_PREDICTOR_NAMES_KEY]
    }

    normalization_file_name = (
        generator_option_dict[neural_net.NORMALIZATION_FILE_KEY])
    print(('Reading training examples (for climatology) from: "{0:s}"...'
           ).format(normalization_file_name))

    training_example_dict = example_io.read_file(normalization_file_name)
    training_example_dict = example_utils.subset_by_height(
        example_dict=training_example_dict, heights_m_agl=heights_m_agl)
    mean_training_example_dict = normalization.create_mean_example(
        new_example_dict=example_dict,
        training_example_dict=training_example_dict)

    print(SEPARATOR_STRING)

    # Do actual stuff.
    _plot_error_distributions(
        prediction_dicts=prediction_dicts,
        model_metadata_dict=model_metadata_dict,
        aux_target_names=aux_target_names,
        set_descriptions_abbrev=set_descriptions_abbrev,
        set_descriptions_verbose=set_descriptions_verbose,
        output_dir_name=output_dir_name)
    print(SEPARATOR_STRING)

    _plot_reliability_by_height(
        evaluation_tables_xarray=evaluation_tables_xarray,
        vector_target_names=vector_target_names,
        heights_m_agl=heights_m_agl,
        set_descriptions_abbrev=set_descriptions_abbrev,
        set_descriptions_verbose=set_descriptions_verbose,
        output_dir_name=output_dir_name)
    print(SEPARATOR_STRING)

    for k in range(num_vector_targets):
        for this_score_name in list(SCORE_NAME_TO_PROFILE_KEY.keys()):
            _plot_score_profile(
                evaluation_tables_xarray=evaluation_tables_xarray,
                line_styles=line_styles,
                line_colours=line_colours,
                set_descriptions_verbose=set_descriptions_verbose,
                confidence_level=confidence_level,
                target_name=vector_target_names[k],
                score_name=this_score_name,
                use_log_scale=use_log_scale,
                output_dir_name=output_dir_name)

    print(SEPARATOR_STRING)

    for k in range(num_scalar_targets):
        _plot_attributes_diagram(
            evaluation_tables_xarray=evaluation_tables_xarray,
            line_styles=line_styles,
            line_colours=line_colours,
            set_descriptions_abbrev=set_descriptions_abbrev,
            set_descriptions_verbose=set_descriptions_verbose,
            confidence_level=confidence_level,
            mean_training_example_dict=mean_training_example_dict,
            target_name=scalar_target_names[k],
            output_dir_name=output_dir_name)

    for k in range(num_aux_targets):
        _plot_attributes_diagram(
            evaluation_tables_xarray=evaluation_tables_xarray,
            line_styles=line_styles,
            line_colours=line_colours,
            set_descriptions_abbrev=set_descriptions_abbrev,
            set_descriptions_verbose=set_descriptions_verbose,
            confidence_level=confidence_level,
            mean_training_example_dict=mean_training_example_dict,
            target_name=aux_target_names[k],
            output_dir_name=output_dir_name)

    if not plot_by_height:
        return

    print(SEPARATOR_STRING)

    for k in range(num_vector_targets):
        for j in range(num_heights):
            _plot_attributes_diagram(
                evaluation_tables_xarray=evaluation_tables_xarray,
                line_styles=line_styles,
                line_colours=line_colours,
                set_descriptions_abbrev=set_descriptions_abbrev,
                set_descriptions_verbose=set_descriptions_verbose,
                confidence_level=confidence_level,
                mean_training_example_dict=mean_training_example_dict,
                height_m_agl=heights_m_agl[j],
                target_name=vector_target_names[k],
                output_dir_name=output_dir_name)

        if k != num_vector_targets - 1:
            print(SEPARATOR_STRING)
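One small idiom worth noting is how line colours are parsed: each colour arrives as an underscore-separated RGB string in the 0-255 range and is converted to a length-3 float array in [0, 1]. Below is a minimal, self-contained sketch of just that step; the example strings are arbitrary.

import numpy

# Underscore-separated RGB strings in 0-255 (arbitrary example values).
line_colour_strings = ['217_95_2', '27_158_119']

# Convert each string to a length-3 float array in [0, 1].
line_colours = [
    numpy.fromstring(s, dtype=float, sep='_') / 255
    for s in line_colour_strings
]

for this_colour in line_colours:
    assert len(this_colour) == 3
    assert numpy.all(this_colour >= 0.) and numpy.all(this_colour <= 1.)

print(line_colours[0])  # [0.85098039 0.37254902 0.00784314]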
Example #5
def _read_one_file(evaluation_file_name, heights_m_agl, confidence_level):
    """Reads results from one evaluation file.

    :param evaluation_file_name: Path to input file (will be read by
        `evaluation.read_file`).
    :param heights_m_agl: See documentation at top of file.
    :param confidence_level: Same.
    """

    min_percentile = 50. * (1. - confidence_level)
    max_percentile = 50. * (1. + confidence_level)

    print('Reading data from: "{0:s}"...'.format(evaluation_file_name))
    result_table_xarray = evaluation.read_file(evaluation_file_name)

    net_flux_index = numpy.where(
        result_table_xarray.coords[evaluation.AUX_TARGET_FIELD_DIM].values ==
        evaluation.NET_FLUX_NAME)[0][0]

    net_flux_mse_skill_scores = (result_table_xarray[
        evaluation.AUX_MSE_SKILL_KEY].values[net_flux_index, :])
    print('MSE skill score for net flux = {0:.3g} [{1:.3g}, {2:.3g}]'.format(
        numpy.mean(net_flux_mse_skill_scores),
        numpy.percentile(net_flux_mse_skill_scores, min_percentile),
        numpy.percentile(net_flux_mse_skill_scores, max_percentile)))

    net_flux_biases = (
        result_table_xarray[evaluation.AUX_BIAS_KEY].values[net_flux_index, :])
    print('Bias for net flux = {0:.3g} [{1:.3g}, {2:.3g}]'.format(
        numpy.mean(net_flux_biases),
        numpy.percentile(net_flux_biases, min_percentile),
        numpy.percentile(net_flux_biases, max_percentile)))

    heating_rate_index = numpy.where(
        result_table_xarray.coords[evaluation.VECTOR_FIELD_DIM].values ==
        example_utils.SHORTWAVE_HEATING_RATE_NAME)[0][0]

    num_heights = len(heights_m_agl)

    for k in range(num_heights):
        these_diffs = numpy.absolute(
            result_table_xarray.coords[evaluation.HEIGHT_DIM].values -
            heights_m_agl[k])
        this_height_index = numpy.where(these_diffs <= TOLERANCE)[0][0]

        these_mse_skill_scores = (result_table_xarray[
            evaluation.VECTOR_MSE_SKILL_KEY].values[this_height_index,
                                                    heating_rate_index, :])
        print(('MSE skill score for heating rate at {0:d} m AGL = {1:.3g} '
               '[{2:.3g}, {3:.3g}]').format(
                   heights_m_agl[k], numpy.mean(these_mse_skill_scores),
                   numpy.percentile(these_mse_skill_scores, min_percentile),
                   numpy.percentile(these_mse_skill_scores, max_percentile)))

        these_biases = (result_table_xarray[evaluation.VECTOR_BIAS_KEY].values[
            this_height_index, heating_rate_index, :])
        print((
            'Bias for heating rate at {0:d} m AGL = {1:.3g} [{2:.3g}, {3:.3g}]'
        ).format(heights_m_agl[k], numpy.mean(these_biases),
                 numpy.percentile(these_biases, min_percentile),
                 numpy.percentile(these_biases, max_percentile)))
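The reporting pattern here is converting a confidence level into lower and upper percentiles, then summarizing each set of bootstrap replicates as a mean with that interval. Below is a minimal, self-contained sketch with toy data.

import numpy

# Convert a confidence level into lower/upper percentiles.
confidence_level = 0.95
min_percentile = 50. * (1. - confidence_level)   # 2.5
max_percentile = 50. * (1. + confidence_level)   # 97.5

# Toy bootstrap replicates of one score (random, for illustration only).
bootstrap_biases = numpy.random.normal(loc=0.1, scale=0.05, size=1000)

print('Bias = {0:.3g} [{1:.3g}, {2:.3g}]'.format(
    numpy.mean(bootstrap_biases),
    numpy.percentile(bootstrap_biases, min_percentile),
    numpy.percentile(bootstrap_biases, max_percentile)
))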
Example #6
def _run(evaluation_dir_name, grid_metafile_name, output_dir_name):
    """Plots evaluation scores by spatial region.

    This is effectively the main method.

    :param evaluation_dir_name: See documentation at top of file.
    :param grid_metafile_name: Same.
    :param output_dir_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)

    # Read metadata for grid.
    print('Reading grid metadata from: "{0:s}"...'.format(grid_metafile_name))
    grid_point_latitudes_deg, grid_point_longitudes_deg = (
        prediction_io.read_grid_metafile(grid_metafile_name))

    num_grid_rows = len(grid_point_latitudes_deg)
    num_grid_columns = len(grid_point_longitudes_deg)

    latitude_matrix_deg, longitude_matrix_deg = (
        grids.latlng_vectors_to_matrices(
            unique_latitudes_deg=grid_point_latitudes_deg,
            unique_longitudes_deg=grid_point_longitudes_deg))

    # Read evaluation files.
    eval_table_matrix_xarray = numpy.full((num_grid_rows, num_grid_columns),
                                          None,
                                          dtype=object)

    scalar_field_names = None
    aux_field_names = None
    vector_field_names = None
    heights_m_agl = None

    for i in range(num_grid_rows):
        for j in range(num_grid_columns):
            this_file_name = evaluation.find_file(
                directory_name=evaluation_dir_name,
                grid_row=i,
                grid_column=j,
                raise_error_if_missing=False)

            if not os.path.isfile(this_file_name):
                continue

            print('Reading data from: "{0:s}"...'.format(this_file_name))
            eval_table_matrix_xarray[i, j] = evaluation.read_file(
                this_file_name)
            eval_table_matrix_xarray[i, j] = _augment_eval_table(
                eval_table_matrix_xarray[i, j])

            if scalar_field_names is None:
                t = eval_table_matrix_xarray[i, j]

                scalar_field_names = (
                    t.coords[evaluation.SCALAR_FIELD_DIM].values)
                vector_field_names = (
                    t.coords[evaluation.VECTOR_FIELD_DIM].values)
                heights_m_agl = numpy.round(
                    t.coords[evaluation.HEIGHT_DIM].values).astype(int)

                try:
                    aux_field_names = (
                        t.coords[evaluation.AUX_TARGET_FIELD_DIM].values)
                except KeyError:
                    aux_field_names = []

    print(SEPARATOR_STRING)

    evaluation_tables_xarray = numpy.reshape(eval_table_matrix_xarray,
                                             num_grid_rows * num_grid_columns)
    nan_array = numpy.full(len(scalar_field_names), numpy.nan)

    scalar_mae_matrix = numpy.vstack([
        nan_array if t is None else t[evaluation.SCALAR_MAE_KEY].values
        for t in evaluation_tables_xarray
    ])
    scalar_rmse_matrix = numpy.sqrt(
        numpy.vstack([
            nan_array if t is None else t[evaluation.SCALAR_MSE_KEY].values
            for t in evaluation_tables_xarray
        ]))
    scalar_bias_matrix = numpy.vstack([
        nan_array if t is None else t[evaluation.SCALAR_BIAS_KEY].values
        for t in evaluation_tables_xarray
    ])
    scalar_mae_skill_matrix = numpy.vstack([
        nan_array if t is None else t[evaluation.SCALAR_MAE_SKILL_KEY].values
        for t in evaluation_tables_xarray
    ])
    scalar_mse_skill_matrix = numpy.vstack([
        nan_array if t is None else t[evaluation.SCALAR_MSE_SKILL_KEY].values
        for t in evaluation_tables_xarray
    ])
    scalar_correlation_matrix = numpy.vstack([
        nan_array if t is None else t[evaluation.SCALAR_CORRELATION_KEY].values
        for t in evaluation_tables_xarray
    ])
    scalar_kge_matrix = numpy.vstack([
        nan_array if t is None else t[evaluation.SCALAR_KGE_KEY].values
        for t in evaluation_tables_xarray
    ])
    scalar_skewness_matrix = numpy.vstack([
        nan_array if t is None else t[SCALAR_SKEWNESS_KEY].values
        for t in evaluation_tables_xarray
    ])

    grid_dim_tuple = (num_grid_rows, num_grid_columns)

    for k in range(len(scalar_field_names)):
        _plot_all_scores_one_field(
            latitude_matrix_deg=latitude_matrix_deg,
            longitude_matrix_deg=longitude_matrix_deg,
            mae_matrix=numpy.reshape(scalar_mae_matrix[:, k], grid_dim_tuple),
            rmse_matrix=numpy.reshape(scalar_rmse_matrix[:, k],
                                      grid_dim_tuple),
            bias_matrix=numpy.reshape(scalar_bias_matrix[:, k],
                                      grid_dim_tuple),
            mae_skill_score_matrix=numpy.reshape(scalar_mae_skill_matrix[:, k],
                                                 grid_dim_tuple),
            mse_skill_score_matrix=numpy.reshape(scalar_mse_skill_matrix[:, k],
                                                 grid_dim_tuple),
            correlation_matrix=numpy.reshape(scalar_correlation_matrix[:, k],
                                             grid_dim_tuple),
            kge_matrix=numpy.reshape(scalar_kge_matrix[:, k], grid_dim_tuple),
            skewness_matrix=numpy.reshape(scalar_skewness_matrix[:, k],
                                          grid_dim_tuple),
            field_name=scalar_field_names[k],
            output_dir_name=output_dir_name)

        if k == len(scalar_field_names) - 1:
            print(SEPARATOR_STRING)
        else:
            print('\n')

    if len(aux_field_names) > 0:
        nan_array = numpy.full(len(aux_field_names), numpy.nan)

        aux_mae_matrix = numpy.vstack([
            nan_array if t is None else t[evaluation.AUX_MAE_KEY].values
            for t in evaluation_tables_xarray
        ])
        aux_rmse_matrix = numpy.sqrt(
            numpy.vstack([
                nan_array if t is None else t[evaluation.AUX_MSE_KEY].values
                for t in evaluation_tables_xarray
            ]))
        aux_bias_matrix = numpy.vstack([
            nan_array if t is None else t[evaluation.AUX_BIAS_KEY].values
            for t in evaluation_tables_xarray
        ])
        aux_mae_skill_matrix = numpy.vstack([
            nan_array if t is None else t[evaluation.AUX_MAE_SKILL_KEY].values
            for t in evaluation_tables_xarray
        ])
        aux_mse_skill_matrix = numpy.vstack([
            nan_array if t is None else t[evaluation.AUX_MSE_SKILL_KEY].values
            for t in evaluation_tables_xarray
        ])
        aux_correlation_matrix = numpy.vstack([
            nan_array
            if t is None else t[evaluation.AUX_CORRELATION_KEY].values
            for t in evaluation_tables_xarray
        ])
        aux_kge_matrix = numpy.vstack([
            nan_array if t is None else t[evaluation.AUX_KGE_KEY].values
            for t in evaluation_tables_xarray
        ])
        aux_skewness_matrix = numpy.vstack([
            nan_array if t is None else t[AUX_SKEWNESS_KEY].values
            for t in evaluation_tables_xarray
        ])

    for k in range(len(aux_field_names)):
        _plot_all_scores_one_field(
            latitude_matrix_deg=latitude_matrix_deg,
            longitude_matrix_deg=longitude_matrix_deg,
            mae_matrix=numpy.reshape(aux_mae_matrix[:, k], grid_dim_tuple),
            rmse_matrix=numpy.reshape(aux_rmse_matrix[:, k], grid_dim_tuple),
            bias_matrix=numpy.reshape(aux_bias_matrix[:, k], grid_dim_tuple),
            mae_skill_score_matrix=numpy.reshape(aux_mae_skill_matrix[:, k],
                                                 grid_dim_tuple),
            mse_skill_score_matrix=numpy.reshape(aux_mse_skill_matrix[:, k],
                                                 grid_dim_tuple),
            correlation_matrix=numpy.reshape(aux_correlation_matrix[:, k],
                                             grid_dim_tuple),
            kge_matrix=numpy.reshape(aux_kge_matrix[:, k], grid_dim_tuple),
            skewness_matrix=numpy.reshape(aux_skewness_matrix[:, k],
                                          grid_dim_tuple),
            field_name=aux_field_names[k],
            output_dir_name=output_dir_name)

        if k == len(aux_field_names) - 1:
            print(SEPARATOR_STRING)
        else:
            print('\n')

    nan_array = numpy.full((len(heights_m_agl), len(vector_field_names)),
                           numpy.nan)

    vector_mae_matrix = numpy.stack([
        nan_array if t is None else t[evaluation.VECTOR_MAE_KEY].values
        for t in evaluation_tables_xarray
    ], axis=0)
    vector_rmse_matrix = numpy.sqrt(numpy.stack([
        nan_array if t is None else t[evaluation.VECTOR_MSE_KEY].values
        for t in evaluation_tables_xarray
    ], axis=0))
    vector_bias_matrix = numpy.stack([
        nan_array if t is None else t[evaluation.VECTOR_BIAS_KEY].values
        for t in evaluation_tables_xarray
    ], axis=0)
    vector_mae_skill_matrix = numpy.stack([
        nan_array if t is None else t[evaluation.VECTOR_MAE_SKILL_KEY].values
        for t in evaluation_tables_xarray
    ], axis=0)
    vector_mse_skill_matrix = numpy.stack([
        nan_array if t is None else t[evaluation.VECTOR_MSE_SKILL_KEY].values
        for t in evaluation_tables_xarray
    ], axis=0)
    vector_correlation_matrix = numpy.stack([
        nan_array if t is None else t[evaluation.VECTOR_CORRELATION_KEY].values
        for t in evaluation_tables_xarray
    ], axis=0)
    vector_kge_matrix = numpy.stack([
        nan_array if t is None else t[evaluation.VECTOR_KGE_KEY].values
        for t in evaluation_tables_xarray
    ], axis=0)
    vector_skewness_matrix = numpy.stack([
        nan_array if t is None else t[VECTOR_SKEWNESS_KEY].values
        for t in evaluation_tables_xarray
    ], axis=0)

    for k in range(len(vector_field_names)):
        for j in range(len(heights_m_agl)):
            _plot_all_scores_one_field(
                latitude_matrix_deg=latitude_matrix_deg,
                longitude_matrix_deg=longitude_matrix_deg,
                mae_matrix=numpy.reshape(vector_mae_matrix[:, j, k],
                                         grid_dim_tuple),
                rmse_matrix=numpy.reshape(vector_rmse_matrix[:, j, k],
                                          grid_dim_tuple),
                bias_matrix=numpy.reshape(vector_bias_matrix[:, j, k],
                                          grid_dim_tuple),
                mae_skill_score_matrix=numpy.reshape(
                    vector_mae_skill_matrix[:, j, k], grid_dim_tuple),
                mse_skill_score_matrix=numpy.reshape(
                    vector_mse_skill_matrix[:, j, k], grid_dim_tuple),
                correlation_matrix=numpy.reshape(
                    vector_correlation_matrix[:, j, k], grid_dim_tuple),
                kge_matrix=numpy.reshape(vector_kge_matrix[:, j, k],
                                         grid_dim_tuple),
                skewness_matrix=numpy.reshape(vector_skewness_matrix[:, j, k],
                                              grid_dim_tuple),
                field_name=vector_field_names[k],
                height_m_agl=heights_m_agl[j],
                output_dir_name=output_dir_name)

        if k == len(vector_field_names) - 1:
            print(SEPARATOR_STRING)
        else:
            print('\n')

    num_examples_array = numpy.array([
        numpy.nan if t is None else t.attrs[NUM_EXAMPLES_KEY]
        for t in evaluation_tables_xarray
    ])
    num_examples_matrix = numpy.reshape(num_examples_array, grid_dim_tuple)
    max_colour_value = numpy.nanpercentile(num_examples_matrix,
                                           MAX_COLOUR_PERCENTILE)

    figure_object, axes_object = _plot_score_one_field(
        latitude_matrix_deg=latitude_matrix_deg,
        longitude_matrix_deg=longitude_matrix_deg,
        score_matrix=num_examples_matrix,
        colour_map_object=COUNT_COLOUR_MAP_OBJECT,
        min_colour_value=0.,
        max_colour_value=max_colour_value,
        taper_cbar_top=True,
        taper_cbar_bottom=False,
        log_scale=False)

    axes_object.set_title('Number of examples', fontsize=TITLE_FONT_SIZE)
    figure_file_name = '{0:s}/num_examples.jpg'.format(output_dir_name)

    print('Saving figure to: "{0:s}"...'.format(figure_file_name))
    figure_object.savefig(figure_file_name,
                          dpi=FIGURE_RESOLUTION_DPI,
                          pad_inches=0,
                          bbox_inches='tight')
    pyplot.close(figure_object)
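The organizing idea in this script is flattening the 2-D grid of per-cell evaluation tables into a 1-D sequence, stacking one score per cell (with a NaN placeholder where the cell has no file), and reshaping the result back onto the grid for plotting. Below is a minimal, self-contained sketch of that flatten/stack/reshape pattern; plain dicts with a made-up 'mae' key stand in for the xarray tables, and all values are toy data.

import numpy

num_grid_rows, num_grid_columns = 2, 3
num_fields = 4

# Per-cell results, mostly missing (None), stored in an object array.
cell_tables = numpy.full(
    (num_grid_rows, num_grid_columns), None, dtype=object)
cell_tables[0, 0] = {'mae': numpy.random.uniform(size=num_fields)}
cell_tables[1, 2] = {'mae': numpy.random.uniform(size=num_fields)}

# Flatten the grid, pull one score per cell with NaN placeholders.
flat_tables = numpy.reshape(cell_tables, num_grid_rows * num_grid_columns)
nan_array = numpy.full(num_fields, numpy.nan)

mae_matrix = numpy.vstack([
    nan_array if t is None else t['mae'] for t in flat_tables
])  # shape: (num grid cells, num fields)

# Reshape one field's scores back onto the grid, ready for plotting.
mae_grid_for_field0 = numpy.reshape(
    mae_matrix[:, 0], (num_grid_rows, num_grid_columns))
print(mae_grid_for_field0)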