# Example 1
    def test_buffer_to_column_name_large_buffer_grid_columns(self):
        """Ensures correct output from _buffer_to_column_name.

        In this case, the buffer is "5-10 km outside storm" and the column name
        is for grid columns inside the polygon.
        """

        this_column_name = gridded_forecasts._buffer_to_column_name(
            min_buffer_dist_metres=LARGE_BUFFER_MIN_DISTANCE_METRES,
            max_buffer_dist_metres=LARGE_BUFFER_MAX_DISTANCE_METRES,
            column_type=gridded_forecasts.GRID_COLUMNS_IN_POLYGON_COLUMN_TYPE)

        # assertEqual reports both values on failure, unlike
        # assertTrue(a == b), which only reports "False is not true".
        self.assertEqual(
            this_column_name, LARGE_BUFFER_GRID_COLUMNS_COLUMN)
# Example 2
    def test_buffer_to_column_name_large_buffer_latlng(self):
        """Ensures correct output from _buffer_to_column_name.

        In this case, the buffer is "5-10 km outside storm" and the column name
        is for lat-long polygons.
        """

        this_column_name = gridded_forecasts._buffer_to_column_name(
            min_buffer_dist_metres=LARGE_BUFFER_MIN_DISTANCE_METRES,
            max_buffer_dist_metres=LARGE_BUFFER_MAX_DISTANCE_METRES,
            column_type=gridded_forecasts.LATLNG_POLYGON_COLUMN_TYPE)

        # assertEqual reports both values on failure, unlike
        # assertTrue(a == b), which only reports "False is not true".
        self.assertEqual(this_column_name, LARGE_BUFFER_LATLNG_COLUMN)
# Example 3
    def test_buffer_to_column_name_large_buffer_forecast(self):
        """Ensures correct output from _buffer_to_column_name.

        In this case, the buffer is "5-10 km outside storm" and the column name
        is for forecast probabilities.
        """

        this_column_name = gridded_forecasts._buffer_to_column_name(
            min_buffer_dist_metres=LARGE_BUFFER_MIN_DISTANCE_METRES,
            max_buffer_dist_metres=LARGE_BUFFER_MAX_DISTANCE_METRES,
            column_type=gridded_forecasts.FORECAST_COLUMN_TYPE)

        # assertEqual reports both values on failure, unlike
        # assertTrue(a == b), which only reports "False is not true".
        self.assertEqual(this_column_name, LARGE_BUFFER_FORECAST_COLUMN)
# Example 4
    def test_buffer_to_column_name_small_buffer_xy(self):
        """Ensures correct output from _buffer_to_column_name.

        In this case, the buffer is "inside storm" and the column name is for
        x-y polygons.
        """

        this_column_name = gridded_forecasts._buffer_to_column_name(
            min_buffer_dist_metres=SMALL_BUFFER_MIN_DISTANCE_METRES,
            max_buffer_dist_metres=SMALL_BUFFER_MAX_DISTANCE_METRES,
            column_type=gridded_forecasts.XY_POLYGON_COLUMN_TYPE)

        # assertEqual reports both values on failure, unlike
        # assertTrue(a == b), which only reports "False is not true".
        self.assertEqual(this_column_name, SMALL_BUFFER_XY_COLUMN)
# Example 5
def _run(input_prediction_file_name, top_tracking_dir_name,
         tracking_scale_metres2, x_spacing_metres, y_spacing_metres,
         effective_radius_metres, smoothing_method_name,
         smoothing_cutoff_radius_metres, smoothing_efold_radius_metres,
         top_output_dir_name):
    """Projects ungridded CNN forecasts onto a grid and writes them to file.

    Pipeline: read ungridded predictions -> find matching storm-tracking
    files for every initialization time -> attach forecast probabilities to
    the storm-object table -> create forecast grids -> write gridded
    predictions.

    :param input_prediction_file_name: See documentation at top of file.
    :param top_tracking_dir_name: Same.
    :param tracking_scale_metres2: Same.
    :param x_spacing_metres: Same.
    :param y_spacing_metres: Same.
    :param effective_radius_metres: Same.
    :param smoothing_method_name: Same.
    :param smoothing_cutoff_radius_metres: Same.
    :param smoothing_efold_radius_metres: Same.
    :param top_output_dir_name: Same.
    """

    print('Reading data from: "{0:s}"...'.format(input_prediction_file_name))
    ungridded_forecast_dict = prediction_io.read_ungridded_predictions(
        input_prediction_file_name)

    # Recover buffer distances and lead times from the target-variable name
    # stored with the predictions.
    target_param_dict = target_val_utils.target_name_to_params(
        ungridded_forecast_dict[prediction_io.TARGET_NAME_KEY])

    min_buffer_dist_metres = target_param_dict[
        target_val_utils.MIN_LINKAGE_DISTANCE_KEY]

    # TODO(thunderhoser): This is HACKY.
    # NOTE(review): a min distance of 0 is mapped to NaN here, presumably
    # because _buffer_to_column_name encodes "inside storm" that way —
    # confirm against gridded_forecasts.
    if min_buffer_dist_metres == 0:
        min_buffer_dist_metres = numpy.nan

    max_buffer_dist_metres = target_param_dict[
        target_val_utils.MAX_LINKAGE_DISTANCE_KEY]

    min_lead_time_seconds = target_param_dict[
        target_val_utils.MIN_LEAD_TIME_KEY]

    max_lead_time_seconds = target_param_dict[
        target_val_utils.MAX_LEAD_TIME_KEY]

    # Name of the storm-object-table column that will hold the forecast
    # probabilities.
    forecast_column_name = gridded_forecasts._buffer_to_column_name(
        min_buffer_dist_metres=min_buffer_dist_metres,
        max_buffer_dist_metres=max_buffer_dist_metres,
        column_type=gridded_forecasts.FORECAST_COLUMN_TYPE)

    init_times_unix_sec = numpy.unique(
        ungridded_forecast_dict[prediction_io.STORM_TIMES_KEY])

    # Locate one tracking file per unique initialization time
    # (raise_error_if_missing=True makes a missing file fatal).
    tracking_file_names = []

    for this_time_unix_sec in init_times_unix_sec:
        this_tracking_file_name = tracking_io.find_file(
            top_tracking_dir_name=top_tracking_dir_name,
            tracking_scale_metres2=tracking_scale_metres2,
            source_name=tracking_utils.SEGMOTION_NAME,
            valid_time_unix_sec=this_time_unix_sec,
            spc_date_string=time_conversion.time_to_spc_date_string(
                this_time_unix_sec),
            raise_error_if_missing=True)

        tracking_file_names.append(this_tracking_file_name)

    storm_object_table = tracking_io.read_many_files(tracking_file_names)
    print(SEPARATOR_STRING)

    # Validation only: with allow_missing=False this call raises if any
    # storm object with a forecast is absent from the tracking data.  The
    # return value is deliberately discarded.
    tracking_utils.find_storm_objects(
        all_id_strings=ungridded_forecast_dict[prediction_io.STORM_IDS_KEY],
        all_times_unix_sec=ungridded_forecast_dict[
            prediction_io.STORM_TIMES_KEY],
        id_strings_to_keep=storm_object_table[
            tracking_utils.FULL_ID_COLUMN].values.tolist(),
        times_to_keep_unix_sec=storm_object_table[
            tracking_utils.VALID_TIME_COLUMN].values,
        allow_missing=False)

    # Indices that align the ungridded forecasts with the rows of
    # storm_object_table.
    sort_indices = tracking_utils.find_storm_objects(
        all_id_strings=storm_object_table[
            tracking_utils.FULL_ID_COLUMN].values.tolist(),
        all_times_unix_sec=storm_object_table[
            tracking_utils.VALID_TIME_COLUMN].values,
        id_strings_to_keep=ungridded_forecast_dict[
            prediction_io.STORM_IDS_KEY],
        times_to_keep_unix_sec=ungridded_forecast_dict[
            prediction_io.STORM_TIMES_KEY],
        allow_missing=False)

    # Column 1 of the probability matrix — presumably the positive class of
    # a binary problem (TODO confirm against prediction_io).
    forecast_probabilities = ungridded_forecast_dict[
        prediction_io.PROBABILITY_MATRIX_KEY][sort_indices, 1]

    storm_object_table = storm_object_table.assign(
        **{forecast_column_name: forecast_probabilities})

    gridded_forecast_dict = gridded_forecasts.create_forecast_grids(
        storm_object_table=storm_object_table,
        min_lead_time_sec=min_lead_time_seconds,
        max_lead_time_sec=max_lead_time_seconds,
        lead_time_resolution_sec=gridded_forecasts.
        DEFAULT_LEAD_TIME_RES_SECONDS,
        grid_spacing_x_metres=x_spacing_metres,
        grid_spacing_y_metres=y_spacing_metres,
        interp_to_latlng_grid=False,
        prob_radius_for_grid_metres=effective_radius_metres,
        smoothing_method=smoothing_method_name,
        smoothing_e_folding_radius_metres=smoothing_efold_radius_metres,
        smoothing_cutoff_radius_metres=smoothing_cutoff_radius_metres)

    print(SEPARATOR_STRING)

    # Output file is named after the first/last storm-object valid times;
    # raise_error_if_missing=False because the file is about to be created.
    output_file_name = prediction_io.find_file(
        top_prediction_dir_name=top_output_dir_name,
        first_init_time_unix_sec=numpy.min(
            storm_object_table[tracking_utils.VALID_TIME_COLUMN].values),
        last_init_time_unix_sec=numpy.max(
            storm_object_table[tracking_utils.VALID_TIME_COLUMN].values),
        gridded=True,
        raise_error_if_missing=False)

    print(('Writing results (forecast grids for {0:d} initial times) to: '
           '"{1:s}"...').format(
               len(gridded_forecast_dict[prediction_io.INIT_TIMES_KEY]),
               output_file_name))

    prediction_io.write_gridded_predictions(
        gridded_forecast_dict=gridded_forecast_dict,
        pickle_file_name=output_file_name)