Code Example #1
def _run(model_file_name, layer_names, top_example_dir_name,
         storm_metafile_name, num_examples, top_output_dir_name):
    """Evaluates CNN (convolutional neural net) predictions.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param layer_names: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_output_dir_name: Same.
    :raises: ValueError: if feature maps do not have 2 or 3 spatial dimensions.
    """

    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_id_strings, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_id_strings):
        full_id_strings = full_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_predictor_matrices = testing_io.read_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        list_of_layer_operation_dicts=model_metadata_dict[
            cnn.LAYER_OPERATIONS_KEY])[0]

    print(SEPARATOR_STRING)

    include_soundings = (training_option_dict[trainval_io.SOUNDING_FIELDS_KEY]
                         is not None)

    if include_soundings:
        sounding_matrix = list_of_predictor_matrices[-1]
    else:
        sounding_matrix = None

    num_layers = len(layer_names)
    feature_matrix_by_layer = [None] * num_layers

    for k in range(num_layers):
        if model_metadata_dict[cnn.CONV_2D3D_KEY]:
            if training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY]:
                feature_matrix_by_layer[k] = cnn.apply_2d_or_3d_cnn(
                    model_object=model_object,
                    radar_image_matrix=list_of_predictor_matrices[0],
                    sounding_matrix=sounding_matrix,
                    return_features=True,
                    feature_layer_name=layer_names[k])
            else:
                feature_matrix_by_layer[k] = cnn.apply_2d3d_cnn(
                    model_object=model_object,
                    reflectivity_matrix_dbz=list_of_predictor_matrices[0],
                    azimuthal_shear_matrix_s01=list_of_predictor_matrices[1],
                    sounding_matrix=sounding_matrix,
                    return_features=True,
                    feature_layer_name=layer_names[k])
        else:
            feature_matrix_by_layer[k] = cnn.apply_2d_or_3d_cnn(
                model_object=model_object,
                radar_image_matrix=list_of_predictor_matrices[0],
                sounding_matrix=sounding_matrix,
                return_features=True,
                feature_layer_name=layer_names[k])

    for k in range(num_layers):
        this_output_dir_name = '{0:s}/{1:s}'.format(top_output_dir_name,
                                                    layer_names[k])

        file_system_utils.mkdir_recursive_if_necessary(
            directory_name=this_output_dir_name)

        _plot_feature_maps_one_layer(feature_matrix=feature_matrix_by_layer[k],
                                     full_id_strings=full_id_strings,
                                     storm_times_unix_sec=storm_times_unix_sec,
                                     layer_name=layer_names[k],
                                     output_dir_name=this_output_dir_name)

        print(SEPARATOR_STRING)
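
Each _run function in this collection is meant to be driven from the command line. A minimal sketch of such a wrapper for the feature-map script above is shown below; the flag names and the parsing style are illustrative assumptions, not the script's actual interface.

import argparse

if __name__ == '__main__':
    # Hypothetical argument parser; flag names are assumptions for illustration.
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('--model_file_name', type=str, required=True)
    PARSER.add_argument('--layer_names', type=str, nargs='+', required=True)
    PARSER.add_argument('--example_dir_name', type=str, required=True)
    PARSER.add_argument('--storm_metafile_name', type=str, required=True)
    PARSER.add_argument('--num_examples', type=int, default=-1)
    PARSER.add_argument('--output_dir_name', type=str, required=True)

    ARGS = PARSER.parse_args()

    _run(model_file_name=ARGS.model_file_name,
         layer_names=ARGS.layer_names,
         top_example_dir_name=ARGS.example_dir_name,
         storm_metafile_name=ARGS.storm_metafile_name,
         num_examples=ARGS.num_examples,
         top_output_dir_name=ARGS.output_dir_name)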
Code Example #2
def _run(cnn_file_name, upconvnet_file_name, top_example_dir_name,
         baseline_storm_metafile_name, trial_storm_metafile_name,
         num_baseline_examples, num_trial_examples, num_novel_examples,
         cnn_feature_layer_name, percent_svd_variance_to_keep,
         output_file_name):
    """Runs novelty detection.

    This is effectively the main method.

    :param cnn_file_name: See documentation at top of file.
    :param upconvnet_file_name: Same.
    :param top_example_dir_name: Same.
    :param baseline_storm_metafile_name: Same.
    :param trial_storm_metafile_name: Same.
    :param num_baseline_examples: Same.
    :param num_trial_examples: Same.
    :param num_novel_examples: Same.
    :param cnn_feature_layer_name: Same.
    :param percent_svd_variance_to_keep: Same.
    :param output_file_name: Same.
    :raises: ValueError: if dimensions of first CNN input matrix != dimensions
        of upconvnet output.
    """

    print('Reading trained CNN from: "{0:s}"...'.format(cnn_file_name))
    cnn_model_object = cnn.read_model(cnn_file_name)

    cnn_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(cnn_file_name)[0]
    )

    print('Reading trained upconvnet from: "{0:s}"...'.format(
        upconvnet_file_name))
    upconvnet_model_object = cnn.read_model(upconvnet_file_name)

    # ucn_output_dimensions = numpy.array(
    #     upconvnet_model_object.output.get_shape().as_list()[1:], dtype=int
    # )

    if isinstance(cnn_model_object.input, list):
        first_cnn_input_tensor = cnn_model_object.input[0]
    else:
        first_cnn_input_tensor = cnn_model_object.input

    cnn_input_dimensions = numpy.array(
        first_cnn_input_tensor.get_shape().as_list()[1:], dtype=int
    )

    # if not numpy.array_equal(cnn_input_dimensions, ucn_output_dimensions):
    #     error_string = (
    #         'Dimensions of first CNN input matrix ({0:s}) should equal '
    #         'dimensions of upconvnet output ({1:s}).'
    #     ).format(str(cnn_input_dimensions), str(ucn_output_dimensions))
    #
    #     raise ValueError(error_string)

    print('Reading CNN metadata from: "{0:s}"...'.format(cnn_metafile_name))
    cnn_metadata_dict = cnn.read_model_metadata(cnn_metafile_name)

    print('Reading metadata for baseline examples from: "{0:s}"...'.format(
        baseline_storm_metafile_name))
    baseline_full_id_strings, baseline_times_unix_sec = (
        tracking_io.read_ids_and_times(baseline_storm_metafile_name)
    )

    print('Reading metadata for trial examples from: "{0:s}"...'.format(
        trial_storm_metafile_name))
    trial_full_id_strings, trial_times_unix_sec = (
        tracking_io.read_ids_and_times(trial_storm_metafile_name)
    )

    if 0 < num_baseline_examples < len(baseline_full_id_strings):
        baseline_full_id_strings = baseline_full_id_strings[
            :num_baseline_examples]
        baseline_times_unix_sec = baseline_times_unix_sec[
            :num_baseline_examples]

    if 0 < num_trial_examples < len(trial_full_id_strings):
        trial_full_id_strings = trial_full_id_strings[:num_trial_examples]
        trial_times_unix_sec = trial_times_unix_sec[:num_trial_examples]

    num_trial_examples = len(trial_full_id_strings)

    if num_novel_examples <= 0:
        num_novel_examples = num_trial_examples + 0

    num_novel_examples = min([num_novel_examples, num_trial_examples])
    print('Number of novel examples to find: {0:d}'.format(num_novel_examples))

    bad_baseline_indices = tracking_utils.find_storm_objects(
        all_id_strings=baseline_full_id_strings,
        all_times_unix_sec=baseline_times_unix_sec,
        id_strings_to_keep=trial_full_id_strings,
        times_to_keep_unix_sec=trial_times_unix_sec, allow_missing=True)

    print('Removing {0:d} trial examples from baseline set...'.format(
        len(bad_baseline_indices)
    ))

    baseline_times_unix_sec = numpy.delete(
        baseline_times_unix_sec, bad_baseline_indices
    )
    baseline_full_id_strings = numpy.delete(
        numpy.array(baseline_full_id_strings), bad_baseline_indices
    )
    baseline_full_id_strings = baseline_full_id_strings.tolist()

    # num_baseline_examples = len(baseline_full_id_strings)

    print(SEPARATOR_STRING)

    list_of_baseline_input_matrices, _ = testing_io.read_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=baseline_full_id_strings,
        desired_times_unix_sec=baseline_times_unix_sec,
        option_dict=cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        list_of_layer_operation_dicts=cnn_metadata_dict[
            cnn.LAYER_OPERATIONS_KEY]
    )

    print(SEPARATOR_STRING)

    list_of_trial_input_matrices, _ = testing_io.read_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=trial_full_id_strings,
        desired_times_unix_sec=trial_times_unix_sec,
        option_dict=cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        list_of_layer_operation_dicts=cnn_metadata_dict[
            cnn.LAYER_OPERATIONS_KEY]
    )

    print(SEPARATOR_STRING)

    novelty_dict = novelty_detection.do_novelty_detection(
        list_of_baseline_input_matrices=list_of_baseline_input_matrices,
        list_of_trial_input_matrices=list_of_trial_input_matrices,
        cnn_model_object=cnn_model_object,
        cnn_feature_layer_name=cnn_feature_layer_name,
        upconvnet_model_object=upconvnet_model_object,
        num_novel_examples=num_novel_examples, multipass=False,
        percent_svd_variance_to_keep=percent_svd_variance_to_keep)

    print(SEPARATOR_STRING)

    print('Adding metadata to novelty-detection results...')
    novelty_dict = novelty_detection.add_metadata(
        novelty_dict=novelty_dict,
        baseline_full_id_strings=baseline_full_id_strings,
        baseline_storm_times_unix_sec=baseline_times_unix_sec,
        trial_full_id_strings=trial_full_id_strings,
        trial_storm_times_unix_sec=trial_times_unix_sec,
        cnn_file_name=cnn_file_name, upconvnet_file_name=upconvnet_file_name)

    print('Denormalizing inputs and outputs of novelty detection...')

    novelty_dict[novelty_detection.BASELINE_INPUTS_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=novelty_dict[
                novelty_detection.BASELINE_INPUTS_KEY
            ],
            model_metadata_dict=cnn_metadata_dict)
    )

    novelty_dict[novelty_detection.TRIAL_INPUTS_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=novelty_dict[
                novelty_detection.TRIAL_INPUTS_KEY
            ],
            model_metadata_dict=cnn_metadata_dict)
    )

    cnn_metadata_dict[
        cnn.TRAINING_OPTION_DICT_KEY][trainval_io.SOUNDING_FIELDS_KEY] = None

    novelty_dict[novelty_detection.NOVEL_IMAGES_UPCONV_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[
                novelty_dict[novelty_detection.NOVEL_IMAGES_UPCONV_KEY]
            ],
            model_metadata_dict=cnn_metadata_dict)
    )[0]

    novelty_dict[novelty_detection.NOVEL_IMAGES_UPCONV_SVD_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[
                novelty_dict[novelty_detection.NOVEL_IMAGES_UPCONV_SVD_KEY]
            ],
            model_metadata_dict=cnn_metadata_dict)
    )[0]

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    novelty_detection.write_standard_file(novelty_dict=novelty_dict,
                                          pickle_file_name=output_file_name)
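
The core of this script is novelty_detection.do_novelty_detection, which compares trial examples against an SVD basis fit to the baseline examples. Below is a minimal numpy sketch of that idea (my paraphrase of the technique, not the library code): fit the basis to baseline feature vectors, reconstruct each trial feature vector from it, and rank trial examples by reconstruction error, so the "most novel" example is the one the baseline basis explains worst. It assumes 2-D feature matrices with examples along the first axis.

import numpy


def _rank_by_novelty_sketch(baseline_feature_matrix, trial_feature_matrix,
                            percent_variance_to_keep=97.5):
    """Returns trial-example indices, sorted from most to least novel."""

    # Center both sets on the baseline mean.
    mean_vector = numpy.mean(baseline_feature_matrix, axis=0)
    baseline_centered = baseline_feature_matrix - mean_vector
    trial_centered = trial_feature_matrix - mean_vector

    # Keep enough right-singular vectors to explain the desired variance.
    _, singular_values, v_transpose = numpy.linalg.svd(
        baseline_centered, full_matrices=False)
    explained_fractions = (
        numpy.cumsum(singular_values ** 2) / numpy.sum(singular_values ** 2)
    )
    num_modes = 1 + numpy.searchsorted(
        explained_fractions, 0.01 * percent_variance_to_keep)
    basis_matrix = v_transpose[:num_modes, :]

    # Reconstruct trial features from the baseline basis; large errors = novel.
    reconstructed_matrix = trial_centered.dot(basis_matrix.T).dot(basis_matrix)
    error_by_example = numpy.linalg.norm(
        trial_centered - reconstructed_matrix, axis=1)

    return numpy.argsort(-error_by_example)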
Code Example #3
def _run(upconvnet_file_name, storm_metafile_name, num_examples,
         top_example_dir_name, top_output_dir_name):
    """Plots upconvnet reconstructions of many examples (storm objects).

    This is effectively the main method.

    :param upconvnet_file_name: See documentation at top of file.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_example_dir_name: Same.
    :param top_output_dir_name: Same.
    """

    print('Reading trained upconvnet from: "{0:s}"...'.format(
        upconvnet_file_name))
    upconvnet_model_object = cnn.read_model(upconvnet_file_name)
    upconvnet_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(upconvnet_file_name)[0]
    )

    print('Reading upconvnet metadata from: "{0:s}"...'.format(
        upconvnet_metafile_name))
    upconvnet_metadata_dict = upconvnet.read_model_metadata(
        upconvnet_metafile_name)
    cnn_file_name = upconvnet_metadata_dict[upconvnet.CNN_FILE_KEY]

    print('Reading trained CNN from: "{0:s}"...'.format(cnn_file_name))
    cnn_model_object = cnn.read_model(cnn_file_name)
    cnn_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(cnn_file_name)[0]
    )

    print('Reading CNN metadata from: "{0:s}"...'.format(cnn_metafile_name))
    cnn_metadata_dict = cnn.read_model_metadata(cnn_metafile_name)
    training_option_dict = cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]

    print('Reading storm IDs and times from: "{0:s}"...'.format(
        storm_metafile_name))
    storm_ids, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)

    if 0 < num_examples < len(storm_ids):
        storm_ids = storm_ids[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    print(SEPARATOR_STRING)
    list_of_predictor_matrices = testing_io.read_specific_examples(
        desired_storm_ids=storm_ids,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        top_example_dir_name=top_example_dir_name,
        list_of_layer_operation_dicts=cnn_metadata_dict[
            cnn.LAYER_OPERATIONS_KEY]
    )[0]
    print(SEPARATOR_STRING)

    actual_radar_matrix = list_of_predictor_matrices[0]
    have_soundings = (
        training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] is not None)

    if have_soundings:
        sounding_matrix = list_of_predictor_matrices[-1]
    else:
        sounding_matrix = None

    feature_matrix = cnn.apply_2d_or_3d_cnn(
        model_object=cnn_model_object, radar_image_matrix=actual_radar_matrix,
        sounding_matrix=sounding_matrix, verbose=True, return_features=True,
        feature_layer_name=upconvnet_metadata_dict[
            upconvnet.CNN_FEATURE_LAYER_KEY]
    )
    print('\n')

    reconstructed_radar_matrix = upconvnet.apply_upconvnet(
        model_object=upconvnet_model_object, feature_matrix=feature_matrix,
        verbose=True)
    print('\n')

    print('Denormalizing actual and reconstructed radar images...')

    cnn_metadata_dict[
        cnn.TRAINING_OPTION_DICT_KEY][trainval_io.SOUNDING_FIELDS_KEY] = None

    actual_radar_matrix = model_interpretation.denormalize_data(
        list_of_input_matrices=[actual_radar_matrix],
        model_metadata_dict=cnn_metadata_dict
    )[0]

    reconstructed_radar_matrix = model_interpretation.denormalize_data(
        list_of_input_matrices=[reconstructed_radar_matrix],
        model_metadata_dict=cnn_metadata_dict
    )[0]

    print(SEPARATOR_STRING)

    actual_output_dir_name = '{0:s}/actual_images'.format(top_output_dir_name)
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=actual_output_dir_name)

    # TODO(thunderhoser): Calling a method in another script is hacky.  If this
    # method is going to be reused, should be in a module.
    plot_input_examples.plot_examples(
        list_of_predictor_matrices=[actual_radar_matrix], storm_ids=storm_ids,
        storm_times_unix_sec=storm_times_unix_sec,
        model_metadata_dict=cnn_metadata_dict,
        output_dir_name=actual_output_dir_name)
    print(SEPARATOR_STRING)

    reconstructed_output_dir_name = '{0:s}/reconstructed_images'.format(
        top_output_dir_name)
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=reconstructed_output_dir_name)

    plot_input_examples.plot_examples(
        list_of_predictor_matrices=[reconstructed_radar_matrix],
        storm_ids=storm_ids, storm_times_unix_sec=storm_times_unix_sec,
        model_metadata_dict=cnn_metadata_dict,
        output_dir_name=reconstructed_output_dir_name)
Code Example #4
def _run(model_file_name, target_class, target_layer_name,
         top_example_dir_name, storm_metafile_name, num_examples,
         output_file_name):
    """Runs Grad-CAM (gradient-weighted class-activation maps).

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param target_class: Same.
    :param target_layer_name: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_id_strings, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_id_strings):
        full_id_strings = full_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_input_matrices, sounding_pressure_matrix_pascals = (
        testing_io.read_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_full_id_strings=full_id_strings,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=training_option_dict,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]))
    print(SEPARATOR_STRING)

    list_of_cam_matrices = None
    list_of_guided_cam_matrices = None
    new_model_object = None

    num_examples = len(full_id_strings)

    for i in range(num_examples):
        print('Running Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))

        these_input_matrices = [a[[i], ...] for a in list_of_input_matrices]
        these_cam_matrices = gradcam.run_gradcam(
            model_object=model_object,
            list_of_input_matrices=these_input_matrices,
            target_class=target_class,
            target_layer_name=target_layer_name)

        print('Running guided Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))

        these_guided_cam_matrices, new_model_object = (
            gradcam.run_guided_gradcam(
                orig_model_object=model_object,
                list_of_input_matrices=these_input_matrices,
                target_layer_name=target_layer_name,
                list_of_cam_matrices=these_cam_matrices,
                new_model_object=new_model_object))

        if list_of_cam_matrices is None:
            list_of_cam_matrices = copy.deepcopy(these_cam_matrices)
            list_of_guided_cam_matrices = copy.deepcopy(
                these_guided_cam_matrices)
        else:
            for j in range(len(these_cam_matrices)):
                if list_of_cam_matrices[j] is None:
                    continue

                list_of_cam_matrices[j] = numpy.concatenate(
                    (list_of_cam_matrices[j], these_cam_matrices[j]), axis=0)

                list_of_guided_cam_matrices[j] = numpy.concatenate(
                    (list_of_guided_cam_matrices[j],
                     these_guided_cam_matrices[j]),
                    axis=0)

    print(SEPARATOR_STRING)
    upsample_refl = training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY]

    if upsample_refl:
        list_of_cam_matrices[0] = numpy.expand_dims(list_of_cam_matrices[0],
                                                    axis=-1)

        num_channels = list_of_input_matrices[0].shape[-1]
        list_of_cam_matrices[0] = numpy.repeat(a=list_of_cam_matrices[0],
                                               repeats=num_channels,
                                               axis=-1)

        list_of_cam_matrices = trainval_io.separate_shear_and_reflectivity(
            list_of_input_matrices=list_of_cam_matrices,
            training_option_dict=training_option_dict)

        list_of_cam_matrices[0] = list_of_cam_matrices[0][..., 0]
        list_of_cam_matrices[1] = list_of_cam_matrices[1][..., 0]

    list_of_guided_cam_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=list_of_guided_cam_matrices,
        training_option_dict=training_option_dict)

    print('Denormalizing predictors...')
    list_of_input_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=list_of_input_matrices,
        training_option_dict=training_option_dict)

    list_of_input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing class-activation maps to file: "{0:s}"...'.format(
        output_file_name))

    gradcam.write_standard_file(
        pickle_file_name=output_file_name,
        list_of_input_matrices=list_of_input_matrices,
        list_of_cam_matrices=list_of_cam_matrices,
        list_of_guided_cam_matrices=list_of_guided_cam_matrices,
        model_file_name=model_file_name,
        full_id_strings=full_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        target_class=target_class,
        target_layer_name=target_layer_name,
        sounding_pressure_matrix_pascals=sounding_pressure_matrix_pascals)
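
For context, the sketch below condenses the computation that gradcam.run_gradcam presumably performs for one example and a 2-D convolutional target layer. This is the standard Grad-CAM recipe written against the TF-1-style keras.backend API, and it assumes a single-input model; it is an illustration of the technique, not the library source.

import numpy
from keras import backend as K


def _gradcam_sketch(model_object, input_matrix, target_class,
                    target_layer_name):
    """Returns one class-activation map with the target layer's spatial shape."""

    class_score_tensor = model_object.output[..., target_class]
    layer_output_tensor = model_object.get_layer(name=target_layer_name).output

    # Gradient of the class score with respect to the target layer's activations.
    gradient_tensor = K.gradients(
        K.sum(class_score_tensor), [layer_output_tensor])[0]
    gradient_function = K.function(
        [model_object.input], [layer_output_tensor, gradient_tensor])

    activation_matrix, gradient_matrix = gradient_function([input_matrix])
    activation_matrix = activation_matrix[0, ...]
    gradient_matrix = gradient_matrix[0, ...]

    # One weight per channel = spatially averaged gradient; the map is a
    # ReLU-ed weighted sum of the activation maps.
    channel_weights = numpy.mean(gradient_matrix, axis=(0, 1))
    class_activation_matrix = numpy.tensordot(
        activation_matrix, channel_weights, axes=([-1], [0]))

    return numpy.maximum(class_activation_matrix, 0.)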
Code Example #5
def _run(model_file_name, target_class, target_layer_name,
         top_example_dir_name, storm_metafile_name, num_examples,
         output_file_name):
    """Runs Grad-CAM (gradient-weighted class-activation maps).

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param target_class: Same.
    :param target_layer_name: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print('Reading model metadata from: "{0:s}"...'.format(
        model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print('Reading storm metadata from: "{0:s}"...'.format(
        storm_metafile_name))
    storm_ids, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(storm_ids):
        storm_ids = storm_ids[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_input_matrices, sounding_pressure_matrix_pascals = (
        testing_io.read_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_storm_ids=storm_ids,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=training_option_dict,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]))
    print(SEPARATOR_STRING)

    class_activation_matrix = None
    ggradcam_output_matrix = None
    new_model_object = None

    num_examples = len(storm_ids)

    for i in range(num_examples):
        print('Running Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))

        these_input_matrices = [a[[i], ...] for a in list_of_input_matrices]
        this_class_activation_matrix = gradcam.run_gradcam(
            model_object=model_object,
            list_of_input_matrices=these_input_matrices,
            target_class=target_class,
            target_layer_name=target_layer_name)

        print('Running guided Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))

        this_ggradcam_output_matrix, new_model_object = (
            gradcam.run_guided_gradcam(
                orig_model_object=model_object,
                list_of_input_matrices=these_input_matrices,
                target_layer_name=target_layer_name,
                class_activation_matrix=this_class_activation_matrix,
                new_model_object=new_model_object))

        this_class_activation_matrix = numpy.expand_dims(
            this_class_activation_matrix, axis=0)
        this_ggradcam_output_matrix = numpy.expand_dims(
            this_ggradcam_output_matrix, axis=0)

        if class_activation_matrix is None:
            class_activation_matrix = this_class_activation_matrix + 0.
            ggradcam_output_matrix = this_ggradcam_output_matrix + 0.
        else:
            class_activation_matrix = numpy.concatenate(
                (class_activation_matrix, this_class_activation_matrix),
                axis=0)
            ggradcam_output_matrix = numpy.concatenate(
                (ggradcam_output_matrix, this_ggradcam_output_matrix), axis=0)

    print(SEPARATOR_STRING)

    print('Denormalizing predictors...')
    list_of_input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing class-activation maps to file: "{0:s}"...'.format(
        output_file_name))
    gradcam.write_standard_file(
        pickle_file_name=output_file_name,
        list_of_input_matrices=list_of_input_matrices,
        class_activation_matrix=class_activation_matrix,
        ggradcam_output_matrix=ggradcam_output_matrix,
        model_file_name=model_file_name,
        storm_ids=storm_ids,
        storm_times_unix_sec=storm_times_unix_sec,
        target_class=target_class,
        target_layer_name=target_layer_name,
        sounding_pressure_matrix_pascals=sounding_pressure_matrix_pascals)
Code Example #6
def _run(model_file_name, init_function_name, storm_metafile_name,
         num_examples, top_example_dir_name, component_type_string,
         target_class, layer_name, neuron_indices, channel_index,
         num_iterations, ideal_activation, learning_rate, output_file_name):
    """Runs backwards optimization on a trained CNN.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param init_function_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_example_dir_name: Same.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param num_iterations: Same.
    :param ideal_activation: Same.
    :param learning_rate: Same.
    :param output_file_name: Same.
    """

    model_interpretation.check_component_type(component_type_string)

    if ideal_activation <= 0:
        ideal_activation = None
    if init_function_name in ['', 'None']:
        init_function_name = None

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print('Reading model metadata from: "{0:s}"...'.format(
        model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)

    if init_function_name is None:
        print('Reading storm metadata from: "{0:s}"...'.format(
            storm_metafile_name))

        storm_ids, storm_times_unix_sec = tracking_io.read_ids_and_times(
            storm_metafile_name)

        if 0 < num_examples < len(storm_ids):
            storm_ids = storm_ids[:num_examples]
            storm_times_unix_sec = storm_times_unix_sec[:num_examples]

        list_of_init_matrices = testing_io.read_specific_examples(
            desired_storm_ids=storm_ids,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
            top_example_dir_name=top_example_dir_name,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY])[0]

        num_examples = list_of_init_matrices[0].shape[0]
        print(SEPARATOR_STRING)

    else:
        storm_ids = None
        storm_times_unix_sec = None
        num_examples = 1

        init_function = _create_initializer(
            init_function_name=init_function_name,
            model_metadata_dict=model_metadata_dict)

    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    list_of_optimized_matrices = None

    for i in range(num_examples):
        if init_function_name is None:
            this_init_arg = [a[[i], ...] for a in list_of_init_matrices]
        else:
            this_init_arg = init_function

        if component_type_string == CLASS_COMPONENT_TYPE_STRING:
            print(
                '\nOptimizing image {0:d} of {1:d} for target class {2:d}...'
                .format(i + 1, num_examples, target_class))

            these_optimized_matrices = backwards_opt.optimize_input_for_class(
                model_object=model_object,
                target_class=target_class,
                init_function_or_matrices=this_init_arg,
                num_iterations=num_iterations,
                learning_rate=learning_rate)

        elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
            print(
                '\nOptimizing image {0:d} of {1:d} for neuron {2:s} in layer '
                '"{3:s}"...'.format(
                    i + 1, num_examples, str(neuron_indices), layer_name))

            these_optimized_matrices = backwards_opt.optimize_input_for_neuron(
                model_object=model_object,
                layer_name=layer_name,
                neuron_indices=neuron_indices,
                init_function_or_matrices=this_init_arg,
                num_iterations=num_iterations,
                learning_rate=learning_rate,
                ideal_activation=ideal_activation)

        else:
            print(
                '\nOptimizing image {0:d} of {1:d} for channel {2:d} in layer '
                '"{3:s}"...'.format(
                    i + 1, num_examples, channel_index, layer_name))

            these_optimized_matrices = backwards_opt.optimize_input_for_channel(
                model_object=model_object,
                layer_name=layer_name,
                channel_index=channel_index,
                init_function_or_matrices=this_init_arg,
                stat_function_for_neuron_activations=K.max,
                num_iterations=num_iterations,
                learning_rate=learning_rate,
                ideal_activation=ideal_activation)

        if list_of_optimized_matrices is None:
            num_matrices = len(these_optimized_matrices)
            list_of_optimized_matrices = [None] * num_matrices

        for k in range(len(list_of_optimized_matrices)):
            if list_of_optimized_matrices[k] is None:
                list_of_optimized_matrices[
                    k] = these_optimized_matrices[k] + 0.
            else:
                list_of_optimized_matrices[k] = numpy.concatenate(
                    (list_of_optimized_matrices[k],
                     these_optimized_matrices[k]),
                    axis=0)

    print(SEPARATOR_STRING)

    print('Denormalizing optimized examples...')
    list_of_optimized_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_optimized_matrices,
        model_metadata_dict=model_metadata_dict)

    if init_function_name is None:
        print('Denormalizing input examples...')
        list_of_init_matrices = model_interpretation.denormalize_data(
            list_of_input_matrices=list_of_init_matrices,
            model_metadata_dict=model_metadata_dict)

        this_init_arg = list_of_init_matrices
    else:
        this_init_arg = init_function_name + ''

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    backwards_opt.write_standard_file(
        pickle_file_name=output_file_name,
        list_of_optimized_matrices=list_of_optimized_matrices,
        model_file_name=model_file_name,
        init_function_name_or_matrices=this_init_arg,
        num_iterations=num_iterations,
        learning_rate=learning_rate,
        component_type_string=component_type_string,
        target_class=target_class,
        layer_name=layer_name,
        neuron_indices=neuron_indices,
        channel_index=channel_index,
        ideal_activation=ideal_activation,
        storm_ids=storm_ids,
        storm_times_unix_sec=storm_times_unix_sec)
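
The sketch below condenses the gradient-ascent loop that backwards_opt.optimize_input_for_class presumably runs: repeatedly nudge the input so the predicted probability of the target class moves toward 1. It is written against the TF-1-style keras.backend API, assumes a single-input model, and the loss definition and defaults are assumptions rather than the library's exact choices.

import numpy
from keras import backend as K


def _optimize_for_class_sketch(model_object, init_matrix, target_class,
                               num_iterations=200, learning_rate=0.01):
    """Returns a copy of init_matrix, optimized for the given target class."""

    # Loss = squared distance between predicted probability and 1.
    loss_tensor = K.mean(
        (model_object.output[..., target_class] - 1.) ** 2)
    gradient_tensor = K.gradients(loss_tensor, [model_object.input])[0]

    # Normalizing the gradient keeps the step size stable across iterations.
    gradient_tensor /= K.maximum(K.std(gradient_tensor), K.epsilon())
    step_function = K.function(
        [model_object.input], [loss_tensor, gradient_tensor])

    optimized_matrix = init_matrix + 0.
    for _ in range(num_iterations):
        _, gradient_matrix = step_function([optimized_matrix])
        optimized_matrix -= learning_rate * gradient_matrix

    return optimized_matrix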
Code Example #7
def _run(model_file_name, top_example_dir_name, storm_metafile_name,
         num_examples, output_file_name):
    """Creates dummy saliency map for each storm object.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_id_strings, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_id_strings):
        full_id_strings = full_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_input_matrices, sounding_pressure_matrix_pascals = (
        testing_io.read_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_full_id_strings=full_id_strings,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=training_option_dict,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]))

    radar_matrix = list_of_input_matrices[0]
    num_examples = radar_matrix.shape[0]
    num_channels = radar_matrix.shape[-1]

    kernel_matrix = numpy.expand_dims(EDGE_DETECTOR_MATRIX, axis=-1)
    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)
    kernel_matrix = numpy.expand_dims(kernel_matrix, axis=-1)
    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)

    radar_saliency_matrix = numpy.full(radar_matrix.shape, numpy.nan)

    for i in range(num_examples):
        this_saliency_matrix = standalone_utils.do_2d_convolution(
            feature_matrix=radar_matrix[i, ...],
            kernel_matrix=kernel_matrix,
            pad_edges=True,
            stride_length_px=1)

        radar_saliency_matrix[i, ...] = this_saliency_matrix[0, ...]

    list_of_saliency_matrices = [
        radar_saliency_matrix if k == 0 else list_of_input_matrices[k]
        for k in range(len(list_of_input_matrices))
    ]

    print('Denormalizing model inputs...')
    list_of_input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing saliency maps to file: "{0:s}"...'.format(output_file_name))

    saliency_metadata_dict = saliency_maps.check_metadata(
        component_type_string=model_interpretation.CLASS_COMPONENT_TYPE_STRING,
        target_class=1)

    saliency_maps.write_standard_file(
        pickle_file_name=output_file_name,
        list_of_input_matrices=list_of_input_matrices,
        list_of_saliency_matrices=list_of_saliency_matrices,
        full_id_strings=full_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        model_file_name=model_file_name,
        saliency_metadata_dict=saliency_metadata_dict,
        sounding_pressure_matrix_pascals=sounding_pressure_matrix_pascals)
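
EDGE_DETECTOR_MATRIX is a module-level constant that does not appear in this excerpt. For the "dummy saliency" trick to work it only needs to be a small edge-detection kernel; one plausible definition (an assumption for illustration, not the script's actual value) is a 3-by-3 Laplacian-style filter.

import numpy

# Hypothetical edge-detection kernel; the real constant may differ.
EDGE_DETECTOR_MATRIX = numpy.array([
    [0., 1., 0.],
    [1., -4., 1.],
    [0., 1., 0.]
])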
Code Example #8
def _run(activation_file_name, storm_metafile_name, num_examples,
         allow_whitespace, top_example_dir_name, radar_field_names,
         radar_heights_m_agl, plot_soundings, num_radar_rows,
         num_radar_columns, output_dir_name):
    """Plots many dataset examples (storm objects).

    This is effectively the main method.

    :param activation_file_name: See documentation at top of file.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param allow_whitespace: Same.
    :param top_example_dir_name: Same.
    :param radar_field_names: Same.
    :param radar_heights_m_agl: Same.
    :param plot_soundings: Same.
    :param num_radar_rows: Same.
    :param num_radar_columns: Same.
    :param output_dir_name: Same.
    :raises: TypeError: if activation file contains activations for more than
        one model component.
    """

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)

    storm_activations = None
    if activation_file_name in ['', 'None']:
        activation_file_name = None

    if activation_file_name is None:
        print('Reading data from: "{0:s}"...'.format(storm_metafile_name))
        full_storm_id_strings, storm_times_unix_sec = (
            tracking_io.read_ids_and_times(storm_metafile_name))

        training_option_dict = dict()
        training_option_dict[trainval_io.RADAR_FIELDS_KEY] = radar_field_names
        training_option_dict[
            trainval_io.RADAR_HEIGHTS_KEY] = radar_heights_m_agl
        training_option_dict[
            trainval_io.SOUNDING_FIELDS_KEY] = SOUNDING_FIELD_NAMES
        training_option_dict[
            trainval_io.SOUNDING_HEIGHTS_KEY] = SOUNDING_HEIGHTS_M_AGL

        training_option_dict[trainval_io.NUM_ROWS_KEY] = num_radar_rows
        training_option_dict[trainval_io.NUM_COLUMNS_KEY] = num_radar_columns
        training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
        training_option_dict[trainval_io.TARGET_NAME_KEY] = DUMMY_TARGET_NAME
        training_option_dict[trainval_io.BINARIZE_TARGET_KEY] = False
        training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
        training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

        model_metadata_dict = {
            cnn.TRAINING_OPTION_DICT_KEY: training_option_dict,
            cnn.LAYER_OPERATIONS_KEY: None,
        }

    else:
        print('Reading data from: "{0:s}"...'.format(activation_file_name))
        activation_matrix, activation_metadata_dict = (
            model_activation.read_file(activation_file_name))

        num_model_components = activation_matrix.shape[1]
        if num_model_components > 1:
            error_string = (
                'The file should contain activations for only one model '
                'component, not {0:d}.').format(num_model_components)

            raise TypeError(error_string)

        full_storm_id_strings = activation_metadata_dict[
            model_activation.FULL_IDS_KEY]
        storm_times_unix_sec = activation_metadata_dict[
            model_activation.STORM_TIMES_KEY]
        storm_activations = activation_matrix[:, 0]

        model_file_name = activation_metadata_dict[
            model_activation.MODEL_FILE_NAME_KEY]
        model_metafile_name = '{0:s}/model_metadata.p'.format(
            os.path.split(model_file_name)[0])

        print('Reading metadata from: "{0:s}"...'.format(model_metafile_name))
        model_metadata_dict = cnn.read_model_metadata(model_metafile_name)

        training_option_dict = model_metadata_dict[
            cnn.TRAINING_OPTION_DICT_KEY]
        training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
        training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
        training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

        model_metadata_dict[
            cnn.TRAINING_OPTION_DICT_KEY] = training_option_dict

    model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
        trainval_io.UPSAMPLE_REFLECTIVITY_KEY] = False

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]
        if storm_activations is not None:
            storm_activations = storm_activations[:num_examples]

    print(SEPARATOR_STRING)
    list_of_predictor_matrices = testing_io.read_specific_examples(
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        top_example_dir_name=top_example_dir_name,
        list_of_layer_operation_dicts=model_metadata_dict[
            cnn.LAYER_OPERATIONS_KEY])[0]
    print(SEPARATOR_STRING)

    plot_examples(list_of_predictor_matrices=list_of_predictor_matrices,
                  model_metadata_dict=model_metadata_dict,
                  output_dir_name=output_dir_name,
                  plot_soundings=plot_soundings,
                  allow_whitespace=allow_whitespace,
                  pmm_flag=False,
                  full_storm_id_strings=full_storm_id_strings,
                  storm_times_unix_sec=storm_times_unix_sec,
                  storm_activations=storm_activations)
Code Example #9
def _run(model_file_name, component_type_string, target_class, layer_name,
         ideal_activation, neuron_indices, channel_index, top_example_dir_name,
         storm_metafile_name, num_examples, randomize_weights,
         cascading_random, output_file_name):
    """Computes saliency map for each storm object and each model component.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param ideal_activation: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param randomize_weights: Same.
    :param cascading_random: Same.
    :param output_file_name: Same.
    """

    # Check input args.
    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
    model_interpretation.check_component_type(component_type_string)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    output_dir_name, pathless_output_file_name = os.path.split(
        output_file_name)
    extensionless_output_file_name, output_file_extension = os.path.splitext(
        pathless_output_file_name)

    if randomize_weights:
        conv_dense_layer_names = _find_conv_and_dense_layers(model_object)
        conv_dense_layer_names.reverse()
        num_sets = len(conv_dense_layer_names)
    else:
        conv_dense_layer_names = []
        num_sets = 1

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_id_strings, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_id_strings):
        full_id_strings = full_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_input_matrices, sounding_pressure_matrix_pascals = (
        testing_io.read_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_full_id_strings=full_id_strings,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=training_option_dict,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]))

    print(SEPARATOR_STRING)

    list_of_input_matrices_denorm = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(list_of_input_matrices),
        training_option_dict=training_option_dict)

    print('Denormalizing model inputs...')
    list_of_input_matrices_denorm = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_input_matrices_denorm,
        model_metadata_dict=model_metadata_dict)
    print(SEPARATOR_STRING)

    for k in range(num_sets):
        if randomize_weights:
            if cascading_random:
                _reset_weights_in_layer(model_object=model_object,
                                        layer_name=conv_dense_layer_names[k])

                this_model_object = model_object

                this_output_file_name = (
                    '{0:s}/{1:s}_cascading-random_{2:s}{3:s}').format(
                        output_dir_name, extensionless_output_file_name,
                        conv_dense_layer_names[k].replace('_', '-'),
                        output_file_extension)
            else:
                this_model_object = keras.models.Model.from_config(
                    model_object.get_config())
                this_model_object.set_weights(model_object.get_weights())

                _reset_weights_in_layer(model_object=this_model_object,
                                        layer_name=conv_dense_layer_names[k])

                this_output_file_name = '{0:s}/{1:s}_random_{2:s}{3:s}'.format(
                    output_dir_name, extensionless_output_file_name,
                    conv_dense_layer_names[k].replace('_', '-'),
                    output_file_extension)
        else:
            this_model_object = model_object
            this_output_file_name = output_file_name

        # print(K.eval(this_model_object.get_layer(name='dense_3').weights[0]))

        if component_type_string == CLASS_COMPONENT_TYPE_STRING:
            print('Computing saliency maps for target class {0:d}...'.format(
                target_class))

            list_of_saliency_matrices = (
                saliency_maps.get_saliency_maps_for_class_activation(
                    model_object=this_model_object,
                    target_class=target_class,
                    list_of_input_matrices=list_of_input_matrices))

        elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
            print(
                ('Computing saliency maps for neuron {0:s} in layer "{1:s}"...'
                 ).format(str(neuron_indices), layer_name))

            list_of_saliency_matrices = (
                saliency_maps.get_saliency_maps_for_neuron_activation(
                    model_object=this_model_object,
                    layer_name=layer_name,
                    neuron_indices=neuron_indices,
                    list_of_input_matrices=list_of_input_matrices,
                    ideal_activation=ideal_activation))

        else:
            print((
                'Computing saliency maps for channel {0:d} in layer "{1:s}"...'
            ).format(channel_index, layer_name))

            list_of_saliency_matrices = (
                saliency_maps.get_saliency_maps_for_channel_activation(
                    model_object=this_model_object,
                    layer_name=layer_name,
                    channel_index=channel_index,
                    list_of_input_matrices=list_of_input_matrices,
                    stat_function_for_neuron_activations=K.max,
                    ideal_activation=ideal_activation))

        list_of_saliency_matrices = trainval_io.separate_shear_and_reflectivity(
            list_of_input_matrices=list_of_saliency_matrices,
            training_option_dict=training_option_dict)

        print('Writing saliency maps to file: "{0:s}"...'.format(
            this_output_file_name))

        saliency_metadata_dict = saliency_maps.check_metadata(
            component_type_string=component_type_string,
            target_class=target_class,
            layer_name=layer_name,
            ideal_activation=ideal_activation,
            neuron_indices=neuron_indices,
            channel_index=channel_index)

        saliency_maps.write_standard_file(
            pickle_file_name=this_output_file_name,
            list_of_input_matrices=list_of_input_matrices_denorm,
            list_of_saliency_matrices=list_of_saliency_matrices,
            full_id_strings=full_id_strings,
            storm_times_unix_sec=storm_times_unix_sec,
            model_file_name=model_file_name,
            saliency_metadata_dict=saliency_metadata_dict,
            sounding_pressure_matrix_pascals=sounding_pressure_matrix_pascals)
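
_find_conv_and_dense_layers and _reset_weights_in_layer are private helpers defined elsewhere in this script; together they implement the model-parameter-randomization sanity check for saliency maps. A minimal sketch of what the weight reset presumably does (an assumption about the helper, using only standard Keras calls) is shown below.

import numpy


def _reset_weights_in_layer_sketch(model_object, layer_name):
    """Replaces the named layer's weights with random values of similar scale."""

    layer_object = model_object.get_layer(name=layer_name)
    new_weight_matrices = []

    for this_weight_matrix in layer_object.get_weights():
        this_stdev = max([numpy.std(this_weight_matrix), 1e-6])
        new_weight_matrices.append(
            numpy.random.normal(
                loc=0., scale=this_stdev, size=this_weight_matrix.shape)
        )

    layer_object.set_weights(new_weight_matrices)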
Code Example #10
def _run(model_file_name, component_type_string, target_class, layer_name,
         ideal_activation, neuron_indices, channel_index, top_example_dir_name,
         storm_metafile_name, num_examples, output_file_name):
    """Computes saliency map for each storm object and each model component.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param ideal_activation: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    # Check input args.
    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
    model_interpretation.check_component_type(component_type_string)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print('Reading model metadata from: "{0:s}"...'.format(
        model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print('Reading storm metadata from: "{0:s}"...'.format(
        storm_metafile_name))
    storm_ids, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(storm_ids):
        storm_ids = storm_ids[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_input_matrices, sounding_pressure_matrix_pascals = (
        testing_io.read_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_storm_ids=storm_ids,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=training_option_dict,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]))
    print(SEPARATOR_STRING)

    if component_type_string == CLASS_COMPONENT_TYPE_STRING:
        print('Computing saliency maps for target class {0:d}...'.format(
            target_class))

        list_of_saliency_matrices = (
            saliency_maps.get_saliency_maps_for_class_activation(
                model_object=model_object,
                target_class=target_class,
                list_of_input_matrices=list_of_input_matrices))

    elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
        print('Computing saliency maps for neuron {0:s} in layer "{1:s}"...'
              .format(str(neuron_indices), layer_name))

        list_of_saliency_matrices = (
            saliency_maps.get_saliency_maps_for_neuron_activation(
                model_object=model_object,
                layer_name=layer_name,
                neuron_indices=neuron_indices,
                list_of_input_matrices=list_of_input_matrices,
                ideal_activation=ideal_activation))

    else:
        print('Computing saliency maps for channel {0:d} in layer "{1:s}"...'
              .format(channel_index, layer_name))

        list_of_saliency_matrices = (
            saliency_maps.get_saliency_maps_for_channel_activation(
                model_object=model_object,
                layer_name=layer_name,
                channel_index=channel_index,
                list_of_input_matrices=list_of_input_matrices,
                stat_function_for_neuron_activations=K.max,
                ideal_activation=ideal_activation))

    print('Denormalizing model inputs...')
    list_of_input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing saliency maps to file: "{0:s}"...'.format(output_file_name))

    saliency_metadata_dict = saliency_maps.check_metadata(
        component_type_string=component_type_string,
        target_class=target_class,
        layer_name=layer_name,
        ideal_activation=ideal_activation,
        neuron_indices=neuron_indices,
        channel_index=channel_index)

    saliency_maps.write_standard_file(
        pickle_file_name=output_file_name,
        list_of_input_matrices=list_of_input_matrices,
        list_of_saliency_matrices=list_of_saliency_matrices,
        storm_ids=storm_ids,
        storm_times_unix_sec=storm_times_unix_sec,
        model_file_name=model_file_name,
        saliency_metadata_dict=saliency_metadata_dict,
        sounding_pressure_matrix_pascals=sounding_pressure_matrix_pascals)
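
Finally, the sketch below condenses what saliency_maps.get_saliency_maps_for_class_activation presumably computes: the gradient of the target-class activation with respect to every input value, evaluated in a single pass. It is written against the TF-1-style keras.backend API; the exact loss and any normalization are assumptions, not the library source.

from keras import backend as K


def _class_saliency_sketch(model_object, target_class, list_of_input_matrices):
    """Returns one saliency matrix per input matrix."""

    if isinstance(model_object.input, list):
        input_tensors = model_object.input
    else:
        input_tensors = [model_object.input]

    # Scalar activation for the target class, summed over examples.
    activation_tensor = K.sum(model_object.output[..., target_class])
    gradient_tensors = K.gradients(activation_tensor, input_tensors)
    saliency_function = K.function(input_tensors, gradient_tensors)

    return saliency_function(list_of_input_matrices)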