def _run(model_file_name, top_example_dir_name, storm_metafile_name,
         num_examples, output_file_name):
    """Creates dummy saliency map for each storm object.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name))

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    sounding_pressure_matrix_pa = (
        example_dict[testing_io.SOUNDING_PRESSURES_KEY])

    radar_matrix = predictor_matrices[0]
    num_examples = radar_matrix.shape[0]
    num_channels = radar_matrix.shape[-1]
    num_spatial_dim = len(radar_matrix.shape) - 2

    if num_spatial_dim == 2:
        kernel_matrix = numpy.expand_dims(EDGE_DETECTOR_MATRIX_2D, axis=-1)
    else:
        kernel_matrix = numpy.expand_dims(EDGE_DETECTOR_MATRIX_3D, axis=-1)

    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)
    kernel_matrix = numpy.expand_dims(kernel_matrix, axis=-1)
    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)

    radar_saliency_matrix = numpy.full(radar_matrix.shape, numpy.nan)

    for i in range(num_examples):
        if numpy.mod(i, 10) == 0:
            print((
                'Have created dummy saliency map for {0:d} of {1:d} examples...'
            ).format(i, num_examples))

        if num_spatial_dim == 2:
            this_saliency_matrix = standalone_utils.do_2d_convolution(
                feature_matrix=radar_matrix[i, ...],
                kernel_matrix=kernel_matrix,
                pad_edges=True,
                stride_length_px=1)
        else:
            this_saliency_matrix = standalone_utils.do_3d_convolution(
                feature_matrix=radar_matrix[i, ...],
                kernel_matrix=kernel_matrix,
                pad_edges=True,
                stride_length_px=1)

        radar_saliency_matrix[i, ...] = this_saliency_matrix[0, ...]

    print('Have created dummy saliency map for all {0:d} examples!'.format(
        num_examples))
    print(SEPARATOR_STRING)

    saliency_matrices = [
        radar_saliency_matrix if k == 0 else predictor_matrices[k]
        for k in range(len(predictor_matrices))
    ]
    saliency_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=saliency_matrices,
        training_option_dict=training_option_dict)
    denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(predictor_matrices),
        training_option_dict=training_option_dict)

    print('Denormalizing model inputs...')
    denorm_predictor_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_predictor_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing saliency maps to file: "{0:s}"...'.format(output_file_name))

    saliency_metadata_dict = saliency_maps.check_metadata(
        component_type_string=model_interpretation.CLASS_COMPONENT_TYPE_STRING,
        target_class=1)

    saliency_maps.write_standard_file(
        pickle_file_name=output_file_name,
        denorm_predictor_matrices=denorm_predictor_matrices,
        saliency_matrices=saliency_matrices,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        model_file_name=model_file_name,
        metadata_dict=saliency_metadata_dict,
        sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
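
# A minimal sketch, assuming the edge-detector kernels referenced above
# (EDGE_DETECTOR_MATRIX_2D/_3D) are standard Laplacian stencils.  The real
# constants are defined at the top of the file and may differ; the names below
# are hypothetical.
import numpy

SKETCH_EDGE_DETECTOR_MATRIX_2D = numpy.array([
    [0., 1., 0.],
    [1., -4., 1.],
    [0., 1., 0.]
])

# 3-D analogue: couples each grid point to its 6 neighbours in x, y, and z.
SKETCH_EDGE_DETECTOR_MATRIX_3D = numpy.zeros((3, 3, 3))
SKETCH_EDGE_DETECTOR_MATRIX_3D[1, 1, 1] = -6.

for _axis_index in range(3):
    for _offset in [0, 2]:
        _indices = [1, 1, 1]
        _indices[_axis_index] = _offset
        SKETCH_EDGE_DETECTOR_MATRIX_3D[tuple(_indices)] = 1.
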
def _run(prediction_file_name, top_example_dir_name, diff_colour_map_name,
         num_examples, allow_whitespace, plot_panel_names, add_titles,
         label_colour_bars, colour_bar_length, top_output_dir_name):
    """Plots one or more radar images and their upconvnet reconstructions.

    This is effectively the main method.

    :param prediction_file_name: See documentation at top of file.
    :param top_example_dir_name: Same.
    :param diff_colour_map_name: Same.
    :param num_examples: Same.
    :param allow_whitespace: Same.
    :param plot_panel_names: Same.
    :param add_titles: Same.
    :param label_colour_bars: Same.
    :param colour_bar_length: Same.
    :param top_output_dir_name: Same.
    """

    diff_colour_map_object = pyplot.get_cmap(diff_colour_map_name)

    # Read data.
    print('Reading reconstructed radar images from: "{0:s}"...'.format(
        prediction_file_name
    ))
    prediction_dict = upconvnet.read_predictions(prediction_file_name)

    reconstructed_matrices = prediction_dict[
        upconvnet.RECON_IMAGE_MATRICES_KEY]
    full_storm_id_strings = prediction_dict[upconvnet.FULL_STORM_IDS_KEY]
    storm_times_unix_sec = prediction_dict[upconvnet.STORM_TIMES_KEY]

    if 0 < num_examples < len(full_storm_id_strings):
        reconstructed_matrices = [
            a[:num_examples, ...] for a in reconstructed_matrices
        ]
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    upconvnet_file_name = prediction_dict[upconvnet.UPCONVNET_FILE_KEY]
    upconvnet_metafile_name = cnn.find_metafile(upconvnet_file_name)

    print('Reading upconvnet metadata from: "{0:s}"...'.format(
        upconvnet_metafile_name
    ))
    upconvnet_metadata_dict = upconvnet.read_model_metadata(
        upconvnet_metafile_name)
    cnn_file_name = upconvnet_metadata_dict[upconvnet.CNN_FILE_KEY]
    cnn_metafile_name = cnn.find_metafile(cnn_file_name)

    print('Reading CNN metadata from: "{0:s}"...'.format(cnn_metafile_name))
    cnn_metadata_dict = cnn.read_model_metadata(cnn_metafile_name)

    training_option_dict = cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
    training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] = None
    training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY] = False
    cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY] = training_option_dict

    print(SEPARATOR_STRING)
    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=cnn_metadata_dict[cnn.LAYER_OPERATIONS_KEY]
    )
    print(SEPARATOR_STRING)

    plot_examples.plot_examples(
        list_of_predictor_matrices=example_dict[testing_io.INPUT_MATRICES_KEY],
        model_metadata_dict=cnn_metadata_dict,
        output_dir_name='{0:s}/actual_images'.format(top_output_dir_name),
        pmm_flag=False, plot_soundings=False, plot_radar_diffs=False,
        allow_whitespace=allow_whitespace, plot_panel_names=plot_panel_names,
        add_titles=add_titles, label_colour_bars=label_colour_bars,
        colour_bar_length=colour_bar_length,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec)
    print(SEPARATOR_STRING)

    plot_examples.plot_examples(
        list_of_predictor_matrices=reconstructed_matrices,
        model_metadata_dict=cnn_metadata_dict,
        output_dir_name='{0:s}/reconstructed_images'.format(
            top_output_dir_name),
        pmm_flag=False, plot_soundings=False, plot_radar_diffs=False,
        allow_whitespace=allow_whitespace, plot_panel_names=plot_panel_names,
        add_titles=add_titles, label_colour_bars=label_colour_bars,
        colour_bar_length=colour_bar_length,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec)
    print(SEPARATOR_STRING)

    difference_matrices = [
        r - a for r, a in zip(
            reconstructed_matrices, example_dict[testing_io.INPUT_MATRICES_KEY]
        )
    ]

    plot_examples.plot_examples(
        list_of_predictor_matrices=difference_matrices,
        model_metadata_dict=cnn_metadata_dict,
        output_dir_name='{0:s}/differences'.format(top_output_dir_name),
        pmm_flag=False, plot_soundings=False, plot_radar_diffs=True,
        diff_colour_map_object=diff_colour_map_object, max_diff_percentile=99.,
        allow_whitespace=allow_whitespace, plot_panel_names=plot_panel_names,
        add_titles=add_titles, label_colour_bars=label_colour_bars,
        colour_bar_length=colour_bar_length,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec)
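
# A small sketch of what `max_diff_percentile=99.` presumably controls in the
# difference plots above: symmetric colour limits at the 99th percentile of
# absolute difference, so that a few extreme pixels do not wash out the colour
# map.  This helper is hypothetical, not part of `plot_examples`.
import numpy

def _sketch_symmetric_colour_limits(difference_matrix, max_percentile=99.):
    """Returns (min_value, max_value) for a diverging colour map."""
    max_colour_value = numpy.percentile(
        numpy.absolute(difference_matrix), max_percentile)
    return -1 * max_colour_value, max_colour_value
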
def _run(model_file_name, layer_names, top_example_dir_name,
         storm_metafile_name, num_examples, top_output_dir_name):
    """Evaluates CNN (convolutional neural net) predictions.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param layer_names: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_output_dir_name: Same.
    :raises: ValueError: if feature maps do not have 2 or 3 spatial dimensions.
    """

    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0]
    )

    print('Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print('Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_id_strings, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_id_strings):
        full_id_strings = full_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY]
    )

    print(SEPARATOR_STRING)
    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]

    include_soundings = (
        training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] is not None
    )

    if include_soundings:
        sounding_matrix = predictor_matrices[-1]
    else:
        sounding_matrix = None

    num_layers = len(layer_names)
    feature_matrix_by_layer = [numpy.array([])] * num_layers

    for k in range(num_layers):
        if model_metadata_dict[cnn.CONV_2D3D_KEY]:
            if training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY]:
                feature_matrix_by_layer[k] = cnn.apply_2d_or_3d_cnn(
                    model_object=model_object,
                    radar_image_matrix=predictor_matrices[0],
                    sounding_matrix=sounding_matrix,
                    return_features=True, feature_layer_name=layer_names[k]
                )
            else:
                feature_matrix_by_layer[k] = cnn.apply_2d3d_cnn(
                    model_object=model_object,
                    reflectivity_matrix_dbz=predictor_matrices[0],
                    azimuthal_shear_matrix_s01=predictor_matrices[1],
                    sounding_matrix=sounding_matrix,
                    return_features=True, feature_layer_name=layer_names[k]
                )
        else:
            feature_matrix_by_layer[k] = cnn.apply_2d_or_3d_cnn(
                model_object=model_object,
                radar_image_matrix=predictor_matrices[0],
                sounding_matrix=sounding_matrix,
                return_features=True, feature_layer_name=layer_names[k]
            )

    for k in range(num_layers):
        this_output_dir_name = '{0:s}/{1:s}'.format(
            top_output_dir_name, layer_names[k]
        )

        file_system_utils.mkdir_recursive_if_necessary(
            directory_name=this_output_dir_name)

        _plot_feature_maps_one_layer(
            feature_matrix=feature_matrix_by_layer[k],
            full_id_strings=full_id_strings,
            storm_times_unix_sec=storm_times_unix_sec,
            layer_name=layer_names[k],
            output_dir_name=this_output_dir_name)

        print(SEPARATOR_STRING)
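
# A sketch of what `return_features=True, feature_layer_name=...` presumably
# does inside `cnn.apply_2d_or_3d_cnn`: build a truncated model whose output is
# the named layer's activations, then run a forward pass.  This is the standard
# Keras idiom; the actual implementation in the `cnn` module may differ.
import keras

def _sketch_extract_feature_maps(model_object, layer_name, predictor_matrices):
    """Returns feature maps from one intermediate layer."""
    partial_model_object = keras.models.Model(
        inputs=model_object.input,
        outputs=model_object.get_layer(name=layer_name).output)

    return partial_model_object.predict(predictor_matrices, batch_size=32)
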
def _run(model_file_name, target_class, target_layer_name,
         top_example_dir_name, storm_metafile_name, num_examples,
         randomize_weights, cascading_random, output_file_name):
    """Runs Grad-CAM (gradient-weighted class-activation maps).

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param target_class: Same.
    :param target_layer_name: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param randomize_weights: Same.
    :param cascading_random: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    output_dir_name, pathless_output_file_name = os.path.split(
        output_file_name)
    extensionless_output_file_name, output_file_extension = os.path.splitext(
        pathless_output_file_name)

    if randomize_weights:
        conv_dense_layer_names = _find_conv_and_dense_layers(model_object)
        conv_dense_layer_names.reverse()
        num_sets = len(conv_dense_layer_names)
    else:
        conv_dense_layer_names = []
        num_sets = 1

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name))

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    sounding_pressure_matrix_pa = (
        example_dict[testing_io.SOUNDING_PRESSURES_KEY])

    print('Denormalizing model inputs...')
    denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(predictor_matrices),
        training_option_dict=training_option_dict)
    denorm_predictor_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_predictor_matrices,
        model_metadata_dict=model_metadata_dict)
    print(SEPARATOR_STRING)

    for k in range(num_sets):
        if randomize_weights:
            if cascading_random:
                _reset_weights_in_layer(model_object=model_object,
                                        layer_name=conv_dense_layer_names[k])

                this_model_object = model_object

                this_output_file_name = (
                    '{0:s}/{1:s}_cascading-random_{2:s}{3:s}').format(
                        output_dir_name, extensionless_output_file_name,
                        conv_dense_layer_names[k].replace('_', '-'),
                        output_file_extension)
            else:
                this_model_object = keras.models.Model.from_config(
                    model_object.get_config())
                this_model_object.set_weights(model_object.get_weights())

                _reset_weights_in_layer(model_object=this_model_object,
                                        layer_name=conv_dense_layer_names[k])

                this_output_file_name = '{0:s}/{1:s}_random_{2:s}{3:s}'.format(
                    output_dir_name, extensionless_output_file_name,
                    conv_dense_layer_names[k].replace('_', '-'),
                    output_file_extension)
        else:
            this_model_object = model_object
            this_output_file_name = output_file_name

        # print(K.eval(this_model_object.get_layer(name='dense_53').weights[0]))

        these_cam_matrices, these_guided_cam_matrices = (
            _run_gradcam_one_weight_set(
                model_object=this_model_object,
                target_class=target_class,
                target_layer_name=target_layer_name,
                predictor_matrices=predictor_matrices,
                training_option_dict=training_option_dict))

        print('Writing results to file: "{0:s}"...'.format(
            this_output_file_name))
        gradcam.write_standard_file(
            pickle_file_name=this_output_file_name,
            denorm_predictor_matrices=denorm_predictor_matrices,
            cam_matrices=these_cam_matrices,
            guided_cam_matrices=these_guided_cam_matrices,
            full_storm_id_strings=full_storm_id_strings,
            storm_times_unix_sec=storm_times_unix_sec,
            model_file_name=model_file_name,
            target_class=target_class,
            target_layer_name=target_layer_name,
            sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)

        print(SEPARATOR_STRING)
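
# `_find_conv_and_dense_layers` and `_reset_weights_in_layer` are defined
# elsewhere in this file.  A minimal sketch of the latter, assuming it
# re-initializes one layer's weights in place with random values (the actual
# initializer may differ):
import keras.backend as K
import numpy

def _sketch_reset_weights_in_layer(model_object, layer_name):
    """Randomizes all weights in one layer of a Keras model, in place."""
    layer_object = model_object.get_layer(name=layer_name)

    for this_weight_tensor in layer_object.weights:
        these_dimensions = K.int_shape(this_weight_tensor)
        K.set_value(
            this_weight_tensor,
            numpy.random.normal(loc=0., scale=0.05, size=these_dimensions))
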
def _run(activation_file_name, num_examples, top_example_dir_name,
         output_dir_name):
    """Plots inputs (radar images) for CNN flow chart.

    This is effectively the main method.

    :param activation_file_name: See documentation at top of file.
    :param num_examples: Same.
    :param top_example_dir_name: Same.
    :param output_dir_name: Same.
    :raises: ValueError: if activation file contains activations for more than
        one model component.
    """

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name
    )

    print('Reading data from: "{0:s}"...'.format(activation_file_name))
    activation_matrix, activation_metadata_dict = (
        model_activation.read_file(activation_file_name)
    )

    num_model_components = activation_matrix.shape[1]
    if num_model_components > 1:
        error_string = (
            'The file should contain activations for only one model '
            'component, not {0:d}.'
        ).format(num_model_components)

        raise ValueError(error_string)

    full_storm_id_strings = (
        activation_metadata_dict[model_activation.FULL_IDS_KEY]
    )
    storm_times_unix_sec = (
        activation_metadata_dict[model_activation.STORM_TIMES_KEY]
    )
    model_file_name = (
        activation_metadata_dict[model_activation.MODEL_FILE_NAME_KEY]
    )
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0]
    )

    print('Reading metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)

    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
    training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None
    training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] = RADAR_HEIGHTS_M_AGL

    model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY] = training_option_dict
    model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
        trainval_io.UPSAMPLE_REFLECTIVITY_KEY
    ] = False

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    print(SEPARATOR_STRING)
    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY]
    )
    print(SEPARATOR_STRING)

    radar_matrix = example_dict[testing_io.INPUT_MATRICES_KEY][0]
    sounding_matrix = example_dict[testing_io.INPUT_MATRICES_KEY][-1]
    sounding_pressure_matrix_pa = (
        example_dict[testing_io.SOUNDING_PRESSURES_KEY]
    )
    num_examples = len(full_storm_id_strings)

    for i in range(num_examples):
        _plot_one_example(
            radar_matrix=radar_matrix[[i], ...],
            sounding_matrix=sounding_matrix[[i], ...],
            sounding_pressures_pascals=sounding_pressure_matrix_pa[i, ...],
            full_storm_id_string=full_storm_id_strings[i],
            storm_time_unix_sec=storm_times_unix_sec[i],
            model_metadata_dict=model_metadata_dict,
            output_dir_name=output_dir_name
        )
        print('\n')
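
# A quick illustration of the two indexing idioms used in the loop above:
# `radar_matrix[[i], ...]` keeps the leading example axis (so
# `_plot_one_example` still sees a batch of size 1), while
# `sounding_pressure_matrix_pa[i, ...]` drops it.
import numpy

_example_matrix = numpy.zeros((4, 32, 32, 3))
assert _example_matrix[[0], ...].shape == (1, 32, 32, 3)  # example axis kept
assert _example_matrix[0, ...].shape == (32, 32, 3)  # example axis dropped
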
def _run(model_file_name, component_type_string, target_class, layer_name,
         ideal_activation, neuron_indices, channel_index, top_example_dir_name,
         storm_metafile_name, num_examples, randomize_weights,
         cascading_random, output_file_name):
    """Computes saliency map for each storm object and each model component.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param ideal_activation: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param randomize_weights: Same.
    :param cascading_random: Same.
    :param output_file_name: Same.
    """

    # Check input args.
    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
    model_interpretation.check_component_type(component_type_string)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    output_dir_name, pathless_output_file_name = os.path.split(
        output_file_name)
    extensionless_output_file_name, output_file_extension = os.path.splitext(
        pathless_output_file_name)

    if randomize_weights:
        conv_dense_layer_names = _find_conv_and_dense_layers(model_object)
        conv_dense_layer_names.reverse()
        num_sets = len(conv_dense_layer_names)
    else:
        conv_dense_layer_names = []
        num_sets = 1

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name))

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    sounding_pressure_matrix_pa = example_dict[
        testing_io.SOUNDING_PRESSURES_KEY]

    denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(predictor_matrices),
        training_option_dict=training_option_dict)

    print('Denormalizing model inputs...')
    denorm_predictor_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_predictor_matrices,
        model_metadata_dict=model_metadata_dict)
    print(SEPARATOR_STRING)

    for k in range(num_sets):
        if randomize_weights:
            if cascading_random:
                _reset_weights_in_layer(model_object=model_object,
                                        layer_name=conv_dense_layer_names[k])

                this_model_object = model_object

                this_output_file_name = (
                    '{0:s}/{1:s}_cascading-random_{2:s}{3:s}').format(
                        output_dir_name, extensionless_output_file_name,
                        conv_dense_layer_names[k].replace('_', '-'),
                        output_file_extension)
            else:
                this_model_object = keras.models.Model.from_config(
                    model_object.get_config())
                this_model_object.set_weights(model_object.get_weights())

                _reset_weights_in_layer(model_object=this_model_object,
                                        layer_name=conv_dense_layer_names[k])

                this_output_file_name = '{0:s}/{1:s}_random_{2:s}{3:s}'.format(
                    output_dir_name, extensionless_output_file_name,
                    conv_dense_layer_names[k].replace('_', '-'),
                    output_file_extension)
        else:
            this_model_object = model_object
            this_output_file_name = output_file_name

        # print(K.eval(this_model_object.get_layer(name='dense_3').weights[0]))

        if component_type_string == CLASS_COMPONENT_TYPE_STRING:
            print('Computing saliency maps for target class {0:d}...'.format(
                target_class))

            saliency_matrices = (
                saliency_maps.get_saliency_maps_for_class_activation(
                    model_object=this_model_object,
                    target_class=target_class,
                    list_of_input_matrices=predictor_matrices))

        elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
            print(
                ('Computing saliency maps for neuron {0:s} in layer "{1:s}"...'
                 ).format(str(neuron_indices), layer_name))

            saliency_matrices = (
                saliency_maps.get_saliency_maps_for_neuron_activation(
                    model_object=this_model_object,
                    layer_name=layer_name,
                    neuron_indices=neuron_indices,
                    list_of_input_matrices=predictor_matrices,
                    ideal_activation=ideal_activation))

        else:
            print((
                'Computing saliency maps for channel {0:d} in layer "{1:s}"...'
            ).format(channel_index, layer_name))

            saliency_matrices = (
                saliency_maps.get_saliency_maps_for_channel_activation(
                    model_object=this_model_object,
                    layer_name=layer_name,
                    channel_index=channel_index,
                    list_of_input_matrices=predictor_matrices,
                    stat_function_for_neuron_activations=K.max,
                    ideal_activation=ideal_activation))

        saliency_matrices = trainval_io.separate_shear_and_reflectivity(
            list_of_input_matrices=saliency_matrices,
            training_option_dict=training_option_dict)

        print('Writing saliency maps to file: "{0:s}"...'.format(
            this_output_file_name))

        saliency_metadata_dict = saliency_maps.check_metadata(
            component_type_string=component_type_string,
            target_class=target_class,
            layer_name=layer_name,
            ideal_activation=ideal_activation,
            neuron_indices=neuron_indices,
            channel_index=channel_index)

        saliency_maps.write_standard_file(
            pickle_file_name=this_output_file_name,
            denorm_predictor_matrices=denorm_predictor_matrices,
            saliency_matrices=saliency_matrices,
            full_storm_id_strings=full_storm_id_strings,
            storm_times_unix_sec=storm_times_unix_sec,
            model_file_name=model_file_name,
            metadata_dict=saliency_metadata_dict,
            sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
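
# A sketch of what `saliency_maps.get_saliency_maps_for_class_activation`
# presumably computes: the gradient of the target-class activation with respect
# to each input tensor, via the old Keras backend API.  Hypothetical helper;
# the real function also handles ideal activations and the other component
# types (neuron and channel).
import keras.backend as K

def _sketch_class_saliency(model_object, target_class, list_of_input_matrices):
    """Returns one saliency matrix per input tensor."""
    list_of_input_tensors = (
        model_object.input if isinstance(model_object.input, list)
        else [model_object.input]
    )

    activation_tensor = model_object.layers[-1].output[..., target_class]
    list_of_gradient_tensors = K.gradients(
        K.sum(activation_tensor), list_of_input_tensors)

    gradient_function = K.function(
        list_of_input_tensors, list_of_gradient_tensors)
    return gradient_function(list_of_input_matrices)
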
def _read_one_example(top_example_dir_name, full_storm_id_string,
                      storm_time_unix_sec, source_name, radar_field_name,
                      include_sounding):
    """Reads one example (storm object).

    T = number of input tensors to model
    H_s = number of heights in sounding

    :param top_example_dir_name: See documentation at top of file.
    :param full_storm_id_string: Full storm ID.
    :param storm_time_unix_sec: Valid time of storm.
    :param source_name: Radar source (must be accepted by
        `radar_utils.check_data_source`).
    :param radar_field_name: Name of radar field (must be accepted by
        `radar_utils.check_field_name`).
    :param include_sounding: Boolean flag.
    :return: predictor_matrices: length-T list of numpy arrays, where
        the [i]th array is the [i]th input tensor to the model.  The first axis
        of each array has length = 1.
    :return: model_metadata_dict: See doc for `cnn.write_model_metadata`.
    :return: sounding_pressures_pa: length-H_s numpy array of pressures
        (Pascals).  If soundings were not read, this is None.
    """

    if source_name == radar_utils.GRIDRAD_SOURCE_ID:
        num_radar_rows = NUM_GRIDRAD_ROWS
        num_radar_columns = NUM_GRIDRAD_COLUMNS
    else:
        num_radar_rows = NUM_MYRORSS_ROWS
        num_radar_columns = NUM_MYRORSS_COLUMNS

    training_option_dict = dict()
    training_option_dict[trainval_io.RADAR_FIELDS_KEY] = [radar_field_name]
    training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] = RADAR_HEIGHTS_M_AGL
    training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] = (
        SOUNDING_FIELD_NAMES if include_sounding else None)
    training_option_dict[trainval_io.SOUNDING_HEIGHTS_KEY] = (
        SOUNDING_HEIGHTS_M_AGL)

    training_option_dict[trainval_io.NUM_ROWS_KEY] = num_radar_rows
    training_option_dict[trainval_io.NUM_COLUMNS_KEY] = num_radar_columns
    training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
    training_option_dict[trainval_io.TARGET_NAME_KEY] = DUMMY_TARGET_NAME
    training_option_dict[trainval_io.BINARIZE_TARGET_KEY] = False
    training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None
    training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY] = False

    model_metadata_dict = {
        cnn.TRAINING_OPTION_DICT_KEY: training_option_dict,
        cnn.LAYER_OPERATIONS_KEY: None,
    }

    print(MINOR_SEPARATOR_STRING)

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=[full_storm_id_string],
        desired_times_unix_sec=numpy.array([storm_time_unix_sec], dtype=int),
        option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        layer_operation_dicts=None)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    sounding_pressure_matrix_pa = example_dict[
        testing_io.SOUNDING_PRESSURES_KEY]

    if sounding_pressure_matrix_pa is None:
        sounding_pressures_pa = None
    else:
        sounding_pressures_pa = sounding_pressure_matrix_pa[0, ...]

    return predictor_matrices, model_metadata_dict, sounding_pressures_pa
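
# Example call (all argument values below are hypothetical):
#
#     predictor_matrices, model_metadata_dict, sounding_pressures_pa = (
#         _read_one_example(
#             top_example_dir_name='/data/gridrad_examples',
#             full_storm_id_string='foo_20110404_123456',
#             storm_time_unix_sec=1301918400,
#             source_name=radar_utils.GRIDRAD_SOURCE_ID,
#             radar_field_name='reflectivity_dbz',
#             include_sounding=True)
#     )
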
def _run(model_file_name, top_example_dir_name, storm_metafile_name,
         num_examples, do_backwards_test, separate_radar_heights,
         num_bootstrap_reps, output_file_name):
    """Runs permutation test with specific examples (storm objects).

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param do_backwards_test: Same.
    :param separate_radar_heights: Same.
    :param num_bootstrap_reps: Same.
    :param output_file_name: Same.
    """

    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)
    metafile_name = cnn.find_metafile(model_file_name=model_file_name)

    print('Reading metadata from: "{0:s}"...'.format(metafile_name))
    cnn_metadata_dict = cnn.read_model_metadata(metafile_name)
    training_option_dict = cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name))
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        numpy.random.seed(RANDOM_SEED)
        good_indices = numpy.random.permutation(len(full_storm_id_strings))
        good_indices = good_indices[:num_examples]

        full_storm_id_strings = [
            full_storm_id_strings[k] for k in good_indices
        ]
        storm_times_unix_sec = storm_times_unix_sec[good_indices]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=cnn_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    target_values = example_dict[testing_io.TARGET_ARRAY_KEY]

    correlation_matrix, predictor_names = correlation.get_pearson_correlations(
        predictor_matrices=predictor_matrices,
        cnn_metadata_dict=cnn_metadata_dict,
        separate_radar_heights=separate_radar_heights)
    print(SEPARATOR_STRING)

    num_predictors = len(predictor_names)

    for i in range(num_predictors):
        for j in range(i, num_predictors):
            print(('Pearson correlation between "{0:s}" and "{1:s}" = {2:.3f}'
                   ).format(predictor_names[i], predictor_names[j],
                            correlation_matrix[i, j]))

    print(SEPARATOR_STRING)

    if do_backwards_test:
        result_dict = permutation.run_backwards_test(
            model_object=model_object,
            predictor_matrices=predictor_matrices,
            target_values=target_values,
            cnn_metadata_dict=cnn_metadata_dict,
            cost_function=permutation_utils.negative_auc_function,
            separate_radar_heights=separate_radar_heights,
            num_bootstrap_reps=num_bootstrap_reps)
    else:
        result_dict = permutation.run_forward_test(
            model_object=model_object,
            predictor_matrices=predictor_matrices,
            target_values=target_values,
            cnn_metadata_dict=cnn_metadata_dict,
            cost_function=permutation_utils.negative_auc_function,
            separate_radar_heights=separate_radar_heights,
            num_bootstrap_reps=num_bootstrap_reps)

    print(SEPARATOR_STRING)

    result_dict[permutation_utils.MODEL_FILE_KEY] = model_file_name
    result_dict[permutation_utils.TARGET_VALUES_KEY] = target_values
    result_dict[permutation_utils.FULL_IDS_KEY] = full_storm_id_strings
    result_dict[permutation_utils.STORM_TIMES_KEY] = storm_times_unix_sec

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    permutation_utils.write_results(result_dict=result_dict,
                                    pickle_file_name=output_file_name)
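
# A sketch of the core operation behind the forward and backwards permutation
# tests above: shuffle one predictor across examples and measure how much the
# cost (here, negative AUC) degrades.  Hypothetical helper; the real logic
# lives in the `permutation` module.
import numpy

def _sketch_permute_one_channel(predictor_matrix, channel_index):
    """Permutes one channel of a predictor matrix across the example axis."""
    permuted_matrix = predictor_matrix + 0.
    random_indices = numpy.random.permutation(predictor_matrix.shape[0])
    permuted_matrix[..., channel_index] = (
        predictor_matrix[random_indices, ..., channel_index]
    )

    return permuted_matrix
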
def _run(cnn_file_name, upconvnet_file_name, top_example_dir_name,
         baseline_storm_metafile_name, trial_storm_metafile_name,
         num_baseline_examples, num_trial_examples, num_novel_examples,
         cnn_feature_layer_name, percent_variance_to_keep, output_file_name):
    """Runs novelty detection.

    This is effectively the main method.

    :param cnn_file_name: See documentation at top of file.
    :param upconvnet_file_name: Same.
    :param top_example_dir_name: Same.
    :param baseline_storm_metafile_name: Same.
    :param trial_storm_metafile_name: Same.
    :param num_baseline_examples: Same.
    :param num_trial_examples: Same.
    :param num_novel_examples: Same.
    :param cnn_feature_layer_name: Same.
    :param percent_variance_to_keep: Same.
    :param output_file_name: Same.
    :raises: ValueError: if dimensions of first CNN input matrix != dimensions
        of upconvnet output.
    """

    print('Reading trained CNN from: "{0:s}"...'.format(cnn_file_name))
    cnn_model_object = cnn.read_model(cnn_file_name)

    print('Reading trained upconvnet from: "{0:s}"...'.format(
        upconvnet_file_name))
    upconvnet_model_object = cnn.read_model(upconvnet_file_name)
    _check_dimensions(cnn_model_object=cnn_model_object,
                      upconvnet_model_object=upconvnet_model_object)

    print('Reading metadata for baseline examples from: "{0:s}"...'.format(
        baseline_storm_metafile_name))
    baseline_full_id_strings, baseline_times_unix_sec = (
        tracking_io.read_ids_and_times(baseline_storm_metafile_name))

    print('Reading metadata for trial examples from: "{0:s}"...'.format(
        trial_storm_metafile_name))
    trial_full_id_strings, trial_times_unix_sec = (
        tracking_io.read_ids_and_times(trial_storm_metafile_name))

    this_dict = _filter_examples(
        trial_full_id_strings=trial_full_id_strings,
        trial_times_unix_sec=trial_times_unix_sec,
        num_trial_examples=num_trial_examples,
        baseline_full_id_strings=baseline_full_id_strings,
        baseline_times_unix_sec=baseline_times_unix_sec,
        num_baseline_examples=num_baseline_examples,
        num_novel_examples=num_novel_examples)

    trial_full_id_strings = this_dict[TRIAL_STORM_IDS_KEY]
    trial_times_unix_sec = this_dict[TRIAL_STORM_TIMES_KEY]
    baseline_full_id_strings = this_dict[BASELINE_STORM_IDS_KEY]
    baseline_times_unix_sec = this_dict[BASELINE_STORM_TIMES_KEY]
    num_novel_examples = this_dict[NUM_NOVEL_EXAMPLES_KEY]

    cnn_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(cnn_file_name)[0])

    print('Reading CNN metadata from: "{0:s}"...'.format(cnn_metafile_name))
    cnn_metadata_dict = cnn.read_model_metadata(cnn_metafile_name)
    print(SEPARATOR_STRING)

    baseline_predictor_matrices = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=baseline_full_id_strings,
        desired_times_unix_sec=baseline_times_unix_sec,
        option_dict=cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        layer_operation_dicts=cnn_metadata_dict[cnn.LAYER_OPERATIONS_KEY])[
            testing_io.INPUT_MATRICES_KEY]

    print(SEPARATOR_STRING)

    trial_predictor_matrices = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=trial_full_id_strings,
        desired_times_unix_sec=trial_times_unix_sec,
        option_dict=cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        layer_operation_dicts=cnn_metadata_dict[cnn.LAYER_OPERATIONS_KEY])[
            testing_io.INPUT_MATRICES_KEY]

    print(SEPARATOR_STRING)

    novelty_dict = novelty_detection.do_novelty_detection(
        baseline_predictor_matrices=baseline_predictor_matrices,
        trial_predictor_matrices=trial_predictor_matrices,
        cnn_model_object=cnn_model_object,
        cnn_feature_layer_name=cnn_feature_layer_name,
        upconvnet_model_object=upconvnet_model_object,
        num_novel_examples=num_novel_examples,
        multipass=False,
        percent_variance_to_keep=percent_variance_to_keep)

    print(SEPARATOR_STRING)
    print('Denormalizing inputs and outputs of novelty detection...')

    cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
        trainval_io.SOUNDING_FIELDS_KEY] = None

    novelty_dict[novelty_detection.BASELINE_MATRIX_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[baseline_predictor_matrices[0]],
            model_metadata_dict=cnn_metadata_dict))[0]

    novelty_dict[novelty_detection.TRIAL_MATRIX_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[trial_predictor_matrices[0]],
            model_metadata_dict=cnn_metadata_dict))[0]

    novelty_dict[novelty_detection.UPCONV_MATRIX_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[
                novelty_dict[novelty_detection.UPCONV_NORM_MATRIX_KEY]
            ],
            model_metadata_dict=cnn_metadata_dict))[0]
    novelty_dict.pop(novelty_detection.UPCONV_NORM_MATRIX_KEY)

    novelty_dict[novelty_detection.UPCONV_SVD_MATRIX_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[
                novelty_dict[novelty_detection.UPCONV_NORM_SVD_MATRIX_KEY]
            ],
            model_metadata_dict=cnn_metadata_dict))[0]
    novelty_dict.pop(novelty_detection.UPCONV_NORM_SVD_MATRIX_KEY)

    novelty_dict = novelty_detection.add_metadata(
        novelty_dict=novelty_dict,
        baseline_full_id_strings=baseline_full_id_strings,
        baseline_times_unix_sec=baseline_times_unix_sec,
        trial_full_id_strings=trial_full_id_strings,
        trial_times_unix_sec=trial_times_unix_sec,
        cnn_file_name=cnn_file_name,
        upconvnet_file_name=upconvnet_file_name)

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    novelty_detection.write_standard_file(novelty_dict=novelty_dict,
                                          pickle_file_name=output_file_name)
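
# A sketch of the SVD step that `percent_variance_to_keep` presumably controls
# inside `novelty_detection.do_novelty_detection`: trial feature vectors are
# projected onto the leading modes of the baseline features and reconstructed,
# and the trial examples reconstructed worst are deemed most novel.
# Hypothetical helper; the real implementation may differ.
import numpy

def _sketch_project_onto_baseline_modes(
        baseline_feature_matrix, trial_feature_matrix,
        percent_variance_to_keep):
    """Projects trial features onto baseline SVD modes and back."""
    mean_vector = numpy.mean(baseline_feature_matrix, axis=0)
    _, singular_values, v_transposed = numpy.linalg.svd(
        baseline_feature_matrix - mean_vector, full_matrices=False)

    explained_fractions = (
        numpy.cumsum(singular_values ** 2) / numpy.sum(singular_values ** 2)
    )
    num_modes = 1 + numpy.searchsorted(
        explained_fractions, percent_variance_to_keep / 100)

    basis_matrix = v_transposed[:num_modes, :].T
    coeff_matrix = numpy.dot(
        trial_feature_matrix - mean_vector, basis_matrix)
    return numpy.dot(coeff_matrix, basis_matrix.T) + mean_vector
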
def _run(model_file_name, init_function_name, storm_metafile_name,
         num_examples, top_example_dir_name, component_type_string,
         target_class, layer_name, neuron_indices, channel_index,
         num_iterations, ideal_activation, learning_rate, l2_weight,
         radar_constraint_weight, minmax_constraint_weight, output_file_name):
    """Runs backwards optimization on a trained CNN.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param init_function_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_example_dir_name: Same.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param num_iterations: Same.
    :param ideal_activation: Same.
    :param learning_rate: Same.
    :param l2_weight: Same.
    :param radar_constraint_weight: Same.
    :param minmax_constraint_weight: Same.
    :param output_file_name: Same.
    """

    if l2_weight <= 0:
        l2_weight = None
    if radar_constraint_weight <= 0:
        radar_constraint_weight = None
    if minmax_constraint_weight <= 0:
        minmax_constraint_weight = None
    if ideal_activation <= 0:
        ideal_activation = None
    if init_function_name in ['', 'None']:
        init_function_name = None

    model_interpretation.check_component_type(component_type_string)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)

    input_matrices = None
    init_function = None
    full_storm_id_strings = None
    storm_times_unix_sec = None
    sounding_pressure_matrix_pa = None

    if init_function_name is None:
        print('Reading storm metadata from: "{0:s}"...'.format(
            storm_metafile_name))

        full_storm_id_strings, storm_times_unix_sec = (
            tracking_io.read_ids_and_times(storm_metafile_name))

        if 0 < num_examples < len(full_storm_id_strings):
            full_storm_id_strings = full_storm_id_strings[:num_examples]
            storm_times_unix_sec = storm_times_unix_sec[:num_examples]

        example_dict = testing_io.read_predictors_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_full_id_strings=full_storm_id_strings,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
            layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY])
        print(SEPARATOR_STRING)

        input_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
        sounding_pressure_matrix_pa = example_dict[
            testing_io.SOUNDING_PRESSURES_KEY]
        num_examples = input_matrices[0].shape[0]
    else:
        num_examples = 1
        init_function = _create_initializer(
            init_function_name=init_function_name,
            model_metadata_dict=model_metadata_dict)

    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    output_matrices = None
    initial_activations = numpy.full(num_examples, numpy.nan)
    final_activations = numpy.full(num_examples, numpy.nan)

    for i in range(num_examples):
        if init_function_name is None:
            this_init_arg = [a[[i], ...] for a in input_matrices]
        else:
            this_init_arg = init_function

        if component_type_string == CLASS_COMPONENT_TYPE_STRING:
            print((
                '\nOptimizing image {0:d} of {1:d} for target class {2:d}...'
            ).format(i + 1, num_examples, target_class))

            this_result_dict = backwards_opt.optimize_input_for_class(
                model_object=model_object,
                target_class=target_class,
                init_function_or_matrices=this_init_arg,
                num_iterations=num_iterations,
                learning_rate=learning_rate,
                l2_weight=l2_weight,
                radar_constraint_weight=radar_constraint_weight,
                minmax_constraint_weight=minmax_constraint_weight,
                model_metadata_dict=model_metadata_dict)

        elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
            print((
                '\nOptimizing image {0:d} of {1:d} for neuron {2:s} in layer '
                '"{3:s}"...').format(i + 1, num_examples, str(neuron_indices),
                                     layer_name))

            this_result_dict = backwards_opt.optimize_input_for_neuron(
                model_object=model_object,
                layer_name=layer_name,
                neuron_indices=neuron_indices,
                init_function_or_matrices=this_init_arg,
                num_iterations=num_iterations,
                learning_rate=learning_rate,
                l2_weight=l2_weight,
                ideal_activation=ideal_activation,
                radar_constraint_weight=radar_constraint_weight,
                minmax_constraint_weight=minmax_constraint_weight,
                model_metadata_dict=model_metadata_dict)

        else:
            print(('\nOptimizing image {0:d} of {1:d} for channel {2:d} in '
                   'layer "{3:s}"...').format(i + 1, num_examples,
                                              channel_index, layer_name))

            this_result_dict = backwards_opt.optimize_input_for_channel(
                model_object=model_object,
                layer_name=layer_name,
                channel_index=channel_index,
                init_function_or_matrices=this_init_arg,
                stat_function_for_neuron_activations=K.max,
                num_iterations=num_iterations,
                learning_rate=learning_rate,
                l2_weight=l2_weight,
                ideal_activation=ideal_activation,
                radar_constraint_weight=radar_constraint_weight,
                minmax_constraint_weight=minmax_constraint_weight,
                model_metadata_dict=model_metadata_dict)

        initial_activations[i] = this_result_dict[
            backwards_opt.INITIAL_ACTIVATION_KEY]
        final_activations[i] = this_result_dict[
            backwards_opt.FINAL_ACTIVATION_KEY]
        these_output_matrices = this_result_dict[
            backwards_opt.NORM_OUTPUT_MATRICES_KEY]

        if output_matrices is None:
            output_matrices = [None] * len(these_output_matrices)

        for k in range(len(output_matrices)):
            if output_matrices[k] is None:
                output_matrices[k] = these_output_matrices[k] + 0.
            else:
                output_matrices[k] = numpy.concatenate(
                    (output_matrices[k], these_output_matrices[k]), axis=0)

        if init_function_name is None:
            continue

        these_input_matrices = this_result_dict[
            backwards_opt.NORM_INPUT_MATRICES_KEY]

        if input_matrices is None:
            input_matrices = [None] * len(these_input_matrices)

        for k in range(len(input_matrices)):
            if input_matrices[k] is None:
                input_matrices[k] = these_input_matrices[k] + 0.
            else:
                input_matrices[k] = numpy.concatenate(
                    (input_matrices[k], these_input_matrices[k]), axis=0)

    print(SEPARATOR_STRING)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]

    print('Denormalizing input examples...')
    input_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=input_matrices,
        training_option_dict=training_option_dict)

    input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Denormalizing optimized examples...')
    output_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=output_matrices,
        training_option_dict=training_option_dict)

    output_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=output_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    bwo_metadata_dict = backwards_opt.check_metadata(
        component_type_string=component_type_string,
        num_iterations=num_iterations,
        learning_rate=learning_rate,
        target_class=target_class,
        layer_name=layer_name,
        ideal_activation=ideal_activation,
        neuron_indices=neuron_indices,
        channel_index=channel_index,
        l2_weight=l2_weight,
        radar_constraint_weight=radar_constraint_weight,
        minmax_constraint_weight=minmax_constraint_weight)

    backwards_opt.write_standard_file(
        pickle_file_name=output_file_name,
        denorm_input_matrices=input_matrices,
        denorm_output_matrices=output_matrices,
        initial_activations=initial_activations,
        final_activations=final_activations,
        model_file_name=model_file_name,
        metadata_dict=bwo_metadata_dict,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
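
# A sketch of the loop that `backwards_opt` presumably runs for each model
# component above: repeated gradient descent on the input matrices with
# respect to a loss tied to the component's activation (e.g., squared
# difference from `ideal_activation`).  Hypothetical helper; the real
# optimizer also applies the L2 and physical-constraint penalties.
import keras.backend as K

def _sketch_optimize_input(model_object, loss_tensor, list_of_init_matrices,
                           num_iterations=200, learning_rate=0.01):
    """Runs plain gradient descent on the input matrices."""
    list_of_input_tensors = (
        model_object.input if isinstance(model_object.input, list)
        else [model_object.input]
    )

    list_of_gradient_tensors = K.gradients(loss_tensor, list_of_input_tensors)
    gradient_function = K.function(
        list_of_input_tensors, list_of_gradient_tensors)

    list_of_matrices = [m + 0. for m in list_of_init_matrices]

    for _ in range(num_iterations):
        list_of_gradients = gradient_function(list_of_matrices)
        list_of_matrices = [
            m - learning_rate * g
            for m, g in zip(list_of_matrices, list_of_gradients)
        ]

    return list_of_matrices
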
def _run(activation_file_name, storm_metafile_name, num_examples,
         top_example_dir_name, num_radar_rows, num_radar_columns,
         allow_whitespace, colour_bar_length, output_dir_name):
    """Plots one or more examples (storm objects) for human input.

    This is effectively the main method.

    :param activation_file_name: See documentation at top of file.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_example_dir_name: Same.
    :param num_radar_rows: Same.
    :param num_radar_columns: Same.
    :param allow_whitespace: Same.
    :param colour_bar_length: Same.
    :param output_dir_name: Same.
    :raises: ValueError: if activation file contains activations for more than
        one model component.
    """

    if num_radar_rows <= 0:
        num_radar_rows = None
    if num_radar_columns <= 0:
        num_radar_columns = None

    if activation_file_name in ['', 'None']:
        activation_file_name = None

    if activation_file_name is None:
        print('Reading data from: "{0:s}"...'.format(storm_metafile_name))
        full_storm_id_strings, storm_times_unix_sec = (
            tracking_io.read_ids_and_times(storm_metafile_name))

        training_option_dict = dict()
        training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] = None
        training_option_dict[trainval_io.SOUNDING_HEIGHTS_KEY] = None

        training_option_dict[trainval_io.NUM_ROWS_KEY] = num_radar_rows
        training_option_dict[trainval_io.NUM_COLUMNS_KEY] = num_radar_columns
        training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
        training_option_dict[trainval_io.TARGET_NAME_KEY] = DUMMY_TARGET_NAME
        training_option_dict[trainval_io.BINARIZE_TARGET_KEY] = False
        training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
        training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

        model_metadata_dict = {cnn.LAYER_OPERATIONS_KEY: None}
    else:
        print('Reading data from: "{0:s}"...'.format(activation_file_name))
        activation_matrix, activation_metadata_dict = (
            model_activation.read_file(activation_file_name))

        num_model_components = activation_matrix.shape[1]
        if num_model_components > 1:
            error_string = (
                'The file should contain activations for only one model '
                'component, not {0:d}.').format(num_model_components)

            raise ValueError(error_string)

        full_storm_id_strings = activation_metadata_dict[
            model_activation.FULL_IDS_KEY]
        storm_times_unix_sec = activation_metadata_dict[
            model_activation.STORM_TIMES_KEY]

        model_file_name = activation_metadata_dict[
            model_activation.MODEL_FILE_NAME_KEY]
        model_metafile_name = '{0:s}/model_metadata.p'.format(
            os.path.split(model_file_name)[0])

        print('Reading metadata from: "{0:s}"...'.format(model_metafile_name))
        model_metadata_dict = cnn.read_model_metadata(model_metafile_name)

        training_option_dict = model_metadata_dict[
            cnn.TRAINING_OPTION_DICT_KEY]
        training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
        training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
        training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    training_option_dict[trainval_io.RADAR_FIELDS_KEY] = SHEAR_FIELD_NAMES
    training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] = REFL_HEIGHTS_M_AGL
    training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY] = False
    model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY] = training_option_dict

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    print(SEPARATOR_STRING)
    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]

    # TODO(thunderhoser): The rest of this code is very HACKY.
    predictor_matrices[0] = trainval_io.upsample_reflectivity(
        predictor_matrices[0][..., 0])
    predictor_matrices[0] = numpy.expand_dims(predictor_matrices[0], axis=-1)

    example_dict = {
        input_examples.RADAR_FIELDS_KEY: SHEAR_FIELD_NAMES,
        input_examples.REFL_IMAGE_MATRIX_KEY: predictor_matrices[0],
        input_examples.AZ_SHEAR_IMAGE_MATRIX_KEY: predictor_matrices[1],
        input_examples.RADAR_HEIGHTS_KEY: REFL_HEIGHTS_M_AGL
    }

    example_dict = input_examples.reduce_examples_3d_to_2d(
        example_dict=example_dict,
        list_of_operation_dicts=[REFL_LAYER_OPERATION_DICT])

    predictor_matrices = [example_dict[input_examples.RADAR_IMAGE_MATRIX_KEY]]

    layer_operation_dicts = [
        {
            input_examples.RADAR_FIELD_KEY: f,
            input_examples.MIN_HEIGHT_KEY: h1,
            input_examples.MAX_HEIGHT_KEY: h2,
            input_examples.OPERATION_NAME_KEY: op
        }
        for f, h1, h2, op in zip(
            example_dict[input_examples.RADAR_FIELDS_KEY],
            example_dict[input_examples.MIN_RADAR_HEIGHTS_KEY],
            example_dict[input_examples.MAX_RADAR_HEIGHTS_KEY],
            example_dict[input_examples.RADAR_LAYER_OPERATION_NAMES_KEY]
        )
    ]

    model_metadata_dict[cnn.LAYER_OPERATIONS_KEY] = layer_operation_dicts

    figure_file_names = plot_examples.plot_examples(
        list_of_predictor_matrices=predictor_matrices,
        model_metadata_dict=model_metadata_dict,
        pmm_flag=False,
        output_dir_name=output_dir_name,
        plot_soundings=False,
        allow_whitespace=allow_whitespace,
        plot_panel_names=False,
        add_titles=False,
        label_colour_bars=True,
        colour_bar_length=colour_bar_length,
        colour_bar_font_size=COLOUR_BAR_FONT_SIZE,
        figure_resolution_dpi=FIGURE_RESOLUTION_DPI,
        refl_opacity=REFL_OPACITY,
        plot_grid_lines=False,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec)

    for this_file_name in figure_file_names:
        print('Resizing image to {0:d} pixels: "{1:s}"...'.format(
            FIGURE_SIZE_PIXELS, this_file_name))

        imagemagick_utils.resize_image(input_file_name=this_file_name,
                                       output_file_name=this_file_name,
                                       output_size_pixels=FIGURE_SIZE_PIXELS)
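
# Each `_run` above reads its arguments from an argparse block at the top of
# its script (hence ":param ...: See documentation at top of file").  A
# minimal sketch of that wiring for the last script; the flag names, defaults,
# and help strings here are hypothetical.
import argparse

SKETCH_ARG_PARSER = argparse.ArgumentParser()

for _arg_name in [
        'activation_file_name', 'storm_metafile_name', 'top_example_dir_name',
        'output_dir_name']:
    SKETCH_ARG_PARSER.add_argument('--' + _arg_name, type=str, required=True)

for _arg_name in ['num_examples', 'num_radar_rows', 'num_radar_columns']:
    SKETCH_ARG_PARSER.add_argument(
        '--' + _arg_name, type=int, required=False, default=-1)

SKETCH_ARG_PARSER.add_argument(
    '--allow_whitespace', type=int, required=False, default=1)
SKETCH_ARG_PARSER.add_argument(
    '--colour_bar_length', type=float, required=False, default=0.8)

if __name__ == '__main__':
    _INPUT_ARG_OBJECT = SKETCH_ARG_PARSER.parse_args()

    _run(
        activation_file_name=_INPUT_ARG_OBJECT.activation_file_name,
        storm_metafile_name=_INPUT_ARG_OBJECT.storm_metafile_name,
        num_examples=_INPUT_ARG_OBJECT.num_examples,
        top_example_dir_name=_INPUT_ARG_OBJECT.top_example_dir_name,
        num_radar_rows=_INPUT_ARG_OBJECT.num_radar_rows,
        num_radar_columns=_INPUT_ARG_OBJECT.num_radar_columns,
        allow_whitespace=bool(_INPUT_ARG_OBJECT.allow_whitespace),
        colour_bar_length=_INPUT_ARG_OBJECT.colour_bar_length,
        output_dir_name=_INPUT_ARG_OBJECT.output_dir_name)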