Code example #1: running Grad-CAM with optional weight randomization
def _run(model_file_name, target_class, target_layer_name,
         top_example_dir_name, storm_metafile_name, num_examples,
         randomize_weights, cascading_random, output_file_name):
    """Runs Grad-CAM (gradient-weighted class-activation maps).

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param target_class: Same.
    :param target_layer_name: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param randomize_weights: Same.
    :param cascading_random: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    output_dir_name, pathless_output_file_name = os.path.split(
        output_file_name)
    extensionless_output_file_name, output_file_extension = os.path.splitext(
        pathless_output_file_name)

    if randomize_weights:
        conv_dense_layer_names = _find_conv_and_dense_layers(model_object)
        conv_dense_layer_names.reverse()
        num_sets = len(conv_dense_layer_names)
    else:
        conv_dense_layer_names = []
        num_sets = 1

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name))

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    sounding_pressure_matrix_pa = (
        example_dict[testing_io.SOUNDING_PRESSURES_KEY])

    print('Denormalizing model inputs...')
    denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(predictor_matrices),
        training_option_dict=training_option_dict)
    denorm_predictor_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_predictor_matrices,
        model_metadata_dict=model_metadata_dict)
    print(SEPARATOR_STRING)

    for k in range(num_sets):
        if randomize_weights:
            if cascading_random:
                _reset_weights_in_layer(model_object=model_object,
                                        layer_name=conv_dense_layer_names[k])

                this_model_object = model_object

                this_output_file_name = (
                    '{0:s}/{1:s}_cascading-random_{2:s}{3:s}').format(
                        output_dir_name, extensionless_output_file_name,
                        conv_dense_layer_names[k].replace('_', '-'),
                        output_file_extension)
            else:
                this_model_object = keras.models.Model.from_config(
                    model_object.get_config())
                this_model_object.set_weights(model_object.get_weights())

                _reset_weights_in_layer(model_object=this_model_object,
                                        layer_name=conv_dense_layer_names[k])

                this_output_file_name = '{0:s}/{1:s}_random_{2:s}{3:s}'.format(
                    output_dir_name, extensionless_output_file_name,
                    conv_dense_layer_names[k].replace('_', '-'),
                    output_file_extension)
        else:
            this_model_object = model_object
            this_output_file_name = output_file_name

        # print(K.eval(this_model_object.get_layer(name='dense_53').weights[0]))

        these_cam_matrices, these_guided_cam_matrices = (
            _run_gradcam_one_weight_set(
                model_object=this_model_object,
                target_class=target_class,
                target_layer_name=target_layer_name,
                predictor_matrices=predictor_matrices,
                training_option_dict=training_option_dict))

        print('Writing results to file: "{0:s}"...'.format(
            this_output_file_name))
        gradcam.write_standard_file(
            pickle_file_name=this_output_file_name,
            denorm_predictor_matrices=denorm_predictor_matrices,
            cam_matrices=these_cam_matrices,
            guided_cam_matrices=these_guided_cam_matrices,
            full_storm_id_strings=full_storm_id_strings,
            storm_times_unix_sec=storm_times_unix_sec,
            model_file_name=model_file_name,
            target_class=target_class,
            target_layer_name=target_layer_name,
            sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)

        print(SEPARATOR_STRING)
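Code examples #1 and #4 sanity-check interpretation maps by randomizing model weights, either cascading (the same model object is re-randomized one layer at a time, accumulating) or independent (each layer is randomized in a fresh copy of the trained model). Below is a minimal sketch of that logic, assuming TensorFlow 2.x with tf.keras; reset_weights_in_layer is a hypothetical stand-in for the repository's _reset_weights_in_layer, whose exact randomization scheme may differ.

import numpy
import tensorflow as tf


def reset_weights_in_layer(model_object, layer_name):
    """Replaces one layer's weights with random values of similar scale."""

    layer_object = model_object.get_layer(name=layer_name)
    new_weight_matrices = []

    for this_weight_matrix in layer_object.get_weights():
        this_stdev = max([numpy.std(this_weight_matrix), 1e-6])
        new_weight_matrices.append(numpy.random.normal(
            loc=0., scale=this_stdev, size=this_weight_matrix.shape))

    layer_object.set_weights(new_weight_matrices)


model_object = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,),
                          name='dense_1'),
    tf.keras.layers.Dense(1, activation='sigmoid', name='dense_2')
])

# Independent randomization: copy the trained model first, so that only one
# layer differs from the original.  Cascading randomization would instead call
# reset_weights_in_layer repeatedly on the same model object, as in the
# cascading_random branch above.
copied_model_object = tf.keras.models.clone_model(model_object)
copied_model_object.set_weights(model_object.get_weights())
reset_weights_in_layer(copied_model_object, layer_name='dense_2')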
Code example #2: creating a dummy (edge-detector) saliency map for each storm object
def _run(model_file_name, top_example_dir_name, storm_metafile_name,
         num_examples, output_file_name):
    """Creates dummy saliency map for each storm object.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name))

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    sounding_pressure_matrix_pa = (
        example_dict[testing_io.SOUNDING_PRESSURES_KEY])

    radar_matrix = predictor_matrices[0]
    num_examples = radar_matrix.shape[0]
    num_channels = radar_matrix.shape[-1]
    num_spatial_dim = len(radar_matrix.shape) - 2

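    # Build a kernel with dimensions m x n (x h) x num_input_channels x
    # num_output_channels, so that each output channel sums the edge-detector
    # response over all input channels.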
    if num_spatial_dim == 2:
        kernel_matrix = numpy.expand_dims(EDGE_DETECTOR_MATRIX_2D, axis=-1)
    else:
        kernel_matrix = numpy.expand_dims(EDGE_DETECTOR_MATRIX_3D, axis=-1)

    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)
    kernel_matrix = numpy.expand_dims(kernel_matrix, axis=-1)
    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)

    radar_saliency_matrix = numpy.full(radar_matrix.shape, numpy.nan)

    for i in range(num_examples):
        if numpy.mod(i, 10) == 0:
            print((
                'Have created dummy saliency map for {0:d} of {1:d} examples...'
            ).format(i, num_examples))

        if num_spatial_dim == 2:
            this_saliency_matrix = standalone_utils.do_2d_convolution(
                feature_matrix=radar_matrix[i, ...],
                kernel_matrix=kernel_matrix,
                pad_edges=True,
                stride_length_px=1)
        else:
            this_saliency_matrix = standalone_utils.do_3d_convolution(
                feature_matrix=radar_matrix[i, ...],
                kernel_matrix=kernel_matrix,
                pad_edges=True,
                stride_length_px=1)

        radar_saliency_matrix[i, ...] = this_saliency_matrix[0, ...]

    print('Have created dummy saliency map for all {0:d} examples!'.format(
        num_examples))
    print(SEPARATOR_STRING)

    saliency_matrices = [
        radar_saliency_matrix if k == 0 else predictor_matrices[k]
        for k in range(len(predictor_matrices))
    ]
    saliency_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=saliency_matrices,
        training_option_dict=training_option_dict)
    denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(predictor_matrices),
        training_option_dict=training_option_dict)

    print('Denormalizing model inputs...')
    denorm_predictor_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_predictor_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing saliency maps to file: "{0:s}"...'.format(output_file_name))

    saliency_metadata_dict = saliency_maps.check_metadata(
        component_type_string=model_interpretation.CLASS_COMPONENT_TYPE_STRING,
        target_class=1)

    saliency_maps.write_standard_file(
        pickle_file_name=output_file_name,
        denorm_predictor_matrices=denorm_predictor_matrices,
        saliency_matrices=saliency_matrices,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        model_file_name=model_file_name,
        metadata_dict=saliency_metadata_dict,
        sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
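The "dummy" saliency above is a model-free baseline: instead of differentiating the CNN, it convolves each radar image with a fixed edge-detection kernel. Here is a minimal, self-contained sketch of the same idea, assuming SciPy is available; the Laplacian-style EDGE_DETECTOR_MATRIX_2D shown is an assumption and may differ from the repository's definition, which also mixes channels in one kernel rather than convolving them separately.

import numpy
from scipy.ndimage import convolve

EDGE_DETECTOR_MATRIX_2D = numpy.array([
    [0., 1., 0.],
    [1., -4., 1.],
    [0., 1., 0.]
])

# E x M x N x C matrix of fake radar images.
radar_matrix = numpy.random.normal(size=(5, 32, 32, 3))
saliency_matrix = numpy.full(radar_matrix.shape, numpy.nan)

for i in range(radar_matrix.shape[0]):
    for j in range(radar_matrix.shape[-1]):
        # Convolve each channel separately, padding edges by reflection.
        saliency_matrix[i, ..., j] = convolve(
            radar_matrix[i, ..., j], EDGE_DETECTOR_MATRIX_2D, mode='reflect')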
Code example #3: running Grad-CAM with one set of weights
def _run_gradcam_one_weight_set(model_object, target_class, target_layer_name,
                                predictor_matrices, training_option_dict):
    """Runs Grad-CAM with one set of weights.

    T = number of input tensors to model

    :param model_object: Trained CNN (instance of `keras.models.Model` or
        `keras.models.Sequential`).
    :param target_class: See documentation at top of file.
    :param target_layer_name: Same.
    :param predictor_matrices: length-T list of numpy arrays, containing
        normalized predictor matrices.
    :param training_option_dict: Dictionary returned by
        `cnn.read_model_metadata`.
    :return: cam_matrices: length-T list of numpy arrays, containing unguided
        class activations.
    :return: guided_cam_matrices: length-T list of numpy arrays, containing
        guided class activations.
    """

    num_matrices = len(predictor_matrices)
    num_examples = predictor_matrices[0].shape[0]

    cam_matrices = [None] * num_matrices
    guided_cam_matrices = [None] * num_matrices
    new_model_object = None

    for i in range(num_examples):
        print('Running Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))

        these_predictor_matrices = [a[[i], ...] for a in predictor_matrices]
        these_cam_matrices = gradcam.run_gradcam(
            model_object=model_object,
            list_of_input_matrices=these_predictor_matrices,
            target_class=target_class,
            target_layer_name=target_layer_name)

        print('Running guided Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))

        these_guided_cam_matrices, new_model_object = (
            gradcam.run_guided_gradcam(
                orig_model_object=model_object,
                list_of_input_matrices=these_predictor_matrices,
                target_layer_name=target_layer_name,
                list_of_cam_matrices=these_cam_matrices,
                new_model_object=new_model_object))

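        # On the first example, allocate NaN-filled arrays for all examples,
        # now that the shapes of the CAM matrices are known.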
        if all([a is None for a in cam_matrices]):
            for k in range(num_matrices):
                if these_cam_matrices[k] is None:
                    continue

                these_dim = numpy.array(
                    (num_examples, ) + these_cam_matrices[k].shape[1:],
                    dtype=int)
                cam_matrices[k] = numpy.full(these_dim, numpy.nan)

                these_dim = numpy.array(
                    (num_examples, ) + these_guided_cam_matrices[k].shape[1:],
                    dtype=int)
                guided_cam_matrices[k] = numpy.full(these_dim, numpy.nan)

        for k in range(num_matrices):
            if these_cam_matrices[k] is None:
                continue

            cam_matrices[k][i, ...] = these_cam_matrices[k][0, ...]
            guided_cam_matrices[k][i,
                                   ...] = (these_guided_cam_matrices[k][0,
                                                                        ...])

    upsample_refl = training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY]

    if upsample_refl:
        cam_matrices[0] = numpy.expand_dims(cam_matrices[0], axis=-1)

        num_channels = predictor_matrices[0].shape[-1]
        cam_matrices[0] = numpy.repeat(a=cam_matrices[0],
                                       repeats=num_channels,
                                       axis=-1)

        cam_matrices = trainval_io.separate_shear_and_reflectivity(
            list_of_input_matrices=cam_matrices,
            training_option_dict=training_option_dict)

        cam_matrices[0] = cam_matrices[0][..., 0]
        cam_matrices[1] = cam_matrices[1][..., 0]

    guided_cam_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=guided_cam_matrices,
        training_option_dict=training_option_dict)

    return cam_matrices, guided_cam_matrices
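For reference, the following is a minimal sketch of what a call like gradcam.run_gradcam computes for one example, assuming TensorFlow 2.x and a single-input CNN with a sigmoid output. It is a generic Grad-CAM illustration, not the repository's implementation, and it omits upsampling the class-activation map back to the input grid.

import numpy
import tensorflow as tf


def run_gradcam_sketch(model_object, input_matrix, target_layer_name):
    """Computes class-activation map for one example.

    :param model_object: Trained tf.keras.Model with one input and a sigmoid
        output.
    :param input_matrix: 1 x M x N x C numpy array.
    :param target_layer_name: Name of target (convolutional) layer.
    :return: cam_matrix: m x n numpy array of class activations, at the
        resolution of the target layer.
    """

    # Model that returns both the target layer's activations and the final
    # prediction.
    grad_model_object = tf.keras.Model(
        inputs=model_object.inputs,
        outputs=[model_object.get_layer(target_layer_name).output,
                 model_object.output])

    with tf.GradientTape() as tape_object:
        activation_tensor, prediction_tensor = grad_model_object(
            tf.convert_to_tensor(input_matrix, dtype=tf.float32))
        class_score_tensor = prediction_tensor[:, 0]

    # Gradient of class score with respect to target-layer activations.
    gradient_tensor = tape_object.gradient(
        class_score_tensor, activation_tensor)

    # Channel weights = spatially averaged gradients; CAM = ReLU of the
    # weighted sum of activations over channels.
    weight_matrix = tf.reduce_mean(gradient_tensor, axis=(0, 1, 2))
    cam_matrix = tf.reduce_sum(
        activation_tensor[0, ...] * weight_matrix, axis=-1)

    return numpy.maximum(cam_matrix.numpy(), 0.)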
Code example #4: computing saliency maps with optional weight randomization
def _run(model_file_name, component_type_string, target_class, layer_name,
         ideal_activation, neuron_indices, channel_index, top_example_dir_name,
         storm_metafile_name, num_examples, randomize_weights,
         cascading_random, output_file_name):
    """Computes saliency map for each storm object and each model component.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param ideal_activation: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param randomize_weights: Same.
    :param cascading_random: Same.
    :param output_file_name: Same.
    """

    # Check input args.
    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
    model_interpretation.check_component_type(component_type_string)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    output_dir_name, pathless_output_file_name = os.path.split(
        output_file_name)
    extensionless_output_file_name, output_file_extension = os.path.splitext(
        pathless_output_file_name)

    if randomize_weights:
        conv_dense_layer_names = _find_conv_and_dense_layers(model_object)
        conv_dense_layer_names.reverse()
        num_sets = len(conv_dense_layer_names)
    else:
        conv_dense_layer_names = []
        num_sets = 1

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name))

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    sounding_pressure_matrix_pa = example_dict[
        testing_io.SOUNDING_PRESSURES_KEY]

    denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(predictor_matrices),
        training_option_dict=training_option_dict)

    print('Denormalizing model inputs...')
    denorm_predictor_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_predictor_matrices,
        model_metadata_dict=model_metadata_dict)
    print(SEPARATOR_STRING)

    for k in range(num_sets):
        if randomize_weights:
            if cascading_random:
                _reset_weights_in_layer(model_object=model_object,
                                        layer_name=conv_dense_layer_names[k])

                this_model_object = model_object

                this_output_file_name = (
                    '{0:s}/{1:s}_cascading-random_{2:s}{3:s}').format(
                        output_dir_name, extensionless_output_file_name,
                        conv_dense_layer_names[k].replace('_', '-'),
                        output_file_extension)
            else:
                this_model_object = keras.models.Model.from_config(
                    model_object.get_config())
                this_model_object.set_weights(model_object.get_weights())

                _reset_weights_in_layer(model_object=this_model_object,
                                        layer_name=conv_dense_layer_names[k])

                this_output_file_name = '{0:s}/{1:s}_random_{2:s}{3:s}'.format(
                    output_dir_name, extensionless_output_file_name,
                    conv_dense_layer_names[k].replace('_', '-'),
                    output_file_extension)
        else:
            this_model_object = model_object
            this_output_file_name = output_file_name

        # print(K.eval(this_model_object.get_layer(name='dense_3').weights[0]))

        if component_type_string == CLASS_COMPONENT_TYPE_STRING:
            print('Computing saliency maps for target class {0:d}...'.format(
                target_class))

            saliency_matrices = (
                saliency_maps.get_saliency_maps_for_class_activation(
                    model_object=this_model_object,
                    target_class=target_class,
                    list_of_input_matrices=predictor_matrices))

        elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
            print(
                ('Computing saliency maps for neuron {0:s} in layer "{1:s}"...'
                 ).format(str(neuron_indices), layer_name))

            saliency_matrices = (
                saliency_maps.get_saliency_maps_for_neuron_activation(
                    model_object=this_model_object,
                    layer_name=layer_name,
                    neuron_indices=neuron_indices,
                    list_of_input_matrices=predictor_matrices,
                    ideal_activation=ideal_activation))

        else:
            print((
                'Computing saliency maps for channel {0:d} in layer "{1:s}"...'
            ).format(channel_index, layer_name))

            saliency_matrices = (
                saliency_maps.get_saliency_maps_for_channel_activation(
                    model_object=this_model_object,
                    layer_name=layer_name,
                    channel_index=channel_index,
                    list_of_input_matrices=predictor_matrices,
                    stat_function_for_neuron_activations=K.max,
                    ideal_activation=ideal_activation))

        saliency_matrices = trainval_io.separate_shear_and_reflectivity(
            list_of_input_matrices=saliency_matrices,
            training_option_dict=training_option_dict)

        print('Writing saliency maps to file: "{0:s}"...'.format(
            this_output_file_name))

        saliency_metadata_dict = saliency_maps.check_metadata(
            component_type_string=component_type_string,
            target_class=target_class,
            layer_name=layer_name,
            ideal_activation=ideal_activation,
            neuron_indices=neuron_indices,
            channel_index=channel_index)

        saliency_maps.write_standard_file(
            pickle_file_name=this_output_file_name,
            denorm_predictor_matrices=denorm_predictor_matrices,
            saliency_matrices=saliency_matrices,
            full_storm_id_strings=full_storm_id_strings,
            storm_times_unix_sec=storm_times_unix_sec,
            model_file_name=model_file_name,
            metadata_dict=saliency_metadata_dict,
            sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
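Each saliency call above reduces to one gradient: the derivative of a scalar model component (class probability, neuron activation, or channel statistic) with respect to every input value. A minimal sketch for the class-activation case, assuming TensorFlow 2.x and a single-input CNN with a sigmoid output; this is a generic illustration, not the repository's implementation.

import tensorflow as tf


def get_saliency_for_class_sketch(model_object, input_matrix, target_class):
    """Computes saliency map for one example.

    :param model_object: Trained tf.keras.Model with a sigmoid output.
    :param input_matrix: 1 x M x N x C numpy array.
    :param target_class: Target class (must be 0 or 1).
    :return: saliency_matrix: 1 x M x N x C numpy array.
    """

    input_tensor = tf.convert_to_tensor(input_matrix, dtype=tf.float32)

    with tf.GradientTape() as tape_object:
        # Inputs are constants, not variables, so must be watched explicitly.
        tape_object.watch(input_tensor)
        activation_tensor = model_object(input_tensor)[:, 0]

        if target_class == 0:
            activation_tensor = 1. - activation_tensor

    return tape_object.gradient(activation_tensor, input_tensor).numpy()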
Code example #5: running Grad-CAM (older interface)
def _run(model_file_name, target_class, target_layer_name,
         top_example_dir_name, storm_metafile_name, num_examples,
         output_file_name):
    """Runs Grad-CAM (gradient-weighted class-activation maps).

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param target_class: Same.
    :param target_layer_name: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_id_strings, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)

    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_id_strings):
        full_id_strings = full_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_input_matrices, sounding_pressure_matrix_pascals = (
        testing_io.read_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_full_id_strings=full_id_strings,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=training_option_dict,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]))
    print(SEPARATOR_STRING)

    list_of_cam_matrices = None
    list_of_guided_cam_matrices = None
    new_model_object = None

    num_examples = len(full_id_strings)

    for i in range(num_examples):
        print('Running Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))

        these_input_matrices = [a[[i], ...] for a in list_of_input_matrices]
        these_cam_matrices = gradcam.run_gradcam(
            model_object=model_object,
            list_of_input_matrices=these_input_matrices,
            target_class=target_class,
            target_layer_name=target_layer_name)

        print('Running guided Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))

        these_guided_cam_matrices, new_model_object = (
            gradcam.run_guided_gradcam(
                orig_model_object=model_object,
                list_of_input_matrices=these_input_matrices,
                target_layer_name=target_layer_name,
                list_of_cam_matrices=these_cam_matrices,
                new_model_object=new_model_object))

        if list_of_cam_matrices is None:
            list_of_cam_matrices = copy.deepcopy(these_cam_matrices)
            list_of_guided_cam_matrices = copy.deepcopy(
                these_guided_cam_matrices)
        else:
            for j in range(len(these_cam_matrices)):
                if list_of_cam_matrices[j] is None:
                    continue

                list_of_cam_matrices[j] = numpy.concatenate(
                    (list_of_cam_matrices[j], these_cam_matrices[j]), axis=0)

                list_of_guided_cam_matrices[j] = numpy.concatenate(
                    (list_of_guided_cam_matrices[j],
                     these_guided_cam_matrices[j]),
                    axis=0)

    print(SEPARATOR_STRING)
    upsample_refl = training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY]

    if upsample_refl:
        list_of_cam_matrices[0] = numpy.expand_dims(list_of_cam_matrices[0],
                                                    axis=-1)

        num_channels = list_of_input_matrices[0].shape[-1]
        list_of_cam_matrices[0] = numpy.repeat(a=list_of_cam_matrices[0],
                                               repeats=num_channels,
                                               axis=-1)

        list_of_cam_matrices = trainval_io.separate_shear_and_reflectivity(
            list_of_input_matrices=list_of_cam_matrices,
            training_option_dict=training_option_dict)

        list_of_cam_matrices[0] = list_of_cam_matrices[0][..., 0]
        list_of_cam_matrices[1] = list_of_cam_matrices[1][..., 0]

    list_of_guided_cam_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=list_of_guided_cam_matrices,
        training_option_dict=training_option_dict)

    print('Denormalizing predictors...')
    list_of_input_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=list_of_input_matrices,
        training_option_dict=training_option_dict)

    list_of_input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing class-activation maps to file: "{0:s}"...'.format(
        output_file_name))

    gradcam.write_standard_file(
        pickle_file_name=output_file_name,
        list_of_input_matrices=list_of_input_matrices,
        list_of_cam_matrices=list_of_cam_matrices,
        list_of_guided_cam_matrices=list_of_guided_cam_matrices,
        model_file_name=model_file_name,
        full_id_strings=full_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        target_class=target_class,
        target_layer_name=target_layer_name,
        sounding_pressure_matrix_pascals=sounding_pressure_matrix_pascals)
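This older version grows list_of_cam_matrices with numpy.concatenate inside the loop, which copies the accumulated arrays on every iteration; code example #3 instead preallocates NaN-filled arrays once the output shapes are known. A minimal sketch of the preallocation pattern, with new_cam_matrix standing in for the 1 x m x n output of one Grad-CAM call:

import numpy

num_examples = 100
cam_matrix = None

for i in range(num_examples):
    new_cam_matrix = numpy.random.normal(size=(1, 32, 32))

    if cam_matrix is None:
        # Allocate once, using the shape of the first result.
        cam_matrix = numpy.full(
            (num_examples,) + new_cam_matrix.shape[1:], numpy.nan)

    cam_matrix[i, ...] = new_cam_matrix[0, ...]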
Code example #6: running backwards optimization on a trained CNN
def _run(model_file_name, init_function_name, storm_metafile_name,
         num_examples, top_example_dir_name, component_type_string,
         target_class, layer_name, neuron_indices, channel_index,
         num_iterations, ideal_activation, learning_rate, l2_weight,
         radar_constraint_weight, minmax_constraint_weight, output_file_name):
    """Runs backwards optimization on a trained CNN.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param init_function_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_example_dir_name: Same.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param num_iterations: Same.
    :param ideal_activation: Same.
    :param learning_rate: Same.
    :param l2_weight: Same.
    :param radar_constraint_weight: Same.
    :param minmax_constraint_weight: Same.
    :param output_file_name: Same.
    """

    if l2_weight <= 0:
        l2_weight = None
    if radar_constraint_weight <= 0:
        radar_constraint_weight = None
    if minmax_constraint_weight <= 0:
        minmax_constraint_weight = None
    if ideal_activation <= 0:
        ideal_activation = None
    if init_function_name in ['', 'None']:
        init_function_name = None

    model_interpretation.check_component_type(component_type_string)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)

    input_matrices = None
    init_function = None
    full_storm_id_strings = None
    storm_times_unix_sec = None
    sounding_pressure_matrix_pa = None

    if init_function_name is None:
        print('Reading storm metadata from: "{0:s}"...'.format(
            storm_metafile_name))

        full_storm_id_strings, storm_times_unix_sec = (
            tracking_io.read_ids_and_times(storm_metafile_name))

        if 0 < num_examples < len(full_storm_id_strings):
            full_storm_id_strings = full_storm_id_strings[:num_examples]
            storm_times_unix_sec = storm_times_unix_sec[:num_examples]

        example_dict = testing_io.read_predictors_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_full_id_strings=full_storm_id_strings,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
            layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY])
        print(SEPARATOR_STRING)

        input_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
        sounding_pressure_matrix_pa = example_dict[
            testing_io.SOUNDING_PRESSURES_KEY]
        num_examples = input_matrices[0].shape[0]
    else:
        num_examples = 1
        init_function = _create_initializer(
            init_function_name=init_function_name,
            model_metadata_dict=model_metadata_dict)

    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    output_matrices = None
    initial_activations = numpy.full(num_examples, numpy.nan)
    final_activations = numpy.full(num_examples, numpy.nan)

    for i in range(num_examples):
        if init_function_name is None:
            this_init_arg = [a[[i], ...] for a in input_matrices]
        else:
            this_init_arg = init_function

        if component_type_string == CLASS_COMPONENT_TYPE_STRING:
            print((
                '\nOptimizing image {0:d} of {1:d} for target class {2:d}...'
            ).format(i + 1, num_examples, target_class))

            this_result_dict = backwards_opt.optimize_input_for_class(
                model_object=model_object,
                target_class=target_class,
                init_function_or_matrices=this_init_arg,
                num_iterations=num_iterations,
                learning_rate=learning_rate,
                l2_weight=l2_weight,
                radar_constraint_weight=radar_constraint_weight,
                minmax_constraint_weight=minmax_constraint_weight,
                model_metadata_dict=model_metadata_dict)

        elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
            print((
                '\nOptimizing image {0:d} of {1:d} for neuron {2:s} in layer'
                ' "{3:s}"...').format(i + 1, num_examples, str(neuron_indices),
                                      layer_name))

            this_result_dict = backwards_opt.optimize_input_for_neuron(
                model_object=model_object,
                layer_name=layer_name,
                neuron_indices=neuron_indices,
                init_function_or_matrices=this_init_arg,
                num_iterations=num_iterations,
                learning_rate=learning_rate,
                l2_weight=l2_weight,
                ideal_activation=ideal_activation,
                radar_constraint_weight=radar_constraint_weight,
                minmax_constraint_weight=minmax_constraint_weight,
                model_metadata_dict=model_metadata_dict)

        else:
            print(('\nOptimizing image {0:d} of {1:d} for channel {2:d} in '
                   'layer "{3:s}"...').format(i + 1, num_examples,
                                              channel_index, layer_name))

            this_result_dict = backwards_opt.optimize_input_for_channel(
                model_object=model_object,
                layer_name=layer_name,
                channel_index=channel_index,
                init_function_or_matrices=this_init_arg,
                stat_function_for_neuron_activations=K.max,
                num_iterations=num_iterations,
                learning_rate=learning_rate,
                l2_weight=l2_weight,
                ideal_activation=ideal_activation,
                radar_constraint_weight=radar_constraint_weight,
                minmax_constraint_weight=minmax_constraint_weight,
                model_metadata_dict=model_metadata_dict)

        initial_activations[i] = this_result_dict[
            backwards_opt.INITIAL_ACTIVATION_KEY]
        final_activations[i] = this_result_dict[
            backwards_opt.FINAL_ACTIVATION_KEY]
        these_output_matrices = this_result_dict[
            backwards_opt.NORM_OUTPUT_MATRICES_KEY]

        if output_matrices is None:
            output_matrices = [None] * len(these_output_matrices)

        for k in range(len(output_matrices)):
            if output_matrices[k] is None:
                output_matrices[k] = these_output_matrices[k] + 0.
            else:
                output_matrices[k] = numpy.concatenate(
                    (output_matrices[k], these_output_matrices[k]), axis=0)

        if init_function_name is None:
            continue

        these_input_matrices = this_result_dict[
            backwards_opt.NORM_INPUT_MATRICES_KEY]

        if input_matrices is None:
            input_matrices = [None] * len(these_input_matrices)

        for k in range(len(input_matrices)):
            if input_matrices[k] is None:
                input_matrices[k] = these_input_matrices[k] + 0.
            else:
                input_matrices[k] = numpy.concatenate(
                    (input_matrices[k], these_input_matrices[k]), axis=0)

    print(SEPARATOR_STRING)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]

    print('Denormalizing input examples...')
    input_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=input_matrices,
        training_option_dict=training_option_dict)

    input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Denormalizing optimized examples...')
    output_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=output_matrices,
        training_option_dict=training_option_dict)

    output_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=output_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    bwo_metadata_dict = backwards_opt.check_metadata(
        component_type_string=component_type_string,
        num_iterations=num_iterations,
        learning_rate=learning_rate,
        target_class=target_class,
        layer_name=layer_name,
        ideal_activation=ideal_activation,
        neuron_indices=neuron_indices,
        channel_index=channel_index,
        l2_weight=l2_weight,
        radar_constraint_weight=radar_constraint_weight,
        minmax_constraint_weight=minmax_constraint_weight)

    backwards_opt.write_standard_file(
        pickle_file_name=output_file_name,
        denorm_input_matrices=input_matrices,
        denorm_output_matrices=output_matrices,
        initial_activations=initial_activations,
        final_activations=final_activations,
        model_file_name=model_file_name,
        metadata_dict=bwo_metadata_dict,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
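Each backwards_opt.optimize_input_for_* call is gradient ascent on the input rather than on the weights. A minimal sketch for the class case, assuming TensorFlow 2.x and a single-input CNN with a sigmoid output; the L2 term penalizing distance from the initial input is an assumption about how l2_weight is used, and the radar and min-max constraints are omitted.

import tensorflow as tf


def optimize_input_for_class_sketch(
        model_object, init_matrix, num_iterations=200, learning_rate=0.01,
        l2_weight=None):
    """Optimizes input to maximize probability of class 1.

    :param model_object: Trained tf.keras.Model with a sigmoid output.
    :param init_matrix: 1 x M x N x C numpy array (initial input).
    :param num_iterations: Number of gradient-ascent iterations.
    :param learning_rate: Step size.
    :param l2_weight: Weight for L2 distance from initial input (may be None).
    :return: output_matrix: Optimized version of init_matrix.
    """

    input_tensor = tf.Variable(init_matrix, dtype=tf.float32)
    init_tensor = tf.constant(init_matrix, dtype=tf.float32)

    for _ in range(num_iterations):
        with tf.GradientTape() as tape_object:
            # Negative class probability, so that minimizing the loss
            # maximizes the activation.
            loss_tensor = -model_object(input_tensor)[:, 0]

            if l2_weight is not None:
                loss_tensor += l2_weight * tf.reduce_mean(
                    (input_tensor - init_tensor) ** 2)

        gradient_tensor = tape_object.gradient(loss_tensor, input_tensor)
        input_tensor.assign_sub(learning_rate * gradient_tensor)

    return input_tensor.numpy()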
Code example #7: applying an upconvnet to examples from one file
def _apply_upconvnet_one_file(example_file_name, num_examples,
                              upconvnet_model_object, cnn_model_object,
                              cnn_metadata_dict, cnn_feature_layer_name,
                              upconvnet_file_name, top_output_dir_name):
    """Applies upconvnet to examples from one file.

    :param example_file_name: Path to input file (will be read by
        `input_examples.read_example_file`).
    :param num_examples: Number of examples to read.
    :param upconvnet_model_object: Trained upconvnet (instance of
        `keras.models.Model` or `keras.models.Sequential`).
    :param cnn_model_object: Trained CNN (instance of
        `keras.models.Model` or `keras.models.Sequential`).
    :param cnn_metadata_dict: Dictionary returned by `cnn.read_model_metadata`.
    :param cnn_feature_layer_name: Name of CNN layer whose output is the feature
        vector, which is the input to the upconvnet.
    :param upconvnet_file_name: See documentation at top of file.
    :param top_output_dir_name: Same.
    """

    # Do housekeeping.
    training_option_dict = cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.EXAMPLE_FILES_KEY] = [example_file_name]

    if cnn_metadata_dict[cnn.LAYER_OPERATIONS_KEY] is not None:
        generator_object = testing_io.gridrad_generator_2d_reduced(
            option_dict=training_option_dict,
            desired_num_examples=num_examples,
            list_of_operation_dicts=cnn_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY])

    elif cnn_metadata_dict[cnn.CONV_2D3D_KEY]:
        generator_object = testing_io.myrorss_generator_2d3d(
            option_dict=training_option_dict,
            desired_num_examples=num_examples)
    else:
        generator_object = testing_io.generator_2d_or_3d(
            option_dict=training_option_dict,
            desired_num_examples=num_examples)

    # Apply upconvnet.
    full_storm_id_strings = []
    storm_times_unix_sec = numpy.array([], dtype=int)
    reconstructed_radar_matrix = None
    mse_by_example = numpy.array([], dtype=float)

    while True:
        try:
            this_storm_object_dict = next(generator_object)
            print('\n')
        except StopIteration:
            break

        full_storm_id_strings += this_storm_object_dict[
            testing_io.FULL_IDS_KEY]
        storm_times_unix_sec = numpy.concatenate(
            (storm_times_unix_sec,
             this_storm_object_dict[testing_io.STORM_TIMES_KEY]))

        these_input_matrices = this_storm_object_dict[
            testing_io.INPUT_MATRICES_KEY]
        this_actual_matrix = these_input_matrices[0]

        this_reconstructed_matrix = upconvnet.apply_upconvnet(
            cnn_input_matrices=these_input_matrices,
            cnn_model_object=cnn_model_object,
            cnn_feature_layer_name=cnn_feature_layer_name,
            ucn_model_object=upconvnet_model_object,
            verbose=True)
        print(MINOR_SEPARATOR_STRING)

        if reconstructed_radar_matrix is None:
            reconstructed_radar_matrix = this_reconstructed_matrix + 0.
        else:
            reconstructed_radar_matrix = numpy.concatenate(
                (reconstructed_radar_matrix, this_reconstructed_matrix),
                axis=0)

        num_dimensions = len(this_actual_matrix.shape)
        all_axes_except_first = list(range(1, num_dimensions))

        these_mse = numpy.mean(
            (this_actual_matrix - this_reconstructed_matrix)**2,
            axis=tuple(all_axes_except_first))
        mse_by_example = numpy.concatenate((mse_by_example, these_mse))

    print(MINOR_SEPARATOR_STRING)
    if len(full_storm_id_strings) == 0:
        return

    print('Mean squared error = {0:.3e}'.format(numpy.mean(mse_by_example)))

    # Denormalize reconstructed images.
    print('Denormalizing reconstructed radar images...')

    metadata_dict_no_soundings = copy.deepcopy(cnn_metadata_dict)
    metadata_dict_no_soundings[cnn.TRAINING_OPTION_DICT_KEY][
        trainval_io.SOUNDING_FIELDS_KEY] = None
    option_dict_no_soundings = metadata_dict_no_soundings[
        cnn.TRAINING_OPTION_DICT_KEY]

    denorm_recon_radar_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=[reconstructed_radar_matrix],
        training_option_dict=option_dict_no_soundings)

    if training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY]:
        denorm_recon_radar_matrices[0] = trainval_io.downsample_reflectivity(
            reflectivity_matrix_dbz=denorm_recon_radar_matrices[0][..., 0])

        denorm_recon_radar_matrices[0] = numpy.expand_dims(
            denorm_recon_radar_matrices[0], axis=-1)

    denorm_recon_radar_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_recon_radar_matrices,
        model_metadata_dict=metadata_dict_no_soundings)

    # Write reconstructed images.
    spc_date_string = time_conversion.time_to_spc_date_string(
        numpy.median(storm_times_unix_sec).astype(int))

    output_file_name = upconvnet.find_prediction_file(
        top_directory_name=top_output_dir_name,
        spc_date_string=spc_date_string,
        raise_error_if_missing=False)

    print('Writing predictions to: "{0:s}"...'.format(output_file_name))

    upconvnet.write_predictions(
        pickle_file_name=output_file_name,
        denorm_recon_radar_matrices=denorm_recon_radar_matrices,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        mse_by_example=mse_by_example,
        upconvnet_file_name=upconvnet_file_name)
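upconvnet.apply_upconvnet is conceptually two predictions chained together: run the CNN up to its feature layer, then feed the resulting feature vectors to the upconvnet to reconstruct the radar images. A minimal sketch, assuming TensorFlow 2.x; this illustrates the idea and is not the repository's implementation.

import tensorflow as tf


def apply_upconvnet_sketch(cnn_input_matrices, cnn_model_object,
                           cnn_feature_layer_name, ucn_model_object):
    """Reconstructs radar images from CNN feature vectors.

    :param cnn_input_matrices: length-T list of numpy arrays with CNN inputs.
    :param cnn_model_object: Trained CNN (tf.keras.Model).
    :param cnn_feature_layer_name: Name of CNN layer whose output is the
        feature vector.
    :param ucn_model_object: Trained upconvnet (tf.keras.Model).
    :return: reconstructed_matrix: numpy array of reconstructed radar images.
    """

    # Truncated model that maps CNN inputs to the feature vector.
    feature_model_object = tf.keras.Model(
        inputs=cnn_model_object.inputs,
        outputs=cnn_model_object.get_layer(cnn_feature_layer_name).output)

    feature_matrix = feature_model_object.predict(cnn_input_matrices)
    return ucn_model_object.predict(feature_matrix)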