Code example #1
    def test_get_hilo_activation_examples_many(self):
        """Ensures correct output from _get_hilo_activation_examples.

        In this case, many examples are returned.
        """

        (these_high_indices,
         these_low_indices) = model_activation.get_hilo_activation_examples(
             storm_activations=STORM_ACTIVATIONS,
             num_low_activation_examples=NUM_LOW_ACTIVATION_EXAMPLES_MANY,
             num_high_activation_examples=NUM_HIGH_ACTIVATION_EXAMPLES_MANY)

        self.assertTrue(
            numpy.array_equal(these_high_indices, HIGH_INDICES_MANY))
        self.assertTrue(numpy.array_equal(these_low_indices, LOW_INDICES_MANY))
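
The test above exercises model_activation.get_hilo_activation_examples, which returns the indices of the highest- and lowest-activation storm objects (in that order). As a rough, illustrative sketch of the selection logic only (not the library's actual implementation, and ignoring tie-breaking and the unique_storm_cells option covered in the later examples), the core idea is an argsort over the activations:

import numpy


def get_hilo_indices_sketch(storm_activations, num_low, num_high):
    """Illustrative sketch: indices of the largest and smallest activations."""
    sort_indices = numpy.argsort(storm_activations)  # ascending order
    low_indices = sort_indices[:num_low]             # lowest activations first
    high_indices = sort_indices[::-1][:num_high]     # highest activations first
    return high_indices, low_indices


# With activations [0.1, 0.9, 0.5, 0.7], the two highest are at indices 1
# and 3, and the single lowest is at index 0.
high, low = get_hilo_indices_sketch(
    numpy.array([0.1, 0.9, 0.5, 0.7]), num_low=1, num_high=2)
print(high, low)  # [1 3] [0]
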
Code example #2
    def test_get_hilo_activations_few(self):
        """Ensures correct output from _get_hilo_activation_examples.

        In this case, only few examples are returned.
        """

        these_high_indices, these_low_indices = (
            model_activation.get_hilo_activation_examples(
                storm_activations=STORM_ACTIVATIONS,
                num_low_activation_examples=NUM_LOW_ACTIVATIONS_FEW,
                num_high_activation_examples=NUM_HIGH_ACTIVATIONS_FEW,
                unique_storm_cells=False))

        self.assertTrue(numpy.array_equal(these_high_indices,
                                          HIGH_INDICES_FEW))
        self.assertTrue(numpy.array_equal(these_low_indices, LOW_INDICES_FEW))
Code example #3
    def test_get_hilo_activations_few_unique(self):
        """Ensures correct output from _get_hilo_activation_examples.

        In this case, only few examples (from unique storm cells) are returned.
        """

        these_high_indices, these_low_indices = (
            model_activation.get_hilo_activation_examples(
                storm_activations=STORM_ACTIVATIONS,
                num_low_activation_examples=NUM_LOW_ACTIVATIONS_FEW,
                num_high_activation_examples=NUM_HIGH_ACTIVATIONS_FEW,
                unique_storm_cells=True,
                full_storm_id_strings=FULL_STORM_ID_STRINGS))

        self.assertTrue(
            numpy.array_equal(these_high_indices, HIGH_INDICES_FEW_UNIQUE))
        self.assertTrue(
            numpy.array_equal(these_low_indices, LOW_INDICES_FEW_UNIQUE))
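
When unique_storm_cells=True, the function also needs full_storm_id_strings so that at most one example per storm cell is returned. The sketch below shows one plausible way to enforce that constraint by deduplicating ranked candidates on the primary storm ID. It is an assumption for illustration only: the real code parses IDs with temporal_tracking.full_to_partial_ids (used in code example #4 below), whereas here the primary ID is simply taken to be the part of the full ID before the first underscore.

import numpy


def keep_first_per_cell_sketch(ranked_indices, full_id_strings):
    """Illustrative sketch: keeps the first (best-ranked) index per storm cell."""
    seen_primary_ids = set()
    kept_indices = []

    for k in ranked_indices:
        primary_id = full_id_strings[k].split('_')[0]  # assumed ID format
        if primary_id in seen_primary_ids:
            continue
        seen_primary_ids.add(primary_id)
        kept_indices.append(k)

    return numpy.array(kept_indices, dtype=int)


# Indices ranked by activation; indices 3 and 2 belong to the same cell "a",
# so only the better-ranked one (3) survives.
ranked_indices = numpy.array([3, 0, 2], dtype=int)
full_id_strings = ['a_time1', 'b_time1', 'a_time2', 'a_time3']
print(keep_first_per_cell_sketch(ranked_indices, full_id_strings))  # [3 0]
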
Code example #4
def _run(input_activation_file_name, unique_storm_cells,
         num_low_activation_examples, num_high_activation_examples, num_hits,
         num_misses, num_false_alarms, num_correct_nulls, top_target_dir_name,
         output_dir_name):
    """Finds extreme examples (storm objects), based on model activations.

    This is effectively the main method.

    :param input_activation_file_name: See documentation at top of file.
    :param unique_storm_cells: Same.
    :param num_low_activation_examples: Same.
    :param num_high_activation_examples: Same.
    :param num_hits: Same.
    :param num_misses: Same.
    :param num_false_alarms: Same.
    :param num_correct_nulls: Same.
    :param top_target_dir_name: Same.
    :param output_dir_name: Same.
    :raises: ValueError: if the activation file contains activations for more
        than one model component.
    """

    # Check input args.
    example_counts = numpy.array([
        num_low_activation_examples, num_high_activation_examples, num_hits,
        num_misses, num_false_alarms, num_correct_nulls
    ], dtype=int)

    error_checking.assert_is_geq_numpy_array(example_counts, 0)

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)

    # Read activations.
    print('Reading activations from: "{0:s}"...'.format(
        input_activation_file_name))
    activation_matrix, activation_metadata_dict = model_activation.read_file(
        input_activation_file_name)

    num_model_components = activation_matrix.shape[1]
    if num_model_components > 1:
        error_string = (
            'The file should contain activations for only one model component, '
            'not {0:d}.').format(num_model_components)

        raise ValueError(error_string)

    storm_activations = activation_matrix[:, 0]
    full_id_strings = activation_metadata_dict[model_activation.FULL_IDS_KEY]
    storm_times_unix_sec = activation_metadata_dict[
        model_activation.STORM_TIMES_KEY]

    num_storm_objects = len(full_id_strings)
    error_checking.assert_is_leq(numpy.sum(example_counts), num_storm_objects)

    # Find high- and low-activation examples.
    if num_low_activation_examples + num_high_activation_examples > 0:
        high_indices, low_indices = (
            model_activation.get_hilo_activation_examples(
                storm_activations=storm_activations,
                num_low_activation_examples=num_low_activation_examples,
                num_high_activation_examples=num_high_activation_examples,
                unique_storm_cells=unique_storm_cells,
                full_storm_id_strings=full_id_strings))
    else:
        high_indices = numpy.array([], dtype=int)
        low_indices = numpy.array([], dtype=int)

    # Write high-activation examples to file.
    if len(high_indices) > 0:
        high_activation_file_name = '{0:s}/high_activation_examples.p'.format(
            output_dir_name)

        print((
            'Writing IDs and times for high-activation examples to: "{0:s}"...'
        ).format(high_activation_file_name))

        this_activation_matrix = numpy.reshape(storm_activations[high_indices],
                                               (len(high_indices), 1))

        model_activation.write_file(
            pickle_file_name=high_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[full_id_strings[k] for k in high_indices],
            storm_times_unix_sec=storm_times_unix_sec[high_indices],
            model_file_name=activation_metadata_dict[
                model_activation.MODEL_FILE_NAME_KEY],
            component_type_string=activation_metadata_dict[
                model_activation.COMPONENT_TYPE_KEY],
            target_class=activation_metadata_dict[
                model_activation.TARGET_CLASS_KEY],
            layer_name=activation_metadata_dict[
                model_activation.LAYER_NAME_KEY],
            neuron_index_matrix=activation_metadata_dict[
                model_activation.NEURON_INDICES_KEY],
            channel_indices=activation_metadata_dict[
                model_activation.CHANNEL_INDICES_KEY])

    # Write low-activation examples to file.
    if len(low_indices) > 0:
        low_activation_file_name = '{0:s}/low_activation_examples.p'.format(
            output_dir_name)

        print(
            ('Writing IDs and times for low-activation examples to: "{0:s}"...'
             ).format(low_activation_file_name))

        this_activation_matrix = numpy.reshape(storm_activations[low_indices],
                                               (len(low_indices), 1))

        model_activation.write_file(
            pickle_file_name=low_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[full_id_strings[k] for k in low_indices],
            storm_times_unix_sec=storm_times_unix_sec[low_indices],
            model_file_name=activation_metadata_dict[
                model_activation.MODEL_FILE_NAME_KEY],
            component_type_string=activation_metadata_dict[
                model_activation.COMPONENT_TYPE_KEY],
            target_class=activation_metadata_dict[
                model_activation.TARGET_CLASS_KEY],
            layer_name=activation_metadata_dict[
                model_activation.LAYER_NAME_KEY],
            neuron_index_matrix=activation_metadata_dict[
                model_activation.NEURON_INDICES_KEY],
            channel_indices=activation_metadata_dict[
                model_activation.CHANNEL_INDICES_KEY])

    if num_hits + num_misses + num_false_alarms + num_correct_nulls == 0:
        return

    print(SEPARATOR_STRING)
    target_value_dict = _read_target_values(
        top_target_dir_name=top_target_dir_name,
        storm_activations=storm_activations,
        activation_metadata_dict=activation_metadata_dict)
    print(SEPARATOR_STRING)

    full_id_strings = target_value_dict[FULL_IDS_KEY]
    storm_times_unix_sec = target_value_dict[STORM_TIMES_KEY]
    storm_activations = target_value_dict[STORM_ACTIVATIONS_KEY]
    storm_target_values = target_value_dict[TARGET_VALUES_KEY]

    ct_extreme_dict = model_activation.get_contingency_table_extremes(
        storm_activations=storm_activations,
        storm_target_values=storm_target_values,
        num_hits=num_hits,
        num_misses=num_misses,
        num_false_alarms=num_false_alarms,
        num_correct_nulls=num_correct_nulls,
        unique_storm_cells=unique_storm_cells,
        full_storm_id_strings=full_id_strings)

    hit_indices = ct_extreme_dict[model_activation.HIT_INDICES_KEY]
    miss_indices = ct_extreme_dict[model_activation.MISS_INDICES_KEY]
    false_alarm_indices = (
        ct_extreme_dict[model_activation.FALSE_ALARM_INDICES_KEY])
    correct_null_indices = (
        ct_extreme_dict[model_activation.CORRECT_NULL_INDICES_KEY])

    hit_id_strings = [full_id_strings[k] for k in hit_indices]
    miss_id_strings = [full_id_strings[k] for k in miss_indices]
    false_alarm_id_strings = [full_id_strings[k] for k in false_alarm_indices]
    correct_null_id_strings = [
        full_id_strings[k] for k in correct_null_indices
    ]

    hit_primary_id_strings = (
        temporal_tracking.full_to_partial_ids(hit_id_strings)[0])
    miss_primary_id_strings = (
        temporal_tracking.full_to_partial_ids(miss_id_strings)[0])
    fa_primary_id_strings = (
        temporal_tracking.full_to_partial_ids(false_alarm_id_strings)[0])
    cn_primary_id_strings = (
        temporal_tracking.full_to_partial_ids(correct_null_id_strings)[0])

    these_flags = numpy.array(
        [i in miss_primary_id_strings for i in hit_primary_id_strings],
        dtype=bool)
    print(
        ('Number of primary IDs in best hits AND worst misses = {0:d}').format(
            numpy.sum(these_flags)))

    these_flags = numpy.array(
        [i in fa_primary_id_strings for i in hit_primary_id_strings],
        dtype=bool)
    print(('Number of primary IDs in best hits AND worst false alarms = {0:d}'
           ).format(numpy.sum(these_flags)))

    these_flags = numpy.array(
        [i in cn_primary_id_strings for i in hit_primary_id_strings],
        dtype=bool)
    print(('Number of primary IDs in best hits AND best correct nulls = {0:d}'
           ).format(numpy.sum(these_flags)))

    these_flags = numpy.array(
        [i in fa_primary_id_strings for i in miss_primary_id_strings],
        dtype=bool)
    print(
        ('Number of primary IDs in worst misses AND worst false alarms = {0:d}'
         ).format(numpy.sum(these_flags)))

    these_flags = numpy.array(
        [i in cn_primary_id_strings for i in miss_primary_id_strings],
        dtype=bool)
    print(
        ('Number of primary IDs in worst misses AND best correct nulls = {0:d}'
         ).format(numpy.sum(these_flags)))

    these_flags = numpy.array(
        [i in cn_primary_id_strings for i in fa_primary_id_strings],
        dtype=bool)
    print((
        'Number of primary IDs in worst false alarms AND best correct nulls = '
        '{0:d}').format(numpy.sum(these_flags)))

    # Write best hits to file.
    if len(hit_indices) > 0:
        best_hit_file_name = '{0:s}/best_hits.p'.format(output_dir_name)
        print('Writing IDs and times for best hits to: "{0:s}"...'.format(
            best_hit_file_name))

        this_activation_matrix = numpy.reshape(storm_activations[hit_indices],
                                               (len(hit_indices), 1))

        model_activation.write_file(
            pickle_file_name=best_hit_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[full_id_strings[k] for k in hit_indices],
            storm_times_unix_sec=storm_times_unix_sec[hit_indices],
            model_file_name=activation_metadata_dict[
                model_activation.MODEL_FILE_NAME_KEY],
            component_type_string=activation_metadata_dict[
                model_activation.COMPONENT_TYPE_KEY],
            target_class=activation_metadata_dict[
                model_activation.TARGET_CLASS_KEY],
            layer_name=activation_metadata_dict[
                model_activation.LAYER_NAME_KEY],
            neuron_index_matrix=activation_metadata_dict[
                model_activation.NEURON_INDICES_KEY],
            channel_indices=activation_metadata_dict[
                model_activation.CHANNEL_INDICES_KEY])

    # Write worst misses to file.
    if len(miss_indices) > 0:
        worst_miss_file_name = '{0:s}/worst_misses.p'.format(output_dir_name)
        print('Writing IDs and times for worst misses to: "{0:s}"...'.format(
            worst_miss_file_name))

        this_activation_matrix = numpy.reshape(storm_activations[miss_indices],
                                               (len(miss_indices), 1))

        model_activation.write_file(
            pickle_file_name=worst_miss_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[full_id_strings[k] for k in miss_indices],
            storm_times_unix_sec=storm_times_unix_sec[miss_indices],
            model_file_name=activation_metadata_dict[
                model_activation.MODEL_FILE_NAME_KEY],
            component_type_string=activation_metadata_dict[
                model_activation.COMPONENT_TYPE_KEY],
            target_class=activation_metadata_dict[
                model_activation.TARGET_CLASS_KEY],
            layer_name=activation_metadata_dict[
                model_activation.LAYER_NAME_KEY],
            neuron_index_matrix=activation_metadata_dict[
                model_activation.NEURON_INDICES_KEY],
            channel_indices=activation_metadata_dict[
                model_activation.CHANNEL_INDICES_KEY])

    # Write worst false alarms to file.
    if len(false_alarm_indices) > 0:
        worst_fa_file_name = '{0:s}/worst_false_alarms.p'.format(
            output_dir_name)

        print(('Writing IDs and times for worst false alarms to: "{0:s}"...'
               ).format(worst_fa_file_name))

        this_activation_matrix = numpy.reshape(
            storm_activations[false_alarm_indices],
            (len(false_alarm_indices), 1))

        model_activation.write_file(
            pickle_file_name=worst_fa_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[full_id_strings[k] for k in false_alarm_indices],
            storm_times_unix_sec=storm_times_unix_sec[false_alarm_indices],
            model_file_name=activation_metadata_dict[
                model_activation.MODEL_FILE_NAME_KEY],
            component_type_string=activation_metadata_dict[
                model_activation.COMPONENT_TYPE_KEY],
            target_class=activation_metadata_dict[
                model_activation.TARGET_CLASS_KEY],
            layer_name=activation_metadata_dict[
                model_activation.LAYER_NAME_KEY],
            neuron_index_matrix=activation_metadata_dict[
                model_activation.NEURON_INDICES_KEY],
            channel_indices=activation_metadata_dict[
                model_activation.CHANNEL_INDICES_KEY])

    # Write best correct nulls to file.
    if len(correct_null_indices) > 0:
        best_cn_file_name = '{0:s}/best_correct_nulls.p'.format(
            output_dir_name)

        print(('Writing IDs and times for best correct nulls to: "{0:s}"...'
               ).format(best_cn_file_name))

        this_activation_matrix = numpy.reshape(
            storm_activations[correct_null_indices],
            (len(correct_null_indices), 1))

        model_activation.write_file(
            pickle_file_name=best_cn_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[full_id_strings[k] for k in correct_null_indices],
            storm_times_unix_sec=storm_times_unix_sec[correct_null_indices],
            model_file_name=activation_metadata_dict[
                model_activation.MODEL_FILE_NAME_KEY],
            component_type_string=activation_metadata_dict[
                model_activation.COMPONENT_TYPE_KEY],
            target_class=activation_metadata_dict[
                model_activation.TARGET_CLASS_KEY],
            layer_name=activation_metadata_dict[
                model_activation.LAYER_NAME_KEY],
            neuron_index_matrix=activation_metadata_dict[
                model_activation.NEURON_INDICES_KEY],
            channel_indices=activation_metadata_dict[
                model_activation.CHANNEL_INDICES_KEY])
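
Each of the four write blocks above repeats the same plumbing: slice the activations at the chosen indices, reshape them into a one-column matrix, and pass the metadata from the original activation file through to model_activation.write_file. A hypothetical helper (not part of the original script; the name and signature are made up for illustration, and it assumes the same imports and metadata keys as the script itself) could factor that out:

def _write_example_subset_sketch(
        pickle_file_name, indices, storm_activations, full_id_strings,
        storm_times_unix_sec, activation_metadata_dict):
    """Illustrative sketch: writes one subset of examples to a Pickle file."""
    if len(indices) == 0:
        return

    this_activation_matrix = numpy.reshape(
        storm_activations[indices], (len(indices), 1))

    model_activation.write_file(
        pickle_file_name=pickle_file_name,
        activation_matrix=this_activation_matrix,
        full_id_strings=[full_id_strings[k] for k in indices],
        storm_times_unix_sec=storm_times_unix_sec[indices],
        model_file_name=activation_metadata_dict[
            model_activation.MODEL_FILE_NAME_KEY],
        component_type_string=activation_metadata_dict[
            model_activation.COMPONENT_TYPE_KEY],
        target_class=activation_metadata_dict[
            model_activation.TARGET_CLASS_KEY],
        layer_name=activation_metadata_dict[
            model_activation.LAYER_NAME_KEY],
        neuron_index_matrix=activation_metadata_dict[
            model_activation.NEURON_INDICES_KEY],
        channel_indices=activation_metadata_dict[
            model_activation.CHANNEL_INDICES_KEY])

With such a helper, the best-hits block, for example, would reduce to a single call: _write_example_subset_sketch('{0:s}/best_hits.p'.format(output_dir_name), hit_indices, storm_activations, full_id_strings, storm_times_unix_sec, activation_metadata_dict).
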
Code example #5
def _run(prediction_file_names, top_match_dir_name, unique_storm_cells,
         num_hits, num_misses, num_false_alarms, num_correct_nulls,
         num_disagreements, output_dir_names):
    """Finds extreme examples vis-a-vis two models.

    This is effectively the main method.

    :param prediction_file_names: See documentation at top of file.
    :param top_match_dir_name: Same.
    :param unique_storm_cells: Same.
    :param num_hits: Same.
    :param num_misses: Same.
    :param num_false_alarms: Same.
    :param num_correct_nulls: Same.
    :param num_disagreements: Same.
    :param output_dir_names: Same.
    """

    # TODO(thunderhoser): Throw error if multiclass predictions are read.

    # Check input args.
    example_counts = numpy.array([
        num_hits, num_misses, num_false_alarms, num_correct_nulls,
        num_disagreements
    ], dtype=int)

    error_checking.assert_is_geq_numpy_array(example_counts, 0)

    first_output_dir_name = output_dir_names[0]
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=first_output_dir_name)

    second_output_dir_name = output_dir_names[1]
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=second_output_dir_name)

    # Match storm objects between the two prediction files.
    print('Reading data from: "{0:s}"...'.format(prediction_file_names[0]))
    first_prediction_dict = prediction_io.read_ungridded_predictions(
        prediction_file_names[0])

    print('Reading data from: "{0:s}"...'.format(prediction_file_names[1]))
    second_prediction_dict = prediction_io.read_ungridded_predictions(
        prediction_file_names[1])

    print(SEPARATOR_STRING)
    first_prediction_dict, second_prediction_dict = _match_storm_objects(
        first_prediction_dict=first_prediction_dict,
        second_prediction_dict=second_prediction_dict,
        top_match_dir_name=top_match_dir_name)
    print(SEPARATOR_STRING)

    observed_labels = first_prediction_dict[prediction_io.OBSERVED_LABELS_KEY]

    first_model_file_name = first_prediction_dict[prediction_io.MODEL_FILE_KEY]
    first_full_id_strings = first_prediction_dict[prediction_io.STORM_IDS_KEY]
    first_storm_times_unix_sec = first_prediction_dict[
        prediction_io.STORM_TIMES_KEY]
    first_probabilities = first_prediction_dict[
        prediction_io.PROBABILITY_MATRIX_KEY][:, 1]

    second_model_file_name = second_prediction_dict[
        prediction_io.MODEL_FILE_KEY]
    second_full_id_strings = second_prediction_dict[
        prediction_io.STORM_IDS_KEY]
    second_storm_times_unix_sec = second_prediction_dict[
        prediction_io.STORM_TIMES_KEY]
    second_probabilities = second_prediction_dict[
        prediction_io.PROBABILITY_MATRIX_KEY][:, 1]

    if num_disagreements > 0:
        second_high_indices, first_high_indices = (
            model_activation.get_hilo_activation_examples(
                storm_activations=second_probabilities - first_probabilities,
                num_low_activation_examples=num_disagreements,
                num_high_activation_examples=num_disagreements,
                unique_storm_cells=unique_storm_cells,
                full_storm_id_strings=first_full_id_strings))

        # Print summary to command window.
        this_mean_diff = numpy.mean(second_probabilities[second_high_indices] -
                                    first_probabilities[second_high_indices])

        print((
            'Average prob difference for {0:d} worst disagreements with second '
            'model higher: {1:.3f}').format(num_disagreements, this_mean_diff))

        this_mean_diff = numpy.mean(second_probabilities[first_high_indices] -
                                    first_probabilities[first_high_indices])

        print((
            'Average prob difference for {0:d} worst disagreements with first '
            'model higher: {1:.3f}').format(num_disagreements, this_mean_diff))

        # Write examples where the second model's probability is higher, from
        # the first model's perspective.
        this_activation_file_name = '{0:s}/low_disagreement_examples.p'.format(
            first_output_dir_name)

        print(('Writing disagreements (second model higher) to: "{0:s}"...'
               ).format(this_activation_file_name))

        this_activation_matrix = numpy.reshape(
            first_probabilities[second_high_indices],
            (len(second_high_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[
                first_full_id_strings[j] for j in second_high_indices
            ],
            storm_times_unix_sec=first_storm_times_unix_sec[
                second_high_indices],
            model_file_name=first_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

        # Write the same cases (second model higher) from the second model's
        # perspective.
        this_activation_file_name = '{0:s}/high_disagreement_examples.p'.format(
            second_output_dir_name)

        print(('Writing disagreements (second model higher) to: "{0:s}"...'
               ).format(this_activation_file_name))

        this_activation_matrix = numpy.reshape(
            second_probabilities[second_high_indices],
            (len(second_high_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[
                second_full_id_strings[j] for j in second_high_indices
            ],
            storm_times_unix_sec=second_storm_times_unix_sec[
                second_high_indices],
            model_file_name=second_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

        # Write examples where the first model's probability is higher, from
        # the first model's perspective.
        this_activation_file_name = '{0:s}/high_disagreement_examples.p'.format(
            first_output_dir_name)

        print(('Writing disagreements (first model higher) to: "{0:s}"...'
               ).format(this_activation_file_name))

        this_activation_matrix = numpy.reshape(
            first_probabilities[first_high_indices],
            (len(first_high_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[
                first_full_id_strings[j] for j in first_high_indices
            ],
            storm_times_unix_sec=first_storm_times_unix_sec[
                first_high_indices],
            model_file_name=first_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

        # Write the same cases (first model higher) from the second model's
        # perspective.
        this_activation_file_name = '{0:s}/low_disagreement_examples.p'.format(
            second_output_dir_name)

        print(('Writing disagreements (first model higher) to: "{0:s}"...'
               ).format(this_activation_file_name))

        this_activation_matrix = numpy.reshape(
            second_probabilities[first_high_indices],
            (len(first_high_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[
                second_full_id_strings[j] for j in first_high_indices
            ],
            storm_times_unix_sec=second_storm_times_unix_sec[
                first_high_indices],
            model_file_name=second_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

    if num_hits + num_misses + num_false_alarms + num_correct_nulls == 0:
        return

    mean_probabilities = 0.5 * (first_probabilities + second_probabilities)

    ct_extreme_dict = model_activation.get_contingency_table_extremes(
        storm_activations=mean_probabilities,
        storm_target_values=observed_labels,
        num_hits=num_hits,
        num_misses=num_misses,
        num_false_alarms=num_false_alarms,
        num_correct_nulls=num_correct_nulls,
        unique_storm_cells=unique_storm_cells,
        full_storm_id_strings=first_full_id_strings)

    hit_indices = ct_extreme_dict[model_activation.HIT_INDICES_KEY]
    miss_indices = ct_extreme_dict[model_activation.MISS_INDICES_KEY]
    false_alarm_indices = ct_extreme_dict[
        model_activation.FALSE_ALARM_INDICES_KEY]
    correct_null_indices = ct_extreme_dict[
        model_activation.CORRECT_NULL_INDICES_KEY]

    if num_hits > 0:
        print((
            'Mean probability from first and second model for {0:d} best hits: '
            '{1:.3f}, {2:.3f}').format(
                num_hits, numpy.mean(first_probabilities[hit_indices]),
                numpy.mean(second_probabilities[hit_indices])))

        this_activation_file_name = '{0:s}/best_hits.p'.format(
            first_output_dir_name)

        print('Writing best hits to: "{0:s}"...'.format(
            this_activation_file_name))
        this_activation_matrix = numpy.reshape(
            first_probabilities[hit_indices], (len(hit_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[first_full_id_strings[j] for j in hit_indices],
            storm_times_unix_sec=first_storm_times_unix_sec[hit_indices],
            model_file_name=first_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

        this_activation_file_name = '{0:s}/best_hits.p'.format(
            second_output_dir_name)

        print('Writing best hits to: "{0:s}"...'.format(
            this_activation_file_name))
        this_activation_matrix = numpy.reshape(
            second_probabilities[hit_indices], (len(hit_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[second_full_id_strings[j] for j in hit_indices],
            storm_times_unix_sec=second_storm_times_unix_sec[hit_indices],
            model_file_name=second_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

    if num_misses > 0:
        print(('Mean probability from first and second model for {0:d} worst '
               'misses: {1:.3f}, {2:.3f}').format(
                   num_misses, numpy.mean(first_probabilities[miss_indices]),
                   numpy.mean(second_probabilities[miss_indices])))

        this_activation_file_name = '{0:s}/worst_misses.p'.format(
            first_output_dir_name)

        print('Writing worst misses to: "{0:s}"...'.format(
            this_activation_file_name))
        this_activation_matrix = numpy.reshape(
            first_probabilities[miss_indices], (len(miss_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[first_full_id_strings[j] for j in miss_indices],
            storm_times_unix_sec=first_storm_times_unix_sec[miss_indices],
            model_file_name=first_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

        this_activation_file_name = '{0:s}/worst_misses.p'.format(
            second_output_dir_name)

        print('Writing worst misses to: "{0:s}"...'.format(
            this_activation_file_name))
        this_activation_matrix = numpy.reshape(
            second_probabilities[miss_indices], (len(miss_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[second_full_id_strings[j] for j in miss_indices],
            storm_times_unix_sec=second_storm_times_unix_sec[miss_indices],
            model_file_name=second_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

    if num_false_alarms > 0:
        print(('Mean probability from first and second model for {0:d} worst '
               'false alarms: {1:.3f}, {2:.3f}').format(
                   num_false_alarms,
                   numpy.mean(first_probabilities[false_alarm_indices]),
                   numpy.mean(second_probabilities[false_alarm_indices])))

        this_activation_file_name = '{0:s}/worst_false_alarms.p'.format(
            first_output_dir_name)

        print('Writing worst false alarms to: "{0:s}"...'.format(
            this_activation_file_name))
        this_activation_matrix = numpy.reshape(
            first_probabilities[false_alarm_indices],
            (len(false_alarm_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[
                first_full_id_strings[j] for j in false_alarm_indices
            ],
            storm_times_unix_sec=first_storm_times_unix_sec[
                false_alarm_indices],
            model_file_name=first_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

        this_activation_file_name = '{0:s}/worst_false_alarms.p'.format(
            second_output_dir_name)

        print('Writing worst false alarms to: "{0:s}"...'.format(
            this_activation_file_name))
        this_activation_matrix = numpy.reshape(
            second_probabilities[false_alarm_indices],
            (len(false_alarm_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[
                second_full_id_strings[j] for j in false_alarm_indices
            ],
            storm_times_unix_sec=second_storm_times_unix_sec[
                false_alarm_indices],
            model_file_name=second_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

    if num_correct_nulls > 0:
        print(('Mean probability from first and second model for {0:d} best '
               'correct nulls: {1:.3f}, {2:.3f}').format(
                   num_correct_nulls,
                   numpy.mean(first_probabilities[correct_null_indices]),
                   numpy.mean(second_probabilities[correct_null_indices])))

        this_activation_file_name = '{0:s}/best_correct_nulls.p'.format(
            first_output_dir_name)

        print('Writing best correct nulls to: "{0:s}"...'.format(
            this_activation_file_name))
        this_activation_matrix = numpy.reshape(
            first_probabilities[correct_null_indices],
            (len(correct_null_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[
                first_full_id_strings[j] for j in correct_null_indices
            ],
            storm_times_unix_sec=first_storm_times_unix_sec[
                correct_null_indices],
            model_file_name=first_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)

        this_activation_file_name = '{0:s}/best_correct_nulls.p'.format(
            second_output_dir_name)

        print('Writing best correct nulls to: "{0:s}"...'.format(
            this_activation_file_name))
        this_activation_matrix = numpy.reshape(
            second_probabilities[correct_null_indices],
            (len(correct_null_indices), 1))

        model_activation.write_file(
            pickle_file_name=this_activation_file_name,
            activation_matrix=this_activation_matrix,
            full_id_strings=[
                second_full_id_strings[j] for j in correct_null_indices
            ],
            storm_times_unix_sec=second_storm_times_unix_sec[
                correct_null_indices],
            model_file_name=second_model_file_name,
            component_type_string=CLASS_COMPONENT_STRING,
            target_class=1)
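
The disagreement logic near the top of this script reuses get_hilo_activation_examples on the probability difference second_probabilities - first_probabilities: large positive differences mark storm objects on which the second model is much more confident, and large negative differences mark those on which the first model is. A tiny self-contained check of that interpretation, using synthetic probabilities instead of values read from prediction files:

import numpy

# Synthetic event probabilities from two models for five storm objects.
first_probabilities = numpy.array([0.9, 0.2, 0.5, 0.1, 0.6])
second_probabilities = numpy.array([0.1, 0.9, 0.5, 0.7, 0.6])

differences = second_probabilities - first_probabilities

# Largest positive difference: second model much more confident (index 1).
print(numpy.argmax(differences))  # 1

# Largest negative difference: first model much more confident (index 0).
print(numpy.argmin(differences))  # 0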