def _run(storm_metafile_name, warning_dir_name):
    """Finds which storms are linked to an NWS tornado warning.

    This is effectively the main method.

    :param storm_metafile_name: See documentation at top of file.
    :param warning_dir_name: Same.
    """

    print('Reading storm metadata from: "{0:s}"...'.format(
        storm_metafile_name
    ))
    full_storm_id_strings, valid_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name)
    )
    secondary_id_strings = (
        temporal_tracking.full_to_partial_ids(full_storm_id_strings)[-1]
    )

    these_times_unix_sec = numpy.concatenate((
        valid_times_unix_sec,
        valid_times_unix_sec - NUM_SECONDS_PER_DAY,
        valid_times_unix_sec + NUM_SECONDS_PER_DAY
    ))
    spc_date_strings = [
        time_conversion.time_to_spc_date_string(t)
        for t in these_times_unix_sec
    ]
    spc_date_strings = numpy.unique(numpy.array(spc_date_strings))

    linked_secondary_id_strings = []

    for this_spc_date_string in spc_date_strings:
        this_file_name = '{0:s}/tornado_warnings_{1:s}.p'.format(
            warning_dir_name, this_spc_date_string
        )

        print('Reading warnings from: "{0:s}"...'.format(this_file_name))
        this_file_handle = open(this_file_name, 'rb')
        this_warning_table = pickle.load(this_file_handle)
        this_file_handle.close()

        this_num_warnings = len(this_warning_table.index)

        for k in range(this_num_warnings):
            linked_secondary_id_strings += (
                this_warning_table[LINKED_SECONDARY_IDS_KEY].values[k]
            )

    print(SEPARATOR_STRING)

    storm_warned_flags = numpy.array(
        [s in linked_secondary_id_strings for s in secondary_id_strings],
        dtype=bool
    )

    print((
        '{0:d} of {1:d} storm objects are linked to an NWS tornado warning!'
    ).format(
        numpy.sum(storm_warned_flags), len(storm_warned_flags)
    ))
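
# The +/- one-day padding above matters because SPC "convective days" are
# conventionally taken to run from 1200 UTC to 1200 UTC, so a storm time
# before 1200 UTC belongs to the previous SPC date.  The helper below is a
# hypothetical stand-in for time_conversion.time_to_spc_date_string, shown
# only to illustrate that convention; the real implementation lives in
# GewitterGefahr's time_conversion module.

def _demo_time_to_spc_date_string(unix_time_sec):
    """Converts time to SPC date string (illustrative sketch only).

    Example: 1100 UTC 27 Apr 2011 falls on SPC date "20110426".
    """
    import time
    return time.strftime('%Y%m%d', time.gmtime(unix_time_sec - 43200))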
def _run(cnn_file_name, upconvnet_file_name, top_example_dir_name,
         baseline_storm_metafile_name, trial_storm_metafile_name,
         num_baseline_examples, num_trial_examples, num_novel_examples,
         cnn_feature_layer_name, percent_svd_variance_to_keep,
         output_file_name):
    """Runs novelty detection.

    This is effectively the main method.

    :param cnn_file_name: See documentation at top of file.
    :param upconvnet_file_name: Same.
    :param top_example_dir_name: Same.
    :param baseline_storm_metafile_name: Same.
    :param trial_storm_metafile_name: Same.
    :param num_baseline_examples: Same.
    :param num_trial_examples: Same.
    :param num_novel_examples: Same.
    :param cnn_feature_layer_name: Same.
    :param percent_svd_variance_to_keep: Same.
    :param output_file_name: Same.
    :raises: ValueError: if dimensions of first CNN input matrix != dimensions
        of upconvnet output.
    """

    print('Reading trained CNN from: "{0:s}"...'.format(cnn_file_name))
    cnn_model_object = cnn.read_model(cnn_file_name)
    cnn_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(cnn_file_name)[0]
    )

    print('Reading trained upconvnet from: "{0:s}"...'.format(
        upconvnet_file_name
    ))
    upconvnet_model_object = cnn.read_model(upconvnet_file_name)

    # ucn_output_dimensions = numpy.array(
    #     upconvnet_model_object.output.get_shape().as_list()[1:], dtype=int
    # )

    if isinstance(cnn_model_object.input, list):
        first_cnn_input_tensor = cnn_model_object.input[0]
    else:
        first_cnn_input_tensor = cnn_model_object.input

    cnn_input_dimensions = numpy.array(
        first_cnn_input_tensor.get_shape().as_list()[1:], dtype=int
    )

    # if not numpy.array_equal(cnn_input_dimensions, ucn_output_dimensions):
    #     error_string = (
    #         'Dimensions of first CNN input matrix ({0:s}) should equal '
    #         'dimensions of upconvnet output ({1:s}).'
    #     ).format(str(cnn_input_dimensions), str(ucn_output_dimensions))
    #
    #     raise ValueError(error_string)

    print('Reading CNN metadata from: "{0:s}"...'.format(cnn_metafile_name))
    cnn_metadata_dict = cnn.read_model_metadata(cnn_metafile_name)

    print('Reading metadata for baseline examples from: "{0:s}"...'.format(
        baseline_storm_metafile_name
    ))
    baseline_full_id_strings, baseline_times_unix_sec = (
        tracking_io.read_ids_and_times(baseline_storm_metafile_name)
    )

    print('Reading metadata for trial examples from: "{0:s}"...'.format(
        trial_storm_metafile_name
    ))
    trial_full_id_strings, trial_times_unix_sec = (
        tracking_io.read_ids_and_times(trial_storm_metafile_name)
    )

    if 0 < num_baseline_examples < len(baseline_full_id_strings):
        baseline_full_id_strings = baseline_full_id_strings[
            :num_baseline_examples]
        baseline_times_unix_sec = baseline_times_unix_sec[
            :num_baseline_examples]

    if 0 < num_trial_examples < len(trial_full_id_strings):
        trial_full_id_strings = trial_full_id_strings[:num_trial_examples]
        trial_times_unix_sec = trial_times_unix_sec[:num_trial_examples]

    num_trial_examples = len(trial_full_id_strings)

    if num_novel_examples <= 0:
        num_novel_examples = num_trial_examples + 0

    num_novel_examples = min([num_novel_examples, num_trial_examples])
    print('Number of novel examples to find: {0:d}'.format(
        num_novel_examples
    ))

    bad_baseline_indices = tracking_utils.find_storm_objects(
        all_id_strings=baseline_full_id_strings,
        all_times_unix_sec=baseline_times_unix_sec,
        id_strings_to_keep=trial_full_id_strings,
        times_to_keep_unix_sec=trial_times_unix_sec, allow_missing=True)

    print('Removing {0:d} trial examples from baseline set...'.format(
        len(bad_baseline_indices)
    ))

    baseline_times_unix_sec = numpy.delete(
        baseline_times_unix_sec, bad_baseline_indices
    )
    baseline_full_id_strings = numpy.delete(
        numpy.array(baseline_full_id_strings), bad_baseline_indices
    )
    baseline_full_id_strings = baseline_full_id_strings.tolist()

    # num_baseline_examples = len(baseline_full_id_strings)

    print(SEPARATOR_STRING)
    list_of_baseline_input_matrices, _ = testing_io.read_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=baseline_full_id_strings,
        desired_times_unix_sec=baseline_times_unix_sec,
        option_dict=cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        list_of_layer_operation_dicts=cnn_metadata_dict[
            cnn.LAYER_OPERATIONS_KEY]
    )
    print(SEPARATOR_STRING)

    list_of_trial_input_matrices, _ = testing_io.read_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=trial_full_id_strings,
        desired_times_unix_sec=trial_times_unix_sec,
        option_dict=cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        list_of_layer_operation_dicts=cnn_metadata_dict[
            cnn.LAYER_OPERATIONS_KEY]
    )
    print(SEPARATOR_STRING)

    novelty_dict = novelty_detection.do_novelty_detection(
        list_of_baseline_input_matrices=list_of_baseline_input_matrices,
        list_of_trial_input_matrices=list_of_trial_input_matrices,
        cnn_model_object=cnn_model_object,
        cnn_feature_layer_name=cnn_feature_layer_name,
        upconvnet_model_object=upconvnet_model_object,
        num_novel_examples=num_novel_examples, multipass=False,
        percent_svd_variance_to_keep=percent_svd_variance_to_keep)
    print(SEPARATOR_STRING)

    print('Adding metadata to novelty-detection results...')
    novelty_dict = novelty_detection.add_metadata(
        novelty_dict=novelty_dict,
        baseline_full_id_strings=baseline_full_id_strings,
        baseline_storm_times_unix_sec=baseline_times_unix_sec,
        trial_full_id_strings=trial_full_id_strings,
        trial_storm_times_unix_sec=trial_times_unix_sec,
        cnn_file_name=cnn_file_name,
        upconvnet_file_name=upconvnet_file_name)

    print('Denormalizing inputs and outputs of novelty detection...')

    novelty_dict[novelty_detection.BASELINE_INPUTS_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=novelty_dict[
                novelty_detection.BASELINE_INPUTS_KEY],
            model_metadata_dict=cnn_metadata_dict)
    )

    novelty_dict[novelty_detection.TRIAL_INPUTS_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=novelty_dict[
                novelty_detection.TRIAL_INPUTS_KEY],
            model_metadata_dict=cnn_metadata_dict)
    )

    cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
        trainval_io.SOUNDING_FIELDS_KEY] = None

    novelty_dict[novelty_detection.NOVEL_IMAGES_UPCONV_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[
                novelty_dict[novelty_detection.NOVEL_IMAGES_UPCONV_KEY]
            ],
            model_metadata_dict=cnn_metadata_dict)
    )[0]

    novelty_dict[novelty_detection.NOVEL_IMAGES_UPCONV_SVD_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[
                novelty_dict[novelty_detection.NOVEL_IMAGES_UPCONV_SVD_KEY]
            ],
            model_metadata_dict=cnn_metadata_dict)
    )[0]

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    novelty_detection.write_standard_file(
        novelty_dict=novelty_dict, pickle_file_name=output_file_name)
def _run(model_file_name, component_type_string, target_class, layer_name,
         ideal_activation, neuron_indices, channel_index,
         top_example_dir_name, storm_metafile_name, num_examples,
         randomize_weights, cascading_random, output_file_name):
    """Computes saliency map for each storm object and each model component.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param ideal_activation: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param randomize_weights: Same.
    :param cascading_random: Same.
    :param output_file_name: Same.
    """

    # Check input args.
    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
    model_interpretation.check_component_type(component_type_string)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0]
    )

    print('Reading model metadata from: "{0:s}"...'.format(
        model_metafile_name
    ))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    output_dir_name, pathless_output_file_name = os.path.split(
        output_file_name)
    extensionless_output_file_name, output_file_extension = os.path.splitext(
        pathless_output_file_name)

    if randomize_weights:
        conv_dense_layer_names = _find_conv_and_dense_layers(model_object)
        conv_dense_layer_names.reverse()
        num_sets = len(conv_dense_layer_names)
    else:
        conv_dense_layer_names = []
        num_sets = 1

    print('Reading storm metadata from: "{0:s}"...'.format(
        storm_metafile_name
    ))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name)
    )
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    sounding_pressure_matrix_pa = example_dict[
        testing_io.SOUNDING_PRESSURES_KEY]

    denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(predictor_matrices),
        training_option_dict=training_option_dict)

    print('Denormalizing model inputs...')
    denorm_predictor_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_predictor_matrices,
        model_metadata_dict=model_metadata_dict)
    print(SEPARATOR_STRING)

    for k in range(num_sets):
        if randomize_weights:
            if cascading_random:
                _reset_weights_in_layer(
                    model_object=model_object,
                    layer_name=conv_dense_layer_names[k])

                this_model_object = model_object
                this_output_file_name = (
                    '{0:s}/{1:s}_cascading-random_{2:s}{3:s}'
                ).format(
                    output_dir_name, extensionless_output_file_name,
                    conv_dense_layer_names[k].replace('_', '-'),
                    output_file_extension
                )
            else:
                this_model_object = keras.models.Model.from_config(
                    model_object.get_config())
                this_model_object.set_weights(model_object.get_weights())

                _reset_weights_in_layer(
                    model_object=this_model_object,
                    layer_name=conv_dense_layer_names[k])

                this_output_file_name = '{0:s}/{1:s}_random_{2:s}{3:s}'.format(
                    output_dir_name, extensionless_output_file_name,
                    conv_dense_layer_names[k].replace('_', '-'),
                    output_file_extension
                )
        else:
            this_model_object = model_object
            this_output_file_name = output_file_name

        # print(K.eval(this_model_object.get_layer(name='dense_3').weights[0]))

        if component_type_string == CLASS_COMPONENT_TYPE_STRING:
            print('Computing saliency maps for target class {0:d}...'.format(
                target_class
            ))

            saliency_matrices = (
                saliency_maps.get_saliency_maps_for_class_activation(
                    model_object=this_model_object, target_class=target_class,
                    list_of_input_matrices=predictor_matrices)
            )
        elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
            print((
                'Computing saliency maps for neuron {0:s} in layer "{1:s}"...'
            ).format(str(neuron_indices), layer_name))

            saliency_matrices = (
                saliency_maps.get_saliency_maps_for_neuron_activation(
                    model_object=this_model_object, layer_name=layer_name,
                    neuron_indices=neuron_indices,
                    list_of_input_matrices=predictor_matrices,
                    ideal_activation=ideal_activation)
            )
        else:
            print((
                'Computing saliency maps for channel {0:d} in layer "{1:s}"...'
            ).format(channel_index, layer_name))

            saliency_matrices = (
                saliency_maps.get_saliency_maps_for_channel_activation(
                    model_object=this_model_object, layer_name=layer_name,
                    channel_index=channel_index,
                    list_of_input_matrices=predictor_matrices,
                    stat_function_for_neuron_activations=K.max,
                    ideal_activation=ideal_activation)
            )

        saliency_matrices = trainval_io.separate_shear_and_reflectivity(
            list_of_input_matrices=saliency_matrices,
            training_option_dict=training_option_dict)

        print('Writing saliency maps to file: "{0:s}"...'.format(
            this_output_file_name
        ))

        saliency_metadata_dict = saliency_maps.check_metadata(
            component_type_string=component_type_string,
            target_class=target_class, layer_name=layer_name,
            ideal_activation=ideal_activation, neuron_indices=neuron_indices,
            channel_index=channel_index)

        saliency_maps.write_standard_file(
            pickle_file_name=this_output_file_name,
            denorm_predictor_matrices=denorm_predictor_matrices,
            saliency_matrices=saliency_matrices,
            full_storm_id_strings=full_storm_id_strings,
            storm_times_unix_sec=storm_times_unix_sec,
            model_file_name=model_file_name,
            metadata_dict=saliency_metadata_dict,
            sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
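
# _find_conv_and_dense_layers is defined elsewhere in this script.  Below is
# a minimal sketch of what such a helper might look like (an assumption, not
# the original implementation): it walks the Keras model and collects the
# names of convolutional and dense layers, which the loop above then
# randomizes one layer at a time.

def _demo_find_conv_and_dense_layers(model_object):
    """Returns names of conv and dense layers, in input-to-output order."""
    layer_names = []

    for this_layer in model_object.layers:
        if isinstance(
                this_layer,
                (keras.layers.Conv2D, keras.layers.Conv3D, keras.layers.Dense)
        ):
            layer_names.append(this_layer.name)

    return layer_names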
def _run(model_file_name, top_example_dir_name, storm_metafile_name,
         output_dir_name):
    """Uses trained CNN to make predictions for specific examples.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param output_dir_name: Same.
    :raises: ValueError: if the model does multi-class classification.
    """

    print('Reading CNN from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    num_output_neurons = (
        model_object.layers[-1].output.get_shape().as_list()[-1]
    )

    if num_output_neurons > 2:
        error_string = (
            'The model has {0:d} output neurons, which suggests {0:d}-class '
            'classification. This script handles only binary classification.'
        ).format(num_output_neurons)

        raise ValueError(error_string)

    soundings_only = False

    if isinstance(model_object.input, list):
        list_of_input_tensors = model_object.input
    else:
        list_of_input_tensors = [model_object.input]

    if len(list_of_input_tensors) == 1:
        these_spatial_dim = numpy.array(
            list_of_input_tensors[0].get_shape().as_list()[1:-1], dtype=int
        )
        soundings_only = len(these_spatial_dim) == 1

    cnn_metafile_name = cnn.find_metafile(
        model_file_name=model_file_name, raise_error_if_missing=True
    )

    print('Reading CNN metadata from: "{0:s}"...'.format(cnn_metafile_name))
    cnn_metadata_dict = cnn.read_model_metadata(cnn_metafile_name)

    print('Reading storm metadata from: "{0:s}"...'.format(
        storm_metafile_name
    ))
    desired_full_id_strings, desired_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name)
    )

    unique_spc_date_strings = list(set([
        time_conversion.time_to_spc_date_string(t)
        for t in desired_times_unix_sec
    ]))

    example_file_names = [
        input_examples.find_example_file(
            top_directory_name=top_example_dir_name, shuffled=False,
            spc_date_string=d, raise_error_if_missing=True
        ) for d in unique_spc_date_strings
    ]

    first_spc_date_string = time_conversion.time_to_spc_date_string(
        numpy.min(desired_times_unix_sec)
    )
    last_spc_date_string = time_conversion.time_to_spc_date_string(
        numpy.max(desired_times_unix_sec)
    )

    training_option_dict = cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.EXAMPLE_FILES_KEY] = example_file_names
    training_option_dict[trainval_io.FIRST_STORM_TIME_KEY] = (
        time_conversion.get_start_of_spc_date(first_spc_date_string)
    )
    training_option_dict[trainval_io.LAST_STORM_TIME_KEY] = (
        time_conversion.get_end_of_spc_date(last_spc_date_string)
    )
    training_option_dict[trainval_io.NUM_EXAMPLES_PER_BATCH_KEY] = (
        NUM_EXAMPLES_PER_BATCH
    )

    if soundings_only:
        generator_object = testing_io.sounding_generator(
            option_dict=training_option_dict,
            desired_full_id_strings=desired_full_id_strings,
            desired_times_unix_sec=desired_times_unix_sec)
    elif cnn_metadata_dict[cnn.LAYER_OPERATIONS_KEY] is not None:
        generator_object = testing_io.gridrad_generator_2d_reduced(
            option_dict=training_option_dict,
            desired_full_id_strings=desired_full_id_strings,
            desired_times_unix_sec=desired_times_unix_sec,
            list_of_operation_dicts=cnn_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]
        )
    elif cnn_metadata_dict[cnn.CONV_2D3D_KEY]:
        generator_object = testing_io.myrorss_generator_2d3d(
            option_dict=training_option_dict,
            desired_full_id_strings=desired_full_id_strings,
            desired_times_unix_sec=desired_times_unix_sec)
    else:
        generator_object = testing_io.generator_2d_or_3d(
            option_dict=training_option_dict,
            desired_full_id_strings=desired_full_id_strings,
            desired_times_unix_sec=desired_times_unix_sec)

    include_soundings = (
        training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] is not None
    )

    full_storm_id_strings = []
    storm_times_unix_sec = numpy.array([], dtype=int)
    observed_labels = numpy.array([], dtype=int)
    class_probability_matrix = None

    while True:
        try:
            this_storm_object_dict = next(generator_object)
            print(SEPARATOR_STRING)
        except StopIteration:
            break

        full_storm_id_strings += this_storm_object_dict[
            testing_io.FULL_IDS_KEY]
        storm_times_unix_sec = numpy.concatenate((
            storm_times_unix_sec,
            this_storm_object_dict[testing_io.STORM_TIMES_KEY]
        ))
        observed_labels = numpy.concatenate((
            observed_labels,
            this_storm_object_dict[testing_io.TARGET_ARRAY_KEY]
        ))

        if soundings_only:
            these_predictor_matrices = [
                this_storm_object_dict[testing_io.SOUNDING_MATRIX_KEY]
            ]
        else:
            these_predictor_matrices = this_storm_object_dict[
                testing_io.INPUT_MATRICES_KEY]

        if include_soundings:
            this_sounding_matrix = these_predictor_matrices[-1]
        else:
            this_sounding_matrix = None

        if soundings_only:
            this_probability_matrix = cnn.apply_cnn_soundings_only(
                model_object=model_object,
                sounding_matrix=this_sounding_matrix, verbose=True)
        elif cnn_metadata_dict[cnn.CONV_2D3D_KEY]:
            if training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY]:
                this_probability_matrix = cnn.apply_2d_or_3d_cnn(
                    model_object=model_object,
                    radar_image_matrix=these_predictor_matrices[0],
                    sounding_matrix=this_sounding_matrix, verbose=True)
            else:
                this_probability_matrix = cnn.apply_2d3d_cnn(
                    model_object=model_object,
                    reflectivity_matrix_dbz=these_predictor_matrices[0],
                    azimuthal_shear_matrix_s01=these_predictor_matrices[1],
                    sounding_matrix=this_sounding_matrix, verbose=True)
        else:
            this_probability_matrix = cnn.apply_2d_or_3d_cnn(
                model_object=model_object,
                radar_image_matrix=these_predictor_matrices[0],
                sounding_matrix=this_sounding_matrix, verbose=True)

        print(SEPARATOR_STRING)

        if class_probability_matrix is None:
            class_probability_matrix = this_probability_matrix + 0.
        else:
            class_probability_matrix = numpy.concatenate(
                (class_probability_matrix, this_probability_matrix), axis=0
            )

    output_file_name = prediction_io.find_ungridded_file(
        directory_name=output_dir_name, raise_error_if_missing=False)

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    prediction_io.write_ungridded_predictions(
        netcdf_file_name=output_file_name,
        class_probability_matrix=class_probability_matrix,
        observed_labels=observed_labels, storm_ids=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        target_name=training_option_dict[trainval_io.TARGET_NAME_KEY],
        model_file_name=model_file_name
    )
def _run(storm_metafile_name, top_tracking_dir_name, latitude_buffer_deg,
         longitude_buffer_deg, rap_directory_name, ruc_directory_name,
         lead_time_seconds, lag_time_seconds, field_name_grib1,
         output_dir_name):
    """Plots RAP/RUC field centered on each example (storm object).

    This is effectively the main method.

    :param storm_metafile_name: See documentation at top of file.
    :param top_tracking_dir_name: Same.
    :param latitude_buffer_deg: Same.
    :param longitude_buffer_deg: Same.
    :param rap_directory_name: Same.
    :param ruc_directory_name: Same.
    :param lead_time_seconds: Same.
    :param lag_time_seconds: Same.
    :param field_name_grib1: Same.
    :param output_dir_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name
    )

    error_checking.assert_is_geq(latitude_buffer_deg, 1.)
    error_checking.assert_is_geq(longitude_buffer_deg, 1.)
    error_checking.assert_is_greater(lead_time_seconds, 0)
    error_checking.assert_is_greater(lag_time_seconds, 0)

    print('Reading metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name)
    )

    init_times_unix_sec = (
        storm_times_unix_sec + lead_time_seconds - lag_time_seconds
    )
    init_times_unix_sec = number_rounding.floor_to_nearest(
        init_times_unix_sec, INIT_TIME_INTERVAL_SEC
    )
    init_times_unix_sec = init_times_unix_sec.astype(int)

    num_examples = len(full_storm_id_strings)
    rap_file_names = [None] * num_examples
    ruc_file_names = [None] * num_examples

    for i in range(num_examples):
        if init_times_unix_sec[i] >= FIRST_RAP_TIME_UNIX_SEC:
            rap_file_names[i] = nwp_model_io.find_rap_file_any_grid(
                top_directory_name=rap_directory_name,
                init_time_unix_sec=init_times_unix_sec[i],
                lead_time_hours=0, raise_error_if_missing=True
            )
            continue

        ruc_file_names[i] = nwp_model_io.find_ruc_file_any_grid(
            top_directory_name=ruc_directory_name,
            init_time_unix_sec=init_times_unix_sec[i],
            lead_time_hours=0, raise_error_if_missing=True
        )

    for i in range(num_examples):
        _plot_rapruc_one_example(
            full_storm_id_string=full_storm_id_strings[i],
            storm_time_unix_sec=storm_times_unix_sec[i],
            top_tracking_dir_name=top_tracking_dir_name,
            latitude_buffer_deg=latitude_buffer_deg,
            longitude_buffer_deg=longitude_buffer_deg,
            lead_time_seconds=lead_time_seconds,
            field_name_grib1=field_name_grib1,
            output_dir_name=output_dir_name,
            rap_file_name=rap_file_names[i],
            ruc_file_name=ruc_file_names[i]
        )
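
# number_rounding.floor_to_nearest, used above to snap model-initialization
# times onto the RAP/RUC cycle, is assumed to behave like the sketch below
# (INIT_TIME_INTERVAL_SEC is defined elsewhere in this script; 3600 would
# correspond to hourly model cycles).

def _demo_floor_to_nearest(input_values, rounding_base):
    """E.g., unix time 1234567890 floored to base 3600 -> 1234566000."""
    return rounding_base * numpy.floor(input_values / rounding_base)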
def _run(model_file_name, top_example_dir_name, storm_metafile_name,
         num_examples, do_backwards_test, separate_radar_heights,
         num_bootstrap_reps, output_file_name):
    """Runs permutation test with specific examples (storm objects).

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param do_backwards_test: Same.
    :param separate_radar_heights: Same.
    :param num_bootstrap_reps: Same.
    :param output_file_name: Same.
    """

    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    metafile_name = cnn.find_metafile(model_file_name=model_file_name)
    print('Reading metadata from: "{0:s}"...'.format(metafile_name))
    cnn_metadata_dict = cnn.read_model_metadata(metafile_name)
    training_option_dict = cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]

    print('Reading storm metadata from: "{0:s}"...'.format(
        storm_metafile_name
    ))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name)
    )
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        numpy.random.seed(RANDOM_SEED)
        good_indices = numpy.random.permutation(len(full_storm_id_strings))
        good_indices = good_indices[:num_examples]

        full_storm_id_strings = [
            full_storm_id_strings[k] for k in good_indices
        ]
        storm_times_unix_sec = storm_times_unix_sec[good_indices]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=cnn_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    target_values = example_dict[testing_io.TARGET_ARRAY_KEY]

    correlation_matrix, predictor_names = correlation.get_pearson_correlations(
        predictor_matrices=predictor_matrices,
        cnn_metadata_dict=cnn_metadata_dict,
        separate_radar_heights=separate_radar_heights)
    print(SEPARATOR_STRING)

    num_predictors = len(predictor_names)

    for i in range(num_predictors):
        for j in range(i, num_predictors):
            print((
                'Pearson correlation between "{0:s}" and "{1:s}" = {2:.3f}'
            ).format(
                predictor_names[i], predictor_names[j],
                correlation_matrix[i, j]
            ))

    print(SEPARATOR_STRING)

    if do_backwards_test:
        result_dict = permutation.run_backwards_test(
            model_object=model_object,
            predictor_matrices=predictor_matrices,
            target_values=target_values,
            cnn_metadata_dict=cnn_metadata_dict,
            cost_function=permutation_utils.negative_auc_function,
            separate_radar_heights=separate_radar_heights,
            num_bootstrap_reps=num_bootstrap_reps)
    else:
        result_dict = permutation.run_forward_test(
            model_object=model_object,
            predictor_matrices=predictor_matrices,
            target_values=target_values,
            cnn_metadata_dict=cnn_metadata_dict,
            cost_function=permutation_utils.negative_auc_function,
            separate_radar_heights=separate_radar_heights,
            num_bootstrap_reps=num_bootstrap_reps)

    print(SEPARATOR_STRING)

    result_dict[permutation_utils.MODEL_FILE_KEY] = model_file_name
    result_dict[permutation_utils.TARGET_VALUES_KEY] = target_values
    result_dict[permutation_utils.FULL_IDS_KEY] = full_storm_id_strings
    result_dict[permutation_utils.STORM_TIMES_KEY] = storm_times_unix_sec

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    permutation_utils.write_results(
        result_dict=result_dict, pickle_file_name=output_file_name)
def _run(activation_file_name, storm_metafile_name, num_examples,
         top_example_dir_name, num_radar_rows, num_radar_columns,
         allow_whitespace, colour_bar_length, output_dir_name):
    """Plots one or more examples (storm objects) for human input.

    This is effectively the main method.

    :param activation_file_name: See documentation at top of file.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_example_dir_name: Same.
    :param num_radar_rows: Same.
    :param num_radar_columns: Same.
    :param allow_whitespace: Same.
    :param colour_bar_length: Same.
    :param output_dir_name: Same.
    """

    if num_radar_rows <= 0:
        num_radar_rows = None
    if num_radar_columns <= 0:
        num_radar_columns = None
    if activation_file_name in ['', 'None']:
        activation_file_name = None

    if activation_file_name is None:
        print('Reading data from: "{0:s}"...'.format(storm_metafile_name))
        full_storm_id_strings, storm_times_unix_sec = (
            tracking_io.read_ids_and_times(storm_metafile_name)
        )

        training_option_dict = dict()
        training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] = None
        training_option_dict[trainval_io.SOUNDING_HEIGHTS_KEY] = None
        training_option_dict[trainval_io.NUM_ROWS_KEY] = num_radar_rows
        training_option_dict[trainval_io.NUM_COLUMNS_KEY] = num_radar_columns
        training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
        training_option_dict[trainval_io.TARGET_NAME_KEY] = DUMMY_TARGET_NAME
        training_option_dict[trainval_io.BINARIZE_TARGET_KEY] = False
        training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
        training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

        model_metadata_dict = {cnn.LAYER_OPERATIONS_KEY: None}
    else:
        print('Reading data from: "{0:s}"...'.format(activation_file_name))
        activation_matrix, activation_metadata_dict = (
            model_activation.read_file(activation_file_name)
        )

        num_model_components = activation_matrix.shape[1]

        if num_model_components > 1:
            error_string = (
                'The file should contain activations for only one model '
                'component, not {0:d}.'
            ).format(num_model_components)

            raise TypeError(error_string)

        full_storm_id_strings = activation_metadata_dict[
            model_activation.FULL_IDS_KEY]
        storm_times_unix_sec = activation_metadata_dict[
            model_activation.STORM_TIMES_KEY]
        model_file_name = activation_metadata_dict[
            model_activation.MODEL_FILE_NAME_KEY]

        model_metafile_name = '{0:s}/model_metadata.p'.format(
            os.path.split(model_file_name)[0]
        )

        print('Reading metadata from: "{0:s}"...'.format(model_metafile_name))
        model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
        training_option_dict = model_metadata_dict[
            cnn.TRAINING_OPTION_DICT_KEY]

        training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
        training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
        training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None
        training_option_dict[trainval_io.RADAR_FIELDS_KEY] = SHEAR_FIELD_NAMES
        training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] = (
            REFL_HEIGHTS_M_AGL
        )
        training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY] = False

        model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY] = (
            training_option_dict
        )

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    print(SEPARATOR_STRING)
    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]

    # TODO(thunderhoser): The rest of this code is very HACKY.
    predictor_matrices[0] = trainval_io.upsample_reflectivity(
        predictor_matrices[0][..., 0])
    predictor_matrices[0] = numpy.expand_dims(predictor_matrices[0], axis=-1)

    example_dict = {
        input_examples.RADAR_FIELDS_KEY: SHEAR_FIELD_NAMES,
        input_examples.REFL_IMAGE_MATRIX_KEY: predictor_matrices[0],
        input_examples.AZ_SHEAR_IMAGE_MATRIX_KEY: predictor_matrices[1],
        input_examples.RADAR_HEIGHTS_KEY: REFL_HEIGHTS_M_AGL
    }

    example_dict = input_examples.reduce_examples_3d_to_2d(
        example_dict=example_dict,
        list_of_operation_dicts=[REFL_LAYER_OPERATION_DICT])

    predictor_matrices = [example_dict[input_examples.RADAR_IMAGE_MATRIX_KEY]]

    layer_operation_dicts = [
        {
            input_examples.RADAR_FIELD_KEY: f,
            input_examples.MIN_HEIGHT_KEY: h1,
            input_examples.MAX_HEIGHT_KEY: h2,
            input_examples.OPERATION_NAME_KEY: op
        }
        for f, h1, h2, op in zip(
            example_dict[input_examples.RADAR_FIELDS_KEY],
            example_dict[input_examples.MIN_RADAR_HEIGHTS_KEY],
            example_dict[input_examples.MAX_RADAR_HEIGHTS_KEY],
            example_dict[input_examples.RADAR_LAYER_OPERATION_NAMES_KEY]
        )
    ]

    model_metadata_dict[cnn.LAYER_OPERATIONS_KEY] = layer_operation_dicts

    figure_file_names = plot_examples.plot_examples(
        list_of_predictor_matrices=predictor_matrices,
        model_metadata_dict=model_metadata_dict, pmm_flag=False,
        output_dir_name=output_dir_name, plot_soundings=False,
        allow_whitespace=allow_whitespace, plot_panel_names=False,
        add_titles=False, label_colour_bars=True,
        colour_bar_length=colour_bar_length,
        colour_bar_font_size=COLOUR_BAR_FONT_SIZE,
        figure_resolution_dpi=FIGURE_RESOLUTION_DPI,
        refl_opacity=REFL_OPACITY, plot_grid_lines=False,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec)

    for this_file_name in figure_file_names:
        print('Resizing image to {0:d} pixels: "{1:s}"...'.format(
            FIGURE_SIZE_PIXELS, this_file_name
        ))

        imagemagick_utils.resize_image(
            input_file_name=this_file_name,
            output_file_name=this_file_name,
            output_size_pixels=FIGURE_SIZE_PIXELS)
def _run(model_file_name, top_example_dir_name, storm_metafile_name,
         num_examples, output_file_name):
    """Creates dummy saliency map for each storm object.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0]
    )

    print('Reading model metadata from: "{0:s}"...'.format(
        model_metafile_name
    ))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print('Reading storm metadata from: "{0:s}"...'.format(
        storm_metafile_name
    ))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name)
    )
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    sounding_pressure_matrix_pa = example_dict[
        testing_io.SOUNDING_PRESSURES_KEY]

    radar_matrix = predictor_matrices[0]
    num_examples = radar_matrix.shape[0]
    num_channels = radar_matrix.shape[-1]
    num_spatial_dim = len(radar_matrix.shape) - 2

    if num_spatial_dim == 2:
        kernel_matrix = numpy.expand_dims(EDGE_DETECTOR_MATRIX_2D, axis=-1)
    else:
        kernel_matrix = numpy.expand_dims(EDGE_DETECTOR_MATRIX_3D, axis=-1)

    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)
    kernel_matrix = numpy.expand_dims(kernel_matrix, axis=-1)
    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)

    radar_saliency_matrix = numpy.full(radar_matrix.shape, numpy.nan)

    for i in range(num_examples):
        if numpy.mod(i, 10) == 0:
            print((
                'Have created dummy saliency map for {0:d} of {1:d} '
                'examples...'
            ).format(i, num_examples))

        if num_spatial_dim == 2:
            this_saliency_matrix = standalone_utils.do_2d_convolution(
                feature_matrix=radar_matrix[i, ...],
                kernel_matrix=kernel_matrix, pad_edges=True,
                stride_length_px=1)
        else:
            this_saliency_matrix = standalone_utils.do_3d_convolution(
                feature_matrix=radar_matrix[i, ...],
                kernel_matrix=kernel_matrix, pad_edges=True,
                stride_length_px=1)

        radar_saliency_matrix[i, ...] = this_saliency_matrix[0, ...]

    print('Have created dummy saliency map for all {0:d} examples!'.format(
        num_examples
    ))
    print(SEPARATOR_STRING)

    saliency_matrices = [
        radar_saliency_matrix if k == 0 else predictor_matrices[k]
        for k in range(len(predictor_matrices))
    ]
    saliency_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=saliency_matrices,
        training_option_dict=training_option_dict)

    denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(predictor_matrices),
        training_option_dict=training_option_dict)

    print('Denormalizing model inputs...')
    denorm_predictor_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_predictor_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing saliency maps to file: "{0:s}"...'.format(
        output_file_name
    ))

    saliency_metadata_dict = saliency_maps.check_metadata(
        component_type_string=model_interpretation.CLASS_COMPONENT_TYPE_STRING,
        target_class=1)

    saliency_maps.write_standard_file(
        pickle_file_name=output_file_name,
        denorm_predictor_matrices=denorm_predictor_matrices,
        saliency_matrices=saliency_matrices,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        model_file_name=model_file_name,
        metadata_dict=saliency_metadata_dict,
        sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
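
# The "saliency" above is a fake, produced by convolving each example with a
# fixed edge detector.  EDGE_DETECTOR_MATRIX_2D is defined elsewhere in this
# script; the Laplacian-style kernel below is shown purely as an assumed
# example of such a detector.  The helper reproduces the channel tiling done
# above: the 2-D kernel is copied across both channel dimensions, yielding
# shape (num_rows, num_columns, num_channels, num_channels).

DEMO_EDGE_DETECTOR_MATRIX_2D = numpy.array([
    [0, 1, 0],
    [1, -4, 1],
    [0, 1, 0]
], dtype=float)


def _demo_expand_kernel(kernel_matrix_2d, num_channels):
    """Tiles a 2-D kernel over both channel dimensions."""
    kernel_matrix = numpy.expand_dims(kernel_matrix_2d, axis=-1)
    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)
    kernel_matrix = numpy.expand_dims(kernel_matrix, axis=-1)
    return numpy.repeat(kernel_matrix, num_channels, axis=-1)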
def _run(model_file_name, init_function_name, storm_metafile_name,
         num_examples, top_example_dir_name, component_type_string,
         target_class, layer_name, neuron_indices, channel_index,
         num_iterations, ideal_activation, learning_rate, output_file_name):
    """Runs backwards optimization on a trained CNN.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param init_function_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_example_dir_name: Same.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param num_iterations: Same.
    :param ideal_activation: Same.
    :param learning_rate: Same.
    :param output_file_name: Same.
    """

    model_interpretation.check_component_type(component_type_string)

    if ideal_activation <= 0:
        ideal_activation = None
    if init_function_name in ['', 'None']:
        init_function_name = None

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0]
    )

    print('Reading model metadata from: "{0:s}"...'.format(
        model_metafile_name
    ))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)

    if init_function_name is None:
        print('Reading storm metadata from: "{0:s}"...'.format(
            storm_metafile_name
        ))
        storm_ids, storm_times_unix_sec = tracking_io.read_ids_and_times(
            storm_metafile_name)

        if 0 < num_examples < len(storm_ids):
            storm_ids = storm_ids[:num_examples]
            storm_times_unix_sec = storm_times_unix_sec[:num_examples]

        list_of_init_matrices = testing_io.read_specific_examples(
            desired_storm_ids=storm_ids,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
            top_example_dir_name=top_example_dir_name,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]
        )[0]

        num_examples = list_of_init_matrices[0].shape[0]
        print(SEPARATOR_STRING)
    else:
        storm_ids = None
        storm_times_unix_sec = None
        num_examples = 1

        init_function = _create_initializer(
            init_function_name=init_function_name,
            model_metadata_dict=model_metadata_dict)

    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)
    list_of_optimized_matrices = None

    for i in range(num_examples):
        if init_function_name is None:
            this_init_arg = [a[[i], ...] for a in list_of_init_matrices]
        else:
            this_init_arg = init_function

        if component_type_string == CLASS_COMPONENT_TYPE_STRING:
            print((
                '\nOptimizing {0:d}th of {1:d} images for target class '
                '{2:d}...'
            ).format(i + 1, num_examples, target_class))

            these_optimized_matrices = backwards_opt.optimize_input_for_class(
                model_object=model_object, target_class=target_class,
                init_function_or_matrices=this_init_arg,
                num_iterations=num_iterations, learning_rate=learning_rate)

        elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
            print((
                '\nOptimizing {0:d}th of {1:d} images for neuron {2:s} in '
                'layer "{3:s}"...'
            ).format(i + 1, num_examples, str(neuron_indices), layer_name))

            these_optimized_matrices = backwards_opt.optimize_input_for_neuron(
                model_object=model_object, layer_name=layer_name,
                neuron_indices=neuron_indices,
                init_function_or_matrices=this_init_arg,
                num_iterations=num_iterations, learning_rate=learning_rate,
                ideal_activation=ideal_activation)

        else:
            print((
                '\nOptimizing {0:d}th of {1:d} images for channel {2:d} in '
                'layer "{3:s}"...'
            ).format(i + 1, num_examples, channel_index, layer_name))

            these_optimized_matrices = (
                backwards_opt.optimize_input_for_channel(
                    model_object=model_object, layer_name=layer_name,
                    channel_index=channel_index,
                    init_function_or_matrices=this_init_arg,
                    stat_function_for_neuron_activations=K.max,
                    num_iterations=num_iterations,
                    learning_rate=learning_rate,
                    ideal_activation=ideal_activation)
            )

        if list_of_optimized_matrices is None:
            num_matrices = len(these_optimized_matrices)
            list_of_optimized_matrices = [None] * num_matrices

        for k in range(len(list_of_optimized_matrices)):
            if list_of_optimized_matrices[k] is None:
                list_of_optimized_matrices[k] = (
                    these_optimized_matrices[k] + 0.
                )
            else:
                list_of_optimized_matrices[k] = numpy.concatenate(
                    (list_of_optimized_matrices[k],
                     these_optimized_matrices[k]),
                    axis=0
                )

    print(SEPARATOR_STRING)

    print('Denormalizing optimized examples...')
    list_of_optimized_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_optimized_matrices,
        model_metadata_dict=model_metadata_dict)

    if init_function_name is None:
        print('Denormalizing input examples...')
        list_of_init_matrices = model_interpretation.denormalize_data(
            list_of_input_matrices=list_of_init_matrices,
            model_metadata_dict=model_metadata_dict)

        this_init_arg = list_of_init_matrices
    else:
        this_init_arg = init_function_name + ''

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    backwards_opt.write_standard_file(
        pickle_file_name=output_file_name,
        list_of_optimized_matrices=list_of_optimized_matrices,
        model_file_name=model_file_name,
        init_function_name_or_matrices=this_init_arg,
        num_iterations=num_iterations, learning_rate=learning_rate,
        component_type_string=component_type_string,
        target_class=target_class, layer_name=layer_name,
        neuron_indices=neuron_indices, channel_index=channel_index,
        ideal_activation=ideal_activation, storm_ids=storm_ids,
        storm_times_unix_sec=storm_times_unix_sec)
def _run(cnn_file_name, upconvnet_file_name, top_example_dir_name,
         baseline_storm_metafile_name, trial_storm_metafile_name,
         num_baseline_examples, num_trial_examples, num_novel_examples,
         cnn_feature_layer_name, percent_variance_to_keep, output_file_name):
    """Runs novelty detection.

    This is effectively the main method.

    :param cnn_file_name: See documentation at top of file.
    :param upconvnet_file_name: Same.
    :param top_example_dir_name: Same.
    :param baseline_storm_metafile_name: Same.
    :param trial_storm_metafile_name: Same.
    :param num_baseline_examples: Same.
    :param num_trial_examples: Same.
    :param num_novel_examples: Same.
    :param cnn_feature_layer_name: Same.
    :param percent_variance_to_keep: Same.
    :param output_file_name: Same.
    :raises: ValueError: if dimensions of first CNN input matrix != dimensions
        of upconvnet output.
    """

    print('Reading trained CNN from: "{0:s}"...'.format(cnn_file_name))
    cnn_model_object = cnn.read_model(cnn_file_name)

    print('Reading trained upconvnet from: "{0:s}"...'.format(
        upconvnet_file_name
    ))
    upconvnet_model_object = cnn.read_model(upconvnet_file_name)

    _check_dimensions(cnn_model_object=cnn_model_object,
                      upconvnet_model_object=upconvnet_model_object)

    print('Reading metadata for baseline examples from: "{0:s}"...'.format(
        baseline_storm_metafile_name
    ))
    baseline_full_id_strings, baseline_times_unix_sec = (
        tracking_io.read_ids_and_times(baseline_storm_metafile_name)
    )

    print('Reading metadata for trial examples from: "{0:s}"...'.format(
        trial_storm_metafile_name
    ))
    trial_full_id_strings, trial_times_unix_sec = (
        tracking_io.read_ids_and_times(trial_storm_metafile_name)
    )

    this_dict = _filter_examples(
        trial_full_id_strings=trial_full_id_strings,
        trial_times_unix_sec=trial_times_unix_sec,
        num_trial_examples=num_trial_examples,
        baseline_full_id_strings=baseline_full_id_strings,
        baseline_times_unix_sec=baseline_times_unix_sec,
        num_baseline_examples=num_baseline_examples,
        num_novel_examples=num_novel_examples)

    trial_full_id_strings = this_dict[TRIAL_STORM_IDS_KEY]
    trial_times_unix_sec = this_dict[TRIAL_STORM_TIMES_KEY]
    baseline_full_id_strings = this_dict[BASELINE_STORM_IDS_KEY]
    baseline_times_unix_sec = this_dict[BASELINE_STORM_TIMES_KEY]
    num_novel_examples = this_dict[NUM_NOVEL_EXAMPLES_KEY]

    cnn_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(cnn_file_name)[0]
    )

    print('Reading CNN metadata from: "{0:s}"...'.format(cnn_metafile_name))
    cnn_metadata_dict = cnn.read_model_metadata(cnn_metafile_name)
    print(SEPARATOR_STRING)

    baseline_predictor_matrices = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=baseline_full_id_strings,
        desired_times_unix_sec=baseline_times_unix_sec,
        option_dict=cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        layer_operation_dicts=cnn_metadata_dict[cnn.LAYER_OPERATIONS_KEY]
    )[testing_io.INPUT_MATRICES_KEY]
    print(SEPARATOR_STRING)

    trial_predictor_matrices = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=trial_full_id_strings,
        desired_times_unix_sec=trial_times_unix_sec,
        option_dict=cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        layer_operation_dicts=cnn_metadata_dict[cnn.LAYER_OPERATIONS_KEY]
    )[testing_io.INPUT_MATRICES_KEY]
    print(SEPARATOR_STRING)

    novelty_dict = novelty_detection.do_novelty_detection(
        baseline_predictor_matrices=baseline_predictor_matrices,
        trial_predictor_matrices=trial_predictor_matrices,
        cnn_model_object=cnn_model_object,
        cnn_feature_layer_name=cnn_feature_layer_name,
        upconvnet_model_object=upconvnet_model_object,
        num_novel_examples=num_novel_examples, multipass=False,
        percent_variance_to_keep=percent_variance_to_keep)
    print(SEPARATOR_STRING)

    print('Denormalizing inputs and outputs of novelty detection...')

    cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
        trainval_io.SOUNDING_FIELDS_KEY] = None

    novelty_dict[novelty_detection.BASELINE_MATRIX_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[baseline_predictor_matrices[0]],
            model_metadata_dict=cnn_metadata_dict)
    )

    novelty_dict[novelty_detection.TRIAL_MATRIX_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[trial_predictor_matrices[0]],
            model_metadata_dict=cnn_metadata_dict)
    )

    novelty_dict[novelty_detection.UPCONV_MATRIX_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[
                novelty_dict[novelty_detection.UPCONV_NORM_MATRIX_KEY]
            ],
            model_metadata_dict=cnn_metadata_dict)
    )[0]

    novelty_dict.pop(novelty_detection.UPCONV_NORM_MATRIX_KEY)

    novelty_dict[novelty_detection.UPCONV_SVD_MATRIX_KEY] = (
        model_interpretation.denormalize_data(
            list_of_input_matrices=[
                novelty_dict[novelty_detection.UPCONV_NORM_SVD_MATRIX_KEY]
            ],
            model_metadata_dict=cnn_metadata_dict)
    )[0]

    novelty_dict.pop(novelty_detection.UPCONV_NORM_SVD_MATRIX_KEY)

    novelty_dict = novelty_detection.add_metadata(
        novelty_dict=novelty_dict,
        baseline_full_id_strings=baseline_full_id_strings,
        baseline_times_unix_sec=baseline_times_unix_sec,
        trial_full_id_strings=trial_full_id_strings,
        trial_times_unix_sec=trial_times_unix_sec,
        cnn_file_name=cnn_file_name,
        upconvnet_file_name=upconvnet_file_name)

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    novelty_detection.write_standard_file(
        novelty_dict=novelty_dict, pickle_file_name=output_file_name)
def _run(storm_metafile_name, top_tracking_dir_name, lead_time_seconds,
         output_file_name):
    """Plots spatial distribution of examples (storm objects) in file.

    This is effectively the main method.

    :param storm_metafile_name: See documentation at top of file.
    :param top_tracking_dir_name: Same.
    :param lead_time_seconds: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    # Read storm metadata.
    print('Reading storm metadata from: "{0:s}"...'.format(
        storm_metafile_name
    ))
    orig_full_id_strings, orig_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name)
    )
    orig_primary_id_strings = temporal_tracking.full_to_partial_ids(
        orig_full_id_strings
    )[0]

    # Find relevant tracking files.
    spc_date_strings = [
        time_conversion.time_to_spc_date_string(t)
        for t in orig_times_unix_sec
    ]
    spc_date_strings += [
        time_conversion.time_to_spc_date_string(t + lead_time_seconds)
        for t in orig_times_unix_sec
    ]
    spc_date_strings = list(set(spc_date_strings))

    tracking_file_names = []

    for this_spc_date_string in spc_date_strings:
        tracking_file_names += tracking_io.find_files_one_spc_date(
            top_tracking_dir_name=top_tracking_dir_name,
            tracking_scale_metres2=DUMMY_TRACKING_SCALE_METRES2,
            source_name=tracking_utils.SEGMOTION_NAME,
            spc_date_string=this_spc_date_string,
            raise_error_if_missing=False
        )[0]

    file_times_unix_sec = numpy.array(
        [tracking_io.file_name_to_time(f) for f in tracking_file_names],
        dtype=int
    )

    num_orig_storm_objects = len(orig_full_id_strings)
    num_files = len(file_times_unix_sec)
    keep_file_flags = numpy.full(num_files, 0, dtype=bool)

    for i in range(num_orig_storm_objects):
        these_flags = numpy.logical_and(
            file_times_unix_sec >= orig_times_unix_sec[i],
            file_times_unix_sec <= orig_times_unix_sec[i] + lead_time_seconds
        )
        keep_file_flags = numpy.logical_or(keep_file_flags, these_flags)

    del file_times_unix_sec
    keep_file_indices = numpy.where(keep_file_flags)[0]
    tracking_file_names = [tracking_file_names[k] for k in keep_file_indices]

    # Read relevant tracking files.
    num_files = len(tracking_file_names)
    storm_object_tables = [None] * num_files
    print(SEPARATOR_STRING)

    for i in range(num_files):
        print('Reading data from: "{0:s}"...'.format(tracking_file_names[i]))
        this_table = tracking_io.read_file(tracking_file_names[i])

        storm_object_tables[i] = this_table.loc[
            this_table[tracking_utils.PRIMARY_ID_COLUMN].isin(
                numpy.array(orig_primary_id_strings)
            )
        ]

        if i == 0:
            continue

        storm_object_tables[i] = storm_object_tables[i].align(
            storm_object_tables[0], axis=1
        )[0]

    storm_object_table = pandas.concat(
        storm_object_tables, axis=0, ignore_index=True)
    print(SEPARATOR_STRING)

    # Find relevant storm objects.
    orig_object_rows = tracking_utils.find_storm_objects(
        all_id_strings=storm_object_table[
            tracking_utils.FULL_ID_COLUMN].values.tolist(),
        all_times_unix_sec=storm_object_table[
            tracking_utils.VALID_TIME_COLUMN].values,
        id_strings_to_keep=orig_full_id_strings,
        times_to_keep_unix_sec=orig_times_unix_sec)

    good_object_rows = numpy.array([], dtype=int)

    for i in range(num_orig_storm_objects):
        # Non-merging successors only!
        first_rows = temporal_tracking.find_successors(
            storm_object_table=storm_object_table,
            target_row=orig_object_rows[i],
            num_seconds_forward=lead_time_seconds,
            max_num_sec_id_changes=1,
            change_type_string=temporal_tracking.SPLIT_STRING,
            return_all_on_path=True)

        second_rows = temporal_tracking.find_successors(
            storm_object_table=storm_object_table,
            target_row=orig_object_rows[i],
            num_seconds_forward=lead_time_seconds,
            max_num_sec_id_changes=0,
            change_type_string=temporal_tracking.MERGER_STRING,
            return_all_on_path=True)

        first_rows = first_rows.tolist()
        second_rows = second_rows.tolist()
        these_rows = set(first_rows) & set(second_rows)
        these_rows = numpy.array(list(these_rows), dtype=int)

        good_object_rows = numpy.concatenate((good_object_rows, these_rows))

    good_object_rows = numpy.unique(good_object_rows)
    storm_object_table = storm_object_table.iloc[good_object_rows]

    times_of_day_sec = numpy.mod(
        storm_object_table[tracking_utils.VALID_TIME_COLUMN].values,
        NUM_SECONDS_IN_DAY
    )
    storm_object_table = storm_object_table.assign(
        **{tracking_utils.VALID_TIME_COLUMN: times_of_day_sec}
    )

    min_plot_latitude_deg = -LATLNG_BUFFER_DEG + numpy.min(
        storm_object_table[tracking_utils.CENTROID_LATITUDE_COLUMN].values
    )
    max_plot_latitude_deg = LATLNG_BUFFER_DEG + numpy.max(
        storm_object_table[tracking_utils.CENTROID_LATITUDE_COLUMN].values
    )
    min_plot_longitude_deg = -LATLNG_BUFFER_DEG + numpy.min(
        storm_object_table[tracking_utils.CENTROID_LONGITUDE_COLUMN].values
    )
    max_plot_longitude_deg = LATLNG_BUFFER_DEG + numpy.max(
        storm_object_table[tracking_utils.CENTROID_LONGITUDE_COLUMN].values
    )

    _, axes_object, basemap_object = (
        plotting_utils.create_equidist_cylindrical_map(
            min_latitude_deg=min_plot_latitude_deg,
            max_latitude_deg=max_plot_latitude_deg,
            min_longitude_deg=min_plot_longitude_deg,
            max_longitude_deg=max_plot_longitude_deg,
            resolution_string='i')
    )

    plotting_utils.plot_coastlines(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=BORDER_COLOUR, line_width=BORDER_WIDTH * 2)
    plotting_utils.plot_countries(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=BORDER_COLOUR, line_width=BORDER_WIDTH)
    plotting_utils.plot_states_and_provinces(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=BORDER_COLOUR, line_width=BORDER_WIDTH)
    plotting_utils.plot_parallels(
        basemap_object=basemap_object, axes_object=axes_object,
        num_parallels=NUM_PARALLELS, line_width=BORDER_WIDTH)
    plotting_utils.plot_meridians(
        basemap_object=basemap_object, axes_object=axes_object,
        num_meridians=NUM_MERIDIANS, line_width=BORDER_WIDTH)

    # colour_bar_object = storm_plotting.plot_storm_tracks(
    #     storm_object_table=storm_object_table, axes_object=axes_object,
    #     basemap_object=basemap_object, colour_map_object=COLOUR_MAP_OBJECT,
    #     colour_min_unix_sec=0, colour_max_unix_sec=NUM_SECONDS_IN_DAY - 1,
    #     line_width=TRACK_LINE_WIDTH,
    #     start_marker_type=None, end_marker_type=None
    # )

    colour_bar_object = storm_plotting.plot_storm_centroids(
        storm_object_table=storm_object_table, axes_object=axes_object,
        basemap_object=basemap_object, colour_map_object=COLOUR_MAP_OBJECT,
        colour_min_unix_sec=0, colour_max_unix_sec=NUM_SECONDS_IN_DAY - 1)

    tick_times_unix_sec = numpy.linspace(
        0, NUM_SECONDS_IN_DAY, num=NUM_HOURS_IN_DAY + 1, dtype=int
    )
    tick_times_unix_sec = tick_times_unix_sec[:-1]
    tick_times_unix_sec = tick_times_unix_sec[::2]

    tick_time_strings = [
        time_conversion.unix_sec_to_string(t, COLOUR_BAR_TIME_FORMAT)
        for t in tick_times_unix_sec
    ]

    colour_bar_object.set_ticks(tick_times_unix_sec)
    colour_bar_object.set_ticklabels(tick_time_strings)

    print('Saving figure to: "{0:s}"...'.format(output_file_name))
    pyplot.savefig(
        output_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight')
    pyplot.close()
def _run(activation_file_name, storm_metafile_name, num_examples,
         allow_whitespace, top_example_dir_name, radar_field_names,
         radar_heights_m_agl, plot_soundings, num_radar_rows,
         num_radar_columns, output_dir_name):
    """Plots many dataset examples (storm objects).

    This is effectively the main method.

    :param activation_file_name: See documentation at top of file.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param allow_whitespace: Same.
    :param top_example_dir_name: Same.
    :param radar_field_names: Same.
    :param radar_heights_m_agl: Same.
    :param plot_soundings: Same.
    :param num_radar_rows: Same.
    :param num_radar_columns: Same.
    :param output_dir_name: Same.
    :raises: TypeError: if activation file contains activations for more than
        one model component.
    """

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)

    storm_activations = None
    if activation_file_name in ['', 'None']:
        activation_file_name = None

    if activation_file_name is None:
        print('Reading data from: "{0:s}"...'.format(storm_metafile_name))
        full_storm_id_strings, storm_times_unix_sec = (
            tracking_io.read_ids_and_times(storm_metafile_name))

        training_option_dict = dict()
        training_option_dict[trainval_io.RADAR_FIELDS_KEY] = radar_field_names
        training_option_dict[
            trainval_io.RADAR_HEIGHTS_KEY] = radar_heights_m_agl
        training_option_dict[
            trainval_io.SOUNDING_FIELDS_KEY] = SOUNDING_FIELD_NAMES
        training_option_dict[
            trainval_io.SOUNDING_HEIGHTS_KEY] = SOUNDING_HEIGHTS_M_AGL

        training_option_dict[trainval_io.NUM_ROWS_KEY] = num_radar_rows
        training_option_dict[trainval_io.NUM_COLUMNS_KEY] = num_radar_columns
        training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
        training_option_dict[trainval_io.TARGET_NAME_KEY] = DUMMY_TARGET_NAME
        training_option_dict[trainval_io.BINARIZE_TARGET_KEY] = False
        training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
        training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

        model_metadata_dict = {
            cnn.TRAINING_OPTION_DICT_KEY: training_option_dict,
            cnn.LAYER_OPERATIONS_KEY: None,
        }
    else:
        print('Reading data from: "{0:s}"...'.format(activation_file_name))
        activation_matrix, activation_metadata_dict = (
            model_activation.read_file(activation_file_name))

        num_model_components = activation_matrix.shape[1]
        if num_model_components > 1:
            error_string = (
                'The file should contain activations for only one model '
                'component, not {0:d}.').format(num_model_components)
            raise TypeError(error_string)

        full_storm_id_strings = activation_metadata_dict[
            model_activation.FULL_IDS_KEY]
        storm_times_unix_sec = activation_metadata_dict[
            model_activation.STORM_TIMES_KEY]
        storm_activations = activation_matrix[:, 0]

        model_file_name = activation_metadata_dict[
            model_activation.MODEL_FILE_NAME_KEY]
        model_metafile_name = '{0:s}/model_metadata.p'.format(
            os.path.split(model_file_name)[0])

        print('Reading metadata from: "{0:s}"...'.format(model_metafile_name))
        model_metadata_dict = cnn.read_model_metadata(model_metafile_name)

        training_option_dict = model_metadata_dict[
            cnn.TRAINING_OPTION_DICT_KEY]
        training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
        training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
        training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None
        model_metadata_dict[
            cnn.TRAINING_OPTION_DICT_KEY] = training_option_dict

    model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
        trainval_io.UPSAMPLE_REFLECTIVITY_KEY] = False

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

        if storm_activations is not None:
            storm_activations = storm_activations[:num_examples]

    print(SEPARATOR_STRING)

    list_of_predictor_matrices = testing_io.read_specific_examples(
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
        top_example_dir_name=top_example_dir_name,
        list_of_layer_operation_dicts=model_metadata_dict[
            cnn.LAYER_OPERATIONS_KEY]
    )[0]
    print(SEPARATOR_STRING)

    plot_examples(
        list_of_predictor_matrices=list_of_predictor_matrices,
        model_metadata_dict=model_metadata_dict,
        output_dir_name=output_dir_name, plot_soundings=plot_soundings,
        allow_whitespace=allow_whitespace, pmm_flag=False,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        storm_activations=storm_activations)
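# The `0 < num_examples < len(...)` idiom above relies on Python's chained
# comparison: any non-positive value means "keep all examples".  A minimal
# stand-alone sketch of the same subsetting logic (names here are
# hypothetical, not from this codebase):
def subset_first_n(id_strings, times_unix_sec, num_examples):
    """Keeps first `num_examples` storm objects, or all if N is non-positive."""
    if 0 < num_examples < len(id_strings):
        return id_strings[:num_examples], times_unix_sec[:num_examples]
    return id_strings, times_unix_sec

ids, times = subset_first_n(['A', 'B', 'C'], [0, 300, 600], num_examples=2)
assert ids == ['A', 'B']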
def _run(model_file_name, init_function_name, storm_metafile_name,
         num_examples, top_example_dir_name, component_type_string,
         target_class, layer_name, neuron_indices, channel_index,
         num_iterations, ideal_activation, learning_rate, l2_weight,
         radar_constraint_weight, minmax_constraint_weight, output_file_name):
    """Runs backwards optimization on a trained CNN.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param init_function_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_example_dir_name: Same.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param num_iterations: Same.
    :param ideal_activation: Same.
    :param learning_rate: Same.
    :param l2_weight: Same.
    :param radar_constraint_weight: Same.
    :param minmax_constraint_weight: Same.
    :param output_file_name: Same.
    """

    if l2_weight <= 0:
        l2_weight = None
    if radar_constraint_weight <= 0:
        radar_constraint_weight = None
    if minmax_constraint_weight <= 0:
        minmax_constraint_weight = None
    if ideal_activation <= 0:
        ideal_activation = None
    if init_function_name in ['', 'None']:
        init_function_name = None

    model_interpretation.check_component_type(component_type_string)

    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)

    input_matrices = None
    init_function = None
    full_storm_id_strings = None
    storm_times_unix_sec = None
    sounding_pressure_matrix_pa = None

    if init_function_name is None:
        print('Reading storm metadata from: "{0:s}"...'.format(
            storm_metafile_name))
        full_storm_id_strings, storm_times_unix_sec = (
            tracking_io.read_ids_and_times(storm_metafile_name))

        if 0 < num_examples < len(full_storm_id_strings):
            full_storm_id_strings = full_storm_id_strings[:num_examples]
            storm_times_unix_sec = storm_times_unix_sec[:num_examples]

        example_dict = testing_io.read_predictors_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_full_id_strings=full_storm_id_strings,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
            layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY])
        print(SEPARATOR_STRING)

        input_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
        sounding_pressure_matrix_pa = example_dict[
            testing_io.SOUNDING_PRESSURES_KEY]
        num_examples = input_matrices[0].shape[0]
    else:
        num_examples = 1
        init_function = _create_initializer(
            init_function_name=init_function_name,
            model_metadata_dict=model_metadata_dict)

    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)

    output_matrices = None
    initial_activations = numpy.full(num_examples, numpy.nan)
    final_activations = numpy.full(num_examples, numpy.nan)

    for i in range(num_examples):
        if init_function_name is None:
            this_init_arg = [a[[i], ...] for a in input_matrices]
        else:
            this_init_arg = init_function

        if component_type_string == CLASS_COMPONENT_TYPE_STRING:
            print((
                '\nOptimizing example {0:d} of {1:d} for target class '
                '{2:d}...'
            ).format(i + 1, num_examples, target_class))

            this_result_dict = backwards_opt.optimize_input_for_class(
                model_object=model_object, target_class=target_class,
                init_function_or_matrices=this_init_arg,
                num_iterations=num_iterations, learning_rate=learning_rate,
                l2_weight=l2_weight,
                radar_constraint_weight=radar_constraint_weight,
                minmax_constraint_weight=minmax_constraint_weight,
                model_metadata_dict=model_metadata_dict)

        elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
            print((
                '\nOptimizing example {0:d} of {1:d} for neuron {2:s} in '
                'layer "{3:s}"...'
            ).format(i + 1, num_examples, str(neuron_indices), layer_name))

            this_result_dict = backwards_opt.optimize_input_for_neuron(
                model_object=model_object, layer_name=layer_name,
                neuron_indices=neuron_indices,
                init_function_or_matrices=this_init_arg,
                num_iterations=num_iterations, learning_rate=learning_rate,
                l2_weight=l2_weight, ideal_activation=ideal_activation,
                radar_constraint_weight=radar_constraint_weight,
                minmax_constraint_weight=minmax_constraint_weight,
                model_metadata_dict=model_metadata_dict)

        else:
            print((
                '\nOptimizing example {0:d} of {1:d} for channel {2:d} in '
                'layer "{3:s}"...'
            ).format(i + 1, num_examples, channel_index, layer_name))

            this_result_dict = backwards_opt.optimize_input_for_channel(
                model_object=model_object, layer_name=layer_name,
                channel_index=channel_index,
                init_function_or_matrices=this_init_arg,
                stat_function_for_neuron_activations=K.max,
                num_iterations=num_iterations, learning_rate=learning_rate,
                l2_weight=l2_weight, ideal_activation=ideal_activation,
                radar_constraint_weight=radar_constraint_weight,
                minmax_constraint_weight=minmax_constraint_weight,
                model_metadata_dict=model_metadata_dict)

        initial_activations[i] = this_result_dict[
            backwards_opt.INITIAL_ACTIVATION_KEY]
        final_activations[i] = this_result_dict[
            backwards_opt.FINAL_ACTIVATION_KEY]
        these_output_matrices = this_result_dict[
            backwards_opt.NORM_OUTPUT_MATRICES_KEY]

        if output_matrices is None:
            output_matrices = [None] * len(these_output_matrices)

        for k in range(len(output_matrices)):
            if output_matrices[k] is None:
                output_matrices[k] = these_output_matrices[k] + 0.
            else:
                output_matrices[k] = numpy.concatenate(
                    (output_matrices[k], these_output_matrices[k]), axis=0)

        if init_function_name is None:
            continue

        these_input_matrices = this_result_dict[
            backwards_opt.NORM_INPUT_MATRICES_KEY]

        if input_matrices is None:
            input_matrices = [None] * len(these_input_matrices)

        for k in range(len(input_matrices)):
            if input_matrices[k] is None:
                input_matrices[k] = these_input_matrices[k] + 0.
            else:
                input_matrices[k] = numpy.concatenate(
                    (input_matrices[k], these_input_matrices[k]), axis=0)

    print(SEPARATOR_STRING)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]

    print('Denormalizing input examples...')
    input_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=input_matrices,
        training_option_dict=training_option_dict)
    input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Denormalizing optimized examples...')
    output_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=output_matrices,
        training_option_dict=training_option_dict)
    output_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=output_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing results to: "{0:s}"...'.format(output_file_name))
    bwo_metadata_dict = backwards_opt.check_metadata(
        component_type_string=component_type_string,
        num_iterations=num_iterations, learning_rate=learning_rate,
        target_class=target_class, layer_name=layer_name,
        ideal_activation=ideal_activation, neuron_indices=neuron_indices,
        channel_index=channel_index, l2_weight=l2_weight,
        radar_constraint_weight=radar_constraint_weight,
        minmax_constraint_weight=minmax_constraint_weight)

    backwards_opt.write_standard_file(
        pickle_file_name=output_file_name,
        denorm_input_matrices=input_matrices,
        denorm_output_matrices=output_matrices,
        initial_activations=initial_activations,
        final_activations=final_activations,
        model_file_name=model_file_name, metadata_dict=bwo_metadata_dict,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
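# Backwards optimization adjusts the *input* by gradient ascent on some model
# activation (here: class probability, neuron, or channel).  A minimal generic
# sketch of that loop with tf.keras, for a single-input model -- not the
# backwards_opt module's actual implementation, which also supports L2 and
# physical constraints:
import numpy
import tensorflow as tf

def optimize_input_for_class_sketch(model_object, target_class, init_matrix,
                                    num_iterations=200, learning_rate=0.01):
    """Maximizes probability of `target_class` by gradient ascent on input."""
    input_tensor = tf.Variable(init_matrix.astype('float32'))

    for _ in range(num_iterations):
        with tf.GradientTape() as tape:
            # Forward pass; probability of the target class for example 0.
            class_probability = model_object(input_tensor)[0, target_class]

        # Gradient of activation w.r.t. input, then one ascent step.
        gradient_tensor = tape.gradient(class_probability, input_tensor)
        input_tensor.assign_add(learning_rate * gradient_tensor)

    return input_tensor.numpy()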
def _run(input_example_dir_name, storm_metafile_name, num_examples_in_subset,
         subset_randomly, output_example_file_name):
    """Extracts desired examples and writes them to one file.

    This is effectively the main method.

    :param input_example_dir_name: See documentation at top of file.
    :param storm_metafile_name: Same.
    :param num_examples_in_subset: Same.
    :param subset_randomly: Same.
    :param output_example_file_name: Same.
    """

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    example_id_strings, example_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name))

    if not 0 < num_examples_in_subset < len(example_id_strings):
        num_examples_in_subset = None

    if num_examples_in_subset is not None:
        if subset_randomly:
            these_indices = numpy.linspace(
                0, len(example_id_strings) - 1, num=len(example_id_strings),
                dtype=int)
            these_indices = numpy.random.choice(
                these_indices, size=num_examples_in_subset, replace=False)

            example_id_strings = [example_id_strings[k] for k in these_indices]
            example_times_unix_sec = example_times_unix_sec[these_indices]
        else:
            example_id_strings = example_id_strings[:num_examples_in_subset]
            example_times_unix_sec = (
                example_times_unix_sec[:num_examples_in_subset])

    example_spc_date_strings = numpy.array([
        time_conversion.time_to_spc_date_string(t)
        for t in example_times_unix_sec
    ])
    spc_date_strings = numpy.unique(example_spc_date_strings)

    example_file_name_by_day = [
        input_examples.find_example_file(
            top_directory_name=input_example_dir_name, shuffled=False,
            spc_date_string=d, raise_error_if_missing=True)
        for d in spc_date_strings
    ]

    num_days = len(spc_date_strings)

    for i in range(num_days):
        print('Reading data from: "{0:s}"...'.format(
            example_file_name_by_day[i]))
        all_example_dict = input_examples.read_example_file(
            netcdf_file_name=example_file_name_by_day[i],
            read_all_target_vars=True)

        these_indices = numpy.where(
            example_spc_date_strings == spc_date_strings[i])[0]

        desired_indices = tracking_utils.find_storm_objects(
            all_id_strings=all_example_dict[input_examples.FULL_IDS_KEY],
            all_times_unix_sec=all_example_dict[
                input_examples.STORM_TIMES_KEY],
            id_strings_to_keep=[example_id_strings[k] for k in these_indices],
            times_to_keep_unix_sec=example_times_unix_sec[these_indices],
            allow_missing=False)

        desired_example_dict = input_examples.subset_examples(
            example_dict=all_example_dict, indices_to_keep=desired_indices)

        print('Writing {0:d} desired examples to: "{1:s}"...'.format(
            len(desired_indices), output_example_file_name))
        input_examples.write_example_file(
            netcdf_file_name=output_example_file_name,
            example_dict=desired_example_dict, append_to_file=i > 0)
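# The random subset above builds an explicit index array with linspace before
# sampling.  An equivalent, slightly more direct idiom (a stand-alone sketch
# with toy data, not a behaviour change to the script): passing an int to
# numpy.random.choice samples from range(n) without replacement.
import numpy

example_id_strings = ['storm_a', 'storm_b', 'storm_c', 'storm_d']
example_times_unix_sec = numpy.array([0, 300, 600, 900])

these_indices = numpy.random.choice(
    len(example_id_strings), size=2, replace=False)
subset_id_strings = [example_id_strings[k] for k in these_indices]
subset_times_unix_sec = example_times_unix_sec[these_indices]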
def _run(upconvnet_file_name, storm_metafile_name, num_examples,
         top_example_dir_name, top_output_dir_name):
    """Plots upconvnet reconstructions of many examples (storm objects).

    This is effectively the main method.

    :param upconvnet_file_name: See documentation at top of file.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_example_dir_name: Same.
    :param top_output_dir_name: Same.
    """

    print('Reading trained upconvnet from: "{0:s}"...'.format(
        upconvnet_file_name))
    upconvnet_model_object = cnn.read_model(upconvnet_file_name)
    upconvnet_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(upconvnet_file_name)[0]
    )

    print('Reading upconvnet metadata from: "{0:s}"...'.format(
        upconvnet_metafile_name))
    upconvnet_metadata_dict = upconvnet.read_model_metadata(
        upconvnet_metafile_name)
    cnn_file_name = upconvnet_metadata_dict[upconvnet.CNN_FILE_KEY]

    print('Reading trained CNN from: "{0:s}"...'.format(cnn_file_name))
    cnn_model_object = cnn.read_model(cnn_file_name)
    cnn_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(cnn_file_name)[0]
    )

    print('Reading CNN metadata from: "{0:s}"...'.format(cnn_metafile_name))
    cnn_metadata_dict = cnn.read_model_metadata(cnn_metafile_name)
    training_option_dict = cnn_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]

    print('Reading storm IDs and times from: "{0:s}"...'.format(
        storm_metafile_name))
    storm_ids, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)

    if 0 < num_examples < len(storm_ids):
        storm_ids = storm_ids[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    print(SEPARATOR_STRING)
    list_of_predictor_matrices = testing_io.read_specific_examples(
        desired_storm_ids=storm_ids,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        top_example_dir_name=top_example_dir_name,
        list_of_layer_operation_dicts=cnn_metadata_dict[
            cnn.LAYER_OPERATIONS_KEY]
    )[0]
    print(SEPARATOR_STRING)

    actual_radar_matrix = list_of_predictor_matrices[0]
    have_soundings = (
        training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] is not None)

    if have_soundings:
        sounding_matrix = list_of_predictor_matrices[-1]
    else:
        sounding_matrix = None

    feature_matrix = cnn.apply_2d_or_3d_cnn(
        model_object=cnn_model_object,
        radar_image_matrix=actual_radar_matrix,
        sounding_matrix=sounding_matrix, verbose=True, return_features=True,
        feature_layer_name=upconvnet_metadata_dict[
            upconvnet.CNN_FEATURE_LAYER_KEY]
    )
    print('\n')

    reconstructed_radar_matrix = upconvnet.apply_upconvnet(
        model_object=upconvnet_model_object, feature_matrix=feature_matrix,
        verbose=True)
    print('\n')

    print('Denormalizing actual and reconstructed radar images...')
    cnn_metadata_dict[
        cnn.TRAINING_OPTION_DICT_KEY][trainval_io.SOUNDING_FIELDS_KEY] = None

    actual_radar_matrix = model_interpretation.denormalize_data(
        list_of_input_matrices=[actual_radar_matrix],
        model_metadata_dict=cnn_metadata_dict
    )[0]
    reconstructed_radar_matrix = model_interpretation.denormalize_data(
        list_of_input_matrices=[reconstructed_radar_matrix],
        model_metadata_dict=cnn_metadata_dict
    )[0]
    print(SEPARATOR_STRING)

    actual_output_dir_name = '{0:s}/actual_images'.format(top_output_dir_name)
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=actual_output_dir_name)

    # TODO(thunderhoser): Calling a method in another script is hacky.  If
    # this method is going to be reused, it should live in a module.
    plot_input_examples.plot_examples(
        list_of_predictor_matrices=[actual_radar_matrix],
        storm_ids=storm_ids, storm_times_unix_sec=storm_times_unix_sec,
        model_metadata_dict=cnn_metadata_dict,
        output_dir_name=actual_output_dir_name)
    print(SEPARATOR_STRING)

    reconstructed_output_dir_name = '{0:s}/reconstructed_images'.format(
        top_output_dir_name)
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=reconstructed_output_dir_name)

    plot_input_examples.plot_examples(
        list_of_predictor_matrices=[reconstructed_radar_matrix],
        storm_ids=storm_ids, storm_times_unix_sec=storm_times_unix_sec,
        model_metadata_dict=cnn_metadata_dict,
        output_dir_name=reconstructed_output_dir_name)
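# The reconstruction above is encode-then-decode: run the CNN up to its
# feature layer, then feed those features to the upconvnet.  A minimal generic
# sketch of the encoding step with tf.keras, assuming a single-input CNN (the
# layer name 'flatten_1' is hypothetical):
from tensorflow import keras

def extract_features_sketch(cnn_model_object, radar_matrix,
                            feature_layer_name='flatten_1'):
    """Returns activations of one intermediate layer for a batch of inputs."""
    feature_model_object = keras.models.Model(
        inputs=cnn_model_object.input,
        outputs=cnn_model_object.get_layer(name=feature_layer_name).output)

    return feature_model_object.predict(radar_matrix, batch_size=32)

# The upconvnet is itself a Keras model whose input is the feature vector, so
# decoding is just: reconstructed = upconvnet_model_object.predict(features).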
def _run(model_file_name, target_class, target_layer_name,
         top_example_dir_name, storm_metafile_name, num_examples,
         output_file_name):
    """Runs Grad-CAM (gradient-weighted class-activation maps).

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param target_class: Same.
    :param target_layer_name: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_id_strings, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_id_strings):
        full_id_strings = full_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_input_matrices, sounding_pressure_matrix_pascals = (
        testing_io.read_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_full_id_strings=full_id_strings,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=training_option_dict,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]))
    print(SEPARATOR_STRING)

    list_of_cam_matrices = None
    list_of_guided_cam_matrices = None
    new_model_object = None
    num_examples = len(full_id_strings)

    for i in range(num_examples):
        print('Running Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))
        these_input_matrices = [a[[i], ...] for a in list_of_input_matrices]

        these_cam_matrices = gradcam.run_gradcam(
            model_object=model_object,
            list_of_input_matrices=these_input_matrices,
            target_class=target_class, target_layer_name=target_layer_name)

        print('Running guided Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))

        these_guided_cam_matrices, new_model_object = (
            gradcam.run_guided_gradcam(
                orig_model_object=model_object,
                list_of_input_matrices=these_input_matrices,
                target_layer_name=target_layer_name,
                list_of_cam_matrices=these_cam_matrices,
                new_model_object=new_model_object))

        if list_of_cam_matrices is None:
            list_of_cam_matrices = copy.deepcopy(these_cam_matrices)
            list_of_guided_cam_matrices = copy.deepcopy(
                these_guided_cam_matrices)
        else:
            for j in range(len(these_cam_matrices)):
                if list_of_cam_matrices[j] is None:
                    continue

                list_of_cam_matrices[j] = numpy.concatenate(
                    (list_of_cam_matrices[j], these_cam_matrices[j]), axis=0)
                list_of_guided_cam_matrices[j] = numpy.concatenate(
                    (list_of_guided_cam_matrices[j],
                     these_guided_cam_matrices[j]),
                    axis=0)

    print(SEPARATOR_STRING)
    upsample_refl = training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY]

    if upsample_refl:
        list_of_cam_matrices[0] = numpy.expand_dims(
            list_of_cam_matrices[0], axis=-1)

        num_channels = list_of_input_matrices[0].shape[-1]
        list_of_cam_matrices[0] = numpy.repeat(
            a=list_of_cam_matrices[0], repeats=num_channels, axis=-1)

        list_of_cam_matrices = trainval_io.separate_shear_and_reflectivity(
            list_of_input_matrices=list_of_cam_matrices,
            training_option_dict=training_option_dict)
        list_of_cam_matrices[0] = list_of_cam_matrices[0][..., 0]
        list_of_cam_matrices[1] = list_of_cam_matrices[1][..., 0]

        list_of_guided_cam_matrices = (
            trainval_io.separate_shear_and_reflectivity(
                list_of_input_matrices=list_of_guided_cam_matrices,
                training_option_dict=training_option_dict)
        )

    print('Denormalizing predictors...')
    list_of_input_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=list_of_input_matrices,
        training_option_dict=training_option_dict)
    list_of_input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing class-activation maps to file: "{0:s}"...'.format(
        output_file_name))
    gradcam.write_standard_file(
        pickle_file_name=output_file_name,
        list_of_input_matrices=list_of_input_matrices,
        list_of_cam_matrices=list_of_cam_matrices,
        list_of_guided_cam_matrices=list_of_guided_cam_matrices,
        model_file_name=model_file_name, full_id_strings=full_id_strings,
        storm_times_unix_sec=storm_times_unix_sec, target_class=target_class,
        target_layer_name=target_layer_name,
        sounding_pressure_matrix_pascals=sounding_pressure_matrix_pascals)
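# The core Grad-CAM recipe behind a call like gradcam.run_gradcam: take the
# gradient of the target-class score w.r.t. the target conv layer's feature
# maps, spatially average the gradient to get one weight per channel, then
# form a ReLU'd weighted sum of the feature maps.  A generic tf.keras sketch
# for a single-input model -- not the gradcam module itself:
import numpy
import tensorflow as tf
from tensorflow import keras

def run_gradcam_sketch(model_object, input_matrix, target_class,
                       target_layer_name):
    """Returns class-activation map for one example (2-D numpy array)."""
    grad_model_object = keras.models.Model(
        inputs=model_object.input,
        outputs=[model_object.get_layer(name=target_layer_name).output,
                 model_object.output])

    with tf.GradientTape() as tape:
        feature_tensor, prediction_tensor = grad_model_object(
            tf.constant(input_matrix))
        class_score = prediction_tensor[0, target_class]

    # Gradient of class score w.r.t. feature maps (batch x rows x cols x chan).
    gradient_tensor = tape.gradient(class_score, feature_tensor)

    # One weight per channel = spatially averaged gradient.
    channel_weights = tf.reduce_mean(gradient_tensor, axis=(0, 1, 2))
    cam_matrix = tf.reduce_sum(
        feature_tensor[0, ...] * channel_weights, axis=-1)

    return numpy.maximum(cam_matrix.numpy(), 0.)  # ReLU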
def _run(model_file_name, target_class, target_layer_name,
         top_example_dir_name, storm_metafile_name, num_examples,
         randomize_weights, cascading_random, output_file_name):
    """Runs Grad-CAM (gradient-weighted class-activation maps).

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param target_class: Same.
    :param target_layer_name: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param randomize_weights: Same.
    :param cascading_random: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    output_dir_name, pathless_output_file_name = os.path.split(
        output_file_name)
    extensionless_output_file_name, output_file_extension = os.path.splitext(
        pathless_output_file_name)

    if randomize_weights:
        conv_dense_layer_names = _find_conv_and_dense_layers(model_object)
        conv_dense_layer_names.reverse()
        num_sets = len(conv_dense_layer_names)
    else:
        conv_dense_layer_names = []
        num_sets = 1

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name))
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
    sounding_pressure_matrix_pa = (
        example_dict[testing_io.SOUNDING_PRESSURES_KEY])

    print('Denormalizing model inputs...')
    denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(predictor_matrices),
        training_option_dict=training_option_dict)
    denorm_predictor_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_predictor_matrices,
        model_metadata_dict=model_metadata_dict)
    print(SEPARATOR_STRING)

    for k in range(num_sets):
        if randomize_weights:
            if cascading_random:
                _reset_weights_in_layer(
                    model_object=model_object,
                    layer_name=conv_dense_layer_names[k])

                this_model_object = model_object
                this_output_file_name = (
                    '{0:s}/{1:s}_cascading-random_{2:s}{3:s}'
                ).format(
                    output_dir_name, extensionless_output_file_name,
                    conv_dense_layer_names[k].replace('_', '-'),
                    output_file_extension)
            else:
                this_model_object = keras.models.Model.from_config(
                    model_object.get_config())
                this_model_object.set_weights(model_object.get_weights())

                _reset_weights_in_layer(
                    model_object=this_model_object,
                    layer_name=conv_dense_layer_names[k])

                this_output_file_name = '{0:s}/{1:s}_random_{2:s}{3:s}'.format(
                    output_dir_name, extensionless_output_file_name,
                    conv_dense_layer_names[k].replace('_', '-'),
                    output_file_extension)
        else:
            this_model_object = model_object
            this_output_file_name = output_file_name

        # print(K.eval(
        #     this_model_object.get_layer(name='dense_53').weights[0]
        # ))

        these_cam_matrices, these_guided_cam_matrices = (
            _run_gradcam_one_weight_set(
                model_object=this_model_object, target_class=target_class,
                target_layer_name=target_layer_name,
                predictor_matrices=predictor_matrices,
                training_option_dict=training_option_dict))

        print('Writing results to file: "{0:s}"...'.format(
            this_output_file_name))
        gradcam.write_standard_file(
            pickle_file_name=this_output_file_name,
            denorm_predictor_matrices=denorm_predictor_matrices,
            cam_matrices=these_cam_matrices,
            guided_cam_matrices=these_guided_cam_matrices,
            full_storm_id_strings=full_storm_id_strings,
            storm_times_unix_sec=storm_times_unix_sec,
            model_file_name=model_file_name, target_class=target_class,
            target_layer_name=target_layer_name,
            sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
        print(SEPARATOR_STRING)
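# The loop above is a model-randomization sanity check: recompute the
# explanation after destroying learned weights layer by layer, either
# cumulatively on the same model ("cascading") or one layer at a time on a
# fresh copy.  A minimal sketch of what a helper like _reset_weights_in_layer
# plausibly does; the Gaussian scale is an assumption, not taken from this
# codebase:
import numpy

def reset_weights_in_layer_sketch(model_object, layer_name):
    """Replaces one layer's weights with Gaussian noise of the same shapes."""
    layer_object = model_object.get_layer(name=layer_name)

    new_weight_matrices = [
        numpy.random.normal(loc=0., scale=0.1, size=w.shape)
        for w in layer_object.get_weights()
    ]
    layer_object.set_weights(new_weight_matrices)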
def _run(model_file_name, target_class, target_layer_name,
         top_example_dir_name, storm_metafile_name, num_examples,
         output_file_name):
    """Runs Grad-CAM (gradient-weighted class-activation maps).

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param target_class: Same.
    :param target_layer_name: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    storm_ids, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(storm_ids):
        storm_ids = storm_ids[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_input_matrices, sounding_pressure_matrix_pascals = (
        testing_io.read_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_storm_ids=storm_ids,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=training_option_dict,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]))
    print(SEPARATOR_STRING)

    class_activation_matrix = None
    ggradcam_output_matrix = None
    new_model_object = None
    num_examples = len(storm_ids)

    for i in range(num_examples):
        print('Running Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))
        these_input_matrices = [a[[i], ...] for a in list_of_input_matrices]

        this_class_activation_matrix = gradcam.run_gradcam(
            model_object=model_object,
            list_of_input_matrices=these_input_matrices,
            target_class=target_class, target_layer_name=target_layer_name)

        print('Running guided Grad-CAM for example {0:d} of {1:d}...'.format(
            i + 1, num_examples))

        this_ggradcam_output_matrix, new_model_object = (
            gradcam.run_guided_gradcam(
                orig_model_object=model_object,
                list_of_input_matrices=these_input_matrices,
                target_layer_name=target_layer_name,
                class_activation_matrix=this_class_activation_matrix,
                new_model_object=new_model_object))

        this_class_activation_matrix = numpy.expand_dims(
            this_class_activation_matrix, axis=0)
        this_ggradcam_output_matrix = numpy.expand_dims(
            this_ggradcam_output_matrix, axis=0)

        if class_activation_matrix is None:
            class_activation_matrix = this_class_activation_matrix + 0.
            ggradcam_output_matrix = this_ggradcam_output_matrix + 0.
        else:
            class_activation_matrix = numpy.concatenate(
                (class_activation_matrix, this_class_activation_matrix),
                axis=0)
            ggradcam_output_matrix = numpy.concatenate(
                (ggradcam_output_matrix, this_ggradcam_output_matrix), axis=0)

    print(SEPARATOR_STRING)

    print('Denormalizing predictors...')
    list_of_input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing class-activation maps to file: "{0:s}"...'.format(
        output_file_name))
    gradcam.write_standard_file(
        pickle_file_name=output_file_name,
        list_of_input_matrices=list_of_input_matrices,
        class_activation_matrix=class_activation_matrix,
        ggradcam_output_matrix=ggradcam_output_matrix,
        model_file_name=model_file_name, storm_ids=storm_ids,
        storm_times_unix_sec=storm_times_unix_sec, target_class=target_class,
        target_layer_name=target_layer_name,
        sounding_pressure_matrix_pascals=sounding_pressure_matrix_pascals)
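# The loop above grows its output arrays with expand_dims plus repeated
# numpy.concatenate, which re-copies every array on each iteration.  An
# equivalent pattern (a stand-alone sketch with toy data, not a change to the
# script) collects per-example results in a list and stacks once at the end:
import numpy

per_example_matrices = []
for i in range(4):
    this_matrix = numpy.random.rand(32, 32)  # stand-in for one CAM
    per_example_matrices.append(this_matrix)

# numpy.stack adds the leading example axis in one shot.
class_activation_matrix = numpy.stack(per_example_matrices, axis=0)
assert class_activation_matrix.shape == (4, 32, 32)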
def _run(model_file_name, layer_names, top_example_dir_name,
         storm_metafile_name, num_examples, top_output_dir_name):
    """Plots feature maps for one or more CNN layers and many storm objects.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param layer_names: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param top_output_dir_name: Same.
    :raises: ValueError: if feature maps do not have 2 or 3 spatial
        dimensions.
    """

    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_id_strings, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(full_id_strings):
        full_id_strings = full_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_predictor_matrices = testing_io.read_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        list_of_layer_operation_dicts=model_metadata_dict[
            cnn.LAYER_OPERATIONS_KEY]
    )[0]
    print(SEPARATOR_STRING)

    include_soundings = (
        training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] is not None)

    if include_soundings:
        sounding_matrix = list_of_predictor_matrices[-1]
    else:
        sounding_matrix = None

    num_layers = len(layer_names)
    feature_matrix_by_layer = [None] * num_layers

    for k in range(num_layers):
        if model_metadata_dict[cnn.CONV_2D3D_KEY]:
            if training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY]:
                feature_matrix_by_layer[k] = cnn.apply_2d_or_3d_cnn(
                    model_object=model_object,
                    radar_image_matrix=list_of_predictor_matrices[0],
                    sounding_matrix=sounding_matrix, return_features=True,
                    feature_layer_name=layer_names[k])
            else:
                feature_matrix_by_layer[k] = cnn.apply_2d3d_cnn(
                    model_object=model_object,
                    reflectivity_matrix_dbz=list_of_predictor_matrices[0],
                    azimuthal_shear_matrix_s01=list_of_predictor_matrices[1],
                    sounding_matrix=sounding_matrix, return_features=True,
                    feature_layer_name=layer_names[k])
        else:
            feature_matrix_by_layer[k] = cnn.apply_2d_or_3d_cnn(
                model_object=model_object,
                radar_image_matrix=list_of_predictor_matrices[0],
                sounding_matrix=sounding_matrix, return_features=True,
                feature_layer_name=layer_names[k])

    for k in range(num_layers):
        this_output_dir_name = '{0:s}/{1:s}'.format(
            top_output_dir_name, layer_names[k])
        file_system_utils.mkdir_recursive_if_necessary(
            directory_name=this_output_dir_name)

        _plot_feature_maps_one_layer(
            feature_matrix=feature_matrix_by_layer[k],
            full_id_strings=full_id_strings,
            storm_times_unix_sec=storm_times_unix_sec,
            layer_name=layer_names[k],
            output_dir_name=this_output_dir_name)
        print(SEPARATOR_STRING)
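# A helper like _plot_feature_maps_one_layer presumably tiles the channels of
# one layer's feature matrix into a panel grid.  A minimal generic sketch of
# that kind of plot (all names, the colour map, and figure size here are
# illustrative assumptions):
import numpy
from matplotlib import pyplot

def plot_feature_map_grid_sketch(feature_matrix_one_example,
                                 output_file_name):
    """Plots each channel of one example's feature maps (rows x cols x chan)."""
    num_channels = feature_matrix_one_example.shape[-1]
    num_panel_columns = int(numpy.ceil(numpy.sqrt(num_channels)))
    num_panel_rows = int(numpy.ceil(float(num_channels) / num_panel_columns))

    figure_object, axes_object_matrix = pyplot.subplots(
        num_panel_rows, num_panel_columns, figsize=(15, 15), squeeze=False)

    for c in range(num_panel_rows * num_panel_columns):
        this_axes_object = axes_object_matrix.ravel()[c]
        if c >= num_channels:
            this_axes_object.axis('off')  # blank any unused panels
            continue

        this_axes_object.pcolormesh(
            feature_matrix_one_example[..., c], cmap='seismic')
        this_axes_object.set_xticks([])
        this_axes_object.set_yticks([])

    figure_object.savefig(output_file_name, dpi=300, bbox_inches='tight')
    pyplot.close(figure_object)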
def _run(model_file_name, component_type_string, target_class, layer_name,
         ideal_activation, neuron_indices, channel_index,
         top_example_dir_name, storm_metafile_name, num_examples,
         output_file_name):
    """Computes saliency map for each storm object and each model component.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param ideal_activation: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    # Check input args.
    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
    model_interpretation.check_component_type(component_type_string)

    # Read model and metadata.
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    model_object = cnn.read_model(model_file_name)
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    storm_ids, storm_times_unix_sec = tracking_io.read_ids_and_times(
        storm_metafile_name)
    print(SEPARATOR_STRING)

    if 0 < num_examples < len(storm_ids):
        storm_ids = storm_ids[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    list_of_input_matrices, sounding_pressure_matrix_pascals = (
        testing_io.read_specific_examples(
            top_example_dir_name=top_example_dir_name,
            desired_storm_ids=storm_ids,
            desired_times_unix_sec=storm_times_unix_sec,
            option_dict=training_option_dict,
            list_of_layer_operation_dicts=model_metadata_dict[
                cnn.LAYER_OPERATIONS_KEY]))
    print(SEPARATOR_STRING)

    if component_type_string == CLASS_COMPONENT_TYPE_STRING:
        print('Computing saliency maps for target class {0:d}...'.format(
            target_class))
        list_of_saliency_matrices = (
            saliency_maps.get_saliency_maps_for_class_activation(
                model_object=model_object, target_class=target_class,
                list_of_input_matrices=list_of_input_matrices))

    elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
        print((
            'Computing saliency maps for neuron {0:s} in layer "{1:s}"...'
        ).format(str(neuron_indices), layer_name))
        list_of_saliency_matrices = (
            saliency_maps.get_saliency_maps_for_neuron_activation(
                model_object=model_object, layer_name=layer_name,
                neuron_indices=neuron_indices,
                list_of_input_matrices=list_of_input_matrices,
                ideal_activation=ideal_activation))

    else:
        print((
            'Computing saliency maps for channel {0:d} in layer "{1:s}"...'
        ).format(channel_index, layer_name))
        list_of_saliency_matrices = (
            saliency_maps.get_saliency_maps_for_channel_activation(
                model_object=model_object, layer_name=layer_name,
                channel_index=channel_index,
                list_of_input_matrices=list_of_input_matrices,
                stat_function_for_neuron_activations=K.max,
                ideal_activation=ideal_activation))

    print('Denormalizing model inputs...')
    list_of_input_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=list_of_input_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing saliency maps to file: "{0:s}"...'.format(
        output_file_name))
    saliency_metadata_dict = saliency_maps.check_metadata(
        component_type_string=component_type_string,
        target_class=target_class, layer_name=layer_name,
        ideal_activation=ideal_activation, neuron_indices=neuron_indices,
        channel_index=channel_index)

    saliency_maps.write_standard_file(
        pickle_file_name=output_file_name,
        list_of_input_matrices=list_of_input_matrices,
        list_of_saliency_matrices=list_of_saliency_matrices,
        storm_ids=storm_ids, storm_times_unix_sec=storm_times_unix_sec,
        model_file_name=model_file_name,
        saliency_metadata_dict=saliency_metadata_dict,
        sounding_pressure_matrix_pascals=sounding_pressure_matrix_pascals)
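# A saliency map here is the gradient of one model activation (class
# probability, neuron, or channel statistic) with respect to every input
# value.  A minimal generic tf.keras sketch for the class-activation case,
# assuming a single-input model -- not the saliency_maps module's actual
# implementation:
import tensorflow as tf

def get_class_saliency_sketch(model_object, input_matrix, target_class):
    """Returns d(class probability)/d(input), same shape as `input_matrix`."""
    input_tensor = tf.constant(input_matrix, dtype=tf.float32)

    with tf.GradientTape() as tape:
        tape.watch(input_tensor)  # constants are not watched by default
        class_probability_tensor = model_object(input_tensor)[:, target_class]

    return tape.gradient(class_probability_tensor, input_tensor).numpy()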