def test_do_2d_convolution_padding1_stride2(self):
    """Ensures correct output from do_2d_convolution.

    In this case, edges are padded and stride length = 2.
    """

    # `+ 0.` forces a float copy so the shared input constant is never
    # mutated by the convolution routine.
    actual_feature_matrix = standalone_utils.do_2d_convolution(
        feature_matrix=ORIG_FEATURE_MATRIX_2D + 0.,
        kernel_matrix=KERNEL_MATRIX_2D,
        pad_edges=True,
        stride_length_px=2
    )

    matrices_match = numpy.allclose(
        actual_feature_matrix, FEATURE_MATRIX_2D_PADDING1_STRIDE2,
        atol=TOLERANCE
    )
    self.assertTrue(matrices_match)
def _run(include_caption, output_dir_name):
    """Makes animation to explain multivariate convolution.

    This is effectively the main method.

    :param include_caption: See documentation at top of file.
    :param output_dir_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)

    # Convolve the full input grid once; each animation frame highlights a
    # different kernel position over this same output.
    output_feature_matrix = standalone_utils.do_2d_convolution(
        feature_matrix=INPUT_FEATURE_MATRIX, kernel_matrix=KERNEL_MATRIX,
        pad_edges=True, stride_length_px=1)

    # Drop the example and channel axes, keeping one 2-D map.
    output_feature_matrix = output_feature_matrix[0, ..., 0]

    num_grid_rows = INPUT_FEATURE_MATRIX.shape[0]
    num_grid_columns = INPUT_FEATURE_MATRIX.shape[1]
    image_file_names = []

    # Size of the kernel panel relative to the feature-map panel, so the
    # kernel is drawn at the same per-pixel scale.
    kernel_width_ratio = float(KERNEL_MATRIX.shape[1]) / num_grid_columns
    kernel_height_ratio = float(KERNEL_MATRIX.shape[0]) / num_grid_rows

    # One frame per kernel position (grid cell).
    for i in range(num_grid_rows):
        for j in range(num_grid_columns):
            this_figure_object, this_axes_object_matrix = (
                plotting_utils.create_paneled_figure(
                    num_rows=NUM_PANEL_ROWS, num_columns=NUM_PANEL_COLUMNS,
                    horizontal_spacing=0.2, vertical_spacing=0.,
                    shared_x_axis=False, shared_y_axis=False,
                    keep_aspect_ratio=True)
            )

            # Panel (a): input feature map with kernel footprint at (i, j).
            _plot_feature_map(
                feature_matrix_2d=INPUT_FEATURE_MATRIX[..., 0],
                kernel_row=i, kernel_column=j, is_output_map=False,
                axes_object=this_axes_object_matrix[0, 0]
            )

            # BUG FIX: the original did `letter_label = None` and then
            # `if letter_label is None: letter_label = 'a' else: ...` — the
            # else-branch was dead code on every iteration.  Assign directly.
            letter_label = 'a'

            plotting_utils.label_axes(
                axes_object=this_axes_object_matrix[0, 0],
                label_string='({0:s})'.format(letter_label),
                font_size=PANEL_LETTER_FONT_SIZE,
                y_coord_normalized=1.04, x_coord_normalized=0.1
            )

            # Panel (c): output feature map with same position highlighted.
            _plot_feature_map(
                feature_matrix_2d=output_feature_matrix,
                kernel_row=i, kernel_column=j, is_output_map=True,
                axes_object=this_axes_object_matrix[0, 2]
            )

            # Shrink and reposition the middle panel so the kernel renders
            # at the same per-pixel scale as the feature maps.
            this_bbox_object = this_axes_object_matrix[0, 1].get_position()

            this_width = kernel_width_ratio * (
                this_bbox_object.x1 - this_bbox_object.x0
            )
            this_height = kernel_height_ratio * (
                this_bbox_object.y1 - this_bbox_object.y0
            )

            this_bbox_object.x0 += 0.5 * this_width
            this_bbox_object.y0 = (
                this_axes_object_matrix[0, 0].get_position().y0 + 0.1
            )
            this_bbox_object.x1 = this_bbox_object.x0 + this_width
            this_bbox_object.y1 = this_bbox_object.y0 + this_height
            this_axes_object_matrix[0, 1].set_position(this_bbox_object)

            # Panel (b): the kernel itself.
            _plot_kernel(
                kernel_matrix_2d=KERNEL_MATRIX[..., 0, 0],
                feature_matrix_2d=INPUT_FEATURE_MATRIX[..., 0],
                feature_row_at_center=i, feature_column_at_center=j,
                axes_object=this_axes_object_matrix[0, 1]
            )

            letter_label = chr(ord(letter_label) + 1)

            plotting_utils.label_axes(
                axes_object=this_axes_object_matrix[0, 1],
                label_string='({0:s})'.format(letter_label),
                font_size=PANEL_LETTER_FONT_SIZE,
                y_coord_normalized=1.04, x_coord_normalized=0.2
            )

            # Connect the kernel footprint in panel (a) to the kernel panel.
            _plot_feature_to_kernel_lines(
                kernel_matrix_2d=KERNEL_MATRIX[..., 0, 0],
                feature_matrix_2d=INPUT_FEATURE_MATRIX[..., 0],
                feature_row_at_center=i, feature_column_at_center=j,
                kernel_axes_object=this_axes_object_matrix[0, 1],
                feature_axes_object=this_axes_object_matrix[0, 0]
            )

            letter_label = chr(ord(letter_label) + 1)

            plotting_utils.label_axes(
                axes_object=this_axes_object_matrix[0, 2],
                label_string='({0:s})'.format(letter_label),
                font_size=PANEL_LETTER_FONT_SIZE,
                y_coord_normalized=1.04, x_coord_normalized=0.1
            )

            if include_caption:
                this_figure_object.text(
                    0.5, 0.35, FIGURE_CAPTION, fontsize=DEFAULT_FONT_SIZE,
                    color='k', horizontalalignment='center',
                    verticalalignment='top')

            image_file_names.append(
                '{0:s}/conv_animation_row{1:d}_column{2:d}.jpg'.format(
                    output_dir_name, i, j)
            )

            print('Saving figure to: "{0:s}"...'.format(image_file_names[-1]))
            this_figure_object.savefig(
                image_file_names[-1], dpi=FIGURE_RESOLUTION_DPI,
                pad_inches=0, bbox_inches='tight'
            )
            pyplot.close(this_figure_object)

    # Stitch all frames into one animated GIF.
    animation_file_name = '{0:s}/conv_animation.gif'.format(output_dir_name)
    print('Creating animation: "{0:s}"...'.format(animation_file_name))

    imagemagick_utils.create_gif(
        input_file_names=image_file_names,
        output_file_name=animation_file_name, num_seconds_per_frame=0.5,
        resize_factor=0.5)
def _run(model_file_name, top_example_dir_name, storm_metafile_name,
         num_examples, output_file_name):
    """Creates dummy saliency map for each storm object.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param top_example_dir_name: Same.
    :param storm_metafile_name: Same.
    :param num_examples: Same.
    :param output_file_name: Same.
    """

    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)

    # Model metadata is assumed to live next to the model file itself.
    model_metafile_name = '{0:s}/model_metadata.p'.format(
        os.path.split(model_file_name)[0])

    print(
        'Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]

    # NOTE: mutates the dict inside `model_metadata_dict` in place before it
    # is passed to the example reader below — presumably to disable
    # reflectivity masking when reading predictors; confirm against
    # trainval_io docs.
    training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None

    print(
        'Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
    full_storm_id_strings, storm_times_unix_sec = (
        tracking_io.read_ids_and_times(storm_metafile_name))
    print(SEPARATOR_STRING)

    # num_examples <= 0 means "use all storm objects".
    if 0 < num_examples < len(full_storm_id_strings):
        full_storm_id_strings = full_storm_id_strings[:num_examples]
        storm_times_unix_sec = storm_times_unix_sec[:num_examples]

    example_dict = testing_io.read_predictors_specific_examples(
        top_example_dir_name=top_example_dir_name,
        desired_full_id_strings=full_storm_id_strings,
        desired_times_unix_sec=storm_times_unix_sec,
        option_dict=training_option_dict,
        layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY])
    print(SEPARATOR_STRING)

    predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]

    # Suffix `_pa` suggests units of Pascals — TODO confirm in testing_io.
    sounding_pressure_matrix_pa = (
        example_dict[testing_io.SOUNDING_PRESSURES_KEY])

    # First input matrix is the radar matrix; its leading axis is examples
    # and trailing axis is channels, with 2 or 3 spatial axes in between.
    radar_matrix = predictor_matrices[0]
    num_examples = radar_matrix.shape[0]
    num_channels = radar_matrix.shape[-1]
    num_spatial_dim = len(radar_matrix.shape) - 2

    if num_spatial_dim == 2:
        kernel_matrix = numpy.expand_dims(EDGE_DETECTOR_MATRIX_2D, axis=-1)
    else:
        kernel_matrix = numpy.expand_dims(EDGE_DETECTOR_MATRIX_3D, axis=-1)

    # Tile the edge-detector kernel over input channels, then add and tile an
    # output-channel axis, yielding a (…, num_channels, num_channels) kernel.
    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)
    kernel_matrix = numpy.expand_dims(kernel_matrix, axis=-1)
    kernel_matrix = numpy.repeat(kernel_matrix, num_channels, axis=-1)

    # Pre-fill with NaN so an unassigned example is detectable downstream.
    radar_saliency_matrix = numpy.full(radar_matrix.shape, numpy.nan)

    for i in range(num_examples):
        # Progress report every 10 examples.
        if numpy.mod(i, 10) == 0:
            print((
                'Have created dummy saliency map for {0:d} of {1:d} examples...'
            ).format(i, num_examples))

        # The "dummy saliency map" is just the edge-detector convolution of
        # the radar field (padded, stride 1, so shape is preserved).
        if num_spatial_dim == 2:
            this_saliency_matrix = standalone_utils.do_2d_convolution(
                feature_matrix=radar_matrix[i, ...],
                kernel_matrix=kernel_matrix, pad_edges=True,
                stride_length_px=1)
        else:
            this_saliency_matrix = standalone_utils.do_3d_convolution(
                feature_matrix=radar_matrix[i, ...],
                kernel_matrix=kernel_matrix, pad_edges=True,
                stride_length_px=1)

        radar_saliency_matrix[i, ...] = this_saliency_matrix[0, ...]

    print('Have created dummy saliency map for all {0:d} examples!'.format(
        num_examples))
    print(SEPARATOR_STRING)

    # Replace only the radar matrix (index 0) with its saliency counterpart;
    # all other predictor matrices pass through unchanged.
    saliency_matrices = [
        radar_saliency_matrix if k == 0 else predictor_matrices[k]
        for k in range(len(predictor_matrices))
    ]

    saliency_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=saliency_matrices,
        training_option_dict=training_option_dict)

    # Deep copy so denormalization cannot corrupt `predictor_matrices`.
    denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
        list_of_input_matrices=copy.deepcopy(predictor_matrices),
        training_option_dict=training_option_dict)

    print('Denormalizing model inputs...')
    denorm_predictor_matrices = model_interpretation.denormalize_data(
        list_of_input_matrices=denorm_predictor_matrices,
        model_metadata_dict=model_metadata_dict)

    print('Writing saliency maps to file: "{0:s}"...'.format(output_file_name))

    saliency_metadata_dict = saliency_maps.check_metadata(
        component_type_string=model_interpretation.CLASS_COMPONENT_TYPE_STRING,
        target_class=1)

    saliency_maps.write_standard_file(
        pickle_file_name=output_file_name,
        denorm_predictor_matrices=denorm_predictor_matrices,
        saliency_matrices=saliency_matrices,
        full_storm_id_strings=full_storm_id_strings,
        storm_times_unix_sec=storm_times_unix_sec,
        model_file_name=model_file_name,
        metadata_dict=saliency_metadata_dict,
        sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
def _run(example_file_name, example_indices, num_radar_rows, num_radar_columns,
         normalization_file_name, output_dir_name):
    """Makes figure to explain one convolution block.

    This is effectively the main method.

    :param example_file_name: See documentation at top of file.
    :param example_indices: Same.
    :param num_radar_rows: Same.
    :param num_radar_columns: Same.
    :param normalization_file_name: Same.
    :param output_dir_name: Same.
    """

    # Non-positive counts mean "keep the full grid".
    if num_radar_rows <= 0:
        num_radar_rows = None
    if num_radar_columns <= 0:
        num_radar_columns = None

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)

    print('Reading data from: "{0:s}"...'.format(example_file_name))
    example_dict = input_examples.read_example_file(
        netcdf_file_name=example_file_name, read_all_target_vars=False,
        target_name=DUMMY_TARGET_NAME, include_soundings=False,
        num_rows_to_keep=num_radar_rows,
        num_columns_to_keep=num_radar_columns,
        radar_heights_to_keep_m_agl=numpy.array([RADAR_HEIGHT_M_AGL], dtype=int)
    )

    # Prefer the reflectivity-only matrix when present; otherwise pull the
    # desired field (as a singleton channel) out of the generic radar matrix.
    if input_examples.REFL_IMAGE_MATRIX_KEY in example_dict:
        input_feature_matrix = example_dict[
            input_examples.REFL_IMAGE_MATRIX_KEY]
    else:
        field_index = example_dict[input_examples.RADAR_FIELDS_KEY].index(
            RADAR_FIELD_NAME
        )
        input_feature_matrix = example_dict[
            input_examples.RADAR_IMAGE_MATRIX_KEY
        ][..., [field_index]]

    num_examples = input_feature_matrix.shape[0]

    # Every requested index must address a real example.
    error_checking.assert_is_geq_numpy_array(example_indices, 0)
    error_checking.assert_is_less_than_numpy_array(
        example_indices, num_examples)

    input_feature_matrix = dl_utils.normalize_radar_images(
        radar_image_matrix=input_feature_matrix,
        field_names=[RADAR_FIELD_NAME],
        normalization_type_string=NORMALIZATION_TYPE_STRING,
        normalization_param_file_name=normalization_file_name)

    # Collapse to (example, row, column), then restore a single channel axis.
    if len(input_feature_matrix.shape) == 4:
        input_feature_matrix = input_feature_matrix[..., 0]
    else:
        input_feature_matrix = input_feature_matrix[..., 0, 0]

    input_feature_matrix = numpy.expand_dims(input_feature_matrix, axis=-1)

    print('Doing convolution for all {0:d} examples...'.format(num_examples))

    # `+ 0` copies each example so the convolution cannot mutate the input.
    # astype(float) matches the float64 accumulator the original code used.
    feature_matrix_after_conv = numpy.stack([
        standalone_utils.do_2d_convolution(
            feature_matrix=input_feature_matrix[k, ...] + 0,
            kernel_matrix=KERNEL_MATRIX, pad_edges=False, stride_length_px=1
        )[0, ...]
        for k in range(num_examples)
    ], axis=0).astype(float)

    print('Doing activation for all {0:d} examples...'.format(num_examples))
    feature_matrix_after_activn = standalone_utils.do_activation(
        input_values=feature_matrix_after_conv + 0,
        function_name=architecture_utils.RELU_FUNCTION_STRING, alpha=0.2)

    print('Doing batch norm for all {0:d} examples...'.format(num_examples))
    feature_matrix_after_bn = standalone_utils.do_batch_normalization(
        feature_matrix=feature_matrix_after_activn + 0
    )

    print('Doing max-pooling for all {0:d} examples...\n'.format(num_examples))
    feature_matrix_after_pooling = numpy.stack([
        standalone_utils.do_2d_pooling(
            feature_matrix=feature_matrix_after_bn[k, ...],
            stride_length_px=2,
            pooling_type_string=standalone_utils.MAX_POOLING_TYPE_STRING
        )[0, ...]
        for k in range(num_examples)
    ], axis=0).astype(float)

    # One figure per requested example, showing the block stage by stage.
    for i in example_indices:
        this_output_file_name = '{0:s}/convolution_block{1:06d}.jpg'.format(
            output_dir_name, i)

        _plot_one_example(
            input_feature_matrix=input_feature_matrix[i, ...],
            feature_matrix_after_conv=feature_matrix_after_conv[i, ...],
            feature_matrix_after_activn=feature_matrix_after_activn[i, ...],
            feature_matrix_after_bn=feature_matrix_after_bn[i, ...],
            feature_matrix_after_pooling=feature_matrix_after_pooling[i, ...],
            output_file_name=this_output_file_name)