def define_gabor_fragment(frag_size):
    """
    Explicitly define a fragment (pixel by pixel). A Gabor fit is then found for it.

    :param frag_size: spatial size of the fragment (n_rows, n_cols)
    :return: (fitted fragment, Gabor parameters, background value)
    """
    bg_value = 0

    # frag = np.ones(frag_size, dtype='uint8') * 255
    # frag[:, frag_size[0] // 2 - 2, :] = 0
    # frag[:, frag_size[0] // 2 - 1, :] = 0
    # frag[:, frag_size[0] // 2, :] = 0
    # frag[:, frag_size[0] // 2 + 1, :] = 0
    # frag[:, frag_size[0] // 2 + 2, :] = 0

    frag = np.array([
        [255, 255, 0, 0, 0, 255, 255],
        [255, 255, 0, 0, 0, 255, 255],
        [255, 255, 0, 0, 0, 255, 255],
        [255, 255, 0, 0, 0, 255, 255],
        [255, 255, 0, 0, 0, 255, 255],
        [255, 255, 0, 0, 0, 255, 255],
        [255, 255, 0, 0, 0, 255, 255]])
    frag = np.stack([frag, frag, frag], axis=-1)

    # --------------------------------------------------------------
    plt.figure()
    plt.imshow(frag)
    plt.title("Specified Fragment")

    # Pause to inspect the specified fragment before fitting
    import pdb
    pdb.set_trace()

    print("Finding Gabor Fit ...")
    frag = (frag - frag.min()) / (frag.max() - frag.min())

    gabor_params_list = gabor_fits.find_best_fit_2d_gabor(frag, verbose=1)
    g_params = gabor_fits.convert_gabor_params_list_to_dict(gabor_params_list)
    print(g_params)

    fitted_gabor = gabor_fits.get_gabor_fragment(g_params, frag_size[:2])

    f, ax_arr = plt.subplots(1, 2)
    ax_arr[0].imshow(frag)
    ax_arr[0].set_title("Specified Fragment")
    ax_arr[1].imshow(fitted_gabor)
    ax_arr[1].set_title("Generated Fragment")

    return fitted_gabor, g_params, bg_value
def define_gabor_parameters(frag_size):
    """
    Generate a fragment from a hand-specified list of Gabor parameters.

    :param frag_size: spatial size of the fragment (n_rows, n_cols)
    :return: (fragment, Gabor parameters, background value)
    """
    bg_value = 0

    gabor_params_list = np.array([[0, -1., 145, 0.33, 2.00, 15.25, 0, 0]])

    # ---------------------------------
    g_params = gabor_fits.convert_gabor_params_list_to_dict(gabor_params_list)
    frag = gabor_fits.get_gabor_fragment(g_params, frag_size)

    # Display Fragment
    plt.figure()
    plt.imshow(frag)
    plt.title("Generated Fragment")

    return frag, g_params, bg_value
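
# Note (editorial, hedged): judging by how per-channel Gabor dictionaries are assembled
# later in this file (keys x0, y0, theta_deg, amp, sigma, lambda1, psi, gamma), the row
# above appears to encode roughly:
#     x0=0, y0=-1.0, theta_deg=145, amp=0.33, sigma=2.00, lambda1=15.25, psi=0, gamma=0
# The authoritative mapping is whatever gabor_fits.convert_gabor_params_list_to_dict does.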
def main(model, optimal_stim_dict=None, r_dir="."):
    """
    Collect per-time-step excitatory/inhibitory responses of each contour-integration
    channel for contours of different lengths, then plot and save the results.
    """
    # Contour Data Set Normalization (channel_wise_optimal_full14_frag7)
    chan_means = np.array([0.46958107, 0.47102246, 0.46911009])
    chan_stds = np.array([0.46108359, 0.46187091, 0.46111096])

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    frag_size = np.array([7, 7])
    full_tile_size = np.array([14, 14])
    img_size = np.array([256, 256, 3])

    # Get temporal responses for these contour lengths
    c_len_arr = np.array([1, 3, 5, 7, 9])
    # c_len_arr = np.array([9])

    # Average responses over n_images
    n_images = 20

    # -----------------------------------------------------------------------------------
    # Register Callbacks
    # -----------------------------------------------------------------------------------
    model.edge_extract.register_forward_hook(edge_extract_cb)
    model.contour_integration_layer.register_forward_hook(contour_integration_cb)

    n_channels = model.edge_extract.weight.shape[0]

    # -----------------------------------------------------------------------------------
    # Find optimal stimuli for each kernel
    # -----------------------------------------------------------------------------------
    print(">>>> Getting Optimal stimuli for kernels ...")

    if optimal_stim_dict is not None:
        tracked_optimal_stim_dict = optimal_stim_dict
        print("Using stored optimal stimuli")
    else:
        tracked_optimal_stim_dict = {}

        for ch_idx in range(n_channels):
            print("{0} processing channel {1} {0}".format("*" * 20, ch_idx))

            gabor_params = find_optimal_stimulus(
                model=model,
                device_to_use=device,
                k_idx=ch_idx,
                extract_point='contour_integration_layer_out',
                ch_mus=chan_means,
                ch_sigmas=chan_stds,
                frag_size=frag_size,
            )

            tracked_optimal_stim_dict[ch_idx] = gabor_params

        # Save the optimal stimuli
        pickle_file = os.path.join(r_dir, 'optimal_stimuli.pickle')
        print("Saving optimal gabor parameters @ {}".format(pickle_file))
        with open(pickle_file, 'wb') as h:
            pickle.dump(tracked_optimal_stim_dict, h)

    # -----------------------------------------------------------------------------------
    # Get Dynamic time responses for each kernel
    # -----------------------------------------------------------------------------------
    print(">>>> Getting responses per time step")

    # Tell the model to store iterative predictions
    model.contour_integration_layer.store_recurrent_acts = True

    # # Increase the number of time steps to see what happens beyond trained iterations
    train_n_iter = model.contour_integration_layer.n_iters - 1
    # model.contour_integration_layer.n_iters = 7

    overall_results = {}
    # overall_results is a dictionary of dictionaries, keyed by channel index.
    # Each channel dictionary contains:
    # {
    #     'iou_per_len',              # IoU for each contour length
    #     'c_len_<x>_mean_e_resp',    # mean excitatory response per iteration, length x
    #     'c_len_<x>_std_e_resp',
    #     'c_len_<x>_mean_i_resp',    # mean inhibitory response per iteration, length x
    #     'c_len_<x>_std_i_resp',
    # }
    for ch_idx in range(n_channels):
        print("{0} processing channel {1} {0}".format("*" * 20, ch_idx))

        g_params = tracked_optimal_stim_dict.get(ch_idx, None)

        if g_params is not None:
            frag = gabor_fits.get_gabor_fragment(g_params, spatial_size=frag_size)
            bg = g_params[0]['bg']

            iou_per_len_arr = []
            ch_results_dict = {}

            for c_len in c_len_arr:
                print("length {}".format(c_len))

                iou, mean_e_resp, std_e_resp, mean_i_resp, std_i_resp = \
                    get_responses_per_iteration(
                        model=model,
                        device=device,
                        g_params=g_params,
                        frag=frag,
                        bg=bg,
                        n_images=n_images,
                        c_len=c_len,
                        full_tile_size=full_tile_size,
                        img_size=img_size,
                        chan_means=chan_means,
                        chan_stds=chan_stds,
                        ch_idx=ch_idx)

                iou_per_len_arr.append(iou)
                ch_results_dict['c_len_{}_mean_e_resp'.format(c_len)] = mean_e_resp
                ch_results_dict['c_len_{}_std_e_resp'.format(c_len)] = std_e_resp
                ch_results_dict['c_len_{}_mean_i_resp'.format(c_len)] = mean_i_resp
                ch_results_dict['c_len_{}_std_i_resp'.format(c_len)] = std_i_resp

            ch_results_dict['iou_per_len'] = np.array(iou_per_len_arr)
            overall_results[ch_idx] = ch_results_dict

    # ----------------------------------------------------------------------------
    # Plot the results
    # ----------------------------------------------------------------------------
    print("Plotting Results ...")

    for ch_idx in range(n_channels):
        ch_results = overall_results.get(ch_idx, None)

        if ch_results is not None:
            per_chan_r_dir = os.path.join(r_dir, 'individual_channels/{}'.format(ch_idx))
            if not os.path.exists(per_chan_r_dir):
                os.makedirs(per_chan_r_dir)

            f_iou = plt.figure(figsize=(9, 9))
            plt.plot(c_len_arr, ch_results['iou_per_len'], marker='x')
            plt.title("IoU per length. Channel {}".format(ch_idx))
            plt.xlabel("Length")
            plt.ylabel("IoU")
            f_iou.savefig(
                os.path.join(per_chan_r_dir, 'iou_ch_{}.jpg'.format(ch_idx)), format='jpg')

            f_resp, ax_arr = plt.subplots(2, 1, figsize=(11, 7), sharex=True)
            f_resp.suptitle("Responses per Iteration. Channel = {}".format(ch_idx))

            ax_arr[0].set_title("Excitatory")
            # ax_arr[0].set_xlabel("Time")
            ax_arr[0].set_ylabel("Activation")
            ax_arr[0].axvline(train_n_iter, linestyle='--', color='black')

            ax_arr[1].set_title("Inhibitory")
            ax_arr[1].set_xlabel("Time")
            ax_arr[1].set_ylabel("Activation")
            ax_arr[1].axvline(train_n_iter, linestyle='--', color='black')

            for c_len in c_len_arr:
                e_mean_resp = ch_results['c_len_{}_mean_e_resp'.format(c_len)]
                e_std_resp = ch_results['c_len_{}_std_e_resp'.format(c_len)]
                i_mean_resp = ch_results['c_len_{}_mean_i_resp'.format(c_len)]
                i_std_resp = ch_results['c_len_{}_std_i_resp'.format(c_len)]

                timesteps = np.arange(0, len(e_mean_resp))
                color = next(ax_arr[0]._get_lines.prop_cycler)['color']

                ax_arr[0].plot(
                    timesteps, e_mean_resp, label='clen_{}'.format(c_len), color=color)
                ax_arr[0].fill_between(
                    timesteps,
                    e_mean_resp + e_std_resp,
                    e_mean_resp - e_std_resp,
                    alpha=0.2, color=color)

                ax_arr[1].plot(
                    timesteps, i_mean_resp, label='clen_{}'.format(c_len), color=color)
                ax_arr[1].fill_between(
                    timesteps,
                    i_mean_resp + i_std_resp,
                    i_mean_resp - i_std_resp,
                    alpha=0.2, color=color)

            ax_arr[0].legend()
            f_resp.savefig(
                os.path.join(per_chan_r_dir, 'resp_ch_{}.jpg'.format(ch_idx)), format='jpg')

            plt.close(f_iou)
            plt.close(f_resp)
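
# --------------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): one way `main` could be driven.
# `my_model` is a placeholder for the trained contour-integration model that the rest of
# the repository builds; only `main` and the `os` calls below are defined/imported here.
# --------------------------------------------------------------------------------------
# if __name__ == '__main__':
#     store_dir = './results/dynamic_activations'
#     if not os.path.exists(store_dir):
#         os.makedirs(store_dir)
#
#     my_model = ...  # load / construct the trained model (defined elsewhere in the repo)
#     main(my_model, optimal_stim_dict=None, r_dir=store_dir)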
def find_optimal_stimulus(
        model, device_to_use, k_idx, ch_mus, ch_sigmas, extract_point,
        frag_size=np.array([7, 7]), img_size=np.array([256, 256, 3])):
    """
    Copied from the gain vs length experiment.

    Sweep over all base Gabor parameter sets and orientations and return the Gabor
    parameters that maximally activate the target channel (k_idx) at the specified
    extract point.

    :return: list of per-channel Gabor parameter dictionaries for the optimal stimulus,
             or None if no stimulus drove the target neuron above zero.
    """
    global edge_extract_act
    global cont_int_in_act
    global cont_int_out_act

    orient_arr = np.arange(0, 180, 5)
    img_center = img_size[0:2] // 2

    tgt_n_acts = np.zeros((len(base_gabor_parameters), len(orient_arr)))
    tgt_n_max_act = 0
    tgt_n_opt_params = None

    for base_gp_idx, base_gabor_params in enumerate(base_gabor_parameters):
        print("Processing Base Gabor Param Set {}".format(base_gp_idx))

        for o_idx, orient in enumerate(orient_arr):

            # Change orientation
            g_params = copy.deepcopy(base_gabor_params)
            for c_idx in range(len(g_params)):
                g_params[c_idx]["theta_deg"] = orient

            # Create Test Image - Single fragment @ center
            frag = gabor_fits.get_gabor_fragment(g_params, spatial_size=frag_size)

            bg = base_gabor_params[0]['bg']
            if bg is None:
                bg = fields1993_stimuli.get_mean_pixel_value_at_boundary(frag)

            test_img = np.ones(img_size, dtype='uint8') * bg

            add_one = 1
            if frag_size[0] % 2 == 0:
                add_one = 0

            test_img[
                img_center[0] - frag_size[0] // 2: img_center[0] + frag_size[0] // 2 + add_one,
                img_center[0] - frag_size[0] // 2: img_center[0] + frag_size[0] // 2 + add_one,
                :,
            ] = frag

            test_img = transform_functional.to_tensor(test_img)

            # # Debug - Show Test Image
            # # -----------------------
            # plt.figure()
            # plt.imshow(np.transpose(test_img, axes=(1, 2, 0)))
            # plt.title("Input Image - Find optimal stimulus")
            #
            # import pdb
            # pdb.set_trace()

            # Get target activations
            process_image(model, device_to_use, ch_mus, ch_sigmas, test_img)

            # Get Target Neuron Activation
            # ----------------------------
            if extract_point == 'edge_extract_layer_out':
                center_n_acts = edge_extract_act[
                    0, :, edge_extract_act.shape[2] // 2, edge_extract_act.shape[3] // 2]
            elif extract_point == 'contour_integration_layer_in':
                center_n_acts = cont_int_in_act[
                    0, :, cont_int_in_act.shape[2] // 2, cont_int_in_act.shape[3] // 2]
            else:  # 'contour_integration_layer_out'
                center_n_acts = cont_int_out_act[
                    0, :, cont_int_out_act.shape[2] // 2, cont_int_out_act.shape[3] // 2]

            tgt_n_act = center_n_acts[k_idx]
            tgt_n_acts[base_gp_idx, o_idx] = tgt_n_act

            # # Debug - Display all channel responses to individual test image
            # # --------------------------------------------------------------
            # plt.figure()
            # plt.plot(center_n_acts)
            # plt.title("Center Neuron Activations. Base Gabor Set {}. Orientation {}".format(
            #     base_gp_idx, orient))
            #
            # import pdb
            # pdb.set_trace()

            if tgt_n_act > tgt_n_max_act:
                tgt_n_max_act = tgt_n_act
                tgt_n_opt_params = copy.deepcopy(g_params)

                max_active_n = int(np.argmax(center_n_acts))

                extra_info = {
                    'optim_stim_act_value': tgt_n_max_act,
                    'optim_stim_base_gabor_set': base_gp_idx,
                    'optim_stim_act_orient': orient,
                    'max_active_neuron_is_target': (max_active_n == k_idx),
                    'max_active_neuron_value': center_n_acts[max_active_n],
                    'max_active_neuron_idx': max_active_n
                }

                for item in tgt_n_opt_params:
                    item['extra_info'] = extra_info

        # # -----------------------------------------
        # # Debug - Tuning Curve for Individual base Gabor Params
        # plt.figure()
        # plt.plot(orient_arr, tgt_n_acts[base_gp_idx, :])
        # plt.title("Neuron {}: responses vs Orientation. Gabor Set {}".format(
        #     k_idx, base_gp_idx))
        #
        # import pdb
        # pdb.set_trace()

    # ---------------------------
    if tgt_n_opt_params is not None:

        # Save optimal tuning curve
        for item in tgt_n_opt_params:
            opt_base_g_params_set = item['extra_info']['optim_stim_base_gabor_set']
            item['extra_info']['orient_tuning_curve_x'] = orient_arr
            item['extra_info']['orient_tuning_curve_y'] = tgt_n_acts[opt_base_g_params_set, ]

        # # Debug: plot tuning curves for all gabor sets
        # # ------------------------------------------------
        # plt.figure()
        # for base_gp_idx, base_gabor_params in enumerate(base_gabor_parameters):
        #
        #     if base_gp_idx == tgt_n_opt_params[0]['extra_info']['optim_stim_base_gabor_set']:
        #         line_width = 5
        #         plt.plot(
        #             tgt_n_opt_params[0]['extra_info']['optim_stim_act_orient'],
        #             tgt_n_opt_params[0]['extra_info']['max_active_neuron_value'],
        #             marker='x', markersize=10,
        #             label='max active neuron Index {}'.format(
        #                 tgt_n_opt_params[0]['extra_info']['max_active_neuron_idx'])
        #         )
        #     else:
        #         line_width = 2
        #
        #     plt.plot(
        #         orient_arr, tgt_n_acts[base_gp_idx, ],
        #         label='param set {}'.format(base_gp_idx), linewidth=line_width
        #     )
        #
        # plt.legend()
        # plt.grid(True)
        # plt.title(
        #     "Kernel {}. Max Active Base Set {}. Is most responsive to this stimulus {}".format(
        #         k_idx,
        #         tgt_n_opt_params[0]['extra_info']['optim_stim_base_gabor_set'],
        #         tgt_n_opt_params[0]['extra_info']['max_active_neuron_is_target'])
        # )
        #
        # import pdb
        # pdb.set_trace()

    return tgt_n_opt_params
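
# Note (editorial): find_optimal_stimulus returns a list with one Gabor-parameter
# dictionary per image channel (or None if no stimulus drove the target neuron above
# zero). Each dictionary keeps the base set's 'bg' value and gains an 'extra_info' entry
# holding the winning base-set index, the optimal orientation, the max-active neuron
# index/value, and the orientation tuning curve ('orient_tuning_curve_x' / '_y').
# Downstream code relies on g_params[0]['bg'] and
# g_params[0]['extra_info']['max_active_neuron_idx'].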
for o_idx, orient in enumerate(orient_arr):

    # Change Orientation
    # ------------------
    gabor_params = copy.deepcopy(base_gabor_params)
    for ch_idx in range(len(gabor_params)):
        gabor_params[ch_idx]["theta_deg"] = orient

    # Create Test Image
    # -----------------
    # Create a fragment from gabor_params and place it in the center of the image.
    # This location is chosen to fit within the receptive fields of centrally located
    # neurons. Next, get target neuron responses.
    frag = gabor_fits.get_gabor_fragment(gabor_params, spatial_size=frag_size)

    bg = base_gabor_params[0]['bg']
    if bg is None:
        bg = fields1993_stimuli.get_mean_pixel_value_at_boundary(frag)

    test_image = np.ones(image_size, dtype='uint8') * bg
    test_image[
        image_center[0] - frag_size[0] // 2: image_center[0] + frag_size[0] // 2 + 1,
        image_center[0] - frag_size[0] // 2: image_center[0] + frag_size[0] // 2 + 1,
        :,
    ] = frag

    # Debug - Show Test Image
    # plt.figure()
def get_contour_gain_vs_spacing(
        model, device_to_use, g_params, k_idx, ch_mus, ch_sigmas, rslt_dir,
        full_tile_s_arr, frag_tile_s, c_len=7, n_images=50,
        img_size=np.array([256, 256, 3]), epsilon=1e-5):
    """
    Measure contour gain as a function of fragment spacing for the target neuron (k_idx)
    and for the maximally active neuron, averaged over n_images randomly generated
    stimuli per spacing.

    Gain is the response to a contour of length c_len divided by the mean response to a
    noise pattern (a single optimal fragment at the RF center, all other fragments
    random).

    :return: (None, target-neuron mean/std gains, max-active-neuron mean/std gains,
              target / max-active noise responses, target-neuron mean/std output acts)
    """
    global edge_extract_act
    global cont_int_in_act
    global cont_int_out_act

    # Tracking variables -------------------------------------------------
    tgt_n = k_idx
    max_act_n_idx = g_params[0]['extra_info']['max_active_neuron_idx']

    tgt_n_out_acts = np.zeros((n_images, full_tile_s_arr.shape[0]))
    max_act_n_acts = np.zeros_like(tgt_n_out_acts)

    tgt_n_single_frag_acts = np.zeros(n_images)
    max_act_n_single_frag_acts = np.zeros_like(tgt_n_single_frag_acts)

    # -----------------------------------------------------------------
    frag = gabor_fits.get_gabor_fragment(g_params, spatial_size=frag_tile_s)
    bg = g_params[0]['bg']

    # First get the response to a single fragment, i.e. co-linear distance = 1 (noise pattern)
    for img_idx in range(n_images):

        test_img, test_img_label, contour_frags_starts, end_acc_angle, start_acc_angle = \
            fields1993_stimuli.generate_contour_image(
                frag=frag,
                frag_params=g_params,
                c_len=1,
                beta=0,
                alpha=0,
                f_tile_size=np.array([14, 14]),
                img_size=img_size,
                random_alpha_rot=True,
                rand_inter_frag_direction_change=True,
                use_d_jitter=False,
                bg_frag_relocate=False,
                bg=bg
            )

        test_img = transform_functional.to_tensor(test_img)
        process_image(model, device_to_use, ch_mus, ch_sigmas, test_img)

        center_n_acts = cont_int_out_act[
            0, :, cont_int_out_act.shape[2] // 2, cont_int_out_act.shape[3] // 2]

        tgt_n_single_frag_acts[img_idx] = center_n_acts[tgt_n]
        max_act_n_single_frag_acts[img_idx] = center_n_acts[max_act_n_idx]

    print("Tgt Neuron Single Fragment (RCD=1.0) Resp: mean {:0.2f}, std {:0.2f}".format(
        np.mean(tgt_n_single_frag_acts), np.std(tgt_n_single_frag_acts)))
    print("Max Active Neuron Single Fragment (RCD=1.0) Resp: mean {:0.2f}, std {:0.2f}".format(
        np.mean(max_act_n_single_frag_acts), np.std(max_act_n_single_frag_acts)))

    # # Debug
    # plt.figure()
    # plt.imshow(np.transpose(test_img, axes=(1, 2, 0)))
    # plt.title("Input Image")
    # import pdb
    # pdb.set_trace()

    for ft_idx, full_tile_s in enumerate(full_tile_s_arr):
        print("Processing Full Tile size = {}".format(full_tile_s))

        # Next get responses for full_tile_s fragment spacing
        for img_idx in range(n_images):

            # (1) Create Test Image
            test_img, test_img_label, contour_frags_starts, end_acc_angle, start_acc_angle = \
                fields1993_stimuli.generate_contour_image(
                    frag=frag,
                    frag_params=g_params,
                    c_len=c_len,
                    beta=0,
                    alpha=0,
                    f_tile_size=full_tile_s,
                    img_size=img_size,
                    random_alpha_rot=True,
                    rand_inter_frag_direction_change=True,
                    use_d_jitter=False,
                    bg_frag_relocate=True,
                    bg=bg
                )

            test_img = transform_functional.to_tensor(test_img)
            # test_img_label = torch.from_numpy(np.array(test_img_label)).unsqueeze(0)

            # # Debug - Plot Test Image
            # # ------------------------
            # if img_idx == 0:
            #     disp_img = np.transpose(test_img.numpy(), axes=(1, 2, 0))
            #     disp_img = (disp_img - disp_img.min()) / (disp_img.max() - disp_img.min()) * 255.
            #     disp_img = disp_img.astype('uint8')
            #     disp_label = test_img_label.numpy()
            #
            #     print(disp_label)
            #     print("Label is valid? {}".format(fields1993_stimuli.is_label_valid(disp_label)))
            #
            #     plt.figure()
            #     plt.imshow(disp_img)
            #     plt.title("Input Image. Full Tile Size = {}".format(full_tile_s))
            #
            #     # Highlight Label Tiles
            #     disp_label_image = fields1993_stimuli.plot_label_on_image(
            #         disp_img,
            #         disp_label,
            #         full_tile_s,
            #         edge_color=(250, 0, 0),
            #         edge_width=2,
            #         display_figure=False
            #     )
            #
            #     # Highlight All background Tiles
            #     full_tile_starts = fields1993_stimuli.get_background_tiles_locations(
            #         frag_len=full_tile_s[0],
            #         img_len=img_size[1],
            #         row_offset=0,
            #         space_bw_tiles=0,
            #         tgt_n_visual_rf_start=img_size[0] // 2 - (full_tile_s[0] // 2)
            #     )
            #
            #     disp_label_image = fields1993_stimuli.highlight_tiles(
            #         disp_label_image,
            #         full_tile_s,
            #         full_tile_starts,
            #         edge_color=(255, 255, 0))
            #
            #     plt.figure()
            #     plt.imshow(disp_label_image)
            #     plt.title("Labeled Image. Full Tile Size = {}".format(full_tile_s))

            # (2) Get output Activations
            _ = process_image(model, device_to_use, ch_mus, ch_sigmas, test_img)

            center_n_acts = cont_int_out_act[
                0, :, cont_int_out_act.shape[2] // 2, cont_int_out_act.shape[3] // 2]

            tgt_n_out_acts[img_idx, ft_idx] = center_n_acts[tgt_n]
            max_act_n_acts[img_idx, ft_idx] = center_n_acts[max_act_n_idx]

        # # ------------------
        # import pdb
        # pdb.set_trace()
        # plt.close('all')

    # -------------------------------------------
    # Gain
    # -------------------------------------------
    # In Li 2006, gain was defined as the output of the neuron divided by its mean output
    # to a noise pattern, where the noise pattern is the optimal stimulus at the center
    # of the RF with all other fragments random. This corresponds to
    # resp(c_len=x) / mean resp(c_len=1).
    tgt_n_avg_noise_resp = np.mean(tgt_n_single_frag_acts)
    max_active_n_avg_noise_resp = np.mean(max_act_n_single_frag_acts)

    tgt_n_gains = tgt_n_out_acts / (tgt_n_avg_noise_resp + epsilon)
    max_active_n_gains = max_act_n_acts / (max_active_n_avg_noise_resp + epsilon)

    tgt_n_mean_gain_arr = np.mean(tgt_n_gains, axis=0)
    tgt_n_std_gain_arr = np.std(tgt_n_gains, axis=0)

    max_act_n_mean_gain_arr = np.mean(max_active_n_gains, axis=0)
    max_act_n_std_gain_arr = np.std(max_active_n_gains, axis=0)

    # -----------------------------------------------------------------------------------
    # Plots
    # -----------------------------------------------------------------------------------
    # Fragment spacing is measured with the relative co-linear distance (RCD) metric,
    # defined as the ratio of the distance between fragments to the fragment length.
    rcd_arr = (full_tile_s_arr[:, 0] - frag_tile_s[0]) / frag_tile_s[0]

    # Gain vs Spacing
    f, ax_arr = plt.subplots(1, 2)

    ax_arr[0].errorbar(
        rcd_arr, tgt_n_mean_gain_arr, tgt_n_std_gain_arr,
        label='Target Neuron {}'.format(tgt_n))
    ax_arr[1].errorbar(
        rcd_arr, max_act_n_mean_gain_arr, max_act_n_std_gain_arr,
        label='Max Active Neuron {}'.format(max_act_n_idx))

    ax_arr[0].set_xlabel("Contour Spacing (Relative Colinear Distance)")
    ax_arr[1].set_xlabel("Contour Spacing (Relative Colinear Distance)")
    ax_arr[0].set_ylabel("Gain")
    ax_arr[1].set_ylabel("Gain")
    ax_arr[0].set_ylim(bottom=0)
    ax_arr[1].set_ylim(bottom=0)
    ax_arr[0].grid()
    ax_arr[1].grid()
    ax_arr[0].legend()
    ax_arr[1].legend()
    f.suptitle("Contour Gain Vs Spacing - Neuron {}".format(k_idx))

    f.savefig(os.path.join(rslt_dir, 'gain_vs_spacing.jpg'), format='jpg')
    plt.close(f)

    # Output Activations vs Spacing
    f = plt.figure()
    plt.errorbar(
        rcd_arr, np.mean(tgt_n_out_acts, axis=0), np.std(tgt_n_out_acts, axis=0),
        label='target_neuron_{}'.format(tgt_n))
    plt.errorbar(
        rcd_arr, np.mean(max_act_n_acts, axis=0), np.std(max_act_n_acts, axis=0),
        label='max_active_neuron_{}'.format(max_act_n_idx))
    plt.plot(
        rcd_arr[0], tgt_n_avg_noise_resp, marker='x', markersize=10, color='red',
        label='tgt_n_single_frag_resp')
    plt.plot(
        rcd_arr[0], max_active_n_avg_noise_resp, marker='x', markersize=10, color='green',
        label='max_active_n_single_frag_resp')

    plt.legend()
    plt.grid()
    plt.xlabel("Fragment spacing (Relative Co-Linear Distance)")
    plt.ylabel("Activations")
    plt.title("Output Activations")

    f.savefig(os.path.join(rslt_dir, 'output_activations_vs_spacing.jpg'), format='jpg')
    plt.close(f)

    # Save output Activations
    tgt_n_mean_out_acts = np.mean(tgt_n_out_acts, axis=0)
    tgt_n_std_out_acts = np.std(tgt_n_out_acts, axis=0)

    return None, tgt_n_mean_gain_arr, tgt_n_std_gain_arr, max_act_n_mean_gain_arr, \
        max_act_n_std_gain_arr, tgt_n_avg_noise_resp, max_active_n_avg_noise_resp, \
        tgt_n_mean_out_acts, tgt_n_std_out_acts
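
# Editorial sketch (assumption-flagged): a tiny, self-contained numeric illustration of
# the Li-2006-style gain normalization used in get_contour_gain_vs_spacing and
# get_contour_gain_vs_length. The activation values are made up; only the arithmetic
# mirrors the code above. Relies on the module-level `import numpy as np`.
def _gain_normalization_sketch(epsilon=1e-5):
    # Hypothetical center-neuron responses to noise images (single fragment, RCD = 1)
    noise_acts = np.array([0.8, 1.0, 1.2])

    # Hypothetical responses of the same neuron to a longer contour (e.g. c_len = 7)
    contour_acts = np.array([2.0, 2.4, 2.2])

    # Gain = response to the contour / mean response to the noise pattern
    gains = contour_acts / (np.mean(noise_acts) + epsilon)

    return np.mean(gains), np.std(gains)  # approximately (2.2, 0.16)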
for ch_idx, ch_params in enumerate(valid_best_fits):
    params.append(
        {
            'x0': np.min((ch_params[0], 2)),
            'y0': np.min((ch_params[1], 2)),
            'theta_deg': single_theta,
            'amp': ch_params[3],
            'sigma': np.min((ch_params[4], 2)),
            'lambda1': ch_params[5],
            'psi': np.min((ch_params[6], 3)),
            'gamma': ch_params[7]
        }
    )

frag = gabor_fits.get_gabor_fragment(params, fragment_size)

# # Display frag and generated Gabor
# f, ax_arr = plt.subplots(1, 2)
# display_kernel = (kernel - kernel.min()) / (kernel.max() - kernel.min())
# ax_arr[0].imshow(display_kernel)
# ax_arr[0].set_title('kernel')
# ax_arr[1].imshow(frag)
# ax_arr[1].set_title('fragment')
#
# import pdb
# pdb.set_trace()

# Generate a Test image with Fragment in the center
# -------------------------------------------------
center_tile_start = image_size[0:2] // 2 - fragment_size[0:2] // 2
def get_contour_gain_vs_length(
        model, device_to_use, g_params, k_idx, ch_mus, ch_sigmas, rslt_dir, c_len_arr,
        frag_size=np.array([7, 7]), full_tile_size=np.array([14, 14]),
        img_size=np.array([256, 256, 3]), n_images=50, epsilon=1e-5, iou_results=True):
    """
    Measure contour gain as a function of contour length for the target neuron (k_idx)
    and for the maximally active neuron, averaged over n_images randomly generated
    stimuli per length.

    :param model:
    :param device_to_use:
    :param g_params:
    :param k_idx:
    :param ch_mus:
    :param ch_sigmas:
    :param rslt_dir:
    :param c_len_arr:
    :param frag_size:
    :param full_tile_size:
    :param img_size:
    :param n_images:
    :param epsilon:
    :param iou_results:
    :return:
    """
    global edge_extract_act
    global cont_int_in_act
    global cont_int_out_act

    # Tracking variables -------------------------------------------------
    iou_arr = []

    tgt_n = k_idx
    max_act_n_idx = g_params[0]['extra_info']['max_active_neuron_idx']

    tgt_n_out_acts = np.zeros((n_images, len(c_len_arr)))
    max_act_n_acts = np.zeros_like(tgt_n_out_acts)

    # -----------------------------------------------------------------
    frag = gabor_fits.get_gabor_fragment(g_params, spatial_size=frag_size)
    bg = g_params[0]['bg']

    for c_len_idx, c_len in enumerate(c_len_arr):
        print("Processing contour length = {}".format(c_len))
        iou = 0

        for img_idx in range(n_images):

            # (1) Create Test Image
            test_img, test_img_label, contour_frags_starts, end_acc_angle, start_acc_angle = \
                fields1993_stimuli.generate_contour_image(
                    frag=frag,
                    frag_params=g_params,
                    c_len=c_len,
                    beta=0,
                    alpha=0,
                    f_tile_size=full_tile_size,
                    img_size=img_size,
                    random_alpha_rot=True,
                    rand_inter_frag_direction_change=True,
                    use_d_jitter=False,
                    bg_frag_relocate=True,
                    bg=bg
                )

            test_img = transform_functional.to_tensor(test_img)
            test_img_label = torch.from_numpy(np.array(test_img_label)).unsqueeze(0)

            # # Debug - Plot Test Image
            # # ------------------------
            # if img_idx == 0:
            #     disp_img = np.transpose(test_img.numpy(), axes=(1, 2, 0))
            #     disp_img = (disp_img - disp_img.min()) / (disp_img.max() - disp_img.min()) * 255.
            #     disp_img = disp_img.astype('uint8')
            #     disp_label = test_img_label.numpy()
            #
            #     print(disp_label)
            #     print("Label is valid? {}".format(fields1993_stimuli.is_label_valid(disp_label)))
            #
            #     plt.figure()
            #     plt.imshow(disp_img)
            #     plt.title("Input Image. Contour Length = {}".format(c_len))
            #
            #     # Highlight Label Tiles
            #     disp_label_image = fields1993_stimuli.plot_label_on_image(
            #         disp_img,
            #         disp_label,
            #         full_tile_size,
            #         edge_color=(250, 0, 0),
            #         edge_width=2,
            #         display_figure=False
            #     )
            #
            #     # Highlight All background Tiles
            #     full_tile_starts = fields1993_stimuli.get_background_tiles_locations(
            #         frag_len=full_tile_size[0],
            #         img_len=img_size[1],
            #         row_offset=0,
            #         space_bw_tiles=0,
            #         tgt_n_visual_rf_start=img_size[0] // 2 - (full_tile_size[0] // 2)
            #     )
            #
            #     disp_label_image = fields1993_stimuli.highlight_tiles(
            #         disp_label_image,
            #         full_tile_size,
            #         full_tile_starts,
            #         edge_color=(255, 255, 0))
            #
            #     plt.figure()
            #     plt.imshow(disp_label_image)
            #     plt.title("Labeled Image. Contour Length = {}".format(c_len))

            # (2) Get output Activations
            if iou_results:
                label = test_img_label
                iou += process_image(model, device_to_use, ch_mus, ch_sigmas, test_img, label)
            else:
                label = None
                process_image(model, device_to_use, ch_mus, ch_sigmas, test_img, label)

            center_n_acts = cont_int_out_act[
                0, :, cont_int_out_act.shape[2] // 2, cont_int_out_act.shape[3] // 2]

            tgt_n_out_acts[img_idx, c_len_idx] = center_n_acts[tgt_n]
            max_act_n_acts[img_idx, c_len_idx] = center_n_acts[max_act_n_idx]

        iou_arr.append(iou / n_images)

        # # ---------------------------------
        # import pdb
        # pdb.set_trace()
        # plt.close('all')

    # IoU
    if iou_results:
        # print("IoU per length {}".format(iou_arr))
        f_title = "IoU vs length - Neuron {}".format(k_idx)
        f_name = "neuron {}".format(k_idx)
        plot_iou_vs_contour_length(c_len_arr, iou_arr, rslt_dir, f_title, f_name)

    # -------------------------------------------
    # Gain
    # -------------------------------------------
    # In Li 2006, gain was defined as the output of the neuron divided by its mean output
    # to a noise pattern, where the noise pattern is the optimal stimulus at the center
    # of the RF with all other fragments random. This corresponds to
    # resp(c_len=x) / mean resp(c_len=1).
    tgt_n_avg_noise_resp = np.mean(tgt_n_out_acts[:, 0])
    max_active_n_avg_noise_resp = np.mean(max_act_n_acts[:, 0])

    tgt_n_gains = tgt_n_out_acts / (tgt_n_avg_noise_resp + epsilon)
    max_active_n_gains = max_act_n_acts / (max_active_n_avg_noise_resp + epsilon)

    tgt_n_mean_gain_arr = np.mean(tgt_n_gains, axis=0)
    tgt_n_std_gain_arr = np.std(tgt_n_gains, axis=0)

    max_act_n_mean_gain_arr = np.mean(max_active_n_gains, axis=0)
    max_act_n_std_gain_arr = np.std(max_active_n_gains, axis=0)

    # -----------------------------------------------------------------------------------
    # Plots
    # -----------------------------------------------------------------------------------
    # Gain vs Length
    # f = plt.figure()
    f, ax_arr = plt.subplots(1, 2)

    ax_arr[0].errorbar(
        c_len_arr, tgt_n_mean_gain_arr, tgt_n_std_gain_arr,
        label='Target Neuron {}'.format(tgt_n))
    ax_arr[1].errorbar(
        c_len_arr, max_act_n_mean_gain_arr, max_act_n_std_gain_arr,
        label='Max Active Neuron {}'.format(max_act_n_idx))

    ax_arr[0].set_xlabel("Contour Length")
    ax_arr[1].set_xlabel("Contour Length")
    ax_arr[0].set_ylabel("Gain")
    ax_arr[1].set_ylabel("Gain")
    ax_arr[0].set_ylim(bottom=0)
    ax_arr[1].set_ylim(bottom=0)
    ax_arr[0].grid()
    ax_arr[1].grid()
    ax_arr[0].legend()
    ax_arr[1].legend()
    f.suptitle("Contour Gain Vs Length - Neuron {}".format(k_idx))

    f.savefig(os.path.join(rslt_dir, 'gain_vs_len.jpg'), format='jpg')
    plt.close(f)

    # Output Activations vs Length
    f = plt.figure()
    plt.errorbar(
        c_len_arr, np.mean(tgt_n_out_acts, axis=0), np.std(tgt_n_out_acts, axis=0),
        label='target_neuron_{}'.format(tgt_n))
    plt.errorbar(
        c_len_arr, np.mean(max_act_n_acts, axis=0), np.std(max_act_n_acts, axis=0),
        label='max_active_neuron_{}'.format(max_act_n_idx))
    plt.legend()
    plt.grid()
    plt.xlabel("Contour Length")
    plt.ylabel("Activations")
    plt.title("Output Activations")

    f.savefig(os.path.join(rslt_dir, 'output_activations_vs_len.jpg'), format='jpg')
    plt.close(f)

    # Save output Activations
    tgt_n_mean_out_acts = np.mean(tgt_n_out_acts, axis=0)
    tgt_n_std_out_acts = np.std(tgt_n_out_acts, axis=0)

    return iou_arr, tgt_n_mean_gain_arr, tgt_n_std_gain_arr, max_act_n_mean_gain_arr, \
        max_act_n_std_gain_arr, tgt_n_avg_noise_resp, max_active_n_avg_noise_resp, \
        tgt_n_mean_out_acts, tgt_n_std_out_acts
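
# --------------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): measuring gain vs. length for one
# channel directly, without going through `main`. `my_model`, `dev`, `chan_means` and
# `chan_stds` are placeholders; the forward hooks (edge_extract_cb / contour_integration_cb)
# must already be registered, as `main` does, so the global activations are populated.
# --------------------------------------------------------------------------------------
# ch_idx = 0
# opt_g_params = find_optimal_stimulus(
#     model=my_model, device_to_use=dev, k_idx=ch_idx,
#     ch_mus=chan_means, ch_sigmas=chan_stds,
#     extract_point='contour_integration_layer_out')
#
# if opt_g_params is not None:
#     get_contour_gain_vs_length(
#         my_model, dev, opt_g_params, ch_idx, chan_means, chan_stds,
#         rslt_dir='./results/ch_{}'.format(ch_idx),   # directory must already exist
#         c_len_arr=np.array([1, 3, 5, 7, 9]), n_images=20)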