def __init__(self, post_transform=None, pre_transform=None, p=0.5,
             contrasts=None, same_transforms=False, colour_space='grey',
             vision_type='trichromat', mask_image=None, grey_width=40,
             avg_illuminant=0, train_params=None, repeat=False,
             side_by_side=True):
    self.p = p
    self.grey_width = grey_width
    self.contrasts = contrasts
    self.same_transforms = same_transforms
    self.colour_space = colour_space
    self.vision_type = vision_type
    self.mask_image = mask_image
    self.post_transform = post_transform
    self.pre_transform = pre_transform
    self.avg_illuminant = avg_illuminant
    if train_params is None:
        self.train_params = None
    else:
        # train_params is a path to a pickled parameters file
        self.train_params = path_utils.read_pickle(train_params)
    self.img_counter = 0
    self.repeat = repeat
    self.side_by_side = side_by_side
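# Construction sketch (not part of the original code): the owning class is not
# shown in this section, so `ShapeDataset` below is a placeholder name; only
# the keyword arguments come from the __init__ signature above. train_params,
# when not None, must be a path readable by path_utils.read_pickle.
# dataset = ShapeDataset(
#     contrasts=[0.05, 0.1, 0.5, 1.0],
#     colour_space='grey',
#     vision_type='trichromat',
#     mask_image=None,
#     train_params='/path/to/train_params.pickle',
# )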
def match_results_to_input(result_file, geetup_info, model_in_size=(180, 320)):
    model_preds = path_utils.read_pickle(result_file)
    current_part_res = dict()
    for j in range(len(geetup_info)):
        f_path, f_gt = geetup_info[j]
        f_path = f_path[-1]
        f_gt = f_gt[-1]
        splitted_parts = f_path.replace('//', '/').split('/')
        part_folder = splitted_parts[-5]
        if part_folder not in current_part_res:
            current_part_res[part_folder] = {'1': dict(), '2': dict()}
        folder_name = splitted_parts[-2]
        image_name = splitted_parts[-1]
        if '/segments/1/' in f_path:
            seg = '1'
        elif '/segments/2/' in f_path:
            seg = '2'
        else:
            sys.exit('Oops, unrecognised segment')
        pred = model_preds[j]
        # rescale the prediction between the network input size and the
        # original frame size (360, 640)
        pred = map_point_to_image_size(pred, (360, 640), model_in_size)
        if folder_name not in current_part_res[part_folder][seg]:
            current_part_res[part_folder][seg][folder_name] = []
        euc_error = euclidean_distance(f_gt, pred)
        current_part_res[part_folder][seg][folder_name].append(
            [image_name, f_gt, pred, euc_error]
        )
    return current_part_res
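# Usage sketch (not part of the original code): summarise the per-folder median
# Euclidean error from a predictions pickle. The paths are placeholders;
# geetup_db.GeetupDatasetInformative is taken from its use in clip_visualise
# below, and row[-1] is the euc_error appended by match_results_to_input.
def example_summarise_results(result_file, db_path):
    geetup_info = geetup_db.GeetupDatasetInformative(db_path)
    part_results = match_results_to_input(result_file, geetup_info)
    for part, segments in part_results.items():
        for seg, folders in segments.items():
            errors = [row[-1] for rows in folders.values() for row in rows]
            if errors:
                print(part, seg, np.median(errors))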
def clip_visualise(db_path, pred_path, euc_path, out_dir, video_clips_inds=None):
    preds = path_utils.read_pickle(pred_path)
    eucs = path_utils.read_pickle(euc_path)
    geetup_info = geetup_db.GeetupDatasetInformative(db_path)
    if video_clips_inds is None:
        video_clips_inds = geetup_utils.get_video_clips_inds(geetup_info)
    for clip_inds in video_clips_inds:
        beg_ind, end_ind = clip_inds
        print(
            'Video [%d %d] %.2f' % (
                beg_ind, end_ind, np.median(eucs[beg_ind:end_ind])
            )
        )
        draw_circle_clips(preds, geetup_info, beg_ind, end_ind, out_dir)
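# Usage sketch (not part of the original code): visualise only a couple of
# hand-picked clips instead of every clip returned by
# geetup_utils.get_video_clips_inds. The index ranges and paths are
# placeholders.
def example_visualise_selected_clips(db_path, pred_path, euc_path, out_dir):
    selected_inds = [(0, 300), (300, 600)]  # hypothetical (beg_ind, end_ind) pairs
    clip_visualise(
        db_path, pred_path, euc_path, out_dir, video_clips_inds=selected_inds
    )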
def process_network(net_name):
    all_layers_maxsf = []
    for file_path in sorted(
            glob.glob(os.path.join(activations_dir, net_name) + '/*.pickle'),
            key=natural_keys
    ):
        file_name = ntpath.basename(file_path)
        # derive a short layer name from the pickle file name
        layer_name = 'layer'
        name_parts = file_name.split('_')
        for pind, part in enumerate(name_parts):
            if part == 'layer':
                layer_name += name_parts[pind + 1].replace('.weight', '')
                layer_name = layer_name.replace('conv', '')
                break
        # output paths, only used by the commented-out plotting/correlation steps
        png_name = os.path.join(
            fig_out_dir, net_name, '%s_activation_%s.png' % (layer_name, 'max')
        )
        csv_name = os.path.join(
            anl_out_dir, net_name, '%s_corrs_0.1.csv' % layer_name
        )
        print('reading', file_name, layer_name)
        result_mat = path_utils.read_pickle(file_path)
        contrast_activation, xvals = process_layer(result_mat)
        # if not os.path.exists(png_name):
        #     plot_layer(contrast_activation, xvals, net_name, layer_name)
        # if not os.path.exists(csv_name):
        #     corr_layer(contrast_activation, xvals, net_name, layer_name)
        maxsf, header = maxsf_layer(
            contrast_activation, xvals, net_name, layer_name
        )
        all_layers_maxsf.append(maxsf)
    out_file = os.path.join(
        anl_out_dir, 'peak_activations_avg', '%s_corrs.csv' % net_name
    )
    np.savetxt(
        out_file, np.array(all_layers_maxsf), delimiter=',', header=header
    )
    return
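# Usage sketch (not part of the original code): run the analysis for one
# network and read back the CSV it writes. The network name is a placeholder;
# np.loadtxt skips the '#'-prefixed header line written by np.savetxt.
def example_process_and_load(net_name='resnet18'):
    process_network(net_name)
    out_file = os.path.join(
        anl_out_dir, 'peak_activations_avg', '%s_corrs.csv' % net_name
    )
    return np.loadtxt(out_file, delimiter=',')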
def get_network_activation(net_name):
    all_layers_maxsf = []
    all_activations = []
    for file_path in sorted(
            glob.glob(os.path.join(activations_dir, net_name) + '/*.pickle'),
            key=natural_keys
    ):
        file_name = ntpath.basename(file_path)
        layer_name = 'layer'
        name_parts = file_name.split('_')
        for pind, part in enumerate(name_parts):
            if part == 'layer':
                layer_name += name_parts[pind + 1].replace('.weight', '')
                layer_name = layer_name.replace('conv', '')
                break
        print('reading', file_name, layer_name)
        result_mat = path_utils.read_pickle(file_path)
        contrast_activation, xvals = process_layer(result_mat)
        all_activations.append(contrast_activation)
        maxsf, header = maxsf_layer(
            contrast_activation, xvals, net_name, layer_name
        )
        all_layers_maxsf.append(maxsf)
    return all_layers_maxsf, all_activations, xvals
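# Usage sketch (not part of the original code): collect activations for one
# network and print the per-layer peak values returned by maxsf_layer. The
# network name is a placeholder and the exact structure of each maxsf entry
# follows whatever maxsf_layer returns.
def example_print_peak_activations(net_name='resnet18'):
    all_layers_maxsf, all_activations, xvals = get_network_activation(net_name)
    for layer_ind, maxsf in enumerate(all_layers_maxsf):
        print('layer %d' % layer_ind, maxsf)
    return all_layers_maxsf, xvals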