import logging
import os

import cv2
import imageio
import numpy as np
import torch
from allensdk.brain_observatory.locally_sparse_noise import LocallySparseNoise
from allensdk.brain_observatory.stimulus_info import BrainObservatoryMonitor
from torchvision.utils import make_grid


def write_vid_frames(vid_array, save_dir, scale_method=None):
    '''
    Write a video represented as an array as individual image files.

    Args:
        vid_array (np.ndarray): Array of shape N x H x W x C or N x H x W to write.
        save_dir (str): Directory to write the frames in.
        scale_method (None, "video", or "frame"): If None, no pixel value scaling
            will be performed. Otherwise, "video" will scale every frame by the
            video's max and min, and "frame" will scale every frame by the
            frame's max and min.

    Returns:
        None
    '''
    os.makedirs(save_dir, exist_ok=True)

    if scale_method == 'video':
        vid_array = max_min_scale(vid_array) * 255

    for i_frame, frame in enumerate(vid_array):
        if scale_method == 'frame':
            frame = max_min_scale(frame) * 255

        cv2.imwrite(os.path.join(save_dir, '{}.png'.format(i_frame)), np.uint8(frame))
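# `max_min_scale` is used throughout these functions but is defined elsewhere
# in the repo. A minimal sketch consistent with how it is called here, i.e.
# linearly rescaling all values in the array to [0, 1]; the epsilon guard
# against constant inputs is an assumption, not necessarily the repo's behavior:
def max_min_scale(array):
    array = array - np.amin(array)
    return array / (np.amax(array) + 1e-8)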
def write_rfs(dataset, write_dir):
    '''
    Compute and write each cell's ON and OFF receptive fields as .png images.

    Based on:
    https://allensdk.readthedocs.io/en/latest/_static/examples/nb/brain_observatory_analysis.html
    '''
    cell_ids = dataset.get_cell_specimen_ids()
    session_type = dataset.get_session_type()

    if 'locally_sparse_noise' in dataset.list_stimuli():
        logging.info('WRITING RECEPTIVE FIELDS FOR EXPERIMENT {}'.format(
            dataset.get_metadata()['ophys_experiment_id']))

        on_dir = os.path.join(write_dir, 'ReceptiveFields', 'on', session_type)
        off_dir = os.path.join(write_dir, 'ReceptiveFields', 'off', session_type)
        os.makedirs(on_dir, exist_ok=True)
        os.makedirs(off_dir, exist_ok=True)

        lsn = LocallySparseNoise(dataset)
        rfs = lsn.receptive_field
        rfs[np.isnan(rfs)] = 0

        for ind, cell_id in enumerate(cell_ids):
            rf = rfs[:, :, ind, :]
            rf = max_min_scale(rf) * 255
            on, off = rf.transpose([2, 0, 1])
            fname = str(cell_id) + '.png'
            cv2.imwrite(os.path.join(on_dir, fname), on)
            cv2.imwrite(os.path.join(off_dir, fname), off)
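# Hypothetical usage with the AllenSDK's BrainObservatoryCache; the manifest
# path and experiment ID below are illustrative only:
#
#     from allensdk.core.brain_observatory_cache import BrainObservatoryCache
#
#     boc = BrainObservatoryCache(manifest_file='boc/manifest.json')
#     dataset = boc.get_ophys_experiment_data(ophys_experiment_id=501498760)
#     write_rfs(dataset, write_dir='results')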
def test_max_min_scale_shape(self):
    ''' Input and output shape are equal. '''
    rand_vec = np.random.randn(10000)
    rand_vec_scaled = max_min_scale(rand_vec)
    self.assertEqual(rand_vec_scaled.shape, rand_vec.shape)
def read_image(self, fpath):
    ''' Read an image, resize it to the desired shape, and scale it to [0, 1]. '''
    frame = cv2.imread(fpath, cv2.IMREAD_GRAYSCALE)
    frame = cv2.resize(frame, (self.stim_width, self.stim_height))

    if self.img_transform is not None:
        frame = self.img_transform(frame)

    # add a leading channel dimension
    return np.float32(max_min_scale(frame))[None, ...]
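# read_image is written as a method: it assumes the owning class provides
# self.stim_width, self.stim_height, and self.img_transform. A hypothetical
# minimal host class (the name and constructor are assumptions):
#
#     class StimulusDataset:
#         def __init__(self, stim_height, stim_width, img_transform=None):
#             self.stim_height = stim_height
#             self.stim_width = stim_width
#             self.img_transform = img_transform
#
#         # read_image defined as above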
def test_max_min_scale_range(self):
    ''' Output vector values are in [0, 1] given random input. '''
    rand_vec = np.random.randn(10000) * 100
    rand_vec_scaled = max_min_scale(rand_vec)
    scaled_min = np.amin(rand_vec_scaled)
    scaled_max = np.amax(rand_vec_scaled)
    self.assertAlmostEqual(scaled_min, 0.0, places=6)
    self.assertAlmostEqual(scaled_max, 1.0, places=6)
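# The two test methods above presumably live in a unittest.TestCase subclass;
# a minimal harness (the class name is hypothetical):
#
#     import unittest
#
#     class TestMaxMinScale(unittest.TestCase):
#         # test_max_min_scale_shape and test_max_min_scale_range as above
#
#     if __name__ == '__main__':
#         unittest.main()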
def write_simple_cell_strfs(weight_tensors, save_dir):
    '''
    View simple cell (no spatial sharing of weights) strfs in a grid.

    Args:
        weight_tensors (list): List of np.ndarrays of shape
            n_neurons x in_c x n_features_y x n_features_x x kh x kw.
        save_dir (str): The directory to save the feature grids in, since
            there will be multiple grids.

    Returns:
        None
    '''
    os.makedirs(save_dir, exist_ok=True)
    n_frames = len(weight_tensors)

    for frame_num, weights in enumerate(weight_tensors):
        n_neurons, w_in, n_features_y, n_features_x, w_y, w_x = weights.shape

        for neuron_num, weights_feat in enumerate(weights):
            # reshape to n_features_y * n_features_x x in_c x kh x kw
            weights_feat = weights_feat.transpose([1, 2, 0, 3, 4])
            weights_feat = weights_feat.reshape([-1, w_in, w_y, w_x])

            # make an image grid with one row per spatial position in y
            grid = make_grid(
                torch.from_numpy(weights_feat),
                nrow=n_features_x,
                normalize=False,
                scale_each=False,
                padding=0
            )
            grid = grid.numpy().transpose([1, 2, 0])

            # aggregate grids (and make a placeholder for them on the first frame)
            if frame_num == 0 and neuron_num == 0:
                grids = np.zeros([n_neurons, n_frames] + list(grid.shape))

            grids[neuron_num, frame_num] = grid

    # scale all values to [0, 255]
    grids = np.uint8(max_min_scale(grids) * 255)

    # add a makeshift black border between features
    grids[:, :, ::w_y, :, :] = 0
    grids[:, :, :, ::w_x, :] = 0

    # save one .gif per neuron
    for neuron_num in range(n_neurons):
        imageio.mimwrite(
            os.path.join(save_dir, 'feature{}.gif'.format(neuron_num)),
            [grids[neuron_num, frame_num] for frame_num in range(n_frames)]
        )
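# Illustrative call: two "video frames" of weights for 4 neurons, 1 input
# channel, a 3 x 3 grid of non-shared spatial positions, and 8 x 8 kernels
# (all shapes here are made up for the example):
#
#     weight_tensors = [np.random.randn(4, 1, 3, 3, 8, 8) for _ in range(2)]
#     write_simple_cell_strfs(weight_tensors, save_dir='strf_grids')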
def write_complex_cell_strfs(weight_tensors, write_fpath, sort_inds=None):
    '''
    View complex cell (spatially-shared weights) strfs in a grid.

    Args:
        weight_tensors (list): A list of np.ndarrays of shape out_c x in_c x kh x kw.
        write_fpath (str): The file path to save the resulting .gif as.
        sort_inds (list): List of indices of length out_c to sort the features
            by in the grid.

    Returns:
        None
    '''
    if os.path.split(write_fpath)[0] != '':
        os.makedirs(os.path.split(write_fpath)[0], exist_ok=True)

    n_frames = len(weight_tensors)  # number of video frames in the input

    # loop through each weight tensor, extract the weights, and aggregate them
    for frame_num, weights in enumerate(weight_tensors):
        n_neurons, in_c, patch_height, patch_width = weights.shape

        # sort the features based on the activation indices if given
        if sort_inds is not None:
            weights = weights[sort_inds]

        # make the grid for the features corresponding to this video frame
        grid = make_grid(
            torch.from_numpy(weights),
            nrow=int(np.floor(np.sqrt(n_neurons))),
            normalize=False,
            scale_each=False,
            padding=0
        )
        grid = grid.numpy().transpose([1, 2, 0])  # put channels last again

        # aggregate grids (and make a placeholder for them on the first frame)
        if frame_num == 0:
            grids = np.zeros([n_frames] + list(grid.shape))

        grids[frame_num] = grid

    # scale values in the grid to [0, 255]
    grids = np.uint8(max_min_scale(grids) * 255)

    # put a black border in between the features
    grids[:, ::patch_height, ...] = 0
    grids[:, :, ::patch_width, :] = 0

    # write the .gif
    imageio.mimwrite(write_fpath, [grids[frame_num] for frame_num in range(n_frames)])
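# Illustrative call: 9 frames of 64 features with 1 input channel and
# 15 x 15 kernels; the shapes and the precomputed sort order are made up
# for the example:
#
#     weight_tensors = [np.random.randn(64, 1, 15, 15) for _ in range(9)]
#     sort_inds = list(range(64))
#     write_complex_cell_strfs(weight_tensors, 'strfs/complex.gif', sort_inds)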
def write_AIBO_natural_stimuli(template, save_dir, stimulus, height=160, width=256):
    '''
    Take the natural_movie_* or natural_scenes stimulus template and write the
    images/frames as they would have appeared on the monitor.
    '''
    monitor = BrainObservatoryMonitor()
    os.makedirs(save_dir, exist_ok=True)
    fnames = [fname + '.png' for fname in get_img_frame_names(template.shape[0])]

    # scale to [0, 255]
    template = np.uint8(max_min_scale(template) * 255)

    for image, fname in zip(template, fnames):
        # try to filter out some of the pixelation
        image = cv2.bilateralFilter(image, 7, 40, 40)

        if 'natural_movie' in stimulus:
            image = monitor.natural_movie_image_to_screen(image, origin='upper')
        elif stimulus == 'natural_scenes':
            image = monitor.natural_scene_image_to_screen(image, origin='upper')

        # warp the image as it was shown on the monitor
        image = monitor.warp_image(image)

        # resize and contrast enhance
        image = cv2.resize(image, (width, height))
        image = cv2.equalizeHist(image)

        cv2.imwrite(os.path.join(save_dir, fname), image)
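# Hypothetical usage, assuming `dataset` is an AllenSDK NWB dataset as in the
# write_rfs example above:
#
#     template = dataset.get_stimulus_template('natural_movie_one')
#     write_AIBO_natural_stimuli(template, 'stimuli/natural_movie_one',
#                                stimulus='natural_movie_one')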
def write_gifs(array, save_dir, scale=False):
    '''
    Write a batch of video frame sequences as .gifs.

    Args:
        array (np.ndarray): Array of shape N x F x H x W x C or N x F x H x W
            to write, where F is the number of consecutive frames to write in
            each gif.
        save_dir (str): Directory to write the gifs in.
        scale (bool): If True, each gif will be linearly scaled to [0, 255].

    Returns:
        None
    '''
    os.makedirs(save_dir, exist_ok=True)

    for i_gif, gif in enumerate(array):
        if scale:
            gif = max_min_scale(gif) * 255

        imageio.mimwrite(
            os.path.join(save_dir, '{}.gif'.format(i_gif)),
            [np.uint8(frame) for frame in gif]
        )
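# Illustrative call: a batch of 5 random 10-frame grayscale clips, each scaled
# to [0, 255] before writing (the shapes are made up for the example):
#
#     clips = np.random.randn(5, 10, 64, 64)
#     write_gifs(clips, 'gifs', scale=True)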