                  'data_th', transform_name='space_data')
seq_v.graph_range(globals(), 'pointer', 'buffer', 'raw_data', transform_name='space_data')

sync = np.fromfile(join(data_folder, 'Sync.bin'), dtype=np.uint16).astype(np.int32)
sync -= sync.min()

video_frame = 0
video_file = join(data_folder, 'Video.avi')
seq_v.image_sequence(globals(), 'video_frame', 'video_file')

# Create some arrays and constants relating to the events
camera_pulses, beam_breaks, sounds = \
    sync_funcs.get_time_points_of_events_in_sync_file(data_folder, clean=True,
                                                      cam_ttl_pulse_period=const.CAMERA_TTL_PULSES_TIMEPOINT_PERIOD)
points_per_pulse = np.mean(np.diff(camera_pulses))

camera_frames_in_video = csv_funcs.get_true_frame_array(data_folder)
time_point_of_first_video_frame = camera_pulses[camera_frames_in_video][0]


def time_point_to_frame(x):
    return sync_funcs.time_point_to_frame(time_point_of_first_video_frame, camera_frames_in_video,
                                          join(dlc_project_folder, 'post_processing',
                                               'markers_positions_no_large_movs.df'))

frame = 3
global marker_dots
marker_dots = np.zeros((640, 640, 4))
output = None
overlay_dots.marker_dots = marker_dots
marker_size = 3
markers_positions_no_large_movs_numpy = markers_positions_no_large_movs.values
args = [markers_positions_no_large_movs_numpy, marker_size]
update_markers_for_video = overlay_dots.update_markers_for_video
tr.connect_repl_var(globals(), 'frame', 'output', 'update_markers_for_video', 'args')
sv.image_sequence(globals(), 'frame', 'marker_dots', 'full_video_file')

# -------------------------------------------------
# GET THE BODY POSITIONS
# Get the body markers from the cleaned markers
# Then interpolate the nans of the TailBase
# Finally average the body markers to get the body position
updated_body_markers = dlc_pp.seperate_markers(markers_positions_no_large_movs, body_parts)
tail_base = updated_body_markers.loc[:, updated_body_markers.columns.get_level_values(1) == 'TailBase']
tail_base_cleaned = dlc_pp.clean_dlc_outpout(join(dlc_project_folder, 'post_processing', 'test.df'),
ap_den_data_panes = np.swapaxes(
    np.reshape(ap_den_data,
               (ap_den_data.shape[0],
                int(ap_den_data.shape[1] / time_points_buffer),
                time_points_buffer)),
    0, 1)

# -------------------------------------------------
# QUICK LOOK AT NEURONS FIRING
# -------------------------------------------------
pane = 120
colormap = 'jet'
image_levels = [0, 150]
sv.image_sequence(globals(), 'pane', 'ap_data_panes', image_levels=image_levels, colormap=colormap, flip='ud')
sv.image_sequence(globals(), 'pane', 'ap_den_data_panes', image_levels=image_levels, colormap=colormap, flip='ud')

# -------------------------------------------------
# CREATING NEURON FIRING RATES
# -------------------------------------------------
# Creating a scatter plot for neurons spiking
spike_info = pd.read_pickle(join(spikes_folder, 'spike_info_after_cortex_sorting.df'))
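# Minimal sketch (toy array, illustrative names, not part of the pipeline) of
# what the pane reshape above does: a (channels, samples) recording is cut
# into consecutive buffers of time_points_buffer samples, giving
# (panes, channels, buffer) so the viewer can page through one
# channels-by-time image per pane.
import numpy as np

toy_channels, toy_buffer, toy_num_panes = 4, 10, 3
toy_data = np.arange(toy_channels * toy_buffer * toy_num_panes).reshape(toy_channels, -1)
toy_panes = np.swapaxes(
    np.reshape(toy_data, (toy_channels, toy_num_panes, toy_buffer)), 0, 1)
assert toy_panes.shape == (toy_num_panes, toy_channels, toy_buffer)
# Pane 0 holds the first toy_buffer samples of every channel
assert np.array_equal(toy_panes[0], toy_data[:, :toy_buffer])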
                             mode='r', dtype=imfs.dtype, shape=(imfs.shape[0], imfs.shape[2]))

channel = 0
start = 1000000
end = 1100000
f = plt.figure(2)
args = [f]
out = None


def show_one_channel(channel, figure):
    figure.clear()
    ax = figure.add_subplot(111)
    ax.plot(imf_data_for_ks[channel, start:end])
    return None


sl.connect_repl_var(globals(), 'channel', 'out', 'show_one_channel', 'args', [0, 71])

# -------------------------------------------------
avg_spike_template = np.load(join(imf_kilosort_folder, 'imf_{}'.format(imf_for_ks), 'avg_spike_template.npy'))

imf_3_panes = np.swapaxes(np.reshape(imf_data_for_ks, (72, 1000, 18161)), 0, 1)
t = 0
image_levels = [0, 255]
sv.image_sequence(globals(), 't', 'imf_3_panes', image_levels=image_levels)
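# Minimal np.memmap sketch (hypothetical file name and shape): opening a large
# binary recording read-only maps it into virtual memory, so slicing one
# channel, as show_one_channel does above, only reads the bytes it touches.
import numpy as np

demo_file = 'demo_recording.bin'                      # hypothetical file
np.arange(72 * 1000, dtype=np.int16).tofile(demo_file)
demo_map = np.memmap(demo_file, mode='r', dtype=np.int16, shape=(72, 1000))
one_channel_chunk = np.array(demo_map[0, 100:200])    # copies only this slice into RAM
print(one_channel_chunk.shape)                        # (100,)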
                   (events[i] + window_timepoints) / const.LFP_DOWNSAMPLE_FACTOR)
    imfs_around_events[:, :, i, :] = imfs[:, :, start_imfs:end_imfs]

avg_imfs_around_events = np.mean(imfs_around_events, axis=2)
avg_imfs_around_events = np.swapaxes(avg_imfs_around_events, 0, 1)


def space(data):
    return cdf.space_data_factor(data, 2)


imf = 0
sv.graph_pane(globals(), 'imf', 'avg_imfs_around_events', transform_name='space')
sv.image_sequence(globals(), 'imf', 'avg_imfs_around_events')

_ = plt.plot(space(avg_lfps_around_event).T)

# Build a baseline by cutting the same window around random time points
random_times = np.random.choice(np.arange(2 * window_timepoints, lfps.shape[1] - 2 * window_timepoints, 1),
                                num_of_events)
random_triggered_lfps = []
for spike in random_times:
    random_triggered_lfps.append(lfps[:, spike - window_timepoints:spike + window_timepoints])
random_triggered_lfps = np.array(random_triggered_lfps)
random_triggered_lfps_mean = random_triggered_lfps.mean(axis=0)
random_triggered_lfps_std = random_triggered_lfps.std(axis=0)
# _ = plt.plot(space(random_triggered_lfps_std).T)
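# Minimal sketch (toy data, illustrative names) of the event-triggered-average
# logic above: cut a fixed window around each event, stack the windows and
# average them; the random-time baseline uses the same cutting with random
# "events".
import numpy as np

toy_lfps = np.random.randn(8, 5000)          # (channels, samples)
toy_events = np.array([1000, 2000, 3000])    # event time points in samples
toy_window = 200

toy_windows = np.array([toy_lfps[:, e - toy_window:e + toy_window] for e in toy_events])
toy_avg = toy_windows.mean(axis=0)           # (channels, 2 * toy_window)
assert toy_avg.shape == (8, 2 * toy_window)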
speeds_smoothed = binning.rolling_window_with_step(speeds, np.mean, num_of_frames_to_average, 1)
speeds_smoothed[0] = 0
speeds_smoothed = np.array(speeds_smoothed)

rest_speed_threshold_cm_per_sec = 15
rest_move_periods = np.zeros(len(speeds_smoothed))
rest_move_periods[np.where(speeds_smoothed > rest_speed_threshold_cm_per_sec)] = 1
# plt.plot(rest_move_periods)
# plt.plot((speeds_smoothed - np.nanmin(speeds_smoothed)) / np.nanmax(speeds_smoothed - np.nanmin(speeds_smoothed)))

# Check the calculated speeds on the video
frame = 1
sv.image_sequence(globals(), 'frame', 'full_video_file')
# </editor-fold>

# -------------------------------------------------
# <editor-fold desc="FIND THE FRAMES WHERE THE RAT SWITCHES MOVEMENT STATE">
seconds_of_constant_state = 5

switches_from_rest_to_move = np.squeeze(np.argwhere(np.diff(rest_move_periods) > 0))
switches_from_move_to_rest = np.squeeze(np.argwhere(np.diff(rest_move_periods) < 0))

switches_from_long_rest_to_long_move = []
for r_m in switches_from_rest_to_move[1:]:
    if np.all(rest_move_periods[r_m - seconds_of_constant_state * 120: r_m - 1] == 0) and \
            np.all(rest_move_periods[r_m + 1: r_m + seconds_of_constant_state * 120] == 1):
        switches_from_long_rest_to_long_move.append(r_m + 70)
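# Minimal sketch (toy trace, assuming the 120 frames-per-second factor used in
# the code above) of the switch detection: np.diff of a 0/1 rest-or-move trace
# is +1 at a rest-to-move edge and -1 at a move-to-rest edge, and the np.all
# checks keep only edges flanked by long constant-state stretches.
import numpy as np

toy_trace = np.array([0] * 600 + [1] * 600 + [0] * 600)    # rest, move, rest
toy_rest_to_move = np.squeeze(np.argwhere(np.diff(toy_trace) > 0))
toy_move_to_rest = np.squeeze(np.argwhere(np.diff(toy_trace) < 0))
print(toy_rest_to_move, toy_move_to_rest)                  # 599 1199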
# Flatten
grayscale_resized_video_array = \
    grayscale_resized_video_array.reshape((num_of_frames - 1, video_resolution[0] * video_resolution[1]))

# Smooth over time to 100ms per frame
grayscale_resized_video_array_frame_smoothed = np.transpose(
    binning.rolling_window_with_step(grayscale_resized_video_array.transpose(), np.mean, 12, 12))

np.save(join(subsumpled_video_folder, 'grayscale_resized_video_array_frame_smoothed.npy'),
        grayscale_resized_video_array_frame_smoothed)

# Have a look
frame = 0
t = grayscale_resized_video_array_frame_smoothed.reshape((44215, video_resolution[1], video_resolution[0]))
sv.image_sequence(globals(), 'frame', 't')

# PCA and keep the first 100 components
pca_flat_video = PCA(n_components=100)
pcs_flat_video = pca_flat_video.fit_transform(grayscale_resized_video_array_frame_smoothed)

# Reverse the PCA and have a look at the resulting images
rev_pcs_video = pca_flat_video.inverse_transform(pcs_flat_video)
rev_pcs_video = rev_pcs_video.reshape((rev_pcs_video.shape[0], video_resolution[1], video_resolution[0]))
plt.figure(1)
plt.imshow(rev_pcs_video[100, :, :])
plt.figure(2)
plt.imshow(grayscale_resized_video_array_frame_smoothed.reshape(
    rev_pcs_video.shape[0], video_resolution[1], video_resolution[0])[100, :, :])

# Run the t-sne
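# Minimal sketch (toy frames, illustrative sizes) of the PCA round trip above:
# fit_transform projects each flattened frame onto the first components and
# inverse_transform maps the scores back to pixel space, giving a low-rank
# reconstruction of every frame.
import numpy as np
from sklearn.decomposition import PCA

toy_frames = np.random.rand(500, 40 * 30)        # 500 flattened 40x30 frames
toy_pca = PCA(n_components=10)
toy_scores = toy_pca.fit_transform(toy_frames)   # (500, 10)
toy_recon = toy_pca.inverse_transform(toy_scores).reshape(500, 30, 40)
print(toy_pca.explained_variance_ratio_.sum())   # variance kept by 10 components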
markers_croped = pd.read_hdf(markers_file)
crop_window_position = pd.read_csv(crop_window_position_file, sep=' ', names=['X', 'Y'], usecols=[4, 5])
crop_window_position.iloc[1:] = crop_window_position.iloc[:-1].values
markers = dlc_pp.assign_croped_markers_to_full_arena(markers_croped, crop_window_position)

# -------------------------------------------------
# HAVE A LOOK AT HOW CLEAN THE BODY MARKERS ARE
frame = 3
sv.image_sequence(globals(), 'frame', 'labeled_video_file')

# Get the main body parts and nan the low likelihood frames
body_parts = ['Neck', 'Mid Body', 'Tail Base']
body_markers = dlc_pp.seperate_markers(markers, body_parts)

head_parts = ['Nose', 'Ear Left', 'Ear Right', 'Left Front Top Implant',
              'Right Back To Implant', 'Screw Tip Implant']
head_markers = dlc_pp.seperate_markers(markers, head_parts)

tail_parts = ['Tail Mid', 'Tail Tip']
tail_markers = dlc_pp.seperate_markers(markers, tail_parts)

likelihood_threshold = 0.8
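# Minimal sketch (toy DataFrame, illustrative column layout) of the
# likelihood-threshold step: DLC marker tables carry x, y and likelihood
# columns per body part, and coordinates whose likelihood falls below the
# threshold are set to NaN before any interpolation.
import numpy as np
import pandas as pd

toy_cols = pd.MultiIndex.from_product([['Neck'], ['x', 'y', 'likelihood']])
toy_markers = pd.DataFrame([[10.0, 20.0, 0.95],
                            [11.0, 21.0, 0.40],
                            [12.0, 22.0, 0.90]], columns=toy_cols)
toy_threshold = 0.8
low = toy_markers[('Neck', 'likelihood')] < toy_threshold
toy_markers.loc[low, [('Neck', 'x'), ('Neck', 'y')]] = np.nan
print(toy_markers)   # the middle frame's coordinates are now NaN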