# Viewer setup: plot several pre-sliced channel groups of the raw data, with
# each channel offset vertically so stacked traces are distinguishable.
# NOTE(review): assumes raw_data is a (channels, samples) array — confirm upstream.
data_sth = raw_data[:370, :]

buffer = 500
pointer = 20000


def space_data(dat):
    # Cast to float32 first (avoids integer overflow and allows fractional
    # offsets), then shift each row i upward by 100 * i.
    dat = dat.astype(np.float32)
    result = np.array(
        ([dat[i, :] + (100 * i) for i in np.arange(dat.shape[0])]))
    return result


# The viewer resolves names by string lookup in globals(), so 'pointer',
# 'buffer', the data arrays and 'space_data' must stay bound at module level.
seq_v.graph_range(globals(), 'pointer', 'buffer', 'data_cor', transform_name='space_data')
seq_v.graph_range(globals(), 'pointer', 'buffer', 'data_hyp', transform_name='space_data')
seq_v.graph_range(globals(), 'pointer', 'buffer', 'data_th', transform_name='space_data')
# NOTE(review): the call below is cut off at the chunk boundary — its remaining
# arguments are not visible in this view.
seq_v.graph_range(globals(), 'pointer',
# --- Sound recording files --------------------------------------------------
rec1_part2_wav_filename = join(sounds_basic_folder, r'2019_05_06 Recording 1',
                               r'T2019-05-06_17-18-20_002.wav')
rec1_both_sounds_wav_filename = join(sounds_basic_folder, r'2019_05_06 Recording 1',
                                     r'T2019-05-06_16-47-55_full.wav')
rec2_wav_filename = join(sounds_basic_folder, r'2019_05_06 Recording 2',
                         r'T2019-05-06_17-41-22_008.wav')

# ----------------------------------------------------------------------------------------------------------------------
# SYNC STUFF
sync_sampling_freq = const.SAMPLING_FREQUENCY


def _load_sync(filename):
    """Load a sync trace stored as unsigned 16-bit and widen it to int32.

    The widening makes subsequent subtractions (baseline removal, np.diff)
    safe from unsigned wrap-around.
    """
    return np.fromfile(filename, dtype=np.uint16).astype(np.int32)


sync1 = _load_sync(rec1_sync_filename)
sync2 = _load_sync(rec2_sync_filename)
sync = _load_sync(full_sync_filename)

# Re-baseline the full trace so its minimum sits at zero, then take the
# sample-to-sample differences (used to find event edges).
sync = sync - sync.min()
sync_diff = np.diff(sync)

''' # Have a quick look sync_start = 2160000 sync_step = 60000 sv.graph_range(globals(), 'sync_start', 'sync_step', 'sync') sv.graph_range(globals(), 'sync_start', 'sync_step', 'sync_diff') '''

# ----------------------------------------------------------------------------------------------------------------------
# SOUND STUFF
rec1_part1 = wavfile.read(rec1_part1_wav_filename)
rec1_part2 = wavfile.read(rec1_part2_wav_filename)
rec2 = wavfile.read(rec2_wav_filename)
sound_sampling_freq = rec1_part1[0]  # wavfile.read returns a (rate, data) pair
# Create some arrays and constants relating to the events
camera_pulses, beam_breaks, sounds = \
    sync_funcs.get_time_points_of_events_in_sync_file(data_folder, clean=True,
                                                      cam_ttl_pulse_period=cam_ttl_pulse_period)

# Average number of sync samples between consecutive camera TTL pulses.
points_per_pulse = np.mean(np.diff(camera_pulses))
camera_frames_in_video = csv_funcs.get_true_frame_array(data_folder)
time_point_of_first_video_frame = camera_pulses[camera_frames_in_video][0]

# ----------------------------------
# Isolate the sound bit of the sync word (bit 3) and browse it interactively.
sound_bit_on = sync & 8
start = 0
step = 5000
sv.graph_range(globals(), 'start', 'step', 'sound_bit_on')

# Split detected sounds into short reward beeps and longer trial sounds,
# using their duration relative to the reward-sound cutoff.
sound_durations = np.array([sound[1] - sound[0] for sound in sounds])
reward_sounds_mask = sound_durations < reward_sound_max_duration
trial_sounds_mask = sound_durations > reward_sound_max_duration
reward_sounds = sounds[reward_sounds_mask]
trial_sounds = sounds[trial_sounds_mask]

trial_start_events = pd.read_pickle(join(events_folder, sync_funcs.event_types[9] + ".pkl"))
trial_end_events = pd.read_pickle(join(events_folder, sync_funcs.event_types[8] + ".pkl"))

# Interactive sanity checks (results intentionally discarded): offsets between
# the logged event time points and the detected sound end points.
trial_end_events['AmpTimePoints'] - np.array(trial_sounds)[:, 1]
trial_end_events[trial_end_events['Result'] == 'Food']['AmpTimePoints'] - np.array(reward_sounds)[:, 1]
'Analysis', '\Lfp', 'Downsampling', 'Amplifier_LFPs_Downsampled_x4.npy') np.save(ds_numpy_filename, downsampled_lfp) downsampled_lfp = np.load(ds_numpy_filename) '''
# NOTE(review): the line above is the tail of a triple-quoted (commented-out)
# block whose opening quotes lie before this chunk; only the code below runs.

factor = 4  # possible 3, 4, 5, 10
# Path to the LFP array previously downsampled by `factor`.
ds_numpy_filename = join(const.base_save_folder, const.rat_folder, const.date_folders[date],
                         'Analysis', 'Lfp', 'Downsampling',
                         'Amplifier_LFPs_Downsampled_x{}.npy'.format(factor))
downsampled_lfp = np.load(ds_numpy_filename)

# ----------------------------------------------------------------------------------------------------------------------
# HAVE A LOOK AT THE RAW DATA
# ----------------------------------------------------------------------------------------------------------------------
''' def space_data(dat): dat = dat.astype(np.float32) result = np.array(([dat[i, :] + (500*i) for i in np.arange(dat.shape[0])])) return result timepoint = 100000 buffer = 10000 seq_v.graph_range(globals(), 'timepoint', 'buffer', 'raw_lfp', transform_name='space_data') '''

# ----------------------------------------------------------------------------------------------------------------------
# TESTING DIFFERENT SPECTRAL DENSITY METHODS
# ----------------------------------------------------------------------------------------------------------------------
timepoint = 0

# Channel groupings: slices of 120-channel blocks along the probe.
# NOTE(review): region boundaries inferred from the slice arithmetic and the
# variable names — confirm against the probe map.
data_thal = raw_data[:4 * 120, :]
data_thal_bottom = raw_data[:2 * 120, :]
data_ca3 = raw_data[4 * 120:6 * 120, :]
data_ca1 = raw_data[6 * 120:8 * 120, :]
data_cort = raw_data[8 * 120:, :]
data_half = raw_data[:600, :]


def space(dat):
    # Presumably offsets each channel vertically (100 units per channel) for
    # stacked plotting — see cdt.space_data.
    return cdt.space_data(dat, 100)


# The viewer resolves 'timepoint', 'timepoint_step', the data arrays and
# 'space' by string lookup in globals().
sv.graph_range(globals(), 'timepoint', 'timepoint_step', 'data_thal', transform_name='space')
sv.graph_range(globals(), 'timepoint', 'timepoint_step', 'data_ca1', transform_name='space')
sv.graph_range(globals(), 'timepoint', 'timepoint_step', 'data_half', transform_name='space')

# Scratchpad values from interactive browsing; the later assignments to
# `start`/`end` deliberately overwrite the earlier ones.
frame = 265
tp = 172000
start = 172200
end = 173200
start = 793200
end = start + 1000
# Reload the previously saved downsampled LFP array.
downsampled_lfp = np.load(ds_numpy_filename)


def space_data(dat):
    # Cast to float32, then shift each row i upward by 500 * i so stacked
    # channel traces remain separable when plotted together.
    dat = dat.astype(np.float32)
    result = np.array(
        ([dat[i, :] + (500 * i) for i in np.arange(dat.shape[0])]))
    return result


timepoint = 100000
buffer = 10000
# The viewer resolves 'timepoint', 'buffer', 'downsampled_lfp' and
# 'space_data' by string lookup in globals().
seq_v.graph_range(globals(), 'timepoint', 'buffer', 'downsampled_lfp', transform_name='space_data')

# ----------------------------------------------------------------------------------------------------------------------
# RUN THE emd.py TO GENERATE THE IMFS
# ----------------------------------------------------------------------------------------------------------------------
# NOTE(review): the triple-quoted block below opens but does not close within
# this chunk; its closing quotes lie past the end of the visible source.
''' Parameters used result_dtype = np.int16 num_imfs = 13 ensemble_size = 25 noise_strength = 0.01 S_number = 20 num_siftings = 100
# Map the camera TTL pulses onto the frames that actually made it into the video.
camera_frames_in_video = csv_funcs.get_true_frame_array(data_folder)
time_point_of_first_video_frame = camera_pulses[camera_frames_in_video][0]

# ----------------------------------
# HAVE A LOOK
# Show the video gui
video_frame = 0
video_file = join(data_folder, 'Video.avi')
sv.image_sequence(globals(), 'video_frame', 'video_file')

# ----------------------------------
# Show the sync trace gui, centred on the time point of the first video frame.
sync_range = 2000
sync_point = time_point_of_first_video_frame - sync_range // 2
sv.graph_range(globals(), 'sync_point', 'sync_range', 'sync')

# ----------------------------------
# Connect the video gui to the sync trace gui: scrolling the sync trace moves
# the video to the matching frame. The function name is looked up by string,
# so 'time_point_to_frame' must stay bound at module level.


def time_point_to_frame(time_point):
    """Translate a sync time point into the corresponding video frame index."""
    return sync_funcs.time_point_to_frame(time_point_of_first_video_frame,
                                          camera_frames_in_video,
                                          points_per_pulse, time_point)


tr.connect_repl_var(globals(), 'sync_point', 'video_frame', 'time_point_to_frame')
# ----------------------------------
# Demo script for the sequence-viewer guis: make some random data, browse it,
# then connect two viewers so one drives the other.
import drop_down
import one_shot_viewer
import video_viewer

# Make some data
data1 = np.random.random((10, 10000))
for i in range(10): data1[i] += i  # offset each row so the traces stack visibly
data2 = np.random.random(500)

# Have a look at them. The viewers resolve 'position*', 'range*' and the data
# arrays by string lookup in globals().
position1 = 0
range1 = 1000
sequence_viewer.graph_range(globals(), 'position1', 'range1', 'data1')
position2 = 0
range2 = 10
sequence_viewer.graph_range(globals(), 'position2', 'range2', 'data2')

# Also you can view data in panes
pane = 0
sequence_viewer.graph_pane(globals(), 'pane', 'data1')
# The pane viewer shows the last one or two dimensions of a 2d or 3d data set and iterates over the first one.

# Connect two guis: map gui-1 positions 0..4000 onto gui-2 positions 0..400.
# NOTE(review): the function likely continues past the end of this chunk — no
# branch handles pos1 outside [0, 4000] in the visible source.
def pos1_to_pos2(pos1):
    if pos1 >= 0 and pos1 <= 4000:
        return int(pos1 / 10)