def get_trials_in_recording(exp_path, return_se=False, ignore_dprime=False,
                            se=None, suppress_dprime_error=False,
                            use_sparse=False):
    '''
    Retrieve all the trials in a recording as Trial objects.

    Parameters
    ----------
    exp_path : str
        Path to the experiment folder.
    return_se : bool, optional
        Whether to also return a StatisticExtractor object for the whole
        experiment. The default is False.
    ignore_dprime : bool, optional
        If True, return trials even when the recording's d-prime is at or
        below 1. The default is False.
    se : Recording, optional
        A pre-built StatisticExtractor/Recording to reuse; if None (and
        use_sparse is False) one is constructed from exp_path.
    suppress_dprime_error : bool, optional
        If True, return None instead of raising when d-prime is at or
        below 1. The default is False.
    use_sparse : bool, optional
        If True, build lightweight SparseTrial objects from the trial
        metadata alone (no neural frame times or licking needed).
        The default is False.

    Returns
    -------
    [Trial] or ([Trial], StatisticExtractor)
        None (or (None, None) when return_se is True) if d-prime is at or
        below 1 and suppress_dprime_error is set.

    Raises
    ------
    ValueError
        If d-prime is at or below 1 and neither ignore_dprime nor
        suppress_dprime_error is set.
    '''
    # Locate the timeline and trial-metadata files inside the experiment
    # folder (exactly one of each is expected; `item` enforces that).
    files = os.listdir(exp_path)
    timeline_path = os.path.join(
        exp_path, item([s for s in files if 'Timeline.mat' in s]))
    psychstim_path = os.path.join(
        exp_path, item([s for s in files if 'psychstim.mat' in s]))
    trials = []
    if calc_d_prime(psychstim_path) > 1 or ignore_dprime:
        # Identity comparison with None (PEP 8), not `== None`.
        if se is None and not use_sparse:
            se = Recording(exp_path)
        structs = _get_trial_structs(psychstim_path)
        if not use_sparse:
            # Frame times and lick states are only needed to build full
            # Trial objects; SparseTrial works from metadata alone.
            nframes = se.ops["nframes"]
            times = get_neural_frame_times(timeline_path, nframes)
            licks = get_lick_state_by_frame(timeline_path, times)
        for struct in structs:
            if use_sparse:
                trial = SparseTrial(struct, tolerant=False)
            else:
                trial = Trial(exp_path, struct, se, times, licks)
            trials.append(trial)
        return trials if not return_se else (trials, se)
    elif suppress_dprime_error:
        return None if not return_se else (None, None)
    else:
        raise ValueError("Dprime below 1")
import pickle as pkl

# numpy was missing in the original even though heaviside() calls np.exp,
# which would raise NameError on first use.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d import Axes3D
# scipy.ndimage.filters is a deprecated alias (removed in modern SciPy);
# the same functions live directly in scipy.ndimage.
from scipy.ndimage import minimum_filter1d, uniform_filter1d
from sklearn.linear_model import TheilSenRegressor

from accdatatools.Observations.recordings import Recording

# Load a recording, caching the (slow-to-build) Recording object on disk
# so subsequent runs can unpickle it instead of rebuilding.
CACHE_PATH = r"C:/Users/viviani/desktop/cache.pkl"
try:
    with open(CACHE_PATH, "rb") as file:
        rec = pkl.load(file)
except (FileNotFoundError, EOFError):
    # Cache absent or truncated: rebuild and write a fresh cache.
    with open(CACHE_PATH, "wb") as file:
        rec = Recording(r"D:\Local_Repository\CFEB013\2016-05-31_02_CFEB013")
        pkl.dump(rec, file)


def heaviside(X, k=200):
    '''
    Analytic approximation of the Heaviside step function.

    Approaches Heaviside(x) as k goes to +inf.

    Parameters
    ----------
    X : array_like or float
        Input value(s).
    k : float, optional
        Steepness of the sigmoid approximation. The default is 200.

    Returns
    -------
    ndarray or float
        Values in (0, 1), approximately 0 for X < 0 and 1 for X > 0.
    '''
    return 1 / (1 + np.exp(-2 * k * X))


def ramp(X):
    '''Rectifier (ReLU): return X where X > 0, else 0.'''
    return X * (X > 0)
# -*- coding: utf-8 -*-
"""
Created on Thu Oct  8 20:51:41 2020

@author: viviani

Simple script to determine what proportion of ROIs are excluded from
analysis by the iscell classification, across all recordings of one class.
"""
from accdatatools.Observations.recordings import Recording
from accdatatools.Utils.map_across_dataset import apply_to_all_recordings_of_class
import numpy as np

if __name__ == "__main__":
    # Collect the iscell vector of every recording in the class.
    ls = []
    f = lambda pth: ls.append(Recording(pth).gen_iscell())
    apply_to_all_recordings_of_class("left_only_high_contrast", "H:", f)
    all_iscells = np.concatenate(ls)
    rate_of_inclusion = np.count_nonzero(all_iscells) / len(all_iscells)
    exl = 1 - rate_of_inclusion
    # The original string lacked the f prefix, so the braces were printed
    # literally; it also misspelled "were".
    print(f"On average, {exl*100}% of ROIs were excluded from analysis.")
# os, numpy and matplotlib were all used below but never imported in the
# original, which would raise NameError on the first line of the script.
import os

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import pearsonr

from accdatatools.Utils.path import get_exp_path
from accdatatools.Observations.recordings import Recording
from accdatatools.Timing.synchronisation import (get_neural_frame_times,
                                                get_lick_state_by_frame)

# Plot the dF/F traces of the ROIs most correlated with licking for one
# experiment.
experiment_ID = "2016-10-07_03_CFEB027"
experiment_path = get_exp_path(experiment_ID, "H:\\")
suite2p_path = os.path.join(experiment_path, "suite2p", "plane0")
timeline_path = os.path.join(experiment_path,
                             "2016-10-07_03_CFEB027_Timeline.mat")

# NOTE(review): elsewhere in this project Recording is constructed from the
# experiment folder, not the suite2p subfolder -- confirm which is intended.
exp_recording = Recording(suite2p_path)
frame_times = get_neural_frame_times(timeline_path,
                                     exp_recording.ops["nframes"])
licking = get_lick_state_by_frame(timeline_path, frame_times)

# Pearson correlation of each ROI's dF/F trace with the lick state,
# then sort ROIs by that correlation (ascending).
corrs = [pearsonr(x, licking)[0] for x in exp_recording.dF_on_F]
corrs_isort = np.argsort(corrs)
to_plot = exp_recording.dF_on_F[corrs_isort]

fig, ax = plt.subplots()
lick_frame = np.nonzero(licking)
max_brightness = np.percentile(to_plot, 99)
# Show the 20 most lick-correlated ROIs, clipped for display contrast.
ax.imshow(np.clip(to_plot[-20:], -0.2, max_brightness),
          origin='lower', aspect=5)
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 28 19:23:32 2020

@author: viviani

Sweep all recordings on the drive; for recordings containing ROIs with
negative F and Fneu values, dump such an ROI's traces to CSV, otherwise
print the minimum fluorescence values observed.
"""
import numpy as np
# pandas was used below (pd.DataFrame) but never imported in the original.
import pandas as pd

from accdatatools.Observations.recordings import Recording
from accdatatools.Utils.map_across_dataset import iterate_across_recordings

for exp in iterate_across_recordings(drive="H:\\"):
    print(f"\rProcessing {exp}...")
    try:
        a = Recording(exp)
        if np.any(a.F < 0) and np.any(a.Fneu < 0):
            for i in range(a.F.shape[0]):
                # The original re-tested the whole-array condition here, so
                # the loop matched every ROI; test the individual row.
                if np.any(a.F[i] < 0) and np.any(a.Fneu[i] < 0):
                    # Original bound the class itself (`pd.DataFrame`,
                    # missing parentheses), crashing on item assignment.
                    df = pd.DataFrame()
                    df["F"] = a.F[i, :]
                    df["Fneu"] = a.Fneu[i, :]
                    # NOTE(review): a single fixed path -- each matching ROI
                    # overwrites the previous CSV; confirm if intentional.
                    df.to_csv(
                        "C:/users/viviani/desktop/single_recording_F_Fneu.csv")
        else:
            # Original f-strings were concatenated without a separating
            # space and mixed :.1f with :.1F.
            print(f"min F = {np.min(a.F).item():.1f} "
                  f"min Fneu = {np.min(a.Fneu).item():.1f}")
    except Exception as e:
        # Best-effort sweep: report the failure instead of silently
        # swallowing it (the original had a bare `pass`).
        print(f"Skipping {exp}: {e}")
    else:
        del a