Example #1
def load_stim(rec):
    try:    # data already loaded?
        stim = rec.stim
    except AttributeError:  # load data
        infofile = os.path.join(rec.i.stim_path,'stim_info.inf')
        info = D.Info(filename=infofile,translation=translation)

        # load and parse meta data
        filename_prefix = info.get('filename_prefix', r'.+\.s')  # load all stimuli if no prefix is given
        infofiles = U.parse_dir(rec.i.stim_path,prefix=filename_prefix,suffix='inf')
        infofile = os.path.join(rec.i.stim_path,infofiles[rec.i.cond_id])
        moreinfo = D.Info(filename=infofile,translation=translation)
        info.update(moreinfo)

        #
        # this is incomplete; the parser has to deal with vision egg files
        # and extract i.t_trial, i.t_start, i.start_times
        #
        # start_times = frm.select(rec.info["frm_ind"],rec.info["frm_skip"])
        # if hasattr(rec.info,"t_trial"):
        #     if len(start_times) > 1:
        #         rec.info["t_trial"] = round(N.diff(start_times)[0])
        # rec.info["t_start"] = start_times[0]
        # rec.info["start_times"] = start_times

        # load raw stimulus data
        fn_stim = os.path.join(rec.i.stim_path,info.frm_filename)
        stim = D.Stimulus(DL.load_events(fn_stim,dtype_t=info.timestamp_datatype,
                                         dtype_v=info.value_datatype))
    
        # convert data into SI units
        if hasattr(info,'units'):
            DL.raw_to_SI(stim,scalefactor=info.units_multiplier,offset=0)
        else:
            info.units = 'raw'

        # cut din
        if hasattr(rec.i, 't_start') and hasattr(rec.i, 't_stop'):
            stim = stim[D.Epochs((rec.i.t_start, rec.i.t_stop))]
        stim.i.update(info)
        stim.i.id = rec.i.cond_id

        # in case of movies with identical repeats: change frame index
        if hasattr(stim.i,'nrepeats'):
            if stim.i.nrepeats != stim.i.nruns:
                max_frm = stim.i.nsweeps/stim.i.nrepeats
                stim.v = N.mod(stim.v,max_frm)
        else:
            stim.i.nrepeats = stim.i.nruns
            
        # compute framerate
        if 'sweeptimeMsec' in stim.i:  # temporary - all exported times should be in s
            stim.i.framerate = 1000. / stim.i.sweeptimeMsec
        else:
            stim.i.framerate = 1. / stim.i.sweeptimeSec

        rec.stim = stim
    return stim
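
The try/except above makes the loader idempotent: the first call parses the .inf files and loads the raw events, later calls return the cached attribute. A minimal usage sketch, assuming rec is a recording object like the one constructed in Example #7 (the constructor name and ids here are made up):

rec = Recording('04', '001', '87', cond_id=0)  # hypothetical class wrapping Example #7
stim = load_stim(rec)   # first call: full parse and load
stim = load_stim(rec)   # second call: returns rec.stim from the cache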
Example #2
def load_m(rec):
    import re
    params = D.Info()
    fn = os.path.join(rec.i.rec_path,rec.i.rec_id+'.m')
    matchstr = re.compile(r"""\s*(\w+)\s*=\s*([\d\.-]+)\s*;""",re.VERBOSE)
    with open(fn, 'r') as fil:
        for key, value in matchstr.findall(fil.read()):
            # store integer-valued parameters as int, the rest as float
            if int(float(value)) == float(value):
                params[key] = int(float(value))
            else:
                params[key] = float(value)
    params.is_movie = ('startfilen' in params.keys())
    params.is_discs = ('out_diameter' in params.keys())
    params.year = int('20'+rec.i.cell_id[:2])
    if params.is_movie:
        params.in_terms = params.endfilen-params.startfilen+1
        params.out_terms = params.preterm
        params.f_rate = get_framerate(params.refreshrate,params.frmperterm)
        params.lost_time = float(1./params.f_rate)
        params.trial_terms = params.in_terms+params.out_terms
        params.start_terms = params.out_terms
        params.t_trial = float(params.trial_terms)/params.f_rate
        params.t_start = float(params.start_terms)/params.f_rate
    if params.is_discs:
        params.in_terms = params.sigtime
        params.out_terms = params.pretime
        params.lost_time = float(params.out_terms)
        params.t_trial = float(params.in_terms+2*params.out_terms)
        params.t_start = float(params.out_terms)
    return params
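
A quick illustration of what the key/value regex above extracts; the file contents below are made up, but the pattern and output are exactly what re.findall produces:

import re

text = """
refreshrate = 120;
startfilen = 1;
endfilen = 300;
"""
matchstr = re.compile(r"""\s*(\w+)\s*=\s*([\d\.-]+)\s*;""", re.VERBOSE)
print matchstr.findall(text)
# [('refreshrate', '120'), ('startfilen', '1'), ('endfilen', '300')]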
Example #3
    def __init__(self, celldate=None, cellid=None, ch_ret=None):
        self.i = D.Info()
        self.i.celldate = celldate
        self.i.cellid = cellid
        self.i.ch_ret = ch_ret
        self.i.t_frame = 0.007139184

        # set path variables
        self.i.data_path = os.path.join(datapath)
        self.i.rec_path = os.path.join(self.i.data_path, 'OpticTractData',
                                       celldate)
        self.i.rec_filenames = U.parse_dir(self.i.rec_path,
                                           suffix='mat',
                                           key_type=str)
        self.i.movie_path = os.path.join(self.i.data_path, 'stimulus')
        # find recording id
        rec_ids = filter(lambda k: cellid in k, self.i.rec_filenames.keys())
        if len(rec_ids) != 1:
            print '%d recordings found' % len(rec_ids)
            self.i.rec_id = None
        else:
            self.i.rec_id = rec_ids[0]

        # create empty containers for spikes, lfps, etc.
        # self.spks=D.Container() # dict contains spike timings
        # self.frm=D.Container() # dict contains auditory stimuli

        if self.i.rec_id is not None:
            self.load()
Example #4
def load_movie(rec, filename=None, varname=None):
    # load m-sequence movie
    try:  # data already loaded?
        movie = rec.movie
    except AttributeError:  # load data
        import scipy.io as io
        fn_movie = os.path.join(rec.i.movie_path, filename)
        mat = io.loadmat(fn_movie)
        # self.mat = mat

        info = D.Info()
        for key in ['__header__', '__version__']:
            info[key] = mat[key]

        movie = D.Image(mat[varname])
        nframes = movie.shape[0]
        if len(movie.shape) == 2:
            nx = int(N.sqrt(movie.shape[1]))
            movie = N.reshape(movie, (nframes, nx, nx))
        nframes, nx, ny = movie.shape

        # update movie info
        movie.i.update(info)
        movie.i.nx = nx
        movie.i.ny = ny
        movie.i.nframes = nframes
        movie.i.filename = fn_movie

    rec.movie = movie

    return movie
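
The reshape step assumes square frames stored as flat rows (nframes x npixels); a minimal numpy illustration with made-up sizes:

import numpy as N

flat = N.zeros((10, 16 * 16))            # 10 frames of 256 pixels each
nx = int(N.sqrt(flat.shape[1]))          # 16, the assumed square edge length
movie = N.reshape(flat, (flat.shape[0], nx, nx))
print movie.shape                        # (10, 16, 16)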
Example #5
def load_lfp(rec,id_lfp=None):
    try:
        filenames = rec.lfp.filenames
        info = rec.lfp.i
    except AttributeError:  # parse directory
        infofile = os.path.join(rec.i.lfp_path,'lfp_info.inf')
        info = D.Info(filename=infofile,translation=translation)
        # default parameters
        params = dict(filename_prefix=None,filename_suffix='lfp')
        params.update(info)
        # get filenames
        filenames = U.parse_dir(rec.i.lfp_path,
                                prefix=params['filename_prefix'],
                                suffix=params['filename_suffix'])
        rec.lfp.filenames = filenames
        rec.lfp.i = info
        
    if id_lfp is not None:
        try:    # data already loaded?
            lfp = rec.lfp[id_lfp]
        except KeyError:  # load data
            fn_lfp = os.path.join(rec.i.lfp_path,filenames[id_lfp])            

            # load raw data
            lfp = DL.load_time_series(fn_lfp,samplingrate=info.samplingrate,
                                      dtype=info.datatype,
                                      nsamples=info['nsamples'])

            # convert data into SI units
            if hasattr(info,'units'):
                zero = 2**(info.resolution-1)
                lfp_key = re.sub(r'\.\w+$', '', filenames[id_lfp])
                if isinstance(info.units_multiplier,dict):
                    units_multiplier = info.units_multiplier[lfp_key]
                else:
                    units_multiplier = info.units_multiplier
                DL.raw_to_SI(lfp,scalefactor=units_multiplier,offset=zero)
            else:
                info.units = 'raw'
                
            # cut lfp
            if hasattr(rec.i, 't_start') and hasattr(rec.i, 't_stop'):
                lfp = lfp[D.Epochs((rec.i.t_start, rec.i.t_stop))]
            lfp.i.update(info)
            lfp.i.id_lfp=id_lfp
            if info.units != 'raw':
                lfp.i.units_multiplier = units_multiplier
            rec.lfp[id_lfp] = lfp
            return lfp
    else: # load all of them
        id_lfp = filenames.keys()
        for i in id_lfp:
            rec.load_lfp(i)
Example #6
def routes():
    import data
    p = paths()
    endpoints = data.Endpoints()
    id_delete = data.IDDelete()
    id_files = data.IDFiles()
    id_results = data.IDResults()
    ids = data.IDs()
    info = data.Info()
    logs = data.Logs()
    funcs = [endpoints, id_delete, id_files, id_results, ids, info, logs]
    return dict(zip(p, funcs))
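
routes() relies on paths() returning URL patterns in the same order as the handler list, so zip pairs them up positionally. A hedged sketch of how the mapping might be consumed (the handler interface is an assumption here):

handlers = routes()
for path, handler in handlers.items():
    print path, type(handler).__name__   # which handler serves which path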
Example #7
    def __init__(self,cat_id,track_id,rec_id,cond_id=0):
        info = D.Info()
        info.cat_id = cat_id
        info.track_id = track_id
        info.rec_id = rec_id
        info.cond_id = cond_id  # stimulus condition

        # set path variables
        info.rec_path = os.path.join(datapath, 'cat_%s' % cat_id,
                                     'track_%s' % track_id, 'rec_%s' % rec_id)
        info.rec_name = 'cat_%s-track_%s-rec_%s-%d'%(
            cat_id,track_id,rec_id,cond_id) #unique name
        info.infofile = os.path.join(info.rec_path,'rec_info.inf')
        info.cache_path = os.path.join(cachepath,info.rec_name)
        info.spk_path = os.path.join(info.rec_path,'spk_data')
        info.lfp_path = os.path.join(info.rec_path,'lfp_data')
        info.stim_path = os.path.join(info.rec_path,'stim_data')
        info.fig_path = os.path.join(info.rec_path,'figures')
        info.res_path = os.path.join(info.rec_path,'results')

        # parse rec_info file
        self.i = D.Info(filename=info.infofile,translation=translation)
        # add path variables
        self.i.update(info)
        if hasattr(self.i,'rec_epochs'):
            (self.i.t_start,self.i.t_stop) = self.i.rec_epochs[cond_id]

        # create empty containers for spikes, lfps, etc.
        self.spk=D.Container() # dict contains spike timings
        self.lfp=D.Container() # dict contains LFPs
        self.comments={} # dict contains comments during data collection
        
        # initialize cache
        try:
            U.make_dir(cachepath)
        except Exception:
            raise IOError("could not create cache dir %s" % cachepath)
Example #8
    def __init__(self, cellid=None):
        self.i = D.Info()
        # set path variables
        self.i.rec_path = os.path.join(datapath, 'birdsong', 'fieldL_data')
        self.i.rec_filenames = U.parse_dir(self.i.rec_path,
                                           suffix='mat',
                                           key_type=str)
        # create empty containers for spikes, lfps, etc.
        self.spks = D.Container()  # dict contains spike timings
        self.resp = D.Container()  # dict contains averaged response
        self.stim = D.Container()  # dict contains auditory stimuli

        if cellid is not None:
            self.load(cellid)
Example #9
def routes():
    import data
    endpoints = data.Endpoints()
    p = paths()
    id_files = data.Id()
    ids = data.Ids()
    info = data.Info()
    raw = data.Raw()
    results = data.Results()
    status = data.Status()
    stop = data.Stop()
    upload = data.Upload()
    tools = data.Tools()
    funcs = [endpoints, id_files, ids, info, raw, results, status, stop, tools, upload]
    return dict(zip(p, funcs))
Example #10
def load_spk(rec,id_spk):
    try:
        filenames = rec.spk.filenames
        info = rec.spk.i
    except AttributeError:  # parse directory
        infofile = os.path.join(rec.i.spk_path,'spk_info.inf')
        info = D.Info(filename=infofile,translation=translation)
        # default parameters
        params = dict(filename_prefix=None,filename_suffix='spk')
        params.update(info)
        # get filenames
        filenames = U.parse_dir(rec.i.spk_path,
                                prefix=params['filename_prefix'],
                                suffix=params['filename_suffix'])
        rec.spk.filenames = filenames
        rec.spk.i = info
        
    if id_spk is not None:
        try:    # data already loaded?
            spk = rec.spk[id_spk]
        except KeyError:  # load data
            fn_spk = os.path.join(rec.i.spk_path,filenames[id_spk])

            # load raw data
            spk = DL.load_event_timings(fn_spk,dtype=info.datatype)
            
            # convert data into SI units
            if hasattr(info,'units'):
                DL.raw_to_SI(spk,scalefactor=info.units_multiplier,offset=0)
            else:
                info.units = 'raw'

            # cut spk
            if hasattr(rec.i, 't_start') and hasattr(rec.i, 't_stop'):
                spk = spk[D.Epochs((rec.i.t_start, rec.i.t_stop))]
            spk.i.update(info)
            spk.i.id = id_spk
            rec.spk[id_spk] = spk
        return spk
    else: # load all of them
        id_spk = filenames.keys()
        for i in id_spk:
            rec.load_spk(i)
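
load_spk above and load_lfp (Example #5) share the same two-level pattern: parse the directory once, then lazily load and cache each channel on first access. A stripped-down sketch of that pattern with hypothetical names (illustrative only, not part of the codebase):

def load_cached(container, key, loader):
    # generic cache-then-load helper
    try:
        return container[key]            # data already loaded?
    except KeyError:
        container[key] = loader(key)     # load and cache on first access
        return container[key]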
Example #11
    def __init__(self,cell_id,rec_id,**kargs):
        defaultargs = dict(load_tic=True,load_tmg=True,load_sig=False,split=True,
                           neg=False,version=0,cc=True)
        defaultargs.update(kargs)

        self.i = D.Info(defaultargs)
        self.i.rec_id = rec_id
        self.i.cell_id = cell_id
        self.i.rec_path = os.path.join(datapath,cell_id)
        try:
            self.i.update(load_m(self))
        except Exception:
            print 'warning: could not load m-file'
        if self.i.load_tic:
            try:
                load_tic(self)
            except Exception:
                print "warning: could not load stimulus time stamps"
        if self.i.load_tmg:
            try:
                self.load_spk()
            except Exception:
                print "warning: could not load spk timings"
            if self.i.cc:
                try:
                    self.load_psp()
                except Exception:
                    print "warning: could not load epsp timings"
            else:
                try:
                    self.load_psc()
                except Exception:
                    print "warning: could not load epsc timings"
        if self.i.load_sig:
            try:
                self.load_sig()
            except Exception:
                print "warning: could not load signal"
        if self.i.split:
            try:
                self.split_trials()
            except Exception:
                print "warning: could not split trials"
Example #12
    def load(self):
        '''
        loads spike and stimulus data

        '''
        import scipy.io as io
        fn = os.path.join(self.i.rec_path, self.i.rec_filenames[self.i.rec_id])
        mat = io.loadmat(fn)
        self.mat = mat

        info = D.Info()
        for key in ['__header__', '__version__']:
            info[key] = mat[key]
        self.i.update(info)

        self.i.event_ids = filter(lambda k: self.i.rec_id in k, mat.keys())
        for event_id in self.i.event_ids:
            evt = D.Events(mat[event_id].times)
            evt.i.units = 's'
            # retina
            if self.i.ch_ret is not None:
                if re.match('.*Ch%d$' % self.i.ch_ret, event_id):
                    self.ret = evt
            else:
                if 'Cortex' in mat[event_id].title:
                    self.ret = evt
            # stimulus
            if 'Trigger' in mat[event_id].title:
                self.frm = evt
                # fix trigger in case of m_sequence
                if len(self.frm) == 327669:  # 10 m-sequences
                    print 'adding trigger t0'
                    self.frm = D.Events(N.concatenate(([0], evt)), i=evt.i)
                if len(self.frm) == 10:
                    print 'inserting triggers'
                    t_frame = self.i.t_frame
                    self.frm = D.Events(N.concatenate(
                        [N.arange(32767) * t_frame + ti for ti in evt]),
                                        i=evt.i)
                self.i.start_times = self.frm[::32767]
                self.i.t_trial = N.diff(self.i.start_times)[0]
                self.i.epochs = DL.epochs_from_onsets(self.i.start_times,
                                                      self.i.t_trial)
Example #13
def denoise_time_series(rec, id_lfp=None):
    # load stimulus in order to get framerate etc.
    rec.load_stim()

    freqs_refresh = [rec.stim.i.refreshrate]
    print freqs_refresh
    freqs_frame = [rec.stim.i.framerate,2*rec.stim.i.framerate,3*rec.stim.i.framerate]
    print freqs_frame
    noise = D.Info(freqs_line=[60,120,180,240,300,360],sf_line=.075,
                   freqs_refresh=freqs_refresh,sf_refresh=.01,
                   freqs_frame=freqs_frame,sf_frame=.025)

    if id_lfp is None:
        ids = rec.lfp.keys() # denoise all lfp channels
    else:
        ids = [id_lfp]

    for id_lfp in ids:
        rec.lfp[id_lfp] = DSP.denoise_time_series(rec.lfp[id_lfp],**noise)

    if id_lfp is not None:
        return rec.lfp[id_lfp]
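
The denoising targets mains hum (60 Hz and harmonics) plus the monitor refresh and frame rates read from the stimulus info. A minimal usage sketch, assuming the lfp channels load as in Example #5 (the channel id 0 is made up):

rec.load_lfp(0)                                  # make sure the channel is loaded
lfp_clean = denoise_time_series(rec, id_lfp=0)   # notch-filter and return channel 0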
Example #14
    def load(self):
        '''
        loads spike and stimulus data

        '''
        import scipy.io as io
        fn = os.path.join(self.i.rec_path, self.i.rec_filenames[self.i.rec_id])
        mat = io.loadmat(fn)
        # self.mat = mat

        info = D.Info()
        for key in ['__header__', '__version__']:
            info[key] = mat[key]
        self.i.update(info)

        self.i.event_ids = filter(lambda k: self.i.rec_id in k, mat.keys())
        for event_id in self.i.event_ids:
            evt = D.Events(mat[event_id].times)
            evt.i.units = 's'
            # lgn
            if self.i.ch_lgn is not None:
                if re.match('.*Ch%d$' % self.i.ch_lgn, event_id):
                    self.lgn = evt
            else:
                if 'LGN' in mat[event_id].title:
                    self.lgn = evt
            # retina
            if self.i.ch_ret is not None:
                if re.match('.*Ch%d$' % self.i.ch_ret, event_id):
                    self.ret = evt
            else:
                if 'Cortex' in mat[event_id].title:
                    self.ret = evt
            # stimulus
            if 'Trigger' in mat[event_id].title:
                self.frm = evt
Example #15
    def load(self, cellid=None):
        '''
        loads stimulus timing data

        Input:
          cellid - an id that identifies the cell recording to load
          
        '''
        import scipy.io as io
        fn_stim = os.path.join(self.i.rec_path, self.i.rec_filenames[cellid])
        mat = io.loadmat(fn_stim)

        info = D.Info()
        for key in ['__header__', '__version__', 'cellid']:
            info[key] = mat[key]
        self.i.update(info)
        self.vstim = D.TimeSeries(mat['vstim'], 32 * 10**3, info=info)

        for i, trial in enumerate(mat['est_data']):
            self.stim[i] = D.TimeSeries(trial.stim, 32 * 10**3, info=info)
            self.spks[i] = D.TimeSeries(trial.resp_raw.T, 10**3, info=info)
            self.resp[i] = D.TimeSeries(trial.resp, 10**3, info=info)