Code example #1
File: build_db.py  Project: jmxpearson/hephys
    def import_censor(self, ftup):
        # ftup = (patient, dataset, channel)
        pdir = 'patient' + str(ftup[0]).zfill(3)
        fname = (str(ftup[0]) + '.' + str(ftup[1]) + '.plx' + str(ftup[2]) + 
            '_censoring.mat')
        fullname = self.datadir + pdir + '/' + fname
        excludes = sio.loadmat(fullname)['excludes'].round(3)

        if excludes.size != 0:
            # drop exclude ranges that make no sense (stop before start);
            # np.where returns a tuple, so take the row indices
            badrng = np.where(np.diff(excludes, axis=1) < 0)[0]
            excludes = np.delete(excludes, badrng, axis=0)

            # get data ready
            ddict = {'patient': ftup[0], 'dataset': ftup[1],
                     'channel': ftup[2], 'start': excludes[:, 0],
                     'stop': excludes[:, 1]}

            df = pd.DataFrame(ddict)
         
            target = 'censor/' + make_path(*ftup) 
            self.write_to_db(target, df)
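
A hypothetical driver sketch, assuming ftup is a (patient, dataset, channel) tuple and that the enclosing class (its name is not shown here) supplies self.datadir, make_path, and write_to_db:

    # hypothetical usage; the class name and data layout are assumptions
    importer = Importer(datadir='/data/')
    for ftup in [(18, 1, 5), (18, 1, 6)]:  # (patient, dataset, channel)
        # reads e.g. /data/patient018/18.1.plx5_censoring.mat
        importer.import_censor(ftup)
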
Code example #2
File: build_db.py  Project: jmxpearson/hephys
    def import_lfp(self, ftup):
        pdir = 'patient' + str(ftup[0]).zfill(3)
        fname = str(ftup[0]) + '.' + str(ftup[1]) + '.plx' + str(ftup[2]) + '.mat'
        fullname = self.datadir + pdir + '/' + fname
        with h5py.File(fullname, 'r') as dset:
            dat = dset['data'][()].squeeze()    # .value was removed in h5py 3.0
            sr = dset['srlfp'][()].squeeze()

        desired_rate = 200.  # desired final sampling rate (Hz)
        decfrac = int(sr / desired_rate)
        vv = decimate(dat, decfrac)  # scipy.signal.decimate: anti-alias filter + downsample
        sr = sr / decfrac
        dt = (1. / sr).round(3)

        times = (np.arange(0, vv.size) * dt).round(3).squeeze()
        ddict = {'patient': ftup[0], 'dataset': ftup[1],
                 'channel': ftup[2], 'time': times,
                 'voltage': vv.squeeze()}  # vv is a plain ndarray; .values would fail

        df = pd.DataFrame(ddict)
     
        target = 'lfp/' + make_path(*ftup) 
        self.write_to_db(target, df)
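
The heart of import_lfp is the resampling step: scipy.signal.decimate low-pass filters before downsampling, so the decimated trace is alias-free. A self-contained sketch of the same 200 Hz reduction on synthetic data (the 1 kHz source rate is an assumption for illustration):

    import numpy as np
    from scipy.signal import decimate

    sr = 1000.                           # assumed original sampling rate (Hz)
    t = np.arange(0, 2., 1. / sr)        # two seconds of signal
    x = np.sin(2 * np.pi * 5 * t)        # 5 Hz sine, well below the new Nyquist
    decfrac = int(sr / 200.)             # integer decimation factor -> 5
    y = decimate(x, decfrac)             # anti-alias filter + downsample
    dt = round(1. / (sr / decfrac), 3)   # new sample spacing: 0.005 s
    times = np.arange(y.size) * dt       # timestamps for the decimated trace
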
Code example #3
File: build_db.py  Project: jmxpearson/hephys
    def import_spikes(self, ftup):
        pdir = 'patient' + str(ftup[0]).zfill(3)
        fname = ('times_' + str(ftup[0]) + '.' + str(ftup[1]) + '.plx' + 
            str(ftup[2]) + '.mat')
        fullname = self.datadir + pdir + '/' + fname
        dat = sio.loadmat(fullname)['cluster_class']
        
        unit = dat[:, 0].astype('int')
        times = np.around(dat[:, 1] / 1000., decimals=3)  # times are originally in ms
        sortord = np.argsort(times)  # spikes aren't always time-sorted
        times = times[sortord]
        unit = unit[sortord]
        
        # restrict to the requested unit (ftup[3])
        valid = unit == ftup[3]
        times = times[valid]
        unit = unit[valid]

        ddict = {'patient': ftup[0], 'dataset': ftup[1],
                 'channel': ftup[2], 'unit': unit, 'time': times}
        df = pd.DataFrame(ddict)

        target = 'spikes/' + make_path(*ftup)
        self.write_to_db(target, df)
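
cluster_class packs the unit label in column 0 and the spike time (in ms) in column 1, so the sort-then-filter logic is plain numpy. A minimal sketch on synthetic data:

    import numpy as np

    # synthetic cluster_class: column 0 = unit label, column 1 = time (ms)
    dat = np.array([[1, 2500.], [2, 1200.], [1, 800.], [1, 3100.]])
    unit = dat[:, 0].astype('int')
    times = np.around(dat[:, 1] / 1000., decimals=3)  # ms -> s

    sortord = np.argsort(times)  # enforce time order
    times, unit = times[sortord], unit[sortord]

    keep = unit == 1             # restrict to one sorted unit
    print(times[keep])           # [0.8 2.5 3.1]
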
Code example #4
File: build_db.py  Project: jmxpearson/hephys
    def import_events(self, ftup):
        trial_variables, events = self._grab_matlab_events(ftup)
        evt = self._grab_plexon_events(ftup)

        # get number of trials
        numtrials = max(events.index) + 1

        # was this an FHC recording? If so, there are no Plexon stamps:
        # all events were dumped into the first or second slot, so the
        # other slots should have few timestamps
        isFHC = (evt[2].size < 10)

        if isFHC:  # line up events with phys
            # for now, we kludge this by just setting the clocks to be equal
            # at task start and not worrying about drift
            num_events = [len(e) for e in evt]  # map() would give an iterator in Py3
            startcode = np.argmax(num_events)  # these should be trial starts

            # get time of first FHC event
            FHC_start = evt[startcode][0].round(3).squeeze()

            # compensate for offset
            all_events = events.stack().sort_values()  # Series.sort() no longer exists
            ephys_offset = (FHC_start - all_events.values[0]).round(3)
            events['time'] += ephys_offset

        else:  # if we have Plexon events, use them
            startcode = self.plx_codes['trial_start']
            stopcode = self.plx_codes['trial_over']

            # trial start -- sometimes a spurious event marks recording onset
            if evt[startcode].shape[0] != numtrials: 
                evt[startcode] = evt[startcode][1:]

            # trial stop -- when last trial aborted, may not be present
            if evt[stopcode].shape[0] != numtrials: 
                evt[stopcode] = np.append(evt[stopcode], np.nan)

            for var in self.plx_codes:
                this_selection = events['event'] == var
                # use .loc: chained indexing may assign to a copy
                events.loc[this_selection, 'time'] = \
                    evt[self.plx_codes[var]].round(3).squeeze()

        # try to make events into columns: this fails when an event can occur
        # more than once per trial; in that case, keep each event as a row
        # and perform a join instead
        if self.flatten_events:
            # now merge task variables and events 
            df = events.join(trial_variables)
        else: 
            # make event names column names
            events = events.set_index('event', append=True).unstack()
            # get rid of multi-index labeling
            events.columns = pd.Index([e[1] for e in events.columns])
            # now merge task variables and events 
            df = pd.concat([trial_variables, events], axis=1)

        df['patient'] = ftup[0]
        df['dataset'] = ftup[1]

        # do some final tidying
        df = df[df['result'] != 'aborted']  # get rid of aborts
        target = 'events/' + make_path(*ftup[:-1]) 
        self.write_to_db(target, df)
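
The set_index('event', append=True).unstack() step is what pivots one-row-per-event data into one-column-per-event data before the concat. A minimal sketch, assuming events holds a trial-indexed frame like the one in the non-flattened branch:

    import pandas as pd

    # one row per (trial, event), trial number as the index
    events = pd.DataFrame({'event': ['trial_start', 'trial_over'] * 2,
                           'time': [0.1, 1.2, 1.5, 2.7]},
                          index=[0, 0, 1, 1])
    wide = events.set_index('event', append=True).unstack()
    wide.columns = pd.Index([e[1] for e in wide.columns])  # drop multi-index labels
    print(wide)  # one column per event name, one row per trial
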