Example #1
    def get_concatenated_response_matrix(self,
                                         dtype=float,
                                         sampling_rate=1000.,
                                         truncate=None):
        """Loads spike files from disk, returns concatenated responses.
        
        You must run transform_all_stimuli first, or otherwise set self.t_list,
        so that I know how to bin the spikes.
        
        truncate : if not None, discard all spikes occurring after this value;
            if None, discard all spikes beyond the end of the stimulus
            for this response
        
        Returns an array of shape (1, N_timepoints).
        """
        # Set list of filenames and error check
        self._set_list_of_files()

        # load each one and histogram
        concatenated_psths = []
        for respfile, bin_centers in zip(self.spike_file_list, self.t_list):
            # Read the spike times for this stimulus, one line per trial
            with open(respfile) as f:
                s = f.readlines()
            st = []
            for line in s:
                tmp = myutils.parse_space_sep(line, dtype=float)
                tmp = np.asarray(tmp) / sampling_rate
                if truncate is not None:
                    tmp = tmp[tmp <= truncate]
                else:
                    tmp = tmp[tmp <= bin_centers.max()]
                st.append(tmp)

            # Convert bin centers to bin edges: midpoints between consecutive
            # centers, with infinite outer edges so every spike that survived
            # the truncation above is counted
            bin_edges = bin_centers[:-1] + 0.5 * np.diff(bin_centers)
            bin_edges = np.concatenate([[-np.inf], bin_edges, [np.inf]])

            # Histogram each trial, then average across trials to form the PSTH
            counts = []
            for line in st:
                counts.append(np.histogram(line, bin_edges)[0])
            counts = np.mean(counts, axis=0)

            # Append to the growing list and check the PSTH length matches the bin grid
            concatenated_psths.append(counts)
            assert len(counts) == len(bin_centers)

        # Return the responses from this recording as one concatenated array
        self.psth_list = concatenated_psths
        return np.concatenate(concatenated_psths).astype(dtype)[np.newaxis, :]
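
The bin-edge construction above deserves a note: edges are placed at the midpoints between consecutive bin centers, and the outer edges are set to -inf/+inf so that every spike surviving the truncation step lands in some bin. A minimal standalone sketch of the trick (the values here are illustrative, not from the original data):

import numpy as np

bin_centers = np.array([0.5, 1.5, 2.5])
bin_edges = bin_centers[:-1] + 0.5 * np.diff(bin_centers)     # midpoints: [1.0, 2.0]
bin_edges = np.concatenate([[-np.inf], bin_edges, [np.inf]])  # 4 edges -> 3 bins
counts, _ = np.histogram([0.2, 1.1, 1.9, 2.7], bin_edges)
print(counts)  # [1 2 1] -- one count per original bin center

Because np.histogram returns one count per bin, the assert on len(counts) == len(bin_centers) holds by construction.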
Example #2
import myutils
from matplotlib import mlab  # NB: mlab.csv2rec was removed in matplotlib 3.x


def get_tetrode_filter(ratname=None):
    fn_d = {
        'CR12B': '/media/STELLATE/20111208_CR12B_allsessions_sorted/data_params_CR12B.csv',
        'CR17B': '/media/STELLATE/20110907_CR17B_allsessions_sorted/data_params_CR17B.csv',
        'CR13A': '/media/STELLATE/20110816_CR13A_allsessions_sorted/data_params_CR13A.csv'
        }    
    
    if ratname is None:
        # Recurse over every known rat and concatenate the per-rat lists
        pairs = []
        for r in fn_d:
            pairs += get_tetrode_filter(r)
        return pairs

    # Load the session parameters for this rat
    dp = mlab.csv2rec(fn_d[ratname])
    tetrode_filter = []
    for row in dp:
        if row['session_type'] != 'behaving':
            continue
        for t in myutils.parse_space_sep(row['auditory_tetrodes']):
            tetrode_filter.append((row['session_name'], t))    
    return sorted(tetrode_filter)
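
A minimal usage sketch, assuming the hard-coded CSV files exist at the paths above (the rat name is one of the keys of fn_d; the return shape follows from the code):

# Sorted (session_name, tetrode_number) pairs for one rat
pairs = get_tetrode_filter('CR12B')

# All rats: the per-rat sorted lists are concatenated in dictionary order,
# so the combined list is not globally re-sorted
all_pairs = get_tetrode_filter()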