Example #1
import numpy as np
import tables as tb
from six import string_types
# open_file and convert_dtype are helpers defined elsewhere in the original
# module; they are not standard-library functions.

def add_recording_in_kwd(kwd,
                         recording_id=0,
                         downsample_factor=None,
                         nchannels=None,
                         nsamples=None,
                         data=None):
    if isinstance(kwd, string_types):
        kwd = open_file(kwd, 'a')
        to_close = True
    else:
        to_close = False

    if data is not None:
        nsamples, nchannels = data.shape

    recording = kwd.createGroup('/recordings', str(recording_id))
    recording._f_setAttr('downsample_factor', downsample_factor)

    dataset = kwd.createEArray(recording,
                               'data',
                               tb.Int16Atom(), (0, nchannels),
                               expectedrows=nsamples)

    # Add raw data.
    if data is not None:
        assert data.shape[1] == nchannels
        data_int16 = convert_dtype(data, np.int16)
        dataset.append(data_int16)

    kwd.createGroup(recording, 'filter')
    # TODO: filter
    if to_close:
        kwd.close()

    return kwd
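
A minimal usage sketch for the function above, assuming a writable .kwd HDF5 file and the legacy PyTables API the example uses; the file name, shape, and values are illustrative assumptions, not taken from the original project:

import numpy as np

# Hypothetical call: append 20000 samples of 32-channel data as recording 0.
data = (np.random.randn(20000, 32) * 100).astype(np.int16)
add_recording_in_kwd('experiment.kwd', recording_id=0,
                     downsample_factor=1, data=data)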
Example #2
def add_recording_in_kwd(kwd, recording_id=0,
                         downsample_factor=None, nchannels=None, 
                         nsamples=None, data=None):
    if isinstance(kwd, string_types):
        kwd = open_file(kwd, 'a')
        to_close = True
    else:
        to_close = False
    
    if data is not None:
        nsamples, nchannels = data.shape
    
    recording = kwd.createGroup('/recordings', str(recording_id))
    recording._f_setAttr('downsample_factor', downsample_factor)
    
    dataset = kwd.createEArray(recording, 'data', 
                               tb.Int16Atom(), 
                               (0, nchannels), expectedrows=nsamples)
    
    # Add raw data.
    if data is not None:
        assert data.shape[1] == nchannels
        data_int16 = convert_dtype(data, np.int16)
        dataset.append(data_int16)
            
    kwd.createGroup(recording, 'filter')
    # TODO: filter
    if to_close:
        kwd.close()
    
    return kwd
Example #3
def add_recording_in_kwd(kwd,
                         recording_id=0,
                         downsample_factor=None,
                         nchannels=None,
                         nsamples=None,
                         data=None,
                         name=None,
                         sample_rate=None,
                         start_time=None,
                         start_sample=None,
                         bit_depth=None,
                         band_high=None,
                         band_low=None,
                         filter_name=''):
    if isinstance(kwd, string_types):
        kwd = open_file(kwd, 'a')
        to_close = True
    else:
        to_close = False

    if data is not None:
        nsamples, nchannels = data.shape

    try:
        recording = kwd.createGroup('/recordings', str(recording_id))
    except tb.NodeError:
        if to_close:
            kwd.close()
        return kwd
    recording._f_setAttr('downsample_factor', downsample_factor)

    dataset = kwd.createEArray(recording,
                               'data',
                               tb.Int16Atom(), (0, nchannels),
                               expectedrows=nsamples)

    # Add raw data.
    if data is not None:
        assert data.shape[1] == nchannels
        data_int16 = convert_dtype(data, np.int16)
        dataset.append(data_int16)

    # Add filter info.
    fil = kwd.createGroup(recording, 'filter')
    fil._f_setAttr('name', filter_name)

    # Copy recording info from kwik to kwd.
    recording._f_setAttr('name', name)
    recording._f_setAttr('start_time', start_time)
    recording._f_setAttr('start_sample', start_sample)
    recording._f_setAttr('sample_rate', sample_rate)
    recording._f_setAttr('bit_depth', bit_depth)
    recording._f_setAttr('band_high', band_high)
    recording._f_setAttr('band_low', band_low)

    if to_close:
        kwd.close()

    return kwd
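
The extended variant above also copies per-recording metadata into HDF5 attributes; a hedged call sketch (every value below is an illustrative assumption):

# Hypothetical metadata; real values would come from the matching .kwik file.
add_recording_in_kwd('experiment.kwd', recording_id=1,
                     downsample_factor=1, data=data,
                     name='session-1', sample_rate=20000.,
                     start_time=0., start_sample=0, bit_depth=16,
                     band_low=500., band_high=9500.,
                     filter_name='butterworth')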
Example #4
    def load(self, fname, run_idx, seq, load_idx_rows, load_idx_cols,
             file_content, tmp_data, pos_idxs):
        """Load the data.

        Args:
        fname: The name of the file containing the data to be loaded.
        run_idx: The run currently looked at (not the actual run number but
                 the index in the overall run list). This is needed to get
                 the corresponding preprocessing information.
        seq: The sequence number to be loaded.
        load_idx_rows: Load only the data of these rows.
        load_idx_cols: Load only the data of these columns.
        file_content: All metadata corresponding to the data.
        tmp_data: Array into which the loaded data is stored.
        pos_idxs: Which parts of the data should be loaded (which rows and
                  columns); load and store positions are the same.
        """

        self.pos_idxs = pos_idxs

        # fill in the wildcard for the module position
        fname = glob.glob(fname)[0]

        # load data
        with h5py.File(fname, "r") as f:
            idx = (Ellipsis, load_idx_rows, load_idx_cols)
            raw_data = f[self._path['data']][idx]

        utils.check_data_type(raw_data)

        # CFEL stores the data as int16 whereas XFEL stores it as uint16;
        # use one common type (uint16) because ADC values should also be
        # positive.
        # convert_dtype is assumed to return the converted array (it is used
        # that way in the other examples above), so capture the result.
        raw_data = utils.convert_dtype(raw_data, np.uint16)

        print("raw_data.shape", raw_data.shape)
        print("self._raw_shape", self._raw_shape)
        self.get_seq_number(file_content[self._path['seq_number']])
        self.get_frame_loss_indices()
        self.fillup_frame_loss(tmp_data, raw_data)
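
The core of load() is reading a row/column subset of an HDF5 dataset in one indexed read; a self-contained sketch of that pattern (file name, dataset path, and shapes are assumptions; the project's real files differ):

import h5py
import numpy as np

# Toy file with the (frames, rows, cols) layout the method expects.
with h5py.File('toy.h5', 'w') as f:
    f['data'] = np.arange(4 * 8 * 8, dtype=np.uint16).reshape(4, 8, 8)

load_idx_rows = slice(2, 6)   # read rows 2..5 only
load_idx_cols = slice(0, 4)   # read cols 0..3 only
with h5py.File('toy.h5', 'r') as f:
    raw_data = f['data'][(Ellipsis, load_idx_rows, load_idx_cols)]
print(raw_data.shape)         # -> (4, 4, 4)

Note that h5py allows at most one list-based fancy index per read, so slices are used here; index lists for both rows and columns would require two reads or numpy indexing after the read.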
Example #5
def read_next_spike(self):
    # Read one spike from the current shank: its cluster, time, features,
    # waveforms (filtered and unfiltered) and mask, if present.
    if self.spike >= self.klusters_data[self.shank]['nspikes']:
        return {}
    data = self.klusters_data[self.shank]
    read = {}
    read['cluster'] = data['aclu'][self.spike]
    fet = next(data['fet'])
    read['time'] = fet[-1]
    read['fet'] = convert_dtype(fet, np.float32)
    if 'spk' in data:
        read['spk'] = next(data['spk'])
    if 'uspk' in data:
        read['uspk'] = next(data['uspk'])
    if 'mask' in data:
        read['mask'] = next(data['mask'])
    # else:
    #     read['mask'] = np.ones_like(read['fet'])
    self.spike += 1
    return read
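
A hedged sketch of driving such a reader: keep calling it until the empty dict signals that the shank is exhausted (the loader instance and its initialization are assumed, not shown in the example):

# `loader` is assumed to be an instance of the class that owns
# read_next_spike(), with klusters_data, shank and spike already set up.
spikes = []
while True:
    spike = loader.read_next_spike()
    if not spike:  # empty dict: no spikes left on this shank
        break
    spikes.append(spike)
print(len(spikes), 'spikes read')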