Example #1
    def _merge_level(s_, d_, level='/'):
        # Recursively merge the source node tree into the destination;
        # `source` and `dest` (HDF5 file paths) come from the enclosing scope.
        if 'chan_mask' in s_:
            print('end-node')
            src_mask = s_.chan_mask
            dst_mask = d_.chan_mask
            print('Source mask: {0} channels'.format(src_mask.sum()))
            print('Dest mask: {0} channels'.format(dst_mask.sum()))
            choice = input('Overwrite destination? ([y]/n) ')
            if choice.lower() in ('y', ''):
                # print 'would write', d_, 'at path:', level
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    # remove trailing slash
                    k = level[:-1]
                    s_load = load_bunch(source, k)
                    save_bunch(dest, k, s_load, overwrite_paths=True)
            return

        src_keys = set(s_.keys())
        dst_keys = set(d_.keys())

        new_keys = src_keys.difference(dst_keys)
        for nk in new_keys:
            # print 'would write', s_[nk], 'at path:', '/'+nk
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                s_load = load_bunch(source, level + nk)
                save_bunch(dest, level + nk, s_load)
        sim_keys = src_keys.intersection(dst_keys)
        for sk in sim_keys:
            _merge_level(s_[sk], d_[sk], level + sk + '/')
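
_merge_level is a nested helper, so `source` and `dest` never appear in its body. A minimal sketch of a driver that could wrap it; the enclosing function and the name merge_bunch_files are assumptions, not part of the example above:

import warnings  # used inside the merge body shown above

def merge_bunch_files(source, dest):
    # hypothetical driver: load both node trees lazily (load=False, as in
    # Example #5) and merge recursively from the root
    s_root = load_bunch(source, '/', load=False)
    d_root = load_bunch(dest, '/', load=False)

    def _merge_level(s_, d_, level='/'):
        ...  # body as in the example above

    _merge_level(s_root, d_root)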
Example #2
def rawload_mux(exp_path,
                test,
                version,
                daq_variant='',
                data_only=False,
                shm=True):
    """
    Find and load data recorded from the MUX style headstage. Return all
    recording columns by default, otherwise only return the electrode
    data.
    
    """
    raw_data = None
    shm_arr = ('/data', ) if shm else ()
    try:
        raw_data = load_bunch(os.path.join(exp_path, test + '.h5'),
                              '/',
                              shared_arrays=shm_arr)
    except IOError:
        raw_data = load_bunch(os.path.join(exp_path, test + '.mat'),
                              '/',
                              shared_arrays=shm_arr)
    try:
        Fs = raw_data.Fs
    except (AttributeError, KeyError):
        # some files store the sampling rate under a lowercase key
        Fs = raw_data.fs
    shape = raw_data.data.shape
    if shape[1] < shape[0]:
        raw_data.data = raw_data.data.transpose().copy()
    nrow, ncol_data = list(map(int, (raw_data.numRow, raw_data.numCol)))
    # scale data channels
    raw_data.data[:ncol_data * nrow] /= mux_gain[version]
    # correct for permuted digital out sampling
    if not daq_variant:
        # if daq info (new style) is not given, try to look up sampling order
        # based on the mux version (old style)
        daq_variant = version
    raw_data.data = _permute_mux(raw_data.data, nrow, daq_variant)
    if data_only:
        raw_data.data = raw_data.data[:ncol_data * nrow]

    try:
        # stim-mux converted h5 files (Virginia's conversion)
        # do not have info
        info = tdms_info(raw_data.info)
    except AttributeError:
        info = None
    return raw_data.data, Fs, (nrow, ncol_data), info
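
A hypothetical call, matching the four-value return above; the experiment path, test name, and the 'mux3' mux_gain key are placeholders:

data, Fs, (nrow, ncol), info = rawload_mux(
    '/data/experiments/sess01',  # placeholder experiment path
    'test_003',                  # placeholder test name
    'mux3',                      # placeholder mux_gain key
    data_only=True)
print('{0} rows x {1} columns at {2} Hz'.format(nrow, ncol, Fs))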
Example #3
 def lookup(self, dset_name):
     path = self._dset_to_path(dset_name)
     try:
         node = load_bunch(self.dbfile, path)
     except (IOError, NoSuchNodeError):
         # missing file or node: return an empty Bunch
         return Bunch()
     return node
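
Assuming this method belongs to an HDF5-backed results database with a dbfile attribute, usage might look like the following; `db` and the dataset name are hypothetical:

results = db.lookup('sess01.test_003')
if not len(results):
    print('no cached results for this dataset')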
Example #4
def try_saved(exp_path, test, bandpass):
    try:
        dset = load_bunch(os.path.join(exp_path, test + '_proc.h5'), '/')
        #h5 = tables.open_file(os.path.join(exp_path, test+'_proc.h5'))
    except IOError as exc:
        raise DataPathError from exc

    if dset.bandpass != bandpass:
        del dset
        raise DataPathError(
            'saved data bandpass does not match requested bandpass')
    return dset
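
A sketch of the intended calling pattern, assuming DataPathError comes from the same module and that bandpass is a (lo, hi) pair as the comparison above suggests; the literal values are placeholders:

try:
    dset = try_saved('/data/experiments/sess01', 'test_003', (2.0, 100.0))
except DataPathError:
    dset = None  # fall back to re-filtering the raw recording here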
Example #5
 def list_paths(self):
     # walk the node tree without loading array data (load=False)
     b = load_bunch(self.dbfile, '/', load=False)
     _walk_paths(b)
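
_walk_paths is not shown in this example. A minimal sketch of what such a walker might do, assuming nested dict-like Bunch nodes and the same level-string convention as Example #1; this reconstruction is a guess, not the library's code:

def _walk_paths(node, level='/'):
    # print each node path, recursing into sub-groups
    for key in sorted(node.keys()):
        print(level + key)
        if hasattr(node[key], 'keys'):
            _walk_paths(node[key], level + key + '/')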
Example #6
def load_open_ephys_channels(exp_path,
                             test,
                             rec_num='auto',
                             shared_array=False,
                             downsamp=1,
                             target_Fs=-1,
                             lowpass_ord=12,
                             page_size=8,
                             save_downsamp=True,
                             use_stored=True,
                             store_path='',
                             quantized=False):
    """
    Load Open Ephys .continuous channel data for a recording, optionally
    downsampling to approximately target_Fs and caching the downsampled
    set to an HDF5 file.
    """
    # first off, check if there is a stored file at target_Fs (if valid)
    if use_stored and target_Fs > 0:
        # Look for a previously downsampled data stash
        fname_part = '*{0}*_Fs{1}.h5'.format(test, int(target_Fs))
        # try store_path (if given) and also exp_path
        for p_ in (store_path, exp_path):
            fname = glob(osp.join(p_, fname_part))
            if len(fname) and osp.exists(fname[0]):
                print('Loading from', fname[0])
                channel_data = load_bunch(fname[0], '/')
                return channel_data

    rec_path, rec_num = prepare_paths(exp_path, test, rec_num)
    trueFs = get_robust_samplingrate(rec_path)
    if downsamp == 1 and target_Fs > 0:
        if trueFs is None:
            raise ValueError('Sampling frequency not robustly determined, '
                             'downsample not calculated for '
                             '{0:.1f} Hz'.format(target_Fs))
        else:
            # find the largest integer downsample rate such that
            # target_Fs * downsamp <= trueFs
            downsamp = int(trueFs // target_Fs)
            print('downsample rate:', downsamp)

    if downsamp > 1 and quantized:
        print('Cannot return quantized data when downsampling')
        quantized = False
    downsamp = int(downsamp)

    all_files = list()
    for pre in rec_num:
        all_files.extend(glob(osp.join(rec_path, pre + '*.continuous')))
    if not len(all_files):
        raise IOError('No files found')
    c_nums = list()
    chan_files = list()
    aux_files = list()
    aux_nums = list()
    adc_files = list()
    adc_nums = list()
    for f in all_files:
        f_part = osp.splitext(osp.split(f)[1])[0]
        # File names can be: Proc#_{ADC/CH/AUX}[_N].continuous
        # (the trailing _N part is not always present; discard it for now)
        f_parts = f_part.split('_')
        if len(f_parts[-1]) == 1 and f_parts[-1] in '0123456789':
            f_parts = f_parts[:-1]
        ch = f_parts[-1]  # last file part is CHx or AUXx
        if ch[0:2] == 'CH':
            chan_files.append(f)
            c_nums.append(int(ch[2:]))
        elif ch[0:3] == 'AUX':  # separate chan and AUX files
            aux_files.append(f)
            aux_nums.append(int(ch[3:]))
        elif ch[0:3] == 'ADC':
            adc_files.append(f)
            adc_nums.append(int(ch[3:]))

    if downsamp > 1:
        (b_lp, a_lp) = cheby2_bp(60, hi=1.0 / downsamp, Fs=2, ord=lowpass_ord)

    def _load_array_block(files, shared_array=False, antialias=True):
        dtype = 'h' if quantized else 'd'

        # start on 1st index of 0th block
        n = 1
        b_cnt = 0
        b_idx = 1

        # load the first channel to fix the block geometry and sample rate
        ch_record = OE.loadContinuous(files[0], dtype=np.int16, verbose=False)
        Fs = float(ch_record['header']['sampleRate'])
        d_len = ch_record['data'].shape[-1]
        sub_len = d_len // downsamp
        if sub_len * downsamp < d_len:
            sub_len += 1
        proc_block = shm.shared_ndarray((page_size, d_len), typecode=dtype)
        proc_block[0] = ch_record['data'].astype(dtype)
        if shared_array:
            saved_array = shm.shared_ndarray((len(files), sub_len),
                                             typecode=dtype)
        else:
            saved_array = np.zeros((len(files), sub_len), dtype=dtype)

        for f in files[1:]:
            ch_record = OE.loadContinuous(f, dtype=np.int16,
                                          verbose=False)  # load data
            Fs = float(ch_record['header']['sampleRate'])
            proc_block[b_idx] = ch_record['data'].astype(dtype)
            b_idx += 1
            n += 1
            if (b_idx == page_size) or (n == len(files)):
                # do dynamic range conversion and downsampling
                # on a block of data
                if not quantized:
                    proc_block *= ch_record['header']['bitVolts']
                if downsamp > 1 and antialias:
                    filtfilt(proc_block, b_lp, a_lp)
                sl = slice(b_cnt * page_size, n)
                saved_array[sl] = proc_block[:b_idx, ::downsamp]
                # update / reset block counters
                b_idx = 0
                b_cnt += 1

        if b_idx:
            # flush a partial final block (covers the single-file case,
            # where the loop above never runs)
            if not quantized:
                proc_block *= ch_record['header']['bitVolts']
            if downsamp > 1 and antialias:
                filtfilt(proc_block, b_lp, a_lp)
            saved_array[b_cnt * page_size:n] = proc_block[:b_idx, ::downsamp]
        del proc_block
        while gc.collect():
            pass
        return saved_array, Fs, ch_record['header']

    # sort CH, AUX, and ADC by the channel number
    sorted_chans = np.argsort(c_nums)
    # reorder the channel file list based on sorted_chans
    chan_files = [chan_files[n] for n in sorted_chans]
    chdata, Fs, header = _load_array_block(chan_files,
                                           shared_array=shared_array)

    aux_data = list()
    if len(aux_files) > 0:
        sorted_aux = np.argsort(aux_nums)
        aux_files = [aux_files[n] for n in sorted_aux]
        aux_data, _, _ = _load_array_block(aux_files, antialias=False)

    adc_data = list()
    if len(adc_files) > 0:
        sorted_adc = np.argsort(adc_nums)
        adc_files = [adc_files[n] for n in sorted_adc]
        adc_data, _, _ = _load_array_block(adc_files, antialias=False)

    if not trueFs:
        print('settings.xml not found, relying on sampling rate from '
              'recording header files')
        trueFs = Fs
    if downsamp > 1:
        trueFs /= downsamp
    dset = Bunch(chdata=chdata,
                 aux=aux_data,
                 adc=adc_data,
                 Fs=trueFs,
                 header=header)

    if save_downsamp and downsamp > 1:
        fname = '{0}_Fs{1}.h5'.format(osp.split(rec_path)[-1], int(dset.Fs))
        if not len(store_path):
            store_path = exp_path
        mkdir_p(store_path)
        fname = osp.join(store_path, fname)
        print('saving', fname)
        save_bunch(fname, '/', dset, mode='w')

    return dset
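
A hypothetical first call that builds and caches a downsampled copy near 1 kHz; a second identical call would hit the stored .h5 file through the use_stored branch at the top of the function. The path and test name are placeholders:

dset = load_open_ephys_channels(
    '/data/experiments/sess01',  # placeholder experiment path
    'test_003',                  # placeholder test/recording name
    target_Fs=1000)
print(dset.chdata.shape, dset.Fs)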