Example No. 1
import numpy as np
from combinato import NcsFile


def ncs_check_timestamps(ncsfname):
    """
    check whether the records of an .ncs file are equispaced in time
    (returns 0 if they are)
    """
    f = NcsFile(ncsfname)
    ts = f.read(0, f.num_recs, mode='timestamp')
    ts = ts.astype(int)
    # the second differences of equispaced timestamps are all zero,
    # so their sum serves as a quick check
    err = np.diff(ts, 2).sum()
    return err
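A quick illustration of the check on synthetic timestamps (no .ncs file needed):

import numpy as np

regular = np.arange(0, 10 * 512, 512)    # equispaced record timestamps
irregular = regular.copy()
irregular[-1] += 7                       # the last record arrives late
print(np.diff(regular, 2).sum())         # prints 0
print(np.diff(irregular, 2).sum())       # prints 7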
Example No. 2
import numpy as np
import tables
from combinato import NcsFile


def main(fname):
    fid = NcsFile(fname)
    nrec = fid.num_recs
    # out_size = (nrec + 1) * 512/Q
    times = fid.read(0, fid.num_recs, mode='timestamp')
    # Neuralynx timestamps are in microseconds; convert to milliseconds
    times = times.astype(float) / 1000
    # expandts (not shown here) expands the per-record timestamps to
    # per-sample times for a downsampling factor of 16
    arr = expandts(times, 1e3 * fid.timestep, 16)
    # report positions where the expanded times are not equispaced
    idx_err = np.diff(arr, 2).nonzero()[0]
    print(idx_err.shape[0])
    print(np.diff(arr, 2)[idx_err])

    outfile = tables.open_file('times_16.h5', 'w')
    outfile.create_array('/', 'times', arr)
    outfile.close()
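The helper expandts is not shown in this example. A hypothetical sketch of what such a helper could look like, assuming 512 samples per .ncs record (as the commented-out size estimate suggests) and that every Q-th sample is kept:

import numpy as np


def expandts_sketch(rec_times_ms, sample_step_ms, q, samples_per_rec=512):
    """hypothetical stand-in for expandts: expand one timestamp per record
    into one timestamp per kept sample, keeping every q-th sample"""
    offsets = np.arange(0, samples_per_rec, q) * sample_step_ms
    return (rec_times_ms[:, None] + offsets[None, :]).ravel()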
Example No. 3
def downsampling(ncsfname, h5fname, Q=16, include_times=True):
    """
    Main routine for downsampling
    """
    ncsf = NcsFile(ncsfname)
    chname = ncsf.header['AcqEntName']
    h5f = initfile(h5fname, ncsf, Q, include_times)
    nrec = ncsf.num_recs
    ds_order = 8
    ts = ncsf.timestep

    if DEBUG:
        print('{} has {} records'.format(ncsfname, nrec))

    blocks = make_blocks(nrec, min(100000, nrec))
    if Q > 1:
        # design a Chebyshev type I lowpass filter (0.05 dB ripple) for
        # anti-alias filtering before downsampling; the cutoff is 80% of
        # the post-decimation Nyquist frequency (0.8/Q in normalized units)
        b_down, a_down = scipy.signal.cheby1(ds_order, .05, 0.8 / Q)

    for start, stop in blocks:
        print('Filtering {} {}-{}'.format(chname, start, stop))
        data, times = ncsf.read(start, stop, mode='both')

        if include_times:
            h5f.root.time.append(times)

        if Q > 1:
            # Warning!
            # scipy.signal.decimate uses lfilter, so the decimated signal
            # would have a phase shift; for this reason, we use the
            # zero-phase filtfilt here and then keep every Q-th sample
            ds_data = scipy.signal.filtfilt(b_down, a_down, data)[::Q]
        else:
            ds_data = data

        h5f.root.data.rawdata.append(ds_data)

        # if you would like to create other traces
        # (e.g. filtered versions of the data), do it here
        # by calling appropriate functions

        h5f.flush()

    h5f.close()
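The phase-shift warning above can be checked in isolation. A minimal sketch using only numpy and scipy (as an aside, for a hypothetical 32 kHz recording and Q = 16, the normalized cutoff 0.8/Q corresponds to 0.8 * 16000 / 16 = 800 Hz):

import numpy as np
import scipy.signal

# toy signal: a smooth pulse peaking at t = 0.3 s, sampled at 1 kHz
fs = 1000.0
t = np.arange(0, 1, 1 / fs)
x = np.exp(-0.5 * ((t - 0.3) / 0.02) ** 2)

# same filter design as above (Q = 16)
b, a = scipy.signal.cheby1(8, .05, 0.8 / 16)
shifted = scipy.signal.lfilter(b, a, x)      # single-pass filtering delays the pulse
zero_phase = scipy.signal.filtfilt(b, a, x)  # forward-backward filtering does not

# the peak stays at the same index only for the filtfilt output
print(np.argmax(x), np.argmax(zero_phase), np.argmax(shifted))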
Example No. 4
import matplotlib.pyplot as mpl
import scipy.signal as sig
# DefaultFilter is assumed to be importable alongside NcsFile
from combinato import NcsFile, DefaultFilter


def plot_spectrum(fname):
    """
    plot the power spectral density of the first 1000 records,
    before and after the default filter
    """
    fid = NcsFile(fname)
    rawdata = fid.read(0, 1000)
    # convert raw A/D counts to microvolts
    data = rawdata * (1e6 * fid.header['ADBitVolts'])
    fs = 1 / fid.timestep
    my_filter = DefaultFilter(fid.timestep)
    filt_data = my_filter.filter_extract(data)
    f, p = sig.welch(data, fs, nperseg=32768)
    f_filt, p_filt = sig.welch(filt_data, fs, nperseg=32768)

    fig = mpl.figure()
    plot = fig.add_subplot(1, 1, 1)
    plot.plot(f, p, label='Unfiltered')
    plot.plot(f_filt, p_filt, label='Filtered')

    plot.set_yscale('log')
    plot.legend()
    plot.set_ylabel(r'$\mu\mathrm{V}^2/\mathrm{Hz}$')
    plot.set_xlabel(r'$\mathrm{Hz}$')
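A usage sketch, assuming the imports above and a local recording (CSC1.ncs is a placeholder filename):

plot_spectrum('CSC1.ncs')
mpl.show()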
Example No. 5
import os
from glob import glob
from combinato import NcsFile


def to_original():
    """
    rename .ncs files in the current directory back to their original
    channel names (taken from the AcqEntName header field)
    """
    ncsfiles = glob('*.ncs')
    for nf in ncsfiles:
        n = NcsFile(nf)
        oldname = n.header['AcqEntName'] + '.ncs'

        if os.path.exists(oldname):
            print('Not renaming {} to {}, file exists'.format(nf, oldname))
        else:
            print('Renaming {} to {}'.format(nf, oldname))
            os.rename(nf, oldname)
Example No. 6
import csv
import os
from combinato import NcsFile

# FNAME_OUT (the output filename) is a module-level constant not shown here


def main(fnames):
    """
    for each file in fnames, get the entity name
    and write everything to a csv file
    """
    fnames.sort()
    out_data = list()

    for name in fnames:
        fid = NcsFile(name)
        label = os.path.splitext(name)[0]
        entity = fid.header['AcqEntName']
        out_data.append((label, entity))

    with open(FNAME_OUT, 'w') as fid:
        csv_writer = csv.writer(fid, delimiter=';')
        csv_writer.writerows(out_data)
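A hypothetical invocation from the directory containing the recordings, with a placeholder value for the FNAME_OUT constant:

from glob import glob

FNAME_OUT = 'channel_names.csv'  # placeholder, not part of the original example
main(glob('*.ncs'))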
Example No. 7
# -*- coding: utf-8 -*-
# JN 2014-10-21
# script creates a clinRecConv.npy from ncs files

import os
import numpy as np
from combinato import NcsFile
from matplotlib.dates import date2num

if __name__ == "__main__":
    if os.path.exists('clinRecConv.npy'):
        print('File exists, doing nothing')
    else:
        fid = NcsFile('CSC1.ncs')
        # pair the recording start date from the header with the
        # first Neuralynx timestamp of the file
        d = fid.header['opened']
        n = date2num(d)
        ts = fid.read(0, 1, mode='timestamp')
        np.save('clinRecConv', np.array((ts, n)))
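A minimal sketch of reading the saved pair back (allow_pickle only matters if the stored array ended up with object dtype):

import numpy as np

ts0, datenum = np.load('clinRecConv.npy', allow_pickle=True)
print(ts0, datenum)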
Example No. 8
            data = nf.read(start, stop, mode='data')
            data = data.reshape(-1, fs)
            block[i::ns, :] = data

        edffile.write(block)


if __name__ == "__main__":
    """ missing:
    outfilename
    """
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--out')
    parser.add_argument('--patid')
    parser.add_argument('--comment', default='')
    args = parser.parse_args()

    chs = get_channel_list(os.getcwd())

    ncsfiles = []
    for ch in chs:
        ncsfiles.append(NcsFile(ch))

    h1, h2 = create_edf_header(ncsfiles, args.patid, args.comment)
    f = open(args.out, 'w')
    f.write(h1)
    f.write(h2)
    write_data(ncsfiles, f)
    f.close()
Example No. 9
    with open(fname, 'r') as fid:
        lines = [line.strip() for line in fid.readlines()]

    for line in lines:
        if not line or line.startswith('#'):
            # skip blank lines and comment lines
            continue
        fields = line.split()
        if fields[0] == target:
            dtime, micro = fields[2].split('.')
            dstr = fields[1] + ' ' + dtime
            dfmt = '%Y-%m-%d %H:%M:%S'
            start_date = datetime.datetime.strptime(dstr, dfmt)
            start_date += datetime.timedelta(microseconds=int(micro))
            break

    ts_start_nlx = float(fields[3]) / 1000
    return ts_start_nlx, start_date
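For reference, each non-comment line read by the loop above is expected to carry four whitespace-separated fields: a channel label, a date, a time with fractional seconds, and a numeric timestamp. A line with hypothetical values could look like this:

CSC1 2014-10-21 12:30:05.123456 123456789012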


if __name__ == "__main__":
    """
    a small test case
    """
    from combinato import NcsFile
    import sys
    Q = 16
    ncsfname = sys.argv[1]
    fid = NcsFile(ncsfname)
    h5f = initfile('out.h5', fid, Q)
    print(h5f)