Пример #1
0
def rb_filter():
    """CLI entry point: apply a digital filter to a sampled dat file.

    Parses command-line arguments, filters the input with the requested
    filter type/order and cutoff frequencies, writes the filtered data,
    and records the filter settings in the output file's metadata.
    """
    parser = argparse.ArgumentParser(description="""
    filter a sampled dataset
    """)
    parser.add_argument("dat", help="dat file")
    parser.add_argument("-o", "--out", help="name of output dat file")
    parser.add_argument("--order", help="filter order", default=3, type=int)
    parser.add_argument("--highpass", help="highpass frequency", type=float)
    parser.add_argument("--lowpass", help="lowpass frequency", type=float)
    parser.add_argument("-f",
                        "--filter",
                        help="filter type: butter or bessel",
                        default="bessel")
    args = parser.parse_args()
    # Preserve the input's sample dtype when writing the filtered output.
    dtype = bark.read_metadata(args.dat)['dtype']
    filtered = stream.read(args.dat)._analog_filter(args.filter,
                                                    highpass=args.highpass,
                                                    lowpass=args.lowpass,
                                                    order=args.order)
    filtered.write(args.out, dtype)
    # Record the filter settings alongside the output data.
    attrs = bark.read_metadata(args.out)
    attrs['highpass'] = args.highpass
    attrs['lowpass'] = args.lowpass
    attrs['filter'] = args.filter
    attrs['filter_order'] = args.order
    bark.write_metadata(args.out, **attrs)
Пример #2
0
def main(dat, csv, thresh, is_std, order=default_order, min_dist=0):
    """Detect threshold crossings in a dat file and write them to a CSV.

    Writes one `channel,start` row per detected event (start in seconds),
    then attaches metadata (thresholds, filter order, source) to the CSV.
    `thresh` is either an absolute value or, when `is_std` is true, a
    multiple of each channel's standard deviation.
    """
    if is_std:
        # Interpret `thresh` as a multiple of each channel's std deviation.
        threshs = compute_std(dat) * thresh
    else:
        # Interpret `thresh` as an absolute value, replicated per channel.
        n_channels = bark.read_sampled(dat).data.shape[1]
        threshs = np.ones(n_channels) * thresh
    print('thresholds:', threshs)
    data_stream = stream.read(dat)
    pad_len = order
    with open(csv, 'w') as out:
        out.write('channel,start\n')
        events = stream_spikes(data_stream, threshs, pad_len, order,
                               min_dist * data_stream.sr)
        for channel, sample in events:
            # Convert the sample index to seconds for the CSV.
            out.write('{},{}\n'.format(channel, sample / data_stream.sr))
    bark.write_metadata(csv,
                        datatype=1000,
                        columns={'channel': {'units': None},
                                 'start': {'units': 's'}},
                        thresholds=threshs,
                        order=order,
                        source=dat)
Пример #3
0
def rb_diff():
    """CLI entry point: subtract one channel of a dat file from another.

    Defaults to subtracting channel 1 from channel 0 when no channels
    are given on the command line.
    """
    parser = argparse.ArgumentParser(description="""
    Subtracts one channel from another
    """)
    parser.add_argument("dat", help="dat file")
    parser.add_argument("-c",
                        "--channels",
                        help="""channels to difference, zero indexed, default: 0 1,
        subtracts second channel from first.""",
                        type=int,
                        nargs="+")
    parser.add_argument("-o", "--out", help="name of output dat file")
    args = parser.parse_args()
    channels = args.channels or (0, 1)
    minuend = stream.read(args.dat)[channels[0]]
    subtrahend = stream.read(args.dat)[channels[1]]
    (minuend - subtrahend).write(args.out)
Пример #4
0
def rb_join():
    """CLI entry point: merge dat files into one multichannel file.

    All inputs must have the same number of samples; their channels are
    combined side by side into the output file.
    """
    parser = argparse.ArgumentParser(description="""
            Combines dat files by adding new channels with the same number
            samples. To add additional samples, use dat-cat""")
    parser.add_argument("dat", help="dat files", nargs="+")
    parser.add_argument("-o", "--out", help="name of output dat file")
    args = parser.parse_args()
    first, *rest = [stream.read(fname) for fname in args.dat]
    first.merge(*rest).write(args.out)
Пример #5
0
def test_write_read(tmpdir):
    """Round-trip test: a written Stream reads back with identical data
    and attributes."""
    path = os.path.join(tmpdir.strpath, "mydat")
    attrs = dict(sampling_rate=100,
                 columns=bark.sampled_columns(data1),
                 fluffy="cat")
    Stream(data1, attrs=attrs).write(path)
    result = read(path)
    assert eq(data1, result.call())
    for key, value in attrs.items():
        assert value == result.attrs[key]
Пример #6
0
def datchunk(dat, stride, use_seconds):
    """Split a sampled dat file into consecutive chunk files.

    Each chunk is written as `<basename>-chunk-<i>.dat` with the source
    metadata plus an 'offset' attribute giving its starting sample.
    `stride` is a chunk length in samples, or in seconds when
    `use_seconds` is true.
    """
    attrs = bark.read_metadata(dat)
    if use_seconds:
        # Convert a stride given in seconds into a sample count.
        stride = stride * attrs['sampling_rate']
    stride = int(stride)
    basename = os.path.splitext(dat)[0]
    for index, chunk in enumerate(stream.read(dat, chunksize=stride)):
        attrs['offset'] = stride * index
        out_name = "{}-chunk-{}.dat".format(basename, index)
        bark.write_sampled(out_name, chunk, **attrs)
Пример #7
0
def rb_decimate():
    """CLI entry point: downsample a raw binary file by an integer factor.

    Optional -a KEY=VALUE pairs are attached to the output as metadata.
    """
    parser = argparse.ArgumentParser(description="Downsample raw binary file")
    parser.add_argument("input", help="input bark file")
    parser.add_argument("--factor",
                        required=True,
                        type=int,
                        help="downsample factor")
    parser.add_argument("-a",
                        "--attributes",
                        action='append',
                        type=lambda kv: kv.split("="),
                        dest='keyvalues',
                        help="extra metadata in the form of KEY=VALUE")
    parser.add_argument("-o", "--out", help="name of output file", required=True)
    opt = parser.parse_args()
    # Each -a option was split into a (key, value) pair by the type callback.
    attrs = dict(opt.keyvalues) if opt.keyvalues else {}
    stream.read(opt.input).decimate(opt.factor).write(opt.out, **attrs)
Пример #8
0
def datchunk(dat, stride, use_seconds, one_cut):
    """Split a sampled dat file into chunk files.

    When `one_cut` is true, the file is split once at `stride` samples
    into exactly two files; otherwise it is written as consecutive
    stride-sized chunks. `stride` is in samples, or in seconds when
    `use_seconds` is true. Each output records its starting sample in
    an 'offset' metadata attribute.
    """
    attrs = bark.read_metadata(dat)
    if use_seconds:
        # Convert a stride given in seconds into a sample count.
        stride = stride * attrs['sampling_rate']
    stride = int(stride)
    basename = os.path.splitext(dat)[0]

    def write_chunk(chunk, metadata, index):
        # Each chunk file records its sample offset into the source.
        out_name = "{}-chunk-{}.dat".format(basename, index)
        metadata['offset'] = stride * index
        bark.write_sampled(out_name, chunk, **metadata)

    if one_cut:
        sds = bark.read_sampled(dat)
        write_chunk(sds.data[:stride, :], attrs, 0)
        write_chunk(sds.data[stride:, :], attrs, 1)
    else:
        for index, chunk in enumerate(stream.read(dat, chunksize=stride)):
            write_chunk(chunk, attrs, index)
Пример #9
0
def rb_concat():
    """CLI entry point: concatenate raw binary files along the sample axis.

    Optional -a KEY=VALUE pairs are attached to the output as metadata.
    """
    parser = argparse.ArgumentParser(
        description="""Concatenate raw binary files by adding new samples.
    Do not confuse with merge, which combines channels""")
    parser.add_argument("input", help="input raw binary files", nargs="+")
    parser.add_argument("-a",
                        "--attributes",
                        action='append',
                        type=lambda kv: kv.split("="),
                        dest='keyvalues',
                        help="extra metadata in the form of KEY=VALUE")
    parser.add_argument("-o", "--out", help="name of output file", required=True)
    opt = parser.parse_args()
    # Each -a option was split into a (key, value) pair by the type callback.
    attrs = dict(opt.keyvalues) if opt.keyvalues else {}
    first, *rest = [stream.read(name) for name in opt.input]
    first.chain(*rest).write(opt.out, **attrs)
Пример #10
0
 def toStream(self):
     """Open this object's file (self.path) as a bark stream."""
     source_path = self.path
     return stream.read(source_path)
Пример #11
0
 def toStream(self):
     """Open this object's file (self.path) as a bark stream."""
     # Import is deferred to call time — presumably to avoid an import
     # cycle between this module and bark.stream; TODO confirm.
     from bark import stream
     return stream.read(self.path)
Пример #12
0
def compute_std(dat):
    """Estimate the per-channel standard deviation of a sampled dat file.

    Streams the file chunk by chunk, takes each chunk's per-channel
    standard deviation (np.std over axis 0), and returns the mean of
    those chunk estimates as a 1-D array with one entry per channel.

    Raises ValueError if the stream yields no chunks.
    """
    s = stream.read(dat)
    std = np.zeros(len(s.attrs['columns']))
    n_chunks = 0
    # Iterate the stream we already opened; the original opened the file a
    # second time and left `i` undefined (NameError) on an empty stream.
    for chunk in s:
        std += np.std(chunk, 0)
        n_chunks += 1
    if n_chunks == 0:
        raise ValueError('no data in {}'.format(dat))
    return std / n_chunks