Example #1
def arf2bark(arf_file,
             root_path,
             timezone,
             verbose,
             mangle_prefix=ENTRY_PREFIX):
    with arf.open_file(arf_file, 'r') as af:
        os.mkdir(root_path)
        root = bark.Root(root_path)
        if verbose:
            print('Created Root: ' + root_path)
        tle = None
        found_trigin = False
        for ename, entry in af.items():  # entries and top-level datasets
            if isinstance(entry, h5py.Group):  # entries
                entry_path = os.path.join(root_path, ename)
                entry_attrs = copy_attrs(entry.attrs)
                for pos_arg in ('name', 'parents'):
                    # along with 'timestamp' below, these are positional arguments to create_entry
                    # for now, I prefer hard-coding them over messing with runtime introspection
                    new_name = pos_arg
                    while new_name in entry_attrs:
                        new_name = '{}_{}'.format(mangle_prefix, new_name)
                    try:
                        entry_attrs[new_name] = entry_attrs.pop(pos_arg)
                    except KeyError:
                        pass
                    else:
                        if verbose:
                            print('Renamed attribute {} of entry {} to {}'.
                                  format(pos_arg, ename, new_name))
                timestamp = entry_attrs.pop('timestamp')
                if timezone:
                    timestamp = bark.convert_timestamp(timestamp, timezone)
                else:
                    timestamp = bark.convert_timestamp(timestamp)
                bark_entry = bark.create_entry(entry_path,
                                               timestamp,
                                               parents=False,
                                               **entry_attrs)
                if verbose:
                    print('Created Entry: ' + entry_path)
                for ds_name, dataset in entry.items():  # entry-level datasets
                    if ds_name == 'trig_in':  # accessing trig_in -> segfault
                        found_trigin = True  # and skip the dataset
                    else:
                        transfer_dset(ds_name, dataset, entry_path, verbose)
            elif isinstance(entry, h5py.Dataset):  # top-level datasets
                if arf.is_time_series(entry) or arf.is_marked_pointproc(entry):
                    if tle is None:
                        path = os.path.join(root_path, 'top_level')
                        tle = bark.create_entry(path, 0, parents=False).path
                    transfer_dset(ename, entry, tle, verbose)
                else:
                    unknown_ds_warning(ename)  # and skip, w/o creating TLE
        if found_trigin:
            print('Warning: found datasets named "trig_in". Jill-created ' +
                  '"trig_in" datasets segfault when read, so these datasets' +
                  ' were skipped. If you know the datasets are good, rename' +
                  ' them and try again.')
    return bark.Root(root_path)
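A minimal invocation sketch for the converter above, assuming a hypothetical ARF file, destination directory, and timezone; the function also depends on module-level helpers (copy_attrs, transfer_dset, unknown_ds_warning) defined alongside it.

# Hypothetical usage: convert 'experiment.arf' into a new Bark root directory.
# The file name, directory name, and timezone are illustrative only.
root = arf2bark('experiment.arf',            # existing ARF (HDF5) file
                'experiment_bark',           # directory created for the Bark root
                timezone='America/Chicago',  # passed to bark.convert_timestamp
                verbose=True)                # print each Entry and dataset as created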
Example #2
def test_entry_sort(tmpdir):
    path1 = os.path.join(tmpdir.strpath, "myentry")
    dtime1 = datetime.datetime(2020, 1, 1, 0, 0, 0, 0)
    entry1 = bark.create_entry(path1, dtime1, food="pizza")
    path2 = os.path.join(tmpdir.strpath, "myentry2")
    dtime2 = datetime.datetime(2021, 1, 1, 0, 0, 0, 0)
    entry2 = bark.create_entry(path2, dtime2, food="pizza")
    mylist = sorted([entry2, entry1])
    assert mylist[0] == entry1
    assert mylist[1] == entry2
Example #3
def arf2bark(arf_file, root_path, timezone, verbose):
    with arf.open_file(arf_file, 'r') as af:
        os.mkdir(root_path)
        root = bark.Root(root_path)
        if verbose:
            print('Created Root: ' + root_path)
        tle = None
        found_trigin = False
        for ename, entry in af.items(): # entries and top-level datasets
            if isinstance(entry, h5py.Group): # entries
                entry_path = os.path.join(root_path, ename)
                entry_attrs = copy_attrs(entry.attrs)
                timestamp = entry_attrs.pop('timestamp')
                # rename 'name' attribute created by openephys arf module
                try:
                    entry_attrs['openephys_name'] = entry_attrs.pop('name')
                except KeyError:
                    pass
                if timezone:
                    timestamp = bark.convert_timestamp(timestamp, timezone)
                else:
                    timestamp = bark.convert_timestamp(timestamp)

                bark_entry = bark.create_entry(entry_path,
                                               timestamp,
                                               parents=False,
                                               **entry_attrs)
                if verbose:
                    print('Created Entry: ' + entry_path)
                for ds_name, dataset in entry.items(): # entry-level datasets
                    if ds_name == 'trig_in': # accessing trig_in -> segfault
                        found_trigin = True # and skip the dataset
                    else:
                        transfer_dset(ds_name, dataset, entry_path, verbose)
            elif isinstance(entry, h5py.Dataset): # top-level datasets
                if arf.is_time_series(entry) or arf.is_marked_pointproc(entry):
                    if tle is None:
                        path = os.path.join(root_path, 'top_level')
                        tle = bark.create_entry(path, 0, parents=False).path
                    transfer_dset(ename, entry, tle, verbose)
                else:
                    unknown_ds_warning(ename) # and skip, w/o creating TLE
        if found_trigin:
            print('Warning: found datasets named "trig_in". Jill-created ' +
                  '"trig_in" datasets segfault when read, so these datasets' +
                  ' were skipped. If you know the datasets are good, rename' +
                  ' them and try again.')
    return bark.Root(root_path)
Example #4
def test_read_metadata(tmpdir):
    # entry/dir with good metadata file
    entry_path = os.path.join(tmpdir.strpath, "myentry")
    dtime = arrow.get("2020-01-02T03:04:05+06:00").datetime
    entry = bark.create_entry(entry_path, dtime, food="pizza")
    entry_metadata = bark.read_metadata(entry_path)
    assert 'timestamp' in entry_metadata
    # try to read entry/dir metadata file directly
    with pytest.raises(ValueError):
        entry_metadata = bark.read_metadata(os.path.join(entry_path, 'meta.yaml'))
    # entry/dir without metadata file
    with pytest.raises(FileNotFoundError):
        entry_metadata = bark.read_metadata(tmpdir.strpath)
    # dataset with good metadata file
    data = np.zeros((10,3), dtype='int16')
    params = dict(sampling_rate=30000, units="mV", unit_scale=0.025, extra="barley")
    dset_path = os.path.join(tmpdir.strpath, "test_sampled")
    dset = bark.write_sampled(datfile=dset_path, data=data, **params)
    dset_metadata = bark.read_metadata(dset_path)
    assert 'sampling_rate' in dset_metadata
    # try to read dataset metadata file directly
    with pytest.raises(ValueError):
        dset_metadata = bark.read_metadata(dset_path + '.meta.yaml')
    # dataset without metadata file
    os.remove(dset_path + '.meta.yaml')
    with pytest.raises(FileNotFoundError):
        dset_metadata = bark.read_metadata(dset_path)
    # dataset that doesn't exist
    with pytest.raises(FileNotFoundError):
        dset_metadata = bark.read_metadata(os.path.join(tmpdir.strpath, 'fake_dset.dat'))
Example #5
def test_create_entry(tmpdir):
    path = os.path.join(tmpdir.strpath, "myentry")
    dtime = arrow.get("2020-01-02T03:04:05+06:00").datetime
    entry = bark.create_entry(path, dtime, food="pizza")
    assert 'uuid' in entry.attrs
    assert dtime == bark.timestamp_to_datetime(entry.attrs["timestamp"])
    assert entry.attrs["food"] == "pizza"
Example #6
def eofolder2entry(oefolder,
                   entry_name,
                   timestamp=None,
                   timezone='America/Chicago',
                   parents=False,
                   **attrs):
    # derive the timestamp from the open-ephys folder name unless one was supplied
    if not timestamp:
        timestamp = filename_to_timestamp(oefolder, timezone)
    else:
        timestamp = input_string_to_timestamp(timestamp, timezone)
    create_entry(entry_name, timestamp, parents, **attrs)
    # convert each .kwd file in the folder to a like-named .dat dataset in the entry
    kwds = glob(os.path.join(oefolder, "*.kwd"))
    dats = [os.path.join(entry_name,
                         os.path.splitext(os.path.split(kwd)[-1])[0] + '.dat')
            for kwd in kwds]
    for kwd, dat in zip(kwds, dats):
        write_from_kwd(kwd, dat)
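A hypothetical call sketch for the converter above; the folder name, entry name, and extra attribute are made up, and filename_to_timestamp / write_from_kwd are module-level helpers from the same file.

# Hypothetical usage: turn one open-ephys recording folder into a Bark entry.
# Folder name, entry name, and the 'bird' attribute are illustrative only.
eofolder2entry('2020-01-02_03-04-05',        # open-ephys folder; name encodes the start time
               'myentry',                    # Bark entry directory to create
               timezone='America/Chicago',
               bird='b1234')                 # stored as entry metadata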
Example #7
def arf2bark(arf_file, root_parent, timezone, verbose):
    with arf.open_file(arf_file, 'r') as af:
        # root
        root_dirname = os.path.splitext(arf_file)[0]
        root_path = os.path.join(os.path.abspath(root_parent), root_dirname)
        os.mkdir(root_path)
        root = bark.Root(root_path)
        if verbose:
            print('Created Root: ' + root_path)
        tle = None
        found_trigin = False
        for ename, entry in af.items():  # entries and top-level datasets
            if isinstance(entry, h5py.Group):  # entries
                entry_path = os.path.join(root_path, ename)
                entry_attrs = copy_attrs(entry.attrs)
                timestamp = entry_attrs.pop('timestamp')
                if timezone:
                    timestamp = bark.convert_timestamp(timestamp, timezone)
                else:
                    timestamp = bark.convert_timestamp(timestamp)
                bark_entry = bark.create_entry(entry_path,
                                               timestamp,
                                               parents=False,
                                               **entry_attrs)
                if verbose:
                    print('Created Entry: ' + entry_path)
                for ds_name, dataset in entry.items():  # entry-level datasets
                    if ds_name == 'trig_in':  # accessing trig_in -> segfault
                        found_trigin = True  # and skip the dataset
                    else:
                        transfer_dset(ds_name, dataset, entry_path, verbose)
            elif isinstance(entry, h5py.Dataset):  # top-level datasets
                if tle is None:
                    path = os.path.join(root_path, 'top_level')
                    tle = bark.create_entry(path, 0, parents=False).path
                transfer_dset(ename, entry, tle, verbose)
        if found_trigin:
            print('Warning: found datasets named "trig_in". Jill-created ' +
                  '"trig_in" datasets segfault when read, so these datasets' +
                  ' were skipped. If you know the datasets are good, rename' +
                  ' them and try again.')
    return bark.Root(root_path)
Example #8
def test_closing(tmpdir):
    # setup
    ds_name = 'test_sampled.dat'
    entry1_path = os.path.join(tmpdir.strpath, "entry1")
    dtime = arrow.get("2020-01-02T03:04:05+06:00").datetime
    entry1 = bark.create_entry(entry1_path, dtime, food="pizza")
    entry2_path = os.path.join(tmpdir.strpath, "entry2")
    dtime = arrow.get("2020-01-10T03:04:05+06:00").datetime
    entry2 = bark.create_entry(entry2_path, dtime, food="burritos")
    data = np.zeros((10,3), dtype='int16')
    params = dict(sampling_rate=30000, units="mV", unit_scale=0.025, extra="barley")
    dset_path = os.path.join(entry1_path, ds_name)
    dset = bark.write_sampled(datfile=dset_path, data=data, **params)
    del entry1, entry2, dset
    r = bark.read_root(tmpdir.strpath)
    # initial checking
    assert len(r.entries) == 2
    for ename in r.entries:
        assert callable(r.entries.get(ename))
    # load entry1
    entry1 = r.entries['entry1']
    assert isinstance(r.entries.get('entry1'), bark.Entry)
    assert callable(r.entries.get('entry2'))
    # load sampled dataset
    assert callable(entry1.datasets.get(ds_name))
    ds1 = entry1.datasets[ds_name]
    assert not callable(entry1.datasets.get(ds_name))
    assert isinstance(ds1, bark.SampledData)
    # close entry
    del ds1
    assert not callable(entry1.datasets.get(ds_name))
    assert isinstance(entry1.datasets.get(ds_name), bark.SampledData)
    entry1.close()
    assert callable(entry1.datasets.get(ds_name))
    # close root
    del entry1
    assert not callable(r.entries.get('entry1'))
    assert isinstance(r.entries.get('entry1'), bark.Entry)
    r.close()
    assert callable(r.entries.get('entry1'))
Example #9
def mk_entry():
    p = argparse.ArgumentParser(description="create a bark entry")
    p.add_argument("name", help="name of bark entry")
    p.add_argument("-a",
                   "--attributes",
                   action='append',
                   type=lambda kv: kv.split("="),
                   dest='keyvalues',
                   help="extra metadata in the form of KEY=VALUE")
    p.add_argument("-t",
                   "--timestamp",
                   help="format: YYYY-MM-DD or YYYY-MM-DD_HH-MM-SS.S")
    p.add_argument("-p",
                   "--parents",
                   help="no error if already exists, new meta-data written",
                   action="store_true")
    p.add_argument('--timezone',
                   help="timezone of timestamp, default: America/Chicago",
                   default='America/Chicago')
    args = p.parse_args()
    timestamp = arrow.get(
        args.timestamp).replace(tzinfo=tz.gettz(args.timezone)).datetime
    attrs = dict(args.keyvalues) if args.keyvalues else {}
    bark.create_entry(args.name, timestamp, args.parents, **attrs)
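The command-line wrapper above ultimately just calls bark.create_entry; a rough sketch of the equivalent direct call, using the same arrow/dateutil pattern as the parser (the entry name, date, and attribute values are hypothetical).

# Roughly equivalent direct call; name, date, and attribute are made up.
import arrow
from dateutil import tz
timestamp = arrow.get('2020-01-02').replace(tzinfo=tz.gettz('America/Chicago')).datetime
bark.create_entry('myentry', timestamp, parents=True, bird='b1234')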
Example #10
def rhds_to_entry(rhd_paths,
                  entry_name,
                  timestamp=None,
                  parents=False,
                  max_gaps=10,
                  timezone='America/Chicago',
                  legacy=False,
                  **attrs):
    """
    Converts a temporally contiguous list of .rhd files to a bark entry.
    """
    if legacy:
        read_data = bark.io.rhd.legacy_load_intan_rhd_format.read_data
    else:
        read_data = bark.io.rhd.load_intan_rhd_format.read_data
    if not timestamp:
        timestamp = rhd_filename_to_timestamp(rhd_paths[0], timezone)
    else:
        timestamp = input_string_to_timestamp(timestamp, timezone)
    # extract data and metadata from first file
    print(rhd_paths[0])
    result = read_data(rhd_paths[0], no_floats=True)
    not_implemented_warnings(result)
    check_timestamp_gaps(result, max_gaps)
    # make entry
    entry_attrs = result['notes']
    attrs.update(entry_attrs)
    create_entry(entry_name, timestamp, parents, **attrs)
    # make datasets
    board_channels = adc_chan_names(result)
    if board_channels:
        dsetname = os.path.join(entry_name, 'board_adc.dat')
        board_adc_metadata(result, dsetname)
        with open(dsetname, 'wb') as fp:
            fp.write(result['board_adc_data'].T.tobytes())

    amplifier_channels = amp_chan_names(result)
    if amplifier_channels:
        dsetname = os.path.join(entry_name, 'amplifier.dat')
        amplifier_metadata(result, dsetname)
        with open(dsetname, 'wb') as fp:
            fp.write(result['amplifier_data'].T.tobytes())

    # now that the metadata has been written (and data from the first file)
    # write data for the remainder of the files
    for rhdfile in rhd_paths[1:]:
        print(rhdfile)
        result = read_data(rhdfile, no_floats=True)
        not_implemented_warnings(result)
        check_timestamp_gaps(result, max_gaps)
        cur_board_channels = adc_chan_names(result)
        cur_amplifier_channels = amp_chan_names(result)

        # check that the same channels are being recorded
        if board_channels != cur_board_channels:
            raise ValueError("""{} has channels {}
                    {} has channels {} """.format(rhdfile, cur_board_channels,
                                                  rhd_paths[0],
                                                  board_channels))
        if amplifier_channels != cur_amplifier_channels:
            raise ValueError("""{} has channels {}
                    {} has channels {}""".format(rhdfile,
                                                 cur_amplifier_channels,
                                                 rhd_paths[0],
                                                 amplifier_channels))
        # write data
        if cur_board_channels:
            dsetname = os.path.join(entry_name, 'board_adc.dat')
            with open(dsetname, 'ab') as fp:
                fp.write(result['board_adc_data'].T.tobytes())
        if cur_amplifier_channels:
            dsetname = os.path.join(entry_name, 'amplifier.dat')
            with open(dsetname, 'ab') as fp:
                fp.write(result['amplifier_data'].T.tobytes())
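A hypothetical call sketch for the converter above; the .rhd file names, entry name, and extra attribute are made up, and the helpers it uses (rhd_filename_to_timestamp, adc_chan_names, amp_chan_names, etc.) come from the same module.

# Hypothetical usage: stitch two temporally contiguous Intan .rhd files into one entry.
# File names, entry name, and the 'bird' attribute are illustrative only.
rhds_to_entry(['rec_200102_030405.rhd', 'rec_200102_031405.rhd'],
              'myentry',
              max_gaps=10,                   # tolerated timestamp gaps per file
              timezone='America/Chicago',    # used when parsing the filename timestamp
              bird='b1234')                  # forwarded to create_entry as metadata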