Example #1
import os

import arf
import bark
import h5py

# copy_attrs, transfer_dset, unknown_ds_warning, and ENTRY_PREFIX are
# assumed to be defined elsewhere in the same module.
def arf2bark(arf_file,
             root_path,
             timezone,
             verbose,
             mangle_prefix=ENTRY_PREFIX):
    with arf.open_file(arf_file, 'r') as af:
        os.mkdir(root_path)
        root = bark.Root(root_path)
        if verbose:
            print('Created Root: ' + root_path)
        tle = None
        found_trigin = False
        for ename, entry in af.items():  # entries and top-level datasets
            if isinstance(entry, h5py.Group):  # entries
                entry_path = os.path.join(root_path, ename)
                entry_attrs = copy_attrs(entry.attrs)
                for pos_arg in ('name', 'parents'):
                    # along with 'timestamp' below, these are positional arguments to create_entry
                    # for now, I prefer hard-coding them over messing with runtime introspection
                    new_name = pos_arg
                    while new_name in entry_attrs:
                        new_name = '{}_{}'.format(mangle_prefix, new_name)
                    try:
                        entry_attrs[new_name] = entry_attrs.pop(pos_arg)
                    except KeyError:
                        pass
                    else:
                        if verbose:
                            print('Renamed attribute {} of entry {} to {}'.
                                  format(pos_arg, ename, new_name))
                timestamp = entry_attrs.pop('timestamp')
                if timezone:
                    timestamp = bark.convert_timestamp(timestamp, timezone)
                else:
                    timestamp = bark.convert_timestamp(timestamp)
                bark_entry = bark.create_entry(entry_path,
                                               timestamp,
                                               parents=False,
                                               **entry_attrs)
                if verbose:
                    print('Created Entry: ' + entry_path)
                for ds_name, dataset in entry.items():  # entry-level datasets
                    if ds_name == 'trig_in':  # accessing trig_in -> segfault
                        found_trigin = True  # and skip the dataset
                    else:
                        transfer_dset(ds_name, dataset, entry_path, verbose)
            elif isinstance(entry, h5py.Dataset):  # top-level datasets
                if arf.is_time_series(entry) or arf.is_marked_pointproc(entry):
                    if tle is None:
                        path = os.path.join(root_path, 'top_level')
                        tle = bark.create_entry(path, 0, parents=False).path
                    transfer_dset(ename, entry, tle, verbose)
                else:
                    unknown_ds_warning(ename)  # and skip, w/o creating TLE
        if found_trigin:
            print('Warning: found datasets named "trig_in". Jill-created ' +
                  '"trig_in" datasets segfault when read, so these datasets' +
                  ' were skipped. If you know the datasets are good, rename' +
                  ' them and try again.')
    return bark.Root(root_path)
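A minimal call sketch for this converter (the file and directory names are hypothetical, and the timezone string is only an example):

# Convert an ARF file into a new Bark root; the target directory must
# not exist yet, because arf2bark calls os.mkdir on it.
root = arf2bark('session.arf',     # hypothetical input file
                'session_bark',    # Bark root directory to create
                timezone='America/Chicago',
                verbose=True)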
Example #2
import os

import arf
import bark
import numpy
import pandas

# copy_attrs, build_columns, and unknown_ds_warning are assumed to be
# defined elsewhere in the same module.
def transfer_dset(ds_name, ds, e_path, verbose=False):
    ds_attrs = copy_attrs(ds.attrs)
    units = ds_attrs.pop('units', None)
    if arf.is_time_series(ds):
        ds_name += '.dat'
        ds_path = os.path.join(e_path, ds_name)
        ds_attrs['columns'] = build_columns(units)
        sr = ds_attrs.pop('sampling_rate')
        bark_ds = bark.write_sampled(ds_path, ds, sr, **ds_attrs)
        if verbose:
            print('Created sampled dataset: ' + ds_path)
    elif arf.is_marked_pointproc(ds):
        ds_name += '.csv'
        ds_path = os.path.join(e_path, ds_name)
        ds_data = pandas.DataFrame(ds[:])
        ds_attrs['columns'] = build_columns(units,
                                            column_names=ds_data.columns)
        for ser in ds_data:
            if ds_data[ser].dtype == numpy.dtype('O'):  # bytes object
                ds_data[ser] = ds_data[ser].str.decode('utf-8')
        bark_ds = bark.write_events(ds_path, ds_data, **ds_attrs)
        if verbose:
            print('Created event dataset: ' + ds_path)
    else:
        unknown_ds_warning(ds_name)
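For context, a sketch of how transfer_dset might be driven by hand, mirroring the dataset loop in arf2bark above (the file, entry, and directory names are hypothetical, and the target Bark entry directory is assumed to exist already):

import arf

with arf.open_file('session.arf', 'r') as af:
    entry = af['entry_00001']  # hypothetical entry name
    for ds_name, dataset in entry.items():
        # writes ds_name + '.dat' for sampled data, or ds_name + '.csv'
        # for marked point processes, into the Bark entry directory
        transfer_dset(ds_name, dataset, 'session_bark/entry_00001',
                      verbose=True)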
Example #3
# This is a method excerpt; Node, DataError, and self._queue are assumed
# to be provided by the enclosing class and module.
def send(self, chunk):
    """Align spikes and compute features."""
    # pass through data we can't use
    from arf import is_marked_pointproc
    if not is_marked_pointproc(chunk.data) or "spike" not in chunk.data.dtype.names:
        Node.send(self, chunk)
    else:
        # read the data now, because it won't be available in close()
        self._queue.append(chunk._replace(data=chunk.data[:]))
        # all queued spikes must share a sampling rate (chunk.ds)
        if chunk.ds != self._queue[0].ds:
            self._queue.pop()
            raise DataError("%s: sampling rate doesn't match other spikes" % chunk)
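The snippet assumes a chunk type with at least data and ds (sampling rate) fields, since it calls chunk._replace and reads chunk.ds; a minimal stand-in consistent with that usage:

from collections import namedtuple

# Hypothetical chunk type: the real pipeline may carry more fields, but
# the _replace() and .ds usage above requires at least these two.
Chunk = namedtuple('Chunk', ('data', 'ds'))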
Example #4
import os

import arf
import bark
import h5py

# copy_attrs, transfer_dset, and unknown_ds_warning are assumed to be
# defined elsewhere in the same module.
def arf2bark(arf_file, root_path, timezone, verbose):
    with arf.open_file(arf_file, 'r') as af:
        os.mkdir(root_path)
        root = bark.Root(root_path)
        if verbose:
            print('Created Root: ' + root_path)
        tle = None
        found_trigin = False
        for ename, entry in af.items():  # entries and top-level datasets
            if isinstance(entry, h5py.Group):  # entries
                entry_path = os.path.join(root_path, ename)
                entry_attrs = copy_attrs(entry.attrs)
                timestamp = entry_attrs.pop('timestamp')
                # rename 'name' attribute created by openephys arf module
                try:
                    entry_attrs['openephys_name'] = entry_attrs.pop('name')
                except KeyError:
                    pass
                if timezone:
                    timestamp = bark.convert_timestamp(timestamp, timezone)
                else:
                    timestamp = bark.convert_timestamp(timestamp)

                bark_entry = bark.create_entry(entry_path,
                                               timestamp,
                                               parents=False,
                                               **entry_attrs)
                if verbose:
                    print('Created Entry: ' + entry_path)
                for ds_name, dataset in entry.items():  # entry-level datasets
                    if ds_name == 'trig_in':  # accessing trig_in -> segfault
                        found_trigin = True  # and skip the dataset
                    else:
                        transfer_dset(ds_name, dataset, entry_path, verbose)
            elif isinstance(entry, h5py.Dataset):  # top-level datasets
                if arf.is_time_series(entry) or arf.is_marked_pointproc(entry):
                    if tle is None:
                        path = os.path.join(root_path, 'top_level')
                        tle = bark.create_entry(path, 0, parents=False).path
                    transfer_dset(ename, entry, tle, verbose)
                else:
                    unknown_ds_warning(ename)  # and skip, w/o creating TLE
        if found_trigin:
            print('Warning: found datasets named "trig_in". Jill-created ' +
                  '"trig_in" datasets segfault when read, so these datasets' +
                  ' were skipped. If you know the datasets are good, rename' +
                  ' them and try again.')
    return bark.Root(root_path)
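This is an earlier variant of Example #1: it renames only the 'name' attribute (written by the Open Ephys arf module) instead of mangling every attribute that collides with a create_entry positional argument. A minimal call sketch (names are hypothetical):

root = arf2bark('session.arf', 'session_bark',
                timezone='America/Chicago', verbose=True)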