Example #1
    def roundtripContainer(self):
        data_file = NWBFile(
            session_description='a test file',
            identifier='data_file',
            session_start_time=self.start_time
        )
        data_file.add_acquisition(self.container)

        with HDF5IO(self.data_filename, 'w', manager=get_manager()) as self.data_write_io:
            self.data_write_io.write(data_file)

        with HDF5IO(self.data_filename, 'r', manager=get_manager()) as self.data_read_io:
            data_file_obt = self.data_read_io.read()

            with HDF5IO(self.link_filename, 'w', manager=get_manager()) as link_write_io:
                link_file = NWBFile(
                    session_description='a test file',
                    identifier='link_file',
                    session_start_time=self.start_time
                )
                link_file.add_acquisition(TimeSeries(
                    name='test_mod_ts',
                    unit='V',
                    data=data_file_obt.get_acquisition('data_ts'),
                    timestamps=H5DataIO(
                        data=data_file_obt.get_acquisition('data_ts').timestamps,
                        link_data=True
                    )
                ))
                link_write_io.write(link_file)

        with HDF5IO(self.link_filename, 'r', manager=get_manager()) as self.link_file_reader:
            return self.getContainer(self.link_file_reader.read())
Example #2
 def roundtripContainer(self, cache_spec=False):
     self.build_nwbfile()
     self.writer = HDF5IO(self.filename, manager=get_manager(), mode='w')
     self.writer.write(self.nwbfile, cache_spec=cache_spec)
     self.writer.close()
     self.reader = HDF5IO(self.filename, manager=get_manager(), mode='r')
     self.read_nwbfile = self.reader.read()
Example #3
    def setUp(self):
        self.manager = get_manager()
        self.path = "test_pynwb_io_hdf5.nwb"
        self.start_time = datetime(1970, 1, 1, 12, tzinfo=tzutc())
        self.create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())
        self.container = NWBFile(session_description='a test NWB File', identifier='TEST123',
                                 session_start_time=self.start_time, file_create_date=self.create_date)
        ts = TimeSeries(name='test_timeseries', data=list(range(100, 200, 10)), unit='SIunit',
                        timestamps=np.arange(10.), resolution=0.1)
        self.container.add_acquisition(ts)

        ts_builder = GroupBuilder('test_timeseries',
                                  attributes={'neurodata_type': 'TimeSeries'},
                                  datasets={'data': DatasetBuilder('data', list(range(100, 200, 10)),
                                                                   attributes={'unit': 'SIunit',
                                                                               'conversion': 1.0,
                                                                               'resolution': 0.1}),
                                            'timestamps': DatasetBuilder('timestamps', np.arange(10.),
                                                                         attributes={'unit': 'seconds',
                                                                                     'interval': 1})})
        self.builder = GroupBuilder(
            'root', groups={'acquisition': GroupBuilder('acquisition', groups={'test_timeseries': ts_builder}),
                            'analysis': GroupBuilder('analysis'),
                            'general': GroupBuilder('general'),
                            'processing': GroupBuilder('processing'),
                            'stimulus': GroupBuilder(
                                'stimulus',
                                groups={'presentation': GroupBuilder('presentation'),
                                        'templates': GroupBuilder('templates')})},
            datasets={'file_create_date': DatasetBuilder('file_create_date', [self.create_date.isoformat()]),
                      'identifier': DatasetBuilder('identifier', 'TEST123'),
                      'session_description': DatasetBuilder('session_description', 'a test NWB File'),
                      'nwb_version': DatasetBuilder('nwb_version', '1.0.6'),
                      'session_start_time': DatasetBuilder('session_start_time', self.start_time.isoformat())},
            attributes={'neurodata_type': 'NWBFile'})
Example #4
def main():

    ep = """
    use --nspath to validate against an extension. If --ns is not specified,
    validate against all namespaces in namespace file.
    """

    parser = ArgumentParser(description="Validate an NWB file", epilog=ep)
    parser.add_argument("path", type=str, help="the path to the NWB file")
    parser.add_argument('-p', '--nspath', type=str, help="the path to the namespace file")
    parser.add_argument("-n", "--ns", type=str, help="the namespace to validate against")

    args = parser.parse_args()

    if not os.path.exists(args.path):
        print('%s not found' % args.path, file=sys.stderr)
        sys.exit(1)

    io = HDF5IO(args.path, get_manager(), mode='r')

    if args.nspath is not None:
        namespaces = load_namespaces(args.nspath)
        if args.ns is not None:
            print('Validating against %s from %s.' % (args.ns, args.nspath))
            errors = validate(io, args.ns)
            _print_errors(errors)
        else:
            print('Validating using namespaces in %s.' % args.nspath)
            for ns in namespaces:
                print('Validating against %s' % ns)
                errors = validate(io, ns)
                _print_errors(errors)
    else:
        print('Validating against core namespace')
        errors = validate(io)
        _print_errors(errors)
Example #5
 def testInFromMatNWB(self):
     filename = 'MatNWB.' + self.__class__.__name__ + '.testOutToPyNWB.nwb'
     with HDF5IO(filename, manager=get_manager(), mode='r') as io:
         matfile = io.read()
         matcontainer = self.getContainer(matfile)
         pycontainer = self.getContainer(self.file)
         self.assertContainerEqual(matcontainer, pycontainer)
Example #6
def main():
    import os.path

    # example: start
    from datetime import datetime

    from pynwb import NWBFile, TimeSeries, get_manager
    from pynwb.form.backends.hdf5 import HDF5IO

    start_time = datetime(1970, 1, 1, 12, 0, 0)
    create_date = datetime(2017, 4, 15, 12, 0, 0)

    nwbfile = NWBFile('the PyNWB tutorial',
                      'a test NWB File',
                      'TEST123',
                      start_time,
                      file_create_date=create_date)

    ts = TimeSeries('test_timeseries',
                    'example_source',
                    list(range(100, 200, 10)),
                    'SIunit',
                    timestamps=list(range(10)),
                    resolution=0.1)

    nwbfile.add_acquisition(ts)

    io = HDF5IO("example.h5", manager=get_manager(), mode='w')
    io.write(nwbfile)
    io.close()
    # example: end

    os.remove("example.h5")
Example #7
 def testInFromMatNWB(self):
   filename = 'MatNWB.' + self.__class__.__name__ + '.testOutToPyNWB.nwb'
   io = HDF5IO(filename, manager=get_manager())
   matfile = io.read()
   io.close()
   matcontainer = self.getContainer(matfile)
   pycontainer = self.getContainer(self.file)
   self.assertContainerEqual(matcontainer, pycontainer)
Example #8
def get_data(data_sheet, imaging_folder, nwb_output_path):

    sbj_details = get_subject_sheet(data_sheet)

    all_imaging_times = pd.DataFrame()

    for sheet in sbj_details:
        subject_sheet = pd.read_excel(data_sheet,
                                      sheet_name=sheet,
                                      index_col=0)
        times, imaging_times_list = get_times_from_sheet(subject_sheet)
        data = pd.DataFrame(imaging_times_list)
        all_imaging_times = pd.concat([all_imaging_times, data])

    for i in sbj_details:
        subject_sheet = pd.read_excel(data_sheet, sheet_name=i, index_col=0)
        times, imaging_times_list = get_times_from_sheet(subject_sheet)

        if len(imaging_times_list) > 0:
            twop_folders = update_2p_spreadsheet(data_sheet, i, imaging_folder,
                                                 imaging_times_list,
                                                 all_imaging_times)
            tif_paths = []
            for folder_time in twop_folders:

                for file in os.listdir(
                        os.path.join(imaging_folder, folder_time)):
                    if file.endswith('_XYT.tif'):
                        tif_paths.append(
                            os.path.join(imaging_folder, folder_time, file))

            if len(tif_paths) > 0:
                mouse_id, exp_date, session_start, mouse_date_folder, output_filepath = \
                    mouse_folder_details(subject_sheet, data_sheet, imaging_times_list, nwb_output_path)

                source, session_description, identifier, session_start_time, lab, institution, experiment_description, \
                    virus = nwb_file_variables(imaging_folder, subject_sheet, mouse_id, exp_date, session_start)

                nwb_file = NWBFile(
                    source=source,
                    session_description=session_description,
                    identifier=identifier,
                    session_start_time=session_start_time,
                    lab=lab,
                    institution=institution,
                    experiment_description=experiment_description,
                    virus=virus)

                tifs = twop_ts(data_sheet, imaging_times_list, nwb_file,
                               twop_folders, imaging_folder, exp_date,
                               tif_paths)

                io = NWBHDF5IO(output_filepath,
                               manager=get_manager(),
                               mode='w')
                io.write(nwb_file)
                io.close()
Example #9
    def roundtripContainer(self, cache_spec=False):
        description = 'a file to test writing and reading a %s' % self.container_type
        identifier = 'TEST_%s' % self.container_type
        nwbfile = NWBFile(description, identifier, self.start_time, file_create_date=self.create_date)
        self.addContainer(nwbfile)

        self.writer = HDF5IO(self.filename, manager=get_manager(), mode='w')
        self.writer.write(nwbfile, cache_spec=cache_spec)
        self.writer.close()
        self.reader = HDF5IO(self.filename, manager=get_manager(), mode='r')
        self.read_nwbfile = self.reader.read()

        try:
            tmp = self.getContainer(self.read_nwbfile)
            return tmp
        except Exception as e:
            self.reader.close()
            self.reader = None
            raise e
Example #10
    def roundtripContainer(self):
        # create and write data file
        data_file = NWBFile(
            session_description='a test file',
            identifier='data_file',
            session_start_time=self.start_time
        )
        data_file.add_acquisition(self.container)

        with HDF5IO(self.data_filename, 'w', manager=get_manager()) as data_write_io:
            data_write_io.write(data_file)

        # read data file
        with HDF5IO(self.data_filename, 'r', manager=get_manager()) as self.data_read_io:
            data_file_obt = self.data_read_io.read()

            # write "link file" with timeseries.data that is an external link to the timeseries in "data file"
            # also link timeseries.timestamps.data to the timeseries.timestamps in "data file"
            with HDF5IO(self.link_filename, 'w', manager=get_manager()) as link_write_io:
                link_file = NWBFile(
                    session_description='a test file',
                    identifier='link_file',
                    session_start_time=self.start_time
                )
                self.link_container = TimeSeries(
                    name='test_mod_ts',
                    unit='V',
                    data=data_file_obt.get_acquisition('data_ts'),  # test direct link
                    timestamps=H5DataIO(
                        data=data_file_obt.get_acquisition('data_ts').timestamps,
                        link_data=True  # test with setting link data
                    )
                )
                link_file.add_acquisition(self.link_container)
                link_write_io.write(link_file)

        # note that self.link_container contains a link to a dataset that is now closed

        # read the link file
        self.link_read_io = HDF5IO(self.link_filename, 'r', manager=get_manager())
        self.read_nwbfile = self.link_read_io.read()
        return self.getContainer(self.read_nwbfile)
Example #11
 def roundtripContainer(self):
     description = 'a file to test writing and reading a %s' % self.container_type
     source = 'test_roundtrip for %s' % self.container_type
     identifier = 'TEST_%s' % self.container_type
     nwbfile = NWBFile(source,
                       description,
                       identifier,
                       self.start_time,
                       file_create_date=self.create_date)
     self.addContainer(nwbfile)
     self.writer = HDF5IO(self.filename, get_manager())
     self.writer.write(nwbfile)
     self.writer.close()
     self.reader = HDF5IO(self.filename, get_manager())
     read_nwbfile = self.reader.read()
     try:
         tmp = self.getContainer(read_nwbfile)
         return tmp
     except Exception as e:
         self.reader.close()
         self.reader = None
         raise e
Example #12
    def test_link_root(self):
        # create and write data file
        data_file = NWBFile(
            session_description='a test file',
            identifier='data_file',
            session_start_time=self.start_time
        )
        data_file.add_acquisition(self.container)

        with HDF5IO(self.data_filename, 'w', manager=get_manager()) as data_write_io:
            data_write_io.write(data_file)

        # read data file
        manager = get_manager()
        with HDF5IO(self.data_filename, 'r', manager=manager) as data_read_io:
            data_file_obt = data_read_io.read()

            link_file = NWBFile(
                session_description='a test file',
                identifier='link_file',
                session_start_time=self.start_time
            )
            link_container = data_file_obt.acquisition[self.container.name]
            link_file.add_acquisition(link_container)
            self.assertIs(link_container.parent, data_file_obt)

            with HDF5IO(self.link_filename, 'w', manager=manager) as link_write_io:
                link_write_io.write(link_file)

        # read the link file, check container sources
        with HDF5IO(self.link_filename, 'r+', manager=get_manager()) as link_file_reader:
            read_nwbfile = link_file_reader.read()
            self.assertNotEqual(read_nwbfile.acquisition[self.container.name].container_source,
                                read_nwbfile.container_source)
            self.assertEqual(read_nwbfile.acquisition[self.container.name].container_source,
                             self.data_filename)
            self.assertEqual(read_nwbfile.container_source, self.link_filename)
Example #13
 def setUp(self):
     """ Set up an NWBFile object with an acquisition TimeSeries, analysis TimeSeries, and a processing module """
     self.start_time = datetime(1970, 1, 1, 12, tzinfo=tzutc())
     self.ref_time = datetime(1979, 1, 1, 0, tzinfo=tzutc())
     self.create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())
     self.manager = get_manager()
     self.filename = 'test_nwbfileio.h5'
     self.nwbfile = NWBFile(
         session_description='a test NWB File',
         identifier='TEST123',
         session_start_time=self.start_time,
         timestamps_reference_time=self.ref_time,
         file_create_date=self.create_date,
         experimenter='test experimenter',
         stimulus_notes='test stimulus notes',
         data_collection='test data collection notes',
         experiment_description='test experiment description',
         institution='nomad',
         lab='nolab',
         notes='nonotes',
         pharmacology='nopharmacology',
         protocol='noprotocol',
         related_publications='nopubs',
         session_id='007',
         slices='noslices',
         source_script='nosources',
         surgery='nosurgery',
         virus='novirus',
         source_script_file_name='nofilename')
     self.ts = TimeSeries(name='test_timeseries',
                          data=list(range(100, 200, 10)),
                          unit='SIunit',
                          timestamps=np.arange(10.),
                          resolution=0.1)
     self.nwbfile.add_acquisition(self.ts)
     self.ts2 = TimeSeries(name='test_timeseries2',
                           data=list(range(200, 300, 10)),
                           unit='SIunit',
                           timestamps=np.arange(10.),
                           resolution=0.1)
     self.nwbfile.add_analysis(self.ts2)
     self.mod = self.nwbfile.create_processing_module(
         'test_module', 'a test module')
     self.ts3 = TimeSeries(name='test_timeseries2',
                           data=list(range(100, 200, 10)),
                           unit='SIunit',
                           timestamps=np.arange(10.),
                           resolution=0.1)
     self.mod.add(self.ts3)
Example #14
    def setUp(self):
        self.__manager = get_manager()
        self.start_time = datetime(1971, 1, 1, 12, tzinfo=tzutc())

        self.data = np.arange(2000).reshape((2, 1000))
        self.timestamps = np.linspace(0, 1, 1000)

        self.container = TimeSeries(
            name='data_ts',
            unit='V',
            data=self.data,
            timestamps=self.timestamps
        )

        self.data_filename = 'test_time_series_modular_data.nwb'
        self.link_filename = 'test_time_series_modular_link.nwb'
Example #15
def main():

    ep = """
    use --nspath to validate against an extension. If --ns is not specified,
    validate against all namespaces in namespace file.
    """

    parser = ArgumentParser(description="Validate an NWB file", epilog=ep)
    parser.add_argument("paths", type=str, nargs='+', help="NWB file paths")
    parser.add_argument('-p',
                        '--nspath',
                        type=str,
                        help="the path to the namespace file")
    parser.add_argument("-n",
                        "--ns",
                        type=str,
                        help="the namespace to validate against")

    args = parser.parse_args()
    ret = 0

    for path in args.paths:

        if not os.path.exists(path):
            print('%s not found' % path, file=sys.stderr)
            ret = 1
            continue

        with NWBHDF5IO(path, get_manager(), mode='r') as io:

            if args.nspath is not None:
                namespaces = load_namespaces(args.nspath)
                if args.ns is not None:
                    print('Validating %s against %s from %s.' %
                          (path, args.ns, args.nspath))
                    ret = _validate_helper(io=io, namespace=args.ns) or ret
                else:
                    print('Validating %s using namespaces in %s.' %
                          (path, args.nspath))
                    for ns in namespaces:
                        print('Validating against %s' % ns)
                        ret = _validate_helper(io=io, namespace=ns) or ret
            else:
                print('Validating %s against core namespace' % path)
                ret = _validate_helper(io=io) or ret

    sys.exit(ret)
Example #16
    def setUp(self):
        self.manager = get_manager()
        self.path = "test_pynwb_io_hdf5.h5"
        self.start_time = datetime(1970, 1, 1, 12, 0, 0)
        self.create_date = datetime(2017, 4, 15, 12, 0, 0)
        self.container = NWBFile('a test source',
                                 'a test NWB File',
                                 'TEST123',
                                 self.start_time,
                                 file_create_date=self.create_date)
        ts = TimeSeries('test_timeseries',
                        'example_source',
                        list(range(100, 200, 10)),
                        'SIunit',
                        timestamps=list(range(10)),
                        resolution=0.1)
        self.container.add_acquisition(ts)

        ts_builder = GroupBuilder('test_timeseries',
                                  attributes={
                                      'source': 'example_source',
                                      'neurodata_type': 'TimeSeries',
                                      'help': 'General purpose TimeSeries'
                                  },
                                  datasets={
                                      'data':
                                      DatasetBuilder('data',
                                                     list(range(100, 200, 10)),
                                                     attributes={
                                                         'unit': 'SIunit',
                                                         'conversion': 1.0,
                                                         'resolution': 0.1
                                                     }),
                                      'timestamps':
                                      DatasetBuilder('timestamps',
                                                     list(range(10)),
                                                     attributes={
                                                         'unit': 'Seconds',
                                                         'interval': 1
                                                     })
                                  })
        self.builder = GroupBuilder(
            'root',
            groups={
                'acquisition':
                GroupBuilder('acquisition',
                             groups={'test_timeseries': ts_builder}),
                'analysis':
                GroupBuilder('analysis'),
                'general':
                GroupBuilder('general'),
                'processing':
                GroupBuilder('processing'),
                'stimulus':
                GroupBuilder('stimulus',
                             groups={
                                 'presentation': GroupBuilder('presentation'),
                                 'templates': GroupBuilder('templates')
                             })
            },
            datasets={
                'file_create_date':
                DatasetBuilder('file_create_date', [str(self.create_date)]),
                'identifier':
                DatasetBuilder('identifier', 'TEST123'),
                'session_description':
                DatasetBuilder('session_description', 'a test NWB File'),
                'nwb_version':
                DatasetBuilder('nwb_version', '1.0.6'),
                'session_start_time':
                DatasetBuilder('session_start_time', str(self.start_time))
            },
            attributes={'neurodata_type': 'NWBFile'})
Example #17
    def __init__(self, **kwargs):
        path, mode, manager, extensions, load_namespaces, file_obj, comm =\
            popargs('path', 'mode', 'manager', 'extensions', 'load_namespaces', 'file', 'comm', kwargs)

        # root group
        self.__rgroup = file_obj
        chunk_store = getattr(file_obj, 'chunk_store', None)
        filename = None
        if chunk_store is not None:
            try:
                filename = getattr(chunk_store.source, 'path', None)
                if filename is None:
                    filename = chunk_store.source.name
            except Exception:
                filename = None
        if filename is None:
            filename = f'{type(file_obj.store).__name__}'
        self.__rgroup.filename = filename

        file_obj = self.__set_rgroup(file_obj)

        self.__built = dict()       # keep track of each builder for each dataset/group/link for each file
        self.__read = dict()        # keep track of which files have been read; key is the filename, value is the builder
        self.__file = file_obj

        if load_namespaces:
            if manager is not None:
                warn("loading namespaces from file - ignoring 'manager'")
            if extensions is not None:
                warn("loading namespaces from file - ignoring 'extensions' argument")
            # namespaces are not loaded when creating an NWBZARRHDF5IO object in write mode
            if 'w' in mode or mode == 'x':
                raise ValueError("cannot load namespaces from file when writing to it")

            tm = get_type_map()
            self.load_namespaces(tm, path, file=file_obj)
            manager = BuildManager(tm)

            # XXX: Leaving this here in case we want to revert to this strategy for
            #      loading cached namespaces
            # ns_catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace)
            # super(NWBZARRHDF5IO, self).load_namespaces(ns_catalog, path)
            # tm = TypeMap(ns_catalog)
            # tm.copy_mappers(get_type_map())
        else:
            if manager is not None and extensions is not None:
                raise ValueError("'manager' and 'extensions' cannot be specified together")
            elif extensions is not None:
                manager = get_manager(extensions=extensions)
            elif manager is None:
                manager = get_manager()

        self.logger = logging.getLogger('%s.%s' % (self.__class__.__module__, self.__class__.__qualname__))

        if file_obj is not None:
            if path is None:
                path = file_obj.filename
            elif os.path.abspath(file_obj.filename) != os.path.abspath(path):
                msg = 'You argued %s as this object\'s path, ' % path
                msg += 'but supplied a file with filename: %s' % file_obj.filename
                raise ValueError(msg)
        elif path is None:
            raise TypeError("Must supply either 'path' or 'file' arg to HDF5IO.")

        if file_obj is None and not os.path.exists(path) and (mode == 'r' or mode == 'r+'):
            msg = "Unable to open file %s in '%s' mode. File does not exist." % (path, mode)
            raise UnsupportedOperation(msg)

        if file_obj is None and os.path.exists(path) and (mode == 'w-' or mode == 'x'):
            msg = "Unable to open file %s in '%s' mode. File already exists." % (path, mode)
            raise UnsupportedOperation(msg)

        if manager is None:
            manager = BuildManager(TypeMap(NamespaceCatalog()))
        elif isinstance(manager, TypeMap):
            manager = BuildManager(manager)

        # TO DO #
        self._HDF5IO__comm = comm
        self._HDF5IO__mode = mode
        self._HDF5IO__path = path
        self._HDF5IO__file = file_obj
        super(_HDF5IO, self).__init__(manager, source=path)
        self._HDF5IO__ref_queue = deque()  # a queue of the references that need to be added
        self._HDF5IO__dci_queue = deque()  # a queue of DataChunkIterators that need to be exhausted
Example #18
 def setUp(self):
     self.__manager = get_manager()
     self.container = self.setUpContainer()
Example #19
def create_nwb_file(Sess, start_time):

    sr = 30000  #30kHz
    if sys.platform == 'win32':
        SaveDir = os.path.join(r'C:\Users\slashchevam\Desktop\NPx\Results',
                               Sess)
        RawDataDir = r'C:\Users\slashchevam\Desktop\NPx'
        ExcelInfoPath = RawDataDir

        PathToUpload = os.path.join(RawDataDir, Sess)

    if sys.platform == 'linux':
        SaveDir = os.path.join('/mnt/gs/departmentN4/Marina/NPx/Results', Sess)
        RawDataDir = '/mnt/gs/projects/OWVinckNatIm/NPx_recordings/'
        PathToAnalyzed = '/experiment1/recording1/continuous/Neuropix-PXI-100.0/'
        MatlabOutput = '/mnt/gs/projects/OWVinckNatIm/NPx_processed/Lev0_condInfo/'
        ExcelInfoPath = '/mnt/gs/departmentN4/Marina/'

        PathToUpload = RawDataDir + Sess + PathToAnalyzed

        if not os.path.exists(SaveDir):
            os.makedirs(SaveDir)
        os.chdir(SaveDir)

    # Upload all the data
    spike_stamps = np.load(os.path.join(PathToUpload, "spike_times.npy"))
    spike_times = spike_stamps / sr
    spike_clusters = np.load(os.path.join(PathToUpload, "spike_clusters.npy"))
    cluster_group = pd.read_csv(os.path.join(PathToUpload,
                                             "cluster_group.tsv"),
                                sep="\t")
    cluster_info = pd.read_csv(os.path.join(PathToUpload, "cluster_info.tsv"),
                               sep="\t")

    if len(cluster_group) != len(cluster_info):
        print('Cluster group (manual labeling) and cluster info do not match!')

    #excel_info = pd.read_excel((ExcelInfoPath + '\\Recordings_Marina_NPx.xlsx'), sheet_name=Sess)
    excel_info = pd.read_excel(os.path.join(ExcelInfoPath,
                                            'Recordings_Marina_NPx.xlsx'),
                               sheet_name=Sess)

    # Select spikes from good clusters only
    # Have to add the depth of the clusters
    good_clus_info = cluster_info[cluster_info['group'] == 'good']  # has depth info
    good_clus = good_clus_info[['id', 'group']]
    print("Found", len(good_clus), "good clusters")

    good_spikes_ind = [x in good_clus['id'].values for x in spike_clusters]
    spike_clus_good = spike_clusters[good_spikes_ind]
    spike_times_good = spike_times[good_spikes_ind]
    # spike_stamps_good = spike_stamps[good_spikes_ind]

    if excel_info['Area'][0] == 'V1':
        good_clus_info['area'] = 'V1'
    else:
        good_clus_info['area'] = good_clus_info['depth'] > np.max(
            good_clus_info['depth']) - 1000
        good_clus_info['area'] = good_clus_info['area'].replace(True, 'V1')
        good_clus_info['area'] = good_clus_info['area'].replace(False, 'HPC')

    del spike_clusters, spike_times, spike_stamps, good_spikes_ind

    # Now reading digitals from condInfo
    # This has to be checked carefully again, especially for few stimuli in the session

    # cond class contains the following:
    #   'spontaneous_brightness': dict_keys(['name', 'time', 'timestamps', 'trl_list', 'conf'])
    #   'natural_images': dict_keys(['name', 'time', 'timestamps', 'trl_list', 'conf', 'img_order', 'img_name'])

    class condInfo:
        pass

    if sys.platform == 'linux':
        mat = scipy.io.loadmat(
            os.path.join(MatlabOutput, Sess, 'condInfo_01.mat'))
    if sys.platform == 'win32':
        mat = scipy.io.loadmat(os.path.join(PathToUpload, 'condInfo_01.mat'))

    SC_stim_labels = mat['StimClass'][0][0][0][0]
    SC_stim_present = np.where(mat['StimClass'][0][0][1][0] == 1)[0]
    SC_stim_labels_present = SC_stim_labels[SC_stim_present]

    cond = [condInfo() for i in range(len(SC_stim_labels_present))]

    for stim in range(len(SC_stim_labels_present)):
        cond[stim].name = SC_stim_labels_present[stim][0]
        cond[stim].stiminfo = mat['StimClass'][0][0][3][
            0, SC_stim_present[stim]][0][0][0][1]  # image indices are here

        # sorting out digitals for spontaneous activity
        # Need this loop in case there are few periods of spont, recorded like separate blocks
        if SC_stim_labels_present[stim][0] == 'spontaneous_brightness':
            cond[stim].time = []
            cond[stim].timestamps = []
            for block in range(
                    len(mat['StimClass'][0][0][3][0, SC_stim_present[stim]][0]
                        [0])):
                print(block)
                cond[stim].time.append(mat['StimClass'][0][0][3][
                    0, SC_stim_present[stim]][0][0][block][2])
                cond[stim].timestamps.append(mat['StimClass'][0][0][3][
                    0, SC_stim_present[stim]][0][0][block][3])

        cond[stim].trl_list = mat['StimClass'][0][0][3][
            0, SC_stim_present[stim]][1]
        cond[stim].conf = mat['StimClass'][0][0][2][
            0,
            SC_stim_present[stim]]  # config is very likely wrong and useless

        # sorting out digitals for natural images
        if SC_stim_labels_present[stim][0] == 'natural_images':
            cond[stim].time = mat['StimClass'][0][0][3][
                0, SC_stim_present[stim]][0][0][0][2]
            cond[stim].timestamps = mat['StimClass'][0][0][3][
                0, SC_stim_present[stim]][0][0][0][3]
            img_order = []
            for i in range(len(cond[stim].stiminfo)):
                img_order.append(int(cond[stim].stiminfo[i][2]))
            cond[stim].img_order = img_order
            cond[stim].img_name = cond[stim].conf[0][0][0][10][
                0]  # currently not used but might be needed later

        # sorting out digitals for drifting gratings
        if SC_stim_labels_present[stim][0] == 'drifting_gratings':
            cond[stim].time = mat['StimClass'][0][0][3][
                0, SC_stim_present[stim]][0][0][0][2]
            cond[stim].timestamps = mat['StimClass'][0][0][3][
                0, SC_stim_present[stim]][0][0][0][3]
            dg_orient = []
            for i in range(len(cond[stim].stiminfo)):
                dg_orient.append(int(cond[stim].stiminfo[i][2]))
            cond[stim].dg_orient = dg_orient

    # Now create NWB file
    # start_time is passed in, e.g. datetime(2020, 2, 27, 14, 36, 7, tzinfo=tzlocal())
    nwb_subject = Subject(description="Pretty nice girl",
                          sex='F',
                          species='mouse',
                          subject_id=excel_info['Mouse'].values[0],
                          genotype=excel_info['Genotype'].values[0])

    nwbfile = NWBFile(
        session_description=
        "NPx recording of Natural images and spontaneous activity",
        session_id=Sess,
        identifier='NWB123',
        session_start_time=start_time,
        experimenter='Marina Slashcheva',
        institution='ESI, Frankfurt',
        lab='Martin Vinck',
        notes=' | '.join(
            [x for x in list(excel_info['Note'].values) if str(x) != 'nan']),
        protocol=' | '.join([
            x for x in list(excel_info['experiment'].values) if str(x) != 'nan'
        ]),
        data_collection=
        'Ref: {}, Probe_angle: {}, Depth: {}, APcoord: {}, MLcoord: {}, Recday: {}, Hemi: {}'
        .format(excel_info['refCh'].values[0],
                excel_info['Angle_probe'].values[0],
                excel_info['Depth'].values[0],
                excel_info['anteroposterior'].values[0],
                excel_info['mediolateral'].values[0],
                excel_info['Recday'].values[0],
                excel_info['Hemisphere'].values[0]),
        subject=nwb_subject)

    # Did not add it for the moment, later add running as a timeseries and add to HDF5 as binary parameter
    # test_ts = TimeSeries(name='test_timeseries', data=data, unit='m', timestamps=timestamps)

    # Add units
    nwbfile.add_unit_column(
        'location',
        'the anatomical location of this unit')  # to be added and CHECKED
    nwbfile.add_unit_column('depth', 'depth on the NPx probe')
    nwbfile.add_unit_column('channel', 'channel on the NPx probe')
    nwbfile.add_unit_column('fr', 'average FR according to KS')

    for un in good_clus_info['id']:
        info_tmp = good_clus_info[good_clus_info['id'] == un]
        spike_times_tmp = spike_times_good[spike_clus_good == un]

        nwbfile.add_unit(id=un,
                         spike_times=np.transpose(spike_times_tmp)[0],
                         location=info_tmp['area'].values[0],
                         depth=info_tmp['depth'].values[0],
                         channel=info_tmp['ch'].values[0],
                         fr=info_tmp['fr'].values[0])
        del spike_times_tmp

    # Add epochs
    for ep in range(len(cond)):
        if cond[ep].name == 'spontaneous_brightness':
            #if len(cond[ep].time) > 1:
            for bl in range(len(cond[ep].time)):
                nwbfile.add_epoch(cond[ep].time[bl][0][0],
                                  cond[ep].time[bl][0][1], cond[ep].name)
            #else:
            #    nwbfile.add_epoch(cond[ep].time[0][0], cond[ep].time[0][1], cond[ep].name)

        if cond[ep].name == 'natural_images':
            nwbfile.add_epoch(cond[ep].time[0][0], cond[ep].time[-1][1],
                              cond[ep].name)

        if cond[ep].name == 'drifting_gratings':
            nwbfile.add_epoch(cond[ep].time[0][0], cond[ep].time[-1][1],
                              cond[ep].name)

    # Add trials
    # Images names can be also added here
    nwbfile.add_trial_column(
        name='start', description='start time relative to the stimulus onset')
    nwbfile.add_trial_column(
        name='stimset',
        description='the visual stimulus type during the trial')
    nwbfile.add_trial_column(name='img_id',
                             description='image ID for Natural Images')

    for ep in range(len(cond)):
        if cond[ep].name == 'spontaneous_brightness':
            #if len(cond[ep].time) > 1:
            for tr in range(len(cond[ep].time)):
                nwbfile.add_trial(start_time=cond[ep].time[tr][0][0],
                                  stop_time=cond[ep].time[tr][0][1],
                                  start=cond[ep].time[tr][0][2],
                                  stimset=(cond[ep].name).encode('utf8'),
                                  img_id=('gray').encode('utf8'))

#            else:
#                nwbfile.add_trial(start_time = cond[ep].time[0][0], stop_time = cond[ep].time[0][1],
#                                  start = cond[ep].time[0][2],
#                                  stimset = (cond[ep].name).encode('utf8'),
#                                  img_id = ('gray').encode('utf8'))

        if cond[ep].name == 'natural_images':
            for tr in range(len(cond[ep].time)):
                nwbfile.add_trial(start_time=cond[ep].time[tr][0],
                                  stop_time=cond[ep].time[tr][1],
                                  start=cond[ep].time[tr][2],
                                  stimset=(cond[ep].name).encode('utf8'),
                                  img_id=(str(
                                      cond[ep].img_order[tr])).encode('utf8'))

        if cond[ep].name == 'drifting_gratings':
            for tr in range(len(cond[ep].time)):
                nwbfile.add_trial(start_time=cond[ep].time[tr][0],
                                  stop_time=cond[ep].time[tr][1],
                                  start=cond[ep].time[tr][2],
                                  stimset=(cond[ep].name).encode('utf8'),
                                  img_id=(str(
                                      cond[ep].dg_orient[tr])).encode('utf8'))

    # Write NWB file
    os.chdir(SaveDir)
    name_to_save = Sess + '.nwb'
    io = NWBHDF5IO(name_to_save, manager=get_manager(), mode='w')
    io.write(nwbfile)
    io.close()

    del nwbfile
Example #20
def nwb_copy_file(old_file, new_file, cp_objs={}, save_to_file=True):
    """
    Copy fields defined in 'obj', from existing NWB file to new NWB file.

    Parameters
    ----------
    old_file : str, path, nwbfile
        String or path to nwb file '/path/to/old_file.nwb'. Alternatively, the
        nwbfile object.
    new_file : str, path
        String such as '/path/to/new_file.nwb'.
    cp_objs : dict
        Name:Value pairs (Group:Children) listing the groups and respective
        children from the current NWB file to be copied. Children can be:
        - Boolean, indicating an attribute (e.g. for institution, lab)
        - List of strings, containing several children names
        Example:
        {'institution':True,
         'lab':True,
         'acquisition':['microphone'],
         'ecephys':['LFP','DecompositionSeries']}
    save_to_file: Boolean
        If True, saves directly to new_file.nwb. If False, only returns nwb_new.

    Returns:
    --------
    nwb_new : nwbfile object
    """

    manager = get_manager()

    # Get from nwbfile object in memory or from file
    if isinstance(old_file, NWBFile):
        nwb_old = old_file
        io1 = False
    else:
        io1 = NWBHDF5IO(str(old_file),
                        'r',
                        manager=manager,
                        load_namespaces=True)
        nwb_old = io1.read()

    # Creates new file
    nwb_new = NWBFile(
        session_description=str(nwb_old.session_description),
        identifier=id_generator(),
        session_start_time=nwb_old.session_start_time,
    )
    with NWBHDF5IO(new_file, mode='w', manager=manager,
                   load_namespaces=False) as io2:
        # Institution name ------------------------------------------------
        if 'institution' in cp_objs:
            nwb_new.institution = str(nwb_old.institution)

        # Lab name --------------------------------------------------------
        if 'lab' in cp_objs:
            nwb_new.lab = str(nwb_old.lab)

        # Session id ------------------------------------------------------
        if 'session' in cp_objs:
            nwb_new.session_id = nwb_old.session_id

        # Devices ---------------------------------------------------------
        if 'devices' in cp_objs:
            for aux in list(nwb_old.devices.keys()):
                dev = Device(nwb_old.devices[aux].name)
                nwb_new.add_device(dev)

        # Electrode groups ------------------------------------------------
        if 'electrode_groups' in cp_objs and nwb_old.electrode_groups is not None:
            for aux in list(nwb_old.electrode_groups.keys()):
                nwb_new.create_electrode_group(
                    name=str(nwb_old.electrode_groups[aux].name),
                    description=str(nwb_old.electrode_groups[aux].description),
                    location=str(nwb_old.electrode_groups[aux].location),
                    device=nwb_new.get_device(
                        nwb_old.electrode_groups[aux].device.name))

        # Electrodes ------------------------------------------------------
        if 'electrodes' in cp_objs and nwb_old.electrodes is not None:
            nElec = len(nwb_old.electrodes['x'].data[:])
            for aux in np.arange(nElec):
                nwb_new.add_electrode(
                    x=nwb_old.electrodes['x'][aux],
                    y=nwb_old.electrodes['y'][aux],
                    z=nwb_old.electrodes['z'][aux],
                    imp=nwb_old.electrodes['imp'][aux],
                    location=str(nwb_old.electrodes['location'][aux]),
                    filtering=str(nwb_old.electrodes['filtering'][aux]),
                    group=nwb_new.get_electrode_group(
                        nwb_old.electrodes['group'][aux].name),
                    group_name=str(nwb_old.electrodes['group_name'][aux]))
            # if there are custom variables
            new_vars = list(nwb_old.electrodes.colnames)
            default_vars = [
                'x', 'y', 'z', 'imp', 'location', 'filtering', 'group',
                'group_name'
            ]
            [new_vars.remove(var) for var in default_vars if var in new_vars]
            for var in new_vars:
                if var == 'label':
                    var_data = [
                        str(elem) for elem in nwb_old.electrodes[var].data[:]
                    ]
                else:
                    var_data = np.array(nwb_old.electrodes[var].data[:])

                nwb_new.add_electrode_column(
                    name=str(var),
                    description=str(nwb_old.electrodes[var].description),
                    data=var_data)

            # If Bipolar scheme for electrodes
            for v in nwb_old.lab_meta_data.values():
                if isinstance(v, EcephysExt) and hasattr(
                        v, 'bipolar_scheme_table'):
                    bst_old = v.bipolar_scheme_table
                    bst_new = BipolarSchemeTable(
                        name=bst_old.name, description=bst_old.description)
                    ecephys_ext = EcephysExt(name=v.name)
                    ecephys_ext.bipolar_scheme_table = bst_new
                    nwb_new.add_lab_meta_data(ecephys_ext)

        # Epochs ----------------------------------------------------------
        if 'epochs' in cp_objs and nwb_old.epochs is not None:
            nEpochs = len(nwb_old.epochs['start_time'].data[:])
            for i in np.arange(nEpochs):
                nwb_new.add_epoch(
                    start_time=nwb_old.epochs['start_time'].data[i],
                    stop_time=nwb_old.epochs['stop_time'].data[i])
            # if there are custom variables
            new_vars = list(nwb_old.epochs.colnames)
            default_vars = ['start_time', 'stop_time', 'tags', 'timeseries']
            [new_vars.remove(var) for var in default_vars if var in new_vars]
            for var in new_vars:
                nwb_new.add_epoch_column(
                    name=var,
                    description=nwb_old.epochs[var].description,
                    data=nwb_old.epochs[var].data[:])

        # Invalid times ---------------------------------------------------
        if 'invalid_times' in cp_objs and nwb_old.invalid_times is not None:
            nInvalid = len(nwb_old.invalid_times['start_time'][:])
            for aux in np.arange(nInvalid):
                nwb_new.add_invalid_time_interval(
                    start_time=nwb_old.invalid_times['start_time'][aux],
                    stop_time=nwb_old.invalid_times['stop_time'][aux])

        # Trials ----------------------------------------------------------
        if 'trials' in cp_objs and nwb_old.trials is not None:
            nTrials = len(nwb_old.trials['start_time'])
            for aux in np.arange(nTrials):
                nwb_new.add_trial(start_time=nwb_old.trials['start_time'][aux],
                                  stop_time=nwb_old.trials['stop_time'][aux])
            # if there are custom variables
            new_vars = list(nwb_old.trials.colnames)
            default_vars = ['start_time', 'stop_time']
            [new_vars.remove(var) for var in default_vars if var in new_vars]
            for var in new_vars:
                nwb_new.add_trial_column(
                    name=var,
                    description=nwb_old.trials[var].description,
                    data=nwb_old.trials[var].data[:])

        # Intervals -------------------------------------------------------
        if 'intervals' in cp_objs and nwb_old.intervals is not None:
            all_objs_names = list(nwb_old.intervals.keys())
            for obj_name in all_objs_names:
                obj_old = nwb_old.intervals[obj_name]
                # create and add TimeIntervals
                obj = TimeIntervals(name=obj_old.name,
                                    description=obj_old.description)
                nInt = len(obj_old['start_time'])
                for ind in np.arange(nInt):
                    obj.add_interval(start_time=obj_old['start_time'][ind],
                                     stop_time=obj_old['stop_time'][ind])
                # Add to file
                nwb_new.add_time_intervals(obj)

        # Stimulus --------------------------------------------------------
        if 'stimulus' in cp_objs:
            all_objs_names = list(nwb_old.stimulus.keys())
            for obj_name in all_objs_names:
                obj_old = nwb_old.stimulus[obj_name]
                obj = TimeSeries(name=obj_old.name,
                                 description=obj_old.description,
                                 data=obj_old.data[:],
                                 rate=obj_old.rate,
                                 resolution=obj_old.resolution,
                                 conversion=obj_old.conversion,
                                 starting_time=obj_old.starting_time,
                                 unit=obj_old.unit)
                nwb_new.add_stimulus(obj)

        # Processing modules ----------------------------------------------
        if 'ecephys' in cp_objs:
            interfaces = [
                nwb_old.processing['ecephys'].data_interfaces[key]
                for key in cp_objs['ecephys']
            ]
            # Add ecephys module to NWB file
            ecephys_module = ProcessingModule(
                name='ecephys',
                description='Extracellular electrophysiology data.')
            nwb_new.add_processing_module(ecephys_module)
            for interface_old in interfaces:
                obj = copy_obj(interface_old, nwb_old, nwb_new)
                if obj is not None:
                    ecephys_module.add_data_interface(obj)

        if 'behavior' in cp_objs:
            interfaces = [
                nwb_old.processing['behavior'].data_interfaces[key]
                for key in cp_objs['behavior']
            ]
            if 'behavior' not in nwb_new.processing:
                # Add behavior module to NWB file
                behavior_module = ProcessingModule(
                    name='behavior', description='behavioral data.')
                nwb_new.add_processing_module(behavior_module)
            for interface_old in interfaces:
                obj = copy_obj(interface_old, nwb_old, nwb_new)
                if obj is not None:
                    behavior_module.add_data_interface(obj)

        # Acquisition -----------------------------------------------------
        # Can get raw ElectricalSeries and Mic recording
        if 'acquisition' in cp_objs:
            for acq_name in cp_objs['acquisition']:
                obj_old = nwb_old.acquisition[acq_name]
                acq = copy_obj(obj_old, nwb_old, nwb_new)
                nwb_new.add_acquisition(acq)

        # Surveys ---------------------------------------------------------
        if 'surveys' in cp_objs and 'behavior' in nwb_old.processing:
            surveys_list = [
                v for v in
                nwb_old.processing['behavior'].data_interfaces.values()
                if v.neurodata_type == 'SurveyTable'
            ]
            if cp_objs['surveys'] and len(surveys_list) > 0:
                if 'behavior' not in nwb_new.processing:
                    # Add behavior module to NWB file
                    behavior_module = ProcessingModule(
                        name='behavior', description='behavioral data.')
                    nwb_new.add_processing_module(behavior_module)
                for obj_old in surveys_list:
                    srv = copy_obj(obj_old, nwb_old, nwb_new)
                    behavior_module.add_data_interface(srv)

        # Subject ---------------------------------------------------------
        if nwb_old.subject is not None:
            if 'subject' in cp_objs:
                try:
                    cortical_surfaces = CorticalSurfaces()
                    surfaces = nwb_old.subject.cortical_surfaces.surfaces
                    for sfc in list(surfaces.keys()):
                        cortical_surfaces.create_surface(
                            name=surfaces[sfc].name,
                            faces=surfaces[sfc].faces,
                            vertices=surfaces[sfc].vertices)
                    nwb_new.subject = ECoGSubject(
                        cortical_surfaces=cortical_surfaces,
                        subject_id=nwb_old.subject.subject_id,
                        age=nwb_old.subject.age,
                        description=nwb_old.subject.description,
                        genotype=nwb_old.subject.genotype,
                        sex=nwb_old.subject.sex,
                        species=nwb_old.subject.species,
                        weight=nwb_old.subject.weight,
                        date_of_birth=nwb_old.subject.date_of_birth)
                except Exception:
                    nwb_new.subject = Subject(**nwb_old.subject.fields)

        # Write new file with copied fields
        if save_to_file:
            io2.write(nwb_new, link_data=False)

    # Close old file and return new nwbfile object
    if io1:
        io1.close()

    return nwb_new
Example #21
 def testOutToMatNWB(self):
     filename = 'PyNWB.' + self.__class__.__name__ + '.testOutToMatNWB.nwb'
     with HDF5IO(filename, manager=get_manager(), mode='w') as io:
         io.write(self.file)
     self.assertTrue(os.path.isfile(filename))
Example #22
def nwb_copy_file(old_file, new_file, cp_objs={}):
    """
    Copy fields defined in 'obj', from existing NWB file to new NWB file.

    Parameters
    ----------
    old_file : str, path
        String such as '/path/to/old_file.nwb'.
    new_file : str, path
        String such as '/path/to/new_file.nwb'.
    cp_objs : dict
        Name:Value pairs (Group:Children) listing the groups and respective
        children from the current NWB file to be copied. Children can be:
        - Boolean, indicating an attribute (e.g. for institution, lab)
        - List of strings, containing several children names
        Example:
        {'institution':True,
         'lab':True,
         'acquisition':['microphone'],
         'ecephys':['LFP','DecompositionSeries']}
    """

    manager = get_manager()

    # Open original signal file
    with NWBHDF5IO(old_file, 'r', manager=manager,
                   load_namespaces=True) as io1:
        nwb_old = io1.read()

        # Creates new file
        nwb_new = NWBFile(session_description=str(nwb_old.session_description),
                          identifier='',
                          session_start_time=datetime.now(tzlocal()))
        with NWBHDF5IO(new_file, mode='w', manager=manager,
                       load_namespaces=False) as io2:
            # Institution name ------------------------------------------------
            if 'institution' in cp_objs:
                nwb_new.institution = str(nwb_old.institution)

            # Lab name --------------------------------------------------------
            if 'lab' in cp_objs:
                nwb_new.lab = str(nwb_old.lab)

            # Session id ------------------------------------------------------
            if 'session' in cp_objs:
                nwb_new.session_id = nwb_old.session_id

            # Devices ---------------------------------------------------------
            if 'devices' in cp_objs:
                for aux in list(nwb_old.devices.keys()):
                    dev = Device(nwb_old.devices[aux].name)
                    nwb_new.add_device(dev)

            # Electrode groups ------------------------------------------------
            if 'electrode_groups' in cp_objs:
                for aux in list(nwb_old.electrode_groups.keys()):
                    nwb_new.create_electrode_group(
                        name=str(nwb_old.electrode_groups[aux].name),
                        description=str(nwb_old.electrode_groups[
                            aux].description),
                        location=str(nwb_old.electrode_groups[aux].location),
                        device=nwb_new.get_device(
                            nwb_old.electrode_groups[aux].device.name)
                    )

            # Electrodes ------------------------------------------------------
            if 'electrodes' in cp_objs:
                nElec = len(nwb_old.electrodes['x'].data[:])
                for aux in np.arange(nElec):
                    nwb_new.add_electrode(
                        x=nwb_old.electrodes['x'][aux],
                        y=nwb_old.electrodes['y'][aux],
                        z=nwb_old.electrodes['z'][aux],
                        imp=nwb_old.electrodes['imp'][aux],
                        location=str(nwb_old.electrodes['location'][aux]),
                        filtering=str(nwb_old.electrodes['filtering'][aux]),
                        group=nwb_new.get_electrode_group(
                            nwb_old.electrodes['group'][aux].name),
                        group_name=str(nwb_old.electrodes['group_name'][aux])
                    )
                # if there are custom variables
                new_vars = list(nwb_old.electrodes.colnames)
                default_vars = ['x', 'y', 'z', 'imp', 'location', 'filtering',
                                'group', 'group_name']
                [new_vars.remove(var) for var in default_vars if var in new_vars]
                for var in new_vars:

                    if var == 'label':
                        var_data = [str(elem) for elem in nwb_old.electrodes[
                                                          var].data[:]]
                    else:
                        var_data = np.array(nwb_old.electrodes[var].data[:])

                    nwb_new.add_electrode_column(
                        name=str(var),
                        description=str(nwb_old.electrodes[var].description),
                        data=var_data)

            # Epochs ----------------------------------------------------------
            if 'epochs' in cp_objs:
                nEpochs = len(nwb_old.epochs['start_time'].data[:])
                for i in np.arange(nEpochs):
                    nwb_new.add_epoch(
                        start_time=nwb_old.epochs['start_time'].data[i],
                        stop_time=nwb_old.epochs['stop_time'].data[i])
                # if there are custom variables
                new_vars = list(nwb_old.epochs.colnames)
                default_vars = ['start_time', 'stop_time', 'tags',
                                'timeseries']
                new_vars = [var for var in new_vars
                            if var not in default_vars]
                for var in new_vars:
                    nwb_new.add_epoch_column(name=var,
                                             description=nwb_old.epochs[
                                                 var].description,
                                             data=nwb_old.epochs[var].data[:])

            # Invalid times ---------------------------------------------------
            if 'invalid_times' in cp_objs:
                nInvalid = len(nwb_old.invalid_times['start_time'][:])
                for aux in np.arange(nInvalid):
                    nwb_new.add_invalid_time_interval(
                        start_time=nwb_old.invalid_times['start_time'][aux],
                        stop_time=nwb_old.invalid_times['stop_time'][aux])

            # Trials ----------------------------------------------------------
            if 'trials' in cp_objs:
                nTrials = len(nwb_old.trials['start_time'])
                for aux in np.arange(nTrials):
                    nwb_new.add_trial(
                        start_time=nwb_old.trials['start_time'][aux],
                        stop_time=nwb_old.trials['stop_time'][aux])
                # if there are custom variables
                new_vars = list(nwb_old.trials.colnames)
                default_vars = ['start_time', 'stop_time']
                new_vars = [var for var in new_vars
                            if var not in default_vars]
                for var in new_vars:
                    nwb_new.add_trial_column(name=var,
                                             description=nwb_old.trials[
                                                 var].description,
                                             data=nwb_old.trials[var].data[:])

            # Intervals -------------------------------------------------------
            if 'intervals' in cp_objs:
                all_objs_names = list(nwb_old.intervals.keys())
                for obj_name in all_objs_names:
                    obj_old = nwb_old.intervals[obj_name]
                    # create and add TimeIntervals
                    obj = TimeIntervals(name=obj_old.name,
                                        description=obj_old.description)
                    nInt = len(obj_old['start_time'])
                    for ind in np.arange(nInt):
                        obj.add_interval(start_time=obj_old['start_time'][ind],
                                         stop_time=obj_old['stop_time'][ind])
                    # Add to file
                    nwb_new.add_time_intervals(obj)

            # Stimulus --------------------------------------------------------
            if 'stimulus' in cp_objs:
                all_objs_names = list(nwb_old.stimulus.keys())
                for obj_name in all_objs_names:
                    obj_old = nwb_old.stimulus[obj_name]
                    # NOTE: assumes the original stimulus series is defined by
                    # starting_time/rate; a timestamps-based series would need
                    # its timestamps copied instead
                    obj = TimeSeries(name=obj_old.name,
                                     description=obj_old.description,
                                     data=obj_old.data[:],
                                     rate=obj_old.rate,
                                     resolution=obj_old.resolution,
                                     conversion=obj_old.conversion,
                                     starting_time=obj_old.starting_time,
                                     unit=obj_old.unit)
                    nwb_new.add_stimulus(obj)

            # Processing modules ----------------------------------------------
            if 'ecephys' in cp_objs:
                if cp_objs['ecephys'] is True:
                    # copy all interfaces (the objects, not just their names)
                    interfaces = list(nwb_old.processing[
                        'ecephys'].data_interfaces.values())
                else:  # list of items
                    interfaces = [
                        nwb_old.processing['ecephys'].data_interfaces[key]
                        for key in cp_objs['ecephys']
                    ]
                # Add ecephys module to NWB file
                ecephys_module = ProcessingModule(
                    name='ecephys',
                    description='Extracellular electrophysiology data.'
                )
                nwb_new.add_processing_module(ecephys_module)
                for interface_old in interfaces:
                    obj = copy_obj(interface_old, nwb_old, nwb_new)
                    if obj is not None:
                        ecephys_module.add_data_interface(obj)

            # Acquisition -----------------------------------------------------
            if 'acquisition' in cp_objs:
                if cp_objs['acquisition'] is True:
                    all_acq_names = list(nwb_old.acquisition.keys())
                else:  # list of items
                    all_acq_names = cp_objs['acquisition']
                for acq_name in all_acq_names:
                    obj_old = nwb_old.acquisition[acq_name]
                    obj = copy_obj(obj_old, nwb_old, nwb_new)
                    if obj is not None:
                        nwb_new.add_acquisition(obj)

            # Subject ---------------------------------------------------------
            if 'subject' in cp_objs:
                try:
                    cortical_surfaces = CorticalSurfaces()
                    surfaces = nwb_old.subject.cortical_surfaces.surfaces
                    for sfc in list(surfaces.keys()):
                        cortical_surfaces.create_surface(
                            name=surfaces[sfc].name,
                            faces=surfaces[sfc].faces,
                            vertices=surfaces[sfc].vertices)
                    nwb_new.subject = ECoGSubject(
                        cortical_surfaces=cortical_surfaces,
                        subject_id=nwb_old.subject.subject_id,
                        age=nwb_old.subject.age,
                        description=nwb_old.subject.description,
                        genotype=nwb_old.subject.genotype,
                        sex=nwb_old.subject.sex,
                        species=nwb_old.subject.species,
                        weight=nwb_old.subject.weight,
                        date_of_birth=nwb_old.subject.date_of_birth)
                except Exception:
                    # fall back to a plain Subject if the old file has no
                    # ECoG-specific subject data (e.g. no cortical surfaces)
                    nwb_new.subject = Subject(age=nwb_old.subject.age,
                                              description=nwb_old.subject.description,
                                              genotype=nwb_old.subject.genotype,
                                              sex=nwb_old.subject.sex,
                                              species=nwb_old.subject.species,
                                              subject_id=nwb_old.subject.subject_id,
                                              weight=nwb_old.subject.weight,
                                              date_of_birth=nwb_old.subject.date_of_birth)

            # Write new file with copied fields
            io2.write(nwb_new, link_data=False)
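
A hedged usage sketch for the copy routine above; its enclosing function name
and signature are not visible in this excerpt, so copy_nwb_fields(old_file,
new_file, cp_objs) is a hypothetical wrapper consistent with the docstring:

# Hypothetical call; 'copy_nwb_fields' and both file names are placeholders
cp_objs = {
    'institution': True,
    'lab': True,
    'devices': True,
    'electrode_groups': True,
    'electrodes': True,
    'acquisition': ['microphone'],
    'ecephys': ['LFP', 'DecompositionSeries'],
}
copy_nwb_fields(old_file='original.nwb', new_file='copy.nwb', cp_objs=cp_objs)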
Example #23
File: base.py  Project: campagnola/pynwb
def setUp(self):
    self.__manager = get_manager()
    self.__container = self.setUpContainer()
    self.__builder = self.setUpBuilder()
Example #24
def testOutToMatNWB(self):
    filename = 'PyNWB.' + self.__class__.__name__ + '.testOutToMatNWB.nwb'
    io = HDF5IO(filename, manager=get_manager())
    io.write(self.file)
    io.close()
    self.assertTrue(os.path.isfile(filename))
Example #25
                          rate=float(from_ini.rate))
        nwb_file.add_acquisition(time_series_G)

# =============================================================================
# CHECKS (from the SSC script; not written by this file's author)
# =============================================================================
# Check for an object in the file: verifies that a given timeseries entry
# exists in the .mat file.
def check_entry(file_name, obj):
    try:
        return file_name[obj]
    except KeyError:
        print(str(obj) + " does not exist")
        return []
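
A minimal usage sketch, assuming the .mat file is read as an h5py-style
mapping (the file name and key below are placeholders):

import h5py

# Hypothetical lookup; prints a message and returns [] if the key is missing
with h5py.File('mouse_data.mat', 'r') as mat_data:
    timeseries = check_entry(mat_data, 'timeseries')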

# =============================================================================
# Calling the functions and building the files        
# =============================================================================

get_mat_data(DATA_DIRECTORY)

io = NWBHDF5IO(OUTPUT_DIR + "conference_mouse.nwb",
               manager=get_manager(), mode='w')
io.write(nwb_file)
io.close()



Example #26
def main():

    import os.path

    # prerequisites: start
    import numpy as np

    rate = 10.0
    np.random.seed(1234)
    data_len = 1000
    ephys_data = np.random.rand(data_len)
    ephys_timestamps = np.arange(data_len) / rate
    spatial_timestamps = ephys_timestamps[::10]
    spatial_data = np.cumsum(np.random.normal(size=(2, len(spatial_timestamps))), axis=-1).T
    # prerequisites: end

    # create-nwbfile: start
    from datetime import datetime
    from pynwb import NWBFile

    f = NWBFile('the PyNWB tutorial', 'my first synthetic recording', 'EXAMPLE_ID', datetime.now(),
                experimenter='Dr. Bilbo Baggins',
                lab='Bag End Laboratory',
                institution='University of Middle Earth at the Shire',
                experiment_description='I went on an adventure with thirteen dwarves to reclaim vast treasures.',
                session_id='LONELYMTN')
    # create-nwbfile: end

    # save-nwbfile: start
    from pynwb import get_manager
    from pynwb.form.backends.hdf5 import HDF5IO

    filename = "example.h5"
    io = HDF5IO(filename, manager=get_manager(), mode='w')
    io.write(f)
    io.close()
    # save-nwbfile: end

    os.remove(filename)

    # create-device: start
    device = f.create_device(name='trodes_rig123', source="a source")
    # create-device: end

    # create-electrode-groups: start
    electrode_name = 'tetrode1'
    source = "a hypothetical source"
    description = "an example tetrode"
    location = "somewhere in the hippocampus"

    electrode_group = f.create_electrode_group(electrode_name,
                                               source=source,
                                               description=description,
                                               location=location,
                                               device=device)

    # create-electrode-groups: end

    # create-electrode-table-region: start
    for idx in [1, 2, 3, 4]:
        f.add_electrode(idx,
                        x=1.0, y=2.0, z=3.0,
                        imp=float(-idx),
                        location='CA1', filtering='none',
                        description='channel %s' % idx, group=electrode_group)

    electrode_table_region = f.create_electrode_table_region([0, 2], 'the first and third electrodes')
    # create-electrode-table-region: end

    # create-timeseries: start
    from pynwb.ecephys import ElectricalSeries
    from pynwb.behavior import SpatialSeries

    ephys_ts = ElectricalSeries('test_ephys_data',
                                'a hypothetical source',
                                ephys_data,
                                electrode_table_region,
                                timestamps=ephys_timestamps,
                                # Alternatively, could specify starting_time and rate as follows
                                # starting_time=ephys_timestamps[0],
                                # rate=rate,
                                resolution=0.001,
                                comments="This data was randomly generated with numpy, using 1234 as the seed",
                                description="Random numbers generated with numpy.random.rand")
    f.add_acquisition(ephys_ts)

    spatial_ts = SpatialSeries('test_spatial_timeseries',
                               'a stumbling rat',
                               spatial_data,
                               'origin on x,y-plane',
                               timestamps=spatial_timestamps,
                               resolution=0.1,
                               comments="This data was generated with numpy, using 1234 as the seed",
                               description="This 2D Brownian process generated with "
                                           "np.cumsum(np.random.normal(size=(2, len(spatial_timestamps))), axis=-1).T")
    f.add_acquisition(spatial_ts)
    # create-timeseries: end

    # create-data-interface: start
    from pynwb.ecephys import LFP
    from pynwb.behavior import Position

    lfp = f.add_acquisition(LFP('a hypothetical source'))
    ephys_ts = lfp.create_electrical_series('test_ephys_data',
                                            'a hypothetical source',
                                            ephys_data,
                                            electrode_table_region,
                                            timestamps=ephys_timestamps,
                                            resolution=0.001,
                                            comments="This data was randomly generated with numpy, using 1234 as the seed",  # noqa: E501
                                            description="Random numbers generated with numpy.random.rand")

    pos = f.add_acquisition(Position('a hypothetical source'))
    spatial_ts = pos.create_spatial_series('test_spatial_timeseries',
                                           'a stumbling rat',
                                           spatial_data,
                                           'origin on x,y-plane',
                                           timestamps=spatial_timestamps,
                                           resolution=0.1,
                                           comments="This data was generated with numpy, using 1234 as the seed",
                                           description="This 2D Brownian process generated with "
                                                       "np.cumsum(np.random.normal(size=(2, len(spatial_timestamps))), axis=-1).T")  # noqa: E501
    # create-data-interface: end

    # create-epochs: start
    epoch_tags = ('example_epoch',)

    f.create_epoch(name='epoch1', start_time=0.0, stop_time=1.0, tags=epoch_tags,
                   description="the first test epoch", timeseries=[ephys_ts, spatial_ts])

    f.create_epoch(name='epoch2', start_time=0.0, stop_time=1.0, tags=epoch_tags,
                   description="the second test epoch", timeseries=[ephys_ts, spatial_ts])
    # create-epochs: end

    # create-compressed-timeseries: start
    from pynwb.ecephys import ElectricalSeries
    from pynwb.behavior import SpatialSeries
    from pynwb.form.backends.hdf5 import H5DataIO

    ephys_ts = ElectricalSeries('test_compressed_ephys_data',
                                'a hypothetical source',
                                H5DataIO(ephys_data, compress=True),
                                electrode_table_region,
                                timestamps=H5DataIO(ephys_timestamps, compress=True),
                                resolution=0.001,
                                comments="This data was randomly generated with numpy, using 1234 as the seed",
                                description="Random numbers generated with numpy.random.rand")
    f.add_acquisition(ephys_ts)

    spatial_ts = SpatialSeries('test_compressed_spatial_timeseries',
                               'a stumbling rat',
                               H5DataIO(spatial_data, compress=True),
                               'origin on x,y-plane',
                               timestamps=H5DataIO(spatial_timestamps, compress=True),
                               resolution=0.1,
                               comments="This data was generated with numpy, using 1234 as the seed",
                               description="This 2D Brownian process generated with "
                                           "np.cumsum(np.random.normal(size=(2, len(spatial_timestamps))), axis=-1).T")
    f.add_acquisition(spatial_ts)
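
The tutorial function above is self-contained; a conventional entry point
(not part of the original snippet) would simply call it:

if __name__ == '__main__':
    main()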
Example #28
def readfile(filename):
    io = HDF5IO(filename, manager=pynwb.get_manager(), mode='r')
    nwbfile = io.read()
    # NOTE: datasets are read lazily from HDF5; once the IO object is closed,
    # any dataset not yet loaded into memory is no longer accessible
    io.close()
    return nwbfile
Example #29
                          stop_time=cond[ep].time[0][1],
                          stimset=(cond[ep].name).encode('utf8'),
                          img_id=('gray').encode('utf8'))

    if cond[ep].name == 'natural_images':
        for tr in range(len(cond[ep].time)):
            nwbfile.add_trial(start_time=cond[ep].time[tr][0],
                              stop_time=cond[ep].time[tr][1],
                              stimset=(cond[ep].name).encode('utf8'),
                              img_id=(str(
                                  cond[ep].img_order[tr])).encode('utf8'))

# Write NWB file
os.chdir(SaveDir)
name_to_save = Sess + '.nwb'
io = NWBHDF5IO(name_to_save, manager=get_manager(), mode='w')
io.write(nwbfile)
io.close()

del nwbfile

# %%

# Reading the NWB data
os.chdir(SaveDir)
f = NWBHDF5IO(Sess + '.nwb', 'r')
data_nwb = f.read()

Example #30
def build_NWB(args,
              Ca_Imaging_options={
                  'Suite2P-binary-filename': 'data.bin',
                  'plane': 0
              }):

    if args.verbose:
        print('Initializing NWB file for "%s" [...]' % args.datafolder)

    #################################################
    ####            BASIC metadata            #######
    #################################################
    metadata = np.load(os.path.join(args.datafolder, 'metadata.npy'),
                       allow_pickle=True).item()

    # TODO: replace by day and time stored in metadata
    # the filename recorded in metadata comes from the acquisition machine
    # (typically Windows), so its path separator may differ from os.path.sep
    sep = '\\' if '\\' in metadata['filename'] else os.path.sep

    day = metadata['filename'].split(sep)[-2].split('_')
    Time = metadata['filename'].split(sep)[-1].split('-')
    identifier = metadata['filename'].split(
        sep)[-2] + '-' + metadata['filename'].split(sep)[-1]
    start_time = datetime.datetime(int(day[0]),
                                   int(day[1]),
                                   int(day[2]),
                                   int(Time[0]),
                                   int(Time[1]),
                                   int(Time[2]),
                                   tzinfo=tzlocal())

    # subject info
    if 'subject_props' in metadata and (metadata['subject_props'] is not None):
        subject_props = metadata['subject_props']
        dob = subject_props['date_of_birth'].split('_')
    else:
        subject_props = {}
        print('subject properties not in metadata ...')
        dob = ['1988', '4', '24']

    # NIdaq tstart
    if os.path.isfile(os.path.join(args.datafolder, 'NIdaq.start.npy')):
        metadata['NIdaq_Tstart'] = np.load(
            os.path.join(args.datafolder, 'NIdaq.start.npy'))[0]

    subject = pynwb.file.Subject(
        description=(subject_props['description'] if
                     ('description' in subject_props) else 'Unknown'),
        sex=(subject_props['sex'] if ('sex' in subject_props) else 'Unknown'),
        genotype=(subject_props['genotype'] if
                  ('genotype' in subject_props) else 'Unknown'),
        species=(subject_props['species'] if
                 ('species' in subject_props) else 'Unknown'),
        subject_id=(subject_props['subject_id'] if
                    ('subject_id' in subject_props) else 'Unknown'),
        weight=(subject_props['weight'] if
                ('weight' in subject_props) else 'Unknown'),
        date_of_birth=datetime.datetime(int(dob[0]),
                                        int(dob[1]),
                                        int(dob[2]),
                                        tzinfo=tzlocal()))

    nwbfile = pynwb.NWBFile(
        identifier=identifier,
        session_description=str(metadata),
        experiment_description=metadata['protocol'],
        experimenter=(metadata['experimenter'] if
                      ('experimenter' in metadata) else 'Unknown'),
        lab=(metadata['lab'] if ('lab' in metadata) else 'Unknown'),
        institution=(metadata['institution'] if
                     ('institution' in metadata) else 'Unknown'),
        notes=(metadata['notes'] if ('notes' in metadata) else 'Unknown'),
        virus=(subject_props['virus'] if
               ('virus' in subject_props) else 'Unknown'),
        surgery=(subject_props['surgery'] if
                 ('surgery' in subject_props) else 'Unknown'),
        session_start_time=start_time,
        subject=subject,
        source_script=str(pathlib.Path(__file__).resolve()),
        source_script_file_name=str(pathlib.Path(__file__).resolve()),
        file_create_date=datetime.datetime.utcnow().replace(tzinfo=tzlocal()))

    filename = os.path.join(
        pathlib.Path(args.datafolder).parent, '%s.nwb' % identifier)

    # we need a manager to link raw and processed data
    manager = pynwb.get_manager()

    #################################################
    ####         IMPORTING NI-DAQ data        #######
    #################################################
    if args.verbose:
        print('- Loading NIdaq data for "%s" [...]' % args.datafolder)
    try:
        NIdaq_data = np.load(os.path.join(args.datafolder, 'NIdaq.npy'),
                             allow_pickle=True).item()
        NIdaq_Tstart = np.load(os.path.join(args.datafolder,
                                            'NIdaq.start.npy'))[0]
    except FileNotFoundError:
        print(' /!\ No NI-DAQ data found /!\ ')
        print('   -----> Not able to build NWB file for "%s"' %
              args.datafolder)
        raise

    true_tstart0 = np.load(os.path.join(args.datafolder, 'NIdaq.start.npy'))[0]
    st = datetime.datetime.fromtimestamp(true_tstart0).strftime('%H:%M:%S.%f')
    true_tstart = StartTime_to_day_seconds(st)

    # #################################################
    # ####         Locomotion                   #######
    # #################################################

    if metadata['Locomotion'] and ('Locomotion' in args.modalities):
        # compute running speed from binary NI-daq signal
        if args.verbose:
            print('- Computing and storing running-speed for "%s" [...]' %
                  args.datafolder)

        speed = compute_locomotion_speed(
            NIdaq_data['digital'][0],
            acq_freq=float(metadata['NIdaq-acquisition-frequency']),
            radius_position_on_disk=float(
                metadata['rotating-disk']['radius-position-on-disk-cm']),
            rotoencoder_value_per_rotation=float(
                metadata['rotating-disk']['roto-encoder-value-per-rotation']))
        _, speed = resample_signal(
            speed,
            original_freq=float(metadata['NIdaq-acquisition-frequency']),
            new_freq=args.running_sampling,
            pre_smoothing=2. / args.running_sampling)
        running = pynwb.TimeSeries(name='Running-Speed',
                                   data=speed,
                                   starting_time=0.,
                                   unit='cm/s',
                                   rate=args.running_sampling)
        nwbfile.add_acquisition(running)

    # #################################################
    # ####         Visual Stimulation           #######
    # #################################################
    if (metadata['VisualStim'] and
        ('VisualStim' in args.modalities)) and os.path.isfile(
            os.path.join(args.datafolder, 'visual-stim.npy')):

        # preprocessing photodiode signal
        _, Psignal = resample_signal(
            NIdaq_data['analog'][0],
            original_freq=float(metadata['NIdaq-acquisition-frequency']),
            pre_smoothing=2. / float(metadata['NIdaq-acquisition-frequency']),
            new_freq=args.photodiode_sampling)

        VisualStim = np.load(os.path.join(args.datafolder, 'visual-stim.npy'),
                             allow_pickle=True).item()
        # use the photodiode signal for the realignment
        if args.verbose:
            print(
                '=> Performing realignment from photodiode for "%s" [...]  ' %
                args.datafolder)
        if 'time_duration' not in VisualStim:
            VisualStim['time_duration'] = np.array(
                VisualStim['time_stop']) - np.array(VisualStim['time_start'])
        for key in ['time_start', 'time_stop', 'time_duration']:
            metadata[key] = VisualStim[key]
        success, metadata = realign_from_photodiode(
            Psignal,
            metadata,
            sampling_rate=(args.photodiode_sampling
                           if args.photodiode_sampling > 0 else None),
            verbose=args.verbose)
        if success:
            timestamps = metadata['time_start_realigned']
            for key in ['time_start_realigned', 'time_stop_realigned']:
                VisualStimProp = pynwb.TimeSeries(name=key,
                                                  data=metadata[key],
                                                  unit='seconds',
                                                  timestamps=timestamps)
                nwbfile.add_stimulus(VisualStimProp)
            for key in VisualStim:
                None_cond = (VisualStim[key] == None)  # noqa: E711 (elementwise on arrays)
                if key in ['protocol_id', 'index']:
                    array = np.array(VisualStim[key])
                elif isinstance(VisualStim[key],
                                (list, np.ndarray)) and np.sum(None_cond) > 0:
                    # need to remove the None elements
                    VisualStim[key][
                        None_cond] = 0 * VisualStim[key][~None_cond][0]
                    array = np.array(VisualStim[key],
                                     dtype=type(
                                         VisualStim[key][~None_cond][0]))
                else:
                    array = VisualStim[key]
                VisualStimProp = pynwb.TimeSeries(name=key,
                                                  data=array,
                                                  unit='NA',
                                                  timestamps=timestamps)
                nwbfile.add_stimulus(VisualStimProp)
        else:
            print(' /!\ Realignment from the photodiode failed /!\ ')
            # TEMPORARY FOR TROUBLESHOOTING:
            # fall back to the default (non-realigned) start/stop values
            metadata['time_start_realigned'] = metadata['time_start']
            metadata['time_stop_realigned'] = metadata['time_stop']
            print(
                '       --> using the default time_start / time_stop values ')

        if args.verbose:
            print('=> Storing the photodiode signal for "%s" [...]' %
                  args.datafolder)

        photodiode = pynwb.TimeSeries(name='Photodiode-Signal',
                                      data=Psignal,
                                      starting_time=0.,
                                      unit='[current]',
                                      rate=args.photodiode_sampling)
        nwbfile.add_acquisition(photodiode)

    #################################################
    ####         FaceCamera Recording         #######
    #################################################

    if metadata['FaceCamera']:

        if args.verbose:
            print('=> Storing FaceCamera acquisition for "%s" [...]' %
                  args.datafolder)
        if ('raw_FaceCamera' in args.modalities):
            try:
                FC_times, FC_FILES, _, _, _ = load_FaceCamera_data(
                    os.path.join(args.datafolder, 'FaceCamera-imgs'),
                    t0=NIdaq_Tstart,
                    verbose=True)

                img = np.load(
                    os.path.join(args.datafolder, 'FaceCamera-imgs',
                                 FC_FILES[0]))

                FC_SUBSAMPLING = build_subsampling_from_freq(
                    args.FaceCamera_frame_sampling,
                    1. / np.mean(np.diff(FC_times)),
                    len(FC_FILES),
                    Nmin=3)

                def FaceCamera_frame_generator():
                    for i in FC_SUBSAMPLING:
                        yield np.load(
                            os.path.join(args.datafolder, 'FaceCamera-imgs',
                                         FC_FILES[i])).astype(np.uint8)

                FC_dataI = DataChunkIterator(data=FaceCamera_frame_generator(),
                                             maxshape=(None, img.shape[0],
                                                       img.shape[1]),
                                             dtype=np.dtype(np.uint8))
                FaceCamera_frames = pynwb.image.ImageSeries(
                    name='FaceCamera',
                    data=FC_dataI,
                    unit='NA',
                    timestamps=FC_times[FC_SUBSAMPLING])
                nwbfile.add_acquisition(FaceCamera_frames)

            except BaseException as be:
                print(be)
                FC_FILES = None
                print(' /!\ Problems with FaceCamera data for "%s" /!\ ' %
                      args.datafolder)

        #################################################
        ####         Pupil from FaceCamera        #######
        #################################################

        if 'Pupil' in args.modalities:

            # add_pupil_data(nwbfile, FC_FILES, args)

            if os.path.isfile(os.path.join(args.datafolder, 'pupil.npy')):

                if args.verbose:
                    print('=> Adding processed pupil data for "%s" [...]' %
                          args.datafolder)

                dataP = np.load(os.path.join(args.datafolder, 'pupil.npy'),
                                allow_pickle=True).item()

                if 'cm_to_pix' in dataP:  # SCALE FROM THE PUPIL GUI
                    pix_to_mm = 10. / float(
                        dataP['cm_to_pix'])  # IN MILLIMETERS FROM HERE
                else:
                    pix_to_mm = 1

                pupil_module = nwbfile.create_processing_module(
                    name='Pupil',
                    description='processed quantities of Pupil dynamics, '
                                'pix_to_mm=%.3f' % pix_to_mm)

                for key, scale in zip(['cx', 'cy', 'sx', 'sy', 'blinking'],
                                      [pix_to_mm for i in range(4)] + [1]):
                    if type(dataP[key]) is np.ndarray:
                        PupilProp = pynwb.TimeSeries(name=key,
                                                     data=dataP[key] * scale,
                                                     unit='seconds',
                                                     timestamps=FC_times)
                        pupil_module.add(PupilProp)

                # then add the frames subsampled
                if FC_FILES is not None:
                    img = np.load(
                        os.path.join(args.datafolder, 'FaceCamera-imgs',
                                     FC_FILES[0]))
                    x, y = np.meshgrid(np.arange(0, img.shape[0]),
                                       np.arange(0, img.shape[1]),
                                       indexing='ij')
                    cond = (x >= dataP['xmin']) & (x <= dataP['xmax']) & (
                        y >= dataP['ymin']) & (y <= dataP['ymax'])

                    PUPIL_SUBSAMPLING = build_subsampling_from_freq(
                        args.Pupil_frame_sampling,
                        1. / np.mean(np.diff(FC_times)),
                        len(FC_FILES),
                        Nmin=3)

                    def Pupil_frame_generator():
                        for i in PUPIL_SUBSAMPLING:
                            yield np.load(os.path.join(args.datafolder, 'FaceCamera-imgs', FC_FILES[i])).astype(np.uint8)[cond].reshape(\
                                                                                            dataP['xmax']-dataP['xmin']+1, dataP['ymax']-dataP['ymin']+1)

                    PUC_dataI = DataChunkIterator(
                        data=Pupil_frame_generator(),
                        maxshape=(None, dataP['xmax'] - dataP['xmin'] + 1,
                                  dataP['ymax'] - dataP['ymin'] + 1),
                        dtype=np.dtype(np.uint8))
                    Pupil_frames = pynwb.image.ImageSeries(
                        name='Pupil',
                        data=PUC_dataI,
                        unit='NA',
                        timestamps=FC_times[PUPIL_SUBSAMPLING])
                    nwbfile.add_acquisition(Pupil_frames)

            else:
                print(' /!\ No processed pupil data found for "%s" /!\ ' %
                      args.datafolder)

        #################################################
        ####      Facemotion from FaceCamera        #######
        #################################################

        if 'Facemotion' in args.modalities:

            if os.path.isfile(os.path.join(args.datafolder, 'facemotion.npy')):

                if args.verbose:
                    print(
                        '=> Adding processed facemotion data for "%s" [...]' %
                        args.datafolder)

                dataF = np.load(os.path.join(args.datafolder,
                                             'facemotion.npy'),
                                allow_pickle=True).item()

                faceMotion_module = nwbfile.create_processing_module(
                    name='face-motion', description='face motion dynamics')

                FaceMotionProp = pynwb.TimeSeries(
                    name='face motion time series',
                    data=dataF['motion'],
                    unit='seconds',
                    timestamps=FC_times[dataF['frame']])

                faceMotion_module.add(FaceMotionProp)

                # then add the motion frames subsampled
                if FC_FILES is not None:

                    FACEMOTION_SUBSAMPLING = build_subsampling_from_freq(
                        args.FaceMotion_frame_sampling,
                        1. / np.mean(np.diff(FC_times)),
                        len(FC_FILES),
                        Nmin=3)

                    img = np.load(
                        os.path.join(args.datafolder, 'FaceCamera-imgs',
                                     FC_FILES[0]))
                    x, y = np.meshgrid(np.arange(0, img.shape[0]),
                                       np.arange(0, img.shape[1]),
                                       indexing='ij')
                    condF = (x>=dataF['ROI'][0]) & (x<=(dataF['ROI'][0]+dataF['ROI'][2])) &\
                        (y>=dataF['ROI'][1]) & (y<=(dataF['ROI'][1]+dataF['ROI'][3]))

                    def Facemotion_frame_generator():
                        for i in FACEMOTION_SUBSAMPLING:
                            i0 = np.min([i, len(FC_FILES) - 2])
                            img1 = np.load(
                                os.path.join(args.datafolder,
                                             'FaceCamera-imgs',
                                             FC_FILES[i0])).astype(
                                                 np.uint8)[condF].reshape(
                                                     dataF['ROI'][2] + 1,
                                                     dataF['ROI'][3] + 1)
                            img2 = np.load(
                                os.path.join(args.datafolder,
                                             'FaceCamera-imgs',
                                             FC_FILES[i0 + 1])).astype(
                                                 np.uint8)[condF].reshape(
                                                     dataF['ROI'][2] + 1,
                                                     dataF['ROI'][3] + 1)
                            # NOTE: uint8 arithmetic wraps modulo 256; this
                            # frame difference keeps the original behavior
                            yield img2 - img1

                    FMCI_dataI = DataChunkIterator(
                        data=Facemotion_frame_generator(),
                        maxshape=(None, dataF['ROI'][2] + 1,
                                  dataF['ROI'][3] + 1),
                        dtype=np.dtype(np.uint8))
                    FaceMotion_frames = pynwb.image.ImageSeries(
                        name='Face-Motion',
                        data=FMCI_dataI,
                        unit='NA',
                        timestamps=FC_times[FACEMOTION_SUBSAMPLING])
                    nwbfile.add_acquisition(FaceMotion_frames)

            else:
                print(' /!\ No processed facemotion data found for "%s" /!\ ' %
                      args.datafolder)

    #################################################
    ####    Electrophysiological Recording    #######
    #################################################

    if metadata['Electrophy'] and ('Electrophy' in args.modalities):

        if args.verbose:
            print('=> Storing electrophysiological signal for "%s" [...]' %
                  args.datafolder)

        electrophy = pynwb.TimeSeries(
            name='Electrophysiological-Signal',
            data=NIdaq_data['analog'][1],
            starting_time=0.,
            unit='[voltage]',
            rate=float(metadata['NIdaq-acquisition-frequency']))
        nwbfile.add_acquisition(electrophy)

    #################################################
    ####         Calcium Imaging              #######
    #################################################
    # see: add_ophys.py script

    Ca_data = None
    if metadata['CaImaging']:
        if args.verbose:
            print('=> Storing Calcium Imaging signal for "%s" [...]' %
                  args.datafolder)
        if not hasattr(args, 'CaImaging_folder') or (args.CaImaging_folder
                                                     == ''):
            try:
                args.CaImaging_folder = get_TSeries_folders(args.datafolder)
                Ca_data = add_ophys(
                    nwbfile,
                    args,
                    metadata=metadata,
                    with_raw_CaImaging=('raw_CaImaging' in args.modalities),
                    with_processed_CaImaging=('processed_CaImaging'
                                              in args.modalities),
                    Ca_Imaging_options=Ca_Imaging_options)
            except BaseException as be:
                print(be)
                print(' /!\ No Ca-Imaging data found /!\ ')
                print('             -> add them later with "add_ophys.py" \n')

    #################################################
    ####         Writing NWB file             #######
    #################################################

    if os.path.isfile(filename):
        temp = str(tempfile.NamedTemporaryFile().name) + '.nwb'
        print("""
        "%s" already exists
        ---> moving the file to the temporary file directory as: "%s" [...]
        """ % (filename, temp))
        shutil.move(filename, temp)
        print('---> done !')

    io = pynwb.NWBHDF5IO(filename, mode='w', manager=manager)
    print("""
    ---> Creating the NWB file: "%s"
    """ % filename)
    io.write(nwbfile, link_data=False)
    io.close()
    print('---> done !')

    if Ca_data is not None:
        Ca_data.close()  # can be closed only after having written

    return filename
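
A hedged sketch of invoking build_NWB; the attribute names mirror those read
inside the function, while the folder path and sampling values are
placeholders:

from argparse import Namespace

# Hypothetical invocation; every attribute below is accessed in build_NWB
args = Namespace(datafolder='/path/to/recording',
                 verbose=True,
                 modalities=['Locomotion', 'VisualStim', 'Electrophy'],
                 running_sampling=50.,
                 photodiode_sampling=1000.,
                 FaceCamera_frame_sampling=0.5,
                 Pupil_frame_sampling=1.,
                 FaceMotion_frame_sampling=1.)
nwb_path = build_NWB(args)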
Example #31
#   use our own timestamps in case the timestamps in the original file are not aligned with the
#   clock of the NWBFile we are creating. In this way we can use the linking to "re-align" different
#   TimeSeries without having to copy the main data.

####################
# Linking to whole Containers
# ---------------------------
#
# Appending to files and linking is made possible by passing around the same
# :py:class:`~pynwb.form.build.map.BuildManager`. You can get a manager to pass around
# using the :py:meth:`~pynwb.get_manager` function.
#

from pynwb import get_manager

manager = get_manager()

####################
# .. tip::
#
#    You can pass in extensions to :py:meth:`~pynwb.get_manager` using the *extensions* argument.
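#
#    For instance, a hedged sketch (``'my_extension.namespace.yaml'`` is a
#    hypothetical namespace file, assuming the *extensions* argument accepts
#    a namespace file path)::
#
#        manager = get_manager(extensions='my_extension.namespace.yaml')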

####################
# Step 1: Get the container object you want to link to
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Now let's open our test files and retrieve our timeseries.
#

# Get the first timeseries
io1 = NWBHDF5IO(filename1, 'r', manager=manager)
nwbfile1 = io1.read()
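
####################
# A hedged sketch of the step that would follow (not shown in this excerpt;
# ``filename3`` and the NWBFile arguments are placeholders). Writing with the
# same ``manager`` stores the retrieved container as a link rather than a
# copy::
#
#     timeseries_1 = nwbfile1.get_acquisition('test_timeseries')
#
#     nwbfile3 = NWBFile('a source', 'demonstrating linking',
#                        'LINK_ID', datetime.now())
#     nwbfile3.add_acquisition(timeseries_1)
#     with NWBHDF5IO(filename3, 'w', manager=manager) as io3:
#         io3.write(nwbfile3)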
Example #32
def writefile(nwbfile, filename):
    io = HDF5IO(filename, manager=pynwb.get_manager(), mode='w')
    io.write(nwbfile)
    io.close()
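
A short hypothetical roundtrip pairing this helper with the readfile helper
from Example #28 (the file name is a placeholder):

# Hypothetical usage: write an NWBFile to disk, then read it back
writefile(nwbfile, 'roundtrip.nwb')
nwbfile_read = readfile('roundtrip.nwb')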