Example #1
    def getBoilerPlateObjects(self):

        iSS = ImageSeries(name='test_iS',
                          data=np.ones((2, 2, 2)),
                          unit='unit',
                          external_file=['external_file'],
                          starting_frame=[1, 2, 3],
                          format='tiff',
                          timestamps=list())

        device = Device(name='device_name')
        oc = OpticalChannel('test_optical_channel', 'description', 500.)
        ip = ImagingPlane('test_imaging_plane', oc, 'description', device,
                          600., 300., 'indicator', 'location', (1, 2, 1, 2, 3),
                          4.0, 'unit', 'reference_frame')
        return iSS, ip
Example #2
 def setBoilerPlateObjects(self):
     ts = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
     self.image_series = ImageSeries(name='test_iS', dimension=[2],
                                     external_file=['images.tiff'],
                                     starting_frame=[1, 2, 3], format='tiff', timestamps=ts)
     self.device = Device(name='dev1')
     self.optical_channel = OpticalChannel('test_optical_channel',
                                           'optical channel description', 500.)
     self.imaging_plane = ImagingPlane('test_imaging_plane',
                                       self.optical_channel,
                                       'imaging plane description',
                                       self.device,
                                       600., 300., 'GFP', 'somewhere in the brain',
                                       (1., 2., 1., 2., 3.), 4.0, 'manifold unit', 'A frame to refer to')
     return PlaneSegmentation('description', self.imaging_plane, 'test_plane_seg_name',
                              self.image_series)
Example #3
    def test_roundtrip(self):
        """
        Add a LabelSeries to an NWBFile, write it to file, read the file, and test that the LabelSeries from the
        file matches the original LabelSeries.
        """
        behavior_module = self.nwbfile.create_processing_module(
            name="behavior", description="behavior")

        video = np.random.rand(100, 128, 128)
        image_series = ImageSeries(name='video', data=video, rate=30.0)

        pcs = np.random.rand(100, 4).astype('float64')
        representation_series = RepresentationSeries(
            name='pcs',
            description='pc projections',
            data=pcs,
            method='iterated SVD',
            rate=30.0,
            starting_time=0.0,
            video=image_series)

        labels = np.random.rand(100, 5).astype(bool)
        scores = np.random.rand(100, 5)
        vocab = np.random.rand(5).astype(str)
        label_series = LabelSeries(name='labels',
                                   description='labels',
                                   data=labels,
                                   scores=scores,
                                   vocabulary=vocab,
                                   rate=30.0,
                                   starting_time=0.0,
                                   exclusive=False,
                                   method='jim did it',
                                   representation=representation_series,
                                   video=image_series)

        behavior_module.add(label_series)
        behavior_module.add(representation_series)
        self.nwbfile.add_acquisition(image_series)

        with NWBHDF5IO(self.path, mode='w') as io:
            io.write(self.nwbfile)

        with NWBHDF5IO(self.path, mode='r', load_namespaces=True) as io:
            read_nwbfile = io.read()
            self.assertContainerEqual(behavior_module,
                                      read_nwbfile.processing['behavior'])
Example #4
def make_nwbfile_imageseries_no_unit():
    """Create a test file with an ImageSeries with data and no unit."""
    nwbfile = NWBFile(session_description='ADDME',
                      identifier='ADDME',
                      session_start_time=datetime.now().astimezone())
    image_series = ImageSeries(
        name='test_imageseries',
        data=np.ones((3, 3, 3)),
        external_file=['external_file'],
        starting_frame=[1, 2, 3],
        format='tiff',
        timestamps=[1., 2., 3.]
    )

    nwbfile.add_acquisition(image_series)

    test_name = 'imageseries_no_unit'
    _write(test_name, nwbfile)
Example #5
 def test_init(self):
     dev = Device('test_device')
     iS = ImageSeries(
         name='test_iS',
         data=np.ones((3, 3, 3)),
         unit='unit',
         external_file=['external_file'],
         starting_frame=[1, 2, 3],
         format='tiff',
         timestamps=list(),
         device=dev,
     )
     self.assertEqual(iS.name, 'test_iS')
     self.assertEqual(iS.unit, 'unit')
     self.assertEqual(iS.external_file, ['external_file'])
     self.assertEqual(iS.starting_frame, [1, 2, 3])
     self.assertEqual(iS.format, 'tiff')
     self.assertIs(iS.device, dev)
Example #6
    def add_microscopy(self, from_path):
        miniscope = read_settings(from_path)
        self.nwbfile.add_device(miniscope)
        annotations = read_notes(from_path)
        if annotations:
            self.nwbfile.add_acquisition(annotations)

        ms_files = [
            os.path.split(x)[1]
            for x in natsorted(glob(os.path.join(from_path, 'msCam*.avi')))
        ]

        self.nwbfile.add_acquisition(
            ImageSeries(name='OnePhotonSeries',
                        format='external',
                        external_file=ms_files,
                        timestamps=load_miniscope_timestamps(from_path),
                        starting_frame=[0] * len(ms_files)))
Example #7
def CreatePlaneSegmentation():
    w, h = 5, 5
    img_mask = [[[1.0 for x in range(w)] for y in range(h)], [[2.0 for x in range(w)] for y in range(h)]]
    pix_mask = [[1, 2, 1.0], [3, 4, 1.0], [5, 6, 1.0],
                [7, 8, 2.0], [9, 10, 2.0]]

    iSS = ImageSeries(name='test_iS', data=np.ones((2, 2, 2)), unit='unit',
                      external_file=['external_file'], starting_frame=[1, 2, 3], format='tiff', timestamps=[1., 2.])

    oc = OpticalChannel('test_optical_channel', 'description', 500.)
    device = Device(name='device_name')
    ip = ImagingPlane('test_imaging_plane', oc, 'description', device, 600.,
                      300., 'indicator', 'location', reference_frame='reference_frame')

    pS = PlaneSegmentation('description', ip, 'test_name', iSS)
    pS.add_roi(pixel_mask=pix_mask[0:3], image_mask=img_mask[0])
    pS.add_roi(pixel_mask=pix_mask[3:5], image_mask=img_mask[1])
    return pS
Example #8
    def buildPlaneSegmentation(self):
        """ Return an PlaneSegmentation and set related objects """
        w, h = 5, 5
        img_mask = [[[1.0 for x in range(w)] for y in range(h)],
                    [[2.0 for x in range(w)] for y in range(h)]]
        pix_mask = [(1, 2, 1.0), (3, 4, 1.0), (5, 6, 1.0), (7, 8, 2.0),
                    (9, 10, 2.)]

        ts = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        self.image_series = ImageSeries(name='test_iS',
                                        dimension=[2],
                                        external_file=['images.tiff'],
                                        starting_frame=[1, 2, 3],
                                        format='tiff',
                                        timestamps=ts)

        self.device = Device(name='dev1')
        self.optical_channel = OpticalChannel('test_optical_channel',
                                              'optical channel description',
                                              500.)
        self.imaging_plane = ImagingPlane('imgpln1',
                                          self.optical_channel,
                                          'a fake ImagingPlane',
                                          self.device,
                                          600.,
                                          200.,
                                          'GFP',
                                          'somewhere in the brain',
                                          (((1., 2., 3.), (4., 5., 6.)), ),
                                          2.,
                                          'a unit',
                                          reference_frame='unknown')

        self.img_mask = deepcopy(img_mask)
        self.pix_mask = deepcopy(pix_mask)
        self.pxmsk_index = [3, 5]
        pS = PlaneSegmentation('plane segmentation description',
                               self.imaging_plane, 'test_plane_seg_name',
                               self.image_series)
        pS.add_roi(pixel_mask=pix_mask[0:3], image_mask=img_mask[0])
        pS.add_roi(pixel_mask=pix_mask[3:5], image_mask=img_mask[1])
        return pS
Example #9
    def test_init(self):
        iS = ImageSeries(name='test_iS',
                         data=np.ones((2, 2, 2)),
                         unit='unit',
                         external_file=['external_file'],
                         starting_frame=[1, 2, 3],
                         format='tiff',
                         timestamps=[1., .2])

        ims = ImageMaskSeries(name='test_ims',
                              data=np.ones((2, 2, 2)),
                              unit='unit',
                              masked_imageseries=iS,
                              external_file=['external_file'],
                              starting_frame=[1, 2, 3],
                              format='tiff',
                              timestamps=[1., 2.])
        self.assertEqual(ims.name, 'test_ims')
        self.assertEqual(ims.unit, 'unit')
        self.assertIs(ims.masked_imageseries, iS)
        self.assertEqual(ims.external_file, ['external_file'])
        self.assertEqual(ims.starting_frame, [1, 2, 3])
        self.assertEqual(ims.format, 'tiff')
Example #10
    def run_conversion(self, nwbfile: NWBFile, metadata: dict):
        video_folder = Path(self.source_data['folder_path'])
        video_file_path_list = [
            str(x) for x in video_folder.iterdir() if x.suffix == ".mkv"
        ]

        video_timestamps = np.empty(0)
        for video_file_path in video_file_path_list:
            video_time_df = pd.read_csv(video_file_path.replace(
                ".mkv", "_timestamps.csv"),
                                        delimiter=";",
                                        skipinitialspace=True)
            video_timestamps = np.append(
                video_timestamps, video_time_df['timestamp'].to_numpy() / 1E3)

        # Camera videos referenced as external .mkv files
        videos = ImageSeries(name='Videos',
                             description="Videos recorded by TIS camera.",
                             format="external",
                             external_file=video_file_path_list,
                             timestamps=H5DataIO(video_timestamps,
                                                 compression="gzip"))
        nwbfile.add_acquisition(videos)
Example #11
    def run_conversion(
        self,
        nwbfile: NWBFile,
        metadata: dict,
        stub_test: bool = False,
    ):
        if stub_test:
            count_max = 10
        else:
            count_max = np.inf

        (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split(".")
        file_paths = self.source_data["file_paths"]
        for file in file_paths:
            cap = cv2.VideoCapture(file)
            if int(major_ver) < 3:
                fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
            else:
                fps = cap.get(cv2.CAP_PROP_FPS)

            success, frame = cap.read()
            mov = [frame]
            count = 1
            while success and count < count_max:
                success, frame = cap.read()
                mov.append(frame)
                count += 1
            mov = np.array(mov)
            cap.release()

            video = ImageSeries(
                name=f"Video: {Path(file).name}",
                description="Video recorded by camera.",
                data=H5DataIO(mov, compression="gzip"),
                rate=fps,
            )
            nwbfile.add_acquisition(video)
Example #12
def create_plane_segmentation():
    w, h = 5, 5
    img_mask = [[[1.0 for x in range(w)] for y in range(h)],
                [[2.0 for x in range(w)] for y in range(h)]]
    pix_mask = [[1, 2, 1.0], [3, 4, 1.0], [5, 6, 1.0], [7, 8, 2.0],
                [9, 10, 2.0]]

    iSS = ImageSeries(name='test_iS',
                      data=np.ones((2, 2, 2)),
                      unit='unit',
                      external_file=['external_file'],
                      starting_frame=[1, 2, 3],
                      format='tiff',
                      timestamps=[1., 2.])

    ip = create_imaging_plane()

    pS = PlaneSegmentation(description='description',
                           imaging_plane=ip,
                           name='test_name',
                           reference_images=iSS)
    pS.add_roi(pixel_mask=pix_mask[0:3], image_mask=img_mask[0])
    pS.add_roi(pixel_mask=pix_mask[3:5], image_mask=img_mask[1])
    return pS
Example #13
    def test_init(self):
        w, h = 5, 5
        img_mask = [[[1.0 for x in range(w)] for y in range(h)],
                    [[2.0 for x in range(w)] for y in range(h)]]
        pix_mask = [[1, 2, 1.0], [3, 4, 1.0], [5, 6, 1.0], [7, 8, 2.0],
                    [9, 10, 2.0]]

        iSS = ImageSeries(name='test_iS',
                          source='a hypothetical source',
                          data=list(),
                          unit='unit',
                          external_file=['external_file'],
                          starting_frame=[1, 2, 3],
                          format='tiff',
                          timestamps=list())

        device = Device(name='device_name', source='device_source')
        oc = OpticalChannel('test_optical_channel', 'test_source',
                            'description', 500.)
        ip = ImagingPlane('test_imaging_plane', 'test_source', oc,
                          'description', device, 600., 'imaging_rate',
                          'indicator', 'location', (1, 2, 1, 2, 3), 4.0,
                          'unit', 'reference_frame')

        pS = PlaneSegmentation('test source', 'description', ip, 'test_name',
                               iSS)
        pS.add_roi("1234", pix_mask[0:3], img_mask[0])
        pS.add_roi("5678", pix_mask[3:5], img_mask[1])

        self.assertEqual(pS.description, 'description')
        self.assertEqual(pS.source, 'test source')

        self.assertEqual(pS.imaging_plane, ip)
        self.assertEqual(pS.reference_images, iSS)
        self.assertEqual(pS.pixel_masks.data, pix_mask)
        self.assertEqual(pS.image_masks.data, img_mask)
Example #14
 def test_data_no_frame(self):
     iS = ImageSeries(name='test_iS',
                      unit='unit',
                      data=np.ones((3, 3, 3)),
                      timestamps=list())
     self.assertIsNone(iS.starting_frame)
Example #15
 def test_external_file_no_frame(self):
     iS = ImageSeries(name='test_iS',
                      unit='unit',
                      external_file=['external_file'],
                      timestamps=list())
     self.assertListEqual(iS.starting_frame, [0])
Example #16
 def test_no_data_no_file(self):
     msg = "Must supply either external_file or data to ImageSeries 'test_iS'."
     with self.assertRaisesWith(ValueError, msg):
         ImageSeries(name='test_iS', unit='unit', timestamps=list())
Example #17
# the script.

nwbfile = NWBFile(source='Allen Brain Observatory: Visual Coding',
                  session_description='Allen Brain Observatory dataset',
                  identifier=str(metadata['ophys_experiment_id']),
                  session_start_time=metadata['session_start_time'],
                  file_create_date=datetime.datetime.now())

########################################
# 2) Next, we add stimuli templates (one for each type of stimulus), and a data series that indexes these templates to
# describe what stimulus was being shown during the experiment.
for stimulus in stimulus_list:
    visual_stimulus_images = ImageSeries(
        name=stimulus,
        source='NA',
        data=dataset.get_stimulus_template(stimulus),
        unit='NA',
        format='raw',
        timestamps=[0.0])
    image_index = IndexSeries(
        name=stimulus,
        source='NA',
        data=dataset.get_stimulus_table(stimulus).frame.values,
        unit='NA',
        indexed_timeseries=visual_stimulus_images,
        timestamps=timestamps[dataset.get_stimulus_table(
            stimulus).start.values])
    nwbfile.add_stimulus_template(visual_stimulus_images)
    nwbfile.add_stimulus(image_index)

########################################
Example #18
 def create_acquisition(self):
     """
     Acquisition data such as the audio spectrogram (raw behavioral data), nidq (raw ephys data), and raw camera data.
     These are independent of probe type.
     """
     for neurodata_type_name, neurodata_type_args_list in self.nwb_metadata[
             'Acquisition'].items():
         data_retrieved_args_list = self._get_data(neurodata_type_args_list)
         for neurodata_type_args in data_retrieved_args_list:
             if neurodata_type_name == 'ImageSeries':
                 for types, times in zip(neurodata_type_args['data'],
                                         neurodata_type_args['timestamps']):
                     customargs = dict(name='camera_raw',
                                       external_file=[str(types)],
                                       format='external',
                                       timestamps=times,
                                       unit='n.a.')
                     self.nwbfile.add_acquisition(ImageSeries(**customargs))
             elif neurodata_type_name == 'DecompositionSeries':
                 neurodata_type_args['bands'] = np.squeeze(
                     neurodata_type_args['bands'])
                 freqs = DynamicTable(
                     'bands',
                      'spectrogram frequencies',
                     id=np.arange(neurodata_type_args['bands'].shape[0]))
                 freqs.add_column('freq',
                                  'frequency value',
                                  data=neurodata_type_args['bands'])
                 neurodata_type_args.update(dict(bands=freqs))
                 temp = neurodata_type_args['data'][:, :, np.newaxis]
                 neurodata_type_args['data'] = np.moveaxis(
                     temp, [0, 1, 2], [0, 2, 1])
                 ts = neurodata_type_args.pop('timestamps')
                 starting_time = ts[0][0] if isinstance(
                     ts[0], np.ndarray) else ts[0]
                 neurodata_type_args.update(
                     dict(starting_time=np.float64(starting_time),
                          rate=1 / np.mean(np.diff(ts.squeeze())),
                          unit='sec'))
                 self.nwbfile.add_acquisition(
                     DecompositionSeries(**neurodata_type_args))
             elif neurodata_type_name == 'ElectricalSeries':
                 if not self.electrode_table_exist:
                     self.create_electrode_table_ecephys()
                 if neurodata_type_args['name'] in ['raw.lf', 'raw.ap']:
                     for probe_no in range(self.no_probes):
                         if neurodata_type_args['data'][probe_no].shape[
                                 1] > self._one_data.data_attrs_dump[
                                     'electrode_table_length'][probe_no]:
                             if 'channels.rawInd' in self._one_data.loaded_datasets:
                                 channel_idx = self._one_data.loaded_datasets[
                                     'channels.rawInd'][
                                         probe_no].data.astype('int')
                             else:
                                 warnings.warn(
                                     'could not find channels.rawInd')
                                 break
                         else:
                             channel_idx = slice(None)
                         self.nwbfile.add_acquisition(
                             ElectricalSeries(
                                 name=neurodata_type_args['name'] + '_' +
                                 self.nwb_metadata['Probes'][probe_no]
                                 ['name'],
                                 starting_time=np.abs(
                                     np.round(
                                         neurodata_type_args['timestamps']
                                         [probe_no][0, 1], 2)
                                 ),  # round starting times of the order of 1e-5
                                 rate=neurodata_type_args['data']
                                 [probe_no].fs,
                                 data=H5DataIO(
                                     DataChunkIterator(
                                         _iter_datasetview(
                                             neurodata_type_args['data']
                                             [probe_no],
                                             channel_ids=channel_idx),
                                         buffer_size=self.buffer_size),
                                     compression=True,
                                     shuffle=self.shuffle,
                                     compression_opts=self.complevel),
                                 electrodes=self.probe_dt_region[probe_no],
                                 channel_conversion=neurodata_type_args[
                                     'data']
                                 [probe_no].channel_conversion_sample2v[
                                     neurodata_type_args['data']
                                     [probe_no].type][channel_idx]))
                 elif neurodata_type_args['name'] in ['raw.nidq']:
                     self.nwbfile.add_acquisition(
                         ElectricalSeries(**neurodata_type_args))
Example #19
    print("create_device : done")
    test_nwb.cicada_create_optical_channel()
    print("create optical channel : done")
    test_nwb.cicada_create_module()
    print("create_module : done")

    test_nwb.cicada_create_imaging_plane()
    print("create_imaging_plane : done")
    test_nwb.cicada_create_two_photon_series(
        data_to_store=store_data["non_corrected_movie"],
        external_file=[all_paths["non_corrected_movie"]])
    print("create two photon series : done")
    test_nwb.cicada_create_motion_correction()
    print("create motion correction : done")
    corrected_movie = ImageSeries(name="corrected_movie",
                                  data=store_data["corrected_movie"],
                                  external_file=[all_paths["corrected_movie"]],
                                  rate=1.0)
    original_movie = ImageSeries(
        name="original_movie",
        data=store_data["non_corrected_movie"],
        external_file=[all_paths["non_corrected_movie"]],
        rate=1.0)
    xy_translation = ImageSeries(name="xy_translation",
                                 data=None,
                                 external_file=[],
                                 rate=1.0)
    test_nwb.cicada_add_corrected_image_stack(corrected=corrected_movie,
                                              original=original_movie,
                                              xy_translation=xy_translation)
    print("add corrected image stack : done")
Example #20
def no2nwb(NOData, session_use, subjects):

    # Prepare the NO data that will be converted to the NWB format

    session = NOData.sessions[session_use]
    events = NOData._get_event_data(session_use, experiment_type='All')
    cell_ids = NOData.ls_cells(session_use)
    experiment_id_learn = session['experiment_id_learn']
    experiment_id_recog = session['experiment_id_recog']
    task_descr = session['task_descr']

    # Get the metadata for the subject
    df_session = subjects[subjects['session_id'] == session_use]

    print('session_use')
    print(session_use)
    print('age')
    print(str(df_session['age'].values[0]))
    print('epilepsy_diagnosis')
    print(str(df_session['epilepsy_diagnosis'].values[0]))

    nwb_subject = Subject(
        age=str(df_session['age'].values[0]),
        description=df_session['epilepsy_diagnosis'].values[0],
        sex=df_session['sex'].values[0],
        subject_id=df_session['subject_id'].values[0])

    # Create the NWB file
    nwbfile = NWBFile(
        #source='https://datadryad.org/bitstream/handle/10255/dryad.163179/RecogMemory_MTL_release_v2.zip',
        session_description='RecogMemory dataset session use 5' + session['session'],
        identifier=session['session_id'],
        session_start_time=datetime.datetime.now(),# TODO: need to check out the time for session start
        file_create_date=datetime.datetime.now(),
        experiment_description="learning: " + str(experiment_id_learn) + ", " + \
                               "recognition: " + \
                               str(experiment_id_recog),
        subject=nwb_subject
    )

    # Add event and experiment_id acquisition
    # event_ts = TimeSeries(name='events', source='NA', unit='NA', data=np.asarray(events[1].values),
    #                       timestamps=np.asarray(events[0].values))

    event_ts = TimeSeries(name='events',
                          unit='NA',
                          data=np.asarray(events[1].values),
                          timestamps=np.asarray(events[0].values))
    # experiment_ids = TimeSeries(name='experiment_ids', source='NA', unit='NA', data=np.asarray(events[2]),
    #                             timestamps=np.asarray(events[0].values))
    experiment_ids = TimeSeries(name='experiment_ids',
                                unit='NA',
                                data=np.asarray(events[2]),
                                timestamps=np.asarray(events[0].values))
    nwbfile.add_acquisition(event_ts)
    nwbfile.add_acquisition(experiment_ids)

    # Add stimuli to the NWB file
    # Get the first cell from the cell list
    cell = NOData.pop_cell(session_use, NOData.ls_cells(session_use)[0])
    trials = cell.trials
    stimuli_recog_path = [trial.file_path_recog for trial in trials]
    stimuli_learn_path = [trial.file_path_learn for trial in trials]

    # Add stimuli recog
    counter = 1
    for path in stimuli_recog_path:
        folders = path.split('\\')
        path = os.path.join('./RecogMemory_MTL_release_v2', 'Stimuli',
                            folders[0], folders[1], folders[2])
        img = cv2.imread(path)
        name = 'stimuli_recog_' + str(counter)
        stimulus_recog = ImageSeries(name=name,
                                     data=img,
                                     unit='NA',
                                     format='',
                                     timestamps=[0.0])

        nwbfile.add_stimulus(stimulus_recog)
        counter += 1

    # Add stimuli learn
    counter = 1
    for path in stimuli_learn_path:
        if path == 'NA':
            continue
        folders = path.split('\\')

        path = os.path.join('./RecogMemory_MTL_release_v2', 'Stimuli',
                            folders[0], folders[1], folders[2])
        img = cv2.imread(path)

        name = 'stimuli_learn_' + str(counter)

        stimulus_learn = ImageSeries(name=name,
                                     data=img,
                                     unit='NA',
                                     format='',
                                     timestamps=[0.0])

        nwbfile.add_stimulus(stimulus_learn)

        counter += 1

    # Add epochs and trials: storing start and end times for a stimulus

    # First extract the category ids and names that we need
    # The metadata for each trial will be stored in a trial table

    cat_id_recog = [trial.category_recog for trial in trials]
    cat_name_recog = [trial.category_name_recog for trial in trials]
    cat_id_learn = [trial.category_learn for trial in trials]
    cat_name_learn = [trial.category_name_learn for trial in trials]

    # Extract the event timestamps
    events_learn_stim_on = events[(events[2] == experiment_id_learn) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_learn_stim_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_learn_delay1_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay1_off'])]
    events_learn_delay2_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay2_off'])]

    events_recog_stim_on = events[(events[2] == experiment_id_recog) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_recog_stim_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_recog_delay1_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay1_off'])]
    events_recog_delay2_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay2_off'])]

    # Extract new_old label
    new_old_recog = [trial.new_old_recog for trial in trials]

    # Create the trial tables
    nwbfile.add_trial_column('stim_on', 'the time when the stimulus is shown')
    nwbfile.add_trial_column('stim_off', 'the time when the stimulus is off')
    nwbfile.add_trial_column('delay1_off', 'the time when delay1 is off')
    nwbfile.add_trial_column('delay2_off', 'the time when delay2 is off')
    nwbfile.add_trial_column('stim_phase',
                             'learning/recognition phase during the trial')
    nwbfile.add_trial_column('category_id', 'the category id of the stimulus')
    nwbfile.add_trial_column('category_name',
                             'the category name of the stimulus')
    nwbfile.add_trial_column('external_image_file',
                             'the file path to the stimulus')
    nwbfile.add_trial_column('new_old_labels_recog',
                             'labels for new or old stimulus')

    range_recog = np.amin([
        len(events_recog_stim_on),
        len(events_recog_stim_off),
        len(events_recog_delay1_off),
        len(events_recog_delay2_off)
    ])
    range_learn = np.amin([
        len(events_learn_stim_on),
        len(events_learn_stim_off),
        len(events_learn_delay1_off),
        len(events_learn_delay2_off)
    ])

    # Iterate over the event list and add information to each epoch and trial table
    for i in range(range_learn):
        # nwbfile.create_epoch(start_time=events_learn_stim_on.iloc[i][0],
        #                      stop_time=events_learn_stim_off.iloc[i][0],
        #                      timeseries=[event_ts, experiment_ids],
        #                      tags='stimulus_learn',
        #                      description='learning phase stimulus')

        # nwbfile.add_trial({'start': events_learn_stim_on.iloc[i][0],
        #                    'end': events_learn_delay2_off.iloc[i][0],
        #                    'stim_on': events_learn_stim_on.iloc[i][0],
        #                    'stim_off': events_learn_stim_off.iloc[i][0],
        #                    'delay1_off': events_learn_delay1_off.iloc[i][0],
        #                    'delay2_off': events_learn_delay2_off.iloc[i][0],
        #                    'stim_phase': 'learn',
        #                    'category_id': cat_id_learn[i],
        #                    'category_name': cat_name_learn[i],
        #                    'external_image_file': stimuli_learn_path[i],
        #                    'new_old_labels_recog': -1})

        nwbfile.add_trial(start_time=events_learn_stim_on.iloc[i][0],
                          stop_time=events_learn_delay2_off.iloc[i][0],
                          stim_on=events_learn_stim_on.iloc[i][0],
                          stim_off=events_learn_stim_off.iloc[i][0],
                          delay1_off=events_learn_delay1_off.iloc[i][0],
                          delay2_off=events_learn_delay2_off.iloc[i][0],
                          stim_phase='learn',
                          category_id=cat_id_learn[i],
                          category_name=cat_name_learn[i],
                          external_image_file=stimuli_learn_path[i],
                          new_old_labels_recog='NA')

    for i in range(range_recog):
        # nwbfile.create_epoch(start_time=events_recog_stim_on.iloc[i][0],
        #                      stop_time=events_recog_stim_off.iloc[i][0],
        #                      timeseries=[event_ts, experiment_ids],
        #                      tags='stimulus_recog',
        #                      description='recognition phase stimulus')

        nwbfile.add_trial(start_time=events_recog_stim_on.iloc[i][0],
                          stop_time=events_recog_delay2_off.iloc[i][0],
                          stim_on=events_recog_stim_on.iloc[i][0],
                          stim_off=events_recog_stim_off.iloc[i][0],
                          delay1_off=events_recog_delay1_off.iloc[i][0],
                          delay2_off=events_recog_delay2_off.iloc[i][0],
                          stim_phase='recog',
                          category_id=cat_id_recog[i],
                          category_name=cat_name_recog[i],
                          external_image_file=stimuli_recog_path[i],
                          new_old_labels_recog=new_old_recog[i])

    # Add the waveform clustering and the spike data.
    # Create necessary processing modules for different kinds of waveform data
    clustering_processing_module = ProcessingModule(
        'Spikes', 'The spike data contained')
    clusterWaveform_learn_processing_module = ProcessingModule(
        'MeanWaveforms_learn',
        'The mean waveforms for the clustered raw signal for learning phase')
    clusterWaveform_recog_processing_module = ProcessingModule(
        'MeanWaveforms_recog',
        'The mean waveforms for the clustered raw signal for recognition phase'
    )
    IsolDist_processing_module = ProcessingModule('IsoDist', 'The IsolDist')
    SNR_processing_module = ProcessingModule('SNR', 'SNR (signal-to-noise)')
    # Get the unique channel id that we will be iterate over
    channel_ids = np.unique([cell_id[0] for cell_id in cell_ids])

    # Iterate over the channel list
    for channel_id in channel_ids:
        cell_name = 'A' + str(channel_id) + '_cells.mat'
        file_path = os.path.join('RecogMemory_MTL_release_v2', 'Data',
                                 'sorted', session['session'], task_descr,
                                 cell_name)
        try:
            cell_mat = loadmat(file_path)
        except FileNotFoundError:
            print("File not found")
            continue
        spikes = cell_mat['spikes']
        meanWaveform_recog = cell_mat['meanWaveform_recog']
        meanWaveform_learn = cell_mat['meanWaveform_learn']
        IsolDist_SNR = cell_mat['IsolDist_SNR']

        spike_id = np.asarray([spike[0] for spike in spikes])
        spike_cluster_id = np.asarray([spike[1] for spike in spikes])
        spike_timestamps = np.asarray([spike[2] / 1000000 for spike in spikes])
        clustering = Clustering(description='Spikes of the channel detected',
                                num=spike_id,
                                peak_over_rms=np.asarray([0]),
                                times=spike_timestamps,
                                name='channel' + str(channel_id))
        clustering_processing_module.add_data_interface(clustering)

        for i in range(len(meanWaveform_learn[0][0][0][0])):
            waveform_mean_learn = ClusterWaveforms(
                clustering_interface=clustering,
                waveform_filtering='NA',
                waveform_sd=np.asarray([[0]]),
                waveform_mean=np.asarray([meanWaveform_learn[0][0][1][i]]),
                name='waveform_learn_cluster_id_' +
                str(meanWaveform_learn[0][0][0][0][i]))
            try:
                clusterWaveform_learn_processing_module.add_data_interface(
                    waveform_mean_learn)
            except ValueError as e:
                print(
                    'Catch an error in adding waveform interface to the learn processing module:'
                    + str(e))
                continue

        # Adding mean waveform recognition into the processing module
        for i in range(len(meanWaveform_recog[0][0][0][0])):
            waveform_mean_recog = ClusterWaveforms(
                clustering_interface=clustering,
                waveform_filtering='NA',
                waveform_sd=np.asarray([[0]]),
                waveform_mean=np.asarray([meanWaveform_recog[0][0][1][i]]),
                name='waveform_recog_cluster_id_' +
                str(meanWaveform_recog[0][0][0][0][i]))
            try:
                clusterWaveform_recog_processing_module.add_data_interface(
                    waveform_mean_recog)
            except ValueError as e:
                print(
                    'Catch an error in adding waveform interface to the recog processing module:'
                    + str(e))
                continue

        # Adding IsolDist_SNR data into the processing module
        # Here I use feature extraction to store the IsolDist_SNR data because
        # they are extracted from the original signals.
        # print(IsolDist_SNR[0][0][0])
        for i in range(len(IsolDist_SNR[0][0][1][0])):
            isoldist_data_interface = TimeSeries(
                data=[IsolDist_SNR[0][0][1][0][i]],
                unit='NA',
                timestamps=[0],
                name='IsolDist_' + str(IsolDist_SNR[0][0][0][0][i]))
            try:
                IsolDist_processing_module.add_data_interface(
                    isoldist_data_interface)
            except ValueError as e:
                print(
                    'Catch an error in adding IsolDist to the processing module:'
                    + str(e))
                continue

            SNR_data_interface = TimeSeries(unit='NA',
                                            description='The SNR data',
                                            data=[IsolDist_SNR[0][0][2][0][i]],
                                            timestamps=[0],
                                            name='SNR_' +
                                            str(IsolDist_SNR[0][0][0][0][i]))

            try:
                SNR_processing_module.add_data_interface(SNR_data_interface)
            except ValueError as e:
                print(
                    'Catch an error in adding SNR to the processing module:' +
                    str(e))
                continue

    nwbfile.add_processing_module(clustering_processing_module)
    nwbfile.add_processing_module(clusterWaveform_learn_processing_module)
    nwbfile.add_processing_module(clusterWaveform_recog_processing_module)
    nwbfile.add_processing_module(IsolDist_processing_module)
    nwbfile.add_processing_module(SNR_processing_module)

    return nwbfile
Example #21
nwb.add_device(miniscope)

ms_files = [
    os.path.split(x)[1]
    for x in natsorted(glob(os.path.join(data_dir, 'msCam*.avi')))
]

behav_files = [
    os.path.split(x)[1]
    for x in natsorted(glob(os.path.join(data_dir, 'behavCam*.avi')))
]

nwb.add_acquisition(
    ImageSeries(name='OnePhotonSeries',
                format='external',
                external_file=ms_files,
                timestamps=load_miniscope_timestamps(data_dir),
                starting_frame=[0] * len(ms_files)))

nwb.add_acquisition(
    ImageSeries(name='behaviorCam',
                format='external',
                external_file=behav_files,
                timestamps=load_miniscope_timestamps(data_dir, cam_num=2),
                starting_frame=[0] * len(behav_files)))

save_path = os.path.join(data_dir, 'test_out.nwb')
with NWBHDF5IO(save_path, 'w') as io:
    io.write(nwb)

# test read
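# A hedged read-back sketch (the original snippet stops at the comment above);
# it only uses the pynwb API already imported for the write step:
with NWBHDF5IO(save_path, 'r') as io:
    nwb_in = io.read()
    # confirm the externally linked movies round-tripped
    print(nwb_in.acquisition['OnePhotonSeries'])
    print(nwb_in.acquisition['behaviorCam'])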
Example #22
def AddMesoscaleImagingDataToNWB(nwb_file,
                                 tiff_files=None,
                                 img_info=None,
                                 timestamps=None,
                                 load_data=False,
                                 verbose=False):
    """
        Adds mesoscale imaging data (from TIFF stacks) to a Neurodata Without Borders (NWB) file.

        :param nwb_file: {NWB file or str} Either pointer to existing NWB file or a string so NWB file can be opened.
        :param tiff_files: {list} List of TIFF files. All will be loaded (in order provided)
        :param img_info: {dict} A dictionary carrying information about the images.
        :param timestamps: [optional] {list} Timestamps for the image frames; if None or empty, starting_time and rate from img_info are used instead.
        :param load_data: [optional] {bool} whether to load images from TIFF stack or leave it as a reference. Default is false.
        :param verbose: [optional] {bool} whether to print updates while converting. Default is false.
        :return: {str} filename of NWB file (empty if error)
        """

    # Check to see if user passed a filename or file
    nwb_io = None
    if not nwb_file:
        raise ValueError("Must pass a NWB file.")
    if type(nwb_file) == pynwb.file.NWBFile:
        if verbose: print("Using passed NWB file.")
    elif type(nwb_file) == str:
        nwb_file_name = nwb_file
        [nwb_file, nwb_io] = OpenNWBFile(nwb_file_name, verbose=verbose)
    else:
        raise ValueError("NWB file was not valid.")

    # Define default image information
    if verbose: print("Parsing image information.")
    def_image_info = {
        'resolution': -1.0,
        'bits_per_pixel': 0,
        'starting_time': -1.0,
        'rate': -1.0,
        'comments': 'No comments provided.',
        'description': 'No description provided.'
    }
    # Update the user-specified keys
    if img_info is not None:
        def_image_info.update(img_info)
    img_info = def_image_info

    # Check whether we were passed timestamps for the images
    if (timestamps is None) or (len(timestamps) == 0):
        # Use starting time and rate
        starting_time = img_info['starting_time']
        rate = img_info['rate']
        timestamps = None
    else:
        # Use timestamps
        starting_time = None
        rate = None

    # Load data?
    if load_data:
        if verbose: print("Loading data from files to store in NWB.")
        # Load all of the data from file
        img = []
        for cur_img_file in tiff_files:
            # Read with the PIL library because something is wrong with header for TIFFfile
            cur_img = io.imread(cur_img_file, plugin='pil')
            # Append to overall image
            if len(img) == 0:
                img = cur_img
            else:
                img = np.concatenate([img, cur_img], axis=0)
            if verbose:
                print("\tLoaded %s (contains %d images of %d x %d)." %
                      (cur_img_file, cur_img.shape[0], cur_img.shape[1],
                       cur_img.shape[2]))

        if verbose: print("Creating image series.")
        image_series = ImageSeries("Raw Mesoscale Images from TIFF Stack",
                                   data=img,
                                   unit='fluorescence',
                                   format='TIFF',
                                   bits_per_pixel=img_info['bits_per_pixel'],
                                   dimension=[img.shape[1], img.shape[2]],
                                   resolution=img_info['resolution'],
                                   conversion=1.0,
                                   timestamps=timestamps,
                                   starting_time=starting_time,
                                   rate=rate,
                                   comments=img_info['comments'] +
                                   " Images from files: %s" %
                                   ([f for f in tiff_files]),
                                   description=img_info['description'])
    else:  # don't load data, store by reference
        # Need to grab the size of each image stack so we can keep a store of starting image frames
        num_frames = []
        for cur_img_file_ind, cur_img_file in enumerate(tiff_files):
            # Read with the PIL library because something is wrong with the header for TIFFfile
            cur_img = io.imread(cur_img_file, plugin='pil')
            num_frames.append(cur_img.shape[0])
        # Pop off the last one and push on a first one
        num_frames.insert(0, 0)
        num_frames.pop()
        starting_frames = np.array(num_frames).cumsum(axis=0)

        image_series = ImageSeries(
            "Raw Mesoscale Images from TIFF Stack",
            unit='fluorescence',
            format='external',
            external_file=tiff_files,
            starting_frame=starting_frames,
            bits_per_pixel=img_info['bits_per_pixel'],
            dimension=[cur_img.shape[1], cur_img.shape[2]],
            resolution=img_info['resolution'],
            conversion=1.0,
            timestamps=timestamps,
            starting_time=starting_time,
            rate=rate,
            comments=img_info['comments'] + " Images from files: %s" %
            ([f for f in tiff_files]),
            description=img_info['description'])

    # Add to the file
    nwb_file.add_acquisition(image_series)

    # Write the file
    if nwb_io:
        if verbose: print("Writing NWB file and closing.")
        nwb_io.write(nwb_file)
        nwb_io.close()
        return nwb_file_name
    else:
        if verbose: print("Returning NWB file variable")
        return nwb_file
Example #23
import datetime

import numpy as np

from pynwb import NWBFile, NWBHDF5IO
from ndx_labels import LabelSeries, RepresentationSeries
from pynwb.image import ImageSeries

nwbfile = NWBFile(session_description='session_description',
                  identifier='identifier',
                  session_start_time=datetime.datetime.now(
                      datetime.timezone.utc))

behavior_module = nwbfile.create_processing_module(name="behavior",
                                                   description="behavior")

video = np.random.rand(100, 128, 128)
image_series = ImageSeries(name='video', data=video, rate=30.0)

pcs = np.random.rand(100, 4).astype('float64')
representation_series = RepresentationSeries(name='pcs',
                                             description='pc projections',
                                             data=pcs,
                                             method='iterated SVD',
                                             rate=30.0,
                                             starting_time=0.0,
                                             video=image_series)

labels = np.random.rand(100, 5).astype(bool)
vocab = np.random.rand(5).astype(str)
label_series = LabelSeries(name='labels',
                           description='labels',
                           data=labels,
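                           # The listing truncates the call here; a hedged completion,
                           # assuming the same argument names as the LabelSeries call in
                           # Example #3, might continue:
                           vocabulary=vocab,
                           rate=30.0,
                           starting_time=0.0,
                           exclusive=False,
                           video=image_series)

behavior_module.add(label_series)
behavior_module.add(representation_series)
nwbfile.add_acquisition(image_series)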
Example #24
 def test_data_no_unit(self):
     msg = "Must supply 'unit' argument when supplying 'data' to ImageSeries 'test_iS'."
     with self.assertRaisesWith(ValueError, msg):
         ImageSeries(name='test_iS',
                     data=np.ones((3, 3, 3)),
                     timestamps=list())
Example #25
nwbfile.add_acquisition(image_series1)
nwbfile.add_acquisition(image_series2)

####################
# Motion Correction (optional)
# ---------------------------------
#
# You can also store the result of motion correction.
# These should be stored in a :py:class:`~pynwb.ophys.MotionCorrection` object,
# which is a :py:class:`~pynwb.core.MultiContainerInterface` (similar to pynwb.behavior.Position)
# which holds 1 or more :py:class:`~pynwb.ophys.CorrectedImageStack` objects.

corrected = ImageSeries(
    name='corrected',  # this must be named "corrected"
    data=np.ones((1000, 100, 100)),
    unit='na',
    format='raw',
    starting_time=0.0,
    rate=1.0)

xy_translation = TimeSeries(
    name='xy_translation',
    data=np.ones((1000, 2)),
    unit='pixels',
    starting_time=0.0,
    rate=1.0,
)

corrected_image_stack = CorrectedImageStack(
    corrected=corrected,
    original=image_series1,
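    # The listing cuts off mid-call here; a hedged completion, following the pynwb
    # ophys tutorial this snippet appears to be based on (MotionCorrection is
    # imported from pynwb.ophys):
    xy_translation=xy_translation,
)

motion_correction = MotionCorrection(corrected_image_stacks=[corrected_image_stack])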
Example #26
import os
from datetime import datetime

import numpy as np
from h5py import File
from pynwb import NWBHDF5IO, NWBFile
from pynwb.behavior import SpatialSeries
from pynwb.image import ImageSeries

base_dir = '/Users/bendichter/Desktop/Schnitzer/data/Example Data'

nwbfile = NWBFile(session_start_time=datetime(1900, 1, 1),
                  session_description=' ',
                  identifier='m655_D11_S1')

nwbfile.add_acquisition(
    ImageSeries(name='video',
                format='external',
                external_file=['m655_D11_S1.avi'],
                starting_time=0.0,
                rate=np.nan,
                starting_frame=[0],
                dimension=[250, 250]))

centroid_fname = 'm655_D11_S1_centroids.mat'
pos_path = os.path.join(base_dir, centroid_fname)

with File(pos_path, 'r') as file:
    pos_data = np.array(file['c']).T
spatial_series = SpatialSeries(name='position',
                               data=pos_data,
                               starting_time=0.0,
                               rate=5.0,
                               units='unknown',
                               reference_frame='unknown')
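# The snippet appears to end mid-script; a hedged sketch of how the tracking data
# might be stored and the file written (module name and output filename assumed):
behavior_module = nwbfile.create_processing_module(
    name='behavior', description='behavioral tracking')
behavior_module.add(spatial_series)

with NWBHDF5IO(os.path.join(base_dir, 'm655_D11_S1.nwb'), 'w') as io:
    io.write(nwbfile)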
Example #27
 def test_external_file_no_unit(self):
     iS = ImageSeries(name='test_iS',
                      external_file=['external_file'],
                      timestamps=list())
     self.assertEqual(iS.unit, ImageSeries.DEFAULT_UNIT)
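# Taken together, the validation tests above (Examples #14-16, #24, #27) imply that a
# valid ImageSeries needs a name, a time reference (timestamps or starting_time/rate),
# and either data plus unit or external_file. A hedged sketch (names illustrative):
import numpy as np
from pynwb.image import ImageSeries

with_data = ImageSeries(name='with_data', data=np.ones((2, 4, 4)), unit='n.a.',
                        rate=30.0, starting_time=0.0)
with_external_file = ImageSeries(name='with_external_file', format='external',
                                 external_file=['frames.tiff'], starting_frame=[0],
                                 rate=30.0, starting_time=0.0)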
Example #28
    def run_conversion(self,
                       nwbfile: NWBFile,
                       metadata: dict,
                       stub_test: bool = False,
                       external_mode: bool = True,
                       starting_times: Optional[list] = None,
                       chunk_data: bool = True,
                       module_name: Optional[str] = None,
                       module_description: Optional[str] = None):
        """
        Convert the movie data files to ImageSeries and write them in the NWBFile.

        Parameters
        ----------
        nwbfile : NWBFile
        metadata : dict
        stub_test : bool, optional
            If True, truncates the write operation for fast testing. The default is False.
        external_mode : bool, optional
            ImageSeries in NWBFiles may contain either explicit movie data or file paths to external movie files. If
            True, this utilizes the more efficient method of merely encoding the file path linkage (recommended). For
            data sharing, the video files must be contained in the same folder as the NWBFile. If this NWBFile is
            intended for upload to DANDI, externally linked files are not allowed, so this flag would have to be set
            to False. The default is True.
        starting_times : list, optional
            List of start times for each movie. If unspecified, assumes that the movies in the file_paths list are in
            sequential order and are contiguous.
        chunk_data : bool, optional
            If True, uses a DataChunkIterator to read and write the movie, reducing overhead RAM usage at the cost of
            reduced conversion speed (compared to loading the video entirely into RAM as an array). chunk_data is also
            forced to True, even if manually set to False, whenever the video file size exceeds available system RAM by
            a factor of 70 (from compression experiments). Based on experiments with a ~30 FPS system of ~400 x ~600
            color frames, the equivalent uncompressed RAM usage is around 2 GB per minute of video. The default is True.
        module_name: str, optional
            Name of the processing module to add the ImageSeries object to. Default behavior is to add as acquisition.
        module_description: str, optional
            If the processing module specified by module_name does not exist, it will be created with this description.
            The default description is the same as used by the conversion_tools.get_module function.
        """
        file_paths = self.source_data['file_paths']

        if stub_test:
            count_max = 10
        else:
            count_max = np.inf
        if starting_times is not None:
            assert isinstance(starting_times, list) and all([isinstance(x, float) for x in starting_times]) \
                and len(starting_times) == len(file_paths), \
                "Argument 'starting_times' must be a list of floats in one-to-one correspondence with 'file_paths'!"
        else:
            starting_times = [0.]

        for j, file in enumerate(file_paths):
            timestamps = starting_times[j] + get_movie_timestamps(
                movie_file=file)

            if len(starting_times) != len(file_paths):
                starting_times.append(timestamps[-1])

            image_series_kwargs = dict(name=f"Video: {Path(file).stem}",
                                       description="Video recorded by camera.",
                                       unit="Frames")
            if check_regular_timestamps(ts=timestamps):
                fps = get_movie_fps(movie_file=file)
                image_series_kwargs.update(starting_time=starting_times[j],
                                           rate=fps)
            else:
                image_series_kwargs.update(
                    timestamps=H5DataIO(timestamps, compression="gzip"))

            if external_mode:
                image_series_kwargs.update(format="external",
                                           external_file=[file])
            else:
                uncompressed_estimate = Path(file).stat().st_size * 70
                available_memory = psutil.virtual_memory().available
                if not chunk_data and uncompressed_estimate >= available_memory:
                    warn(
                        f"Not enough memory (estimated {round(uncompressed_estimate/1e9, 2)} GB) to load movie file as "
                        f"array ({round(available_memory/1e9, 2)} GB available)! Forcing chunk_data to True."
                    )
                    chunk_data = True

                total_frames = len(timestamps)
                frame_shape = get_frame_shape(movie_file=file)
                maxshape = [total_frames]
                maxshape.extend(frame_shape)
                best_gzip_chunk = (1, frame_shape[0], frame_shape[1], 3)
                tqdm_pos, tqdm_mininterval = (0, 10)
                if chunk_data:

                    def data_generator(file, count_max):
                        cap = cv2.VideoCapture(str(file))
                        for _ in range(min(count_max, total_frames)):
                            success, frame = cap.read()
                            yield frame
                        cap.release()

                    mov = DataChunkIterator(
                        data=tqdm(
                            iterable=data_generator(file=file,
                                                    count_max=count_max),
                            desc=f"Copying movie data for {Path(file).name}",
                            position=tqdm_pos,
                            total=total_frames,
                            mininterval=tqdm_mininterval),
                        iter_axis=0,  # nwb standard is time as zero axis
                        maxshape=tuple(maxshape))
                    image_series_kwargs.update(data=H5DataIO(
                        mov, compression="gzip", chunks=best_gzip_chunk))
                else:
                    cap = cv2.VideoCapture(str(file))
                    mov = []
                    with tqdm(desc=f"Reading movie data for {Path(file).name}",
                              position=tqdm_pos,
                              total=total_frames,
                              mininterval=tqdm_mininterval) as pbar:
                        for _ in range(min(count_max, total_frames)):
                            success, frame = cap.read()
                            mov.append(frame)
                            pbar.update(1)
                    cap.release()
                    image_series_kwargs.update(data=H5DataIO(
                        DataChunkIterator(
                            tqdm(iterable=np.array(mov),
                                 desc=f"Writing movie data for {Path(file).name}",
                                 position=tqdm_pos,
                                 mininterval=tqdm_mininterval),
                            iter_axis=0,  # nwb standard is time as zero axis
                            maxshape=tuple(maxshape)),
                        compression="gzip",
                        chunks=best_gzip_chunk))
            if module_name is None:
                nwbfile.add_acquisition(ImageSeries(**image_series_kwargs))
            else:
                get_module(nwbfile=nwbfile,
                           name=module_name,
                           description=module_description).add(
                               ImageSeries(**image_series_kwargs))
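
# The fragment above relies on helper functions defined elsewhere in the
# converter (check_regular_timestamps, get_movie_fps, get_frame_shape). The
# sketch below shows one plausible, OpenCV-based way such helpers could look;
# it is an assumption, not the library's actual implementation.
import cv2
import numpy as np


def check_regular_timestamps(ts):
    """Sketch: True when the timestamps are (approximately) evenly spaced."""
    unique_diffs = np.unique(np.diff(ts).round(decimals=4))
    return len(unique_diffs) == 1


def get_movie_fps(movie_file):
    """Sketch: frame rate reported by the video container."""
    cap = cv2.VideoCapture(str(movie_file))
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    return fps


def get_frame_shape(movie_file):
    """Sketch: shape of the first frame, e.g. (height, width, 3)."""
    cap = cv2.VideoCapture(str(movie_file))
    _, frame = cap.read()
    cap.release()
    return frame.shape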
Example #29
0
def convert(
        input_file,
        session_start_time,
        subject_date_of_birth,
        subject_id='I5',
        subject_description='naive',
        subject_genotype='wild-type',
        subject_sex='M',
        subject_weight='11.6g',
        subject_species='Mus musculus',
        subject_brain_region='Medial Entorhinal Cortex',
        surgery='Probe: +/-3.3mm ML, 0.2mm A of sinus, then as deep as possible',
        session_id='npI5_0417_baseline_1',
        experimenter='Kei Masuda',
        experiment_description='Virtual Hallway Task',
        institution='Stanford University School of Medicine',
        lab_name='Giocomo Lab'):
    """
    Read in the .mat file specified by input_file and convert to .nwb format.

    Parameters
    ----------
    input_file : string
        path to the .mat file to be converted
    subject_id : string
        the unique subject ID number for the subject of the experiment
    subject_date_of_birth : datetime ISO 8601
        the date and time the subject was born
    subject_description : string
        important information specific to this subject that differentiates it from other members of its species
    subject_genotype : string
        the genetic strain of the subject
    subject_sex : string
        Male or Female
    subject_weight : string
        the weight of the subject around the time of the experiment
    subject_species : string
        the name of the species of the subject
    subject_brain_region : string
        the name of the brain region where the electrode probe is recording from
    surgery : str
        information about the subject's surgery to implant electrodes
    session_id : string
        human-readable ID# for the experiment session that has a one-to-one relationship with a recording session
    session_start_time : datetime
        date and time that the experiment started
    experimenter : string
        who ran the experiment, first and last name
    experiment_description : string
        what task was being run during the session
    institution : string
        what institution was the experiment performed in
    lab_name : string
        the lab where the experiment was performed

    Returns
    -------
    nwbfile : NWBFile
        The contents of the .mat file converted into the NWB format. The nwbfile is saved to disk using NWBHDF5IO.
    """

    # input matlab data
    matfile = hdf5storage.loadmat(input_file)

    # output path for nwb data
    def replace_last(source_string, replace_what, replace_with):
        head, _sep, tail = source_string.rpartition(replace_what)
        return head + replace_with + tail

    outpath = replace_last(input_file, '.mat', '.nwb')

    create_date = datetime.today()
    timezone_cali = pytz.timezone('US/Pacific')
    create_date_tz = timezone_cali.localize(create_date)

    # if loading data from config.yaml, convert string dates into datetime
    if isinstance(session_start_time, str):
        session_start_time = datetime.strptime(session_start_time,
                                               '%B %d, %Y %I:%M%p')
        session_start_time = timezone_cali.localize(session_start_time)

    if isinstance(subject_date_of_birth, str):
        subject_date_of_birth = datetime.strptime(subject_date_of_birth,
                                                  '%B %d, %Y %I:%M%p')
        subject_date_of_birth = timezone_cali.localize(subject_date_of_birth)

    # create unique identifier for this experimental session
    uuid_identifier = uuid.uuid1()

    # Create NWB file
    nwbfile = NWBFile(
        session_description=experiment_description,  # required
        identifier=uuid_identifier.hex,  # required
        session_id=session_id,
        experiment_description=experiment_description,
        experimenter=experimenter,
        surgery=surgery,
        institution=institution,
        lab=lab_name,
        session_start_time=session_start_time,  # required
        file_create_date=create_date_tz)  # optional

    # add information about the subject of the experiment
    experiment_subject = Subject(subject_id=subject_id,
                                 species=subject_species,
                                 description=subject_description,
                                 genotype=subject_genotype,
                                 date_of_birth=subject_date_of_birth,
                                 weight=subject_weight,
                                 sex=subject_sex)
    nwbfile.subject = experiment_subject

    # adding constants via LabMetaData container
    # constants
    sample_rate = float(matfile['sp'][0]['sample_rate'][0][0][0])
    n_channels_dat = int(matfile['sp'][0]['n_channels_dat'][0][0][0])
    dat_path = matfile['sp'][0]['dat_path'][0][0][0]
    offset = int(matfile['sp'][0]['offset'][0][0][0])
    data_dtype = matfile['sp'][0]['dtype'][0][0][0]
    hp_filtered = bool(matfile['sp'][0]['hp_filtered'][0][0][0])
    vr_session_offset = matfile['sp'][0]['vr_session_offset'][0][0][0]
    # container
    lab_metadata = LabMetaData_ext(name='LabMetaData',
                                   acquisition_sampling_rate=sample_rate,
                                   number_of_electrodes=n_channels_dat,
                                   file_path=dat_path,
                                   bytes_to_skip=offset,
                                   raw_data_dtype=data_dtype,
                                   high_pass_filtered=hp_filtered,
                                   movie_start_time=vr_session_offset)
    nwbfile.add_lab_meta_data(lab_metadata)

    # Adding trial information
    nwbfile.add_trial_column(
        'trial_contrast',
        'visual contrast of the maze through which the mouse is running')
    trial = np.ravel(matfile['trial'])
    trial_nums = np.unique(trial)
    position_time = np.ravel(matfile['post'])
    # matlab trial numbers start at 1. To correctly index the trial_contrast vector,
    # subtract 1 from 'num' so the index starts at 0
    for num in trial_nums:
        trial_times = position_time[trial == num]
        nwbfile.add_trial(start_time=trial_times[0],
                          stop_time=trial_times[-1],
                          trial_contrast=matfile['trial_contrast'][num - 1][0])

    # Add the mouse's position inside the virtual environment:
    position = Position()
    position_virtual = np.ravel(matfile['posx'])
    # position inside the virtual environment
    sampling_rate = 1 / (position_time[1] - position_time[0])
    position.create_spatial_series(
        name='Position',
        data=position_virtual,
        starting_time=position_time[0],
        rate=sampling_rate,
        reference_frame='The start of the trial, which begins at the start '
        'of the virtual hallway.',
        conversion=0.01,
        description='Subject position in the virtual hallway.',
        comments='The values should be >0 and <400cm. Values greater than '
        '400cm mean that the mouse briefly exited the maze.',
    )

    # physical position on the mouse wheel
    physical_posx = position_virtual
    trial_gain = np.ravel(matfile['trial_gain'])
    for num in trial_nums:
        physical_posx[trial == num] = (physical_posx[trial == num]
                                       / trial_gain[num - 1])

    position.create_spatial_series(
        name='PhysicalPosition',
        data=physical_posx,
        starting_time=position_time[0],
        rate=sampling_rate,
        reference_frame='Location on wheel re-referenced to zero '
        'at the start of each trial.',
        conversion=0.01,
        description='Physical location on the wheel measured '
        'since the beginning of the trial.',
        comments='Physical location found by dividing the '
        'virtual position by the "trial_gain"')
    nwbfile.add_acquisition(position)

    # Add timing of lick events, as well as mouse's virtual position during lick event
    lick_events = BehavioralEvents()
    lick_events.create_timeseries(
        'LickEvents',
        data=np.ravel(matfile['lickx']),
        timestamps=np.ravel(matfile['lickt']),
        unit='centimeter',
        description='Subject position in virtual hallway during the lick.')
    nwbfile.add_acquisition(lick_events)

    # Add information on the visual stimulus that was shown to the subject
    # Assumed rate=60 [Hz]. Update if necessary
    # Update external_file to link to Unity environment file
    visualization = ImageSeries(
        name='ImageSeries',
        unit='seconds',
        format='external',
        external_file=['https://unity.com/VR-and-AR-corner'],
        starting_time=vr_session_offset,
        starting_frame=[0],
        rate=float(60),
        description='virtual Unity environment that the mouse navigates through'
    )
    nwbfile.add_stimulus(visualization)

    # Add the recording device, a neuropixel probe
    recording_device = nwbfile.create_device(name='neuropixel_probes')
    electrode_group_description = 'single neuropixels probe http://www.open-ephys.org/neuropixelscorded'
    electrode_group_name = 'probe1'

    electrode_group = nwbfile.create_electrode_group(
        electrode_group_name,
        description=electrode_group_description,
        location=subject_brain_region,
        device=recording_device)

    # Add information about each electrode
    xcoords = np.ravel(matfile['sp'][0]['xcoords'][0])
    ycoords = np.ravel(matfile['sp'][0]['ycoords'][0])
    data_filtered_flag = matfile['sp'][0]['hp_filtered'][0][0]
    if data_filtered_flag:
        filter_desc = 'The raw voltage signals from the electrodes were high-pass filtered'
    else:
        filter_desc = 'The raw voltage signals from the electrodes were not high-pass filtered'

    num_recording_electrodes = xcoords.shape[0]
    recording_electrodes = range(0, num_recording_electrodes)

    # create electrode columns for the x,y location on the neuropixel probe
    # the standard x,y,z locations are reserved for Allen Brain Atlas location
    nwbfile.add_electrode_column('rel_x', 'electrode x-location on the probe')
    nwbfile.add_electrode_column('rel_y', 'electrode y-location on the probe')

    for idx in recording_electrodes:
        nwbfile.add_electrode(id=idx,
                              x=np.nan,
                              y=np.nan,
                              z=np.nan,
                              rel_x=float(xcoords[idx]),
                              rel_y=float(ycoords[idx]),
                              imp=np.nan,
                              location='medial entorhinal cortex',
                              filtering=filter_desc,
                              group=electrode_group)

    # Add information about each unit, termed 'cluster' in giocomo data
    # create new columns in unit table
    nwbfile.add_unit_column(
        'quality',
        'labels given to clusters during manual sorting in phy (1=MUA, '
        '2=Good, 3=Unsorted)')

    # cluster information
    cluster_ids = matfile['sp'][0]['cids'][0][0]
    cluster_quality = matfile['sp'][0]['cgs'][0][0]
    # spikes in time
    spike_times = np.ravel(matfile['sp'][0]['st'][0])  # the time of each spike
    spike_cluster = np.ravel(
        matfile['sp'][0]['clu'][0])  # the cluster_id that spiked at that time

    for i, cluster_id in enumerate(cluster_ids):
        unit_spike_times = spike_times[spike_cluster == cluster_id]
        waveforms = matfile['sp'][0]['temps'][0][cluster_id]
        nwbfile.add_unit(id=int(cluster_id),
                         spike_times=unit_spike_times,
                         quality=cluster_quality[i],
                         waveform_mean=waveforms,
                         electrode_group=electrode_group)

    # Add another Units table to hold the results of the automatic spike sorting
    # create TemplateUnits units table
    template_units = Units(
        name='TemplateUnits',
        description='units assigned during automatic spike sorting')
    template_units.add_column(
        'tempScalingAmps',
        'scaling amplitude applied to the template when extracting spike',
        index=True)

    # information on extracted spike templates
    spike_templates = np.ravel(matfile['sp'][0]['spikeTemplates'][0])
    spike_template_ids = np.unique(spike_templates)
    # template scaling amplitudes
    temp_scaling_amps = np.ravel(matfile['sp'][0]['tempScalingAmps'][0])

    for i, spike_template_id in enumerate(spike_template_ids):
        template_spike_times = spike_times[spike_templates ==
                                           spike_template_id]
        temp_scaling_amps_per_template = temp_scaling_amps[spike_templates ==
                                                           spike_template_id]
        template_units.add_unit(id=int(spike_template_id),
                                spike_times=template_spike_times,
                                electrode_group=electrode_group,
                                tempScalingAmps=temp_scaling_amps_per_template)

    # create ecephys processing module
    spike_template_module = nwbfile.create_processing_module(
        name='ecephys',
        description='units assigned during automatic spike sorting')

    # add template_units table to processing module
    spike_template_module.add(template_units)

    print(nwbfile)
    print('converted to NWB:N')
    print('saving ...')

    with NWBHDF5IO(outpath, 'w') as io:
        io.write(nwbfile)
        print('saved', outpath)

    return nwbfile
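

# Usage sketch (hypothetical values): the docstring notes that the date
# arguments may arrive as strings, e.g. when read from a config.yaml, and are
# parsed with '%B %d, %Y %I:%M%p'. The file name and dates below are placeholders.
if __name__ == '__main__':
    convert(input_file='npI5_0417_baseline_1.mat',
            session_start_time='April 17, 2019 02:30PM',
            subject_date_of_birth='February 01, 2019 12:00PM')
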
mat = mat['data_out']
gcamp = mat['gcamp'][0][0]
chbo = mat['chbo'][0][0]
chbr = mat['chbr'][0][0]


def acquisition_rate(txt_file_name):
    warnings.warn(
        'This is hard coded. Make sure to implement txt file loading.')
    return 31.2207
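

# A hypothetical sketch of the txt-file loading the warning above asks for,
# assuming the acquisition software writes a line such as
# "frame_rate_hz: 31.2207"; the real file layout is unknown.
def acquisition_rate_from_txt(txt_file_name):
    with open(txt_file_name) as f:
        for line in f:
            if line.strip().lower().startswith('frame_rate_hz'):
                return float(line.split(':', 1)[1])
    raise ValueError(f'no frame rate found in {txt_file_name}')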


gcamp_nwb = ImageSeries(name='gcamp',
                        source='NA',
                        data=gcamp,
                        unit='NA',
                        format='raw',
                        starting_time=0.,
                        rate=acquisition_rate(None))

chbo_nwb = ImageSeries(name='chbo',
                       source='NA',
                       data=chbo,
                       unit='NA',
                       format='raw',
                       starting_time=0.,
                       rate=acquisition_rate(None))

chbr_nwb = ImageSeries(name='chbr',
                       source='NA',
                       data=chbr,