Example #1
from datetime import datetime

import numpy as np
import pynwb
from dateutil.tz import tzlocal


def create_nwb_file():
    '''
    acquisition.t1
    acquisition.t2
    modules.mod.t3
    modules.mod.t4
    :return:
    '''
    start_time = datetime(2019, 1, 1, 11, tzinfo=tzlocal())
    create_date = datetime.now(tz=tzlocal())

    # FIXME: this attr breaks nwb-explorer
    # date_of_birth=create_date
    sub = pynwb.file.Subject(age='33',
                             description='Nothing too personal.',
                             genotype='AA',
                             sex='M',
                             species='Homo erectus',
                             subject_id='001',
                             weight="199 lb")

    nwbfile = pynwb.NWBFile('Example structured data',
                            'TSD',
                            start_time,
                            file_create_date=create_date,
                            notes='Example NWB file',
                            experimenter='Filippo Ledda',
                            experiment_description='Add example data',
                            institution='UCL',
                            subject=sub)
    sample_num = 100
    timestamps = np.arange(0, sample_num, 1)
    data = timestamps * 2

    nwbfile.add_acquisition(
        pynwb.TimeSeries(name='t1',
                         data=data,
                         unit='UA',
                         timestamps=timestamps))
    nwbfile.add_acquisition(
        pynwb.TimeSeries(name='t2', data=data, unit='pA', rate=1.0))

    mod = nwbfile.create_processing_module('mod', 'Mod')
    interface = mod.add(
        pynwb.TimeSeries(name='t3',
                         data=data,
                         unit='pA',
                         timestamps=timestamps))
    mod.add(
        pynwb.TimeSeries(name='t4',
                         data=data,
                         unit='UA',
                         timestamps=timestamps))

    # create_image is a helper defined elsewhere in the source module
    nwbfile.add_acquisition(
        create_image('internal_storaged_image', nwbfile, False))
    nwbfile.add_acquisition(
        create_image('external_storaged_image', nwbfile, True))

    return nwbfile
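
For reference, a minimal sketch of the standard pynwb round trip for the file built above (the 'example.nwb' path is a placeholder):

import pynwb

nwbfile = create_nwb_file()
with pynwb.NWBHDF5IO('example.nwb', mode='w') as io:
    io.write(nwbfile)
with pynwb.NWBHDF5IO('example.nwb', mode='r') as io:
    loaded = io.read()
    print(loaded.acquisition['t1'].data[:])
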
Example #2
def add_motion_correction_pm(session, ophys_module, nwbfile):
    xy_mc = pynwb.TimeSeries(
        name="MotionCorrection",
        data=session.motion_correction,
        timestamps=session.twop_timestamps,
        description="Number of pixels shifts measured during motion correction",
        unit="pixels",
    )

    ophys_module.add_data_interface(xy_mc)
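
The ophys_module argument is expected to be an existing processing module; a minimal sketch of how one would typically be created before calling this function (the module name 'ophys' is an assumption):

ophys_module = nwbfile.create_processing_module(
    name='ophys', description='optical physiology processed data')
add_motion_correction_pm(session, ophys_module, nwbfile)
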
Example #3
def convert_1d_sampled(out, data, metadata):
    '''convert one-dimensional sampled data'''
    spikes = data[:]
    unit = data.unit
    dim = data.dimensions[0]
    q = pq.Quantity(dim.sampling_interval, dim.unit)
    rate = (1 / q.rescale(pq.s)).rescale(pq.Hz)
    print(f"{data.name}, 1d, sampled data: {rate} Hz", file=sys.stderr)
    ts = nwb.TimeSeries(name=data.name, data=data[:], unit=unit, rate=float(rate))
    out.add_acquisition(ts)
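
The rate computation above leans on the quantities package; a standalone sketch with a hypothetical 1 ms sampling interval shows the unit bookkeeping:

import quantities as pq

q = pq.Quantity(0.001, 's')                   # sampling interval
rate = (1 / q.rescale(pq.s)).rescale(pq.Hz)   # 1/interval, expressed in Hz
print(float(rate))                            # 1000.0
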
Example #4
def test_set_container_sources(nwbfile):
    ts = pynwb.TimeSeries(
        name="a timeseries",
        data=[1, 2, 3],
        starting_time=0.0,
        rate=1.0
    )
    nwbfile.add_acquisition(ts)

    nwb2_sink.set_container_sources(nwbfile, "foo")
    assert ts.container_source == "foo"
    assert nwbfile.container_source == "foo"
    assert nwbfile.subject.container_source == "foo"
Example #5
def add_motion_correction_cis(session, ophys_module, nwbfile):
    corrected_image_series = pynwb.image.ImageSeries(
        name="motion_corrected_movie",
        description="see external file",
        external_file=["URL"],
        starting_frame=[0],
        format="external",
        timestamps=session.twop_timestamps,
        unit="Fluorescence (a.u.)",
    )
    nwbfile.add_acquisition(corrected_image_series)

    orig_image_series = pynwb.image.ImageSeries(
        name="original_movie",
        description="see external file",
        external_file=["URL"],
        starting_frame=[0],
        format="external",
        timestamps=session.twop_timestamps,
        unit="Fluorescence (a.u.)",
    )
    nwbfile.add_acquisition(orig_image_series)

    mot_corr_traces = pynwb.TimeSeries(
        name="MotionCorrection",
        data=session.motion_correction,
        timestamps=session.twop_timestamps,
        description="Number of pixels shifts measured during motion correction",
        unit="pixels",
    )

    nwbfile.add_acquisition(mot_corr_traces)

    corr_obj = pynwb.ophys.CorrectedImageStack(
        corrected=corrected_image_series,
        original=orig_image_series,
        xy_translation=mot_corr_traces,
    )

    # corrected_image_series.parent = corr_obj
    # mot_corr_traces.parent = corr_obj
    ophys_module.add_data_interface(corr_obj)
Example #6
import os
from datetime import datetime

import pynwb


def simple_nwb(base_path):
    in_nwb_path = os.path.join(base_path, "input.nwb")
    out_nwb_path = os.path.join(base_path, "meta.nwb")

    nwbfile = pynwb.NWBFile(
        session_description="test session",
        identifier='test session',
        session_start_time=datetime.now().astimezone()  # pynwb expects timezone-aware datetimes
    )
    nwbfile.add_acquisition(
        pynwb.TimeSeries(
            name="a timeseries", 
            data=[1, 2, 3], 
            starting_time=0.0, 
            rate=1.0
        )
    )
    with pynwb.NWBHDF5IO(path=in_nwb_path, mode="w") as writer:
        writer.write(nwbfile)

    return in_nwb_path, out_nwb_path
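
A usage sketch for the fixture above (the temporary directory path is hypothetical and must exist):

in_path, out_path = simple_nwb('/tmp/nwb-test')
with pynwb.NWBHDF5IO(path=in_path, mode='r') as reader:
    loaded = reader.read()
    print(loaded.acquisition['a timeseries'].data[:])  # [1, 2, 3]
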
Example #7
    def write_data(self):

        label = self.STIM['label'][self.iEp % len(self.STIM['label'])]
        episode = int(self.iEp / len(self.STIM['label'])) + 1
        filename = '%s-%i.nwb' % (label, episode)

        nwbfile = pynwb.NWBFile(
            'Intrinsic Imaging data following bar stimulation',
            'intrinsic',
            datetime.datetime.now(datetime.timezone.utc),
            file_create_date=datetime.datetime.now(datetime.timezone.utc))

        # Create our time series
        angles = pynwb.TimeSeries(
            name='angle_timeseries',
            data=self.STIM[label + '-angle'],
            unit='Rd',
            timestamps=self.STIM[label + '-times'])
        nwbfile.add_acquisition(angles)

        images = pynwb.image.ImageSeries(
            name='image_timeseries',
            data=np.array(self.FRAMES, dtype=np.float64),
            unit='a.u.',
            timestamps=self.STIM[label + '-times'])

        nwbfile.add_acquisition(images)

        # Write the data to file
        print('writing:', filename)
        with pynwb.NWBHDF5IO(os.path.join(self.datafolder, filename), 'w') as io:
            io.write(nwbfile)
        print(filename, ' saved !')
Example #8
from datetime import datetime

import numpy as np
import pynwb
from dateutil.tz import tzlocal


def create_random_nwbfile(add_processing=True, add_high_gamma=True):
    """
    Creates a NWB file with fields used by the Chang lab, with random fake data.

    Parameters
    ----------
    add_processing : boolean
        Whether to add processing (LFP) data or not.
    add_high_gamma : boolean
        Whether to add high_gamma data or not.

    Returns
    -------
    nwbfile : pynwb.NWBFile
    """
    # Create NWBFile
    start_time = datetime(2017, 4, 3, 11, tzinfo=tzlocal())
    nwbfile = pynwb.NWBFile(session_description='fake data',
                            identifier='NWB123',
                            session_start_time=start_time)

    # Basic fields
    nwbfile.institution = 'My institution'
    nwbfile.lab = 'My lab'
    nwbfile.subject = pynwb.file.Subject(age='5 months',
                                         description='description')

    # Add device and electrodes
    device = nwbfile.create_device(name='device')
    electrode_group = nwbfile.create_electrode_group(
        name='electrode_group0',
        description="an electrode group",
        location="somewhere in the hippocampus",
        device=device)
    n_electrodes = 4
    for idx in np.arange(n_electrodes):
        nwbfile.add_electrode(id=idx,
                              x=1.0,
                              y=2.0,
                              z=3.0,
                              imp=float(-idx),
                              location='CA1',
                              filtering='none',
                              group=electrode_group)

    # Add noise signal as raw data
    electrode_table_region = nwbfile.create_electrode_table_region(
        list(np.arange(n_electrodes)), 'electrodes_table_region')
    raw_len = 100000
    ephys_data = np.random.rand(raw_len * 4).reshape((raw_len, 4))
    ephys_ts = pynwb.ecephys.ElectricalSeries(
        name='raw_data',
        data=ephys_data,
        electrodes=electrode_table_region,
        starting_time=0.,
        rate=100.)
    nwbfile.add_acquisition(ephys_ts)

    # Add noise signal as processing data
    if add_processing:
        ecephys_module = nwbfile.create_processing_module(
            name='ecephys', description='preprocessed data')
        lfp_len = 10000
        lfp_data = np.random.rand(lfp_len * 4).reshape((lfp_len, 4))
        lfp = pynwb.ecephys.LFP(name='LFP')
        lfp.create_electrical_series(name='processed_electrical_series',
                                     data=lfp_data,
                                     electrodes=electrode_table_region,
                                     rate=10.,
                                     starting_time=0.)
        ecephys_module.add_data_interface(lfp)

    # Add noise signal as high gamma
    if add_processing and add_high_gamma:
        hg_data = np.random.rand(lfp_len * 4).reshape((lfp_len, 4))
        hg = pynwb.ecephys.ElectricalSeries(name='high_gamma',
                                            data=hg_data,
                                            electrodes=electrode_table_region,
                                            rate=10.,
                                            description='')
        ecephys_module.add_data_interface(hg)

    # Add noise signals as speaker stimuli and mic recording acquisition
    stim1 = pynwb.TimeSeries(name='stim1',
                             data=np.random.rand(raw_len),
                             starting_time=0.0,
                             rate=100.,
                             unit='')
    stim2 = pynwb.TimeSeries(name='stim2',
                             data=np.random.rand(raw_len),
                             starting_time=0.0,
                             rate=100.,
                             unit='')
    nwbfile.add_stimulus(stim1)
    nwbfile.add_stimulus(stim2)

    mic = pynwb.TimeSeries(name='mic',
                           data=np.random.rand(raw_len),
                           starting_time=0.0,
                           rate=100.,
                           unit='')
    nwbfile.add_acquisition(mic)

    return nwbfile
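
A quick usage sketch inspecting what the generator produces:

nwbfile = create_random_nwbfile(add_processing=True, add_high_gamma=True)
print(list(nwbfile.acquisition))                             # ['raw_data', 'mic']
print(list(nwbfile.processing['ecephys'].data_interfaces))   # ['LFP', 'high_gamma']
print(list(nwbfile.stimulus))                                # ['stim1', 'stim2']
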
Example #9
    def make(self, key):
        # get the list of mark parameters
        mark_param = (MarkParameters & key).fetch1()
        mark_param_dict = mark_param['mark_param_dict']

        # check that the mark type is supported; warn and skip otherwise
        if not MarkParameters().supported_mark_type(mark_param['mark_type']):
            warnings.warn(  # requires `import warnings`
                f'Mark type {mark_param["mark_type"]} not supported; skipping')
            return

        # get the list of units
        units = UnitInclusionParameters().get_included_units(key, key)

        # retrieve the units from the NWB file
        nwb_units = (CuratedSpikeSorting()
                     & key).fetch_nwb()[0]['units'].to_dataframe()

        # get the labbox workspace so we can get the waveforms from the recording
        curation_feed_uri = (SpikeSorting & key).fetch('curation_feed_uri')[0]
        workspace = le.load_workspace(curation_feed_uri)
        recording = workspace.get_recording_extractor(
            workspace.recording_ids[0])
        sorting = workspace.get_sorting_extractor(workspace.sorting_ids[0])
        channel_ids = recording.get_channel_ids()
        # assume the channels are all the same for the moment. This would need to be changed for larger probes
        channel_ids_by_unit = [channel_ids] * (max(units['unit_id']) + 1)
        # here we only get 8 points because that should be plenty to find the minimum/maximum
        waveforms = le.get_unit_waveforms(recording, sorting, units['unit_id'],
                                          channel_ids_by_unit, 8)

        if mark_param['mark_type'] == 'amplitude':
            # get the marks and timestamps
            n_elect = waveforms[0].shape[1]
            marks = np.empty((0, n_elect), dtype='int16')
            timestamps = np.empty((0), dtype='float64')
            for index, unit in enumerate(waveforms):
                marks = np.concatenate(
                    (marks, np.amin(np.asarray(unit, dtype='int16'), axis=2)),
                    axis=0)
                timestamps = np.concatenate(
                    (timestamps,
                     nwb_units.loc[units['unit_id'][index]].spike_times),
                    axis=0)
            # sort the timestamps to order them properly
            sort_order = np.argsort(timestamps)
            timestamps = timestamps[sort_order]
            marks = marks[sort_order, :]

            if 'threshold' in mark_param_dict:
                print('thresholding')
                # filter the marks by the amplitude threshold
                if mark_param_dict['sign'] == -1:
                    include = np.where(
                        np.amax(marks, axis=1) <= mark_param_dict['sign'] *
                        mark_param_dict['threshold'])[0]
                elif mark_param_dict['sign'] == 1:
                    include = np.where(
                        np.amax(marks, axis=1) >= mark_param_dict['threshold']
                    )[0]
                else:
                    include = np.where(
                        np.abs(np.amax(marks, axis=1)) >=
                        mark_param_dict['threshold'])[0]
                timestamps = timestamps[include]
                marks = marks[include, :]

            # create a new AnalysisNwbfile and a timeseries for the marks and save
            key['analysis_file_name'] = AnalysisNwbfile().create(
                key['nwb_file_name'])
            nwb_object = pynwb.TimeSeries(
                'marks',
                data=marks,
                unit='uV',
                timestamps=timestamps,
                description='amplitudes of spikes from electrodes '
                            f'{recording.get_channel_ids()}')
            key['marks_object_id'] = AnalysisNwbfile().add_nwb_object(
                key['analysis_file_name'], nwb_object)
            AnalysisNwbfile().add(key['nwb_file_name'],
                                  key['analysis_file_name'])
            self.insert1(key)
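
The sign conventions in the thresholding branch are easy to invert; a self-contained NumPy sketch with hypothetical values illustrates which rows survive:

import numpy as np

marks = np.array([[-80, -60], [-30, -5], [40, 60]], dtype='int16')
threshold, sign = 50, -1

if sign == -1:
    # negative-going spikes: keep rows whose largest value is still at or below -threshold
    include = np.where(np.amax(marks, axis=1) <= sign * threshold)[0]
elif sign == 1:
    include = np.where(np.amax(marks, axis=1) >= threshold)[0]
else:
    include = np.where(np.abs(np.amax(marks, axis=1)) >= threshold)[0]

print(include)  # [0]: only the first row peaks at or below -50
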
Example #10
def build_NWB(args,
              Ca_Imaging_options={
                  'Suite2P-binary-filename': 'data.bin',
                  'plane': 0
              }):

    if args.verbose:
        print('Initializing NWB file for "%s" [...]' % args.datafolder)

    #################################################
    ####            BASIC metadata            #######
    #################################################
    metadata = np.load(os.path.join(args.datafolder, 'metadata.npy'),
                       allow_pickle=True).item()

    # replace by day and time in metadata !!
    if os.path.sep in args.datafolder:
        sep = os.path.sep
    else:
        sep = '/'  # a weird behavior on Windows

    # note: the filename stored in metadata appears to use Windows separators,
    # hence the explicit '\\' splits below
    day = metadata['filename'].split('\\')[-2].split('_')
    Time = metadata['filename'].split('\\')[-1].split('-')
    identifier = metadata['filename'].split(
        '\\')[-2] + '-' + metadata['filename'].split('\\')[-1]
    start_time = datetime.datetime(int(day[0]),
                                   int(day[1]),
                                   int(day[2]),
                                   int(Time[0]),
                                   int(Time[1]),
                                   int(Time[2]),
                                   tzinfo=tzlocal())

    # subject info
    if 'subject_props' in metadata and (metadata['subject_props'] is not None):
        subject_props = metadata['subject_props']
        dob = subject_props['date_of_birth'].split('_')
    else:
        subject_props = {}
        print('subject properties not in metadata ...')
        dob = ['1988', '4', '24']

    # NIdaq tstart
    if os.path.isfile(os.path.join(args.datafolder, 'NIdaq.start.npy')):
        metadata['NIdaq_Tstart'] = np.load(
            os.path.join(args.datafolder, 'NIdaq.start.npy'))[0]

    subject = pynwb.file.Subject(
        description=(subject_props['description'] if
                     ('description' in subject_props) else 'Unknown'),
        sex=(subject_props['sex'] if ('sex' in subject_props) else 'Unknown'),
        genotype=(subject_props['genotype'] if
                  ('genotype' in subject_props) else 'Unknown'),
        species=(subject_props['species'] if
                 ('species' in subject_props) else 'Unknown'),
        subject_id=(subject_props['subject_id'] if
                    ('subject_id' in subject_props) else 'Unknown'),
        weight=(subject_props['weight'] if
                ('weight' in subject_props) else 'Unknown'),
        date_of_birth=datetime.datetime(int(dob[0]),
                                        int(dob[1]),
                                        int(dob[2]),
                                        tzinfo=tzlocal()))

    nwbfile = pynwb.NWBFile(
        identifier=identifier,
        session_description=str(metadata),
        experiment_description=metadata['protocol'],
        experimenter=(metadata['experimenter'] if
                      ('experimenter' in metadata) else 'Unknown'),
        lab=(metadata['lab'] if ('lab' in metadata) else 'Unknown'),
        institution=(metadata['institution'] if
                     ('institution' in metadata) else 'Unknown'),
        notes=(metadata['notes'] if ('notes' in metadata) else 'Unknown'),
        virus=(subject_props['virus'] if
               ('virus' in subject_props) else 'Unknown'),
        surgery=(subject_props['surgery'] if
                 ('surgery' in subject_props) else 'Unknown'),
        session_start_time=start_time,
        subject=subject,
        source_script=str(pathlib.Path(__file__).resolve()),
        source_script_file_name=str(pathlib.Path(__file__).resolve()),
        file_create_date=datetime.datetime.now(tzlocal()))

    filename = os.path.join(
        pathlib.Path(args.datafolder).parent, '%s.nwb' % identifier)

    # we need a manager to link raw and processed data
    manager = pynwb.get_manager()

    #################################################
    ####         IMPORTING NI-DAQ data        #######
    #################################################
    if args.verbose:
        print('- Loading NIdaq data for "%s" [...]' % args.datafolder)
    try:
        NIdaq_data = np.load(os.path.join(args.datafolder, 'NIdaq.npy'),
                             allow_pickle=True).item()
        NIdaq_Tstart = np.load(os.path.join(args.datafolder,
                                            'NIdaq.start.npy'))[0]
    except FileNotFoundError:
        print(' /!\ No NI-DAQ data found /!\ ')
        print('   -----> Not able to build NWB file for "%s"' %
              args.datafolder)
        raise

    true_tstart0 = np.load(os.path.join(args.datafolder, 'NIdaq.start.npy'))[0]
    st = datetime.datetime.fromtimestamp(true_tstart0).strftime('%H:%M:%S.%f')
    true_tstart = StartTime_to_day_seconds(st)

    # #################################################
    # ####         Locomotion                   #######
    # #################################################

    if metadata['Locomotion'] and ('Locomotion' in args.modalities):
        # compute running speed from binary NI-daq signal
        if args.verbose:
            print('- Computing and storing running-speed for "%s" [...]' %
                  args.datafolder)

        speed = compute_locomotion_speed(
            NIdaq_data['digital'][0],
            acq_freq=float(metadata['NIdaq-acquisition-frequency']),
            radius_position_on_disk=float(
                metadata['rotating-disk']['radius-position-on-disk-cm']),
            rotoencoder_value_per_rotation=float(
                metadata['rotating-disk']['roto-encoder-value-per-rotation']))
        _, speed = resample_signal(
            speed,
            original_freq=float(metadata['NIdaq-acquisition-frequency']),
            new_freq=args.running_sampling,
            pre_smoothing=2. / args.running_sampling)
        running = pynwb.TimeSeries(name='Running-Speed',
                                   data=speed,
                                   starting_time=0.,
                                   unit='cm/s',
                                   rate=args.running_sampling)
        nwbfile.add_acquisition(running)

    # #################################################
    # ####         Visual Stimulation           #######
    # #################################################
    if (metadata['VisualStim'] and
        ('VisualStim' in args.modalities)) and os.path.isfile(
            os.path.join(args.datafolder, 'visual-stim.npy')):

        # preprocessing photodiode signal
        _, Psignal = resample_signal(
            NIdaq_data['analog'][0],
            original_freq=float(metadata['NIdaq-acquisition-frequency']),
            pre_smoothing=2. / float(metadata['NIdaq-acquisition-frequency']),
            new_freq=args.photodiode_sampling)

        VisualStim = np.load(os.path.join(args.datafolder, 'visual-stim.npy'),
                             allow_pickle=True).item()
        # using the photodiod signal for the realignement
        if args.verbose:
            print(
                '=> Performing realignement from photodiode for "%s" [...]  ' %
                args.datafolder)
        if 'time_duration' not in VisualStim:
            VisualStim['time_duration'] = np.array(
                VisualStim['time_stop']) - np.array(VisualStim['time_start'])
        for key in ['time_start', 'time_stop', 'time_duration']:
            metadata[key] = VisualStim[key]
        success, metadata = realign_from_photodiode(
            Psignal,
            metadata,
            sampling_rate=(args.photodiode_sampling
                           if args.photodiode_sampling > 0 else None),
            verbose=args.verbose)
        if success:
            timestamps = metadata['time_start_realigned']
            for key in ['time_start_realigned', 'time_stop_realigned']:
                VisualStimProp = pynwb.TimeSeries(name=key,
                                                  data=metadata[key],
                                                  unit='seconds',
                                                  timestamps=timestamps)
                nwbfile.add_stimulus(VisualStimProp)
            for key in VisualStim:
                None_cond = (VisualStim[key] == None)
                if key in ['protocol_id', 'index']:
                    array = np.array(VisualStim[key])
                elif isinstance(VisualStim[key],
                                (list, np.ndarray)) and np.sum(None_cond) > 0:
                    # need to remove the None elements
                    VisualStim[key][
                        None_cond] = 0 * VisualStim[key][~None_cond][0]
                    array = np.array(VisualStim[key],
                                     dtype=type(
                                         VisualStim[key][~None_cond][0]))
                else:
                    array = VisualStim[key]
                VisualStimProp = pynwb.TimeSeries(name=key,
                                                  data=array,
                                                  unit='NA',
                                                  timestamps=timestamps)
                nwbfile.add_stimulus(VisualStimProp)
        else:
            print(' /!\ No VisualStim metadata found /!\ ')
            # print('   -----> Not able to build NWB file for "%s" ' % args.datafolder)
            # TEMPORARY FOR TROUBLESHOOTING !!
            metadata['time_start_realigned'] = metadata['time_start']
            metadata['time_stop_realigned'] = metadata['time_stop']
            print(' /!\ Realignement unsuccessful /!\ ')
            print(
                '       --> using the default time_start / time_stop values ')

        if args.verbose:
            print('=> Storing the photodiode signal for "%s" [...]' %
                  args.datafolder)

        photodiode = pynwb.TimeSeries(name='Photodiode-Signal',
                                      data=Psignal,
                                      starting_time=0.,
                                      unit='[current]',
                                      rate=args.photodiode_sampling)
        nwbfile.add_acquisition(photodiode)

    #################################################
    ####         FaceCamera Recording         #######
    #################################################

    if metadata['FaceCamera']:

        if args.verbose:
            print('=> Storing FaceCamera acquisition for "%s" [...]' %
                  args.datafolder)
        if ('raw_FaceCamera' in args.modalities):
            try:
                FC_times, FC_FILES, _, _, _ = load_FaceCamera_data(
                    os.path.join(args.datafolder, 'FaceCamera-imgs'),
                    t0=NIdaq_Tstart,
                    verbose=True)

                img = np.load(
                    os.path.join(args.datafolder, 'FaceCamera-imgs',
                                 FC_FILES[0]))

                FC_SUBSAMPLING = build_subsampling_from_freq(
                    args.FaceCamera_frame_sampling,
                    1. / np.mean(np.diff(FC_times)),
                    len(FC_FILES),
                    Nmin=3)

                def FaceCamera_frame_generator():
                    for i in FC_SUBSAMPLING:
                        yield np.load(
                            os.path.join(args.datafolder, 'FaceCamera-imgs',
                                         FC_FILES[i])).astype(np.uint8)

                FC_dataI = DataChunkIterator(data=FaceCamera_frame_generator(),
                                             maxshape=(None, img.shape[0],
                                                       img.shape[1]),
                                             dtype=np.dtype(np.uint8))
                FaceCamera_frames = pynwb.image.ImageSeries(
                    name='FaceCamera',
                    data=FC_dataI,
                    unit='NA',
                    timestamps=FC_times[FC_SUBSAMPLING])
                nwbfile.add_acquisition(FaceCamera_frames)

            except BaseException as be:
                print(be)
                FC_FILES = None
                print(' /!\ Problems with FaceCamera data for "%s" /!\ ' %
                      args.datafolder)

        #################################################
        ####         Pupil from FaceCamera        #######
        #################################################

        if 'Pupil' in args.modalities:

            # add_pupil_data(nwbfile, FC_FILES, args)

            if os.path.isfile(os.path.join(args.datafolder, 'pupil.npy')):

                if args.verbose:
                    print('=> Adding processed pupil data for "%s" [...]' %
                          args.datafolder)

                dataP = np.load(os.path.join(args.datafolder, 'pupil.npy'),
                                allow_pickle=True).item()

                if 'cm_to_pix' in dataP:  # SCALE FROM THE PUPIL GUI
                    pix_to_mm = 10. / float(
                        dataP['cm_to_pix'])  # IN MILLIMETERS FROM HERE
                else:
                    pix_to_mm = 1

                pupil_module = nwbfile.create_processing_module(
                    name='Pupil',
                    description=
                    'processed quantities of Pupil dynamics, pix_to_mm=%.3f' %
                    pix_to_mm)

                for key, scale in zip(['cx', 'cy', 'sx', 'sy', 'blinking'],
                                      [pix_to_mm for i in range(4)] + [1]):
                    if type(dataP[key]) is np.ndarray:
                        PupilProp = pynwb.TimeSeries(name=key,
                                                     data=dataP[key] * scale,
                                                     unit='seconds',
                                                     timestamps=FC_times)
                        pupil_module.add(PupilProp)

                # then add the frames subsampled
                if FC_FILES is not None:
                    img = np.load(
                        os.path.join(args.datafolder, 'FaceCamera-imgs',
                                     FC_FILES[0]))
                    x, y = np.meshgrid(np.arange(0, img.shape[0]),
                                       np.arange(0, img.shape[1]),
                                       indexing='ij')
                    cond = (x >= dataP['xmin']) & (x <= dataP['xmax']) & (
                        y >= dataP['ymin']) & (y <= dataP['ymax'])

                    PUPIL_SUBSAMPLING = build_subsampling_from_freq(
                        args.Pupil_frame_sampling,
                        1. / np.mean(np.diff(FC_times)),
                        len(FC_FILES),
                        Nmin=3)

                    def Pupil_frame_generator():
                        for i in PUPIL_SUBSAMPLING:
                            frame = np.load(
                                os.path.join(args.datafolder,
                                             'FaceCamera-imgs',
                                             FC_FILES[i])).astype(np.uint8)
                            yield frame[cond].reshape(
                                dataP['xmax'] - dataP['xmin'] + 1,
                                dataP['ymax'] - dataP['ymin'] + 1)

                    PUC_dataI = DataChunkIterator(
                        data=Pupil_frame_generator(),
                        maxshape=(None, dataP['xmax'] - dataP['xmin'] + 1,
                                  dataP['ymax'] - dataP['ymin'] + 1),
                        dtype=np.dtype(np.uint8))
                    Pupil_frames = pynwb.image.ImageSeries(
                        name='Pupil',
                        data=PUC_dataI,
                        unit='NA',
                        timestamps=FC_times[PUPIL_SUBSAMPLING])
                    nwbfile.add_acquisition(Pupil_frames)

            else:
                print(' /!\ No processed pupil data found for "%s" /!\ ' %
                      args.datafolder)

        #################################################
        ####      Facemotion from FaceCamera        #######
        #################################################

        if 'Facemotion' in args.modalities:

            if os.path.isfile(os.path.join(args.datafolder, 'facemotion.npy')):

                if args.verbose:
                    print(
                        '=> Adding processed facemotion data for "%s" [...]' %
                        args.datafolder)

                dataF = np.load(os.path.join(args.datafolder,
                                             'facemotion.npy'),
                                allow_pickle=True).item()

                faceMotion_module = nwbfile.create_processing_module(
                    name='face-motion', description='face motion dynamics')

                FaceMotionProp = pynwb.TimeSeries(
                    name='face motion time series',
                    data=dataF['motion'],
                    unit='seconds',
                    timestamps=FC_times[dataF['frame']])

                faceMotion_module.add(FaceMotionProp)

                # then add the motion frames subsampled
                if FC_FILES is not None:

                    FACEMOTION_SUBSAMPLING = build_subsampling_from_freq(
                        args.FaceMotion_frame_sampling,
                        1. / np.mean(np.diff(FC_times)),
                        len(FC_FILES),
                        Nmin=3)

                    img = np.load(
                        os.path.join(args.datafolder, 'FaceCamera-imgs',
                                     FC_FILES[0]))
                    x, y = np.meshgrid(np.arange(0, img.shape[0]),
                                       np.arange(0, img.shape[1]),
                                       indexing='ij')
                    condF = (x >= dataF['ROI'][0]) & \
                        (x <= (dataF['ROI'][0] + dataF['ROI'][2])) & \
                        (y >= dataF['ROI'][1]) & \
                        (y <= (dataF['ROI'][1] + dataF['ROI'][3]))

                    def Facemotion_frame_generator():
                        for i in FACEMOTION_SUBSAMPLING:
                            i0 = np.min([i, len(FC_FILES) - 2])
                            img1 = np.load(
                                os.path.join(args.datafolder,
                                             'FaceCamera-imgs',
                                             FC_FILES[i0])).astype(
                                                 np.uint8)[condF].reshape(
                                                     dataF['ROI'][2] + 1,
                                                     dataF['ROI'][3] + 1)
                            img2 = np.load(
                                os.path.join(args.datafolder,
                                             'FaceCamera-imgs',
                                             FC_FILES[i0 + 1])).astype(
                                                 np.uint8)[condF].reshape(
                                                     dataF['ROI'][2] + 1,
                                                     dataF['ROI'][3] + 1)
                            yield img2 - img1

                    FMCI_dataI = DataChunkIterator(
                        data=Facemotion_frame_generator(),
                        maxshape=(None, dataF['ROI'][2] + 1,
                                  dataF['ROI'][3] + 1),
                        dtype=np.dtype(np.uint8))
                    FaceMotion_frames = pynwb.image.ImageSeries(
                        name='Face-Motion',
                        data=FMCI_dataI,
                        unit='NA',
                        timestamps=FC_times[FACEMOTION_SUBSAMPLING])
                    nwbfile.add_acquisition(FaceMotion_frames)

            else:
                print(' /!\ No processed facemotion data found for "%s" /!\ ' %
                      args.datafolder)

    #################################################
    ####    Electrophysiological Recording    #######
    #################################################

    if metadata['Electrophy'] and ('Electrophy' in args.modalities):

        if args.verbose:
            print('=> Storing electrophysiological signal for "%s" [...]' %
                  args.datafolder)

        electrophy = pynwb.TimeSeries(
            name='Electrophysiological-Signal',
            data=NIdaq_data['analog'][1],
            starting_time=0.,
            unit='[voltage]',
            rate=float(metadata['NIdaq-acquisition-frequency']))
        nwbfile.add_acquisition(electrophy)

    #################################################
    ####         Calcium Imaging              #######
    #################################################
    # see: add_ophys.py script

    Ca_data = None
    if metadata['CaImaging']:
        if args.verbose:
            print('=> Storing Calcium Imaging signal for "%s" [...]' %
                  args.datafolder)
        if not hasattr(args, 'CaImaging_folder') or (args.CaImaging_folder
                                                     == ''):
            try:
                args.CaImaging_folder = get_TSeries_folders(args.datafolder)
                Ca_data = add_ophys(
                    nwbfile,
                    args,
                    metadata=metadata,
                    with_raw_CaImaging=('raw_CaImaging' in args.modalities),
                    with_processed_CaImaging=('processed_CaImaging'
                                              in args.modalities),
                    Ca_Imaging_options=Ca_Imaging_options)
            except BaseException as be:
                print(be)
                print(' /!\ No Ca-Imaging data found, /!\ ')
                print('             -> add them later with "add_ophys.py" \n')

    #################################################
    ####         Writing NWB file             #######
    #################################################

    if os.path.isfile(filename):
        temp = str(tempfile.NamedTemporaryFile().name) + '.nwb'
        print("""
        "%s" already exists
        ---> moving the file to the temporary file directory as: "%s" [...]
        """ % (filename, temp))
        shutil.move(filename, temp)
        print('---> done !')

    io = pynwb.NWBHDF5IO(filename, mode='w', manager=manager)
    print("""
    ---> Creating the NWB file: "%s"
    """ % filename)
    io.write(nwbfile, link_data=False)
    io.close()
    print('---> done !')

    if Ca_data is not None:
        Ca_data.close()  # can be closed only after having written

    return filename
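
build_NWB expects an argparse-style namespace; a hedged sketch of a minimal invocation (field names are inferred from the function body, and all values are placeholders):

from types import SimpleNamespace

args = SimpleNamespace(
    datafolder='/data/session-2021-01-01',  # hypothetical path
    verbose=True,
    modalities=['Locomotion', 'VisualStim', 'raw_FaceCamera', 'Pupil',
                'Facemotion', 'Electrophy'],
    running_sampling=50.,       # Hz
    photodiode_sampling=1000.,  # Hz
    FaceCamera_frame_sampling=1.,
    Pupil_frame_sampling=1.,
    FaceMotion_frame_sampling=1.,
    CaImaging_folder='')
filename = build_NWB(args)
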
Example #11
def build_summary_episodes(
        FILES,
        roi_prefix=10000,  # to have unique roi per session, session X has roi IDs: X*roi_prefix+i_ROI
        prestim_duration=2,
        modalities=['pupil', 'facemotion', 'running-speed'],
        dt_sampling=20,  # ms
        Nmax=100000):
    """
    
    """

    protocols, subjects, sessions, sessions_per_subject, filename, STIM = find_protocol_details(
        FILES)
    FULL_EPISODE_ARRAY, QUANT = [], {
        'subject': [],
        'session_per_subject': [],
        'session': [],
        'roi': [],
        'pupil': [],
        'running-speed': [],
        'facemotion': []
    }
    print('- building "%s" by concatenating episodes from n=%i files [...]' %
          (filename, len(FILES)))

    for session, f in enumerate(FILES):

        print('   -> session #%i: %s' % (session + 1, f))
        data = Data(f)

        for ip, p in enumerate(protocols):

            if len(protocols) > 1:
                duration = data.metadata['Protocol-%i-presentation-duration' %
                                         (ip + 1)]
            else:
                duration = data.metadata['presentation-duration']

            # build episodes of other modalities (running, ...)
            if ('Pupil' in data.nwbfile.processing) and ('pupil'
                                                         in modalities):
                Pupil_episodes = EpisodeResponse(
                    data,
                    protocol_id=ip,
                    prestim_duration=prestim_duration,
                    dt_sampling=dt_sampling,  # ms
                    quantity='Pupil')
                t_pupil_cond = (Pupil_episodes.t > 0) & (Pupil_episodes.t <
                                                         duration)
            else:
                Pupil_episodes = None

            if ('Running-Speed'
                    in data.nwbfile.acquisition) and ('running-speed'
                                                      in modalities):
                Running_episodes = EpisodeResponse(
                    data,
                    protocol_id=ip,
                    prestim_duration=prestim_duration,
                    dt_sampling=dt_sampling,  # ms
                    quantity='Running-Speed')
                t_running_cond = (Running_episodes.t > 0) & (Running_episodes.t
                                                             < duration)
            else:
                Running_episodes = None

            if ('FaceMotion' in data.nwbfile.processing) and ('facemotion'
                                                              in modalities):
                FaceMotion_episodes = EpisodeResponse(
                    data,
                    protocol_id=ip,
                    prestim_duration=prestim_duration,
                    dt_sampling=dt_sampling,  # ms
                    quantity='FaceMotion')
                t_facemotion_cond = (FaceMotion_episodes.t >
                                     0) & (FaceMotion_episodes.t < duration)
            else:
                FaceMotion_episodes = None

            for roi in range(np.sum(data.iscell))[:Nmax]:

                roiID = roi_prefix * session + roi

                EPISODES = EpisodeResponse(
                    data,
                    protocol_id=ip,
                    quantity='CaImaging',
                    subquantity='dF/F',
                    dt_sampling=dt_sampling,  # ms
                    roiIndex=roi,
                    prestim_duration=prestim_duration)

                for iEp in range(EPISODES.resp.shape[0]):
                    FULL_EPISODE_ARRAY.append(EPISODES.resp[iEp, :])
                    for key in data.nwbfile.stimulus.keys():
                        STIM[key].append(data.nwbfile.stimulus[key].data[iEp])
                    QUANT['roi'].append(roiID)
                    QUANT['session'].append(session)
                    QUANT['subject'].append(data.metadata['subject_ID'])
                    QUANT['session_per_subject'].append(
                        sessions_per_subject[session])

                    if Running_episodes is not None:
                        QUANT['running-speed'].append(
                            Running_episodes.resp[iEp, :][t_running_cond])
                    else:
                        QUANT['running-speed'].append(666.)  # flag for None

                    if Pupil_episodes is not None:
                        QUANT['pupil'].append(
                            Pupil_episodes.resp[iEp, :][t_pupil_cond])
                    else:
                        QUANT['pupil'].append(666.)  # flag for None

                    if FaceMotion_episodes is not None:
                        QUANT['facemotion'].append(FaceMotion_episodes.resp[
                            iEp, :][t_facemotion_cond])
                    else:
                        QUANT['facemotion'].append(666.)  # flag for None

    # set up the NWBFile
    description = 'Summary data concatenating episodes from the datafiles:\n'
    for f in FILES:
        description += '- %s\n' % f
    nwbfile = pynwb.NWBFile(
        session_description=description,
        identifier=filename,
        session_start_time=datetime.datetime.now().astimezone())

    episode_waveforms = pynwb.TimeSeries(name='episode_waveforms',
                                         data=np.array(FULL_EPISODE_ARRAY),
                                         unit='dF/F',
                                         timestamps=EPISODES.t)

    nwbfile.add_acquisition(episode_waveforms)

    for key in STIM:
        stim = pynwb.TimeSeries(name=key,
                                data=np.array(STIM[key]),
                                unit='None',
                                rate=1.)
        nwbfile.add_stimulus(stim)

    for key in QUANT:
        stim = pynwb.TimeSeries(name=key,
                                data=np.array(QUANT[key]),
                                unit='None',
                                rate=1.)
        nwbfile.add_acquisition(stim)

    with pynwb.NWBHDF5IO(filename, mode='w') as io:
        io.write(nwbfile)
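
A minimal read-back sketch for the summary file written above (the filename comes from find_protocol_details, so a placeholder is used here):

import pynwb

with pynwb.NWBHDF5IO('summary.nwb', mode='r') as io:  # placeholder filename
    summary = io.read()
    waveforms = summary.acquisition['episode_waveforms'].data[:]
    rois = summary.acquisition['roi'].data[:]
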
Example #12
                if folder not in added_stim:
                    added_stim[folder] = []
                added_stim[folder].append(name)
                if folder not in currents:
                    fig1 = plt.figure(figsize=(12, 6), dpi=80)
                    curr = fig1.add_subplot(111)
                    plt.legend()
                    currents[folder] = curr

                protocol = protocols[folder]
                pi = protocol_info[folder]
                desc = '%s, injected current %snA' % (pi, stim_ampl)
                print(desc)
                ts_stim = pynwb.TimeSeries(
                    '%s_%s' % (protocol, name),
                    data,
                    'pA',
                    timestamps=timestamps,
                    description=desc,
                    comments='Extracted from IgorPro file: %s; '
                             'path in file: %s; %s' % (filename, path, gen_info))

                nwbfile.add_stimulus(ts_stim)

                currents[folder].plot(timestamps, wave['wData'], label=desc)
                
                found += 1
                record = filesystem['root'][d][dd]
                
                path = '%s/%s/%s'%('root',d,dd)
                wave = record.wave['wave']
                name = wave['wave_header']['bname']
                print('=========[volts: %s, of %i found]====='%(name,found))
Example #13
def to_nwb(inputDir,
           outputDir,
           schema=None,
           outputFilename=None,
           verbose=True):
    """
    Load all behavior data from txt files and save a single NWB file.

    Args:
        inputDir (str): folder containing all txt files.
        outputDir (str): destination folder for the NWB file.
        schema (dict): dictionary containing definition of NWB file structure.
        outputFilename (str): if None it will be created automatically.
        verbose (bool): if True, show message for each step.

    Returns:
        nwbFile: NWB file object.
        outputFullfile: full path to the output NWB file.
    """

    if schema is None:
        schema = read_schema(inputDir)

    creationDate = datetime.now(tz=tzlocal())

    # -- Read general session parameters --
    if verbose:
        print('Loading session parameters...')
    subject = read_txt_data(inputDir, schema['subject']['filename'],
                            'one_string')
    experimenter = read_txt_data(inputDir, schema['experimenter']['filename'],
                                 'one_string')
    sessionDescription = schema['session_description']
    institution = schema['institution']
    species = schema['species']
    sessionStartTime = read_txt_data(inputDir,
                                     schema['session_start_time']['filename'],
                                     'datetime')

    # Date convention follows ISO 8601  (https://en.wikipedia.org/wiki/ISO_8601)
    startTimeStr = sessionStartTime.strftime('%Y%m%dT%H%M%S')
    uniqueID = '{}_{}'.format(subject, startTimeStr)

    if outputFilename is None:
        outputFilename = make_nwb_basename(subject, startTimeStr)

    subjectObj = pynwb.file.Subject(subject_id=subject, species=species)

    # -- Load extension (to enable metadata) --
    extensionDir = os.path.dirname(__file__)
    pynwb.load_namespaces(os.path.join(extensionDir, EXTENSION_FILE))
    LabMetaData_ext = pynwb.get_class('LabMetaData_ext',
                                      'uobrainflex_metadata')

    # -- Initialize NWB file --
    nwbFile = pynwb.NWBFile(
        session_description=sessionDescription,  # required
        identifier=uniqueID,  # required
        session_start_time=sessionStartTime,  # required
        file_create_date=creationDate,
        subject=subjectObj,
        experimenter=experimenter,
        institution=institution)

    # -- Lab metadata --
    metadata = {}
    for entryName, entryInfo in schema['metadata'].items():
        metadata[entryName] = read_txt_data(inputDir, entryInfo['filename'],
                                            'one_string')
    # extension fields with no corresponding txt entry get an empty string
    missing_fields = set(LabMetaData_ext.__nwbfields__) - set(metadata)
    for entryName in missing_fields:
        metadata[entryName] = ''
    sessionMetadata = LabMetaData_ext(name='metadata', **metadata)
    nwbFile.add_lab_meta_data(sessionMetadata)

    # -- Store time series (running speed, pupil, etc) --
    for timeSeriesName, timeSeriesInfo in schema['acquisition'].items():
        if verbose:
            print('Loading {}...'.format(timeSeriesName))
        if 'timestamps' in timeSeriesInfo:
            timeSeriesTimestamps = read_txt_data(inputDir,
                                                 timeSeriesInfo['timestamps'],
                                                 'float')
        elif 'rate' in timeSeriesInfo:
            raise ValueError('The use of sampling rate instead of timestamps '+\
                             'has not been implemented yet.')
        if 'filename' in timeSeriesInfo:
            timeSeriesData = read_txt_data(inputDir,
                                           timeSeriesInfo['filename'], 'float')
        else:
            timeSeriesData = np.zeros(len(timeSeriesTimestamps), dtype=int)
        timeSeriesObj = pynwb.TimeSeries(
            name=timeSeriesName,
            data=timeSeriesData,
            timestamps=timeSeriesTimestamps,
            unit=timeSeriesInfo['unit'],
            description=timeSeriesInfo['description'])
        nwbFile.add_acquisition(timeSeriesObj)

    # -- Create a dataframe with data from each trial (and specify columns for NWB file) --
    if verbose:
        print('Loading trial data...')
    trialDataFrame = pd.DataFrame()
    for fieldName, fieldInfo in schema['trials'].items():
        fieldDescription = fieldInfo['description']
        # -- If there is a map to interpret values, add it to the description
        if 'map' in fieldInfo:
            fieldDescription += " MAP:{}".format(str(fieldInfo['map']))
        if (fieldName != 'start_time') and (fieldName != 'stop_time'):
            nwbFile.add_trial_column(fieldName, description=fieldDescription)
        fieldData = read_txt_data(inputDir, fieldInfo['filename'],
                                  fieldInfo['dtype'])
        trialDataFrame[fieldName] = fieldData

    # -- Add trial data to NWB file --
    for index, row in trialDataFrame.iterrows():
        nwbFile.add_trial(**dict(row))

    # -- Save NWB file to disk --
    if verbose:
        print('Saving NWB file...')
    outputFullfile = os.path.join(outputDir, outputFilename)
    with pynwb.NWBHDF5IO(outputFullfile, 'w') as io:
        io.write(nwbFile)
    print('Saved {}'.format(outputFullfile))

    return nwbFile, outputFullfile
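
A usage sketch (the directory paths are hypothetical; when schema is None, read_schema(inputDir) supplies the file-structure definition):

nwbFile, outputFullfile = to_nwb(inputDir='/data/behavior/session0',
                                 outputDir='/data/nwb')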