Example #1
    def test_init(self):
        aS = AnnotationSeries('test_aS',
                              'a hypothetical source',
                              timestamps=list())
        self.assertEqual(aS.name, 'test_aS')
        self.assertEqual(aS.source, 'a hypothetical source')

        aS.add_annotation(2.0, 'comment')
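
A minimal usage sketch against the current pynwb API (where the positional source argument used above no longer exists); all names here are illustrative:

from pynwb.misc import AnnotationSeries

annotations = AnnotationSeries(name='notes',
                               data=['start', 'stop'],
                               timestamps=[0.0, 5.0])
annotations.add_annotation(2.0, 'comment')  # append one (time, label) pair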
Example #2
def get_events(session_path, suffixes=None):
    """
    Parameters
    ----------
    session_path: str
    suffixes: Iterable(str), optional
        The 3-letter names for the events to write. If None, detect all in session_path

    """
    session_name = os.path.split(session_path)[1]

    if suffixes is None:
        evt_files = glob(os.path.join(session_path, session_name) + '.evt.*') + \
                    glob(os.path.join(session_path, session_name) + '.*.evt')
    else:
        evt_files = [
            os.path.join(session_path, session_name + s) for s in suffixes
        ]

    out = []
    for evt_file in evt_files:
        parts = os.path.split(evt_file)[1].split('.')
        if parts[-1] == 'evt':
            name = '.'.join(parts[1:-1])
        else:
            name = parts[-1]
        df = pd.read_csv(evt_file, sep='\t', names=('time', 'desc'))
        if len(df):
            timestamps = df.values[:, 0].astype(float) / 1000
            data = df['desc'].values
            annotation_series = AnnotationSeries(name=name,
                                                 data=data,
                                                 timestamps=timestamps)
            out.append(annotation_series)
    return out
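
A hypothetical call, assuming a Neuroscope session directory whose .evt files follow the <session>/<session>.evt.* naming used above; the path is a placeholder:

series_list = get_events('/data/SessionA')  # hypothetical session directory
for s in series_list:
    print(s.name, len(s.timestamps))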
Example #3
    def add_behavior(self, nev_fp, blocks, practice_incl=None):
        if nev_fp is None or not os.path.exists(nev_fp):
            raise ValueError('nev_fp must point to an existing .nev file')

        start_time = self.nwb.session_start_time
        # events = nev_to_behavior_annotation(nev_fp,practice_incl=practice_incl)
        # events.set_timestamps(events.timestamps-start_time.timestamp())

        nev = load_nev(nev_fp)
        ev = pd.DataFrame.from_records(nev_as_records(nev),index='TimeStamp')

        ev['EventString'] = [str(v, 'utf-8') for v in ev.EventString.values]
        ev['time'] = pd.to_datetime(ev.index.values, unit='us', utc=True)
        ev = ev[ev.ttl == 1]

        label_blockstart(ev)
        n_blocks = int(len(ev[ev.label == 'block_start']) / 2)
        n_ttls = int(n_blocks * 17)
        if practice_incl:
            n_ttls -= 5

        ev_ts = np.array([t.timestamp() for t in ev.time]) - start_time.timestamp()

        events = AnnotationSeries(name='ttl', data=ev.label.values[:n_ttls], timestamps=ev_ts[:n_ttls])

        self.nwb.add_acquisition(events)
        self.nwb.add_trial_column(name='outcome', description='Choice pair for both players')
        # self.nwb.add_trial_column(name='round',description='')

        ttl = self.nwb.acquisition['ttl']
        trial_starts = [t for d, t in zip(ttl.data, ttl.timestamps) if d.startswith('trial')]

        # Calculate Trial deltas
        # pdil_events = pd.concat(self.load_pdil_events(local_scheduler=True))
        # pdil_events = pdil_events[pdil_events.block.isin(blocks)]

        outcomes = pd.concat(self.load_game_data(local_scheduler=True))
        outcomes = outcomes[outcomes.block.isin(blocks)]
        trial_delta = outcomes.sort_values(['block', 'trial']).timing_sumTictocs.values
        choices = pd.DataFrame.from_records(map(points_to_choice, outcomes.points.values), columns=['A', 'B'])
        choices['points'] = outcomes.points.values
        choices['tuple'] = [a[0].upper() + '-' + b[0].upper() for a, b in zip(choices.A.values, choices.B.values)]
        for start, dt, choice in zip(trial_starts, trial_delta, choices.tuple.values):
            self.nwb.add_trial(start_time=start, stop_time=start + dt, outcome=choice)

        block_starts = ttl.timestamps[ttl.data == 'block_start'][1::2]
        # for bstart,idx in zip(block_starts,idx):
        #     self.nwb.add_epoch()


        return self.nwb
Example #4
 def _write_event(self, nwbfile, event):
     hierarchy = {
         'block': event.segment.block.name,
         'segment': event.segment.name
     }
     tS_evt = AnnotationSeries(
         name=event.name,
         data=event.labels,
         timestamps=event.times.rescale('second').magnitude,
         description=event.description or "",
         comments=json.dumps(hierarchy))
     nwbfile.add_acquisition(tS_evt)
     return tS_evt
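
Because the Neo block/segment hierarchy is stashed in the comments field as a JSON string, a reader can recover it later; a small sketch (the acquisition name is an assumption):

import json

evt = nwbfile.acquisition['my_event']  # hypothetical event name
hierarchy = json.loads(evt.comments)   # {'block': ..., 'segment': ...}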
Example #5
def write_events(nwbfile: NWBFile,
                 session_path: str,
                 suffixes: Iterable[str] = None,
                 module=None):
    """Write the event information from Neuroscope into the NWBFile.

    Parameters
    ----------
    nwbfile: pynwb.NWBFile
    session_path: str
    suffixes: Iterable(str), optional
        The 3-letter names for the events to write. If None, detect all in session_path
    module: pynwb.processing_module

    """
    session_name = os.path.split(session_path)[1]

    if suffixes is None:
        evt_files = glob(os.path.join(session_path, session_name) +
                         ".evt.*") + glob(
                             os.path.join(session_path, session_name) +
                             ".*.evt")
    else:
        evt_files = [
            os.path.join(session_path, session_name + s) for s in suffixes
        ]
    if module is None:
        module = check_module(nwbfile, "events")
    for evt_file in evt_files:
        parts = os.path.split(evt_file)[1].split(".")
        if parts[-1] == "evt":
            name = ".".join(parts[1:-1])
        else:
            name = parts[-1]
        if os.path.isfile(evt_file):
            df = pd.read_csv(evt_file, sep="\t", names=("time", "desc"))
            if len(df):
                timestamps = df.values[:, 0].astype(float) / 1000
                data = df["desc"].values
                annotation_series = AnnotationSeries(name=name,
                                                     data=data,
                                                     timestamps=timestamps)
                module.add_data_interface(annotation_series)
        else:
            print("Warning: No .evt file found at the path location!"
                  "Unable to write annotation_series.")
Example #6
File: nwb.py Project: elijahc/emu
def add_ttl(nwbfile, nev_fp):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        nev = load_nev(nev_fp)
    if len(nev['events']) == 0:
        # Guard: without any events, ev below would otherwise be unbound
        return nwbfile
    ev = pd.DataFrame.from_records(nev_as_records(nev), index='TimeStamp')

    ev['EventString'] = [str(v, 'utf-8') for v in ev.EventString.values]
    ev['time'] = pd.to_datetime(ev.index.values, unit='us', utc=True)
    ev = ev[ev.ttl == 1]

    ev_ts = np.array([t.timestamp() for t in ev.time])

    events = AnnotationSeries(name='ttl', data=ev.ttl.values, timestamps=ev_ts)
    nwbfile.add_acquisition(events)
    return nwbfile
Example #7
def get_events(session_path: str, suffixes: Iterable[str] = None):
    """Retrieve event information from Neuroscope evt files.

    Parameters
    ----------
    session_path: str
    suffixes: Iterable(str), optional
        The 3-letter names for the events to write. If None, detect all in session_path

    """
    session_name = os.path.split(session_path)[1]

    if suffixes is None:
        evt_files = glob(os.path.join(session_path, session_name) +
                         ".evt.*") + glob(
                             os.path.join(session_path, session_name) +
                             ".*.evt")
    else:
        evt_files = [
            os.path.join(session_path, session_name + s) for s in suffixes
        ]

    out = []
    for evt_file in evt_files:
        parts = os.path.split(evt_file)[1].split(".")
        if parts[-1] == "evt":
            name = ".".join(parts[1:-1])
        else:
            name = parts[-1]
        if os.path.isfile(evt_file):
            df = pd.read_csv(evt_file, sep="\t", names=("time", "desc"))
            if len(df):
                timestamps = df.values[:, 0].astype(float) / 1000
                data = df["desc"].values
                annotation_series = AnnotationSeries(name=name,
                                                     data=data,
                                                     timestamps=timestamps)
                out.append(annotation_series)
        else:
            print("Warning: No .evt file found at the path location!"
                  "Unable to retrieve annotation_series.")
            out = None

    return out
Example #8
File: nwb.py Project: elijahc/emu
def nev_to_behavior_annotation(nev_fp, practice_incl=False):
    nev = load_nev(nev_fp)
    ev = pd.DataFrame.from_records(nev_as_records(nev), index='TimeStamp')

    ev['EventString'] = [str(v, 'utf-8') for v in ev.EventString.values]
    ev['time'] = pd.to_datetime(ev.index.values, unit='us', utc=True)
    ev = ev[ev.ttl == 1]

    label_blockstart(ev)
    n_blocks = int(len(ev[ev.label == 'block_start']) / 2)
    n_ttls = int(n_blocks * 17)
    if practice_incl:
        n_ttls -= 5

    ev_ts = np.array([t.timestamp() for t in ev.time])

    events = AnnotationSeries(name='ttl',
                              data=ev.label.values[:n_ttls],
                              timestamps=ev_ts[:n_ttls])

    return events
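
As the commented-out lines in Example #3 suggest, the returned timestamps are absolute; a sketch of shifting them to session-relative time before adding the series to a file (nwbfile and the path are assumptions):

events = nev_to_behavior_annotation('/data/session.nev')  # hypothetical path
offset = nwbfile.session_start_time.timestamp()           # assumes an open NWBFile
shifted = AnnotationSeries(name='ttl',
                           data=events.data,
                           timestamps=np.asarray(events.timestamps) - offset)
nwbfile.add_acquisition(shifted)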
Example #9
def write_events(nwbfile, session_path, suffixes=None, module=None):
    """

    Parameters
    ----------
    nwbfile: pynwb.NWBFile
    session_path: str
    suffixes: Iterable(str), optional
        The 3-letter names for the events to write. If None, detect all in session_path
    module: pynwb.processing_module

    """
    session_name = os.path.split(session_path)[1]

    if suffixes is None:
        evt_files = glob(os.path.join(session_path, session_name) + '.evt.*') + \
                    glob(os.path.join(session_path, session_name) + '.*.evt')
    else:
        evt_files = [
            os.path.join(session_path, session_name + s) for s in suffixes
        ]
    if module is None:
        module = check_module(nwbfile, 'events')
    for evt_file in evt_files:
        parts = os.path.split(evt_file)[1].split('.')
        if parts[-1] == 'evt':
            name = '.'.join(parts[1:-1])
        else:
            name = parts[-1]
        df = pd.read_csv(evt_file, sep='\t', names=('time', 'desc'))
        if len(df):
            timestamps = df.values[:, 0].astype(float) / 1000
            data = df['desc'].values
            annotation_series = AnnotationSeries(name=name,
                                                 data=data,
                                                 timestamps=timestamps)
            module.add_data_interface(annotation_series)
Example #10
def read_notes(fpath):
    """Reads the notes from the settings_and_notes.dat file and creates a pynwb.misc.AnnotationSeries

    Parameters
    ----------
    fpath: str
        data dir or path to settings_and_notes.dat

    Returns
    -------
    None or pynwb.misc.AnnotationSeries

    """
    if not fpath.endswith('.dat'):
        fpath = os.path.join(fpath, 'settings_and_notes.dat')
    df = pd.read_csv(fpath, skiprows=3, delimiter='\t')
    if len(df):
        return AnnotationSeries(
            name='notes',
            data=df['Note'].values,
            timestamps=df['elapsedTime'].values / 1000,
            description='read from miniscope settings_and_notes.dat file')
    else:
        return
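
A sketch of wiring the result into a file; the directory and nwbfile are assumptions:

notes = read_notes('/data/miniscope_session')  # hypothetical data dir
if notes is not None:
    nwbfile.add_acquisition(notes)             # assumes an open NWBFile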
Example #11
def no2nwb(NOData, session_use, subjects_ini, path_to_data):
    '''
    Purpose:
        Import the data and associated metadata from the new/old recognition
        dataset into an NWB file. Each feature of the dataset, such as the
        events (i.e., TTLs) or the mean waveforms, is stored in the
        appropriate component of the NWB file.
    '''

    # Time scaling (convert µs -> s for the NWB file)
    TIME_SCALING = 10**6

    # Prepare the NO data that will be converted to the NWB format

    session = NOData.sessions[session_use]
    events = NOData._get_event_data(session_use, experiment_type='All')
    cell_ids = NOData.ls_cells(session_use)
    experiment_id_learn = session['experiment_id_learn']
    experiment_id_recog = session['experiment_id_recog']
    task_descr = session['task_descr']

    # Get the metadata for the subject from the config file
    # (subjects_ini: one section per session with subject metadata)

    # Check the config file path
    filename = subjects_ini
    if not os.path.exists(filename):
        print('This file does not exist: {}'.format(filename))
        print('Check the filename and/or directory')

    # Read the config file
    try:
        # initialize the ConfigParser() class
        config = configparser.ConfigParser()
        # read the .ini file
        config.read(filename)
    except configparser.Error:
        print('Failed to read the config file.')
        print('Does this file exist: {}'.format(os.path.exists(filename)))

    # Read the metadata from the INI file.
    for section in config.sections():
        if session_use == int(section):
            session_id = int(section)  # the New/Old session ID
            for value in config[section]:
                if value.lower() == 'nosessions.age':
                    age = int(config[section][value])
                if value.lower() == 'nosessions.diagnosiscode':
                    epilepsyDxCode = config[section][value]
                    epilepsyDx = getEpilepsyDx(int(epilepsyDxCode))
                if value.lower() == 'nosessions.sex':
                    sex = config[section][value].strip("'")
                if value.lower() == 'nosessions.id':
                    ID = config[section][value].strip("'")
                if value.lower() == 'nosessions.session':
                    pt_session = config[section][value].strip("'")
                if value.lower() == 'nosessions.date':
                    unformattedDate = config[section][value].strip("'")
                    date = datetime.strptime(unformattedDate, '%Y-%m-%d')
                    finaldate = date.replace(hour=0, minute=0)
                if value.lower() == 'nosessions.institution':
                    institution = config[section][value].strip("'")
                if value.lower() == 'nosessions.la':
                    LA = config[section][value].strip("'").split(',')
                    if LA[0] == 'NaN':
                        LA_x = np.nan
                        LA_y = np.nan
                        LA_z = np.nan
                    else:
                        LA_x = float(LA[0])
                        LA_y = float(LA[1])
                        LA_z = float(LA[2])
                if value.lower() == 'nosessions.ra':
                    RA = config[section][value].strip("'").split(',')
                    if RA[0] == 'NaN':
                        RA_x = np.nan
                        RA_y = np.nan
                        RA_z = np.nan
                    else:
                        RA_x = float(RA[0])
                        RA_y = float(RA[1])
                        RA_z = float(RA[2])
                if value.lower() == 'nosessions.lh':
                    LH = config[section][value].strip("'").split(',')
                    if LH[0] == 'NaN':
                        LH_x = np.nan
                        LH_y = np.nan
                        LH_z = np.nan
                    else:
                        LH_x = float(LH[0])
                        LH_y = float(LH[1])
                        LH_z = float(LH[2])
                if value.lower() == 'nosessions.rh':
                    RH = config[section][value].strip("'").split(',')
                    if RH[0] == 'NaN':
                        RH_x = np.nan
                        RH_y = np.nan
                        RH_z = np.nan
                    else:
                        RH_x = float(RH[0])
                        RH_y = float(RH[1])
                        RH_z = float(RH[2])
                if value.lower() == 'nosessions.system':
                    signalSystem = config[section][value].strip("'")

    # =================================================================

    print(
        '======================================================================='
    )
    print('session use: {}'.format(session_id))
    print('age: {}'.format(age))
    print('epilepsy_diagnosis: {}'.format(epilepsyDx))

    nwb_subject = Subject(age=str(age),
                          description=epilepsyDx,
                          sex=sex,
                          species='Human',
                          subject_id=pt_session[:pt_session.find('_')])

    # Create the NWB file
    nwbfile = NWBFile(
        #source='https://datadryad.org/bitstream/handle/10255/dryad.163179/RecogMemory_MTL_release_v2.zip',
        session_description='New/Old recognition task for ID: {}. '.format(
            session_id),
        identifier='{}_{}'.format(ID, session_use),
        session_start_time=finaldate,  #default session start time
        file_create_date=datetime.now(),
        experiment_description=
        'The data contained within this file describes a new/old recognition task performed in '
        'patients with intractable epilepsy implanted with depth electrodes and Behnke-Fried '
        'microwires in the human medial temporal lobe (MTL).',
        institution=institution,
        keywords=[
            'Intracranial Recordings', 'Intractable Epilepsy',
            'Single-Unit Recordings', 'Cognitive Neuroscience', 'Learning',
            'Memory', 'Neurosurgery'
        ],
        related_publications=
        'Faraut et al. 2018, Scientific Data; Rutishauser et al. 2015, Nat Neurosci;',
        lab='Rutishauser',
        subject=nwb_subject,
        data_collection='learning: {}, recognition: {}'.format(
            session['experiment_id_learn'], session['experiment_id_recog']))

    # Add events and experiment_id acquisition
    events_description = (
        """ The events correspond to the TTL markers for each trial. For the learning trials, the TTL markers
            are the following: 55 = start of the experiment, 1 = stimulus ON, 2 = stimulus OFF, 3 = Question Screen Onset [“Is this an animal?”],
            20 = Yes (21 = No) during learning, 6 = End of Delay after Response, 66 = End of Experiment. For the recognition trials,
            the TTL markers are the following: 55 = start of experiment, 1 = stimulus ON, 2 = stimulus OFF, 3 = Question Screen Onset [“Have you seen this image before?”],
            31:36 = Confidence (Yes vs. No) response [31 (new, confident), 32 (new, probably), 33 (new, guess), 34 (old, guess),
            35 (old, probably), 36 (old, confident)], 66 = End of Experiment"""
    )

    event_ts = AnnotationSeries(name='events',
                                data=np.asarray(events[1].values).astype(str),
                                timestamps=np.asarray(events[0].values) /
                                TIME_SCALING,
                                description=events_description)

    experiment_ids_description = (
        """The experiment_ids correspond to the encoding (i.e., learning) or recognition trials. The learning trials are demarcated by: {}. The recognition trials are demarcated by: {}. """
        .format(experiment_id_learn, experiment_id_recog))

    experiment_ids = TimeSeries(name='experiment_ids',
                                unit='NA',
                                data=np.asarray(events[2]),
                                timestamps=np.asarray(events[0].values) /
                                TIME_SCALING,
                                description=experiment_ids_description)

    nwbfile.add_acquisition(event_ts)
    nwbfile.add_acquisition(experiment_ids)

    # Add stimuli to the NWB file
    # Get the first cell from the cell list
    cell = NOData.pop_cell(session_use,
                           NOData.ls_cells(session_use)[0], path_to_data)
    trials = cell.trials
    stimuli_recog_path = [trial.file_path_recog for trial in trials]
    stimuli_learn_path = [trial.file_path_learn for trial in trials]

    # Add epochs and trials: storing start and end times for a stimulus

    # First extract the category ids and names that we need
    # The metadata for each trial will be stored in a trial table

    cat_id_recog = [trial.category_recog for trial in trials]
    cat_name_recog = [trial.category_name_recog for trial in trials]
    cat_id_learn = [trial.category_learn for trial in trials]
    cat_name_learn = [trial.category_name_learn for trial in trials]

    # Extract the event timestamps
    events_learn_stim_on = events[(events[2] == experiment_id_learn) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_learn_stim_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_learn_delay1_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay1_off'])]
    events_learn_delay2_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay2_off'])]
    events_learn = events[(events[2] == experiment_id_learn)]
    events_learn_response = []
    events_learn_response_time = []
    for i in range(len(events_learn[0])):
        if (events_learn.iloc[i, 1]
                == NOData.markers['response_learning_animal']) or (
                    events_learn.iloc[i, 1]
                    == NOData.markers['response_learning_non_animal']):
            events_learn_response.append(events_learn.iloc[i, 1] - 20)
            events_learn_response_time.append(events_learn.iloc[i, 0])

    events_recog_stim_on = events[(events[2] == experiment_id_recog) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_recog_stim_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_recog_delay1_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay1_off'])]
    events_recog_delay2_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay2_off'])]
    events_recog = events[(events[2] == experiment_id_recog)]
    events_recog_response = []
    events_recog_response_time = []
    for i in range(len(events_recog[0])):
        if ((events_recog.iloc[i, 1] == NOData.markers['response_1'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_2'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_3'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_4'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_5'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_6'])):
            events_recog_response.append(events_recog.iloc[i, 1])
            events_recog_response_time.append(events_recog.iloc[i, 0])

    # Extract new_old label
    new_old_recog = [trial.new_old_recog for trial in trials]
    # Create the trial tables

    nwbfile.add_trial_column('stim_on_time',
                             'The Time when the Stimulus is Shown')
    nwbfile.add_trial_column('stim_off_time',
                             'The Time when the Stimulus is Off')
    nwbfile.add_trial_column('delay1_time', 'The Time when Delay1 is Off')
    nwbfile.add_trial_column('delay2_time', 'The Time when Delay2 is Off')
    nwbfile.add_trial_column('stim_phase',
                             'Learning/Recognition Phase During the Trial')
    nwbfile.add_trial_column('stimCategory', 'The Category ID of the Stimulus')
    nwbfile.add_trial_column('category_name',
                             'The Category Name of the Stimulus')
    nwbfile.add_trial_column('external_image_file',
                             'The File Path to the Stimulus')
    nwbfile.add_trial_column(
        'new_old_labels_recog',
        'The ground-truth labels for new or old stimuli. 0 == old stimuli '
        '(presented during the learning phase), 1 == new stimuli (not seen '
        'during the learning phase)')
    nwbfile.add_trial_column('response_value',
                             'The Response for Each Stimulus')
    nwbfile.add_trial_column('response_time',
                             'The Response Time for each Stimulus')

    range_recog = np.amin([
        len(events_recog_stim_on),
        len(events_recog_stim_off),
        len(events_recog_delay1_off),
        len(events_recog_delay2_off)
    ])
    range_learn = np.amin([
        len(events_learn_stim_on),
        len(events_learn_stim_off),
        len(events_learn_delay1_off),
        len(events_learn_delay2_off)
    ])

    # Iterate the event list and add information into each epoch and trial table
    for i in range(range_learn):

        nwbfile.add_trial(
            start_time=(events_learn_stim_on.iloc[i][0]) / (TIME_SCALING),
            stop_time=(events_learn_delay2_off.iloc[i][0]) / (TIME_SCALING),
            stim_on_time=(events_learn_stim_on.iloc[i][0]) / (TIME_SCALING),
            stim_off_time=(events_learn_stim_off.iloc[i][0]) / (TIME_SCALING),
            delay1_time=(events_learn_delay1_off.iloc[i][0]) / (TIME_SCALING),
            delay2_time=(events_learn_delay2_off.iloc[i][0]) / (TIME_SCALING),
            stim_phase='learn',
            stimCategory=cat_id_learn[i],
            category_name=cat_name_learn[i],
            external_image_file=stimuli_learn_path[i],
            new_old_labels_recog='NA',
            response_value=events_learn_response[i],
            response_time=(events_learn_response_time[i]) / (TIME_SCALING))

    for i in range(range_recog):

        nwbfile.add_trial(
            start_time=events_recog_stim_on.iloc[i][0] / (TIME_SCALING),
            stop_time=events_recog_delay2_off.iloc[i][0] / (TIME_SCALING),
            stim_on_time=events_recog_stim_on.iloc[i][0] / (TIME_SCALING),
            stim_off_time=events_recog_stim_off.iloc[i][0] / (TIME_SCALING),
            delay1_time=events_recog_delay1_off.iloc[i][0] / (TIME_SCALING),
            delay2_time=events_recog_delay2_off.iloc[i][0] / (TIME_SCALING),
            stim_phase='recog',
            stimCategory=cat_id_recog[i],
            category_name=cat_name_recog[i],
            external_image_file=stimuli_recog_path[i],
            new_old_labels_recog=new_old_recog[i],
            response_value=events_recog_response[i],
            response_time=events_recog_response_time[i] / (TIME_SCALING))

    # Add the waveform clustering and the spike data.
    # Get the unique channel id that we will be iterate over
    channel_ids = np.unique([cell_id[0] for cell_id in cell_ids])

    # unique unit id
    unit_id = 0

    # Create unit columns
    nwbfile.add_unit_column('origClusterID', 'The original cluster id')
    nwbfile.add_unit_column('waveform_mean_encoding',
                            'The mean waveform for encoding phase.')
    nwbfile.add_unit_column('waveform_mean_recognition',
                            'The mean waveform for the recognition phase.')
    nwbfile.add_unit_column('IsolationDist', 'IsolDist')
    nwbfile.add_unit_column('SNR', 'SNR')
    nwbfile.add_unit_column('waveform_mean_sampling_rate',
                            'The Sampling Rate of Waveform')

    # Add stimuli
    stimuli_presentation = []

    # Add stimuli learn
    counter = 1
    for path in stimuli_learn_path:
        if path == 'NA':
            continue
        folders = path.split('\\')

        path = os.path.join(path_to_data, 'Stimuli', folders[0], folders[1],
                            folders[2])
        img = cv2.imread(path)
        resized_image = cv2.resize(img, (300, 400))
        stimuli_presentation.append(resized_image)

    # Add stimuli recog
    counter = 1
    for path in stimuli_recog_path:
        folders = path.split('\\')
        path = os.path.join(path_to_data, 'Stimuli', folders[0], folders[1],
                            folders[2])
        img = cv2.imread(path)
        resized_image = cv2.resize(img, (300, 400))
        stimuli_presentation.append(resized_image)
        name = 'stimuli_recog_' + str(counter)

    # Add stimuli to OpticalSeries
    stimulus_presentation_on_time = []

    for n in range(0, len(events_learn_stim_on)):
        stimulus_presentation_on_time.append(events_learn_stim_on.iloc[n][0] /
                                             (TIME_SCALING))

    for n in range(0, len(events_recog_stim_on)):
        stimulus_presentation_on_time.append(events_recog_stim_on.iloc[n][0] /
                                             (TIME_SCALING))

    name = 'StimulusPresentation'
    stimulus = OpticalSeries(name=name,
                             data=stimuli_presentation,
                             timestamps=stimulus_presentation_on_time[:],
                             orientation='lower left',
                             format='raw',
                             unit='meters',
                             field_of_view=[.2, .3, .7],
                             distance=0.7,
                             dimension=[300, 400, 3])

    nwbfile.add_stimulus(stimulus)

    # Get Unit data
    all_spike_cluster_ids = []
    all_selected_time_stamps = []
    all_IsolDist = []
    all_SNR = []
    all_selected_mean_waveform_learn = []
    all_selected_mean_waveform_recog = []
    all_mean_waveform = []
    all_channel_id = []
    all_oriClusterIDs = []
    all_channel_numbers = []
    all_brain_area = []
    # Iterate the channel list

    # load brain area file
    brain_area_file_path = os.path.join(path_to_data, 'Data', 'events',
                                        session['session'], task_descr,
                                        'brainArea.mat')

    try:
        brain_area_mat = loadmat(brain_area_file_path)
    except FileNotFoundError:
        print("brain_area_mat file not found")

    for channel_id in channel_ids:
        cell_name = 'A' + str(channel_id) + '_cells.mat'
        cell_file_path = os.path.join(path_to_data, 'Data', 'sorted',
                                      session['session'], task_descr,
                                      cell_name)

        try:
            cell_mat = loadmat(cell_file_path)
        except FileNotFoundError:
            print("cell mat file not found")
            continue

        spikes = cell_mat['spikes']
        meanWaveform_recog = cell_mat['meanWaveform_recog']
        meanWaveform_learn = cell_mat['meanWaveform_learn']
        IsolDist_SNR = cell_mat['IsolDist_SNR']

        # Cluster ID of each spike
        spike_cluster_id = np.asarray([spike[1] for spike in spikes])
        # Timestamps of the spikes for each cluster ID, converted to seconds
        spike_timestamps = np.asarray([spike[2] for spike in spikes]) / TIME_SCALING
        unique_cluster_ids = np.unique(spike_cluster_id)

        # Iterate over the clusters (there may be more than one per channel)
        for id in unique_cluster_ids:

            # Grab brain area
            brain_area = extra_brain_area(brain_area_mat, channel_id)

            selected_spike_timestamps = spike_timestamps[spike_cluster_id ==
                                                         id]
            IsolDist, SNR = extract_IsolDist_SNR_by_cluster_id(
                IsolDist_SNR, id)
            selected_mean_waveform_learn = extra_mean_waveform(
                meanWaveform_learn, id)
            selected_mean_waveform_recog = extra_mean_waveform(
                meanWaveform_recog, id)

            # If the mean waveform does not have 256 elements, set it to all zeros
            if len(selected_mean_waveform_learn) != 256:
                selected_mean_waveform_learn = np.zeros(256)
            if len(selected_mean_waveform_recog) != 256:
                selected_mean_waveform_recog = np.zeros(256)

            mean_waveform = np.hstack(
                [selected_mean_waveform_learn, selected_mean_waveform_recog])

            # Append unit data
            all_spike_cluster_ids.append(id)
            all_selected_time_stamps.append(selected_spike_timestamps)
            all_IsolDist.append(IsolDist)
            all_SNR.append(SNR)
            all_selected_mean_waveform_learn.append(
                selected_mean_waveform_learn)
            all_selected_mean_waveform_recog.append(
                selected_mean_waveform_recog)
            all_mean_waveform.append(mean_waveform)
            all_channel_id.append(channel_id)
            all_oriClusterIDs.append(int(id))
            all_channel_numbers.append(channel_id)
            all_brain_area.append(brain_area)

            unit_id += 1

    nwbfile.add_electrode_column(
        name='origChannel',
        description='The original channel ID for the channel')

    # Add device
    device = nwbfile.create_device(name=signalSystem)

    # Add Electrodes (brain Area Locations, MNI coordinates for microwires)
    length_all_spike_cluster_ids = len(all_spike_cluster_ids)
    for electrodeNumber in range(0, len(channel_ids)):

        brainArea_location = extra_brain_area(brain_area_mat,
                                              channel_ids[electrodeNumber])

        if brainArea_location == 'RH':  #  Right Hippocampus
            full_brainArea_Location = 'Right Hippocampus'

            electrode_name = '{}-microwires-{}'.format(
                signalSystem, channel_ids[electrodeNumber])
            description = "Behnke Fried/Micro Inner Wire Bundle (Behnke-Fried BF08R-SP05X-000 and WB09R-SP00X-0B6; Ad-Tech Medical)"
            location = full_brainArea_Location

            # Add electrode group
            electrode_group = nwbfile.create_electrode_group(
                electrode_name,
                description=description,
                location=location,
                device=device)

            # Add electrode
            nwbfile.add_electrode([channel_ids[electrodeNumber]],
                                  x=RH_x,
                                  y=RH_y,
                                  z=RH_z,
                                  imp=np.nan,
                                  location=full_brainArea_Location,
                                  filtering='300-3000Hz',
                                  group=electrode_group,
                                  origChannel=channel_ids[electrodeNumber])

        if brainArea_location == 'LH':
            full_brainArea_Location = 'Left Hippocampus'

            electrode_name = '{}-microwires-{}'.format(
                signalSystem, channel_ids[electrodeNumber])
            description = "Behnke Fried/Micro Inner Wire Bundle (Behnke-Fried BF08R-SP05X-000 and WB09R-SP00X-0B6; Ad-Tech Medical)"
            location = full_brainArea_Location

            # Add electrode group
            electrode_group = nwbfile.create_electrode_group(
                electrode_name,
                description=description,
                location=location,
                device=device)

            nwbfile.add_electrode([channel_ids[electrodeNumber]],
                                  x=LH_x,
                                  y=LH_y,
                                  z=LH_z,
                                  imp=np.nan,
                                  location=full_brainArea_Location,
                                  filtering='300-3000Hz',
                                  group=electrode_group,
                                  origChannel=channel_ids[electrodeNumber])
        if brainArea_location == 'RA':
            full_brainArea_Location = 'Right Amygdala'

            electrode_name = '{}-microwires-{}'.format(
                signalSystem, channel_ids[electrodeNumber])
            description = "Behnke Fried/Micro Inner Wire Bundle (Behnke-Fried BF08R-SP05X-000 and WB09R-SP00X-0B6; Ad-Tech Medical)"
            location = full_brainArea_Location

            # Add electrode group
            electrode_group = nwbfile.create_electrode_group(
                electrode_name,
                description=description,
                location=location,
                device=device)

            nwbfile.add_electrode([channel_ids[electrodeNumber]],
                                  x=RA_x,
                                  y=RA_y,
                                  z=RA_z,
                                  imp=np.nan,
                                  location=full_brainArea_Location,
                                  filtering='300-3000Hz',
                                  group=electrode_group,
                                  origChannel=channel_ids[electrodeNumber])
        if brainArea_location == 'LA':
            full_brainArea_Location = 'Left Amygdala'

            electrode_name = '{}-microwires-{}'.format(
                signalSystem, channel_ids[electrodeNumber])
            description = "Behnke Fried/Micro Inner Wire Bundle (Behnke-Fried BF08R-SP05X-000 and WB09R-SP00X-0B6; Ad-Tech Medical)"
            location = full_brainArea_Location

            # Add electrode group
            electrode_group = nwbfile.create_electrode_group(
                electrode_name,
                description=description,
                location=location,
                device=device)

            nwbfile.add_electrode([channel_ids[electrodeNumber]],
                                  x=LA_x,
                                  y=LA_y,
                                  z=LA_z,
                                  imp=np.nan,
                                  location=full_brainArea_Location,
                                  filtering='300-3000Hz',
                                  group=electrode_group,
                                  origChannel=channel_ids[electrodeNumber])

    # Map each unit to the electrode-table row index of its channel
    channel_list = list(range(0, length_all_spike_cluster_ids))
    unique_channel_ids = np.unique(all_channel_id)
    for row_index, channel in enumerate(unique_channel_ids):
        for i in np.where(np.asarray(all_channel_id) == channel)[0]:
            channel_list[i] = row_index

    # Add the waveform sampling rate (98.4 kHz)
    waveform_mean_sampling_rate = [98.4 * 10**3]
    waveform_mean_sampling_rate_matrix = ([waveform_mean_sampling_rate] *
                                          length_all_spike_cluster_ids)

    # Add Units to NWB file
    for index_id in range(0, length_all_spike_cluster_ids):
        nwbfile.add_unit(
            id=index_id,
            spike_times=all_selected_time_stamps[index_id],
            origClusterID=all_oriClusterIDs[index_id],
            IsolationDist=all_IsolDist[index_id],
            SNR=all_SNR[index_id],
            waveform_mean_encoding=all_selected_mean_waveform_learn[index_id],
            waveform_mean_recognition=all_selected_mean_waveform_recog[
                index_id],
            electrodes=[channel_list[index_id]],
            waveform_mean_sampling_rate=waveform_mean_sampling_rate_matrix[
                index_id])

    return nwbfile
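
A hypothetical invocation; the session number and paths are assumptions about the new/old recognition dataset layout:

nwbfile = no2nwb(NOData, session_use=132,
                 subjects_ini='subjects.ini',
                 path_to_data='/data/RecogMemory_MTL_release_v2')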
Example #12
def test_show_annotations():
    timestamps = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    annotations = AnnotationSeries(name="test_annotations", timestamps=timestamps)
    show_annotations(annotations)
Example #13
def test_show_annotations():
    timestamps = np.array([0., 1., 2., 3., 4., 5., 6.])
    annotations = AnnotationSeries(name='test_annotations',
                                   timestamps=timestamps)
    show_annotations(annotations)
Example #14
 def test_init(self):
     aS = AnnotationSeries('test_aS', data=[1, 2, 3], timestamps=list())
     self.assertEqual(aS.name, 'test_aS')
     aS.add_annotation(2.0, 'comment')
Example #15
File: nwb.py Project: elijahc/emu
def nlx_to_nwb(nev_fp,
               ncs_paths,
               desc='',
               trim_buffer=60 * 10,
               practice_incl=False,
               electrode_locations=None):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        ncs = nlx.load_ncs(ncs_paths.pop(0), load_time=False)
        nev = nlx.load_nev(nev_fp)

    uuid = nev['header']['SessionUUID']
    start_time = pd.to_datetime(ncs['timestamp'][0], unit='us',
                                utc=True).to_pydatetime()
    start_time_sec = start_time.timestamp()
    nwbfile = NWBFile(session_description=desc,
                      identifier=uuid,
                      session_start_time=start_time)

    dev = nwbfile.create_device(name='Neuralynx')
    # ts = ncs_to_timeseries(ncs)

    if electrode_locations is not None:
        add_electrodes(nwbfile, electrode_locations, dev)

    ev = pd.DataFrame.from_records(nev_as_records(nev), index='TimeStamp')

    ev['EventString'] = [str(v, 'utf-8') for v in ev.EventString.values]
    ev['time'] = pd.to_datetime(ev.index.values, unit='us', utc=True)
    ev = ev[ev.ttl == 1]
    label_blockstart(ev)
    n_blocks = int(len(ev[ev.label == 'block_start']) / 2)
    n_ttls = int(n_blocks * 17)
    if practice_incl:
        n_ttls -= 5

    # ev['TimeStamp'] = ev.index.values/10**6 - start_time.timestamp()
    ev_ts = np.array([t.timestamp() for t in ev.time]) - start_time.timestamp()
    #     start_stop = ev[ev.EventString.isin(['Starting Recording','Stopping Recording'])]

    events = AnnotationSeries(name='ttl',
                              data=ev.label.values[:n_ttls],
                              timestamps=ev_ts[:n_ttls])
    data_time_len = events.timestamps[-1] + trim_buffer

    nwbfile.add_acquisition(events)

    lfp = LFP(name='LFP')
    nwbfile.add_acquisition(ncs_to_timeseries(ncs, data_time_len))

    for ch, ts_kwargs in iter_ncs_to_timeseries(ncs_paths, data_time_len):
        if electrode_locations is not None:
            # ts_kwargs['electrodes']=electrode_table_region
            try:
                electrode_table_region = nwbfile.create_electrode_table_region(
                    [ch], 'Channel')
                row = electrode_locations.where(
                    electrode_locations.chan_num == ch + 1).dropna().iloc[0]
                ts_kwargs['name'] = 'wire_{}_electrode_{}'.format(
                    int(row.wire_num), int(row.electrode))
                ts_kwargs['electrodes'] = electrode_table_region
                ts = ElectricalSeries(**ts_kwargs)
                if ts.name not in lfp.electrical_series.keys():
                    lfp.add_electrical_series(ts)
            except IndexError:
                pass
        else:
            ts = TimeSeries(**ts_kwargs)
            if ts.name not in nwbfile.acquisition.keys():
                nwbfile.add_acquisition(ts)

            else:
                # An acquisition with this name already exists; skip it
                print('Skipping duplicate acquisition: ', ts.name)

    # Create the ecephys processing module once, after the channel loop;
    # creating it on every iteration would raise because the module
    # name 'ecephys' already exists after the first pass.
    nwbfile.create_processing_module(
        name='ecephys',
        description='preprocessed extracellular electrophysiology')
    nwbfile.processing['ecephys'].add(lfp)

    return nwbfile
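
A sketch of persisting the result to disk; the paths are hypothetical:

from pynwb import NWBHDF5IO

nwbfile = nlx_to_nwb('/data/session.nev', ncs_paths=['/data/CSC1.ncs'])
with NWBHDF5IO('session.nwb', 'w') as io:
    io.write(nwbfile)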