Example #1
 def setUp(self):
     self.trials = pd.DataFrame()
     self.trials['time'] = np.array([113.64892769, 118.64899683, 125.64938855, \
         125.79111004, 126.99205732, 131.99212646, 138.99219036, 139.09721184, \
         144.09728098, 145.57216859])
     self.trials['event'] = np.array(['iti_start', 'iti_end', 'omission', \
         'tray_activated', 'iti_start', 'stimulus_appears', 'tray_activated', \
         'iti_start', 'stimulus_appears', 'correct'])
     self.trials['trial_idx'] = np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3])
     self.trials['results'] = np.array(['omission', 'omission', 'omission', \
         'omission', 'NONE', 'NONE', 'NONE', 'correct', 'correct', 'correct'])
     self.trials['with_previous_results'] = np.array(['a', 'a', 'a', 'a', \
         'omission_NONE', 'omission_NONE', 'omission_NONE', \
         'NONE_correct', 'NONE_correct', 'NONE_correct'])
     self.trials['event_type'] = np.array(['start', 'end', 'results', \
         'other', 'start', 'stimulus', 'other', 'start', 'stimulus', 'results'])
     self.trials.loc[self.trials.with_previous_results == 'a', \
         'with_previous_results'] = np.nan
     self.trials.name = 'trials'
     self.startoftrial = ['start']
     self.endoftrial = ['end']
     self.segment = Segment()
     self.segment.dataframes = {}
     self.segment.dataframes.update({'trials': self.trials})
Example #2
File: nixio.py Project: asobolev/neo2nix
    def read_segment(fh, block_id, seg_id):
        def read_multiple(nix_file, obj_type):
            nix_tag = nix_file.blocks[block_id].tags[seg_id]
            objs = filter(lambda x: x.type == obj_type, nix_tag.references)
            read_func = getattr(Reader, 'read_' + obj_type)
            return [read_func(fh, block_id, da.name) for da in objs]

        nix_block = fh.handle.blocks[block_id]
        nix_tag = nix_block.tags[seg_id]

        seg = Segment(name=nix_tag.name)

        for key, value in Reader.Help.read_attributes(nix_tag.metadata, 'segment').items():
            setattr(seg, key, value)

        seg.annotations = Reader.Help.read_annotations(nix_tag.metadata, 'segment')

        setattr(seg, 'analogsignals', ProxyList(fh, lambda f: read_multiple(f, 'analogsignal')))
        setattr(seg, 'irregularlysampledsignals', ProxyList(fh, lambda f: read_multiple(f, 'irregularlysampledsignal')))
        setattr(seg, 'spiketrains', ProxyList(fh, lambda f: read_multiple(f, 'spiketrain')))
        setattr(seg, 'events', ProxyList(fh, lambda f: read_multiple(f, 'event')))
        setattr(seg, 'epochs', ProxyList(fh, lambda f: read_multiple(f, 'epoch')))

        return seg
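Example #2 defers actual reading by storing callables in ProxyList objects, so the NIX file is only touched when an attribute such as seg.analogsignals is first accessed. ProxyList's implementation is not shown here; a minimal, hypothetical stand-in illustrating the pattern could look like this:

class LazyList(object):
    """Hypothetical stand-in for ProxyList: defers loading until first access."""

    def __init__(self, fh, loader):
        self._fh = fh          # open file handle handed to the loader
        self._loader = loader  # e.g. lambda f: read_multiple(f, 'analogsignal')
        self._cache = None

    def _load(self):
        if self._cache is None:
            self._cache = self._loader(self._fh)
        return self._cache

    def __len__(self):
        return len(self._load())

    def __getitem__(self, i):
        return self._load()[i]

    def __iter__(self):
        return iter(self._load())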
Example #3
    def read_segment(self, lazy=False, group=None, reader=None):
        """
        Read a Segment from the file

        :param lazy: Enables lazy reading
        :param group: HDF5 Group representing the segment in NSDF model tree (optional)
        :param reader: NSDFReader instance (optional)
        :return: Read segment
        """
        assert not lazy, 'lazy loading is not supported'

        segment = Segment()
        group, reader = self._select_first_container(group, reader, 'segment')

        if group is None:
            return None

        attrs = group.attrs

        self._read_segment_children(group, reader, segment)

        self._read_container_metadata(attrs, segment)

        return segment
Example #4
    def read_segment(self, lazy=False, cascade=True, group=None, reader=None):
        """
        Read a Segment from the file

        :param lazy: Enables lazy reading
        :param cascade: Read nested objects or not?
        :param group: HDF5 Group representing the segment in NSDF model tree (optional)
        :param reader: NSDFReader instance (optional)
        :return: Read segment
        """
        segment = Segment()
        group, reader = self._select_first_container(group, reader, 'segment')

        if group is None:
            return None

        attrs = group.attrs

        if cascade:
            self._read_segment_children(lazy, group, reader, segment)

        self._read_container_metadata(attrs, segment)

        return segment
Example #5
 def test_annotations(self):
     self.testfilename = self.get_filename_path('nixio_fr_ann.nix')
     with NixIO(filename=self.testfilename, mode='ow') as io:
         annotations = {'my_custom_annotation': 'hello block'}
         bl = Block(**annotations)
         annotations = {'something': 'hello hello000'}
         seg = Segment(**annotations)
         an = AnalogSignal([[1, 2, 3], [4, 5, 6]],
                           units='V',
                           sampling_rate=1 * pq.Hz)
         an.annotations['ansigrandom'] = 'hello chars'
         sp = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
         sp.annotations['railway'] = 'hello train'
         ev = Event(np.arange(0, 30, 10) * pq.s,
                    labels=np.array(['trig0', 'trig1', 'trig2'], dtype='S'))
         ev.annotations['venue'] = 'hello event'
         ev2 = Event(np.arange(0, 30, 10) * pq.s,
                     labels=np.array(['trig0', 'trig1', 'trig2'],
                                     dtype='S'))
         ev2.annotations['evven'] = 'hello ev'
         seg.spiketrains.append(sp)
         seg.events.append(ev)
         seg.events.append(ev2)
         seg.analogsignals.append(an)
         bl.segments.append(seg)
         io.write_block(bl)
         io.close()
     with NixIOfr(filename=self.testfilename) as frio:
         frbl = frio.read_block()
         assert 'my_custom_annotation' in frbl.annotations
         assert 'something' in frbl.segments[0].annotations
         # assert 'ansigrandom' in frbl.segments[0].analogsignals[0].annotations
         assert 'railway' in frbl.segments[0].spiketrains[0].annotations
         assert 'venue' in frbl.segments[0].events[0].annotations
         assert 'evven' in frbl.segments[0].events[1].annotations
     os.remove(self.testfilename)
Example #6
    def read_segment(self,
                     block_index=0,
                     seg_index=0,
                     lazy=False,
                     signal_group_mode=None,
                     load_waveforms=False,
                     time_slice=None,
                     strict_slicing=True):
        """
        :param block_index: int default 0. In case of several block block_index can be specified.

        :param seg_index: int default 0. Index of segment.

        :param lazy: False by default.

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depend IO):
        This control behavior for grouping channels in AnalogSignal.
            * 'split-all': each channel will give an AnalogSignal
            * 'group-by-same-units' all channel sharing the same quantity units ar grouped in
            a 2D AnalogSignal

        :param load_waveforms: False by default. Control SpikeTrains.waveforms is None or not.

        :param time_slice: None by default means no limit.
            A time slice is (t_start, t_stop) both are quantities.
            All object AnalogSignal, SpikeTrain, Event, Epoch will load only in the slice.

        :param strict_slicing: True by default.
             Control if an error is raise or not when one of  time_slice member (t_start or t_stop)
             is outside the real time range of the segment.
        """

        if lazy:
            assert time_slice is None, 'With lazy=True, pass time_slice to load(), not here'

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode

        # annotations
        seg_annotations = dict(
            self.raw_annotations['blocks'][block_index]['segments'][seg_index])
        for k in ('signals', 'units', 'events'):
            seg_annotations.pop(k)
        seg_annotations = check_annotations(seg_annotations)

        seg = Segment(index=seg_index, **seg_annotations)

        # AnalogSignal
        signal_channels = self.header['signal_channels']
        if signal_channels.size > 0:
            channel_indexes_list = self.get_group_channel_indexes()
            for channel_indexes in channel_indexes_list:
                for i, (ind_within,
                        ind_abs) in self._make_signal_channel_subgroups(
                            channel_indexes,
                            signal_group_mode=signal_group_mode).items():
                    # make a proxy...
                    anasig = AnalogSignalProxy(rawio=self,
                                               global_channel_indexes=ind_abs,
                                               block_index=block_index,
                                               seg_index=seg_index)

                    if not lazy:
                        # ... and get the real AnalogSignal if not lazy
                        anasig = anasig.load(time_slice=time_slice,
                                             strict_slicing=strict_slicing)
                        # TODO magnitude_mode='rescaled'/'raw'

                    anasig.segment = seg
                    seg.analogsignals.append(anasig)

        # SpikeTrain and waveforms (optional)
        unit_channels = self.header['unit_channels']
        for unit_index in range(len(unit_channels)):
            # make a proxy...
            sptr = SpikeTrainProxy(rawio=self,
                                   unit_index=unit_index,
                                   block_index=block_index,
                                   seg_index=seg_index)

            if not lazy:
                # ... and get the real SpikeTrain if not lazy
                sptr = sptr.load(time_slice=time_slice,
                                 strict_slicing=strict_slicing,
                                 load_waveforms=load_waveforms)
                # TODO magnitude_mode='rescaled'/'raw'

            sptr.segment = seg
            seg.spiketrains.append(sptr)

        # Events/Epoch
        event_channels = self.header['event_channels']
        for chan_ind in range(len(event_channels)):
            if event_channels['type'][chan_ind] == b'event':
                e = EventProxy(rawio=self,
                               event_channel_index=chan_ind,
                               block_index=block_index,
                               seg_index=seg_index)
                if not lazy:
                    e = e.load(time_slice=time_slice,
                               strict_slicing=strict_slicing)
                e.segment = seg
                seg.events.append(e)
            elif event_channels['type'][chan_ind] == b'epoch':
                e = EpochProxy(rawio=self,
                               event_channel_index=chan_ind,
                               block_index=block_index,
                               seg_index=seg_index)
                if not lazy:
                    e = e.load(time_slice=time_slice,
                               strict_slicing=strict_slicing)
                e.segment = seg
                seg.epochs.append(e)

        seg.create_many_to_one_relationship()
        return seg
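The lazy branch above returns proxy objects whose samples are only fetched when load() is called, matching the time_slice note in the docstring. A minimal usage sketch (the reader class and file name are placeholders, not part of the example):

import quantities as pq

reader = SomeRawIOBasedIO(filename='data.raw')  # hypothetical rawio-backed IO
seg = reader.read_segment(block_index=0, seg_index=0, lazy=True)

# proxies carry only metadata; slice in time when materializing the data
proxy = seg.analogsignals[0]
anasig = proxy.load(time_slice=(0 * pq.s, 10 * pq.s), strict_slicing=False)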
Example #7
    def read_segment(
        self,
        take_ideal_sampling_rate=False,
        lazy=False,
        cascade=True,
    ):
        """
        Arguments:
        """

        header = self.read_header(filename=self.filename)

        #~ print header
        fid = open(self.filename, 'rb')

        seg = Segment(
            file_origin=os.path.basename(self.filename),
            ced_version=str(header.system_id),
        )

        if not cascade:
            return seg

        def addannotations(ob, channelHeader):
            ob.annotate(title=channelHeader.title)
            ob.annotate(physical_channel_index=channelHeader.phy_chan)
            ob.annotate(comment=channelHeader.comment)

        for i in range(header.channels):
            channelHeader = header.channelHeaders[i]

            #~ print 'channel' , i , 'kind' ,  channelHeader.kind

            if channelHeader.kind != 0:
                #~ print '####'
                #~ print 'channel' , i, 'kind' , channelHeader.kind , channelHeader.type , channelHeader.phy_chan
                #~ print channelHeader
                pass

            if channelHeader.kind in [1, 9]:
                #~ print 'analogChanel'
                anaSigs = self.readOneChannelContinuous(
                    fid, i, header, take_ideal_sampling_rate, lazy=lazy)
                #~ print 'nb sigs', len(anaSigs) , ' sizes : ',
                for anaSig in anaSigs:
                    addannotations(anaSig, channelHeader)
                    anaSig.name = str(anaSig.annotations['title'])
                    seg.analogsignals.append(anaSig)
                    #~ print sig.signal.size,
                #~ print ''

            elif channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = self.readOneChannelEventOrSpike(fid, i, header, lazy=lazy)
                if ea is not None:
                    for ev in ea:
                        addannotations(ev, channelHeader)
                        seg.eventarrays.append(ev)

            elif channelHeader.kind in [6, 7]:
                sptr = self.readOneChannelEventOrSpike(fid,
                                                       i,
                                                       header,
                                                       lazy=lazy)
                if sptr is not None:
                    for sp in sptr:
                        addannotations(sp, channelHeader)
                        seg.spiketrains.append(sp)

        fid.close()

        seg.create_many_to_one_relationship()
        return seg
Example #8
    def read_segment(self,
                     blockname=None,
                     lazy=False,
                     cascade=True,
                     sortname=''):
        """
        Read a single segment from the tank. Note that TDT blocks are Neo
        segments, and TDT tanks are Neo blocks, so here the 'blockname' argument
        refers to the TDT block's name, which will be the Neo segment name.

        'sortname' is used to specify the external sortcode generated by offline spike sorting.
        If sortname == 'PLX', there should be a ./sort/PLX/*.SortResult file in the TDT block,
        which stores the sortcode for every spike; sortname defaults to '', which uses the
        original online sort.
        """
        if not blockname:
            blockname = os.listdir(self.dirname)[0]

        if blockname == 'TempBlk': return None

        if not self.is_tdtblock(blockname): return None  # if not a tdt block

        subdir = os.path.join(self.dirname, blockname)
        if not os.path.isdir(subdir): return None

        seg = Segment(name=blockname)

        tankname = os.path.basename(self.dirname)

        #TSQ is the global index
        tsq_filename = os.path.join(subdir,
                                    tankname + '_' + blockname + '.tsq')
        dt = [
            ('size', 'int32'),
            ('evtype', 'int32'),
            ('code', 'S4'),
            ('channel', 'uint16'),
            ('sortcode', 'uint16'),
            ('timestamp', 'float64'),
            ('eventoffset', 'int64'),
            ('dataformat', 'int32'),
            ('frequency', 'float32'),
        ]
        tsq = np.fromfile(tsq_filename, dtype=dt)

        #0x8801: 'EVTYPE_MARK' give the global_start
        global_t_start = tsq[tsq['evtype'] == 0x8801]['timestamp'][0]

        #TEV is the old data file
        try:
            tev_filename = os.path.join(subdir,
                                        tankname + '_' + blockname + '.tev')
            #tev_array = np.memmap(tev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
            tev_array = np.fromfile(tev_filename, dtype='uint8')
        except IOError:
            tev_filename = None

        #if there exists an external sortcode in ./sort/[sortname]/*.SortResult (generated after offline sorting)
        sortresult_filename = None
        if sortname != '':
            try:
                for file in os.listdir(os.path.join(subdir, 'sort', sortname)):
                    if file.endswith(".SortResult"):
                        sortresult_filename = os.path.join(
                            subdir, 'sort', sortname, file)

                        # get new sortcode
                        newsortcode = np.fromfile(sortresult_filename, 'int8')[
                            1024:]  # the first 1024 bytes are the file header
                        # update the sort code with the info from this file
                        tsq['sortcode'][1:-1] = newsortcode
                        # print('sortcode updated')
                        break
            except (OSError, IOError):
                sortresult_filename = None

        for type_code, type_label in tdt_event_type:
            mask1 = tsq['evtype'] == type_code
            codes = np.unique(tsq[mask1]['code'])

            for code in codes:
                mask2 = mask1 & (tsq['code'] == code)
                channels = np.unique(tsq[mask2]['channel'])

                for channel in channels:
                    mask3 = mask2 & (tsq['channel'] == channel)

                    if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                        if lazy:
                            times = [] * pq.s
                            labels = np.array([], dtype=str)
                        else:
                            times = (tsq[mask3]['timestamp'] -
                                     global_t_start) * pq.s
                            labels = tsq[mask3]['eventoffset'].view(
                                'float64').astype('S')
                        ea = Event(times=times,
                                   name=code,
                                   channel_index=int(channel),
                                   labels=labels)
                        if lazy:
                            ea.lazy_shape = np.sum(mask3)
                        seg.events.append(ea)

                    elif type_label == 'EVTYPE_SNIP':
                        sortcodes = np.unique(tsq[mask3]['sortcode'])
                        for sortcode in sortcodes:
                            mask4 = mask3 & (tsq['sortcode'] == sortcode)
                            nb_spike = np.sum(mask4)
                            sr = tsq[mask4]['frequency'][0]
                            waveformsize = tsq[mask4]['size'][0] - 10
                            if lazy:
                                times = [] * pq.s
                                waveforms = None
                            else:
                                times = (tsq[mask4]['timestamp'] -
                                         global_t_start) * pq.s
                                dt = np.dtype(
                                    data_formats[tsq[mask3]['dataformat'][0]])
                                waveforms = get_chunks(
                                    tsq[mask4]['size'],
                                    tsq[mask4]['eventoffset'],
                                    tev_array).view(dt)
                                waveforms = waveforms.reshape(
                                    nb_spike, -1, waveformsize)
                                waveforms = waveforms * pq.mV
                            if nb_spike > 0:
                                #   t_start = (tsq['timestamp'][0] - global_t_start) * pq.s  # this should work but does not
                                t_start = 0 * pq.s
                                t_stop = (tsq['timestamp'][-1] -
                                          global_t_start) * pq.s

                            else:
                                t_start = 0 * pq.s
                                t_stop = 0 * pq.s
                            st = SpikeTrain(
                                times=times,
                                name='Chan{0} Code{1}'.format(
                                    channel, sortcode),
                                t_start=t_start,
                                t_stop=t_stop,
                                waveforms=waveforms,
                                left_sweep=waveformsize / 2. / sr * pq.s,
                                sampling_rate=sr * pq.Hz,
                            )
                            st.annotate(channel_index=channel)
                            if lazy:
                                st.lazy_shape = nb_spike
                            seg.spiketrains.append(st)

                    elif type_label == 'EVTYPE_STREAM':
                        dt = np.dtype(
                            data_formats[tsq[mask3]['dataformat'][0]])
                        shape = np.sum(tsq[mask3]['size'] - 10)
                        sr = tsq[mask3]['frequency'][0]
                        if lazy:
                            signal = []
                        else:
                            if PY3K:
                                signame = code.decode('ascii')
                            else:
                                signame = code
                            sev_filename = os.path.join(
                                subdir, tankname + '_' + blockname + '_' +
                                signame + '_ch' + str(channel) + '.sev')
                            try:
                                #sig_array = np.memmap(sev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                                sig_array = np.fromfile(sev_filename,
                                                        dtype='uint8')
                            except IOError:
                                sig_array = tev_array
                            signal = get_chunks(tsq[mask3]['size'],
                                                tsq[mask3]['eventoffset'],
                                                sig_array).view(dt)

                        anasig = AnalogSignal(
                            signal=signal * pq.V,
                            name='{0} {1}'.format(code, channel),
                            sampling_rate=sr * pq.Hz,
                            t_start=(tsq[mask3]['timestamp'][0] -
                                     global_t_start) * pq.s,
                            channel_index=int(channel))
                        if lazy:
                            anasig.lazy_shape = shape
                        seg.analogsignals.append(anasig)
        return seg
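Example #8 gathers raw bytes with a get_chunks() helper whose implementation is not shown. A plausible sketch, assuming each TSQ 'size' counts 32-bit words including a 10-word record header and 'eventoffset' is a byte offset into the raw array (consistent with the 'size - 10' arithmetic and the .view(dt) calls above):

import numpy as np

def get_chunks_sketch(sizes, offsets, big_array):
    # payload length per record: drop the 10-word header, 4 bytes per word
    nbytes = (sizes - 10) * 4
    chunks = [big_array[o:o + n] for n, o in zip(nbytes, offsets)]
    return np.concatenate(chunks)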
Example #9
    def read_segment(self,
                     import_neuroshare_segment=True,
                     lazy=False,
                     cascade=True):
        """
        Arguments:
            import_neuroshare_segment: import neuroshare segment as SpikeTrain with associated waveforms or not imported at all.

        """
        seg = Segment(file_origin=os.path.basename(self.filename), )

        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        neuroshare = DllWithError(neuroshare)

        #elif sys.platform.startswith('darwin'):

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
        seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.' +
                     str(info.dwAPIVersionMin))

        if not cascade:
            return seg

        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename),
                               ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo),
                                  ctypes.sizeof(fileinfo))

        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo(hFile, dwEntityID,
                                        ctypes.byref(entityInfo),
                                        ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo(hFile, dwEntityID,
                                           ctypes.byref(pEventInfo),
                                           ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0:  #TEXT
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  #CVS
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:  # 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:  # 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:  # 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name=str(entityInfo.szEntityLabel), )
                if not lazy:
                    times = []
                    labels = []
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetEventData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times * pq.s
                    ea.labels = np.array(labels, dtype='S')
                else:
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo(hFile, dwEntityID,
                                            ctypes.byref(pAnalogInfo),
                                            ctypes.sizeof(pAnalogInfo))
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [] * pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    total_read = 0
                    while total_read < entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount -
                                                      total_read)

                        neuroshare.ns_GetAnalogData(
                            hFile, dwEntityID, dwStartIndex, dwStopIndex,
                            ctypes.byref(pdwContCount),
                            pData[total_read:].ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value

                    signal = pq.Quantity(pData,
                                         units=pAnalogInfo.szUnits,
                                         copy=False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex,
                                             ctypes.byref(pdTime))

                anaSig = AnalogSignal(
                    signal,
                    sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                    t_start=pdTime.value * pq.s,
                    name=str(entityInfo.szEntityLabel),
                )
                anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append(anaSig)

            #segment
            if entity_types[
                    entityInfo.
                    dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
                                             ctypes.byref(pdwSegmentInfo),
                                             ctypes.sizeof(pdwSegmentInfo))
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)

                for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo(
                        hFile, dwEntityID, dwSourceID,
                        ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo))

                if lazy:
                    sptr = SpikeTrain([] * pq.s,
                                      name=str(entityInfo.szEntityLabel),
                                      t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount * pdwSegmentInfo.dwSourceCount
                    pData = np.zeros((dwDataBufferSize), dtype='float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID = ctypes.c_uint32(0)

                    nsample = int(dwDataBufferSize)
                    times = np.empty((entityInfo.dwItemCount), dtype='f')
                    waveforms = np.empty(
                        (entityInfo.dwItemCount, nsource, nsample), dtype='f')
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetSegmentData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp),
                            pData.ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                            ctypes.byref(pdwUnitID))

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[
                            dwIndex, :, :] = pData[:nsample * nsource].reshape(
                                nsample, nsource).transpose()

                    sptr = SpikeTrain(
                        times=pq.Quantity(times, units='s', copy=False),
                        t_stop=times.max(),
                        waveforms=pq.Quantity(waveforms,
                                              units=str(
                                                  pdwSegmentInfo.szUnits),
                                              copy=False),
                        left_sweep=nsample / 2. /
                        float(pdwSegmentInfo.dSampleRate) * pq.s,
                        sampling_rate=float(pdwSegmentInfo.dSampleRate) *
                        pq.Hz,
                        name=str(entityInfo.szEntityLabel),
                    )
                seg.spiketrains.append(sptr)

            # neuralevent
            if entity_types[
                    entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
                                            ctypes.byref(pNeuralInfo),
                                            ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [] * pq.s
                    t_stop = 0 * pq.s
                else:
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData(
                        hFile, dwEntityID, dwStartIndex, dwIndexCount,
                        pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData * pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(
                    times,
                    t_stop=t_stop,
                    name=str(entityInfo.szEntityLabel),
                )
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
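DllWithError above is assumed to wrap every neuroshare call with return-code checking, since the raw ctypes functions only report failure through their integer result. A hypothetical sketch of such a wrapper:

class DllWithErrorSketch(object):
    """Hypothetical proxy raising when a wrapped ns_* call returns non-zero."""

    def __init__(self, dll):
        self._dll = dll

    def __getattr__(self, name):
        func = getattr(self._dll, name)

        def checked(*args):
            errno = func(*args)
            if errno != 0:  # neuroshare functions return 0 on success
                raise IOError('%s failed with error code %s' % (name, errno))
            return errno

        return checked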
Example #10
    def read_segment(
        self,
        lazy=False,
        cascade=True,
        delimiter='\t',
        usecols=None,
        skiprows=0,
        timecolumn=None,
        sampling_rate=1. * pq.Hz,
        t_start=0. * pq.s,
        unit=pq.V,
        method='genfromtxt',
    ):
        """
        Arguments:
            delimiter  :  columns delimiter in file  '\t' or one space or two space or ',' or ';'
            usecols : if None take all columns otherwise a list for selected columns
            skiprows : skip n first lines in case they contains header informations
            timecolumn :  None or a valid int that point the time vector
            samplerate : the samplerate of signals if timecolumn is not None this is not take in account
            t_start : time of the first sample
            unit : unit of AnalogSignal can be a str or directly a Quantities

            method :  'genfromtxt' or 'csv' or 'homemade'
                        in case of bugs you can try one of this methods

                        'genfromtxt' use numpy.genfromtxt
                        'csv' use cvs module
                        'homemade' use a intuitive more robust but slow method

        """
        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        if isinstance(sampling_rate, (float, int)):
            # if not a Quantity, Hz by default
            sampling_rate = sampling_rate * pq.Hz

        if isinstance(t_start, (float, int)):
            # if not a Quantity, seconds by default
            t_start = t_start * pq.s

        unit = pq.Quantity(1, unit)

        #loadtxt
        if method == 'genfromtxt':
            sig = np.genfromtxt(self.filename,
                                delimiter=delimiter,
                                usecols=usecols,
                                skip_header=skiprows,
                                dtype='f')
            if len(sig.shape) == 1:
                sig = sig[:, np.newaxis]
        elif method == 'csv':
            tab = [
                l for l in csv.reader(open(self.filename, 'rU'),
                                      delimiter=delimiter)
            ]
            tab = tab[skiprows:]
            sig = np.array(tab, dtype='f')
        elif method == 'homemade':
            fid = open(self.filename, 'rU')
            for l in range(skiprows):
                fid.readline()
            tab = []
            for line in fid.readlines():
                line = line.replace('\r', '')
                line = line.replace('\n', '')
                l = line.split(delimiter)
                while '' in l:
                    l.remove('')
                tab.append(l)
            sig = np.array(tab, dtype='f')

        if timecolumn is not None:
            sampling_rate = 1. / np.mean(np.diff(sig[:, timecolumn])) * pq.Hz
            t_start = sig[0, timecolumn] * pq.s

        for i in range(sig.shape[1]):
            if timecolumn == i: continue
            if usecols is not None and i not in usecols: continue

            if lazy:
                signal = [] * unit
            else:
                signal = sig[:, i] * unit

            anaSig = AnalogSignal(signal,
                                  sampling_rate=sampling_rate,
                                  t_start=t_start,
                                  channel_index=i,
                                  name='Column %d' % i)
            if lazy:
                anaSig.lazy_shape = sig.shape
            seg.analogsignals.append(anaSig)

        create_many_to_one_relationship(seg)
        return seg
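A usage sketch for this reader (the IO class name and file are placeholders): reading a tab-separated file whose first column holds the time stamps, so sampling_rate and t_start are derived from the data:

import quantities as pq

io = AsciiLikeIO(filename='signals.txt')  # hypothetical IO exposing read_segment
seg = io.read_segment(delimiter='\t',
                      skiprows=1,       # one header line
                      timecolumn=0,     # first column is the time vector
                      unit=pq.mV,
                      method='genfromtxt')
for anaSig in seg.analogsignals:
    print(anaSig.name, anaSig.sampling_rate, anaSig.t_start)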
Example #11
    def _read_segment(self, fobject, lazy):
        '''
        Read a single segment with a single analogsignal

        Returns the segment or None if there are no more segments
        '''

        try:
            # float64 -- start time of the AnalogSignal
            t_start = np.fromfile(fobject, dtype=np.float64, count=1)[0]
        except IndexError:
            # if there are no more Segments, return
            return None

        # int16 -- index of the stimulus parameters
        seg_index = np.fromfile(fobject, dtype=np.int16, count=1)[0].tolist()

        # int16 -- number of stimulus parameters
        numelements = np.fromfile(fobject, dtype=np.int16, count=1)[0]

        # read the name strings for the stimulus parameters
        paramnames = []
        for _ in range(numelements):
            # unit8 -- the number of characters in the string
            numchars = np.fromfile(fobject, dtype=np.uint8, count=1)[0]

            # char * numchars -- a single name string
            name = np.fromfile(fobject, dtype=np.uint8, count=numchars)

            # exclude invalid characters
            name = name[name >= 32].tobytes().decode('ascii')

            # add the name to the list of names
            paramnames.append(name)

        # float32 * numelements -- the values for the stimulus parameters
        paramvalues = np.fromfile(fobject, dtype=np.float32, count=numelements)

        # combine parameter names and the parameters as a dict
        params = dict(zip(paramnames, paramvalues))

        # int32 -- the number elements in the AnalogSignal
        numpts = np.fromfile(fobject, dtype=np.int32, count=1)[0]

        # int16 * numpts -- the AnalogSignal itself
        signal = np.fromfile(fobject, dtype=np.int16, count=numpts)

        # handle lazy loading
        if lazy:
            sig = AnalogSignal([], t_start=t_start*pq.d,
                               file_origin=self._filename,
                               sampling_period=1.*pq.s,
                               units=pq.mV,
                               dtype=float)
            sig.lazy_shape = len(signal)
        else:
            sig = AnalogSignal(signal.astype(float)*pq.mV,
                               t_start=t_start*pq.d,
                               file_origin=self._filename,
                               sampling_period=1.*pq.s,
                               copy=False)
        # Note: setting the sampling_period to 1 s is arbitrary

        # load the AnalogSignal and parameters into a new Segment
        seg = Segment(file_origin=self._filename,
                      index=seg_index,
                      **params)
        seg.analogsignals = [sig]

        return seg
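The binary layout parsed above is easiest to see by writing one matching record; the following is purely illustrative and mirrors the np.fromfile calls in _read_segment:

import io
import numpy as np

buf = io.BytesIO()
buf.write(np.float64(0.5).tobytes())               # t_start
buf.write(np.int16(1).tobytes())                   # seg_index
buf.write(np.int16(1).tobytes())                   # number of stimulus parameters
buf.write(np.uint8(6).tobytes())                   # characters in the name string
buf.write(b'Param0')                               # the name string itself
buf.write(np.float32(3.0).tobytes())               # the parameter value
buf.write(np.int32(4).tobytes())                   # number of samples
buf.write(np.arange(4, dtype=np.int16).tobytes())  # the int16 signal

# np.fromfile needs a real file, so dump the bytes to disk before parsing
with open('one_segment.bin', 'wb') as f:
    f.write(buf.getvalue())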
Example #12
    def read_block(
        self,
        lazy=False,
        cascade=True,
        channel_index=None,
        tracking=False,
        tracking_ttl_chan=None,
        stim_ttl_chan=None,
    ):
        """
        Arguments:
            channel_index: can be int, iterable or None to select one, many or
            all channel(s) respectively
            # TODO multiple stimulus channels
        """

        blk = Block()
        if cascade:
            seg = Segment(file_origin=self._path)
            blk.segments += [seg]

            # if channel_index:
            #     if type(channel_index) is int: channel_index = [ channel_index ]
            #     if type(channel_index) is list: channel_index = np.array( channel_index )
            # else:
            #     channel_index = np.arange(0,self._attrs['shape'][1])
            #
            # rcg = RecordingChannelGroup(name='all channels',
            #                      channel_indexes=channel_index)
            # blk.recordingchannelgroups.append(rcg)
            #
            # for idx in channel_index:
            #     # read nested analosignal
            #     ana = self.read_analogsignal(channel_index=idx,
            #                             lazy=lazy,
            #                             cascade=cascade,
            #                              )
            #     chan = RecordingChannel(index=int(idx))
            #     seg.analogsignals += [ ana ]
            #     chan.analogsignals += [ ana ]
            #     rcg.recordingchannels.append(chan)
            seg.duration = (self._attrs['shape'][0] /
                            self._attrs['kwe']['sample_rate']) * pq.s

            if lazy:
                pass
            else:
                if tracking:
                    if tracking_ttl_chan is not None:
                        events, irsigs = self._get_tracking(
                            channel=tracking_ttl_chan, conversion=1)
                        seg.events += [events]
                    else:
                        irsigs = self._get_tracking(channel=tracking_ttl_chan,
                                                    conversion=1)
                    for irsig in irsigs:
                        seg.irregularlysampledsignals += [irsig]
                if stim_ttl_chan is not None:
                    try:
                        for chan in stim_ttl_chan:
                            epo = self._get_stim(channel=chan)
                            seg.epochs += [epo]
                    except TypeError:
                        epo = self._get_stim(channel=stim_ttl_chan)
                        seg.epochs += [epo]

            # neo.tools.populate_RecordingChannel(blk)
        blk.create_many_to_one_relationship()
        return blk
Example #13
    def read_segment(self, lazy=False):
        """

        """
        if lazy:
            raise NotImplementedError("lazy mode not supported")

        seg = Segment(file_origin=os.path.basename(self.filename))

        # loadtxt
        if self.method == 'genfromtxt':
            sig = np.genfromtxt(self.filename,
                                delimiter=self.delimiter,
                                usecols=self.usecols,
                                skip_header=self.skiprows,
                                dtype='f')
            if len(sig.shape) == 1:
                sig = sig[:, np.newaxis]
        elif self.method == 'csv':
            with open(self.filename, 'rU') as fp:
                tab = [l for l in csv.reader(fp, delimiter=self.delimiter)]
            tab = tab[self.skiprows:]
            sig = np.array(tab, dtype='f')
            if self.usecols is not None:
                mask = np.array(self.usecols)
                sig = sig[:, mask]
        elif self.method == 'homemade':
            fid = open(self.filename, 'rU')
            for l in range(self.skiprows):
                fid.readline()
            tab = []
            for line in fid.readlines():
                line = line.replace('\r', '')
                line = line.replace('\n', '')
                parts = line.split(self.delimiter)
                while '' in parts:
                    parts.remove('')
                tab.append(parts)
            sig = np.array(tab, dtype='f')
            if self.usecols is not None:
                mask = np.array(self.usecols)
                sig = sig[:, mask]
        else:
            sig = self.method(self.filename, self.usecols)
            if not isinstance(sig, np.ndarray):
                raise TypeError("method function must return a NumPy array")
            if len(sig.shape) == 1:
                sig = sig[:, np.newaxis]
            elif len(sig.shape) != 2:
                raise ValueError(
                    "method function must return a 1D or 2D NumPy array")

        if self.timecolumn is None:
            sampling_rate = self.sampling_rate
            t_start = self.t_start
        else:
            delta_t = np.diff(sig[:, self.timecolumn])
            mean_delta_t = np.mean(delta_t)
            if (delta_t.max() - delta_t.min()) / mean_delta_t < 1e-6:
                # equally spaced --> AnalogSignal
                sampling_rate = 1.0 / np.mean(np.diff(
                    sig[:, self.timecolumn])) / self.time_units
            else:
                # not equally spaced --> IrregularlySampledSignal
                sampling_rate = None
            t_start = sig[0, self.timecolumn] * self.time_units

        if self.signal_group_mode == 'all-in-one':
            if self.timecolumn is not None:
                mask = list(range(sig.shape[1]))
                if self.timecolumn >= 0:
                    mask.remove(self.timecolumn)
                else:  # allow negative column index
                    mask.remove(sig.shape[1] + self.timecolumn)
                signal = sig[:, mask]
            else:
                signal = sig
            if sampling_rate is None:
                irr_sig = IrregularlySampledSignal(sig[:, self.timecolumn] *
                                                   self.time_units,
                                                   signal * self.units,
                                                   name='multichannel')
                seg.irregularlysampledsignals.append(irr_sig)
            else:
                ana_sig = AnalogSignal(signal * self.units,
                                       sampling_rate=sampling_rate,
                                       t_start=t_start,
                                       channel_index=self.usecols
                                       or np.arange(signal.shape[1]),
                                       name='multichannel')
                seg.analogsignals.append(ana_sig)
        else:
            if self.timecolumn is not None and self.timecolumn < 0:
                time_col = sig.shape[1] + self.timecolumn
            else:
                time_col = self.timecolumn
            for i in range(sig.shape[1]):
                if time_col == i:
                    continue
                signal = sig[:, i] * self.units
                if sampling_rate is None:
                    irr_sig = IrregularlySampledSignal(sig[:, time_col] *
                                                       self.time_units,
                                                       signal,
                                                       t_start=t_start,
                                                       channel_index=i,
                                                       name='Column %d' % i)
                    seg.irregularlysampledsignals.append(irr_sig)
                else:
                    ana_sig = AnalogSignal(signal,
                                           sampling_rate=sampling_rate,
                                           t_start=t_start,
                                           channel_index=i,
                                           name='Column %d' % i)
                    seg.analogsignals.append(ana_sig)

        seg.create_many_to_one_relationship()
        return seg
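The regularity test above tolerates relative jitter in the time stamps below 1e-6 before falling back to IrregularlySampledSignal. The criterion in isolation:

import numpy as np

def is_regular(t, tol=1e-6):
    d = np.diff(t)
    return (d.max() - d.min()) / d.mean() < tol

print(is_regular(np.arange(0., 1., 0.1)))           # True  -> AnalogSignal
print(is_regular(np.array([0.0, 0.1, 0.2, 0.31])))  # False -> IrregularlySampledSignal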
Example #14
    def read_segment(self, lazy=False, cascade=True):

        ## Read header file

        f = open(self.filename + '.ent', 'rU')
        #version
        version = f.readline()
        if version[:2] != 'V2' and version[:2] != 'V3':
            raise VersionError('Read only V2 or V3 .eeg.ent files. %s given' %
                               version[:2])

        #info
        info1 = f.readline()[:-1]
        info2 = f.readline()[:-1]

        # strange 2 line for datetime
        #line1
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        YY, MM, DD, hh, mm, ss = (None, ) * 6
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]

        #line2
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]
        try:
            fulldatetime = datetime.datetime(int(YY), int(MM), int(DD),
                                             int(hh), int(mm), int(ss))
        except (TypeError, ValueError):
            fulldatetime = None

        seg = Segment(
            file_origin=os.path.basename(self.filename),
            elan_version=version,
            info1=info1,
            info2=info2,
            rec_datetime=fulldatetime,
        )

        if not cascade: return seg

        l = f.readline()
        l = f.readline()
        l = f.readline()

        # sampling rate sample
        l = f.readline()
        sampling_rate = 1. / float(l) * pq.Hz

        # nb channel
        l = f.readline()
        nbchannel = int(l) - 2

        #channel label
        labels = []
        for c in range(nbchannel + 2):
            labels.append(f.readline()[:-1])

        # channel type
        types = []
        for c in range(nbchannel + 2):
            types.append(f.readline()[:-1])

        # channel unit
        units = []
        for c in range(nbchannel + 2):
            units.append(f.readline()[:-1])
        #print units

        #range
        min_physic = []
        for c in range(nbchannel + 2):
            min_physic.append(float(f.readline()))
        max_physic = []
        for c in range(nbchannel + 2):
            max_physic.append(float(f.readline()))
        min_logic = []
        for c in range(nbchannel + 2):
            min_logic.append(float(f.readline()))
        max_logic = []
        for c in range(nbchannel + 2):
            max_logic.append(float(f.readline()))

        #info filter
        info_filter = []
        for c in range(nbchannel + 2):
            info_filter.append(f.readline()[:-1])

        f.close()

        #raw data
        n = int(round(np.log(max_logic[0] - min_logic[0]) / np.log(2)) / 8)
        data = np.fromfile(self.filename, dtype='i' + str(n))
        data = data.byteswap().reshape(
            (data.size // (nbchannel + 2), nbchannel + 2)).astype('f4')
        for c in range(nbchannel):
            if lazy:
                sig = []
            else:
                sig = (data[:,c]-min_logic[c])/(max_logic[c]-min_logic[c])*\
                                    (max_physic[c]-min_physic[c])+min_physic[c]

            try:
                unit = pq.Quantity(1, units[c])
            except Exception:
                unit = pq.Quantity(1, '')

            anaSig = AnalogSignal(sig * unit,
                                  sampling_rate=sampling_rate,
                                  t_start=0. * pq.s,
                                  name=labels[c],
                                  channel_index=c)
            if lazy:
                anaSig.lazy_shape = data.shape[0]
            anaSig.annotate(channel_name=labels[c])
            seg.analogsignals.append(anaSig)

        # triggers
        f = open(self.filename + '.pos')
        times = []
        labels = []
        reject_codes = []
        for l in f.readlines():
            r = re.findall(r' *(\d+) *(\d+) *(\d+) *', l)
            times.append(float(r[0][0]) / sampling_rate.magnitude)
            labels.append(str(r[0][1]))
            reject_codes.append(str(r[0][2]))
        if lazy:
            times = [] * pq.s
            labels = np.array([], dtype='S')
            reject_codes = []
        else:
            times = np.array(times) * pq.s
            labels = np.array(labels)
            reject_codes = np.array(reject_codes)
        ea = EventArray(
            times=times,
            labels=labels,
            reject_codes=reject_codes,
        )
        if lazy:
            ea.lazy_shape = len(times)
        seg.eventarrays.append(ea)

        f.close()

        seg.create_many_to_one_relationship()
        return seg
Example #15
def proc_f32(filename):
    '''Load an f32 file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareF32IO to
    make sure BrainwareF32IO is working properly

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    '''

    filenameorig = os.path.basename(filename[:-12] + '.f32')

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    chx = ChannelIndex(file_origin=filenameorig,
                       index=np.array([], dtype=int),
                       channel_names=np.array([], dtype='S'))
    unit = Unit(file_origin=filenameorig)

    # load objects into their containers
    block.channel_indexes.append(chx)
    chx.units.append(unit)

    try:
        with np.load(filename) as f32obj:
            f32file = list(f32obj.items())[0][1].flatten()
    except IOError as exc:
        if 'as a pickle' in str(exc):
            block.create_many_to_one_relationship()
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file['sweeplength']]
    stims = [res.flatten().tolist() for res in f32file['stim']]

    sweeps = [res['spikes'].flatten() for res in f32file['sweep'] if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype('float32')
            else:
                trainpts = []

            paramnames = ['Param%s' % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts,
                               units=pq.ms,
                               t_start=0,
                               t_stop=sweeplength,
                               file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            unit.spiketrains.append(train)
            block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
Example #16
    def read_block(self, lazy=False, **kargs):
        def read(name, type, nb, dictionary, file):

            if type == 'int32':
                # dictionary[name] = int.from_bytes(file.read(4), byteorder=sys.byteorder, signed=True)
                dictionary[name] = struct.unpack("i", file.read(4))[0]
            if type == 'float32':
                dictionary[name] = struct.unpack('f', file.read(4))[0]
            if type == 'uint8':
                l = []
                for i in range(nb):
                    l.append(chr(struct.unpack('B', file.read(1))[0]))
                dictionary[name] = l
            if type == 'uint16':
                l = []
                for i in range(nb):
                    l.append((struct.unpack('H', file.read(2)))[0])
                dictionary[name] = l
            if type == 'short':
                dictionary[name] = struct.unpack('h', file.read(2))[0]

            return dictionary

        def read_header(file_name):

            file = open(file_name, "rb")

            i = [['file_size', 'int32', 1], ['checksum_header', 'int32', 1],
                 ['check_data', 'int32', 1], ['lenheader', 'int32', 1],
                 ['versionid', 'float32', 1], ['filetype', 'int32', 1],
                 ['filesubtype', 'int32', 1], ['datatype', 'int32', 1],
                 ['sizeof', 'int32', 1], ['framewidth', 'int32', 1],
                 ['frameheight', 'int32', 1], ['nframesperstim', 'int32', 1],
                 ['nstimuli', 'int32', 1], ['initialxbinfactor', 'int32', 1],
                 ['initialybinfactor', 'int32', 1], ['xbinfactor', 'int32', 1],
                 ['ybinfactor', 'int32', 1], ['username', 'uint8', 32],
                 ['recordingdate', 'uint8', 16], ['x1roi', 'int32', 1],
                 ['y1roi', 'int32', 1], ['x2roi', 'int32', 1],
                 ['y2roi', 'int32', 1], ['stimoffs', 'int32', 1],
                 ['stimsize', 'int32', 1], ['frameoffs', 'int32', 1],
                 ['framesize', 'int32', 1], ['refoffs', 'int32', 1],
                 ['refsize', 'int32', 1], ['refwidth', 'int32', 1],
                 ['refheight', 'int32', 1], ['whichblocks', 'uint16', 16],
                 ['whichframe', 'uint16', 16], ['loclip', 'int32', 1],
                 ['hiclip', 'int32', 1], ['lopass', 'int32', 1],
                 ['hipass', 'int32', 1], ['operationsperformed', 'uint8', 64],
                 ['magnification', 'float32', 1], ['gain', 'uint16', 1],
                 ['wavelength', 'uint16', 1], ['exposuretime', 'int32', 1],
                 ['nrepetitions', 'int32', 1],
                 ['acquisitiondelay', 'int32', 1],
                 ['interstiminterval', 'int32', 1],
                 ['creationdate', 'uint8', 16], ['datafilename', 'uint8', 64],
                 ['orareserved', 'uint8', 256]]

            dic = {}
            for x in i:
                dic = read(name=x[0],
                           type=x[1],
                           nb=x[2],
                           dictionary=dic,
                           file=file)

            if dic['filesubtype'] == 13:
                i = [["includesrefframe", "int32", 1], ["temp", "uint8", 128],
                     ["ntrials", "int32", 1], ["scalefactors", "int32", 1],
                     ["cameragain", "short", 1], ["ampgain", "short", 1],
                     ["samplingrate", "short", 1], ["average", "short", 1],
                     ["exposuretime", "short", 1],
                     ["samplingaverage", "short", 1],
                     ["presentaverage", "short", 1],
                     ["framesperstim", "short", 1],
                     ["trialsperblock", "short", 1],
                     ["sizeofanalogbufferinframes", "short", 1],
                     ["cameratrials", "short", 1], ["filler", "uint8", 106],
                     ["dyedaqreserved", "uint8", 106]]
                for x in i:
                    dic = read(name=x[0],
                               type=x[1],
                               nb=x[2],
                               dictionary=dic,
                               file=file)
                # not tested
                #  p.listofstimuli=temp(1:max(find(temp~=0)))';  % up to first non-zero stimulus
                temp = np.array([ord(ch) for ch in dic["temp"]])
                dic["listofstimuli"] = dic["temp"][0:np.argwhere(
                    temp != 0).max() + 1]
            else:
                i = [["includesrefframe", "int32", 1],
                     ["listofstimuli", "uint8", 256],
                     ["nvideoframesperdataframe", "int32", 1],
                     ["ntrials", "int32", 1], ["scalefactor", "int32", 1],
                     ["meanampgain", "float32",
                      1], ["meanampdc", "float32", 1],
                     ["vdaqreserved", "uint8", 256]]
                for x in i:
                    dic = read(name=x[0],
                               type=x[1],
                               nb=x[2],
                               dictionary=dic,
                               file=file)
            i = [["user", "uint8", 256], ["comment", "uint8", 256],
                 ["refscalefactor", "int32", 1]]
            for x in i:
                dic = read(name=x[0],
                           type=x[1],
                           nb=x[2],
                           dictionary=dic,
                           file=file)
            dic["actuallength"] = os.stat(file_name).st_size
            file.close()

            return dic

        # start of the reading process
        nblocks = 1
        print("reading the header")
        header = read_header(self.filename)
        nstim = header['nstimuli']
        ni = header['framewidth']
        nj = header['frameheight']
        nfr = header['nframesperstim']
        lenh = header['lenheader']
        framesize = header['framesize']
        filesize = header['file_size']
        dtype = header['datatype']
        gain = header['meanampgain']
        dc = header['meanampdc']
        scalefactor = header['scalefactor']

        # [["dtype", "nbytes", "datatype", "type_out", "struct_type"], [...]]
        l = [[11, 1, "uchar", "uint8", "B"], [12, 2, "ushort", "uint16", "H"],
             [13, 4, "ulong", "uint32", "I"], [14, 4, "float", "single", "f"]]

        for i in l:
            if dtype == i[0]:
                nbytes, datatype, type_out, struct_type = i[1:5]

        if framesize != ni * nj * nbytes:
            print(
                "BAD HEADER!!! framesize does not match framewidth*frameheight*nbytes!"
            )
            framesize = ni * nj * nbytes
        if (filesize - lenh) > (framesize * nfr * nstim):
            nfr2 = nfr + 1
            includesrefframe = True
        else:
            nfr2 = nfr
            includesrefframe = False

        nbin = nblocks
        conds = [i for i in range(1, nstim + 1)]
        ncond = len(conds)
        data = [[[np.zeros((ni, nj, nfr), type_out)] for x in range(ncond)]
                for i in range(nbin)]
        for k in range(1, nbin + 1):
            print("reading block")
            bin = np.arange(math.floor((k - 1) / nbin * nblocks) + 1,
                            math.floor(k / nbin * nblocks) + 1)
            sbin = bin.size
            for j in range(1, sbin + 1):
                file = open(self.filename, 'rb')
                for i in range(1, ncond + 1):

                    framestart = conds[i - 1] * nfr2 - nfr
                    offset = framestart * ni * nj * nbytes + lenh
                    file.seek(offset, 0)

                    a = [(struct.unpack(struct_type, file.read(nbytes)))[0]
                         for m in range(ni * nj * nfr)]
                    a = np.reshape(np.array(a, dtype=type_out, order='F'),
                                   (ni * nj, nfr),
                                   order='F')
                    a = np.reshape(a, (ni, nj, nfr), order='F')

                    if includesrefframe:
                        # not tested; reconstructed: prepend the reference
                        # frame, then rescale the data frames
                        framestart = (conds[i - 1] - 1) * nfr2
                        offset = framestart * ni * nj * nbytes + lenh

                        file.seek(offset)

                        ref = [(struct.unpack(struct_type,
                                              file.read(nbytes)))[0]
                               for m in range(ni * nj)]
                        ref = np.array(ref, dtype=type_out)
                        ref = np.reshape(ref * scalefactor, (ni, nj))
                        rescaled = (a / gain) - (scalefactor * dc / gain)
                        a = np.concatenate((ref[:, :, np.newaxis], rescaled),
                                           axis=2)
                    if sbin == 1:
                        data[k - 1][i - 1] = a
                    else:
                        # not tested; accumulate the average across bins
                        data[k - 1][i - 1] = data[k - 1][i - 1] + a / sbin

                file.close()

        # data format [block][stim][width][height][frame]]
        # data structure should be [block][stim][frame][width][height] in order to be easy to use with neo
        # each file is a block
        # each stim could be a segment
        # then an image sequence [frame][width][height]
        # image need to be rotated

        # changing order of data for compatibility
        # [block][stim][width][height][frame]]
        # to
        # [block][stim][frame][width][height]

        for block in range(len(data)):
            for stim in range(len(data[block])):
                a = []
                for frame in range(header['nframesperstim']):
                    a.append([])
                    for width in range(len(data[block][stim])):
                        a[frame].append([])
                        for height in range(len(data[block][stim][width])):
                            a[frame][width].append(
                                data[block][stim][width][height][frame])
                    # rotation of data to be the same as thomas deneux screenshot
                    a[frame] = np.rot90(np.fliplr(a[frame]))
                data[block][stim] = a

        block = Block(file_origin=self.filename)
        for key in header:
            block.annotations[key] = header[key]
        for stim in range(len(data[0])):
            image_sequence = ImageSequence(data[0][stim],
                                           units=self.units,
                                           sampling_rate=self.sampling_rate,
                                           spatial_scale=self.spatial_scale)
            segment = Segment(file_origin=self.filename,
                              description=("stim nb:" + str(stim)))
            segment.imagesequences = [image_sequence]
            segment.block = block
            block.segments.append(segment)

        print("returning block")

        return block
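A hedged sketch of consuming the Block returned above, assuming io is an instance of the enclosing reader class (its name is not shown in this excerpt). Each stimulus becomes one Segment holding a single ImageSequence ordered as [frame][width][height]:

block = io.read_block()  # io is an assumed reader instance
for seg in block.segments:
    imgseq = seg.imagesequences[0]
    print(seg.description, imgseq.shape, imgseq.sampling_rate)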
Example #17
    def read_segment(
        self,
        cascade=True,
        lazy=False,
    ):
        """
        Arguments:
        """
        f = struct_file(self.filename, 'rb')

        # Name
        f.seek(64, 0)
        surname = f.read(22).rstrip(' ')
        firstname = f.read(20).rstrip(' ')

        #Date
        f.seek(128, 0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
                                         sec)

        f.seek(138, 0)
        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
            'IHHHH')
        #~ print Num_Chan, Bytes

        #header version
        f.seek(175, 0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(
            name=firstname + ' ' + surname,
            file_origin=os.path.basename(self.filename),
        )
        seg.annotate(surname=surname)
        seg.annotate(firstname=firstname)
        seg.annotate(rec_datetime=rec_datetime)

        if not cascade:
            return seg

        # area
        f.seek(176, 0)
        zone_names = [
            'ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B', 'IMPED_E',
            'MONTAGE', 'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
            'EVENT B', 'TRIGGER'
        ]
        zones = {}
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset, 0)
            rawdata = np.frombuffer(f.read(), dtype='u' + str(Bytes))
            rawdata = rawdata.reshape((rawdata.size // Num_Chan, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos, 0)
        code = np.fromfile(f, dtype='u2', count=Num_Chan)

        units = {
            -1: pq.nano * pq.V,
            0: pq.uV,
            1: pq.mV,
            2: 1,
            100: pq.percent,
            101: pq.dimensionless,
            102: pq.dimensionless
        }

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos + code[c] * 128 + 2, 0)

            label = f.read(6).strip("\x00")
            ground = f.read(6).strip("\x00")
            logical_min, logical_max, logical_ground, physical_min, physical_max = f.read_f(
                'iiiii')
            k, = f.read_f('h')
            if k in units.keys():
                unit = units[k]
            else:
                unit = pq.uV

            f.seek(8, 1)
            sampling_rate, = f.read_f('H')
            sampling_rate = sampling_rate * Rate_Min * pq.Hz

            if lazy:
                signal = [] * unit
            else:
                factor = float(physical_max -
                               physical_min) / float(logical_max -
                                                     logical_min + 1)
                signal = (rawdata[:, c].astype('f') -
                          logical_ground) * factor * unit

            anaSig = AnalogSignal(signal,
                                  sampling_rate=sampling_rate,
                                  name=label,
                                  channel_index=c)
            if lazy:
                anaSig.lazy_shape = None
            anaSig.annotate(ground=ground)

            seg.analogsignals.append(anaSig)

        sampling_rate = np.mean(
            [anaSig.sampling_rate for anaSig in seg.analogsignals]) * pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            triggers = np.frombuffer(
                f.read(length),
                dtype=[('pos', 'u4'), ('label', label_dtype)],
            )
            ea = EventArray(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = ((triggers['pos'] >= triggers['pos'][0])
                        & (triggers['pos'] < rawdata.shape[0])
                        & (triggers['pos'] != 0))
                triggers = triggers[keep]
                ea.labels = triggers['label'].astype('S')
                ea.times = (triggers['pos'] / sampling_rate).rescale('s')
            else:
                ea.lazy_shape = triggers.size
            seg.eventarrays.append(ea)

        # Read Event A and B
        # Not so well tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            epochs = np.frombuffer(f.read(length),
                                   dtype=[
                                       ('label', 'u4'),
                                       ('start', 'u4'),
                                       ('stop', 'u4'),
                                   ])
            ep = EpochArray(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = ((epochs['start'] > 0)
                        & (epochs['start'] < rawdata.shape[0])
                        & (epochs['stop'] < rawdata.shape[0]))
                epochs = epochs[keep]
                ep.labels = epochs['label'].astype('S')
                ep.times = (epochs['start'] / sampling_rate).rescale('s')
                ep.durations = ((epochs['stop'] - epochs['start']) /
                                sampling_rate).rescale('s')
            else:
                ep.lazy_shape = epochs.size
            seg.epocharrays.append(ep)

        seg.create_many_to_one_relationship()
        return seg
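The zone table (ORDER, LABCOD, TRIGGER, ...) matches the Micromed TRC layout, so a plausible driver is sketched below; the class name MicromedIO is an assumption about the enclosing class:

io = MicromedIO(filename='recording.TRC')  # class name assumed
seg = io.read_segment(cascade=True, lazy=False)
for anasig in seg.analogsignals:
    print(anasig.name, anasig.sampling_rate, len(anasig))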
Example #18
    def read_block(self, lazy=False):
        """Returns a Block containing spike information.

        There is no obvious way to infer the segment boundaries from
        raw spike times, so for now all spike times are returned in one
        big segment. The way around this would be to specify the segment
        boundaries, and then change this code to put the spikes in the right
        segments.
        """
        assert not lazy, 'Do not support lazy'

        # Create block and segment to hold all the data
        block = Block()
        # Search data directory for KlustaKwik files.
        # If nothing found, return empty block
        self._fetfiles = self._fp.read_filenames('fet')
        self._clufiles = self._fp.read_filenames('clu')
        if len(self._fetfiles) == 0:
            return block

        # Create a single segment to hold all of the data
        seg = Segment(name='seg0', index=0, file_origin=self.filename)
        block.segments.append(seg)

        # Load spike times from each group and store in a dict, keyed
        # by group number
        self.spiketrains = dict()
        for group in sorted(self._fetfiles.keys()):
            # Load spike times
            fetfile = self._fetfiles[group]
            spks, features = self._load_spike_times(fetfile)

            # Load cluster ids or generate
            if group in self._clufiles:
                clufile = self._clufiles[group]
                uids = self._load_unit_id(clufile)
            else:
                # unclustered data, assume all zeros
                uids = np.zeros(spks.shape, dtype=np.int32)

            # error check
            if len(spks) != len(uids):
                raise ValueError("lengths of fet and clu files are different")

            # Create Group for each cluster
            unique_unit_ids = np.unique(uids)
            for unit_id in sorted(unique_unit_ids):
                # Initialize the unit
                u = Group(name=('unit %d from group %d' % (unit_id, group)),
                          index=unit_id, group=group)

                # Initialize a new SpikeTrain for the spikes from this unit
                st = SpikeTrain(
                    times=spks[uids == unit_id] / self.sampling_rate,
                    units='sec', t_start=0.0,
                    t_stop=spks.max() / self.sampling_rate,
                    name=('unit %d from group %d' % (unit_id, group)))
                st.annotations['cluster'] = unit_id
                st.annotations['group'] = group

                # put features in
                if len(features) != 0:
                    st.annotations['waveform_features'] = features

                # Link
                u.add(st)
                seg.spiketrains.append(st)

        block.create_many_to_one_relationship()
        return block
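A usage sketch, assuming the class above is a KlustaKwik-style IO whose constructor accepts the filename and sampling_rate referenced in the method body:

io = KlustaKwikIO(filename='data/experiment1', sampling_rate=30000.)  # names assumed
block = io.read_block()
for st in block.segments[0].spiketrains:
    print(st.name, len(st))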
Example #19
def proc_src_condition(rep, filename, ADperiod, side, block):
    '''Get the condition in a src file that has been processed by the official
    matlab function.  See proc_src for details'''

    rcg = block.recordingchannelgroups[0]

    # bail out before touching the record's fields if it is empty
    if not len(rep):
        return

    stim = rep['stim'].flatten()
    params = [str(res[0]) for res in stim['paramName'][0].flatten()]
    values = [res for res in stim['paramVal'][0].flatten()]
    stim = dict(zip(params, values))
    sweepLen = rep['sweepLen'][0, 0]

    unassignedSpikes = rep['unassignedSpikes'].flatten()
    if len(unassignedSpikes):
        damaIndexes = [res[0, 0] for res in unassignedSpikes['damaIndex']]
        timeStamps = [res[0, 0] for res in unassignedSpikes['timeStamp']]
        spikeunit = [res.flatten() for res in unassignedSpikes['spikes']]
        respWin = np.array([], dtype=np.int32)
        trains = proc_src_condition_unit(spikeunit, sweepLen, side, ADperiod,
                                         respWin, damaIndexes, timeStamps,
                                         filename)
        rcg.units[0].spiketrains.extend(trains)
        atrains = [trains]
    else:
        damaIndexes = []
        timeStamps = []
        atrains = []

    clusters = rep['clusters'].flatten()
    if len(clusters):
        IdStrings = [res[0] for res in clusters['IdString']]
        sweepLens = [res[0, 0] for res in clusters['sweepLen']]
        respWins = [res.flatten() for res in clusters['respWin']]
        spikeunits = []
        for cluster in clusters['sweeps']:
            if len(cluster):
                spikes = [res.flatten() for res in cluster['spikes'].flatten()]
            else:
                spikes = []
            spikeunits.append(spikes)
    else:
        IdStrings = []
        sweepLens = []
        respWins = []
        spikeunits = []

    for unit, IdString in zip(rcg.units[1:], IdStrings):
        unit.name = str(IdString)

    fullunit = zip(spikeunits, rcg.units[1:], sweepLens, respWins)
    for spikeunit, unit, sweepLen, respWin in fullunit:
        trains = proc_src_condition_unit(spikeunit, sweepLen, side, ADperiod,
                                         respWin, damaIndexes, timeStamps,
                                         filename)
        atrains.append(trains)
        unit.spiketrains.extend(trains)

    atrains = zip(*atrains)
    for trains in atrains:
        segment = Segment(file_origin=filename,
                          feature_type=-1,
                          go_by_closest_unit_center=False,
                          include_unit_bounds=False,
                          **stim)
        block.segments.append(segment)
        segment.spiketrains = trains
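The closing zip(*atrains) transposes the per-unit lists of trains into per-sweep tuples, so each Segment receives one SpikeTrain per unit. A toy illustration of that reshaping, with plain strings standing in for trains:

unit_a = ['a0', 'a1']  # trains of unit a for sweeps 0 and 1
unit_b = ['b0', 'b1']  # trains of unit b for sweeps 0 and 1
per_sweep = list(zip(unit_a, unit_b))
assert per_sweep == [('a0', 'b0'), ('a1', 'b1')]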
Example #20
    def read_block(
            self,
            # the 2 first keyword arguments are imposed by neo.io API
            lazy=False,
            cascade=True):
        """
        Return a Block.

        """
        def count_samples(m_length):
            """
            Count the number of signal samples available in a type 5 data block
            of length m_length

            """

            # for information about type 5 data block, see [1]
            count = int((m_length - 6) / 2 - 2)
            # -6 corresponds to the header of block 5, and the -2 take into
            # account the fact that last 2 values are not available as the 4
            # corresponding bytes are coding the time stamp of the beginning
            # of the block
            return count

        # create the neo Block that will be returned at the end
        blck = Block(file_origin=os.path.basename(self.filename))
        blck.file_origin = os.path.basename(self.filename)

        fid = open(self.filename, 'rb')

        # NOTE: in the following, the word "block" is used in the sense used in
        # the alpha-omega specifications (ie a data chunk in the file), rather
        # than in the sense of the usual Block object in neo

        # step 1: read the headers of all the data blocks to load the file
        # structure

        pos_block = 0  # position of the current block in the file
        file_blocks = []  # list of data blocks available in the file

        if not cascade:
            # we read only the main header

            m_length, m_TypeBlock = struct.unpack('Hcx', fid.read(4))
            # m_TypeBlock should be 'h', as we read the first block
            block = HeaderReader(
                fid, dict_header_type.get(m_TypeBlock, Type_Unknown)).read_f()
            block.update({
                'm_length': m_length,
                'm_TypeBlock': m_TypeBlock,
                'pos': pos_block
            })
            file_blocks.append(block)

        else:  # cascade == True

            seg = Segment(file_origin=os.path.basename(self.filename))
            seg.file_origin = os.path.basename(self.filename)
            blck.segments.append(seg)

            while True:
                first_4_bytes = fid.read(4)
                if len(first_4_bytes) < 4:
                    # we have reached the end of the file
                    break
                else:
                    m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)

                block = HeaderReader(
                    fid, dict_header_type.get(m_TypeBlock,
                                              Type_Unknown)).read_f()
                block.update({
                    'm_length': m_length,
                    'm_TypeBlock': m_TypeBlock,
                    'pos': pos_block
                })

                if m_TypeBlock == '2':
                    # The beginning of the block of type '2' is identical for
                    # all types of channels, but the following part depends on
                    # the type of channel. So we need a special case here.

                    # WARNING: How to check the type of channel is not
                    # described in the documentation. So here I use what is
                    # proposed in the C code [2].
                    # According to this C code, it seems that the 'm_isAnalog'
                    # is used to distinguished analog and digital channels, and
                    # 'm_Mode' encodes the type of analog channel:
                    # 0 for continuous, 1 for level, 2 for external trigger.
                    # But in some files, I found channels that seemed to be
                    # continuous channels with 'm_Modes' = 128 or 192. So I
                    # decided to consider every channel with 'm_Modes'
                    # different from 1 or 2 as continuous. I also couldn't
                    # check that values of 1 and 2 are really for level and
                    # external trigger as I had no test files containing data
                    # of this types.

                    type_subblock = 'unknown_channel_type(m_Mode=' \
                                    + str(block['m_Mode']) + ')'
                    description = Type2_SubBlockUnknownChannels
                    block.update({'m_Name': 'unknown_name'})
                    if block['m_isAnalog'] == 0:
                        # digital channel
                        type_subblock = 'digital'
                        description = Type2_SubBlockDigitalChannels
                    elif block['m_isAnalog'] == 1:
                        # analog channel
                        if block['m_Mode'] == 1:
                            # level channel
                            type_subblock = 'level'
                            description = Type2_SubBlockLevelChannels
                        elif block['m_Mode'] == 2:
                            # external trigger channel
                            type_subblock = 'external_trigger'
                            description = Type2_SubBlockExtTriggerChannels
                        else:
                            # continuous channel
                            type_subblock = 'continuous(Mode' \
                                            + str(block['m_Mode']) + ')'
                            description = Type2_SubBlockContinuousChannels

                    subblock = HeaderReader(fid, description).read_f()

                    block.update(subblock)
                    block.update({'type_subblock': type_subblock})

                file_blocks.append(block)
                pos_block += m_length
                fid.seek(pos_block)

            # step 2: find the available channels
            list_chan = []  # list containing indexes of channel blocks
            for ind_block, block in enumerate(file_blocks):
                if block['m_TypeBlock'] == '2':
                    list_chan.append(ind_block)

            # step 3: find blocks containing data for the available channels
            list_data = []  # list of lists of indexes of data blocks
            # corresponding to each channel
            for ind_chan, chan in enumerate(list_chan):
                list_data.append([])
                num_chan = file_blocks[chan]['m_numChannel']
                for ind_block, block in enumerate(file_blocks):
                    if block['m_TypeBlock'] == '5':
                        if block['m_numChannel'] == num_chan:
                            list_data[ind_chan].append(ind_block)

            # step 4: compute the length (number of samples) of the channels
            chan_len = np.zeros(len(list_data), dtype=int)
            for ind_chan, list_blocks in enumerate(list_data):
                for ind_block in list_blocks:
                    chan_len[ind_chan] += count_samples(
                        file_blocks[ind_block]['m_length'])

            # step 5: find channels for which data are available
            ind_valid_chan = np.nonzero(chan_len)[0]

            # step 6: load the data
            # TODO give the possibility to load data as AnalogSignalArrays
            for ind_chan in ind_valid_chan:
                list_blocks = list_data[ind_chan]
                ind = 0  # index in the data vector

                # read time stamp for the beginning of the signal
                form = '<l'  # reading format
                ind_block = list_blocks[0]
                count = count_samples(file_blocks[ind_block]['m_length'])
                fid.seek(file_blocks[ind_block]['pos'] + 6 + count * 2)
                buf = fid.read(struct.calcsize(form))
                val = struct.unpack(form, buf)
                start_index = val[0]

                # WARNING: in the following, blocks are read assuming that they
                # are all contiguous and sorted in time. I don't know if it's
                # always the case. Maybe we should use the time stamp of each
                # data block to choose where to put the read data in the array.
                if not lazy:
                    temp_array = np.empty(chan_len[ind_chan], dtype=np.int16)
                    # NOTE: we could directly create an empty AnalogSignal and
                    # load the data in it, but it is much faster to load data
                    # in a temporary numpy array and create the AnalogSignals
                    # from this temporary array
                    for ind_block in list_blocks:
                        count = count_samples(
                            file_blocks[ind_block]['m_length'])
                        fid.seek(file_blocks[ind_block]['pos'] + 6)
                        temp_array[ind:ind + count] = \
                            np.fromfile(fid, dtype=np.int16, count=count)
                        ind += count

                sampling_rate = \
                    file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
                t_start = (start_index / sampling_rate).simplified
                if lazy:
                    ana_sig = AnalogSignal([],
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)
                    ana_sig.lazy_shape = chan_len[ind_chan]
                else:
                    ana_sig = AnalogSignal(temp_array,
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)

                ana_sig.channel_index = \
                            file_blocks[list_chan[ind_chan]]['m_numChannel']
                ana_sig.annotate(channel_name = \
                            file_blocks[list_chan[ind_chan]]['m_Name'])
                ana_sig.annotate(channel_type = \
                            file_blocks[list_chan[ind_chan]]['type_subblock'])
                seg.analogsignals.append(ana_sig)

        fid.close()

        if file_blocks[0]['m_TypeBlock'] == 'h':  # this should always be true
            blck.rec_datetime = datetime.datetime(\
                file_blocks[0]['m_date_year'],
                file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'],
                file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
            # the 10000 is here to convert m_time_hsecond from centisecond
            # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version=version)
            if cascade:
                seg.rec_datetime = blck.rec_datetime.replace()
                # I couldn't find a simple copy function for datetime,
                # using replace without arguments is a twisted way to make a
                # copy
                seg.annotate(alphamap_version=version)
        if cascade:
            populate_RecordingChannel(blck, remove_from_annotation=True)
            blck.create_many_to_one_relationship()

        return blck
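The count_samples helper encodes the type-5 block layout described in its comments: a 6-byte header, 2-byte samples, and a trailing 4-byte timestamp occupying the space of the last two values. A quick arithmetic check:

def count_samples(m_length):
    return int((m_length - 6) / 2 - 2)

# a block carrying 100 samples: 6 (header) + 100 * 2 (data) + 4 (timestamp)
assert count_samples(6 + 100 * 2 + 4) == 100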
Example #21
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """
        Read in a segment.

        Arguments:
            load_spike_waveform : load or not waveform of spikes (default True)

        """

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadatas
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader.pop('Year'),
            globalHeader.pop('Month'),
            globalHeader.pop('Day'),
            globalHeader.pop('Hour'),
            globalHeader.pop('Minute'),
            globalHeader.pop('Second')
        )
        seg.file_origin = os.path.basename(self.filename)

        for key, val in iteritems(globalHeader):
            seg.annotate(**{key: val})

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader['NumDSPChannels']):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
            channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
            dspChannelHeaders[channelHeader['Channel']] = channelHeader
            maxunit = max(channelHeader['NUnits'], maxunit)
            maxchan = max(channelHeader['Channel'], maxchan)

        # event channel header
        eventHeaders = { }
        for _ in range(globalHeader['NumEventChannels']):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader['Channel']] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader['NumSlowChannels']):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders), dtype='i8')
        sample_positions = np.zeros(len(slowChannelHeaders), dtype='i8')
        t_starts = np.zeros(len(slowChannelHeaders), dtype='f')

        #spiketimes and waveform
        nb_spikes = np.zeros((maxchan+1, maxunit+1) ,dtype='i')
        wf_sizes = np.zeros((maxchan+1, maxunit+1, 2) ,dtype='i')

        # eventarrays
        nb_events = { }
        #maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            #maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader['Channel']
            unit = dataBlockHeader['Unit']
            n1, n2 = (dataBlockHeader['NumberOfWaveforms'],
                      dataBlockHeader['NumberOfWordsInWaveform'])
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32 +
                    dataBlockHeader['TimeStamp'])

            if dataBlockHeader['Type'] == 1:
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader['Type'] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                # continuous signal: note t_start before counting samples,
                # converting the 5-byte timestamp from ticks to seconds
                if nb_samples[chan] == 0:
                    t_starts[chan] = time / globalHeader['ADFrequency']
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2
                    

        ## Step 3: allocating memory and 2 loop for reading if not lazy
        if not lazy:
            # allocating mem for signal
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit], dtype='f')
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros((nb_spikes[chan, unit], n1, n2), dtype='f4')
            pos_spikes = np.zeros(nb_spikes.shape, dtype='i')
                    
            # allocating mem for event
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = {
                    'times': np.zeros(nb, dtype='f'),
                    'labels': np.zeros(nb, dtype='S4')
                }
                eventpositions[chan] = 0
                
            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader['Channel']
                n1, n2 = (dataBlockHeader['NumberOfWaveforms'],
                          dataBlockHeader['NumberOfWordsInWaveform'])
                time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32
                        + dataBlockHeader['TimeStamp'])
                time /= globalHeader['ADFrequency']

                if n2 < 0:
                    break
                if dataBlockHeader['Type'] == 1:
                    # spike
                    unit = dataBlockHeader['Unit']
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = np.frombuffer(
                            fid.read(n1 * n2 * 2),
                            dtype='i2').reshape(n1, n2).astype('f4')
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1
                
                elif dataBlockHeader['Type'] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan]['times'][pos] = time
                    evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
                    eventpositions[chan] += 1

                elif dataBlockHeader['Type'] == 5:
                    # signal
                    data = np.frombuffer(fid.read(n2 * 2),
                                         dtype='i2').astype('f4')
                    pos = sample_positions[chan]
                    sigarrays[chan][pos:pos + data.size] = data
                    sample_positions[chan] += data.size


        ## Step 4: create neo object
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
                labels = None
            else:
                times = evarrays[chan]['times']
                labels = evarrays[chan]['labels']
            ea = EventArray(
                times*pq.s,
                labels=labels,
                channel_name=eventHeaders[chan]['Name'],
                channel_index=chan
            )
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

            
        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = [ ]
            else:
                if globalHeader['Version'] == 100 or globalHeader['Version'] == 101:
                    gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain'] * 1000.)
                elif globalHeader['Version'] == 102:
                    gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain']
                                    * slowChannelHeaders[chan]['PreampGain'])
                elif globalHeader['Version'] >= 103:
                    gain = (globalHeader['SlowMaxMagnitudeMV']
                            / (.5 * (2 ** globalHeader['BitsPerSpikeSample'])
                               * slowChannelHeaders[chan]['Gain']
                               * slowChannelHeaders[chan]['PreampGain']))
                signal = sigarrays[chan]*gain
            anasig =  AnalogSignal(signal*pq.V,
                sampling_rate = float(slowChannelHeaders[chan]['ADFreq'])*pq.Hz,
                t_start = t_starts[chan]*pq.s,
                channel_index = slowChannelHeaders[chan]['Channel'],
                channel_name = slowChannelHeaders[chan]['Name'],
            )
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)
            
        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan,unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader['Version'] < 103:
                        gain = 3000. / (2048 * dspChannelHeaders[chan]['Gain'] * 1000.)
                    elif 103 <= globalHeader['Version'] < 105:
                        gain = globalHeader['SpikeMaxMagnitudeMV'] / (
                            .5 * 2. ** globalHeader['BitsPerSpikeSample'] * 1000.)
                    elif globalHeader['Version'] >= 105:
                        gain = globalHeader['SpikeMaxMagnitudeMV'] / (
                            .5 * 2. ** globalHeader['BitsPerSpikeSample']
                            * globalHeader['SpikePreAmpGain'])
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(
                times,
                units='s', 
                t_stop=t_stop*pq.s,
                waveforms=waveforms
            )
            sptr.annotate(unit_name=dspChannelHeaders[chan]['Name'])
            sptr.annotate(channel_index=chan)
            for key, val in iteritems(dspChannelHeaders[chan]):
                sptr.annotate(**{key: val})

            if lazy:
                sptr.lazy_shape = nb_spikes[chan,unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
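The data-block timestamps above are 5 bytes wide: an upper byte shifted by 2**32 plus a 4-byte lower part, divided by ADFrequency to obtain seconds. A worked check of that reconstruction:

upper_byte, timestamp, ad_frequency = 1, 1000, 40000.0
ticks = upper_byte * 2. ** 32 + timestamp
seconds = ticks / ad_frequency
assert seconds == (2 ** 32 + 1000) / 40000.0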
Example #22
    def test_roundtrip_with_annotations(self):
        # test with NWB-specific annotations

        original_block = Block(name="experiment",
                               session_start_time=datetime.now())
        segment = Segment(name="session 1")
        original_block.segments.append(segment)
        segment.block = original_block

        electrode_annotations = {
            "name": "electrode #1",
            "description": "intracellular electrode",
            "device": {
                "name": "electrode #1"
            }
        }
        stimulus_annotations = {
            "nwb_group": "stimulus",
            "nwb_neurodata_type":
            ("pynwb.icephys", "CurrentClampStimulusSeries"),
            "nwb_electrode": electrode_annotations,
            "nwb:sweep_number": 1,
            "nwb:gain": 1.0
        }
        response_annotations = {
            "nwb_group": "acquisition",
            "nwb_neurodata_type": ("pynwb.icephys", "CurrentClampSeries"),
            "nwb_electrode": electrode_annotations,
            "nwb:sweep_number": 1,
            "nwb:gain": 1.0,
            "nwb:bias_current": 1e-12,
            "nwb:bridge_balance": 70e6,
            "nwb:capacitance_compensation": 1e-12
        }
        stimulus = AnalogSignal(np.random.randn(100, 1) * pq.nA,
                                sampling_rate=5 * pq.kHz,
                                t_start=50 * pq.ms,
                                name="stimulus",
                                **stimulus_annotations)
        response = AnalogSignal(np.random.randn(100, 1) * pq.mV,
                                sampling_rate=5 * pq.kHz,
                                t_start=50 * pq.ms,
                                name="response",
                                **response_annotations)
        segment.analogsignals = [stimulus, response]
        stimulus.segment = response.segment = segment

        test_file_name = "test_round_trip_with_annotations.nwb"
        iow = NWBIO(filename=test_file_name, mode='w')
        iow.write_all_blocks([original_block])

        nwbfile = pynwb.NWBHDF5IO(test_file_name, mode="r").read()

        self.assertIsInstance(nwbfile.acquisition["response"],
                              pynwb.icephys.CurrentClampSeries)
        self.assertIsInstance(nwbfile.stimulus["stimulus"],
                              pynwb.icephys.CurrentClampStimulusSeries)
        self.assertEqual(nwbfile.acquisition["response"].bridge_balance,
                         response_annotations["nwb:bridge_balance"])

        ior = NWBIO(filename=test_file_name, mode='r')
        retrieved_block = ior.read_all_blocks()[0]

        original_response = original_block.segments[0].filter(
            name="response")[0]
        retrieved_response = retrieved_block.segments[0].filter(
            name="response")[0]
        for attr_name in ("name", "units", "sampling_rate", "t_start"):
            retrieved_attribute = getattr(retrieved_response, attr_name)
            original_attribute = getattr(original_response, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(retrieved_response.magnitude,
                           original_response.magnitude)

        os.remove(test_file_name)
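The annotations above follow a convention in which keys prefixed with nwb: map onto constructor arguments of the pynwb series (nwb:gain -> gain, and so on). The sketch below illustrates that kind of key translation; it shows the convention only and is not NWBIO's actual implementation:

annotations = {"nwb:gain": 1.0, "nwb:sweep_number": 1, "name": "stimulus"}
nwb_kwargs = {key.split(":", 1)[1]: value
              for key, value in annotations.items()
              if key.startswith("nwb:")}
assert nwb_kwargs == {"gain": 1.0, "sweep_number": 1}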
Example #23
    def read_block(
        self,
        lazy=False,
        cascade=True,
    ):
        bl = Block(file_origin=os.path.basename(self.filename), )
        if not cascade:
            return bl

        fid = open(self.filename, 'rb')

        headertext = fid.read(1024)
        if PY3K:
            headertext = headertext.decode('ascii')
        header = {}
        for line in headertext.split('\r\n'):
            if '=' not in line:
                continue
            #print '#' , line , '#'
            key, val = line.split('=')
            if key in [
                    'NC',
                    'NR',
                    'NBH',
                    'NBA',
                    'NBD',
                    'ADCMAX',
                    'NP',
                    'NZ',
            ]:
                val = int(val)
            elif key in [
                    'AD',
                    'DT',
            ]:
                val = val.replace(',', '.')
                val = float(val)
            header[key] = val

        #print header

        SECTORSIZE = 512
        # loop for record number
        for i in range(header['NR']):
            #print 'record ',i
            offset = 1024 + i * (SECTORSIZE * header['NBD'] + 1024)

            # read analysis zone
            analysisHeader = HeaderReader(
                fid, AnalysisDescription).read_f(offset=offset)
            #print analysisHeader

            # read data
            NP = (SECTORSIZE * header['NBD']) // 2
            NP = NP - NP % header['NC']
            NP = NP // header['NC']
            if not lazy:
                data = np.memmap(self.filename,
                                 np.dtype('int16'),
                                 mode='r',
                                 shape=(
                                     NP,
                                     header['NC'],
                                 ),
                                 offset=offset + header['NBA'] * SECTORSIZE)

            # create a segment
            seg = Segment()
            bl.segments.append(seg)

            for c in range(header['NC']):

                unit = header['YU%d' % c]
                try:
                    unit = pq.Quantity(1., unit)
                except Exception:
                    unit = pq.Quantity(1., '')

                if lazy:
                    signal = [] * unit
                else:
                    YG = float(header['YG%d' % c].replace(',', '.'))
                    ADCMAX = header['ADCMAX']
                    VMax = analysisHeader['VMax'][c]
                    chan = int(header['YO%d' % c])
                    signal = data[:, chan].astype(
                        'f4') * VMax / ADCMAX / YG * unit
                anaSig = AnalogSignal(
                    signal,
                    sampling_rate=pq.Hz / analysisHeader['SamplingInterval'],
                    t_start=analysisHeader['TimeRecorded'] * pq.s,
                    name=header['YN%d' % c],
                    channel_index=c)

                if lazy:
                    anaSig.lazy_shape = NP
                seg.analogsignals.append(anaSig)

        fid.close()

        bl.create_many_to_one_relationship()
        return bl
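Each record occupies a 1024-byte analysis zone followed by NBD data sectors of SECTORSIZE bytes, which is exactly what the offset formula above encodes. A quick check with an assumed NBD of 4:

SECTORSIZE = 512
NBD = 4  # assumed value, for illustration only
offsets = [1024 + i * (SECTORSIZE * NBD + 1024) for i in range(3)]
assert offsets == [1024, 4096, 7168]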
Example #24
    def test_roundtrip(self):

        annotations = {"session_start_time": datetime.now()}
        # Define Neo blocks
        bl0 = Block(name='First block', **annotations)
        bl1 = Block(name='Second block', **annotations)
        bl2 = Block(name='Third block', **annotations)
        original_blocks = [bl0, bl1, bl2]

        num_seg = 4  # number of segments
        num_chan = 3  # number of channels

        for blk in original_blocks:

            for ind in range(num_seg):  # number of Segments
                seg = Segment(index=ind)
                seg.block = blk
                blk.segments.append(seg)

            for seg in blk.segments:  # AnalogSignal objects

                # 3 Neo AnalogSignals
                a = AnalogSignal(np.random.randn(44, num_chan) * pq.nA,
                                 sampling_rate=10 * pq.kHz,
                                 t_start=50 * pq.ms)
                b = AnalogSignal(np.random.randn(64, num_chan) * pq.mV,
                                 sampling_rate=8 * pq.kHz,
                                 t_start=40 * pq.ms)
                c = AnalogSignal(np.random.randn(33, num_chan) * pq.uA,
                                 sampling_rate=10 * pq.kHz,
                                 t_start=120 * pq.ms)

                # 1 Neo IrregularlySampledSignal
                d = IrregularlySampledSignal(
                    np.arange(7.0) * pq.ms,
                    np.random.randn(7, num_chan) * pq.mV)

                # 2 Neo SpikeTrains
                train = SpikeTrain(times=[1, 2, 3] * pq.s,
                                   t_start=1.0,
                                   t_stop=10.0)
                train2 = SpikeTrain(times=[4, 5, 6] * pq.s, t_stop=10.0)
                # todo: add waveforms

                # 1 Neo Event
                evt = Event(times=np.arange(0, 30, 10) * pq.ms,
                            labels=np.array(['ev0', 'ev1', 'ev2']))

                # 2 Neo Epochs
                epc = Epoch(times=np.arange(0, 30, 10) * pq.s,
                            durations=[10, 5, 7] * pq.ms,
                            labels=np.array(['btn0', 'btn1', 'btn2']))

                epc2 = Epoch(times=np.arange(10, 40, 10) * pq.s,
                             durations=[9, 3, 8] * pq.ms,
                             labels=np.array(['btn3', 'btn4', 'btn5']))

                seg.spiketrains.append(train)
                seg.spiketrains.append(train2)

                seg.epochs.append(epc)
                seg.epochs.append(epc2)

                seg.analogsignals.append(a)
                seg.analogsignals.append(b)
                seg.analogsignals.append(c)
                seg.irregularlysampledsignals.append(d)
                seg.events.append(evt)
                a.segment = seg
                b.segment = seg
                c.segment = seg
                d.segment = seg
                evt.segment = seg
                train.segment = seg
                train2.segment = seg
                epc.segment = seg
                epc2.segment = seg

        # write to file
        test_file_name = "test_round_trip.nwb"
        iow = NWBIO(filename=test_file_name, mode='w')
        iow.write_all_blocks(original_blocks)

        ior = NWBIO(filename=test_file_name, mode='r')
        retrieved_blocks = ior.read_all_blocks()

        self.assertEqual(len(retrieved_blocks), 3)
        self.assertEqual(len(retrieved_blocks[2].segments), num_seg)

        original_signal_22b = original_blocks[2].segments[2].analogsignals[1]
        retrieved_signal_22b = retrieved_blocks[2].segments[2].analogsignals[1]
        for attr_name in ("name", "units", "sampling_rate", "t_start"):
            retrieved_attribute = getattr(retrieved_signal_22b, attr_name)
            original_attribute = getattr(original_signal_22b, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(retrieved_signal_22b.magnitude,
                           original_signal_22b.magnitude)

        original_issignal_22d = original_blocks[2].segments[
            2].irregularlysampledsignals[0]
        retrieved_issignal_22d = retrieved_blocks[2].segments[
            2].irregularlysampledsignals[0]
        for attr_name in ("name", "units", "t_start"):
            retrieved_attribute = getattr(retrieved_issignal_22d, attr_name)
            original_attribute = getattr(original_issignal_22d, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(
            retrieved_issignal_22d.times.rescale('ms').magnitude,
            original_issignal_22d.times.rescale('ms').magnitude)
        assert_array_equal(retrieved_issignal_22d.magnitude,
                           original_issignal_22d.magnitude)

        original_event_11 = original_blocks[1].segments[1].events[0]
        retrieved_event_11 = retrieved_blocks[1].segments[1].events[0]
        for attr_name in ("name", ):
            retrieved_attribute = getattr(retrieved_event_11, attr_name)
            original_attribute = getattr(original_event_11, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(
            retrieved_event_11.rescale('ms').magnitude,
            original_event_11.rescale('ms').magnitude)
        assert_array_equal(retrieved_event_11.labels, original_event_11.labels)

        original_spiketrain_131 = original_blocks[1].segments[1].spiketrains[1]
        retrieved_spiketrain_131 = retrieved_blocks[1].segments[1].spiketrains[
            1]
        for attr_name in ("name", "t_start", "t_stop"):
            retrieved_attribute = getattr(retrieved_spiketrain_131, attr_name)
            original_attribute = getattr(original_spiketrain_131, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(
            retrieved_spiketrain_131.times.rescale('ms').magnitude,
            original_spiketrain_131.times.rescale('ms').magnitude)

        original_epoch_11 = original_blocks[1].segments[1].epochs[0]
        retrieved_epoch_11 = retrieved_blocks[1].segments[1].epochs[0]
        for attr_name in ("name", ):
            retrieved_attribute = getattr(retrieved_epoch_11, attr_name)
            original_attribute = getattr(original_epoch_11, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(
            retrieved_epoch_11.rescale('ms').magnitude,
            original_epoch_11.rescale('ms').magnitude)
        assert_allclose(
            retrieved_epoch_11.durations.rescale('ms').magnitude,
            original_epoch_11.durations.rescale('ms').magnitude)
        assert_array_equal(retrieved_epoch_11.labels, original_epoch_11.labels)
        os.remove(test_file_name)
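For context, the write/read round trip this test exercises boils down to a few lines. A minimal sketch, assuming neo's NWBIO and a list of already populated Blocks (the file name is illustrative):

# Minimal NWB round-trip sketch (illustrative, not part of the test above).
# Assumes `original_blocks` is a list of fully populated neo Blocks.
from neo.io import NWBIO

path = "roundtrip_example.nwb"  # hypothetical file name
NWBIO(filename=path, mode='w').write_all_blocks(original_blocks)
retrieved_blocks = NWBIO(filename=path, mode='r').read_all_blocks()
assert len(retrieved_blocks) == len(original_blocks)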
Example #25
def generate_one_simple_segment(seg_name='segment 0', supported_objects=[], nb_analogsignal=4,
                                t_start=0. * pq.s, sampling_rate=10 * pq.kHz, duration=6. * pq.s,
                                nb_spiketrain=6, spikerate_range=[.5 * pq.Hz, 12 * pq.Hz],
                                event_types={'stim': ['a', 'b', 'c', 'd'],
                                             'enter_zone': ['one', 'two'],
                                             'color': ['black', 'yellow', 'green'], },
                                event_size_range=[5, 20],
                                epoch_types={'animal state': ['Sleep', 'Freeze', 'Escape'],
                                             'light': ['dark', 'lighted']},
                                epoch_duration_range=[.5, 3.],
                                # this should be multiplied by pq.s, no?
                                array_annotations={'valid': np.array([True, False]),
                                                   'number': np.array(range(5))}
                                ):
    if supported_objects and Segment not in supported_objects:
        raise ValueError('Segment must be in supported_objects')
    seg = Segment(name=seg_name)
    if AnalogSignal in supported_objects:
        for a in range(nb_analogsignal):
            anasig = AnalogSignal(rand(int((sampling_rate * duration).simplified)),
                                  sampling_rate=sampling_rate,
                                  t_start=t_start, units=pq.mV, channel_index=a,
                                  name='sig %d for segment %s' % (a, seg.name))
            seg.analogsignals.append(anasig)

    if SpikeTrain in supported_objects:
        for s in range(nb_spiketrain):
            spikerate = rand() * np.diff(spikerate_range)
            spikerate += spikerate_range[0].magnitude
            # spikedata = rand(int((spikerate*duration).simplified))*duration
            # sptr = SpikeTrain(spikedata,
            #                  t_start=t_start, t_stop=t_start+duration)
            #                  #, name = 'spiketrain %d'%s)
            spikes = rand(int((spikerate * duration).simplified))
            spikes.sort()  # spikes are supposed to be an ascending sequence
            sptr = SpikeTrain(spikes * duration, t_start=t_start, t_stop=t_start + duration)
            sptr.annotations['channel_index'] = s
            # Randomly generate array_annotations from given options
            arr_ann = {key: value[(rand(len(spikes)) * len(value)).astype('i')] for (key, value) in
                       array_annotations.items()}
            sptr.array_annotate(**arr_ann)
            seg.spiketrains.append(sptr)

    if Event in supported_objects:
        for name, labels in event_types.items():
            evt_size = rand() * np.diff(event_size_range)
            evt_size += event_size_range[0]
            evt_size = int(evt_size)
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(evt_size) * len(labels)).astype('i')]
            evt = Event(times=rand(evt_size) * duration, labels=labels)
            # Randomly generate array_annotations from given options
            arr_ann = {key: value[(rand(evt_size) * len(value)).astype('i')] for (key, value) in
                       array_annotations.items()}
            evt.array_annotate(**arr_ann)
            seg.events.append(evt)

    if Epoch in supported_objects:
        for name, labels in epoch_types.items():
            t = 0
            times = []
            durations = []
            while t < duration:
                times.append(t)
                dur = rand() * (epoch_duration_range[1] - epoch_duration_range[0])
                dur += epoch_duration_range[0]
                durations.append(dur)
                t = t + dur
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(len(times)) * len(labels)).astype('i')]
            assert len(times) == len(durations)
            assert len(times) == len(labels)
            epc = Epoch(times=pq.Quantity(times, units=pq.s),
                        durations=pq.Quantity(durations, units=pq.s),
                        labels=labels,)
            assert epc.times.dtype == 'float'
            # Randomly generate array_annotations from given options
            arr_ann = {key: value[(rand(len(times)) * len(value)).astype('i')] for (key, value) in
                       array_annotations.items()}
            epc.array_annotate(**arr_ann)
            seg.epochs.append(epc)

    # TODO : Spike, Event

    seg.create_many_to_one_relationship()
    return seg
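A short usage sketch for the generator above: `supported_objects` gates which children get created, and must include Segment (per the check at the top of the function). The import paths here are assumptions; in some neo versions this generator lives in neo.test.generate_datasets.

# Usage sketch for generate_one_simple_segment (import paths are assumptions)
import quantities as pq
from neo.core import Segment, AnalogSignal, SpikeTrain, Event, Epoch

seg = generate_one_simple_segment(
    seg_name='demo segment',
    supported_objects=[Segment, AnalogSignal, SpikeTrain, Event, Epoch],
    nb_analogsignal=2,
    duration=3. * pq.s,
)
print(len(seg.analogsignals), len(seg.spiketrains),
      len(seg.events), len(seg.epochs))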
Example #26
    def read_segment(
        self,
        # the first two keyword arguments are imposed by the neo.io API
        lazy=False,
        cascade=True,
        # all following arguments are decided by this IO and are free
        segment_duration=15.,
        num_analogsignal=4,
        num_spiketrain_by_channel=3,
    ):
        """
        Return a fake Segment.

        The self.filename does not matter.

        In this IO read by default a Segment.

        This is just a example to be adapted to each ClassIO.
        In this case these 3 paramters are  taken in account because this function
        return a generated segment with fake AnalogSignal and fake SpikeTrain.

        Parameters:
            segment_duration :is the size in secend of the segment.
            num_analogsignal : number of AnalogSignal in this segment
            num_spiketrain : number of SpikeTrain in this segment

        """

        sampling_rate = 10000.  # Hz
        t_start = -1.

        # time vector for the generated signal
        timevect = np.arange(t_start, t_start + segment_duration,
                             1. / sampling_rate)

        # create an empty segment
        seg = Segment(name='it is a seg from exampleio')

        if cascade:
            # read nested analogsignals
            for i in range(num_analogsignal):
                ana = self.read_analogsignal(lazy=lazy,
                                             cascade=cascade,
                                             channel_index=i,
                                             segment_duration=segment_duration,
                                             t_start=t_start)
                seg.analogsignals += [ana]

            # read nested spiketrain
            for i in range(num_analogsignal):
                for _ in range(num_spiketrain_by_channel):
                    sptr = self.read_spiketrain(
                        lazy=lazy,
                        cascade=cascade,
                        segment_duration=segment_duration,
                        t_start=t_start,
                        channel_index=i)
                    seg.spiketrains += [sptr]

            # create an EventArray that mimics triggers.
            # note that ExampleIO does not allow direct access to EventArray;
            # for that you need read_segment(cascade=True)
            eva = EventArray()
            if lazy:
                # in the lazy case no data are read and eva stays empty
                pass
            else:
                # otherwise it really contains data
                n = 1000

                # neo.io supports quantities; this vector uses seconds as its unit
                eva.times = timevect[(np.random.rand(n) *
                                      timevect.size).astype('i')] * pq.s
                # all durations are the same
                eva.durations = np.ones(n) * 500 * pq.ms
                # labels
                labels = []
                for _ in range(n):
                    if np.random.rand() > .6:
                        labels.append('TriggerA')
                    else:
                        labels.append('TriggerB')
                eva.labels = np.array(labels)

            seg.eventarrays += [eva]

        create_many_to_one_relationship(seg)
        return seg
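Assuming this method belongs to neo's ExampleIO (a fake IO whose filename is ignored, as the docstring says, and an older neo API given the cascade/eventarrays usage), a call might look like:

# Usage sketch; ExampleIO ignores the file name, so any string works here
# (import path assumed; ExampleIO ships with older neo versions)
from neo.io import ExampleIO

io = ExampleIO(filename='fake_file.fak')
seg = io.read_segment(segment_duration=10.,
                      num_analogsignal=2,
                      num_spiketrain_by_channel=2)
print(len(seg.analogsignals), len(seg.spiketrains), len(seg.eventarrays))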
Example #27
    def read_segment(
        self,
        lazy=False,
        cascade=True,
    ):

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        #~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=globalHeader['version'])
        seg.annotate(comment=globalHeader['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(globalHeader['nvar']):
            entityHeader = HeaderReader(
                fid, EntityHeader).read_f(offset=offset + i * 208)
            entityHeader['name'] = entityHeader['name'].replace('\x00', '')

            #print 'i',i, entityHeader['type']

            if entityHeader['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    spike_times = spike_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                    t_stop=globalHeader['tend'] / globalHeader['freq'] * pq.s,
                    name=entityHeader['name'],
                )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index=entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    event_times = event_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = EventArray(times=event_times,
                                  labels=labels,
                                  channel_name=entityHeader['name'])
                if lazy:
                    evar.lazy_shape = entityHeader['n']
                seg.eventarrays.append(evar)

            if entityHeader['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    start_times = start_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                    stop_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4,
                    )
                    stop_times = stop_times.astype(
                        'f') / globalHeader['freq'] * pq.s
                epar = EpochArray(times=start_times,
                                  durations=stop_times - start_times,
                                  labels=np.array([''] * start_times.size,
                                                  dtype='S'),
                                  channel_name=entityHeader['name'])
                if lazy:
                    epar.lazy_shape = entityHeader['n']
                seg.epocharrays.append(epar)

            if entityHeader['type'] == 3:
                # spiketrains and waveforms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    spike_times = spike_times.astype(
                        'f8') / globalHeader['freq'] * pq.s

                    waveforms = np.memmap(
                        self.filename,
                        np.dtype('i2'),
                        'r',
                        shape=(entityHeader['n'], 1,
                               entityHeader['NPointsWave']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4,
                    )
                    waveforms = (waveforms.astype('f') * entityHeader['ADtoMV']
                                 + entityHeader['MVOffset']) * pq.mV
                t_stop = globalHeader['tend'] / globalHeader['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                    #~ t_stop = max(globalHeader['tend']/globalHeader['freq']*pq.s,max(spike_times)),
                    t_stop=t_stop,
                    name=entityHeader['name'],
                    waveforms=waveforms,
                    sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms,
                )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index=entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 4:
                # popvectors
                pass

            if entityHeader['type'] == 5:
                # analog

                timestamps = np.memmap(
                    self.filename,
                    np.dtype('i4'),
                    'r',
                    shape=(entityHeader['n']),
                    offset=entityHeader['offset'],
                )
                timestamps = timestamps.astype('f8') / globalHeader['freq']
                fragmentStarts = np.memmap(
                    self.filename,
                    np.dtype('i4'),
                    'r',
                    shape=(entityHeader['n']),
                    offset=entityHeader['offset'],
                )
                fragmentStarts = fragmentStarts.astype(
                    'f8') / globalHeader['freq']
                t_start = timestamps[0] - fragmentStarts[0] / float(
                    entityHeader['WFrequency'])
                del timestamps, fragmentStarts

                if lazy:
                    signal = [] * pq.mV
                else:
                    signal = np.memmap(
                        self.filename,
                        np.dtype('i2'),
                        'r',
                        shape=(entityHeader['NPointsWave']),
                        offset=entityHeader['offset'],
                    )
                    signal = signal.astype('f')
                    signal *= entityHeader['ADtoMV']
                    signal += entityHeader['MVOffset']
                    signal = signal * pq.mV

                anaSig = AnalogSignal(
                    signal=signal,
                    t_start=t_start * pq.s,
                    sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                    name=entityHeader['name'],
                    channel_index=entityHeader['WireNumber'])
                if lazy:
                    anaSig.lazy_shape = entityHeader['NPointsWave']
                seg.analogsignals.append(anaSig)

            if entityHeader['type'] == 6:
                # markers  : TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    times = times.astype('f8') / globalHeader['freq'] * pq.s
                    fid.seek(entityHeader['offset'] + entityHeader['n'] * 4)
                    markertype = fid.read(64).replace('\x00', '')
                    labels = np.memmap(
                        self.filename,
                        np.dtype('S' + str(entityHeader['MarkerLength'])),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4 +
                        64)
                ea = EventArray(times=times,
                                labels=labels.view(np.ndarray),
                                name=entityHeader['name'],
                                channel_index=entityHeader['WireNumber'],
                                marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entityHeader['n']
                seg.eventarrays.append(ea)

        create_many_to_one_relationship(seg)
        return seg
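Every entity branch above repeats one pattern: memory-map a column of raw int32 ticks at the entity's byte offset, then rescale by the file-wide clock frequency. Condensed into a helper (illustrative only; the helper name is mine, not the IO's):

# Condensed sketch of the recurring memmap-and-rescale pattern above
import numpy as np
import quantities as pq

def read_ticks_as_seconds(filename, n, offset, freq):
    """Map n int32 ticks stored at `offset` and convert them to seconds."""
    ticks = np.memmap(filename, np.dtype('i4'), 'r', shape=(n,), offset=offset)
    return ticks.astype('f8') / freq * pq.s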
Example #28
    def read_block(self, lazy=False, cascade=True):

        header = self.read_header()
        version = header['fFileVersionNumber']

        bl = Block()
        bl.file_origin = os.path.basename(self.filename)
        bl.annotate(abf_version=str(version))

        # date and time
        if version < 2.:
            YY = 1900
            MM = 1
            DD = 1
            hh = int(header['lFileStartTime'] / 3600.)
            mm = int((header['lFileStartTime'] - hh * 3600) / 60)
            ss = header['lFileStartTime'] - hh * 3600 - mm * 60
            ms = int(np.mod(ss, 1) * 1e6)
            ss = int(ss)
        elif version >= 2.:
            YY = int(header['uFileStartDate'] / 10000)
            MM = int((header['uFileStartDate'] - YY * 10000) / 100)
            DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
            hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
            mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
            ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
            ms = int(np.mod(ss, 1) * 1e6)
            ss = int(ss)
        bl.rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)

        if not cascade:
            return bl

        # file format
        if header['nDataFormat'] == 0:
            dt = np.dtype('i2')
        elif header['nDataFormat'] == 1:
            dt = np.dtype('f4')

        if version < 2.:
            nbchannel = header['nADCNumChannels']
            head_offset = header['lDataSectionPtr'] * BLOCKSIZE + header[
                'nNumPointsIgnored'] * dt.itemsize
            totalsize = header['lActualAcqLength']
        elif version >= 2.:
            nbchannel = header['sections']['ADCSection']['llNumEntries']
            head_offset = header['sections']['DataSection'][
                'uBlockIndex'] * BLOCKSIZE
            totalsize = header['sections']['DataSection']['llNumEntries']

        data = np.memmap(self.filename,
                         dt,
                         'r',
                         shape=(totalsize, ),
                         offset=head_offset)

        # 3 possible modes
        if version < 2.:
            mode = header['nOperationMode']
        elif version >= 2.:
            mode = header['protocol']['nOperationMode']

        if (mode == 1) or (mode == 2) or (mode == 5) or (mode == 3):
            # event-driven variable-length mode (mode 1)
            # event-driven fixed-length mode (mode 2 or 5)
            # gap free mode (mode 3) can be in several episodes

            # read sweep pos
            if version < 2.:
                nbepisod = header['lSynchArraySize']
                offset_episode = header['lSynchArrayPtr'] * BLOCKSIZE
            elif version >= 2.:
                nbepisod = header['sections']['SynchArraySection'][
                    'llNumEntries']
                offset_episode = header['sections']['SynchArraySection'][
                    'uBlockIndex'] * BLOCKSIZE
            if nbepisod > 0:
                episode_array = np.memmap(self.filename, [('offset', 'i4'),
                                                          ('len', 'i4')],
                                          'r',
                                          shape=nbepisod,
                                          offset=offset_episode)
            else:
                episode_array = np.empty(1, [('offset', 'i4'), ('len', 'i4')])
                episode_array[0]['len'] = data.size
                episode_array[0]['offset'] = 0

            # sampling_rate
            if version < 2.:
                sampling_rate = 1. / (header['fADCSampleInterval'] *
                                      nbchannel * 1.e-6) * pq.Hz
            elif version >= 2.:
                sampling_rate = 1.e6 / \
                    header['protocol']['fADCSequenceInterval'] * pq.Hz

            # construct block
            # one sweep = one segment in a block
            pos = 0
            for j in range(episode_array.size):
                seg = Segment(index=j)

                length = episode_array[j]['len']

                if version < 2.:
                    fSynchTimeUnit = header['fSynchTimeUnit']
                elif version >= 2.:
                    fSynchTimeUnit = header['protocol']['fSynchTimeUnit']

                if (fSynchTimeUnit != 0) and (mode == 1):
                    length /= fSynchTimeUnit

                if not lazy:
                    subdata = data[pos:pos + length]
                    subdata = subdata.reshape(
                        (int(subdata.size / nbchannel), nbchannel)).astype('f')
                    if dt == np.dtype('i2'):
                        if version < 2.:
                            reformat_integer_v1(subdata, nbchannel, header)
                        elif version >= 2.:
                            reformat_integer_v2(subdata, nbchannel, header)

                pos += length

                if version < 2.:
                    chans = [
                        chan_num for chan_num in header['nADCSamplingSeq']
                        if chan_num >= 0
                    ]
                else:
                    chans = range(nbchannel)
                for n, i in enumerate(chans[:nbchannel]):  # fix SamplingSeq
                    if version < 2.:
                        name = header['sADCChannelName'][i].replace(b' ', b'')
                        unit = header['sADCUnits'][i].replace(b'\xb5', b'u').\
                            replace(b' ', b'').decode('utf-8')  # \xb5 is µ
                        num = header['nADCPtoLChannelMap'][i]
                    elif version >= 2.:
                        lADCIi = header['listADCInfo'][i]
                        name = lADCIi['ADCChNames'].replace(b' ', b'')
                        unit = lADCIi['ADCChUnits'].replace(b'\xb5', b'u').\
                            replace(b' ', b'').decode('utf-8')
                        num = header['listADCInfo'][i]['nADCNum']
                    if (fSynchTimeUnit == 0):
                        t_start = float(
                            episode_array[j]['offset']) / sampling_rate
                    else:
                        t_start = float(episode_array[j]['offset']
                                        ) * fSynchTimeUnit * 1e-6 * pq.s
                    t_start = t_start.rescale('s')
                    try:
                        pq.Quantity(1, unit)
                    except Exception:
                        unit = ''

                    if lazy:
                        signal = [] * pq.Quantity(1, unit)
                    else:
                        signal = pq.Quantity(subdata[:, n], unit)

                    anaSig = AnalogSignal(signal,
                                          sampling_rate=sampling_rate,
                                          t_start=t_start,
                                          name=str(name),
                                          channel_index=int(num))
                    if lazy:
                        anaSig.lazy_shape = length / nbchannel
                    seg.analogsignals.append(anaSig)
                bl.segments.append(seg)

            if mode in [3, 5]:  # TODO check if tags exist in other modes
                # tags are an Event that should be attached to the Block.
                # Here it is attached to the first Segment.
                times = []
                labels = []
                comments = []
                for i, tag in enumerate(header['listTag']):
                    times.append(tag['lTagTime'] / sampling_rate)
                    labels.append(str(tag['nTagType']))
                    comments.append(clean_string(tag['sComment']))
                times = np.array(times)
                labels = np.array(labels, dtype='S')
                comments = np.array(comments, dtype='S')
                # attach all tags to the first segment.
                seg = bl.segments[0]
                if lazy:
                    ea = Event(times=[] * pq.s, labels=np.array([], dtype='S'))
                    ea.lazy_shape = len(times)
                else:
                    ea = Event(times=times * pq.s,
                               labels=labels,
                               comments=comments)
                seg.events.append(ea)

        bl.create_many_to_one_relationship()
        return bl
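Assuming this read_block belongs to neo's AxonIO, typical use looks like the sketch below (the path is a placeholder):

# Usage sketch (placeholder path; assumes neo's AxonIO)
from neo.io import AxonIO

io = AxonIO(filename='recording.abf')
block = io.read_block(lazy=False, cascade=True)
for seg in block.segments:  # one sweep per segment
    for sig in seg.analogsignals:
        print(sig.name, sig.sampling_rate, sig.t_start)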
Example #29
    def test__issue_285(self):
        # Spiketrain
        train = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
        unit = Unit()
        train.unit = unit
        unit.spiketrains.append(train)

        epoch = Epoch(np.array([0, 10, 20]),
                      np.array([2, 2, 2]),
                      np.array(["a", "b", "c"]),
                      units="ms")

        blk = Block()
        seg = Segment()
        seg.spiketrains.append(train)
        seg.epochs.append(epoch)
        epoch.segment = seg
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.spiketrains[0].unit, Unit)
        self.assertIsInstance(r_seg.epochs[0], Epoch)
        os.remove('blk.pkl')

        # Epoch
        epoch = Epoch(times=np.arange(0, 30, 10) * pq.s,
                      durations=[10, 5, 7] * pq.ms,
                      labels=np.array(['btn0', 'btn1', 'btn2'], dtype='U'))
        epoch.segment = Segment()
        blk = Block()
        seg = Segment()
        seg.epochs.append(epoch)
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.epochs[0].segment, Segment)
        os.remove('blk.pkl')

        # Event
        event = Event(np.arange(0, 30, 10) * pq.s,
                      labels=np.array(['trig0', 'trig1', 'trig2'], dtype='U'))
        event.segment = Segment()

        blk = Block()
        seg = Segment()
        seg.events.append(event)
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.events[0].segment, Segment)
        os.remove('blk.pkl')

        # IrregularlySampledSignal
        signal = IrregularlySampledSignal([0.0, 1.23, 6.78], [1, 2, 3],
                                          units='mV',
                                          time_units='ms')
        signal.segment = Segment()

        blk = Block()
        seg = Segment()
        seg.irregularlysampledsignals.append(signal)
        blk.segments.append(seg)
        blk.segments[0].block = blk

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.irregularlysampledsignals[0].segment,
                              Segment)
        os.remove('blk.pkl')
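The test repeats the same pickle round trip four times; distilled into a helper, under the same imports the test uses, it is just:

# The round-trip pattern the test repeats, distilled (illustrative helper)
import os
from neo.io import PickleIO

def pickle_roundtrip(blk, path="blk.pkl"):
    PickleIO(filename=path).write(blk)
    restored = PickleIO(filename=path).read_block()
    os.remove(path)
    return restored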
Example #30
    def read_protocol(self):
        """
        Read the protocol waveform of the file, if present;
        function works with ABF2 only. Protocols can be reconstructed
        from the ABF1 header.

        Returns: list of segments (one for every episode)
                 with list of analog signls (one for every DAC).
        """
        header = self.read_header()

        if header['fFileVersionNumber'] < 2.:
            raise IOError("Protocol section is only present in ABF2 files.")

        nADC = header['sections']['ADCSection'][
            'llNumEntries']  # Number of ADC channels
        nDAC = header['sections']['DACSection'][
            'llNumEntries']  # Number of DAC channels
        nSam = header['protocol'][
            'lNumSamplesPerEpisode'] / nADC  # Number of samples per episode
        nEpi = header['lActualEpisodes']  # Actual number of episodes
        sampling_rate = 1.e6 / header['protocol'][
            'fADCSequenceInterval'] * pq.Hz

        # Make a list of segments with analog signals with just holding levels
        # List of segments relates to number of episodes, as for recorded data
        segments = []
        for epiNum in range(nEpi):
            seg = Segment(index=epiNum)
            # One analog signal for each DAC in segment (episode)
            for DACNum in range(nDAC):
                t_start = 0 * pq.s  # TODO: Possibly check with episode array
                name = header['listDACInfo'][DACNum]['DACChNames']
                unit = header['listDACInfo'][DACNum]['DACChUnits'].\
                    replace(b'\xb5', b'u').decode('utf-8')  # \xb5 is µ
                signal = np.ones(nSam) *\
                    header['listDACInfo'][DACNum]['fDACHoldingLevel'] *\
                    pq.Quantity(1, unit)
                ana_sig = AnalogSignal(signal,
                                       sampling_rate=sampling_rate,
                                       t_start=t_start,
                                       name=str(name),
                                       channel_index=DACNum)
                # If there are epoch infos for this DAC
                if DACNum in header['dictEpochInfoPerDAC']:
                    # Save last sample index
                    i_last = int(nSam * 15625 / 10**6)
                    # TODO guess for first holding
                    # Go over EpochInfoPerDAC and change the analog signal
                    # according to the epochs
                    epochInfo = header['dictEpochInfoPerDAC'][DACNum]
                    for epochNum, epoch in iteritems(epochInfo):
                        i_begin = i_last
                        i_end = i_last + epoch['lEpochInitDuration'] +\
                            epoch['lEpochDurationInc'] * epiNum
                        dif = i_end - i_begin
                        ana_sig[i_begin:i_end] = np.ones(len(range(dif))) *\
                            pq.Quantity(1, unit) * (epoch['fEpochInitLevel'] +
                                                    epoch['fEpochLevelInc'] *
                                                    epiNum)
                        i_last += epoch['lEpochInitDuration']
                seg.analogsignals.append(ana_sig)
            segments.append(seg)

        return segments
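Assuming this method also belongs to neo's AxonIO, reading a protocol might look like the sketch below (placeholder path; ABF1 files raise IOError per the version check above):

# Usage sketch for read_protocol (assumes neo's AxonIO; placeholder path)
from neo.io import AxonIO

io = AxonIO(filename='protocol_recording.abf')
try:
    protocol_segments = io.read_protocol()
except IOError:
    protocol_segments = []  # ABF1 file: protocol section absent
for seg in protocol_segments:        # one segment per episode
    for sig in seg.analogsignals:    # one signal per DAC
        print(seg.index, sig.name, sig.units)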