Example #1
 def read_segment(self,
                 lazy = False,
                 cascade = True,
                 group = 0,
                 series = 0):
     seg = Segment( name = 'test')
     if cascade:
         tree = getbyroute(self.pul.tree,[0,group,series])
         for sw,sweep in enumerate(tree['children']):
             if sw == 0:
                 starttime = pq.Quantity(float(sweep['contents'].swTimer),'s')
             for ch,channel in enumerate(sweep['children']):
                 sig = self.read_analogsignal(group=group,
                                         series=series,
                                         sweep=sw,
                                         channel = ch)
                 annotations = list(sweep['contents'].__dict__.keys())
                 annotations.remove('readlist')
                 for a in annotations:
                     d = {a:str(sweep['contents'].__dict__[a])}
                     sig.annotate(**d)
                 sig.t_start = pq.Quantity(float(sig.annotations['swTimer']),'s') - starttime
                 seg.analogsignals.append(sig)
         annotations = list(tree['contents'].__dict__.keys())
         annotations.remove('readlist')
         for a in annotations:
             d = {a:str(tree['contents'].__dict__[a])}
             seg.annotate(**d)
     create_many_to_one_relationship(seg)
     return seg
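For context, Segment.annotate (used throughout these examples) merges keyword arguments into the object's annotations dictionary. A minimal, self-contained sketch of the pattern, with an illustrative key:

    from neo import Segment

    seg = Segment(name='demo')
    d = {'swTimer': '12.5'}             # one key/value pair, as built above
    seg.annotate(**d)                   # merged into seg.annotations
    print(seg.annotations['swTimer'])   # -> '12.5'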
Example #2
    def test_segment_write(self):
        block = Block(name=self.rword())
        segment = Segment(name=self.rword(), description=self.rword())
        block.segments.append(segment)
        self.write_and_compare([block])

        segment.annotate(**self.rdict(2))
        self.write_and_compare([block])
Example #3
    def test_segment_write(self):
        block = Block(name=self.rword())
        segment = Segment(name=self.rword(), description=self.rword())
        block.segments.append(segment)
        self.write_and_compare([block])

        segment.annotate(**self.rdict(2))
        self.write_and_compare([block])
Example #4
    def read_segment(self, n_start, n_stop, chlist=None, lazy=False, cascade=True):
        """Reads a Segment from the file and stores in database.

        The Segment will contain one AnalogSignal for each channel
        and will go from n_start to n_stop (in samples).

        Arguments:
            n_start : time in samples that the Segment begins
            n_stop : time in samples that the Segment ends

        Python indexing is used, so n_stop is not inclusive.

        Returns a Segment object containing the data.
        """
        # If no channel numbers provided, get all of them
        if chlist is None:
            chlist = self.loader.get_neural_channel_numbers()

        # Conversion from bits to full_range units
        conversion = self.full_range / 2**(8*self.header.sample_width)
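        # e.g. sample_width = 2 bytes and full_range = 8192 mV (illustrative
        # values) give conversion = 8192 / 2**16 = 0.125 mV per bit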

        # Create the Segment
        seg = Segment(file_origin=self.filename)
        t_start = float(n_start) / self.header.f_samp
        t_stop = float(n_stop) / self.header.f_samp
        seg.annotate(t_start=t_start)
        seg.annotate(t_stop=t_stop)

        # Load data from each channel and store
        for ch in chlist:
            if lazy:
                sig = np.array([]) * conversion
            else:
                # Get the data from the loader
                sig = np.array(\
                    self.loader._get_channel(ch)[n_start:n_stop]) * conversion

            # Create an AnalogSignal with the data in it
            anasig = AnalogSignal(signal=sig,
                sampling_rate=self.header.f_samp*pq.Hz,
                t_start=t_start*pq.s, file_origin=self.filename,
                description='Channel %d from %f to %f' % (ch, t_start, t_stop),
                channel_index=int(ch))

            if lazy:
                anasig.lazy_shape = n_stop-n_start


            # Link the signal to the segment
            seg.analogsignals.append(anasig)

            # Link the signal to the recording channel from which it came
            #rc = self.channel_number_to_recording_channel[ch]
            #rc.analogsignals.append(anasig)

        return seg
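A minimal usage sketch for the reader above (hypothetical `io` instance; the 30 kHz sampling rate is an illustrative value). Because Python indexing is used, n_stop is exclusive:

    # `io` is assumed to be an instance of the IO class defined above,
    # with io.header.f_samp == 30000
    seg = io.read_segment(n_start=0, n_stop=30000)  # exactly the first second
    print(len(seg.analogsignals))                   # one signal per channel
    print(seg.annotations['t_start'], seg.annotations['t_stop'])  # 0.0 1.0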
Example #5
    def read_segment(self, n_start, n_stop, chlist=None, lazy=False, cascade=True):
        """Reads a Segment from the file and stores in database.

        The Segment will contain one AnalogSignal for each channel
        and will go from n_start to n_stop (in samples).

        Arguments:
            n_start : time in samples that the Segment begins
            n_stop : time in samples that the Segment ends

        Python indexing is used, so n_stop is not inclusive.

        Returns a Segment object containing the data.
        """
        # If no channel numbers provided, get all of them
        if chlist is None:
            chlist = self.loader.get_neural_channel_numbers()

        # Conversion from bits to full_range units
        conversion = self.full_range / 2**(8*self.header.sample_width)

        # Create the Segment
        seg = Segment(file_origin=self.filename)
        t_start = float(n_start) / self.header.f_samp
        t_stop = float(n_stop) / self.header.f_samp
        seg.annotate(t_start=t_start)
        seg.annotate(t_stop=t_stop)

        # Load data from each channel and store
        for ch in chlist:
            if lazy:
                sig = np.array([]) * conversion
            else:
                # Get the data from the loader
                sig = np.array(\
                    self.loader._get_channel(ch)[n_start:n_stop]) * conversion

            # Create an AnalogSignal with the data in it
            anasig = AnalogSignal(signal=sig,
                sampling_rate=self.header.f_samp*pq.Hz,
                t_start=t_start*pq.s, file_origin=self.filename,
                description='Channel %d from %f to %f' % (ch, t_start, t_stop),
                channel_index=int(ch))

            if lazy:
                anasig.lazy_shape = n_stop-n_start


            # Link the signal to the segment
            seg.analogsignals.append(anasig)

            # Link the signal to the recording channel from which it came
            #rc = self.channel_number_to_recording_channel[ch]
            #rc.analogsignals.append(anasig)

        return seg
Example #6
    def read_block(self, lazy=False, **kwargs):
        # to sort file
        def natural_sort(l):
            convert = lambda text: int(text) if text.isdigit() else text.lower()
            alphanum_key = lambda key: [
                convert(c) for c in re.split('([0-9]+)', key)
            ]
            return sorted(l, key=alphanum_key)

        # find all the images in the given directory
        file_name_list = []
        # name of extensions to track
        types = ["*.tif", "*.tiff"]
        for file in types:
            file_name_list.append(glob.glob(self.filename + "/" + file))
        # flatten list
        file_name_list = [
            item for sublist in file_name_list for item in sublist
        ]
        # delete path in the name of file
        file_name_list = [
            file_name[len(self.filename) + 1::] for file_name in file_name_list
        ]
        # sorting file
        file_name_list = natural_sort(file_name_list)
        list_data_image = []
        for file_name in file_name_list:
            list_data_image.append(
                np.array(Image.open(self.filename + "/" + file_name),
                         dtype=float))
        list_data_image = np.array(list_data_image)
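        # a 4-D stack means multi-channel (e.g. RGB) frames; reload each
        # image converted to 8-bit grayscale ('L') so every frame is 2-D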
        if len(list_data_image.shape) == 4:
            list_data_image = []
            for file_name in file_name_list:
                list_data_image.append(
                    np.array(Image.open(self.filename + "/" +
                                        file_name).convert('L'),
                             dtype=float))

        print("read block")
        image_sequence = ImageSequence(np.stack(list_data_image),
                                       units=self.units,
                                       sampling_rate=self.sampling_rate,
                                       spatial_scale=self.spatial_scale)
        print("creating segment")
        segment = Segment(file_origin=self.filename)
        segment.annotate(tiff_file_names=file_name_list)
        segment.imagesequences = [image_sequence]

        block = Block(file_origin=self.filename)
        segment.block = block
        block.segments.append(segment)
        print("returning block")
        return block
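The natural_sort helper above orders numbered frame files the way a human would, rather than lexicographically; a self-contained sketch:

    import re

    def natural_sort(l):
        convert = lambda text: int(text) if text.isdigit() else text.lower()
        alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
        return sorted(l, key=alphanum_key)

    print(natural_sort(['img10.tif', 'img2.tif', 'img1.tif']))
    # -> ['img1.tif', 'img2.tif', 'img10.tif']; plain sorted() would
    #    place 'img10.tif' before 'img2.tif'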
Example #7
 def read_segment(self,
                 lazy = False,
                 cascade = True,
                 group = 0,
                 series = 0):
     seg = Segment( name = 'test')
     if cascade:
         tree = getbyroute(self.pul.tree,[0,group,series])
         for sw,sweep in enumerate(tree['children']):
             if sw == 0:
                 starttime = pq.Quantity(float(sweep['contents'].swTimer),'s')
             for ch,channel in enumerate(sweep['children']):
                 sig = self.read_analogsignal(group=group,
                                         series=series,
                                         sweep=sw,
                                         channel = ch)
                 annotations = list(sweep['contents'].__dict__.keys())
                 annotations.remove('readlist')
                 for a in annotations:
                     d = {a:str(sweep['contents'].__dict__[a])}
                     sig.annotate(**d)
                 sig.t_start = pq.Quantity(float(sig.annotations['swTimer']),'s') - starttime
                 seg.analogsignals.append(sig)
         annotations = list(tree['contents'].__dict__.keys())
         annotations.remove('readlist')
         for a in annotations:
             d = {a:str(tree['contents'].__dict__[a])}
             seg.annotate(**d)
     create_many_to_one_relationship(seg)
     ### add protocols to signals
     for sig_index,sig in enumerate(seg.analogsignals):
         pgf_index = sig.annotations['pgf_index']
         st_rec = self.pgf.tree['children'][pgf_index]['contents']
         chnls = [ch for ch in self.pgf.tree['children'][pgf_index]['children']]
         for ch_index, chnl in enumerate(chnls):
             ep_start = sig.t_start
             for se_epoch_index, se_epoch in enumerate(chnl['children']):
                 se_rec = se_epoch['contents']
                 se_duration = pq.Quantity(float(se_rec.seDuration),'s')
                 if not(int(se_rec.seVoltageSource)):
                     se_voltage = pq.Quantity(float(se_rec.seVoltage),'V')
                 else:
                     se_voltage = pq.Quantity(float(chnl['contents'].chHolding),'V')
                 epoch = neo.Epoch(ep_start,se_duration,'protocol_epoch',value=se_voltage,channel_index=ch_index)
                 fully_annototate(chnl,epoch)
                 epoch.annotations['sig_index'] = sig_index
                 ep_start = ep_start + se_duration
                 seg.epochs.append(epoch)
     return seg
Example #8
    def create_all_annotated(cls):
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))
        cls.populate_dates(blk)

        seg = Segment()
        seg.annotate(**cls.rdict(4))
        cls.populate_dates(seg)
        blk.segments.append(seg)

        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)

        isig = IrregularlySampledSignal(times=times,
                                        signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)

        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)

        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)

        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)

        chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)

        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)

        return blk
Example #9
 def read_segment(self, lazy=False, cascade=True):
     data, metadata = self._read_file_contents()
     annotations = dict((k, metadata.get(k, 'unknown')) for k in ("label", "variable", "first_id", "last_id"))
     seg = Segment(**annotations)
     if cascade:
         if metadata['variable'] == 'spikes':
             for i in range(metadata['first_index'], metadata['last_index'] + 1):
                 spiketrain = self._extract_spikes(data, metadata, i, lazy)
                 if spiketrain is not None:
                     seg.spiketrains.append(spiketrain)
             seg.annotate(dt=metadata['dt']) # store dt for SpikeTrains only, as can be retrieved from sampling_period for AnalogSignal
         else:
             signal = self._extract_signals(data, metadata, lazy)
             if signal is not None:
                 seg.analogsignals.append(signal)
         seg.create_many_to_one_relationship()
     return seg
Example #10
 def read_segment(self, lazy=False, cascade=True):
     data, metadata = self._read_file_contents()
     annotations = dict((k, metadata.get(k, 'unknown')) for k in ("label", "variable", "first_id", "last_id"))
     seg = Segment(**annotations)
     if cascade:
         if metadata['variable'] == 'spikes':
             for i in range(metadata['first_index'], metadata['last_index']):
                 spiketrain = self._extract_spikes(data, metadata, i, lazy)
                 if spiketrain is not None:
                     seg.spiketrains.append(spiketrain)
             seg.annotate(dt=metadata['dt']) # store dt for SpikeTrains only, as can be retrieved from sampling_period for AnalogSignal
         else:
             for i in range(metadata['first_index'], metadata['last_index']):
                 # probably slow. Replace with numpy-based version from 0.1
                 signal = self._extract_signal(data, metadata, i, lazy)
                 if signal is not None:
                     seg.analogsignals.append(signal)
         seg.create_many_to_one_relationship()
     return seg
Example #11
    def create_all_annotated(cls):
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))

        seg = Segment()
        seg.annotate(**cls.rdict(4))
        blk.segments.append(seg)

        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)

        isig = IrregularlySampledSignal(times=times, signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)

        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)

        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)

        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)

        chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)

        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)

        return blk
Example #12
    def read_segment(self, lazy=False):
        assert not lazy, 'Do not support lazy'

        data, metadata = self._read_file_contents()
        annotations = dict(
            (k, metadata.get(k, 'unknown'))
            for k in ("label", "variable", "first_id", "last_id"))
        seg = Segment(**annotations)
        if metadata['variable'] == 'spikes':
            for i in range(metadata['first_index'],
                           metadata['last_index'] + 1):
                spiketrain = self._extract_spikes(data, metadata, i)
                if spiketrain is not None:
                    seg.spiketrains.append(spiketrain)
            # store dt for SpikeTrains only, as can be retrieved from sampling_period for AnalogSignal
            seg.annotate(dt=metadata['dt'])
        else:
            signal = self._extract_signals(data, metadata)
            if signal is not None:
                seg.analogsignals.append(signal)
        seg.create_many_to_one_relationship()
        return seg
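As the dt comment in the readers above notes, an AnalogSignal carries its own timing, so dt is stored as an annotation only for spike data; a quick self-contained illustration with made-up values:

    import quantities as pq
    from neo import AnalogSignal

    sig = AnalogSignal([0.0, 1.0, 2.0], units='mV', sampling_rate=1000 * pq.Hz)
    print(sig.sampling_period.rescale('s'))  # -> 0.001 s, no dt annotation needed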
Example #13
    def read_segment(
        self,
        cascade=True,
        lazy=False,
    ):
        """
        Arguments:
        """
        f = struct_file(self.filename, 'rb')

        #Name
        f.seek(64, 0)
        surname = f.read(22).rstrip(' ')
        firstname = f.read(20).rstrip(' ')

        #Date
        f.seek(128, 0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
                                         sec)

        f.seek(138, 0)
        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
            'IHHHH')
        #~ print Num_Chan, Bytes

        #header version
        f.seek(175, 0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(
            name=firstname + ' ' + surname,
            file_origin=os.path.basename(self.filename),
        )
        seg.annotate(surname=surname)
        seg.annotate(firstname=firstname)
        seg.annotate(rec_datetime=rec_datetime)

        if not cascade:
            return seg

        # area
        f.seek(176, 0)
        zone_names = [
            'ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B', 'IMPED_E',
            'MONTAGE', 'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
            'EVENT B', 'TRIGGER'
        ]
        zones = {}
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset, 0)
            rawdata = np.frombuffer(f.read(), dtype='u' + str(Bytes))
            rawdata = rawdata.reshape((rawdata.size // Num_Chan, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos, 0)
        code = np.fromfile(f, dtype='u2', count=Num_Chan)

        units = {
            -1: pq.nano * pq.V,
            0: pq.uV,
            1: pq.mV,
            2: 1,
            100: pq.percent,
            101: pq.dimensionless,
            102: pq.dimensionless
        }

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos + code[c] * 128 + 2, 0)

            label = f.read(6).strip("\x00")
            ground = f.read(6).strip("\x00")
            logical_min, logical_max, logical_ground, physical_min, physical_max = f.read_f(
                'iiiii')
            k, = f.read_f('h')
            if k in units.keys():
                unit = units[k]
            else:
                unit = pq.uV

            f.seek(8, 1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min

            if lazy:
                signal = [] * unit
            else:
                factor = float(physical_max -
                               physical_min) / float(logical_max -
                                                     logical_min + 1)
                signal = (rawdata[:, c].astype('f') -
                          logical_ground) * factor * unit
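            # illustrative arithmetic: a 16-bit channel with a physical range
            # of -3200..3200 uV has factor = 6400 / 65536, about 0.0977 uV
            # per count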

            anaSig = AnalogSignal(signal,
                                  sampling_rate=sampling_rate,
                                  name=label,
                                  channel_index=c)
            if lazy:
                anaSig.lazy_shape = None
            anaSig.annotate(ground=ground)

            seg.analogsignals.append(anaSig)

        sampling_rate = np.mean(
            [anaSig.sampling_rate for anaSig in seg.analogsignals]) * pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            triggers = np.frombuffer(
                f.read(length),
                dtype=[('pos', 'u4'), ('label', label_dtype)],
            )
            ea = EventArray(name=zname[0] + zname[1:].lower())
            if not lazy:
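                # keep only triggers that fall inside the recorded data and
                # drop null (position 0) padding entries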
                keep = (triggers['pos'] >= triggers['pos'][0]) & (
                    triggers['pos'] < rawdata.shape[0]) & (triggers['pos'] !=
                                                           0)
                triggers = triggers[keep]
                ea.labels = triggers['label'].astype('S')
                ea.times = (triggers['pos'] / sampling_rate).rescale('s')
            else:
                ea.lazy_shape = triggers.size
            seg.eventarrays.append(ea)

        # Read Event A and B
        # Not so well tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            epochs = np.frombuffer(f.read(length),
                                   dtype=[
                                       ('label', 'u4'),
                                       ('start', 'u4'),
                                       ('stop', 'u4'),
                                   ])
            ep = EpochArray(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = (epochs['start'] > 0) & (
                    epochs['start'] < rawdata.shape[0]) & (epochs['stop'] <
                                                           rawdata.shape[0])
                epochs = epochs[keep]
                ep.labels = epochs['label'].astype('S')
                ep.times = (epochs['start'] / sampling_rate).rescale('s')
                ep.durations = ((epochs['stop'] - epochs['start']) /
                                sampling_rate).rescale('s')
            else:
                ep.lazy_shape = epochs.size
            seg.epocharrays.append(ep)

        seg.create_many_to_one_relationship()
        return seg
Example #14
    def read_block(
            self,
            # the 2 first keyword arguments are imposed by neo.io API
            lazy=False,
            cascade=True):
        """
        Return a Block.

        """
        def count_samples(m_length):
            """
            Count the number of signal samples available in a type 5 data block
            of length m_length

            """

            # for information about type 5 data block, see [1]
            count = int((m_length - 6) / 2 - 2)
            # -6 corresponds to the header of block 5, and the -2 take into
            # account the fact that last 2 values are not available as the 4
            # corresponding bytes are coding the time stamp of the beginning
            # of the block
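            # worked example (illustrative): m_length = 30 gives
            # (30 - 6) / 2 - 2 = 10 usable samples, the final two int16
            # slots (4 bytes) holding the time stamp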
            return count

        # create the neo Block that will be returned at the end
        blck = Block(file_origin=os.path.basename(self.filename))
        blck.file_origin = os.path.basename(self.filename)

        fid = open(self.filename, 'rb')

        # NOTE: in the following, the word "block" is used in the sense used in
        # the alpha-omega specifications (ie a data chunk in the file), rather
        # than in the sense of the usual Block object in neo

        # step 1: read the headers of all the data blocks to load the file
        # structure

        pos_block = 0  # position of the current block in the file
        file_blocks = []  # list of data blocks available in the file

        if not cascade:
            # we read only the main header

            m_length, m_TypeBlock = struct.unpack('Hcx', fid.read(4))
            # m_TypeBlock should be 'h', as we read the first block
            block = HeaderReader(
                fid, dict_header_type.get(m_TypeBlock, Type_Unknown)).read_f()
            block.update({
                'm_length': m_length,
                'm_TypeBlock': m_TypeBlock,
                'pos': pos_block
            })
            file_blocks.append(block)

        else:  # cascade == True

            seg = Segment(file_origin=os.path.basename(self.filename))
            seg.file_origin = os.path.basename(self.filename)
            blck.segments.append(seg)

            while True:
                first_4_bytes = fid.read(4)
                if len(first_4_bytes) < 4:
                    # we have reached the end of the file
                    break
                else:
                    m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)

                block = HeaderReader(
                    fid, dict_header_type.get(m_TypeBlock,
                                              Type_Unknown)).read_f()
                block.update({
                    'm_length': m_length,
                    'm_TypeBlock': m_TypeBlock,
                    'pos': pos_block
                })

                if m_TypeBlock == '2':
                    # The beginning of the block of type '2' is identical for
                    # all types of channels, but the following part depends on
                    # the type of channel. So we need a special case here.

                    # WARNING: How to check the type of channel is not
                    # described in the documentation. So here I use what is
                    # proposed in the C code [2].
                    # According to this C code, it seems that the 'm_isAnalog'
                    # is used to distinguish analog and digital channels, and
                    # 'm_Mode' encodes the type of analog channel:
                    # 0 for continuous, 1 for level, 2 for external trigger.
                    # But in some files, I found channels that seemed to be
                    # continuous channels with 'm_Modes' = 128 or 192. So I
                    # decided to consider every channel with 'm_Modes'
                    # different from 1 or 2 as continuous. I also couldn't
                    # check that values of 1 and 2 are really for level and
                    # external trigger as I had no test files containing data
                    # of these types.

                    type_subblock = 'unknown_channel_type(m_Mode=' \
                                    + str(block['m_Mode'])+ ')'
                    description = Type2_SubBlockUnknownChannels
                    block.update({'m_Name': 'unknown_name'})
                    if block['m_isAnalog'] == 0:
                        # digital channel
                        type_subblock = 'digital'
                        description = Type2_SubBlockDigitalChannels
                    elif block['m_isAnalog'] == 1:
                        # analog channel
                        if block['m_Mode'] == 1:
                            # level channel
                            type_subblock = 'level'
                            description = Type2_SubBlockLevelChannels
                        elif block['m_Mode'] == 2:
                            # external trigger channel
                            type_subblock = 'external_trigger'
                            description = Type2_SubBlockExtTriggerChannels
                        else:
                            # continuous channel
                            type_subblock = 'continuous(Mode' \
                                            + str(block['m_Mode']) +')'
                            description = Type2_SubBlockContinuousChannels

                    subblock = HeaderReader(fid, description).read_f()

                    block.update(subblock)
                    block.update({'type_subblock': type_subblock})

                file_blocks.append(block)
                pos_block += m_length
                fid.seek(pos_block)

            # step 2: find the available channels
            list_chan = []  # list containing indexes of channel blocks
            for ind_block, block in enumerate(file_blocks):
                if block['m_TypeBlock'] == '2':
                    list_chan.append(ind_block)

            # step 3: find blocks containing data for the available channels
            list_data = []  # list of lists of indexes of data blocks
            # corresponding to each channel
            for ind_chan, chan in enumerate(list_chan):
                list_data.append([])
                num_chan = file_blocks[chan]['m_numChannel']
                for ind_block, block in enumerate(file_blocks):
                    if block['m_TypeBlock'] == '5':
                        if block['m_numChannel'] == num_chan:
                            list_data[ind_chan].append(ind_block)

            # step 4: compute the length (number of samples) of the channels
            chan_len = np.zeros(len(list_data), dtype=int)
            for ind_chan, list_blocks in enumerate(list_data):
                for ind_block in list_blocks:
                    chan_len[ind_chan] += count_samples(
                        file_blocks[ind_block]['m_length'])

            # step 5: find channels for which data are available
            ind_valid_chan = np.nonzero(chan_len)[0]

            # step 6: load the data
            # TODO give the possibility to load data as AnalogSignalArrays
            for ind_chan in ind_valid_chan:
                list_blocks = list_data[ind_chan]
                ind = 0  # index in the data vector

                # read time stamp for the beginning of the signal
                form = '<l'  # reading format
                ind_block = list_blocks[0]
                count = count_samples(file_blocks[ind_block]['m_length'])
                fid.seek(file_blocks[ind_block]['pos'] + 6 + count * 2)
                buf = fid.read(struct.calcsize(form))
                val = struct.unpack(form, buf)
                start_index = val[0]

                # WARNING: in the following, blocks are read supposing that they
                # are all contiguous and sorted in time. I don't know if it's
                # always the case. Maybe we should use the time stamp of each
                # data block to choose where to put the read data in the array.
                if not lazy:
                    temp_array = np.empty(chan_len[ind_chan], dtype=np.int16)
                    # NOTE: we could directly create an empty AnalogSignal and
                    # load the data in it, but it is much faster to load data
                    # in a temporary numpy array and create the AnalogSignals
                    # from this temporary array
                    for ind_block in list_blocks:
                        count = count_samples(
                            file_blocks[ind_block]['m_length'])
                        fid.seek(file_blocks[ind_block]['pos'] + 6)
                        temp_array[ind:ind+count] = \
                            np.fromfile(fid, dtype = np.int16, count = count)
                        ind += count

                sampling_rate = \
                    file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
                t_start = (start_index / sampling_rate).simplified
                if lazy:
                    ana_sig = AnalogSignal([],
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)
                    ana_sig.lazy_shape = chan_len[ind_chan]
                else:
                    ana_sig = AnalogSignal(temp_array,
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)

                ana_sig.channel_index = \
                            file_blocks[list_chan[ind_chan]]['m_numChannel']
                ana_sig.annotate(channel_name = \
                            file_blocks[list_chan[ind_chan]]['m_Name'])
                ana_sig.annotate(channel_type = \
                            file_blocks[list_chan[ind_chan]]['type_subblock'])
                seg.analogsignals.append(ana_sig)

        fid.close()

        if file_blocks[0]['m_TypeBlock'] == 'h':  # this should always be true
            blck.rec_datetime = datetime.datetime(\
                file_blocks[0]['m_date_year'],
                file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'],
                file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
            # the 10000 is here to convert m_time_hsecond from centisecond
            # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version=version)
            if cascade:
                seg.rec_datetime = blck.rec_datetime.replace()
                # I couldn't find a simple copy function for datetime,
                # using replace without arguments is a twisted way to make a
                # copy
                seg.annotate(alphamap_version=version)
        if cascade:
            populate_RecordingChannel(blck, remove_from_annotation=True)
            blck.create_many_to_one_relationship()

        return blck
Example #15
    def read_segment(self, lazy=False, cascade=True):
        fid = open(self.filename, 'rb')
        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        # ~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=global_header['version'])
        seg.annotate(comment=global_header['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(global_header['nvar']):
            entity_header = HeaderReader(fid, EntityHeader).read_f(
                offset=offset + i * 208)
            entity_header['name'] = entity_header['name'].replace('\x00', '')
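            # entity_header['type'] codes handled below: 0 neuron, 1 event,
            # 2 interval, 3 spike train + waveforms, 4 population vector,
            # 5 analog signal, 6 marker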

            #print 'i',i, entityHeader['type']

            if entity_header['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    t_stop=global_header['tend'] /
                    global_header['freq'] * pq.s,
                    name=entity_header['name'])
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    event_times = event_times.astype('f8') / global_header[
                        'freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = Event(times=event_times, labels=labels,
                             channel_name=entity_header['name'])
                if lazy:
                    evar.lazy_shape = entity_header['n']
                seg.events.append(evar)

            if entity_header['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    start_times = start_times.astype('f8') / global_header[
                        'freq'] * pq.s
                    stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                           shape=(entity_header['n']),
                                           offset=entity_header['offset'] +
                                           entity_header['n'] * 4)
                    stop_times = stop_times.astype('f') / global_header[
                        'freq'] * pq.s
                epar = Epoch(times=start_times,
                             durations=stop_times - start_times,
                             labels=np.array([''] * start_times.size,
                                             dtype='S'),
                             channel_name=entity_header['name'])
                if lazy:
                    epar.lazy_shape = entity_header['n']
                seg.epochs.append(epar)

            if entity_header['type'] == 3:
                # spike train and waveforms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s

                    waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                                          shape=(entity_header['n'], 1,
                                                 entity_header['NPointsWave']),
                                          offset=entity_header['offset'] +
                                          entity_header['n'] * 4)
                    waveforms = (waveforms.astype('f') *
                                 entity_header['ADtoMV'] +
                                 entity_header['MVOffset']) * pq.mV
                t_stop = global_header['tend'] / global_header['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    #~ t_stop = max(globalHeader['tend']/
                    #~ globalHeader['freq']*pq.s,max(spike_times)),
                    t_stop=t_stop, name=entity_header['name'],
                    waveforms=waveforms,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms)
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 4:
                # popvectors
                pass

            if entity_header['type'] == 5:
                # analog
                timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entity_header['n']),
                                       offset=entity_header['offset'])
                timestamps = timestamps.astype('f8') / global_header['freq']
                fragment_starts = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                fragment_starts = fragment_starts.astype('f8') / global_header[
                    'freq']
                t_start = timestamps[0] - fragment_starts[0] / float(
                    entity_header['WFrequency'])
                del timestamps, fragment_starts

                if lazy:
                    signal = [] * pq.mV
                else:
                    signal = np.memmap(self.filename, np.dtype('i2'), 'r',
                                       shape=(entity_header['NPointsWave']),
                                       offset=entity_header['offset'])
                    signal = signal.astype('f')
                    signal *= entity_header['ADtoMV']
                    signal += entity_header['MVOffset']
                    signal = signal * pq.mV

                ana_sig = AnalogSignal(
                    signal=signal, t_start=t_start * pq.s,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    name=entity_header['name'],
                    channel_index=entity_header['WireNumber'])
                if lazy:
                    ana_sig.lazy_shape = entity_header['NPointsWave']
                seg.analogsignals.append(ana_sig)

            if entity_header['type'] == 6:
                # markers: TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                      shape=(entity_header['n']),
                                      offset=entity_header['offset'])
                    times = times.astype('f8') / global_header['freq'] * pq.s
                    fid.seek(entity_header['offset'] + entity_header['n'] * 4)
                    markertype = fid.read(64).replace(b'\x00', b'').decode()
                    labels = np.memmap(
                        self.filename, np.dtype(
                            'S' + str(entity_header['MarkerLength'])),
                        'r', shape=(entity_header['n']),
                        offset=entity_header['offset'] +
                        entity_header['n'] * 4 + 64)
                ea = Event(times=times,
                           labels=labels.view(np.ndarray),
                           name=entity_header['name'],
                           channel_index=entity_header['WireNumber'],
                           marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entity_header['n']
                seg.events.append(ea)

        seg.create_many_to_one_relationship()
        return seg
Example #16
    def read_segment(self, cascade=True, lazy=False):
        """
        Arguments:
        """
        f = StructFile(open(self.filename, 'rb'))

        # Name
        f.seek(64, 0)
        surname = f.read(22).decode('ascii').rstrip(' ')
        firstname = f.read(20).decode('ascii').rstrip(' ')

        #Date
        f.seek(128, 0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
                                         sec)

        f.seek(138, 0)
        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
            'IHHHH')
        #~ print Num_Chan, Bytes

        #header version
        f.seek(175, 0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(name=str(firstname + ' ' + surname),
                      file_origin=os.path.basename(self.filename))
        seg.annotate(surname=surname)
        seg.annotate(firstname=firstname)
        seg.annotate(rec_datetime=rec_datetime)

        if not cascade:
            f.close()
            return seg

        # area
        f.seek(176, 0)
        zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B',
                      'IMPED_E', 'MONTAGE',
                      'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
                      'EVENT B', 'TRIGGER']
        zones = {}
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset, 0)
            rawdata = np.frombuffer(f.read(), dtype='u' + str(Bytes))
            rawdata = rawdata.reshape((-1, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos, 0)
        code = np.frombuffer(f.read(Num_Chan * 2), dtype='u2', count=Num_Chan)

        units = {-1: pq.nano * pq.V, 0: pq.uV, 1: pq.mV, 2: 1, 100: pq.percent,
                 101: pq.dimensionless, 102: pq.dimensionless}

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos + code[c] * 128 + 2, 0)

            label = f.read(6).strip(b"\x00").decode('ascii')
            ground = f.read(6).strip(b"\x00").decode('ascii')
            (logical_min, logical_max, logical_ground, physical_min,
             physical_max) = f.read_f('iiiii')
            k, = f.read_f('h')
            if k in units.keys():
                unit = units[k]
            else:
                unit = pq.uV

            f.seek(8, 1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min

            if lazy:
                signal = [] * unit
            else:
                factor = float(physical_max - physical_min) / float(
                    logical_max - logical_min + 1)
                signal = (rawdata[:, c].astype(
                    'f') - logical_ground) * factor * unit

            ana_sig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                   name=str(label), channel_index=c)
            if lazy:
                ana_sig.lazy_shape = None
            ana_sig.annotate(ground=ground)

            seg.analogsignals.append(ana_sig)

        sampling_rate = np.mean(
            [ana_sig.sampling_rate for ana_sig in seg.analogsignals]) * pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            triggers = np.frombuffer(f.read(length), dtype=[('pos', 'u4'), (
                'label', label_dtype)])
            if not lazy:
                keep = (triggers['pos'] >= triggers['pos'][0]) & (
                    triggers['pos'] < rawdata.shape[0]) & (
                    triggers['pos'] != 0)
                triggers = triggers[keep]
                ea = Event(name=zname[0] + zname[1:].lower(),
                           labels=triggers['label'].astype('S'),
                           times=(triggers['pos'] / sampling_rate).rescale('s'))
            else:
                ea = Event(name=zname[0] + zname[1:].lower())
                ea.lazy_shape = triggers.size
            seg.events.append(ea)

        # Read Event A and B
        # Not so well tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            epochs = np.frombuffer(f.read(length),
                                   dtype=[('label', 'u4'), ('start', 'u4'),
                                          ('stop', 'u4'), ])
            ep = Epoch(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = (epochs['start'] > 0) & (
                    epochs['start'] < rawdata.shape[0]) & (
                    epochs['stop'] < rawdata.shape[0])
                epochs = epochs[keep]
                ep = Epoch(name=zname[0] + zname[1:].lower(),
                           labels=epochs['label'].astype('S'),
                           times=(epochs['start'] / sampling_rate).rescale('s'),
                           durations=((epochs['stop'] - epochs['start']) / sampling_rate).rescale('s'))
            else:
                ep = Epoch(name=zname[0] + zname[1:].lower())
                ep.lazy_shape = epochs.size
            seg.epochs.append(ep)

        seg.create_many_to_one_relationship()
        f.close()
        return seg
Example #17
    def read_segment(
        self,
        lazy=False,
        cascade=True,
    ):

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        #~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=globalHeader['version'])
        seg.annotate(comment=globalHeader['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(globalHeader['nvar']):
            entityHeader = HeaderReader(
                fid, EntityHeader).read_f(offset=offset + i * 208)
            entityHeader['name'] = entityHeader['name'].replace('\x00', '')

            #print 'i',i, entityHeader['type']

            if entityHeader['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    spike_times = spike_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                    t_stop=globalHeader['tend'] / globalHeader['freq'] * pq.s,
                    name=entityHeader['name'],
                )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index=entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    event_times = event_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = EventArray(times=event_times,
                                  labels=labels,
                                  channel_name=entityHeader['name'])
                if lazy:
                    evar.lazy_shape = entityHeader['n']
                seg.eventarrays.append(evar)

            if entityHeader['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    start_times = start_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                    stop_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4,
                    )
                    stop_times = stop_times.astype(
                        'f') / globalHeader['freq'] * pq.s
                epar = EpochArray(times=start_times,
                                  durations=stop_times - start_times,
                                  labels=np.array([''] * start_times.size,
                                                  dtype='S'),
                                  channel_name=entityHeader['name'])
                if lazy:
                    epar.lazy_shape = entityHeader['n']
                seg.epocharrays.append(epar)

            if entityHeader['type'] == 3:
                # spike train and waveforms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    spike_times = spike_times.astype(
                        'f8') / globalHeader['freq'] * pq.s

                    waveforms = np.memmap(
                        self.filename,
                        np.dtype('i2'),
                        'r',
                        shape=(entityHeader['n'], 1,
                               entityHeader['NPointsWave']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4,
                    )
                    waveforms = (waveforms.astype('f') * entityHeader['ADtoMV']
                                 + entityHeader['MVOffset']) * pq.mV
                t_stop = globalHeader['tend'] / globalHeader['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                    #~ t_stop = max(globalHeader['tend']/globalHeader['freq']*pq.s,max(spike_times)),
                    t_stop=t_stop,
                    name=entityHeader['name'],
                    waveforms=waveforms,
                    sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms,
                )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index=entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 4:
                # popvectors
                pass

            if entityHeader['type'] == 5:
                # analog

                timestamps = np.memmap(
                    self.filename,
                    np.dtype('i4'),
                    'r',
                    shape=(entityHeader['n']),
                    offset=entityHeader['offset'],
                )
                timestamps = timestamps.astype('f8') / globalHeader['freq']
                # fragment start indexes (in samples) follow the n int32
                # timestamps in the .nex layout
                fragmentStarts = np.memmap(
                    self.filename,
                    np.dtype('i4'),
                    'r',
                    shape=(entityHeader['n']),
                    offset=entityHeader['offset'] + entityHeader['n'] * 4,
                )
                # the fragment starts are sample indexes, so they are
                # converted to seconds with the signal sampling rate rather
                # than the global clock frequency
                t_start = timestamps[0] - fragmentStarts[0] / float(
                    entityHeader['WFrequency'])
                del timestamps, fragmentStarts

                if lazy:
                    signal = [] * pq.mV
                else:
                    # the data points follow the timestamp and fragment-index
                    # arrays (n int32 values each), hence the 8 * n byte offset
                    signal = np.memmap(
                        self.filename,
                        np.dtype('i2'),
                        'r',
                        shape=(entityHeader['NPointsWave']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 8,
                    )
                    signal = signal.astype('f')
                    signal *= entityHeader['ADtoMV']
                    signal += entityHeader['MVOffset']
                    signal = signal * pq.mV

                anaSig = AnalogSignal(
                    signal=signal,
                    t_start=t_start * pq.s,
                    sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                    name=entityHeader['name'],
                    channel_index=entityHeader['WireNumber'])
                if lazy:
                    anaSig.lazy_shape = entityHeader['NPointsWave']
                seg.analogsignals.append(anaSig)

            if entityHeader['type'] == 6:
                # markers  : TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    times = times.astype('f8') / globalHeader['freq'] * pq.s
                    fid.seek(entityHeader['offset'] + entityHeader['n'] * 4)
                    markertype = fid.read(64).replace('\x00', '')
                    labels = np.memmap(
                        self.filename,
                        np.dtype('S' + str(entityHeader['MarkerLength'])),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4 +
                        64)
                ea = EventArray(times=times,
                                labels=labels.view(np.ndarray),
                                name=entityHeader['name'],
                                channel_index=entityHeader['WireNumber'],
                                marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entityHeader['n']
                seg.eventarrays.append(ea)

        create_many_to_one_relationship(seg)
        return seg
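
A minimal usage sketch for the reader above (the NexIO class name and its constructor argument are assumptions; only read_segment comes from the example):

    # sketch only: NexIO and its filename argument are assumptions
    reader = NexIO(filename='data.nex')
    seg = reader.read_segment(lazy=False, cascade=True)
    for st in seg.spiketrains:
        print(st.name, st.annotations.get('channel_index'), st.t_stop)
    for anasig in seg.analogsignals:
        print(anasig.name, anasig.sampling_rate)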
Example #18
0
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """
        Read in a segment.

        Arguments:
            load_spike_waveform : whether to load the spike waveforms (default True)

        """

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadata
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader.pop('Year'),
            globalHeader.pop('Month'),
            globalHeader.pop('Day'),
            globalHeader.pop('Hour'),
            globalHeader.pop('Minute'),
            globalHeader.pop('Second')
        )
        seg.file_origin = os.path.basename(self.filename)

        for key, val in globalHeader.iteritems():
            seg.annotate(**{key: val})

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader['NumDSPChannels']):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
            channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
            dspChannelHeaders[channelHeader['Channel']] = channelHeader
            maxunit = max(channelHeader['NUnits'], maxunit)
            maxchan = max(channelHeader['Channel'], maxchan)

        # event channel header
        eventHeaders = { }
        for _ in range(globalHeader['NumEventChannels']):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader['Channel']] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader['NumSlowChannels']):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader

        ## Step 2: a first pass over the data blocks to count sizes
        # signal
        # integer dtypes so these counters can be used directly as array
        # sizes and slice bounds
        nb_samples = np.zeros(len(slowChannelHeaders), dtype='i8')
        sample_positions = np.zeros(len(slowChannelHeaders), dtype='i8')
        t_starts = np.zeros(len(slowChannelHeaders), dtype='f')

        #spiketimes and waveform
        nb_spikes = np.zeros((maxchan+1, maxunit+1) ,dtype='i')
        wf_sizes = np.zeros((maxchan+1, maxunit+1, 2) ,dtype='i')

        # eventarrays
        nb_events = { }
        #maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            #maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader['Channel']
            unit = dataBlockHeader['Unit']
            n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader['NumberOfWordsInWaveform']
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp']*2.**32 +
                    dataBlockHeader['TimeStamp'])
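            # the timestamp is stored on 5 bytes: one high byte plus a 32-bit
            # word; recombining them gives the tick count in units of
            # 1 / globalHeader['ADFrequency'] seconds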

            if dataBlockHeader['Type'] == 1:
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader['Type'] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                # continuous signal: record the time of the channel's first
                # data block before counting its samples
                if nb_samples[chan] == 0:
                    t_starts[chan] = time
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2

        ## Step 3: allocate memory, then a second pass for reading (if not lazy)
        if not lazy:
            # allocating mem for signal
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit], dtype='f')
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros((nb_spikes[chan, unit], n1, n2), dtype='f4')
            pos_spikes = np.zeros(nb_spikes.shape, dtype='i')

            # allocating mem for event
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = {
                    'times': np.zeros(nb, dtype='f'),
                    'labels': np.zeros(nb, dtype='S4')
                }
                eventpositions[chan] = 0
                
            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader['Channel']
                n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader['NumberOfWordsInWaveform']
                time = dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2.**32 + dataBlockHeader['TimeStamp']
                time /= globalHeader['ADFrequency']

                if n2 < 0:
                    break
                if dataBlockHeader['Type'] == 1:
                    # spike
                    unit = dataBlockHeader['Unit']
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = np.fromstring(
                            fid.read(n1 * n2 * 2), dtype='i2').reshape(n1, n2).astype('f4')
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1

                elif dataBlockHeader['Type'] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan]['times'][pos] = time
                    evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
                    eventpositions[chan] += 1

                elif dataBlockHeader['Type'] == 5:
                    # signal
                    data = np.fromstring(fid.read(n2 * 2), dtype='i2').astype('f4')
                    sigarrays[chan][sample_positions[chan]:sample_positions[chan] + data.size] = data
                    sample_positions[chan] += data.size


        ## Step 4: create neo objects
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
                labels = None
            else:
                times = evarrays[chan]['times']
                labels = evarrays[chan]['labels']
            ea = EventArray(
                times*pq.s,
                labels=labels,
                channel_name=eventHeaders[chan]['Name'],
                channel_index=chan
            )
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

            
        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = []
            else:
                if globalHeader['Version'] == 100 or globalHeader['Version'] == 101:
                    gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain'] * 1000.)
                elif globalHeader['Version'] == 102:
                    gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain'] *
                                    slowChannelHeaders[chan]['PreampGain'])
                elif globalHeader['Version'] >= 103:
                    gain = globalHeader['SlowMaxMagnitudeMV'] / (
                        .5 * (2 ** globalHeader['BitsPerSpikeSample']) *
                        slowChannelHeaders[chan]['Gain'] *
                        slowChannelHeaders[chan]['PreampGain'])
                signal = sigarrays[chan] * gain
            anasig = AnalogSignal(signal * pq.V,
                sampling_rate=float(slowChannelHeaders[chan]['ADFreq']) * pq.Hz,
                t_start=t_starts[chan] * pq.s,
                channel_index=slowChannelHeaders[chan]['Channel'],
                channel_name=slowChannelHeaders[chan]['Name'],
            )
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)
            
        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan, unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader['Version'] < 103:
                        gain = 3000. / (2048 * dspChannelHeaders[chan]['Gain'] * 1000.)
                    elif globalHeader['Version'] >= 103 and globalHeader['Version'] < 105:
                        gain = globalHeader['SpikeMaxMagnitudeMV'] / (
                            .5 * 2.**(globalHeader['BitsPerSpikeSample']) * 1000.)
                    elif globalHeader['Version'] >= 105:
                        # versions 105 and above, so that version 105 itself
                        # does not fall through with `gain` undefined
                        gain = globalHeader['SpikeMaxMagnitudeMV'] / (
                            .5 * 2.**(globalHeader['BitsPerSpikeSample']) *
                            globalHeader['SpikePreAmpGain'])
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(
                times,
                units='s',
                t_stop=t_stop * pq.s,
                waveforms=waveforms
            )
            sptr.annotate(unit_name=dspChannelHeaders[chan]['Name'])
            sptr.annotate(channel_index=chan)
            for key, val in dspChannelHeaders[chan].iteritems():
                sptr.annotate(**{key: val})

            if lazy:
                sptr.lazy_shape = nb_spikes[chan,unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
Example #19
0
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """

        """

        fid = open(self.filename, "rb")
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadata
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader["Year"],
            globalHeader["Month"],
            globalHeader["Day"],
            globalHeader["Hour"],
            globalHeader["Minute"],
            globalHeader["Second"],
        )
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(plexon_version=globalHeader["Version"])

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader["NumDSPChannels"]):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader["Template"] = np.array(channelHeader["Template"]).reshape((5, 64))
            channelHeader["Boxes"] = np.array(channelHeader["Boxes"]).reshape((5, 2, 4))
            dspChannelHeaders[channelHeader["Channel"]] = channelHeader
            maxunit = max(channelHeader["NUnits"], maxunit)
            maxchan = max(channelHeader["Channel"], maxchan)

        # event channel header
        eventHeaders = {}
        for _ in range(globalHeader["NumEventChannels"]):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader["Channel"]] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader["NumSlowChannels"]):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader["Channel"]] = slowChannelHeader

        ## Step 2: a first pass over the data blocks to count sizes
        # signal
        # integer dtypes so these counters can be used directly as array
        # sizes and slice bounds
        nb_samples = np.zeros(len(slowChannelHeaders), dtype="i8")
        sample_positions = np.zeros(len(slowChannelHeaders), dtype="i8")
        t_starts = np.zeros(len(slowChannelHeaders), dtype="f")

        # spiketimes and waveform
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype="i")
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype="i")

        # eventarrays
        nb_events = {}
        # maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            # maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader["Channel"]
            unit = dataBlockHeader["Unit"]
            n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
            time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]

            if dataBlockHeader["Type"] == 1:
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader["Type"] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader["Type"] == 5:
                # continuous signal
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2
                if nb_samples[chan] == 0:
                    t_starts[chan] = time

        ## Step 3: allocate memory, then a second pass for reading (if not lazy)
        if not lazy:
            # allocating mem for signal
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit], dtype="f")
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros((nb_spikes[chan, unit], n1, n2), dtype="f4")
            pos_spikes = np.zeros(nb_spikes.shape, dtype="i")

            # allocating mem for event
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = np.zeros(nb, dtype="f")
                eventpositions[chan] = 0

            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader["Channel"]
                n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
                time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]
                time /= globalHeader["ADFrequency"]

                if n2 < 0:
                    break
                if dataBlockHeader["Type"] == 1:
                    # spike
                    unit = dataBlockHeader["Unit"]
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = (
                            np.fromstring(fid.read(n1 * n2 * 2), dtype="i2").reshape(n1, n2).astype("f4")
                        )
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1

                elif dataBlockHeader["Type"] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan][pos] = time
                    eventpositions[chan] += 1

                elif dataBlockHeader["Type"] == 5:
                    # signal
                    data = np.fromstring(fid.read(n2 * 2), dtype="i2").astype("f4")
                    sigarrays[chan][sample_positions[chan] : sample_positions[chan] + data.size] = data
                    sample_positions[chan] += data.size

        ## Step 4: create neo objects
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
            else:
                times = evarrays[chan]
            ea = EventArray(times * pq.s, channel_name=eventHeaders[chan]["Name"], channel_index=chan)
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = []
            else:
                if globalHeader["Version"] == 100 or globalHeader["Version"] == 101:
                    gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * 1000.0)
                elif globalHeader["Version"] == 102:
                    gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * slowChannelHeaders[chan]["PreampGain"])
                elif globalHeader["Version"] >= 103:
                    gain = globalHeader["SlowMaxMagnitudeMV"] / (
                        0.5
                        * (2 ** globalHeader["BitsPerSpikeSample"])
                        * slowChannelHeaders[chan]["Gain"]
                        * slowChannelHeaders[chan]["PreampGain"]
                    )
                signal = sigarrays[chan] * gain
            anasig = AnalogSignal(
                signal * pq.V,
                sampling_rate=float(slowChannelHeaders[chan]["ADFreq"]) * pq.Hz,
                t_start=t_starts[chan] * pq.s,
                channel_index=slowChannelHeaders[chan]["Channel"],
                channel_name=slowChannelHeaders[chan]["Name"],
            )
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)

        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan, unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader["Version"] < 103:
                        gain = 3000.0 / (2048 * dspChannelHeaders[chan]["Gain"] * 1000.0)
                    elif globalHeader["Version"] >= 103 and globalHeader["Version"] < 105:
                        gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                            0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * 1000.0
                        )
                    elif globalHeader["Version"] > 105:
                        gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                            0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * globalHeader["SpikePreAmpGain"]
                        )
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(times, units="s", t_stop=t_stop * pq.s, waveforms=waveforms)
            sptr.annotate(unit_name=dspChannelHeaders[chan]["Name"])
            sptr.annotate(channel_index=chan)
            if lazy:
                sptr.lazy_shape = nb_spikes[chan, unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
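
A hedged sketch contrasting lazy and full loads with this reader (the PlexonIO class name and its constructor argument are assumptions):

    # sketch only: PlexonIO and its filename argument are assumptions
    reader = PlexonIO(filename='data.plx')
    # lazy load: structure and annotations only, with lazy_shape hints
    seg = reader.read_segment(lazy=True)
    for st in seg.spiketrains:
        print(st.annotations.get('channel_index'), st.lazy_shape)
    # full load, skipping the waveforms to save memory
    seg = reader.read_segment(lazy=False, load_spike_waveform=False)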
Example #20
0
    def read_segment(self,
                     import_neuroshare_segment=True,
                     lazy=False,
                     cascade=True):
        """
        Arguments:
            import_neuroshare_segment: if True, import each neuroshare segment
                entity as a SpikeTrain with its associated waveforms;
                otherwise skip these entities entirely.

        """
        seg = Segment(file_origin=os.path.basename(self.filename), )

        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        else:
            # no DLL loading path is implemented for other platforms
            # (e.g. darwin)
            raise NotImplementedError(
                'neuroshare DLL loading is not supported on ' + sys.platform)
        neuroshare = DllWithError(neuroshare)

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
        seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.' +
                     str(info.dwAPIVersionMin))

        if not cascade:
            return seg

        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename),
                               ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo),
                                  ctypes.sizeof(fileinfo))

        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo(hFile, dwEntityID,
                                        ctypes.byref(entityInfo),
                                        ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo(hFile, dwEntityID,
                                           ctypes.byref(pEventInfo),
                                           ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0:  #TEXT
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  #CVS
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:  # 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:  # 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:  # 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name=str(entityInfo.szEntityLabel), )
                if not lazy:
                    times = []
                    labels = []
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetEventData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times * pq.s
                    ea.labels = np.array(labels, dtype='S')
                else:
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo(hFile, dwEntityID,
                                            ctypes.byref(pAnalogInfo),
                                            ctypes.sizeof(pAnalogInfo))
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [] * pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    total_read = 0
                    while total_read < entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount -
                                                      total_read)

                        neuroshare.ns_GetAnalogData(
                            hFile, dwEntityID, dwStartIndex, dwStopIndex,
                            ctypes.byref(pdwContCount),
                            pData[total_read:].ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value

                    signal = pq.Quantity(pData,
                                         units=pAnalogInfo.szUnits,
                                         copy=False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex,
                                             ctypes.byref(pdTime))

                anaSig = AnalogSignal(
                    signal,
                    sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                    t_start=pdTime.value * pq.s,
                    name=str(entityInfo.szEntityLabel),
                )
                anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append(anaSig)

            #segment
            if (entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT'
                    and import_neuroshare_segment):

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
                                             ctypes.byref(pdwSegmentInfo),
                                             ctypes.sizeof(pdwSegmentInfo))
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(" " * 256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)

                for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo(
                        hFile, dwEntityID, dwSourceID,
                        ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo))

                if lazy:
                    # `times` is not defined yet in the lazy branch, so an
                    # empty quantity is used instead
                    sptr = SpikeTrain([] * pq.s,
                                      name=str(entityInfo.szEntityLabel),
                                      t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount * pdwSegmentInfo.dwSourceCount
                    pData = np.zeros((dwDataBufferSize), dtype='float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID = ctypes.c_uint32(0)

                    nsample = int(dwDataBufferSize)
                    times = np.empty((entityInfo.dwItemCount), dtype='f')
                    waveforms = np.empty(
                        (entityInfo.dwItemCount, nsource, nsample), dtype='f')
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetSegmentData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp),
                            pData.ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                            ctypes.byref(pdwUnitID))

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[
                            dwIndex, :, :] = pData[:nsample * nsource].reshape(
                                nsample, nsource).transpose()

                    sptr = SpikeTrain(
                        times=pq.Quantity(times, units='s', copy=False),
                        t_stop=times.max(),
                        waveforms=pq.Quantity(waveforms,
                                              units=str(
                                                  pdwSegmentInfo.szUnits),
                                              copy=False),
                        left_sweep=nsample / 2. /
                        float(pdwSegmentInfo.dSampleRate) * pq.s,
                        sampling_rate=float(pdwSegmentInfo.dSampleRate) *
                        pq.Hz,
                        name=str(entityInfo.szEntityLabel),
                    )
                seg.spiketrains.append(sptr)

            # neuralevent
            if entity_types[
                    entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
                                            ctypes.byref(pNeuralInfo),
                                            ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [] * pq.s
                    t_stop = 0 * pq.s
                else:
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData(
                        hFile, dwEntityID, dwStartIndex, dwIndexCount,
                        pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData * pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(
                    times,
                    t_stop=t_stop,
                    name=str(entityInfo.szEntityLabel),
                )
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
Example #21
0
    def read_block(self,
                   # the first 2 keyword arguments are imposed by the neo.io API
                   lazy = False,
                   cascade = True):
        """
        Return a Block.

        """

        def count_samples(m_length):
            """
            Count the number of signal samples available in a type 5 data block
            of length m_length

            """

            # for information about type 5 data blocks, see [1]
            count = int((m_length - 6) / 2 - 2)
            # -6 corresponds to the header of block 5, and the -2 takes into
            # account the fact that the last 2 values are not available, as
            # the 4 corresponding bytes code the time stamp of the beginning
            # of the block
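            # e.g. a block with m_length = 22 bytes holds (22-6)/2 - 2 = 6
            # samples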
            return count

        # create the neo Block that will be returned at the end
        blck = Block(file_origin=os.path.basename(self.filename))

        fid = open(self.filename, 'rb')

        # NOTE: in the following, the word "block" is used in the sense of the
        # alpha-omega specifications (i.e. a data chunk in the file), rather
        # than in the sense of the usual Block object in neo

        # step 1: read the headers of all the data blocks to load the file
        # structure

        pos_block = 0 # position of the current block in the file
        file_blocks = [] # list of data blocks available in the file

        if not cascade:
            # we read only the main header

            m_length, m_TypeBlock = struct.unpack('Hcx' , fid.read(4))
            # m_TypeBlock should be 'h', as we read the first block
            block = HeaderReader(fid,
                                 dict_header_type.get(m_TypeBlock,
                                                      Type_Unknown)).read_f()
            block.update({'m_length': m_length,
                          'm_TypeBlock': m_TypeBlock,
                          'pos': pos_block})
            file_blocks.append(block)

        else: # cascade == True

            seg = Segment(file_origin=os.path.basename(self.filename))
            blck.segments.append(seg)

            while True:
                first_4_bytes = fid.read(4)
                if len(first_4_bytes) < 4:
                    # we have reached the end of the file
                    break
                else:
                    m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)

                block = HeaderReader(fid,
                                dict_header_type.get(m_TypeBlock,
                                                     Type_Unknown)).read_f()
                block.update({'m_length': m_length,
                              'm_TypeBlock': m_TypeBlock,
                              'pos': pos_block})

                if m_TypeBlock == '2':
                    # The beginning of the block of type '2' is identical for
                    # all types of channels, but the following part depends on
                    # the type of channel. So we need a special case here.

                    # WARNING: How to check the type of channel is not
                    # described in the documentation. So here I use what is
                    # proposed in the C code [2].
                    # According to this C code, it seems that 'm_isAnalog'
                    # is used to distinguish analog and digital channels, and
                    # 'm_Mode' encodes the type of analog channel:
                    # 0 for continuous, 1 for level, 2 for external trigger.
                    # But in some files, I found channels that seemed to be
                    # continuous channels with 'm_Mode' = 128 or 192. So I
                    # decided to consider every channel with 'm_Mode'
                    # different from 1 or 2 as continuous. I also couldn't
                    # check that values of 1 and 2 really mean level and
                    # external trigger, as I had no test files containing
                    # data of these types.

                    type_subblock = 'unknown_channel_type(m_Mode=' \
                                    + str(block['m_Mode'])+ ')'
                    description = Type2_SubBlockUnknownChannels
                    block.update({'m_Name': 'unknown_name'})
                    if block['m_isAnalog'] == 0:
                        # digital channel
                        type_subblock = 'digital'
                        description = Type2_SubBlockDigitalChannels
                    elif block['m_isAnalog'] == 1:
                        # analog channel
                        if block['m_Mode'] == 1:
                            # level channel
                            type_subblock = 'level'
                            description = Type2_SubBlockLevelChannels
                        elif block['m_Mode'] == 2:
                            # external trigger channel
                            type_subblock = 'external_trigger'
                            description = Type2_SubBlockExtTriggerChannels
                        else:
                            # continuous channel
                            type_subblock = 'continuous(Mode' \
                                            + str(block['m_Mode']) +')'
                            description = Type2_SubBlockContinuousChannels

                    subblock = HeaderReader(fid, description).read_f()

                    block.update(subblock)
                    block.update({'type_subblock': type_subblock})

                file_blocks.append(block)
                pos_block += m_length
                fid.seek(pos_block)

            # step 2: find the available channels
            list_chan = [] # list containing indexes of channel blocks
            for ind_block, block in enumerate(file_blocks):
                if block['m_TypeBlock'] == '2':
                    list_chan.append(ind_block)

            # step 3: find blocks containing data for the available channels
            list_data = [] # list of lists of indexes of data blocks
                           # corresponding to each channel
            for ind_chan, chan in enumerate(list_chan):
                list_data.append([])
                num_chan = file_blocks[chan]['m_numChannel']
                for ind_block, block in enumerate(file_blocks):
                    if block['m_TypeBlock'] == '5':
                        if block['m_numChannel'] == num_chan:
                            list_data[ind_chan].append(ind_block)


            # step 4: compute the length (number of samples) of the channels
            chan_len = np.zeros(len(list_data), dtype=int)
            for ind_chan, list_blocks in enumerate(list_data):
                for ind_block in list_blocks:
                    chan_len[ind_chan] += count_samples(
                                          file_blocks[ind_block]['m_length'])

            # step 5: find channels for which data are available
            ind_valid_chan = np.nonzero(chan_len)[0]

            # step 6: load the data
            # TODO give the possibility to load data as AnalogSignalArrays
            for ind_chan in ind_valid_chan:
                list_blocks = list_data[ind_chan]
                ind = 0 # index in the data vector

                # read time stamp for the beginning of the signal
                form = '<l' # reading format
                ind_block = list_blocks[0]
                count = count_samples(file_blocks[ind_block]['m_length'])
                fid.seek(file_blocks[ind_block]['pos']+6+count*2)
                buf = fid.read(struct.calcsize(form))
                val = struct.unpack(form , buf)
                start_index = val[0]

                # WARNING: in the following, blocks are read assuming that
                # they are all contiguous and sorted in time. I don't know if
                # it's always the case. Maybe we should use the time stamp of
                # each data block to choose where to put the read data in the
                # array.
                if not lazy:
                    temp_array = np.empty(chan_len[ind_chan], dtype = np.int16)
                    # NOTE: we could directly create an empty AnalogSignal and
                    # load the data in it, but it is much faster to load data
                    # in a temporary numpy array and create the AnalogSignals
                    # from this temporary array
                    for ind_block in list_blocks:
                        count = count_samples(
                                file_blocks[ind_block]['m_length'])
                        fid.seek(file_blocks[ind_block]['pos']+6)
                        temp_array[ind:ind+count] = \
                            np.fromfile(fid, dtype = np.int16, count = count)
                        ind += count

                sampling_rate = \
                    file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
                t_start = (start_index / sampling_rate).simplified
                if lazy:
                    ana_sig = AnalogSignal([],
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)
                    ana_sig.lazy_shape = chan_len[ind_chan]
                else:
                    ana_sig = AnalogSignal(temp_array,
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)
# todo apibreak: create ChannelIndex for each signals
#                ana_sig.channel_index = \
#                            file_blocks[list_chan[ind_chan]]['m_numChannel']
                ana_sig.annotate(channel_name = \
                            file_blocks[list_chan[ind_chan]]['m_Name'])
                ana_sig.annotate(channel_type = \
                            file_blocks[list_chan[ind_chan]]['type_subblock'])
                seg.analogsignals.append(ana_sig)

        fid.close()

        if file_blocks[0]['m_TypeBlock'] == 'h': # this should always be true
            blck.rec_datetime = datetime.datetime(\
                file_blocks[0]['m_date_year'],
                file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'],
                file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
                # the 10000 is here to convert m_time_hsecond from centisecond
                # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version = version)
            if cascade:
                seg.rec_datetime = blck.rec_datetime.replace()
                # I couldn't find a simple copy function for datetime,
                # using replace without arguments is a twisted way to make a
                # copy
                seg.annotate(alphamap_version = version)
        if cascade:
            blck.create_many_to_one_relationship()

        return blck
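
A hedged sketch of consuming the Block returned above (the AlphaOmegaIO class name and its constructor argument are assumptions):

    # sketch only: AlphaOmegaIO and its filename argument are assumptions
    reader = AlphaOmegaIO(filename='data.map')
    blck = reader.read_block(lazy=False, cascade=True)
    print(blck.rec_datetime, blck.annotations.get('alphamap_version'))
    for ana_sig in blck.segments[0].analogsignals:
        print(ana_sig.annotations['channel_name'],
              ana_sig.annotations['channel_type'])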
Example #22
0
    def read_segment(self, import_neuroshare_segment = True,
                     lazy=False, cascade=True):
        """
        Arguments:
            import_neuroshare_segment: if True, import each neuroshare segment
                entity as a SpikeTrain with its associated waveforms;
                otherwise skip these entities entirely.

        """
        seg = Segment( file_origin = os.path.basename(self.filename), )
        
        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        else:
            # no DLL loading path is implemented for other platforms
            # (e.g. darwin)
            raise NotImplementedError(
                'neuroshare DLL loading is not supported on ' + sys.platform)
        neuroshare = DllWithError(neuroshare)
        

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info) , ctypes.sizeof(info))
        seg.annotate(neuroshare_version = str(info.dwAPIVersionMaj)+'.'+str(info.dwAPIVersionMin))

        if not cascade:
            return seg


        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename) ,ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
        
        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo ( hFile,  dwEntityID,  ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0: #TEXT
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:#CVS
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:# 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:# 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:# 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp  = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name = str(entityInfo.szEntityLabel),)
                if not lazy:
                    times = [ ]
                    labels = [ ]
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetEventData ( hFile, dwEntityID, dwIndex,
                                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize) )
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times*pq.s
                    ea.labels = np.array(labels, dtype ='S')
                else :
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    total_read = 0
                    while total_read < entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)

                        neuroshare.ns_GetAnalogData(
                            hFile, dwEntityID, dwStartIndex, dwStopIndex,
                            ctypes.byref(pdwContCount),
                            pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value

                    signal = pq.Quantity(pData, units=pAnalogInfo.szUnits, copy=False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex( hFile,  dwEntityID,  dwIndex, ctypes.byref(pdTime))

                anaSig = AnalogSignal(signal,
                                                    sampling_rate = pAnalogInfo.dSampleRate*pq.Hz,
                                                    t_start = pdTime.value * pq.s,
                                                    name = str(entityInfo.szEntityLabel),
                                                    )
                anaSig.annotate( probe_info = str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append( anaSig )


            #segment
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo( hFile,  dwEntityID,
                                             ctypes.byref(pdwSegmentInfo), ctypes.sizeof(pdwSegmentInfo) )
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer  = ctypes.create_string_buffer(" "*256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)
                
                for dwSourceID in range(pdwSegmentInfo.dwSourceCount) :
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo( hFile,  dwEntityID, dwSourceID,
                                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo) )

                if lazy:
                    # `times` is not defined yet in the lazy branch, so an
                    # empty quantity is used instead
                    sptr = SpikeTrain([] * pq.s, name=str(entityInfo.szEntityLabel), t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp  = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount*pdwSegmentInfo.dwSourceCount
                    pData = np.zeros( (dwDataBufferSize), dtype = 'float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID= ctypes.c_uint32(0)

                    nsample  = int(dwDataBufferSize)
                    times = np.empty( (entityInfo.dwItemCount), dtype = 'f')
                    waveforms = np.empty( (entityInfo.dwItemCount, nsource, nsample), dtype = 'f')
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetSegmentData ( hFile,  dwEntityID,  dwIndex,
                            ctypes.byref(pdTimeStamp), pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                                ctypes.byref(pdwUnitID ) )

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[dwIndex, :, :] = pData[:nsample * nsource].reshape(nsample, nsource).transpose()

                    sptr = SpikeTrain(times=pq.Quantity(times, units='s', copy=False),
                                      t_stop=times.max(),
                                      waveforms=pq.Quantity(waveforms, units=str(pdwSegmentInfo.szUnits), copy=False),
                                      left_sweep=nsample / 2. / float(pdwSegmentInfo.dSampleRate) * pq.s,
                                      sampling_rate=float(pdwSegmentInfo.dSampleRate) * pq.Hz,
                                      name=str(entityInfo.szEntityLabel),
                                      )
                seg.spiketrains.append(sptr)


            # neuralevent
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo ( hFile,  dwEntityID,
                                 ctypes.byref(pNeuralInfo), ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [ ]*pq.s
                    t_stop = 0*pq.s
                else:
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData( hFile,  dwEntityID,  dwStartIndex,
                        dwIndexCount,  pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData*pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(times, t_stop =t_stop,
                                                name = str(entityInfo.szEntityLabel),)
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
Example #23
0
    def read(self):
        """read the nex file and add the analog signals from matfile

        all the data from different channels and blocks is found in a big
        data array and has to be extracted by the indexes in other variables
        """

        nex_block = super(NexIOplus, self).read_segment()

        # create a new block to return in the end
        block = Block(
            name="mechanical and heat stimulation recording",
            description=nex_block.annotations,
            file_origin=self.filename,
        )
        train = nex_block.spiketrains[0]

        # load data from matlab file
        mat = sio.loadmat(self.matname, squeeze_me=True)
        n_channels, n_segments = np.shape(mat["datastart"])

        # convert blocktimes from the MATLAB datenum convention to POSIX time
        blockt_pos = (mat["blocktimes"] - 719529) * 86400.0
        blockt_pos = blockt_pos - blockt_pos[0]
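        # datenum 719529 corresponds to 1970-01-01 and a day lasts 86400
        # seconds, so (blocktimes - 719529) * 86400 gives seconds since the
        # epoch; subtracting the first value re-references everything to the
        # start of the recording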

        for segment in range(n_segments):

            seg = Segment(name=str(segment))
            for channel in range(n_channels):

                rate = mat["samplerate"][channel, segment] / self.f_down
                start = mat["datastart"][channel, segment] - 1
                end = mat["dataend"][channel, segment]
                tmp_sig = mat["data"][start:end][:: self.f_down]

                # check for mechanical stimulation
                if (n_channels, channel) in [(3, 0), (4, 1)]:
                    onoffs = self._extract_onsets(tmp_sig)
                    for i, (x1, x2) in enumerate(onoffs):
                        seg.epochs.append(Epoch(x1, x2 - x1, i + 1))
                    if onoffs:
                        seg.annotate(mechanical=True)
                if (n_channels, channel) in [(3, 2), (4, 3)]:
                    if np.max(tmp_sig) - np.min(tmp_sig) > self.boring_thresh:
                        seg.annotate(temp=True)

                ansig = AnalogSignal(
                    signal=tmp_sig,
                    name=mat["titles"][channel],
                    # TODO use unittextmap properly
                    units=mat["unittext"].item(),
                    sampling_rate=rate * (1 / pq.s),
                    t_start=blockt_pos[segment] * pq.s,
                )
                seg.analogsignals.append(ansig)

            # ignore segments without heat or mechanical stimulation
            if not seg.annotations:
                continue

            # last segment has to be treated differently
            if segment + 1 < n_segments:
                t = train[(train > blockt_pos[segment]) & (train < blockt_pos[segment + 1])]
                end = blockt_pos[segment + 1]
            else:
                t = train[train > blockt_pos[segment]]
                end = blockt_pos[segment] + len(ansig) / rate

            seg.spiketrains.append(
                SpikeTrain(times=t.magnitude, units=train.units, t_start=blockt_pos[segment], t_stop=end)
            )
            block.segments.append(seg)
        return block
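
A usage sketch for this combined reader; the constructor signature is an assumption inferred from the attributes the method uses (filename, matname, f_down, boring_thresh):

    # sketch only: NexIOplus constructor arguments are assumptions based on
    # the attributes referenced in read()
    reader = NexIOplus(filename='cell01.nex', matname='cell01.mat',
                       f_down=10, boring_thresh=1.0)
    block = reader.read()
    for seg in block.segments:
        print(seg.name, seg.annotations, len(seg.spiketrains))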