Example #1
def read_and_import(name, ioclass, io_kargs, dbinfo, options):
    """Read a neo Block (or a list of Blocks) with the given IO class and
    import the result into the database described by dbinfo."""
    # instantiate the reader according to the access mode of the IO class
    if ioclass.mode == 'file':
        reader = ioclass(filename=name)
    elif ioclass.mode == 'dir':
        reader = ioclass(dirname=name)
    elif ioclass.mode == 'database':
        reader = ioclass(url=name)
    elif ioclass.mode == 'fake':
        reader = ioclass()
    else:
        raise ValueError('unsupported IO mode: %r' % ioclass.mode)

    # neo < 0.3 returned a single Block from read(); later versions
    # return a list of Blocks
    if distutils.version.LooseVersion(neo.__version__) < '0.3':
        neo_blocks = [reader.read(**io_kargs)]
    else:
        neo_blocks = reader.read(**io_kargs)

    session = dbinfo.Session()
    for neo_block in neo_blocks:
        if (options['populate_recordingchannel']
                and neo.RecordingChannelGroup not in reader.supported_objects):
            populate_RecordingChannel(neo_block, remove_from_annotation=False)

        oe_block = OEBase.from_neo(neo_block,
                                   dbinfo.mapped_classes,
                                   cascade=True)
        oe_block.file_origin = os.path.basename(name)
        # add every imported block through the session instance so that
        # none of the blocks is lost before the commit
        session.add(oe_block)

    session.commit()
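A minimal usage sketch (not from the original source): how read_and_import might be called, assuming an OpenElectrophy-style open_db() as in Example #13 and a file-based neo IO class such as neo.io.PlexonIO; 'populate_recordingchannel' is the only options key the function reads.

import neo
dbinfo = open_db('sqlite://')  # hypothetical helper, see Example #13
read_and_import('myfile.plx', neo.io.PlexonIO, {}, dbinfo,
                {'populate_recordingchannel': True})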
Example #4
def generate_one_simple_block(block_name='block_0', nb_segment=3,
                              supported_objects=[], **kws):
    if supported_objects and Block not in supported_objects:
        raise ValueError('Block must be in supported_objects')
    bl = Block()  # name = block_name

    objects = supported_objects
    if Segment in objects:
        for s in range(nb_segment):
            seg = generate_one_simple_segment(seg_name="seg" + str(s),
                                              supported_objects=objects, **kws)
            bl.segments.append(seg)

    if RecordingChannel in objects:
        populate_RecordingChannel(bl)

    bl.create_many_to_one_relationship()
    return bl
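A quick check of the guard above (a hedged sketch, assuming a neo-era import path): passing a non-empty supported_objects list that omits Block triggers the ValueError.

from neo.core import Segment
try:
    generate_one_simple_block(supported_objects=[Segment])
except ValueError as e:
    print(e)  # 'Block must be in supported_objects'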
Example #5
def generate_one_simple_block(block_name='block_0',
                              nb_segment=3,
                              supported_objects=[],
                              **kws):
    bl = Block()  # name = block_name

    objects = supported_objects
    if Segment in objects:
        for s in range(nb_segment):
            seg = generate_one_simple_segment(seg_name="seg" + str(s),
                                              supported_objects=objects,
                                              **kws)
            bl.segments.append(seg)

    if RecordingChannel in objects:
        populate_RecordingChannel(bl)

    return bl
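A usage sketch for the generators above (assumptions: neo-0.2-era imports, and generate_one_simple_segment comes from the same module):

from neo.core import Block, Segment, AnalogSignal
bl = generate_one_simple_block(nb_segment=2,
                               supported_objects=[Block, Segment,
                                                  AnalogSignal])
assert len(bl.segments) == 2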
Example #7
    def read_block(self, lazy=False, cascade=True,
                   n_starts=None, n_stops=None, channel_list=None):
        """Reads the file and returns contents as a Block.
        
        The Block contains one Segment for each entry in zip(n_starts,
        n_stops). If these parameters are not specified, the default is
        to store all data in one Segment.
        
        The Block also contains one RecordingChannelGroup for all channels.
        
        n_starts: list or array of starting times of each Segment in
            samples from the beginning of the file.
        n_stops: similar, stopping times of each Segment
        channel_list: list of channel numbers to get. The neural data channels
            are 1 - 128. The analog inputs are 129 - 144. The default
            is to acquire all channels.
        
        Returns: Block object containing the data.
        """


        # Create block
        block = Block(file_origin=self.filename)
        
        if not cascade:
            return block
        
        self.loader = Loader(self.filename)
        self.loader.load_file()
        self.header = self.loader.header
        
        # If channels not specified, get all
        if channel_list is None:
            channel_list = self.loader.get_neural_channel_numbers()
        
        # If not specified, load all as one Segment
        if n_starts is None:
            n_starts = [0]
            n_stops = [self.loader.header.n_samples]
        
        #~ # Add channel hierarchy
        #~ rcg = RecordingChannelGroup(name='allchannels',
            #~ description='group of all channels', file_origin=self.filename)
        #~ block.recordingchannelgroups.append(rcg)
        #~ self.channel_number_to_recording_channel = {}

        #~ # Add each channel at a time to hierarchy
        #~ for ch in channel_list:            
            #~ ch_object = RecordingChannel(name='channel%d' % ch,
                #~ file_origin=self.filename, index=ch)
            #~ rcg.channel_indexes.append(ch_object.index)
            #~ rcg.channel_names.append(ch_object.name)
            #~ rcg.recordingchannels.append(ch_object)
            #~ self.channel_number_to_recording_channel[ch] = ch_object

        # Iterate through n_starts and n_stops and add one Segment
        # per each.
        for n, (t1, t2) in enumerate(zip(n_starts, n_stops)):
            # Create segment and add metadata
            seg = self.read_segment(n_start=t1,
                                    n_stop=t2,
                                    chlist=channel_list,
                                    lazy=lazy,
                                    cascade=cascade)
            seg.name = 'Segment %d' % n
            seg.index = n
            t1sec = t1 / self.loader.header.f_samp
            t2sec = t2 / self.loader.header.f_samp
            seg.description = 'Segment %d from %f to %f' % (n, t1sec, t2sec)
            
            # Link to block
            block.segments.append(seg)
        
        # Create hardware view, and bijectivity
        tools.populate_RecordingChannel(block)
        tools.create_many_to_one_relationship(block)
        
        return block
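A hedged usage sketch for read_block above (assumptions: the method belongs to a Blackrock-style neo IO class such as neo.io.BlackrockIO, and 'data.ns5' is a hypothetical file name):

io = neo.io.BlackrockIO(filename='data.ns5')
# two Segments: samples 0-9999 and 10000-19999, first three channels
block = io.read_block(n_starts=[0, 10000], n_stops=[10000, 20000],
                      channel_list=[1, 2, 3])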
Example #8
    def test1(self):
        """Write data to binary file, then read it back in and verify"""
        # delete temporary file before trying to write to it
        if os.path.exists(self.fn):
            os.remove(self.fn)

        block = neo.Block()
        full_range = 234 * pq.mV

        # Create segment1 with analogsignals
        segment1 = neo.Segment()
        sig1 = neo.AnalogSignal([3, 4, 5],
                                units='mV',
                                channel_index=3,
                                sampling_rate=30000. * pq.Hz)
        sig2 = neo.AnalogSignal([6, -4, -5],
                                units='mV',
                                channel_index=4,
                                sampling_rate=30000. * pq.Hz)
        segment1.analogsignals.append(sig1)
        segment1.analogsignals.append(sig2)

        # Create segment2 with analogsignals
        segment2 = neo.Segment()
        sig3 = neo.AnalogSignal([-3, -4, -5],
                                units='mV',
                                channel_index=3,
                                sampling_rate=30000. * pq.Hz)
        sig4 = neo.AnalogSignal([-6, 4, 5],
                                units='mV',
                                channel_index=4,
                                sampling_rate=30000. * pq.Hz)
        segment2.analogsignals.append(sig3)
        segment2.analogsignals.append(sig4)

        # Link segments to block
        block.segments.append(segment1)
        block.segments.append(segment2)

        # Create hardware view, and bijectivity
        #tools.populate_RecordingChannel(block)
        #print "problem happening"
        #print block.recordingchannelgroups[0].recordingchannels
        #chan = block.recordingchannelgroups[0].recordingchannels[0]
        #print chan.analogsignals
        #tools.create_many_to_one_relationship(block)
        #print "here: "
        #print block.segments[0].analogsignals[0].recordingchannel

        # Chris I prefer that:
        #tools.finalize_block(block)
        tools.populate_RecordingChannel(block)
        tools.create_many_to_one_relationship(block)

        # Check that blackrockio is correctly extracting channel indexes
        self.assertEqual(
            neo.io.blackrockio.channel_indexes_in_segment(segment1), [3, 4])
        self.assertEqual(
            neo.io.blackrockio.channel_indexes_in_segment(segment2), [3, 4])

        # Create writer. Write block, then read back in.
        bio = neo.io.BlackrockIO(filename=self.fn, full_range=full_range)
        bio.write_block(block)
        fi = open(self.fn, 'rb')  # binary mode; file() is Python 2 only

        # Text header
        self.assertEqual(fi.read(16), 'NEURALSG30 kS/s\x00')
        self.assertEqual(fi.read(8), '\x00\x00\x00\x00\x00\x00\x00\x00')

        # Integers: period, channel count, channel index1, channel index2
        self.assertEqual(struct.unpack('<4I', fi.read(16)), (1, 2, 3, 4))

        # What should the signals be after conversion?
        conv = float(full_range) / 2**16
        sigs = np.array(
            [np.concatenate((sig1, sig3)),
             np.concatenate((sig2, sig4))])
        sigs_converted = np.rint(sigs / conv).astype(int)

        # Check that each time point is the same
        for time_slc in sigs_converted.transpose():
            written_data = struct.unpack('<2h', fi.read(4))
            self.assertEqual(list(time_slc), list(written_data))

        # Check that we read to the end
        currentpos = fi.tell()
        fi.seek(0, 2)
        truelen = fi.tell()
        self.assertEqual(currentpos, truelen)
        fi.close()
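A quick arithmetic check of the scaling used in the test (not from the original source): the writer quantizes the signal over the full range into 2**16 integer steps.

conv = 234.0 / 2 ** 16         # mV per integer count, ~0.00357 mV
print(int(round(3.0 / conv)))  # a 3 mV sample is written as 840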
Example #9
    def read_block(
            self,
            # the 2 first keyword arguments are imposed by neo.io API
            lazy=False,
            cascade=True):
        """
        Return a Block.

        """
        def count_samples(m_length):
            """
            Count the number of signal samples available in a type 5 data block
            of length m_length

            """

            # for information about type 5 data block, see [1]
            count = int((m_length - 6) / 2 - 2)
            # -6 corresponds to the header of a type 5 block, and -2 takes
            # into account that the last 2 values are not samples: the
            # corresponding 4 bytes encode the time stamp of the beginning
            # of the block
            return count
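        # Worked example (not from the original source): a type 5 block
        # with m_length = 38 bytes holds (38 - 6) / 2 - 2 = 14 usable
        # samples; the last 4 bytes encode the block's start time stamp.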

        # create the neo Block that will be returned at the end
        blck = Block(file_origin=os.path.basename(self.filename))
        blck.file_origin = os.path.basename(self.filename)

        fid = open(self.filename, 'rb')

        # NOTE: in the following, the word "block" is used in the sense used in
        # the alpha-omega specifications (ie a data chunk in the file), rather
        # than in the sense of the usual Block object in neo

        # step 1: read the headers of all the data blocks to load the file
        # structure

        pos_block = 0  # position of the current block in the file
        file_blocks = []  # list of data blocks available in the file

        if not cascade:
            # we read only the main header

            m_length, m_TypeBlock = struct.unpack('Hcx', fid.read(4))
            # m_TypeBlock should be 'h', as we read the first block
            block = HeaderReader(
                fid, dict_header_type.get(m_TypeBlock, Type_Unknown)).read_f()
            block.update({
                'm_length': m_length,
                'm_TypeBlock': m_TypeBlock,
                'pos': pos_block
            })
            file_blocks.append(block)

        else:  # cascade == True

            seg = Segment(file_origin=os.path.basename(self.filename))
            seg.file_origin = os.path.basename(self.filename)
            blck.segments.append(seg)

            while True:
                first_4_bytes = fid.read(4)
                if len(first_4_bytes) < 4:
                    # we have reached the end of the file
                    break
                else:
                    m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)

                block = HeaderReader(
                    fid, dict_header_type.get(m_TypeBlock,
                                              Type_Unknown)).read_f()
                block.update({
                    'm_length': m_length,
                    'm_TypeBlock': m_TypeBlock,
                    'pos': pos_block
                })

                if m_TypeBlock == '2':
                    # The beginning of a type '2' block is identical for
                    # all types of channels, but the rest depends on the
                    # channel type, so we need a special case here.

                    # WARNING: how to determine the channel type is not
                    # described in the documentation, so we follow what is
                    # proposed in the C code [2]. According to that code,
                    # 'm_isAnalog' distinguishes analog from digital
                    # channels, and 'm_Mode' encodes the type of analog
                    # channel: 0 for continuous, 1 for level, 2 for external
                    # trigger. However, some files contain apparently
                    # continuous channels with 'm_Mode' = 128 or 192, so
                    # every channel whose 'm_Mode' differs from 1 or 2 is
                    # treated as continuous. The values 1 and 2 could not be
                    # verified against real level or external-trigger data,
                    # as no test files contained data of those types.

                    type_subblock = ('unknown_channel_type(m_Mode='
                                     + str(block['m_Mode']) + ')')
                    description = Type2_SubBlockUnknownChannels
                    block.update({'m_Name': 'unknown_name'})
                    if block['m_isAnalog'] == 0:
                        # digital channel
                        type_subblock = 'digital'
                        description = Type2_SubBlockDigitalChannels
                    elif block['m_isAnalog'] == 1:
                        # analog channel
                        if block['m_Mode'] == 1:
                            # level channel
                            type_subblock = 'level'
                            description = Type2_SubBlockLevelChannels
                        elif block['m_Mode'] == 2:
                            # external trigger channel
                            type_subblock = 'external_trigger'
                            description = Type2_SubBlockExtTriggerChannels
                        else:
                            # continuous channel
                            type_subblock = ('continuous(Mode'
                                             + str(block['m_Mode']) + ')')
                            description = Type2_SubBlockContinuousChannels

                    subblock = HeaderReader(fid, description).read_f()

                    block.update(subblock)
                    block.update({'type_subblock': type_subblock})

                file_blocks.append(block)
                pos_block += m_length
                fid.seek(pos_block)

            # step 2: find the available channels
            list_chan = []  # list containing indexes of channel blocks
            for ind_block, block in enumerate(file_blocks):
                if block['m_TypeBlock'] == '2':
                    list_chan.append(ind_block)

            # step 3: find blocks containing data for the available channels
            list_data = []  # list of lists of indexes of data blocks
            # corresponding to each channel
            for ind_chan, chan in enumerate(list_chan):
                list_data.append([])
                num_chan = file_blocks[chan]['m_numChannel']
                for ind_block, block in enumerate(file_blocks):
                    if block['m_TypeBlock'] == '5':
                        if block['m_numChannel'] == num_chan:
                            list_data[ind_chan].append(ind_block)

            # step 4: compute the length (number of samples) of the channels
            chan_len = np.zeros(len(list_data), dtype=int)
            for ind_chan, list_blocks in enumerate(list_data):
                for ind_block in list_blocks:
                    chan_len[ind_chan] += count_samples(
                        file_blocks[ind_block]['m_length'])

            # step 5: find channels for which data are available
            ind_valid_chan = np.nonzero(chan_len)[0]

            # step 6: load the data
            # TODO give the possibility to load data as AnalogSignalArrays
            for ind_chan in ind_valid_chan:
                list_blocks = list_data[ind_chan]
                ind = 0  # index in the data vector

                # read time stamp for the beginning of the signal
                form = '<l'  # reading format
                ind_block = list_blocks[0]
                count = count_samples(file_blocks[ind_block]['m_length'])
                fid.seek(file_blocks[ind_block]['pos'] + 6 + count * 2)
                buf = fid.read(struct.calcsize(form))
                val = struct.unpack(form, buf)
                start_index = val[0]

                # WARNING: in the following, blocks are read assuming that
                # they are all contiguous and sorted in time. It is not clear
                # whether this always holds; if it does not, the time stamp
                # of each data block should be used to decide where to put
                # the read data in the array.
                if not lazy:
                    temp_array = np.empty(chan_len[ind_chan], dtype=np.int16)
                    # NOTE: we could directly create an empty AnalogSignal and
                    # load the data in it, but it is much faster to load data
                    # in a temporary numpy array and create the AnalogSignals
                    # from this temporary array
                    for ind_block in list_blocks:
                        count = count_samples(
                            file_blocks[ind_block]['m_length'])
                        fid.seek(file_blocks[ind_block]['pos'] + 6)
                        temp_array[ind:ind + count] = \
                            np.fromfile(fid, dtype=np.int16, count=count)
                        ind += count

                sampling_rate = \
                    file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
                t_start = (start_index / sampling_rate).simplified
                if lazy:
                    ana_sig = AnalogSignal(
                        [],
                        sampling_rate=sampling_rate,
                        t_start=t_start,
                        name=file_blocks[list_chan[ind_chan]]['m_Name'],
                        file_origin=os.path.basename(self.filename),
                        units=pq.dimensionless)
                    ana_sig.lazy_shape = chan_len[ind_chan]
                else:
                    ana_sig = AnalogSignal(
                        temp_array,
                        sampling_rate=sampling_rate,
                        t_start=t_start,
                        name=file_blocks[list_chan[ind_chan]]['m_Name'],
                        file_origin=os.path.basename(self.filename),
                        units=pq.dimensionless)

                ana_sig.channel_index = \
                    file_blocks[list_chan[ind_chan]]['m_numChannel']
                ana_sig.annotate(
                    channel_name=file_blocks[list_chan[ind_chan]]['m_Name'])
                ana_sig.annotate(
                    channel_type=file_blocks[list_chan[ind_chan]]['type_subblock'])
                seg.analogsignals.append(ana_sig)

        fid.close()

        if file_blocks[0]['m_TypeBlock'] == 'h':  # this should always be true
            blck.rec_datetime = datetime.datetime(
                file_blocks[0]['m_date_year'],
                file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'],
                file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
            # the 10000 is here to convert m_time_hsecond from centisecond
            # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version=version)
            if cascade:
                seg.rec_datetime = blck.rec_datetime.replace()
                # I couldn't find a simple copy function for datetime,
                # using replace without arguments is a twisted way to make a
                # copy
                seg.annotate(alphamap_version=version)
        if cascade:
            populate_RecordingChannel(blck, remove_from_annotation=True)
            blck.create_many_to_one_relationship()

        return blck
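A hedged usage sketch (assumptions: this read_block belongs to neo's AlphaOmegaIO, as the alpha-omega references in the comments suggest, and 'example.map' is a hypothetical file name):

from neo.io import AlphaOmegaIO
io = AlphaOmegaIO(filename='example.map')
blck = io.read_block(lazy=False, cascade=True)
for sig in blck.segments[0].analogsignals:
    print('%s: %s' % (sig.annotations['channel_type'], sig.name))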
Example #13
def test3():
    """
    With no db: just a file
    """
    url = 'sqlite://'

    dbinfo = open_db(url,
                     object_number_in_cache=3000,
                     use_global_session=False,
                     compress=None,
                     )
    session = dbinfo.Session()

    #~ bl = neo.AxonIO(filename = 'File_axon_1.abf').read()
    #~ print bl.segments
    #~ bl2 = OEBase.from_neo(bl, generic_classes, cascade = True)

    from neo.test.io.generate_datasets import generate_one_simple_block
    from neo.io.tools import (create_many_to_one_relationship,
                              populate_RecordingChannel)
    bl = generate_one_simple_block(
        supported_objects=[neo.Segment, neo.AnalogSignal])
    create_many_to_one_relationship(bl)
    populate_RecordingChannel(bl)
    bl2 = OEBase.from_neo(bl, dbinfo.mapped_classes, cascade=True)
    session.add(bl2)
    session.commit()
    print(bl2)

    treedescription1 = TreeDescription(
        dbinfo=dbinfo,
        table_children={
            'Block': ['Segment'],
            'Segment': ['AnalogSignal'],
        },
        columns_to_show={},
        table_on_top='Block',
        #~ table_order = None,
    )
    treedescription2 = TreeDescription(
        dbinfo=dbinfo,
        table_children={
            'Block': ['RecordingChannelGroup'],
            'RecordingChannelGroup': ['RecordingChannel'],
            'RecordingChannel': ['AnalogSignal'],
        },
        columns_to_show={},
        table_on_top='Block',
        #~ table_order = None,
    )

    app = QApplication([])

    from OpenElectrophy.gui.contextmenu import context_menu

    w1 = QtSqlTreeView(session=session, treedescription=treedescription1,
                       context_menu=context_menu)
    w2 = QtSqlTreeView(session=session, treedescription=treedescription2,
                       context_menu=context_menu)
    w1.show()
    w2.show()
    sys.exit(app.exec_())
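To run this example as a standalone script (a minimal sketch; it assumes the module-level imports the function relies on: sys, neo, QApplication, open_db, OEBase, TreeDescription and QtSqlTreeView):

if __name__ == '__main__':
    test3()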