Example #1
    def read_block(self, lazy=False, cascade=True, channel_index=None):
        """
        Arguments:
            channel_index: int, iterable, or None, to select one, several, or all channels

        """

        blk = Block()
        if cascade:
            seg = Segment(file_origin=self._filename)
            blk.segments += [seg]

            if channel_index is not None:
                if type(channel_index) is int:
                    channel_index = [channel_index]
                if type(channel_index) is list:
                    channel_index = np.array(channel_index)
            else:
                channel_index = np.arange(0, self._attrs["shape"][1])

            chx = ChannelIndex(name="all channels", index=channel_index)
            blk.channel_indexes.append(chx)

            ana = self.read_analogsignal(channel_index=channel_index, lazy=lazy, cascade=cascade)
            ana.channel_index = chx
            seg.analogsignals.append(ana)  # attach the signal to its segment
            seg.duration = (self._attrs["shape"][0] / self._attrs["kwik"]["sample_rate"]) * pq.s

            # neo.tools.populate_RecordingChannel(blk)
        blk.create_many_to_one_relationship()
        return blk
Example #2
File: nsdfio.py Project: INM-6/python-neo
    def read_block(self, lazy=False, group=None, reader=None):
        """
        Read a Block from the file

        :param lazy: Enables lazy reading
        :param group: HDF5 Group representing the block in NSDF model tree (optional)
        :param reader: NSDFReader instance (optional)
        :return: Read block
        """
        assert not lazy, 'This IO does not support lazy loading'

        block = Block()
        group, reader = self._select_first_container(group, reader, 'block')

        if group is None:
            return None

        attrs = group.attrs

        self._read_block_children(block, group, reader)
        block.create_many_to_one_relationship()

        self._read_container_metadata(attrs, block)

        return block
Example #3
    def test_block_write(self):
        block = Block(name=self.rword(),
                      description=self.rsentence())
        self.write_and_compare([block])

        block.annotate(**self.rdict(5))
        self.write_and_compare([block])
Example #4
 def _block_to_neo(self, nix_block):
     neo_attrs = self._nix_attr_to_neo(nix_block)
     neo_block = Block(**neo_attrs)
     neo_block.rec_datetime = datetime.fromtimestamp(
         nix_block.created_at
     )
     self._neo_map[nix_block.name] = neo_block
     return neo_block
Example #5
def proc_dam(filename):
    '''Load a dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        damfile = list(damobj.items())[0][1].flatten()  # list() for Python 3, where items() is a view

    filename = os.path.basename(filename[:-12]+'.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    chx = ChannelIndex(file_origin=filename,
                       index=np.array([0]),
                       channel_ids=np.array([1]),
                       channel_names=np.array(['Chan1'], dtype='S'))

    block.channel_indexes.append(chx)

    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal*pq.mV,
                           t_start=timestamp*pq.d,
                           file_origin=filename,
                           sampling_period=1.*pq.s)
        segment = Segment(file_origin=filename,
                          index=stimIndex,
                          **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
Example #6
    def read_block(self, lazy=False, cascade=True):
        tree = ElementTree.parse(self.filename)
        root = tree.getroot()
        acq = root.find('acquisitionSystem')
        nbits = int(acq.find('nBits').text)
        nbchannel = int(acq.find('nChannels').text)
        sampling_rate = float(acq.find('samplingRate').text)*pq.Hz
        voltage_range = float(acq.find('voltageRange').text)
        #offset = int(acq.find('offset').text)
        amplification = float(acq.find('amplification').text)
        
        bl = Block(file_origin=os.path.basename(self.filename).replace('.xml', ''))
        if cascade:
            seg = Segment()
            bl.segments.append(seg)
            
            # RecordingChannels and RecordingChannelGroups
            rc_list = []
            for i, xml_rcg in enumerate(root.find('anatomicalDescription').find('channelGroups').findall('group')):
                rcg = RecordingChannelGroup(name='Group {0}'.format(i))
                bl.recordingchannelgroups.append(rcg)
                for xml_rc in xml_rcg:
                    rc = RecordingChannel(index=int(xml_rc.text))
                    rc_list.append(rc)
                    rcg.recordingchannels.append(rc)
                    rc.recordingchannelgroups.append(rcg)
                rcg.channel_indexes = np.array([rc.index for rc in rcg.recordingchannels], dtype=int)
                rcg.channel_names = np.array(['Channel{0}'.format(rc.index) for rc in rcg.recordingchannels], dtype='S')
        
            # AnalogSignals
            reader = RawBinarySignalIO(filename=self.filename.replace('.xml', '.dat'))
            seg2 = reader.read_segment(cascade=True, lazy=lazy,
                                       sampling_rate=sampling_rate,
                                       t_start=0. * pq.s,
                                       unit=pq.V, nbchannel=nbchannel,
                                       bytesoffset=0,
                                       dtype=np.int16 if nbits <= 16 else np.int32,
                                       rangemin=-voltage_range / 2.,
                                       rangemax=voltage_range / 2.)
            for s, sig in enumerate(seg2.analogsignals):
                if not lazy:
                    sig /= amplification
                sig.segment = seg
                seg.analogsignals.append(sig)
                rc_list[s].analogsignals.append(sig)
            
        bl.create_many_to_one_relationship()
        return bl
Example #7
def proc_src(filename):
    '''Load an src file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareSrcIO to
    make sure BrainwareSrcIO is working properly

    block = proc_src(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_src_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.src', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_src_py2.npz'
             src file name = 'file1.src'
    '''
    with np.load(filename) as srcobj:
        srcfile = list(srcobj.items())[0][1]  # list() for Python 3, where items() is a view

    filename = os.path.basename(filename[:-12]+'.src')

    block = Block(file_origin=filename)

    NChannels = srcfile['NChannels'][0, 0][0, 0]
    side = str(srcfile['side'][0, 0][0])
    ADperiod = srcfile['ADperiod'][0, 0][0, 0]

    comm_seg = proc_src_comments(srcfile, filename)
    block.segments.append(comm_seg)

    rcg = proc_src_units(srcfile, filename)
    chan_nums = np.arange(NChannels, dtype='int')
    chan_names = []
    for i in chan_nums:
        name = 'Chan'+str(i)
        chan_names.append(name)
        chan = RecordingChannel(file_origin=filename,  # the variable, not the literal string
                                name=name,
                                index=int(i))
        rcg.recordingchannels.append(chan)
    rcg.channel_indexes = chan_nums
    rcg.channel_names = np.array(chan_names, dtype='string_')
    block.recordingchannelgroups.append(rcg)

    for rep in srcfile['sets'][0, 0].flatten():
        proc_src_condition(rep, filename, ADperiod, side, block)

    block.create_many_to_one_relationship()

    return block
Example #8
File: tdtio.py Project: bal47/python-neo
    def read_block(self, lazy=False, cascade=True):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname

        if not cascade:
            return bl

        for blockname in os.listdir(self.dirname):
            seg = self.read_segment(blockname, lazy, cascade)
            bl.segments.append(seg)

        bl.create_many_to_one_relationship()
        return bl
Example #9
    def test__children(self):
        blk = Block(name='block1')
        blk.recordingchannelgroups = [self.rcg1]
        blk.create_many_to_one_relationship()

        self.assertEqual(self.rcg1._container_child_objects, ('Unit',))
        self.assertEqual(self.rcg1._data_child_objects, ('AnalogSignalArray',))
        self.assertEqual(self.rcg1._single_parent_objects, ('Block',))
        self.assertEqual(self.rcg1._multi_child_objects, ('RecordingChannel',))
        self.assertEqual(self.rcg1._multi_parent_objects, ())
        self.assertEqual(self.rcg1._child_properties, ())

        self.assertEqual(self.rcg1._single_child_objects,
                         ('Unit', 'AnalogSignalArray',))

        self.assertEqual(self.rcg1._container_child_containers, ('units',))
        self.assertEqual(self.rcg1._data_child_containers,
                         ('analogsignalarrays',))
        self.assertEqual(self.rcg1._single_child_containers,
                         ('units', 'analogsignalarrays'))
        self.assertEqual(self.rcg1._single_parent_containers, ('block',))
        self.assertEqual(self.rcg1._multi_child_containers,
                         ('recordingchannels',))
        self.assertEqual(self.rcg1._multi_parent_containers, ())

        self.assertEqual(self.rcg1._child_objects,
                         ('Unit', 'AnalogSignalArray', 'RecordingChannel'))
        self.assertEqual(self.rcg1._child_containers,
                         ('units', 'analogsignalarrays', 'recordingchannels'))
        self.assertEqual(self.rcg1._parent_objects, ('Block',))
        self.assertEqual(self.rcg1._parent_containers, ('block',))

        self.assertEqual(len(self.rcg1.children),
                         (len(self.units1) +
                          len(self.rchan1) +
                          len(self.sigarr1)))
        self.assertEqual(self.rcg1.children[0].name, self.unitnames1[0])
        self.assertEqual(self.rcg1.children[1].name, self.unitnames1[1])
        self.assertEqual(self.rcg1.children[2].name, self.sigarrnames1[0])
        self.assertEqual(self.rcg1.children[3].name, self.sigarrnames1[1])
        self.assertEqual(self.rcg1.children[4].name, self.rchannames1[0])
        self.assertEqual(self.rcg1.children[5].name, self.rchannames1[1])
        self.assertEqual(len(self.rcg1.parents), 1)
        self.assertEqual(self.rcg1.parents[0].name, 'block1')

        self.rcg1.create_many_to_one_relationship()
        self.rcg1.create_many_to_many_relationship()
        self.rcg1.create_relationship()
        assert_neo_object_is_compliant(self.rcg1)
Example #10
File: baseio.py Project: bal47/python-neo
 def read(self, lazy=False, cascade=True, **kargs):
     if Block in self.readable_objects:
         if hasattr(self, "read_all_blocks") and callable(getattr(self, "read_all_blocks")):
             return self.read_all_blocks(lazy=lazy, cascade=cascade, **kargs)
         return [self.read_block(lazy=lazy, cascade=cascade, **kargs)]
     elif Segment in self.readable_objects:
         bl = Block(name="One segment only")
         if not cascade:
             return bl
         seg = self.read_segment(lazy=lazy, cascade=cascade, **kargs)
         bl.segments.append(seg)
         bl.create_many_to_one_relationship()
         return [bl]
     else:
         raise NotImplementedError
Example #11
File: baseio.py Project: INM-6/python-neo
 def read(self, lazy=False, **kargs):
     if lazy:
         assert self.support_lazy, 'This IO does not support lazy loading'
     if Block in self.readable_objects:
         if (hasattr(self, 'read_all_blocks') and
                 callable(getattr(self, 'read_all_blocks'))):
             return self.read_all_blocks(lazy=lazy, **kargs)
         return [self.read_block(lazy=lazy, **kargs)]
     elif Segment in self.readable_objects:
         bl = Block(name='One segment only')
         seg = self.read_segment(lazy=lazy, **kargs)
         bl.segments.append(seg)
         bl.create_many_to_one_relationship()
         return [bl]
     else:
         raise NotImplementedError
Example #12
 def read_block(self, lazy=False, cascade=True, group=0):
     blo = Block(name='test')
     if cascade:
         tree = getbyroute(self.pul.tree, [0, group])
         for i, child in enumerate(tree['children']):
             blo.segments.append(self.read_segment(group=group, series=i))
         # wrap in list() so remove() works on Python 3, where keys() is a view
         annotations = list(tree['contents'].__dict__.keys())
         annotations.remove('readlist')
         for a in annotations:
             d = {a: str(tree['contents'].__dict__[a])}
             blo.annotate(**d)
     blo.create_many_to_one_relationship()
     return blo
Example #13
    def test__construct_subsegment_by_unit(self):
        nb_seg = 3
        nb_unit = 7
        unit_with_sig = np.array([0, 2, 5])
        signal_types = ['Vm', 'Conductances']
        sig_len = 100

        # channelindexes
        chxs = [ChannelIndex(name='Vm',
                             index=unit_with_sig),
                ChannelIndex(name='Conductance',
                             index=unit_with_sig)]

        # Unit
        all_unit = []
        for u in range(nb_unit):
            un = Unit(name='Unit #%d' % u, channel_indexes=np.array([u]))
            assert_neo_object_is_compliant(un)
            all_unit.append(un)

        blk = Block()
        blk.channel_indexes = chxs
        for s in range(nb_seg):
            seg = Segment(name='Simulation %s' % s)
            blk.segments.append(seg)
            for j in range(nb_unit):
                st = SpikeTrain([1, 2], units='ms',
                                t_start=0., t_stop=10)
                st.unit = all_unit[j]
                seg.spiketrains.append(st)  # attach the train to the segment

            for t in signal_types:
                anasigarr = AnalogSignal(np.zeros((sig_len,
                                                   len(unit_with_sig))),
                                         units='nA',
                                         sampling_rate=1000. * pq.Hz,
                                         channel_indexes=unit_with_sig)
                seg.analogsignals.append(anasigarr)

        blk.create_many_to_one_relationship()
        for unit in all_unit:
            assert_neo_object_is_compliant(unit)
        for chx in chxs:
            assert_neo_object_is_compliant(chx)
        assert_neo_object_is_compliant(blk)

        # what you want
        newseg = seg.construct_subsegment_by_unit(all_unit[:4])
        assert_neo_object_is_compliant(newseg)
Example #14
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the raw data file "fname" generated
        with BrainWare
        '''

        # no keyword arguments are implemented so far.  If someone tries to
        # pass them, they either expect them to do something or are making a
        # mistake, neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'arguments implemented yet')
        self._fsrc = None

        block = Block(file_origin=self._filename)

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        chx = ChannelIndex(file_origin=self._filename,
                           channel_ids=np.array([1]),
                           index=np.array([0]),
                           channel_names=np.array(['Chan1'], dtype='S'))

        # load objects into their containers
        block.channel_indexes.append(chx)

        # open the file
        with open(self._path, 'rb') as fobject:
            # while the file is not done keep reading segments
            while True:
                seg = self._read_segment(fobject, lazy)
                # if there are no more Segments, stop
                if not seg:
                    break

                # store the segment and signals
                seg.analogsignals[0].channel_index = chx
                block.segments.append(seg)

        # remove the file object
        self._fsrc = None

        block.create_many_to_one_relationship()
        return block
Example #15
    def read_block(self, lazy=False, cascade=True, **kargs):
        """
        Reads a block from the raw data file "fname" generated
        with BrainWare
        """

        # no keyword arguments are implemented so far.  If someone tries to
        # pass them, they either expect them to do something or are making a
        # mistake, neither of which should pass silently
        if kargs:
            raise NotImplementedError("This method does not have any "
                                      "arguments implemented yet")
        self._fsrc = None

        block = Block(file_origin=self._filename)

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        rcg = RecordingChannelGroup(file_origin=self._filename)
        rchan = RecordingChannel(file_origin=self._filename, index=1, name="Chan1")

        # load objects into their containers
        rcg.recordingchannels.append(rchan)
        block.recordingchannelgroups.append(rcg)
        rcg.channel_indexes = np.array([1])
        rcg.channel_names = np.array(["Chan1"], dtype="S")

        # open the file
        with open(self._path, "rb") as fobject:
            # while the file is not done keep reading segments
            while True:
                seg = self._read_segment(fobject, lazy)
                # if there are no more Segments, stop
                if not seg:
                    break

                # store the segment and signals
                block.segments.append(seg)
                rchan.analogsignals.append(seg.analogsignals[0])

        # remove the file object
        self._fsrc = None

        block.create_many_to_one_relationship()
        return block
Example #16
    def test_context_write(self):
        neoblock = Block(name=self.rword(), description=self.rsentence())
        with NixIO(self.filename, "ow") as iofile:
            iofile.write_block(neoblock)

        nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly,
                                backend="h5py")
        self.compare_blocks([neoblock], nixfile.blocks)
        nixfile.close()

        neoblock.annotate(**self.rdict(5))
        with NixIO(self.filename, "rw") as iofile:
            iofile.write_block(neoblock)
        nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly,
                                backend="h5py")
        self.compare_blocks([neoblock], nixfile.blocks)
        nixfile.close()
Example #17
def generate_one_simple_block(block_name='block_0', nb_segment=3, supported_objects=None, **kws):
    supported_objects = supported_objects or []  # avoid a mutable default argument
    if supported_objects and Block not in supported_objects:
        raise ValueError('Block must be in supported_objects')
    bl = Block()  # name=block_name

    objects = supported_objects
    if Segment in objects:
        for s in range(nb_segment):
            seg = generate_one_simple_segment(seg_name="seg" + str(s), supported_objects=objects,
                                              **kws)
            bl.segments.append(seg)

    # if RecordingChannel in objects:
    #    populate_RecordingChannel(bl)

    bl.create_many_to_one_relationship()
    return bl
Example #18
File: kwikio.py Project: bal47/python-neo
    def read_block(self,
                   lazy=False,
                   cascade=True,
                   channel_index=None):
        """
        Arguments:
            channel_index: int, iterable, or None, to select one, several,
            or all channels respectively

        """

        blk = Block()
        if cascade:
            seg = Segment(file_origin=self._filename)
            blk.segments += [seg]

            if channel_index is not None:
                if type(channel_index) is int:
                    channel_index = [channel_index]
                if type(channel_index) is list:
                    channel_index = np.array(channel_index)
            else:
                channel_index = np.arange(0, self._attrs['shape'][1])

            rcg = RecordingChannelGroup(name='all channels',
                                        channel_indexes=channel_index)
            blk.recordingchannelgroups.append(rcg)

            for idx in channel_index:
                # read the nested analogsignal
                ana = self.read_analogsignal(channel_index=idx,
                                             lazy=lazy,
                                             cascade=cascade)
                chan = RecordingChannel(index=int(idx))
                seg.analogsignals += [ana]
                chan.analogsignals += [ana]
                rcg.recordingchannels.append(chan)
            seg.duration = (self._attrs['shape'][0]
                            / self._attrs['kwik']['sample_rate']) * pq.s

            # neo.tools.populate_RecordingChannel(blk)
        blk.create_many_to_one_relationship()
        return blk
Example #19
    def create_all_annotated(cls):
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))

        seg = Segment()
        seg.annotate(**cls.rdict(4))
        blk.segments.append(seg)

        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)

        isig = IrregularlySampledSignal(times=times, signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)

        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)

        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)

        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)

        chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)

        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)

        return blk
Example #20
    def read_block(self, lazy=False, cascade=True):

        if self.filename is not None:
            self.stfio_rec = stfio.read(self.filename)

        bl = Block()
        bl.description = self.stfio_rec.file_description
        bl.annotate(comment=self.stfio_rec.comment)
        try:
            bl.rec_datetime = self.stfio_rec.datetime
        except Exception:  # avoid a bare except
            bl.rec_datetime = None

        if not cascade:
            return bl

        dt = np.round(self.stfio_rec.dt * 1e-3, 9) * pq.s  # ms to s
        sampling_rate = 1.0/dt
        t_start = 0 * pq.s

        # iterate over sections first:
        for j, recseg in enumerate(self.stfio_rec[0]):
            seg = Segment(index=j)
            length = len(recseg)

            # iterate over channels:
            for i, recsig in enumerate(self.stfio_rec):
                name = recsig.name
                unit = recsig.yunits
                try:
                    pq.Quantity(1, unit)
                except Exception:  # unknown unit strings fall back to dimensionless
                    unit = ''

                if lazy:
                    signal = pq.Quantity([], unit)
                else:
                    signal = pq.Quantity(recsig[j], unit)
                anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                      t_start=t_start, name=str(name),
                                      channel_index=i)
                if lazy:
                    anaSig.lazy_shape = length
                seg.analogsignals.append(anaSig)

            bl.segments.append(seg)
            t_start = t_start + length * dt

        bl.create_many_to_one_relationship()

        return bl
Example #21
 def read_block(self, lazy=False, cascade=True, load_waveforms=False):
     """
     Read the file into a single Block, optionally loading spike waveforms.
     """
     # Create block
     bl = Block(file_origin=self.filename)
     if not cascade:
         return bl
     
     seg = self.read_segment(self.filename, lazy=lazy, cascade=cascade,
                             load_waveforms=load_waveforms)
     bl.segments.append(seg)
     neo.io.tools.populate_RecordingChannel(bl, remove_from_annotation=False)
     
     # this creates RecordingChannels and RecordingChannelGroups for
     # attaching Units
     rcg0 = bl.recordingchannelgroups[0]

     def find_rc(chan):
         for rc in rcg0.recordingchannels:
             if rc.index == chan:
                 return rc
     for st in seg.spiketrains:
         chan = st.annotations['channel_index']
         rc = find_rc(chan)
         if rc is None:
             rc = RecordingChannel(index=chan)
             rcg0.recordingchannels.append(rc)
             rc.recordingchannelgroups.append(rcg0)
         if len(rc.recordingchannelgroups) == 1:
             rcg = RecordingChannelGroup(name='Group {0}'.format(chan))
             rcg.recordingchannels.append(rc)
             rc.recordingchannelgroups.append(rcg)
             bl.recordingchannelgroups.append(rcg)
         else:
             rcg = rc.recordingchannelgroups[1]
         unit = Unit(name=st.name)
         rcg.units.append(unit)
         unit.spiketrains.append(st)
     bl.create_many_to_one_relationship()
     
     return bl
Example #22
    def read_block(self, **kargs):
        if self.filename is not None:
            self.axo_obj = axographio.read(self.filename)

        # Build up the block
        blk = Block()

        blk.rec_datetime = None
        if self.filename is not None:
            # modified time is not ideal but less prone to
            # cross-platform issues than created time (ctime)
            blk.file_datetime = datetime.fromtimestamp(os.path.getmtime(self.filename))

            # store the filename if it is available
            blk.file_origin = self.filename

        # determine the channel names and counts
        _, channel_ordering = np.unique(self.axo_obj.names[1:], return_index=True)
        channel_names = np.array(self.axo_obj.names[1:])[np.sort(channel_ordering)]
        channel_count = len(channel_names)

        # determine the time signal and sample period
        sample_period = self.axo_obj.data[0].step * pq.s
        start_time = self.axo_obj.data[0].start * pq.s

        # Attempt to read units from the channel names
        channel_unit_names = [x.split()[-1].strip('()') for x in channel_names]
        channel_units = []

        for unit in channel_unit_names:
            try:
                channel_units.append(pq.Quantity(1, unit))
            except LookupError:
                channel_units.append(None)

        # Strip units from channel names
        channel_names = [' '.join(x.split()[:-1]) for x in channel_names]

        # build up segments by grouping axograph columns
        for seg_idx in range(1, len(self.axo_obj.data), channel_count):
            seg = Segment(index=seg_idx)

            # add in the channels
            for chan_idx in range(0, channel_count):
                signal = pq.Quantity(
                    self.axo_obj.data[seg_idx + chan_idx], channel_units[chan_idx])
                analog = AnalogSignal(signal,
                                      sampling_period=sample_period, t_start=start_time,
                                      name=channel_names[chan_idx], channel_index=chan_idx)
                seg.analogsignals.append(analog)

            blk.segments.append(seg)

        blk.create_many_to_one_relationship()

        return blk
Example #23
    def test__get_epochs(self):
        a_1 = Epoch([0.5, 10.0, 25.2] * pq.s, durations=[5.1, 4.8, 5.0] * pq.s)
        a_1.annotate(epoch_type='a', pick='me')
        a_1.array_annotate(trial_id=[1, 2, 3])

        b_1 = Epoch([5.5, 14.9, 30.1] * pq.s, durations=[4.7, 4.9, 5.2] * pq.s)
        b_1.annotate(epoch_type='b')
        b_1.array_annotate(trial_id=[1, 2, 3])

        a_2 = Epoch([33.2, 41.7, 52.4] * pq.s,
                    durations=[5.3, 5.0, 5.1] * pq.s)
        a_2.annotate(epoch_type='a')
        a_2.array_annotate(trial_id=[4, 5, 6])

        b_2 = Epoch([37.6, 46.1, 57.0] * pq.s,
                    durations=[4.9, 5.2, 5.1] * pq.s)
        b_2.annotate(epoch_type='b')
        b_2.array_annotate(trial_id=[4, 5, 6])

        seg = Segment()
        seg2 = Segment()
        seg.epochs = [a_1, b_1]
        seg2.epochs = [a_2, b_2]

        block = Block()
        block.segments = [seg, seg2]

        # test getting one whole epoch via annotation
        extracted_a_1 = get_epochs(seg, epoch_type='a')
        extracted_a_1b = get_epochs(block, pick='me')

        self.assertEqual(len(extracted_a_1), 1)
        self.assertEqual(len(extracted_a_1b), 1)

        extracted_a_1 = extracted_a_1[0]
        extracted_a_1b = extracted_a_1b[0]

        assert_same_attributes(extracted_a_1, a_1)
        assert_same_attributes(extracted_a_1b, a_1)

        # test getting an empty list by searching for a non-existent property
        empty1 = get_epochs(seg, foo='bar')

        self.assertEqual(len(empty1), 0)

        # test getting an empty list by searching for a non-existent property value
        empty2 = get_epochs(seg, epoch_type='undefined')

        self.assertEqual(len(empty2), 0)

        # test extracting a single time from one epoch
        trial_2 = get_epochs(block, trial_id=2, epoch_type='a')

        self.assertEqual(len(trial_2), 1)

        trial_2 = trial_2[0]

        self.assertEqual(a_1.name, trial_2.name)
        self.assertEqual(a_1.description, trial_2.description)
        self.assertEqual(a_1.file_origin, trial_2.file_origin)
        self.assertEqual(a_1.annotations['epoch_type'],
                         trial_2.annotations['epoch_type'])
        assert_arrays_equal(trial_2.array_annotations['trial_id'],
                            np.array([2]))
        self.assertIsInstance(trial_2.array_annotations, ArrayDict)

        # test extracting a single time from more than one epoch
        trial_2b = get_epochs(block, trial_id=2)

        self.assertEqual(len(trial_2b), 2)

        a_idx = np.where(
            np.array([ev.annotations['epoch_type']
                      for ev in trial_2b]) == 'a')[0][0]

        trial_2b_a = trial_2b[a_idx]
        trial_2b_b = trial_2b[a_idx - 1]

        assert_same_attributes(trial_2b_a, trial_2)

        self.assertEqual(b_1.name, trial_2b_b.name)
        self.assertEqual(b_1.description, trial_2b_b.description)
        self.assertEqual(b_1.file_origin, trial_2b_b.file_origin)
        self.assertEqual(b_1.annotations['epoch_type'],
                         trial_2b_b.annotations['epoch_type'])
        assert_arrays_equal(trial_2b_b.array_annotations['trial_id'],
                            np.array([2]))
        self.assertIsInstance(trial_2b_b.array_annotations, ArrayDict)

        # test extracting more than one time from one epoch
        trials_1_2 = get_epochs(block, trial_id=[1, 2], epoch_type='a')

        self.assertEqual(len(trials_1_2), 1)

        trials_1_2 = trials_1_2[0]

        self.assertEqual(a_1.name, trials_1_2.name)
        self.assertEqual(a_1.description, trials_1_2.description)
        self.assertEqual(a_1.file_origin, trials_1_2.file_origin)
        self.assertEqual(a_1.annotations['epoch_type'],
                         trials_1_2.annotations['epoch_type'])
        assert_arrays_equal(trials_1_2.array_annotations['trial_id'],
                            np.array([1, 2]))
        self.assertIsInstance(trials_1_2.array_annotations, ArrayDict)

        # test extracting more than one time from more than one epoch
        trials_1_2b = get_epochs(block, trial_id=[1, 2])

        self.assertEqual(len(trials_1_2b), 2)

        a_idx = np.where(
            np.array([ev.annotations['epoch_type']
                      for ev in trials_1_2b]) == 'a')[0][0]

        trials_1_2b_a = trials_1_2b[a_idx]
        trials_1_2b_b = trials_1_2b[a_idx - 1]

        assert_same_attributes(trials_1_2b_a, trials_1_2)

        self.assertEqual(b_1.name, trials_1_2b_b.name)
        self.assertEqual(b_1.description, trials_1_2b_b.description)
        self.assertEqual(b_1.file_origin, trials_1_2b_b.file_origin)
        self.assertEqual(b_1.annotations['epoch_type'],
                         trials_1_2b_b.annotations['epoch_type'])
        assert_arrays_equal(trials_1_2b_b.array_annotations['trial_id'],
                            np.array([1, 2]))
        self.assertIsInstance(trials_1_2b_b.array_annotations, ArrayDict)
Example #24
    def read_block(self, lazy=False, cascade=True):

        header = self.read_header()
        version = header['fFileVersionNumber']

        bl = Block()
        bl.file_origin = os.path.basename(self.filename)
        bl.annotate(abf_version=str(version))

        # date and time
        if version < 2.:
            YY = 1900
            MM = 1
            DD = 1
            hh = int(header['lFileStartTime'] / 3600.)
            mm = int((header['lFileStartTime'] - hh * 3600) / 60)
            ss = header['lFileStartTime'] - hh * 3600 - mm * 60
            ms = int(np.mod(ss, 1) * 1e6)
            ss = int(ss)
        elif version >= 2.:
            YY = int(header['uFileStartDate'] / 10000)
            MM = int((header['uFileStartDate'] - YY * 10000) / 100)
            DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
            hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
            mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
            ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
            ms = int(np.mod(ss, 1) * 1e6)
            ss = int(ss)
        bl.rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)

        if not cascade:
            return bl

        # file format
        if header['nDataFormat'] == 0:
            dt = np.dtype('i2')
        elif header['nDataFormat'] == 1:
            dt = np.dtype('f4')

        if version < 2.:
            nbchannel = header['nADCNumChannels']
            head_offset = header['lDataSectionPtr'] * BLOCKSIZE + header[
                'nNumPointsIgnored'] * dt.itemsize
            totalsize = header['lActualAcqLength']
        elif version >= 2.:
            nbchannel = header['sections']['ADCSection']['llNumEntries']
            head_offset = header['sections']['DataSection'][
                'uBlockIndex'] * BLOCKSIZE
            totalsize = header['sections']['DataSection']['llNumEntries']

        data = np.memmap(self.filename, dt, 'r',
                         shape=(totalsize,), offset=head_offset)

        # 3 possible modes
        if version < 2.:
            mode = header['nOperationMode']
        elif version >= 2.:
            mode = header['protocol']['nOperationMode']

        if mode in [1, 2, 5, 3]:
            # event-driven variable-length mode (mode 1)
            # event-driven fixed-length mode (mode 2 or 5)
            # gap free mode (mode 3) can be in several episodes

            # read sweep pos
            if version < 2.:
                nbepisod = header['lSynchArraySize']
                offset_episode = header['lSynchArrayPtr'] * BLOCKSIZE
            elif version >= 2.:
                nbepisod = header['sections']['SynchArraySection'][
                    'llNumEntries']
                offset_episode = header['sections']['SynchArraySection'][
                    'uBlockIndex'] * BLOCKSIZE
            if nbepisod > 0:
                episode_array = np.memmap(
                    self.filename, [('offset', 'i4'), ('len', 'i4')], 'r',
                    shape=nbepisod, offset=offset_episode)
            else:
                episode_array = np.empty(1, [('offset', 'i4'), ('len', 'i4')])
                episode_array[0]['len'] = data.size
                episode_array[0]['offset'] = 0

            # sampling_rate
            if version < 2.:
                sampling_rate = 1. / (header['fADCSampleInterval'] *
                                      nbchannel * 1.e-6) * pq.Hz
            elif version >= 2.:
                sampling_rate = 1.e6 / \
                    header['protocol']['fADCSequenceInterval'] * pq.Hz

            # construct block
            # one sweep = one segment in a block
            pos = 0
            for j in range(episode_array.size):
                seg = Segment(index=j)

                length = episode_array[j]['len']

                if version < 2.:
                    fSynchTimeUnit = header['fSynchTimeUnit']
                elif version >= 2.:
                    fSynchTimeUnit = header['protocol']['fSynchTimeUnit']

                if (fSynchTimeUnit != 0) and (mode == 1):
                    length /= fSynchTimeUnit

                if not lazy:
                    subdata = data[pos:pos+length]
                    subdata = subdata.reshape((int(subdata.size/nbchannel),
                                               nbchannel)).astype('f')
                    if dt == np.dtype('i2'):
                        if version < 2.:
                            reformat_integer_v1(subdata, nbchannel, header)
                        elif version >= 2.:
                            reformat_integer_v2(subdata, nbchannel, header)

                pos += length

                if version < 2.:
                    chans = [chan_num for chan_num in
                             header['nADCSamplingSeq'] if chan_num >= 0]
                else:
                    chans = range(nbchannel)
                for n, i in enumerate(chans[:nbchannel]):  # fix SamplingSeq
                    if version < 2.:
                        name = header['sADCChannelName'][i].replace(b' ', b'')
                        unit = header['sADCUnits'][i].replace(b'\xb5', b'u').\
                            replace(b' ', b'').decode('utf-8')  # \xb5 is µ
                        num = header['nADCPtoLChannelMap'][i]
                    elif version >= 2.:
                        lADCIi = header['listADCInfo'][i]
                        name = lADCIi['ADCChNames'].replace(b' ', b'')
                        unit = lADCIi['ADCChUnits'].replace(b'\xb5', b'u').\
                            replace(b' ', b'').decode('utf-8')
                        num = header['listADCInfo'][i]['nADCNum']
                    if fSynchTimeUnit == 0:
                        t_start = float(episode_array[j]['offset']) / sampling_rate
                    else:
                        t_start = (float(episode_array[j]['offset'])
                                   * fSynchTimeUnit * 1e-6 * pq.s)
                    t_start = t_start.rescale('s')
                    try:
                        pq.Quantity(1, unit)
                    except Exception:  # unknown unit strings fall back to dimensionless
                        unit = ''

                    if lazy:
                        signal = [] * pq.Quantity(1, unit)
                    else:
                        signal = pq.Quantity(subdata[:, n], unit)

                    anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                          t_start=t_start,
                                          name=str(name),
                                          channel_index=int(num))
                    if lazy:
                        anaSig.lazy_shape = length / nbchannel
                    seg.analogsignals.append(anaSig)
                bl.segments.append(seg)

            if mode in [3, 5]:  # TODO: check whether tags exist in other modes
                # tags form an Event array that belongs to the Block;
                # here it is attached to the first Segment
                times = []
                labels = []
                comments = []
                for i, tag in enumerate(header['listTag']):
                    times.append(tag['lTagTime']/sampling_rate)
                    labels.append(str(tag['nTagType']))
                    comments.append(clean_string(tag['sComment']))
                times = np.array(times)
                labels = np.array(labels, dtype='S')
                comments = np.array(comments, dtype='S')
                # attach all tags to the first segment.
                seg = bl.segments[0]
                if lazy:
                    ea = Event(times=[] * pq.s, labels=np.array([], dtype='S'))
                    ea.lazy_shape = len(times)
                else:
                    ea = Event(times=times * pq.s, labels=labels,
                               comments=comments)
                seg.events.append(ea)

        bl.create_many_to_one_relationship()
        return bl
Example #25
    def test__children(self):
        blk = Block(name='block1')
        blk.segments = [self.seg1]
        blk.create_many_to_one_relationship(force=True)
        assert_neo_object_is_compliant(self.seg1)
        assert_neo_object_is_compliant(blk)

        childobjs = ('AnalogSignal',
                     'Epoch', 'Event',
                     'IrregularlySampledSignal',
                     'SpikeTrain')
        childconts = ('analogsignals',
                      'epochs', 'events',
                      'irregularlysampledsignals',
                      'spiketrains')
        self.assertEqual(self.seg1._container_child_objects, ())
        self.assertEqual(self.seg1._data_child_objects, childobjs)
        self.assertEqual(self.seg1._single_parent_objects, ('Block',))
        self.assertEqual(self.seg1._multi_child_objects, ())
        self.assertEqual(self.seg1._multi_parent_objects, ())
        self.assertEqual(self.seg1._child_properties, ())

        self.assertEqual(self.seg1._single_child_objects, childobjs)
        self.assertEqual(self.seg1._container_child_containers, ())
        self.assertEqual(self.seg1._data_child_containers, childconts)
        self.assertEqual(self.seg1._single_child_containers, childconts)
        self.assertEqual(self.seg1._single_parent_containers, ('block',))
        self.assertEqual(self.seg1._multi_child_containers, ())
        self.assertEqual(self.seg1._multi_parent_containers, ())

        self.assertEqual(self.seg1._child_objects, childobjs)
        self.assertEqual(self.seg1._child_containers, childconts)
        self.assertEqual(self.seg1._parent_objects, ('Block',))
        self.assertEqual(self.seg1._parent_containers, ('block',))

        totchildren = (self.nchildren * 2 +  # epoch/event
                       self.nchildren +  # analogsignal
                       self.nchildren ** 2 +  # spiketrain
                       self.nchildren)  # irregsignal
        self.assertEqual(len(self.seg1._single_children), totchildren)
        self.assertEqual(len(self.seg1.data_children), totchildren)
        self.assertEqual(len(self.seg1.children), totchildren)
        self.assertEqual(len(self.seg1.data_children_recur), totchildren)
        self.assertEqual(len(self.seg1.children_recur), totchildren)

        self.assertEqual(len(self.seg1._multi_children), 0)
        self.assertEqual(len(self.seg1.container_children), 0)
        self.assertEqual(len(self.seg1.container_children_recur), 0)

        children = (self.sigarrs1a +
                    self.epcs1a + self.evts1a +
                    self.irsigs1a +
                    self.trains1a)
        assert_same_sub_schema(list(self.seg1._single_children), children)
        assert_same_sub_schema(list(self.seg1.data_children), children)
        assert_same_sub_schema(list(self.seg1.data_children_recur), children)
        assert_same_sub_schema(list(self.seg1.children), children)
        assert_same_sub_schema(list(self.seg1.children_recur), children)

        self.assertEqual(len(self.seg1.parents), 1)
        self.assertEqual(self.seg1.parents[0].name, 'block1')
Example #26
def proc_f32(filename):
    """Load an f32 file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareF32IO to
    make sure BrainwareF32IO is working properly

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    """

    filenameorig = os.path.basename(filename[:-12] + ".f32")

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    rcg = RecordingChannelGroup(file_origin=filenameorig)
    rcg.channel_indexes = np.array([], dtype=int)  # np.int is deprecated; use the builtin
    rcg.channel_names = np.array([], dtype="S")
    unit = Unit(file_origin=filenameorig)

    # load objects into their containers
    block.recordingchannelgroups.append(rcg)
    rcg.units.append(unit)

    try:
        with np.load(filename) as f32obj:
            f32file = list(f32obj.items())[0][1].flatten()  # list() for Python 3
    except IOError as exc:
        if "as a pickle" in str(exc):  # exc.message only existed on Python 2
            block.create_many_to_one_relationship()
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file["sweeplength"]]
    stims = [res.flatten().tolist() for res in f32file["stim"]]

    sweeps = [res["spikes"].flatten() for res in f32file["sweep"] if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype("float32")
            else:
                trainpts = []

            paramnames = ["Param%s" % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts, units=pq.ms, t_start=0,
                               t_stop=sweeplength, file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            unit.spiketrains.append(train)
            block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
Example #27
def proc_f32(filename):
    '''Load an f32 file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareF32IO to
    make sure BrainwareF32IO is working properly

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    '''

    filenameorig = os.path.basename(filename[:-12] + '.f32')

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    chx = ChannelIndex(file_origin=filenameorig,
                       index=np.array([], dtype=int),
                       channel_names=np.array([], dtype='S'))
    unit = Unit(file_origin=filenameorig)

    # load objects into their containers
    block.channel_indexes.append(chx)
    chx.units.append(unit)

    try:
        with np.load(filename) as f32obj:
            f32file = list(f32obj.items())[0][1].flatten()
    except IOError as exc:
        if 'as a pickle' in str(exc):  # exc.message only existed on Python 2
            block.create_many_to_one_relationship()
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file['sweeplength']]
    stims = [res.flatten().tolist() for res in f32file['stim']]

    sweeps = [res['spikes'].flatten() for res in f32file['sweep'] if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype('float32')
            else:
                trainpts = []

            paramnames = ['Param%s' % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts,
                               units=pq.ms,
                               t_start=0,
                               t_stop=sweeplength,
                               file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            unit.spiketrains.append(train)
            block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
Example #28
    def read_block(self, lazy=False, cascade=True):

        header = self.read_header()
        version = header['fFileVersionNumber']

        bl = Block()
        bl.file_origin = os.path.basename(self.filename)
        bl.annotate(abf_version=str(version))

        # date and time
        if version < 2.:
            YY = 1900
            MM = 1
            DD = 1
            hh = int(header['lFileStartTime'] / 3600.)
            mm = int((header['lFileStartTime'] - hh * 3600) / 60)
            ss = header['lFileStartTime'] - hh * 3600 - mm * 60
            ms = int(np.mod(ss, 1) * 1e6)
            ss = int(ss)
        elif version >= 2.:
            YY = int(header['uFileStartDate'] / 10000)
            MM = int((header['uFileStartDate'] - YY * 10000) / 100)
            DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
            hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
            mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
            ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
            ms = int(np.mod(ss, 1) * 1e6)
            ss = int(ss)
        bl.rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)

        if not cascade:
            return bl

        # file format
        if header['nDataFormat'] == 0:
            dt = np.dtype('i2')
        elif header['nDataFormat'] == 1:
            dt = np.dtype('f4')

        if version < 2.:
            nbchannel = header['nADCNumChannels']
            head_offset = header['lDataSectionPtr'] * BLOCKSIZE + header[
                'nNumPointsIgnored'] * dt.itemsize
            totalsize = header['lActualAcqLength']
        elif version >= 2.:
            nbchannel = header['sections']['ADCSection']['llNumEntries']
            head_offset = header['sections']['DataSection'][
                'uBlockIndex'] * BLOCKSIZE
            totalsize = header['sections']['DataSection']['llNumEntries']

        data = np.memmap(self.filename,
                         dt,
                         'r',
                         shape=(totalsize, ),
                         offset=head_offset)

        # 3 possible modes
        if version < 2.:
            mode = header['nOperationMode']
        elif version >= 2.:
            mode = header['protocol']['nOperationMode']

        if mode in [1, 2, 5, 3]:
            # event-driven variable-length mode (mode 1)
            # event-driven fixed-length mode (mode 2 or 5)
            # gap free mode (mode 3) can be in several episodes

            # read sweep pos
            if version < 2.:
                nbepisod = header['lSynchArraySize']
                offset_episode = header['lSynchArrayPtr'] * BLOCKSIZE
            elif version >= 2.:
                nbepisod = header['sections']['SynchArraySection'][
                    'llNumEntries']
                offset_episode = header['sections']['SynchArraySection'][
                    'uBlockIndex'] * BLOCKSIZE
            if nbepisod > 0:
                episode_array = np.memmap(self.filename, [('offset', 'i4'),
                                                          ('len', 'i4')],
                                          'r',
                                          shape=nbepisod,
                                          offset=offset_episode)
            else:
                episode_array = np.empty(1, [('offset', 'i4'), ('len', 'i4')])
                episode_array[0]['len'] = data.size
                episode_array[0]['offset'] = 0

            # sampling_rate
            if version < 2.:
                sampling_rate = 1. / (header['fADCSampleInterval'] *
                                      nbchannel * 1.e-6) * pq.Hz
            elif version >= 2.:
                sampling_rate = 1.e6 / \
                    header['protocol']['fADCSequenceInterval'] * pq.Hz

            # construct block
            # one sweep = one segment in a block
            pos = 0
            for j in range(episode_array.size):
                seg = Segment(index=j)

                length = episode_array[j]['len']

                if version < 2.:
                    fSynchTimeUnit = header['fSynchTimeUnit']
                elif version >= 2.:
                    fSynchTimeUnit = header['protocol']['fSynchTimeUnit']

                if (fSynchTimeUnit != 0) and (mode == 1):
                    length /= fSynchTimeUnit

                if not lazy:
                    subdata = data[pos:pos + length]
                    subdata = subdata.reshape(
                        (int(subdata.size / nbchannel), nbchannel)).astype('f')
                    if dt == np.dtype('i2'):
                        if version < 2.:
                            reformat_integer_v1(subdata, nbchannel, header)
                        elif version >= 2.:
                            reformat_integer_v2(subdata, nbchannel, header)

                pos += length

                if version < 2.:
                    chans = [
                        chan_num for chan_num in header['nADCSamplingSeq']
                        if chan_num >= 0
                    ]
                else:
                    chans = range(nbchannel)
                for n, i in enumerate(chans[:nbchannel]):  # fix SamplingSeq
                    if version < 2.:
                        name = header['sADCChannelName'][i].replace(b' ', b'')
                        unit = header['sADCUnits'][i].replace(b'\xb5', b'u').\
                            replace(b' ', b'').decode('utf-8')  # \xb5 is µ
                        num = header['nADCPtoLChannelMap'][i]
                    elif version >= 2.:
                        lADCIi = header['listADCInfo'][i]
                        name = lADCIi['ADCChNames'].replace(b' ', b'')
                        unit = lADCIi['ADCChUnits'].replace(b'\xb5', b'u').\
                            replace(b' ', b'').decode('utf-8')
                        num = header['listADCInfo'][i]['nADCNum']
                    if (fSynchTimeUnit == 0):
                        t_start = float(
                            episode_array[j]['offset']) / sampling_rate
                    else:
                        t_start = float(episode_array[j]['offset']
                                        ) * fSynchTimeUnit * 1e-6 * pq.s
                    t_start = t_start.rescale('s')
                    try:
                        pq.Quantity(1, unit)
                    except Exception:
                        # fall back to a dimensionless signal if the unit
                        # string cannot be parsed
                        unit = ''

                    if lazy:
                        signal = [] * pq.Quantity(1, unit)
                    else:
                        signal = pq.Quantity(subdata[:, n], unit)

                    anaSig = AnalogSignal(signal,
                                          sampling_rate=sampling_rate,
                                          t_start=t_start,
                                          name=name.decode("utf-8"),
                                          channel_index=int(num))
                    if lazy:
                        anaSig.lazy_shape = length // nbchannel
                    seg.analogsignals.append(anaSig)
                bl.segments.append(seg)

            if mode in [3, 5]:  # TODO check if tags exist in other modes
                # tags are Events that should be attached to the Block;
                # for now they are attached to the first Segment
                times = []
                labels = []
                comments = []
                for i, tag in enumerate(header['listTag']):
                    times.append(tag['lTagTime'] / sampling_rate)
                    labels.append(tag['nTagType'].decode("utf-8"))
                    comments.append(clean_string(tag['sComment']))
                times = np.array(times)
                labels = np.array(labels, dtype='S')
                comments = np.array(comments, dtype='S')
                # attach all tags to the first segment.
                seg = bl.segments[0]
                if lazy:
                    ea = Event(times=[] * pq.s, labels=np.array([], dtype='S'))
                    ea.lazy_shape = len(times)
                else:
                    ea = Event(times=times * pq.s,
                               labels=labels,
                               comments=comments)
                seg.events.append(ea)

        bl.create_many_to_one_relationship()
        return bl
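A minimal usage sketch for the ABF reader above, assuming it is exposed as neo's AxonIO; the class name and file path are illustrative, not taken from this example:

# Hypothetical usage of the ABF reader above.
from neo.io import AxonIO

reader = AxonIO(filename='recording.abf')  # path is an assumption
bl = reader.read_block(lazy=False)
for seg in bl.segments:
    # one sweep = one Segment, as constructed above
    for anasig in seg.analogsignals:
        print(anasig.name, anasig.sampling_rate, anasig.t_start)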
Example #29
0
    def read_block(self,
                   lazy=False,
                   cascade=True,
                   n_starts=None,
                   n_stops=None,
                   channel_list=None):
        """Reads the file and returns contents as a Block.

        The Block contains one Segment for each entry in zip(n_starts,
        n_stops). If these parameters are not specified, the default is
        to store all data in one Segment.

        The Block also contains one RecordingChannelGroup for all channels.

        n_starts: list or array of starting times of each Segment in
            samples from the beginning of the file.
        n_stops: similar, stopping times of each Segment
        channel_list: list of channel numbers to get. The neural data channels
            are 1 - 128. The analog inputs are 129 - 144. The default
            is to acquire all channels.

        Returns: Block object containing the data.
        """

        # Create block
        block = Block(file_origin=self.filename)

        if not cascade:
            return block

        self.loader = Loader(self.filename)
        self.loader.load_file()
        self.header = self.loader.header

        # If channels not specified, get all
        if channel_list is None:
            channel_list = self.loader.get_neural_channel_numbers()

        # If not specified, load all as one Segment
        if n_starts is None:
            n_starts = [0]
            n_stops = [self.loader.header.n_samples]

        #~ # Add channel hierarchy
        #~ rcg = RecordingChannelGroup(name='allchannels',
        #~ description='group of all channels', file_origin=self.filename)
        #~ block.recordingchannelgroups.append(rcg)
        #~ self.channel_number_to_recording_channel = {}

        #~ # Add each channel at a time to hierarchy
        #~ for ch in channel_list:
        #~ ch_object = RecordingChannel(name='channel%d' % ch,
        #~ file_origin=self.filename, index=ch)
        #~ rcg.channel_indexes.append(ch_object.index)
        #~ rcg.channel_names.append(ch_object.name)
        #~ rcg.recordingchannels.append(ch_object)
        #~ self.channel_number_to_recording_channel[ch] = ch_object

        # Iterate through n_starts and n_stops and add one Segment
        # per each.
        for n, (t1, t2) in enumerate(zip(n_starts, n_stops)):
            # Create segment and add metadata
            seg = self.read_segment(n_start=t1,
                                    n_stop=t2,
                                    chlist=channel_list,
                                    lazy=lazy,
                                    cascade=cascade)
            seg.name = 'Segment %d' % n
            seg.index = n
            t1sec = t1 / self.loader.header.f_samp
            t2sec = t2 / self.loader.header.f_samp
            seg.description = 'Segment %d from %f to %f' % (n, t1sec, t2sec)

            # Link to block
            block.segments.append(seg)

        # Create hardware view, and bijectivity
        tools.populate_RecordingChannel(block)
        tools.create_many_to_one_relationship(block)

        return block
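Because the docstring above promises one Segment per (n_start, n_stop) pair, a caller can window a recording by building those lists first. A sketch, assuming a 30 kHz sampling rate and a reader instance named io (both assumptions):

# Split the first second of a hypothetical 30 kHz recording into two
# 0.5 s Segments; 'io' stands for an instance of the IO class above.
n_starts = [0, 15000]            # sample index where each Segment begins
n_stops = [15000, 30000]         # sample index where each Segment ends
block = io.read_block(n_starts=n_starts, n_stops=n_stops,
                      channel_list=[1, 2, 3])  # three neural channels
for seg in block.segments:
    print(seg.name, seg.description)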
Example #30
0
    def test__issue_285(self):
        # Spiketrain
        train = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
        unit = Unit()
        train.unit = unit
        unit.spiketrains.append(train)

        epoch = Epoch([0, 10, 20], [2, 2, 2], ["a", "b", "c"], units="ms")

        blk = Block()
        seg = Segment()
        seg.spiketrains.append(train)
        seg.epochs.append(epoch)
        epoch.segment = seg
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.spiketrains[0].unit, Unit)
        self.assertIsInstance(r_seg.epochs[0], Epoch)
        os.remove('blk.pkl')

        # Epoch
        epoch = Epoch(times=np.arange(0, 30, 10) * pq.s,
                      durations=[10, 5, 7] * pq.ms,
                      labels=np.array(['btn0', 'btn1', 'btn2'], dtype='S'))
        epoch.segment = Segment()
        blk = Block()
        seg = Segment()
        seg.epochs.append(epoch)
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.epochs[0].segment, Segment)
        os.remove('blk.pkl')

        # Event
        event = Event(np.arange(0, 30, 10) * pq.s,
                      labels=np.array(['trig0', 'trig1', 'trig2'], dtype='S'))
        event.segment = Segment()

        blk = Block()
        seg = Segment()
        seg.events.append(event)
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.events[0].segment, Segment)
        os.remove('blk.pkl')

        # IrregularlySampledSignal
        signal = IrregularlySampledSignal([0.0, 1.23, 6.78], [1, 2, 3],
                                          units='mV',
                                          time_units='ms')
        signal.segment = Segment()

        blk = Block()
        seg = Segment()
        seg.irregularlysampledsignals.append(signal)
        blk.segments.append(seg)
        blk.segments[0].block = blk

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.irregularlysampledsignals[0].segment,
                              Segment)
        os.remove('blk.pkl')
Example #31
0
    def setUp(self):
        self.fname = '/tmp/test.exdir'
        if os.path.exists(self.fname):
            shutil.rmtree(self.fname)
        self.n_channels = 5
        self.n_samples = 20
        self.n_spikes = 50
        blk = Block()
        seg = Segment()
        blk.segments.append(seg)
        chx1 = ChannelIndex(index=np.arange(self.n_channels),
                            channel_ids=np.arange(self.n_channels))
        chx2 = ChannelIndex(index=np.arange(self.n_channels),
                            channel_ids=np.arange(self.n_channels) * 2)
        blk.channel_indexes.extend([chx1, chx2])

        wf1 = np.random.random(
            (self.n_spikes, self.n_channels, self.n_samples))
        ts1 = np.sort(np.random.random(self.n_spikes))
        t_stop1 = np.ceil(ts1[-1])
        sptr1 = SpikeTrain(
            times=ts1,
            units='s',
            waveforms=wf1 * pq.V,  # use the precomputed array above
            name='spikes 1',
            description='sptr1',
            t_stop=t_stop1,
            **{'id': 1})
        sptr1.channel_index = chx1
        unit1 = Unit(name='unit 1')
        unit1.spiketrains.append(sptr1)
        chx1.units.append(unit1)
        seg.spiketrains.append(sptr1)

        ts2 = np.sort(np.random.random(self.n_spikes))
        t_stop2 = np.ceil(ts2[-1])
        sptr2 = SpikeTrain(
            times=ts2,
            units='s',
            waveforms=np.random.random(
                (self.n_spikes, self.n_channels, self.n_samples)) * pq.V,
            description='sptr2',
            name='spikes 2',
            t_stop=t_stop2,
            **{'id': 2})
        sptr2.channel_index = chx2
        unit2 = Unit(name='unit 2')
        unit2.spiketrains.append(sptr2)
        chx2.units.append(unit2)
        seg.spiketrains.append(sptr2)

        wf3 = np.random.random(
            (self.n_spikes, self.n_channels, self.n_samples))
        ts3 = np.sort(np.random.random(self.n_spikes))
        t_stop3 = np.ceil(ts3[-1])
        sptr3 = SpikeTrain(
            times=ts3,
            units='s',
            waveforms=wf3 * pq.V,  # use the precomputed array above
            description='sptr3',
            name='spikes 3',
            t_stop=t_stop3,
            **{'id': 3})
        sptr3.channel_index = chx2
        unit3 = Unit(name='unit 3')
        unit3.spiketrains.append(sptr3)
        chx2.units.append(unit3)
        seg.spiketrains.append(sptr3)

        t_stop = max([t_stop1, t_stop2, t_stop3]) * pq.s

        ana = AnalogSignal(np.random.random(self.n_samples),
                           sampling_rate=self.n_samples / t_stop,
                           units='V',
                           name='ana1',
                           description='LFP')
        assert t_stop == ana.t_stop
        seg.analogsignals.append(ana)
        epo = Epoch(np.random.random(self.n_samples),
                    durations=[1] * self.n_samples * pq.s,
                    units='s',
                    name='epo1')
        seg.epochs.append(epo)
        self.blk = blk
Example #32
0
    def read_block(
        self,
        lazy=False,
        get_waveforms=True,
        cluster_group=None,
        raw_data_units='uV',
        get_raw_data=False,
    ):
        """
        Reads a block with segments and channel_indexes

        Parameters:
        get_waveforms: bool, default = True
            Whether or not to get the waveforms
        get_raw_data: bool, default = False
            Whether or not to get the raw traces
        raw_data_units: str, default = "uV"
            SI units of the raw trace according to the voltage_gain given
            to klusta
        cluster_group: str, default = None
            Which clusters to load; possibilities are "noise", "unsorted"
            and "good". If None, all are loaded.
        """
        assert not lazy, 'Do not support lazy'

        blk = Block()
        seg = Segment(file_origin=self.filename)
        blk.segments += [seg]
        for model in self.models:
            group_id = model.channel_group
            group_meta = {'group_id': group_id}
            group_meta.update(model.metadata)
            chx = ChannelIndex(name='channel group #{}'.format(group_id),
                               index=model.channels,
                               **group_meta)
            blk.channel_indexes.append(chx)
            clusters = model.spike_clusters
            for cluster_id in model.cluster_ids:
                meta = model.cluster_metadata[cluster_id]
                if cluster_group is None:
                    pass
                elif cluster_group != meta:
                    continue
                sptr = self.read_spiketrain(cluster_id=cluster_id,
                                            model=model,
                                            get_waveforms=get_waveforms,
                                            raw_data_units=raw_data_units)
                sptr.annotations.update({
                    'cluster_group': meta,
                    'group_id': model.channel_group
                })
                sptr.channel_index = chx
                unit = Unit(cluster_group=meta,
                            group_id=model.channel_group,
                            name='unit #{}'.format(cluster_id))
                unit.spiketrains.append(sptr)
                chx.units.append(unit)
                unit.channel_index = chx
                seg.spiketrains.append(sptr)
            if get_raw_data:
                ana = self.read_analogsignal(model, units=raw_data_units)
                ana.channel_index = chx
                seg.analogsignals.append(ana)

        seg.duration = model.duration * pq.s

        blk.create_many_to_one_relationship()
        return blk
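Since cluster_group is compared against each cluster's metadata, passing a group name loads only matching clusters. A sketch, where io stands for an instance of the reader above (an assumption):

# Load only the clusters labelled 'good', without waveforms or raw traces.
blk = io.read_block(get_waveforms=False,
                    cluster_group='good',
                    get_raw_data=False)
for chx in blk.channel_indexes:
    for unit in chx.units:
        st = unit.spiketrains[0]  # one SpikeTrain per Unit, as built above
        print(unit.name, st.annotations['cluster_group'], len(st))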
Example #34
0
    def read_block(self, block_index=0, lazy=False, signal_group_mode=None,
                   units_group_mode=None, load_waveforms=False):
        """


        :param block_index: int default 0. In case of several block block_index can be specified.

        :param lazy: False by default.

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depend IO):
        This control behavior for grouping channels in AnalogSignal.
            * 'split-all': each channel will give an AnalogSignal
            * 'group-by-same-units' all channel sharing the same quantity units ar grouped in
            a 2D AnalogSignal

        :param units_group_mode: 'split-all' or 'all-in-one'(default depend IO)
        This control behavior for grouping Unit in ChannelIndex:
            * 'split-all': each neo.Unit is assigned to a new neo.ChannelIndex
            * 'all-in-one': all neo.Unit are grouped in the same neo.ChannelIndex
              (global spike sorting for instance)

        :param load_waveforms: False by default. Control SpikeTrains.waveforms is None or not.

        """

        if lazy:
            warnings.warn(
                "Lazy is deprecated and will be replaced by ProxyObject functionality.",
                DeprecationWarning)

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode

        if units_group_mode is None:
            units_group_mode = self._prefered_units_group_mode

        # annotations
        bl_annotations = dict(self.raw_annotations['blocks'][block_index])
        bl_annotations.pop('segments')
        bl_annotations = check_annotations(bl_annotations)

        bl = Block(**bl_annotations)

        # ChannelIndexes are split in 2 parts:
        #  * some for AnalogSignals
        #  * some for Units

        # ChannelIndex for AnalogSignals
        all_channels = self.header['signal_channels']
        channel_indexes_list = self.get_group_channel_indexes()
        for channel_index in channel_indexes_list:
            for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
                    channel_index, signal_group_mode=signal_group_mode).items():
                chidx_annotations = {}
                if signal_group_mode == "split-all":
                    chidx_annotations = self.raw_annotations['signal_channels'][i]
                elif signal_group_mode == "group-by-same-units":
                    for key in list(self.raw_annotations['signal_channels'][i].keys()):
                        chidx_annotations[key] = []
                    for j in ind_abs:
                        for key in list(self.raw_annotations['signal_channels'][i].keys()):
                            chidx_annotations[key].append(self.raw_annotations[
                                'signal_channels'][j][key])
                if 'name' in list(chidx_annotations.keys()):
                    chidx_annotations.pop('name')
                chidx_annotations = check_annotations(chidx_annotations)
                ch_names = all_channels[ind_abs]['name'].astype('S')
                neo_channel_index = ChannelIndex(index=ind_within,
                                                 channel_names=ch_names,
                                                 channel_ids=all_channels[ind_abs]['id'],
                                                 name='Channel group {}'.format(i),
                                                 **chidx_annotations)

                bl.channel_indexes.append(neo_channel_index)

        # ChannelIndex and Unit
        # 2 cases are possible in neo; different IOs have chosen one or the other:
        #  * All units are grouped in the same ChannelIndex and indexes are all channels:
        #    'all-in-one'
        #  * Each units is assigned to one ChannelIndex: 'split-all'
        # This is kept for compatibility
        unit_channels = self.header['unit_channels']
        if units_group_mode == 'all-in-one':
            if unit_channels.size > 0:
                channel_index = ChannelIndex(index=np.array([], dtype='i'),
                                             name='ChannelIndex for all Unit')
                bl.channel_indexes.append(channel_index)
            for c in range(unit_channels.size):
                unit_annotations = self.raw_annotations['unit_channels'][c]
                unit_annotations = check_annotations(unit_annotations)
                unit = Unit(**unit_annotations)
                channel_index.units.append(unit)

        elif units_group_mode == 'split-all':
            for c in range(len(unit_channels)):
                unit_annotations = self.raw_annotations['unit_channels'][c]
                unit_annotations = check_annotations(unit_annotations)
                unit = Unit(**unit_annotations)
                channel_index = ChannelIndex(index=np.array([], dtype='i'),
                                             name='ChannelIndex for Unit')
                channel_index.units.append(unit)
                bl.channel_indexes.append(channel_index)

        # Read all segments
        for seg_index in range(self.segment_count(block_index)):
            seg = self.read_segment(block_index=block_index, seg_index=seg_index,
                                    lazy=lazy, signal_group_mode=signal_group_mode,
                                    load_waveforms=load_waveforms)
            bl.segments.append(seg)

        # create link to other containers ChannelIndex and Units
        for seg in bl.segments:
            for c, anasig in enumerate(seg.analogsignals):
                bl.channel_indexes[c].analogsignals.append(anasig)

            nsig = len(seg.analogsignals)
            for c, sptr in enumerate(seg.spiketrains):
                if units_group_mode == 'all-in-one':
                    bl.channel_indexes[nsig].units[c].spiketrains.append(sptr)
                elif units_group_mode == 'split-all':
                    bl.channel_indexes[nsig + c].units[0].spiketrains.append(sptr)

        bl.create_many_to_one_relationship()

        return bl
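The two grouping modes documented above are easiest to see side by side. A sketch, where io stands for any RawIO-based reader instance (an assumption):

# Compare channel grouping modes on the same file.
bl_split = io.read_block(signal_group_mode='split-all')
bl_grouped = io.read_block(signal_group_mode='group-by-same-units')

# 'split-all' yields one single-channel AnalogSignal per channel, while
# 'group-by-same-units' packs same-unit channels into 2D AnalogSignals,
# so the first count is usually larger.
print(len(bl_split.segments[0].analogsignals),
      len(bl_grouped.segments[0].analogsignals))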
Example #35
0
    def test__children(self):
        blk = Block(name='block1')
        blk.channel_indexes = [self.chx1]
        blk.create_many_to_one_relationship()

        self.assertEqual(self.chx1._container_child_objects, ('Unit', ))
        self.assertEqual(self.chx1._data_child_objects,
                         ('AnalogSignal', 'IrregularlySampledSignal'))
        self.assertEqual(self.chx1._single_parent_objects, ('Block', ))
        self.assertEqual(self.chx1._multi_child_objects, tuple())
        self.assertEqual(self.chx1._multi_parent_objects, ())
        self.assertEqual(self.chx1._child_properties, ())

        self.assertEqual(self.chx1._single_child_objects,
                         ('Unit', 'AnalogSignal', 'IrregularlySampledSignal'))

        self.assertEqual(self.chx1._container_child_containers, ('units', ))
        self.assertEqual(self.chx1._data_child_containers,
                         ('analogsignals', 'irregularlysampledsignals'))
        self.assertEqual(
            self.chx1._single_child_containers,
            ('units', 'analogsignals', 'irregularlysampledsignals'))
        self.assertEqual(self.chx1._single_parent_containers, ('block', ))
        self.assertEqual(self.chx1._multi_child_containers, tuple())
        self.assertEqual(self.chx1._multi_parent_containers, ())

        self.assertEqual(self.chx1._child_objects,
                         ('Unit', 'AnalogSignal', 'IrregularlySampledSignal'))
        self.assertEqual(
            self.chx1._child_containers,
            ('units', 'analogsignals', 'irregularlysampledsignals'))
        self.assertEqual(self.chx1._parent_objects, ('Block', ))
        self.assertEqual(self.chx1._parent_containers, ('block', ))

        self.assertEqual(len(self.chx1._single_children), 3 * self.nchildren)
        self.assertEqual(len(self.chx1._multi_children), 0)
        self.assertEqual(len(self.chx1.data_children), 2 * self.nchildren)
        self.assertEqual(len(self.chx1.data_children_recur),
                         2 * self.nchildren + 1 * self.nchildren**2)
        self.assertEqual(len(self.chx1.container_children), 1 * self.nchildren)
        self.assertEqual(len(self.chx1.container_children_recur),
                         1 * self.nchildren)
        self.assertEqual(len(self.chx1.children), 3 * self.nchildren)
        self.assertEqual(len(self.chx1.children_recur),
                         3 * self.nchildren + 1 * self.nchildren**2)

        assert_same_sub_schema(list(self.chx1._single_children),
                               self.units1a + self.sigarrs1a + self.irrsig1a,
                               exclude=['channel_index'])

        assert_same_sub_schema(list(self.chx1.data_children),
                               self.sigarrs1a + self.irrsig1a,
                               exclude=['channel_index'])
        assert_same_sub_schema(list(self.chx1.data_children_recur),
                               self.sigarrs1a + self.irrsig1a +
                               self.trains1[:2] + self.trains1[2:],
                               exclude=['channel_index'])

        assert_same_sub_schema(list(self.chx1.children),
                               self.sigarrs1a + self.irrsig1a + self.units1a,
                               exclude=['channel_index'])
        assert_same_sub_schema(list(self.chx1.children_recur),
                               self.sigarrs1a + self.irrsig1a +
                               self.trains1[:2] + self.trains1[2:] +
                               self.units1a,
                               exclude=['channel_index'])

        self.assertEqual(len(self.chx1.parents), 1)
        self.assertEqual(self.chx1.parents[0].name, 'block1')
Example #36
0
    def read_block(
        self,
        lazy=False,
        cascade=True,
    ):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade: return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk': continue
            subdir = os.path.join(self.dirname, blockname)
            if not os.path.isdir(subdir): continue

            seg = Segment(name=blockname)
            bl.segments.append(seg)

            #TSQ is the global index
            tsq_filename = os.path.join(subdir,
                                        tankname + '_' + blockname + '.tsq')
            dt = [
                ('size', 'int32'),
                ('evtype', 'int32'),
                ('code', 'S4'),
                ('channel', 'uint16'),
                ('sortcode', 'uint16'),
                ('timestamp', 'float64'),
                ('eventoffset', 'int64'),
                ('dataformat', 'int32'),
                ('frequency', 'float32'),
            ]
            tsq = np.fromfile(tsq_filename, dtype=dt)

            #0x8801: 'EVTYPE_MARK' gives the global t_start
            global_t_start = tsq[tsq['evtype'] == 0x8801]['timestamp'][0]

            #TEV is the old data file
            if os.path.exists(
                    os.path.join(subdir, tankname + '_' + blockname + '.tev')):
                tev_filename = os.path.join(
                    subdir, tankname + '_' + blockname + '.tev')
                #tev_array = np.memmap(tev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                tev_array = np.fromfile(tev_filename, dtype='uint8')

            else:
                tev_filename = None

            for type_code, type_label in tdt_event_type:
                mask1 = tsq['evtype'] == type_code
                codes = np.unique(tsq[mask1]['code'])

                for code in codes:
                    mask2 = mask1 & (tsq['code'] == code)
                    channels = np.unique(tsq[mask2]['channel'])

                    for channel in channels:
                        mask3 = mask2 & (tsq['channel'] == channel)

                        if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                            if lazy:
                                times = [] * pq.s
                                labels = np.array([], dtype=str)
                            else:
                                times = (tsq[mask3]['timestamp'] -
                                         global_t_start) * pq.s
                                labels = tsq[mask3]['eventoffset'].view(
                                    'float64').astype('S')
                            ea = EventArray(times=times,
                                            name=code,
                                            channel_index=int(channel),
                                            labels=labels)
                            if lazy:
                                ea.lazy_shape = np.sum(mask3)
                            seg.eventarrays.append(ea)

                        elif type_label == 'EVTYPE_SNIP':
                            sortcodes = np.unique(tsq[mask3]['sortcode'])
                            for sortcode in sortcodes:
                                mask4 = mask3 & (tsq['sortcode'] == sortcode)
                                nb_spike = np.sum(mask4)
                                sr = tsq[mask4]['frequency'][0]
                                waveformsize = tsq[mask4]['size'][0] - 10
                                if lazy:
                                    times = [] * pq.s
                                    waveforms = None
                                else:
                                    times = (tsq[mask4]['timestamp'] -
                                             global_t_start) * pq.s
                                    dt = np.dtype(data_formats[
                                        tsq[mask3]['dataformat'][0]])
                                    waveforms = get_chunks(
                                        tsq[mask4]['size'],
                                        tsq[mask4]['eventoffset'],
                                        tev_array).view(dt)
                                    waveforms = waveforms.reshape(
                                        nb_spike, -1, waveformsize)
                                    waveforms = waveforms * pq.mV
                                if nb_spike > 0:
                                    #   t_start = (tsq['timestamp'][0] - global_t_start) * pq.s  # this should work but does not
                                    t_start = 0 * pq.s
                                    t_stop = (tsq['timestamp'][-1] -
                                              global_t_start) * pq.s

                                else:
                                    t_start = 0 * pq.s
                                    t_stop = 0 * pq.s
                                st = SpikeTrain(
                                    times=times,
                                    name='Chan{} Code{}'.format(
                                        channel, sortcode),
                                    t_start=t_start,
                                    t_stop=t_stop,
                                    waveforms=waveforms,
                                    left_sweep=waveformsize / 2. / sr * pq.s,
                                    sampling_rate=sr * pq.Hz,
                                )
                                st.annotate(channel_index=channel)
                                if lazy:
                                    st.lazy_shape = nb_spike
                                seg.spiketrains.append(st)

                        elif type_label == 'EVTYPE_STREAM':
                            dt = np.dtype(
                                data_formats[tsq[mask3]['dataformat'][0]])
                            shape = np.sum(tsq[mask3]['size'] - 10)
                            sr = tsq[mask3]['frequency'][0]
                            if lazy:
                                signal = []
                            else:
                                if PY3K:
                                    signame = code.decode('ascii')
                                else:
                                    signame = code
                                sev_filename = os.path.join(
                                    subdir, tankname + '_' + blockname + '_' +
                                    signame + '_ch' + str(channel) + '.sev')
                                if os.path.exists(sev_filename):
                                    #sig_array = np.memmap(sev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                                    sig_array = np.fromfile(sev_filename,
                                                            dtype='uint8')
                                else:
                                    sig_array = tev_array
                                signal = get_chunks(tsq[mask3]['size'],
                                                    tsq[mask3]['eventoffset'],
                                                    sig_array).view(dt)

                            anasig = AnalogSignal(
                                signal=signal * pq.V,
                                name='{} {}'.format(code, channel),
                                sampling_rate=sr * pq.Hz,
                                t_start=(tsq[mask3]['timestamp'][0] -
                                         global_t_start) * pq.s,
                                channel_index=int(channel))
                            if lazy:
                                anasig.lazy_shape = shape
                            seg.analogsignals.append(anasig)
        # finalize only after all block directories have been read;
        # returning inside the loop would stop after the first one
        bl.create_many_to_one_relationship()
        return bl
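The TSQ index read above is a flat array of fixed-size records, so the same structured dtype can be reused on its own to inspect a tank; the file name below is hypothetical:

# Inspect a TSQ index file directly with the dtype used above.
import numpy as np

dt = [('size', 'int32'), ('evtype', 'int32'), ('code', 'S4'),
      ('channel', 'uint16'), ('sortcode', 'uint16'),
      ('timestamp', 'float64'), ('eventoffset', 'int64'),
      ('dataformat', 'int32'), ('frequency', 'float32')]
tsq = np.fromfile('Tank_Block-1.tsq', dtype=dt)  # hypothetical path

# 0x8801 ('EVTYPE_MARK') records carry the global start time
global_t_start = tsq[tsq['evtype'] == 0x8801]['timestamp'][0]
print(global_t_start, np.unique(tsq['code']))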
Example #37
0
    def read_block(self,
                   # the 2 first keyword arguments are imposed by neo.io API
                   lazy = False,
                   cascade = True):
        """
        Return a Block.

        """

        def count_samples(m_length):
            """
            Count the number of signal samples available in a type 5 data block
            of length m_length

            """

            # for information about type 5 data block, see [1]
            count = int((m_length-6)/2-2)
            # -6 corresponds to the header of block 5, and the -2 takes into
            # account the fact that last 2 values are not available as the 4
            # corresponding bytes are coding the time stamp of the beginning
            # of the block
            return count

        # create the neo Block that will be returned at the end
        blck = Block(file_origin = os.path.basename(self.filename))

        fid = open(self.filename, 'rb')

        # NOTE: in the following, the word "block" is used in the sense used in
        # the alpha-omega specifications (ie a data chunk in the file), rather
        # than in the sense of the usual Block object in neo

        # step 1: read the headers of all the data blocks to load the file
        # structure

        pos_block = 0 # position of the current block in the file
        file_blocks = [] # list of data blocks available in the file

        if not cascade:
            # we read only the main header

            m_length, m_TypeBlock = struct.unpack('Hcx', fid.read(4))
            # m_TypeBlock should be 'h', as we read the first block
            block = HeaderReader(fid,
                                 dict_header_type.get(m_TypeBlock,
                                                      Type_Unknown)).read_f()
            block.update({'m_length': m_length,
                          'm_TypeBlock': m_TypeBlock,
                          'pos': pos_block})
            file_blocks.append(block)

        else: # cascade == True

            seg = Segment(file_origin = os.path.basename(self.filename))
            blck.segments.append(seg)

            while True:
                first_4_bytes = fid.read(4)
                if len(first_4_bytes) < 4:
                    # we have reached the end of the file
                    break
                else:
                    m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)

                block = HeaderReader(fid,
                                dict_header_type.get(m_TypeBlock,
                                                     Type_Unknown)).read_f()
                block.update({'m_length': m_length,
                              'm_TypeBlock': m_TypeBlock,
                              'pos': pos_block})

                if m_TypeBlock == '2':
                    # The beginning of the block of type '2' is identical for
                    # all types of channels, but the following part depends on
                    # the type of channel. So we need a special case here.

                    # WARNING: How to check the type of channel is not
                    # described in the documentation. So here I use what is
                    # proposed in the C code [2].
                    # According to this C code, it seems that the 'm_isAnalog'
                    # is used to distinguish analog and digital channels, and
                    # 'm_Mode' encodes the type of analog channel:
                    # 0 for continuous, 1 for level, 2 for external trigger.
                    # But in some files, I found channels that seemed to be
                    # continuous channels with 'm_Modes' = 128 or 192. So I
                    # decided to consider every channel with 'm_Modes'
                    # different from 1 or 2 as continuous. I also couldn't
                    # check that values of 1 and 2 are really for level and
                    # external trigger as I had no test files containing data
                    # of these types.

                    type_subblock = 'unknown_channel_type(m_Mode=' \
                                    + str(block['m_Mode'])+ ')'
                    description = Type2_SubBlockUnknownChannels
                    block.update({'m_Name': 'unknown_name'})
                    if block['m_isAnalog'] == 0:
                        # digital channel
                        type_subblock = 'digital'
                        description = Type2_SubBlockDigitalChannels
                    elif block['m_isAnalog'] == 1:
                        # analog channel
                        if block['m_Mode'] == 1:
                            # level channel
                            type_subblock = 'level'
                            description = Type2_SubBlockLevelChannels
                        elif block['m_Mode'] == 2:
                            # external trigger channel
                            type_subblock = 'external_trigger'
                            description = Type2_SubBlockExtTriggerChannels
                        else:
                            # continuous channel
                            type_subblock = 'continuous(Mode' \
                                            + str(block['m_Mode']) +')'
                            description = Type2_SubBlockContinuousChannels

                    subblock = HeaderReader(fid, description).read_f()

                    block.update(subblock)
                    block.update({'type_subblock': type_subblock})

                file_blocks.append(block)
                pos_block += m_length
                fid.seek(pos_block)

            # step 2: find the available channels
            list_chan = [] # list containing indexes of channel blocks
            for ind_block, block in enumerate(file_blocks):
                if block['m_TypeBlock'] == '2':
                    list_chan.append(ind_block)

            # step 3: find blocks containing data for the available channels
            list_data = [] # list of lists of indexes of data blocks
                           # corresponding to each channel
            for ind_chan, chan in enumerate(list_chan):
                list_data.append([])
                num_chan = file_blocks[chan]['m_numChannel']
                for ind_block, block in enumerate(file_blocks):
                    if block['m_TypeBlock'] == '5':
                        if block['m_numChannel'] == num_chan:
                            list_data[ind_chan].append(ind_block)


            # step 4: compute the length (number of samples) of the channels
            chan_len = np.zeros(len(list_data), dtype = int)
            for ind_chan, list_blocks in enumerate(list_data):
                for ind_block in list_blocks:
                    chan_len[ind_chan] += count_samples(
                                          file_blocks[ind_block]['m_length'])

            # step 5: find channels for which data are available
            ind_valid_chan = np.nonzero(chan_len)[0]

            # step 6: load the data
            # TODO give the possibility to load data as AnalogSignalArrays
            for ind_chan in ind_valid_chan:
                list_blocks = list_data[ind_chan]
                ind = 0 # index in the data vector

                # read time stamp for the beginning of the signal
                form = '<l' # reading format
                ind_block = list_blocks[0]
                count = count_samples(file_blocks[ind_block]['m_length'])
                fid.seek(file_blocks[ind_block]['pos']+6+count*2)
                buf = fid.read(struct.calcsize(form))
                val = struct.unpack(form , buf)
                start_index = val[0]

                # WARNING: in the following, blocks are read supposing that they
                # are all contiguous and sorted in time. I don't know if it's
                # always the case. Maybe we should use the time stamp of each
                # data block to choose where to put the read data in the array.
                if not lazy:
                    temp_array = np.empty(chan_len[ind_chan], dtype = np.int16)
                    # NOTE: we could directly create an empty AnalogSignal and
                    # load the data in it, but it is much faster to load data
                    # in a temporary numpy array and create the AnalogSignals
                    # from this temporary array
                    for ind_block in list_blocks:
                        count = count_samples(
                                file_blocks[ind_block]['m_length'])
                        fid.seek(file_blocks[ind_block]['pos']+6)
                        temp_array[ind:ind+count] = \
                            np.fromfile(fid, dtype = np.int16, count = count)
                        ind += count

                sampling_rate = \
                    file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
                t_start = (start_index / sampling_rate).simplified
                if lazy:
                    ana_sig = AnalogSignal([],
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)
                    ana_sig.lazy_shape = chan_len[ind_chan]
                else:
                    ana_sig = AnalogSignal(temp_array,
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)
# todo apibreak: create ChannelIndex for each signals
#                ana_sig.channel_index = \
#                            file_blocks[list_chan[ind_chan]]['m_numChannel']
                ana_sig.annotate(channel_name = \
                            file_blocks[list_chan[ind_chan]]['m_Name'])
                ana_sig.annotate(channel_type = \
                            file_blocks[list_chan[ind_chan]]['type_subblock'])
                seg.analogsignals.append(ana_sig)

        fid.close()

        if file_blocks[0]['m_TypeBlock'] == 'h': # this should always be true
            blck.rec_datetime = datetime.datetime(\
                file_blocks[0]['m_date_year'],
                file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'],
                file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
                # the 10000 is here to convert m_time_hsecond from centisecond
                # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version = version)
            if cascade:
                seg.rec_datetime = blck.rec_datetime.replace()
                # I couldn't find a simple copy function for datetime,
                # using replace without arguments is a twisted way to make a
                # copy
                seg.annotate(alphamap_version = version)
        if cascade:
            blck.create_many_to_one_relationship()

        return blck
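As a quick check of the count_samples arithmetic above: a type 5 block of m_length = 106 bytes holds a 6-byte header and 100 bytes of int16 payload (50 values), of which the last two int16 slots (4 bytes) encode the timestamp, leaving (106 - 6) / 2 - 2 = 48 samples:

# Worked check of the type 5 block arithmetic used above.
def count_samples(m_length):
    return int((m_length - 6) / 2 - 2)

assert count_samples(106) == 48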
Example #38
0
    def read_block(self, lazy=False):
        """Returns a Block containing spike information.

        There is no obvious way to infer the segment boundaries from
        raw spike times, so for now all spike times are returned in one
        big segment. The way around this would be to specify the segment
        boundaries, and then change this code to put the spikes in the right
        segments.
        """
        assert not lazy, 'Do not support lazy'

        # Create block and segment to hold all the data
        block = Block()
        # Search data directory for KlustaKwik files.
        # If nothing found, return empty block
        self._fetfiles = self._fp.read_filenames('fet')
        self._clufiles = self._fp.read_filenames('clu')
        if len(self._fetfiles) == 0:
            return block

        # Create a single segment to hold all of the data
        seg = Segment(name='seg0', index=0, file_origin=self.filename)
        block.segments.append(seg)

        # Load spike times from each group and store in a dict, keyed
        # by group number
        self.spiketrains = dict()
        for group in sorted(self._fetfiles.keys()):
            # Load spike times
            fetfile = self._fetfiles[group]
            spks, features = self._load_spike_times(fetfile)

            # Load cluster ids or generate
            if group in self._clufiles:
                clufile = self._clufiles[group]
                uids = self._load_unit_id(clufile)
            else:
                # unclustered data, assume all zeros
                uids = np.zeros(spks.shape, dtype=np.int32)

            # error check
            if len(spks) != len(uids):
                raise ValueError("lengths of fet and clu files are different")

            # Create Unit for each cluster
            unique_unit_ids = np.unique(uids)
            for unit_id in sorted(unique_unit_ids):
                # Initialize the unit
                u = Unit(name=('unit %d from group %d' % (unit_id, group)),
                         index=unit_id, group=group)

                # Initialize a new SpikeTrain for the spikes from this unit
                st = SpikeTrain(
                    times=spks[uids == unit_id] / self.sampling_rate,
                    units='sec', t_start=0.0,
                    t_stop=spks.max() / self.sampling_rate,
                    name=('unit %d from group %d' % (unit_id, group)))
                st.annotations['cluster'] = unit_id
                st.annotations['group'] = group

                # put features in
                if len(features) != 0:
                    st.annotations['waveform_features'] = features

                # Link
                u.spiketrains.append(st)
                seg.spiketrains.append(st)

        block.create_many_to_one_relationship()
        return block
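A usage sketch for the KlustaKwik reader above; the constructor mirrors neo's KlustaKwikIO, but the base filename and sampling rate are assumptions:

# Hypothetical usage of the KlustaKwik reader above.
from neo.io import KlustaKwikIO

io = KlustaKwikIO(filename='data/my_session', sampling_rate=30000.)
block = io.read_block()
for st in block.segments[0].spiketrains:
    # 'group' and 'cluster' annotations are set in read_block above
    print(st.name, st.annotations['group'], st.annotations['cluster'], len(st))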
Example #39
0
    def read_block(self, lazy=False):
        """
        Return a Block.

        """
        assert not lazy, 'Do not support lazy'

        def count_samples(m_length):
            """
            Count the number of signal samples available in a type 5 data block
            of length m_length

            """

            # for information about type 5 data block, see [1]
            count = int((m_length - 6) / 2 - 2)
            # -6 corresponds to the header of block 5, and the -2 takes into
            # account the fact that last 2 values are not available as the 4
            # corresponding bytes are coding the time stamp of the beginning
            # of the block
            return count

        # create the neo Block that will be returned at the end
        blck = Block(file_origin=os.path.basename(self.filename))

        fid = open(self.filename, 'rb')

        # NOTE: in the following, the word "block" is used in the sense used in
        # the alpha-omega specifications (ie a data chunk in the file), rather
        # than in the sense of the usual Block object in neo

        # step 1: read the headers of all the data blocks to load the file
        # structure

        pos_block = 0  # position of the current block in the file
        file_blocks = []  # list of data blocks available in the file

        seg = Segment(file_origin=os.path.basename(self.filename))
        blck.segments.append(seg)

        while True:
            first_4_bytes = fid.read(4)
            if len(first_4_bytes) < 4:
                # we have reached the end of the file
                break
            else:
                m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)

            block = HeaderReader(
                fid, dict_header_type.get(m_TypeBlock, Type_Unknown)).read_f()
            block.update({
                'm_length': m_length,
                'm_TypeBlock': m_TypeBlock,
                'pos': pos_block
            })

            if m_TypeBlock == '2':
                # The beginning of the block of type '2' is identical for
                # all types of channels, but the following part depends on
                # the type of channel. So we need a special case here.

                # WARNING: How to check the type of channel is not
                # described in the documentation. So here I use what is
                # proposed in the C code [2].
                # According to this C code, it seems that the 'm_isAnalog'
                # is used to distinguish analog and digital channels, and
                # 'm_Mode' encodes the type of analog channel:
                # 0 for continuous, 1 for level, 2 for external trigger.
                # But in some files, I found channels that seemed to be
                # continuous channels with 'm_Modes' = 128 or 192. So I
                # decided to consider every channel with 'm_Modes'
                # different from 1 or 2 as continuous. I also couldn't
                # check that values of 1 and 2 are really for level and
                # external trigger as I had no test files containing data
                # of these types.

                type_subblock = 'unknown_channel_type(m_Mode=' \
                                + str(block['m_Mode']) + ')'
                description = Type2_SubBlockUnknownChannels
                block.update({'m_Name': 'unknown_name'})
                if block['m_isAnalog'] == 0:
                    # digital channel
                    type_subblock = 'digital'
                    description = Type2_SubBlockDigitalChannels
                elif block['m_isAnalog'] == 1:
                    # analog channel
                    if block['m_Mode'] == 1:
                        # level channel
                        type_subblock = 'level'
                        description = Type2_SubBlockLevelChannels
                    elif block['m_Mode'] == 2:
                        # external trigger channel
                        type_subblock = 'external_trigger'
                        description = Type2_SubBlockExtTriggerChannels
                    else:
                        # continuous channel
                        type_subblock = 'continuous(Mode' \
                                        + str(block['m_Mode']) + ')'
                        description = Type2_SubBlockContinuousChannels

                subblock = HeaderReader(fid, description).read_f()

                block.update(subblock)
                block.update({'type_subblock': type_subblock})

            file_blocks.append(block)
            pos_block += m_length
            fid.seek(pos_block)

        # step 2: find the available channels
        list_chan = []  # list containing indexes of channel blocks
        for ind_block, block in enumerate(file_blocks):
            if block['m_TypeBlock'] == '2':
                list_chan.append(ind_block)

        # step 3: find blocks containing data for the available channels
        list_data = []  # list of lists of indexes of data blocks
        # corresponding to each channel
        for ind_chan, chan in enumerate(list_chan):
            list_data.append([])
            num_chan = file_blocks[chan]['m_numChannel']
            for ind_block, block in enumerate(file_blocks):
                if block['m_TypeBlock'] == '5':
                    if block['m_numChannel'] == num_chan:
                        list_data[ind_chan].append(ind_block)

        # step 4: compute the length (number of samples) of the channels
        chan_len = np.zeros(len(list_data), dtype=int)
        for ind_chan, list_blocks in enumerate(list_data):
            for ind_block in list_blocks:
                chan_len[ind_chan] += count_samples(
                    file_blocks[ind_block]['m_length'])

        # step 5: find channels for which data are available
        ind_valid_chan = np.nonzero(chan_len)[0]

        # step 6: load the data
        # TODO give the possibility to load data as AnalogSignalArrays
        for ind_chan in ind_valid_chan:
            list_blocks = list_data[ind_chan]
            ind = 0  # index in the data vector

            # read time stamp for the beginning of the signal
            form = '<l'  # reading format
            ind_block = list_blocks[0]
            count = count_samples(file_blocks[ind_block]['m_length'])
            fid.seek(file_blocks[ind_block]['pos'] + 6 + count * 2)
            buf = fid.read(struct.calcsize(form))
            val = struct.unpack(form, buf)
            start_index = val[0]

            # WARNING: in the following, blocks are read supposing that they
            # are all contiguous and sorted in time. I don't know if it's
            # always the case. Maybe we should use the time stamp of each
            # data block to choose where to put the read data in the array.

            temp_array = np.empty(chan_len[ind_chan], dtype=np.int16)
            # NOTE: we could directly create an empty AnalogSignal and
            # load the data in it, but it is much faster to load data
            # in a temporary numpy array and create the AnalogSignals
            # from this temporary array
            for ind_block in list_blocks:
                count = count_samples(file_blocks[ind_block]['m_length'])
                fid.seek(file_blocks[ind_block]['pos'] + 6)
                temp_array[ind:ind + count] = \
                    np.fromfile(fid, dtype=np.int16, count=count)
                ind += count

            sampling_rate = \
                file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
            t_start = (start_index / sampling_rate).simplified

            ana_sig = AnalogSignal(
                temp_array,
                sampling_rate=sampling_rate,
                t_start=t_start,
                name=file_blocks[list_chan[ind_chan]]['m_Name'],
                file_origin=os.path.basename(self.filename),
                units=pq.dimensionless)
            # todo apibreak: create ChannelIndex for each signals
            #                ana_sig.channel_index = \
            #                            file_blocks[list_chan[ind_chan]]['m_numChannel']
            ana_sig.annotate(
                channel_name=file_blocks[list_chan[ind_chan]]['m_Name'])
            ana_sig.annotate(
                channel_type=file_blocks[list_chan[ind_chan]]['type_subblock'])
            seg.analogsignals.append(ana_sig)

        fid.close()

        if file_blocks[0]['m_TypeBlock'] == 'h':  # this should always be true
            blck.rec_datetime = datetime.datetime(
                file_blocks[0]['m_date_year'], file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'], file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
            # the 10000 is here to convert m_time_hsecond from centisecond
            # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version=version)

            seg.rec_datetime = blck.rec_datetime.replace()
            # datetime objects are immutable and have no copy() method;
            # calling replace() without arguments returns an equal copy
            seg.annotate(alphamap_version=version)

        blck.create_many_to_one_relationship()

        return blck
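
For orientation, a hedged usage sketch of the reader above; the AlphaOmegaIO class name and the .map filename are assumptions for illustration, since only the method body is shown.

from neo.io import AlphaOmegaIO  # class name assumed

reader = AlphaOmegaIO(filename='recording.map')  # hypothetical file
blck = reader.read_block()
print(blck.rec_datetime, blck.annotations.get('alphamap_version'))
for ana_sig in blck.segments[0].analogsignals:
    print(ana_sig.annotations['channel_name'], ana_sig.sampling_rate)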
Example #40
    def test__cut_block_by_epochs(self):
        seg = Segment()

        proxy_anasig = AnalogSignalProxy(rawio=self.reader,
                                         stream_index=0,
                                         inner_stream_channels=None,
                                         block_index=0,
                                         seg_index=0)
        seg.analogsignals.append(proxy_anasig)

        proxy_st = SpikeTrainProxy(rawio=self.reader,
                                   spike_channel_index=0,
                                   block_index=0,
                                   seg_index=0)
        seg.spiketrains.append(proxy_st)

        proxy_event = EventProxy(rawio=self.reader,
                                 event_channel_index=0,
                                 block_index=0,
                                 seg_index=0)
        seg.events.append(proxy_event)

        proxy_epoch = EpochProxy(rawio=self.reader,
                                 event_channel_index=1,
                                 block_index=0,
                                 seg_index=0)
        proxy_epoch.annotate(pick='me')
        seg.epochs.append(proxy_epoch)

        loaded_epoch = proxy_epoch.load()
        loaded_event = proxy_event.load()
        loaded_st = proxy_st.load()
        loaded_anasig = proxy_anasig.load()

        original_block = Block()
        original_block.segments = [seg]
        original_block.create_many_to_one_relationship()

        block = cut_block_by_epochs(original_block, properties={'pick': 'me'})

        assert_neo_object_is_compliant(block)
        self.assertEqual(len(block.segments), proxy_epoch.shape[0])

        for epoch_idx in range(len(loaded_epoch)):
            sliced_event = loaded_event.time_slice(
                t_start=loaded_epoch.times[epoch_idx],
                t_stop=loaded_epoch.times[epoch_idx] +
                loaded_epoch.durations[epoch_idx])
            has_event = len(sliced_event) > 0

            sliced_anasig = loaded_anasig.time_slice(
                t_start=loaded_epoch.times[epoch_idx],
                t_stop=loaded_epoch.times[epoch_idx] +
                loaded_epoch.durations[epoch_idx])

            sliced_st = loaded_st.time_slice(
                t_start=loaded_epoch.times[epoch_idx],
                t_stop=loaded_epoch.times[epoch_idx] +
                loaded_epoch.durations[epoch_idx])

            self.assertEqual(len(block.segments[epoch_idx].events),
                             int(has_event))
            self.assertEqual(len(block.segments[epoch_idx].spiketrains), 1)
            self.assertEqual(len(block.segments[epoch_idx].analogsignals), 1)

            self.assertTrue(
                isinstance(block.segments[epoch_idx].spiketrains[0],
                           SpikeTrain))
            assert_same_attributes(block.segments[epoch_idx].spiketrains[0],
                                   sliced_st)

            self.assertTrue(
                isinstance(block.segments[epoch_idx].analogsignals[0],
                           AnalogSignal))
            assert_same_attributes(block.segments[epoch_idx].analogsignals[0],
                                   sliced_anasig)

            if has_event:
                self.assertTrue(
                    isinstance(block.segments[epoch_idx].events[0], Event))
                assert_same_attributes(block.segments[epoch_idx].events[0],
                                       sliced_event)

        block2 = Block()
        seg2 = Segment()
        epoch = Epoch(np.arange(10) * pq.s, durations=np.ones(10) * pq.s)
        epoch.annotate(pick='me instead')
        seg2.epochs = [proxy_epoch, epoch]
        block2.segments = [seg2]
        block2.create_many_to_one_relationship()

        # test correct loading and slicing of EpochProxy objects
        # (not tested above since we used the EpochProxy to cut the block)

        block3 = cut_block_by_epochs(block2, properties={'pick': 'me instead'})

        for epoch_idx in range(len(epoch)):
            sliced_epoch = loaded_epoch.time_slice(
                t_start=epoch.times[epoch_idx],
                t_stop=epoch.times[epoch_idx] + epoch.durations[epoch_idx])
            has_epoch = len(sliced_epoch) > 0

            if has_epoch:
                self.assertTrue(
                    isinstance(block3.segments[epoch_idx].epochs[0], Epoch))
                assert_same_attributes(block3.segments[epoch_idx].epochs[0],
                                       sliced_epoch)
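
The proxy machinery above obscures the basic contract of cut_block_by_epochs; here is a minimal, self-contained sketch (import location neo.utils assumed) showing that one new Segment is produced per epoch interval:

import quantities as pq
from neo import Block, Segment, Epoch, SpikeTrain
from neo.utils import cut_block_by_epochs  # import location assumed

seg = Segment()
seg.spiketrains.append(SpikeTrain([1.0, 4.0, 11.0] * pq.s, t_stop=20.0 * pq.s))
seg.epochs.append(Epoch([0.0, 10.0] * pq.s, durations=[5.0, 5.0] * pq.s))

original = Block()
original.segments = [seg]
original.create_many_to_one_relationship()

# one Segment per epoch interval; contained data objects are time-sliced to fit
cut = cut_block_by_epochs(original)
assert len(cut.segments) == 2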
Example #41
    def test__children(self):
        blk = Block(name='block1')
        blk.channel_indexes = [self.chx1]
        blk.create_many_to_one_relationship()

        self.assertEqual(self.chx1._container_child_objects, ('Unit',))
        self.assertEqual(self.chx1._data_child_objects, ('AnalogSignal', 'IrregularlySampledSignal'))
        self.assertEqual(self.chx1._single_parent_objects, ('Block',))
        self.assertEqual(self.chx1._multi_child_objects, tuple())
        self.assertEqual(self.chx1._multi_parent_objects, ())
        self.assertEqual(self.chx1._child_properties, ())

        self.assertEqual(self.chx1._single_child_objects,
                         ('Unit', 'AnalogSignal', 'IrregularlySampledSignal'))

        self.assertEqual(self.chx1._container_child_containers, ('units',))
        self.assertEqual(self.chx1._data_child_containers,
                         ('analogsignals', 'irregularlysampledsignals'))
        self.assertEqual(self.chx1._single_child_containers,
                         ('units', 'analogsignals', 'irregularlysampledsignals'))
        self.assertEqual(self.chx1._single_parent_containers, ('block',))
        self.assertEqual(self.chx1._multi_child_containers,
                         tuple())
        self.assertEqual(self.chx1._multi_parent_containers, ())

        self.assertEqual(self.chx1._child_objects,
                         ('Unit', 'AnalogSignal', 'IrregularlySampledSignal'))
        self.assertEqual(self.chx1._child_containers,
                         ('units', 'analogsignals', 'irregularlysampledsignals'))
        self.assertEqual(self.chx1._parent_objects, ('Block',))
        self.assertEqual(self.chx1._parent_containers, ('block',))

        self.assertEqual(len(self.chx1._single_children), 3*self.nchildren)
        self.assertEqual(len(self.chx1._multi_children), 0)
        self.assertEqual(len(self.chx1.data_children), 2*self.nchildren)
        self.assertEqual(len(self.chx1.data_children_recur),
                         2*self.nchildren + 1*self.nchildren**2)
        self.assertEqual(len(self.chx1.container_children), 1*self.nchildren)
        self.assertEqual(len(self.chx1.container_children_recur),
                         1*self.nchildren)
        self.assertEqual(len(self.chx1.children), 3*self.nchildren)
        self.assertEqual(len(self.chx1.children_recur),
                         3*self.nchildren + 1*self.nchildren**2)

        assert_same_sub_schema(list(self.chx1._single_children),
                               self.units1a + self.sigarrs1a + self.irrsig1a,
                               exclude=['channel_index'])

        assert_same_sub_schema(list(self.chx1.data_children), self.sigarrs1a + self.irrsig1a,
                               exclude=['channel_index'])
        assert_same_sub_schema(list(self.chx1.data_children_recur),
                               self.sigarrs1a + self.irrsig1a +
                               self.trains1[:2] + self.trains1[2:],
                               exclude=['channel_index'])

        assert_same_sub_schema(list(self.chx1.children),
                               self.sigarrs1a + self.irrsig1a + self.units1a,
                               exclude=['channel_index'])
        assert_same_sub_schema(list(self.chx1.children_recur),
                               self.sigarrs1a + self.irrsig1a +
                               self.trains1[:2] + self.trains1[2:] +
                               self.units1a,
                               exclude=['channel_index'])

        self.assertEqual(len(self.chx1.parents), 1)
        self.assertEqual(self.chx1.parents[0].name, 'block1')
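
For reference, the hierarchy whose bookkeeping is checked above can be built directly; a minimal sketch using the same (legacy) ChannelIndex/Unit API:

import numpy as np
import quantities as pq
from neo import Block, ChannelIndex, Unit, AnalogSignal, SpikeTrain

blk = Block(name='block1')
chx = ChannelIndex(name='electrode group', index=np.array([0, 1]))
blk.channel_indexes.append(chx)

unit = Unit(name='unit 0')
chx.units.append(unit)
unit.spiketrains.append(SpikeTrain([0.5, 1.2] * pq.s, t_stop=2.0 * pq.s))

chx.analogsignals.append(AnalogSignal(np.zeros((10, 2)), units=pq.mV,
                                      sampling_rate=1.0 * pq.kHz))

blk.create_many_to_one_relationship()
assert chx.parents[0].name == 'block1'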
Example #42
    def test__cut_block_by_epochs(self):
        epoch = Epoch([0.5, 10.0, 25.2] * pq.s,
                      durations=[5.1, 4.8, 5.0] * pq.s,
                      t_start=.1 * pq.s)
        epoch.annotate(epoch_type='a', pick='me', nix_name='neo.epoch.0')
        epoch.array_annotate(trial_id=[1, 2, 3])

        epoch2 = Epoch([0.6, 9.5, 16.8, 34.1] * pq.s,
                       durations=[4.5, 4.8, 5.0, 5.0] * pq.s,
                       t_start=.1 * pq.s)
        epoch2.annotate(epoch_type='b', nix_name='neo.epoch.1')
        epoch2.array_annotate(trial_id=[1, 2, 3, 4])

        event = Event(times=[0.5, 10.0, 25.2] * pq.s, t_start=.1 * pq.s)
        event.annotate(event_type='trial start', nix_name='neo.event.0')
        event.array_annotate(trial_id=[1, 2, 3])

        anasig = AnalogSignal(np.arange(50.0) * pq.mV,
                              t_start=.1 * pq.s,
                              sampling_rate=1.0 * pq.Hz)
        irrsig = IrregularlySampledSignal(signal=np.arange(50.0) * pq.mV,
                                          times=anasig.times,
                                          t_start=.1 * pq.s)
        st = SpikeTrain(
            np.arange(0.5, 50, 7) * pq.s,
            t_start=.1 * pq.s,
            t_stop=50.0 * pq.s,
            waveforms=np.array(
                [[[0., 1.], [0.1, 1.1]], [[2., 3.], [2.1, 3.1]],
                 [[4., 5.], [4.1, 5.1]], [[6., 7.], [6.1, 7.1]],
                 [[8., 9.], [8.1, 9.1]], [[12., 13.], [12.1, 13.1]],
                 [[14., 15.], [14.1, 15.1]], [[16., 17.], [16.1, 17.1]]]) *
            pq.mV,
            array_annotations={'spikenum': np.arange(1, 9)})

        # test without resetting the time
        seg = Segment(nix_name='neo.segment.0')
        seg2 = Segment(name='NoCut', nix_name='neo.segment.1')
        seg.epochs = [epoch, epoch2]
        seg.events = [event]
        seg.analogsignals = [anasig]
        seg.irregularlysampledsignals = [irrsig]
        seg.spiketrains = [st]

        original_block = Block()
        original_block.segments = [seg, seg2]
        original_block.create_many_to_one_relationship()

        with warnings.catch_warnings(record=True) as w:
            # This should raise a warning as one segment does not contain epochs
            block = cut_block_by_epochs(original_block,
                                        properties={'pick': 'me'})
            self.assertEqual(len(w), 1)

        assert_neo_object_is_compliant(block)
        self.assertEqual(len(block.segments), 3)

        for epoch_idx in range(len(epoch)):
            self.assertEqual(len(block.segments[epoch_idx].events), 1)
            self.assertEqual(len(block.segments[epoch_idx].spiketrains), 1)
            self.assertEqual(len(block.segments[epoch_idx].analogsignals), 1)
            self.assertEqual(
                len(block.segments[epoch_idx].irregularlysampledsignals), 1)

            annos = block.segments[epoch_idx].annotations
            # new segment objects have different identity
            self.assertNotIn('nix_name', annos)

            if epoch_idx != 0:
                self.assertEqual(len(block.segments[epoch_idx].epochs), 1)
            else:
                self.assertEqual(len(block.segments[epoch_idx].epochs), 2)

            assert_same_attributes(
                block.segments[epoch_idx].spiketrains[0],
                st.time_slice(t_start=epoch.times[epoch_idx],
                              t_stop=epoch.times[epoch_idx] +
                              epoch.durations[epoch_idx]))
            assert_same_attributes(
                block.segments[epoch_idx].analogsignals[0],
                anasig.time_slice(t_start=epoch.times[epoch_idx],
                                  t_stop=epoch.times[epoch_idx] +
                                  epoch.durations[epoch_idx]))
            assert_same_attributes(
                block.segments[epoch_idx].irregularlysampledsignals[0],
                irrsig.time_slice(t_start=epoch.times[epoch_idx],
                                  t_stop=epoch.times[epoch_idx] +
                                  epoch.durations[epoch_idx]))
            assert_same_attributes(
                block.segments[epoch_idx].events[0],
                event.time_slice(t_start=epoch.times[epoch_idx],
                                 t_stop=epoch.times[epoch_idx] +
                                 epoch.durations[epoch_idx]))
        assert_same_attributes(
            block.segments[0].epochs[0],
            epoch.time_slice(t_start=epoch.times[0],
                             t_stop=epoch.times[0] + epoch.durations[0]))
        assert_same_attributes(
            block.segments[0].epochs[1],
            epoch2.time_slice(t_start=epoch.times[0],
                              t_stop=epoch.times[0] + epoch.durations[0]))

        # test with resetting the time
        seg = Segment(nix_name='neo.segment.0')
        seg2 = Segment(name='NoCut', nix_name='neo.segment.1')
        seg.epochs = [epoch, epoch2]
        seg.events = [event]
        seg.analogsignals = [anasig]
        seg.irregularlysampledsignals = [irrsig]
        seg.spiketrains = [st]

        original_block = Block()
        original_block.segments = [seg, seg2]
        original_block.create_many_to_one_relationship()

        with warnings.catch_warnings(record=True) as w:
            # This should raise a warning as one segment does not contain epochs
            block = cut_block_by_epochs(original_block,
                                        properties={'pick': 'me'},
                                        reset_time=True)
            self.assertEqual(len(w), 1)

        assert_neo_object_is_compliant(block)
        self.assertEqual(len(block.segments), 3)

        for epoch_idx in range(len(epoch)):
            self.assertEqual(len(block.segments[epoch_idx].events), 1)
            self.assertEqual(len(block.segments[epoch_idx].spiketrains), 1)
            self.assertEqual(len(block.segments[epoch_idx].analogsignals), 1)
            self.assertEqual(
                len(block.segments[epoch_idx].irregularlysampledsignals), 1)

            annos = block.segments[epoch_idx].annotations
            self.assertNotIn('nix_name', annos)

            if epoch_idx != 0:
                self.assertEqual(len(block.segments[epoch_idx].epochs), 1)
            else:
                self.assertEqual(len(block.segments[epoch_idx].epochs), 2)

            assert_same_attributes(
                block.segments[epoch_idx].spiketrains[0],
                st.time_shift(-epoch.times[epoch_idx]).time_slice(
                    t_start=0 * pq.s, t_stop=epoch.durations[epoch_idx]))

            anasig_target = anasig.time_shift(-epoch.times[epoch_idx])
            anasig_target = anasig_target.time_slice(
                t_start=0 * pq.s, t_stop=epoch.durations[epoch_idx])
            assert_same_attributes(block.segments[epoch_idx].analogsignals[0],
                                   anasig_target)
            irrsig_target = irrsig.time_shift(-epoch.times[epoch_idx])
            irrsig_target = irrsig_target.time_slice(
                t_start=0 * pq.s, t_stop=epoch.durations[epoch_idx])
            assert_same_attributes(
                block.segments[epoch_idx].irregularlysampledsignals[0],
                irrsig_target)
            assert_same_attributes(
                block.segments[epoch_idx].events[0],
                event.time_shift(-epoch.times[epoch_idx]).time_slice(
                    t_start=0 * pq.s, t_stop=epoch.durations[epoch_idx]))

        assert_same_attributes(
            block.segments[0].epochs[0],
            epoch.time_shift(-epoch.times[0]).time_slice(
                t_start=0 * pq.s, t_stop=epoch.durations[0]))
        assert_same_attributes(
            block.segments[0].epochs[1],
            epoch2.time_shift(-epoch.times[0]).time_slice(
                t_start=0 * pq.s, t_stop=epoch.durations[0]))
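
The reset_time branch checks an identity worth stating on its own: cutting with reset_time=True equals shifting by the epoch start and slicing from zero. A self-contained sketch:

import numpy as np
import quantities as pq
from neo import AnalogSignal

anasig = AnalogSignal(np.arange(50.0) * pq.mV, t_start=0.1 * pq.s,
                      sampling_rate=1.0 * pq.Hz)
t0, dur = 10.0 * pq.s, 4.8 * pq.s

# slice then shift, versus shift then slice from zero: same samples, same clock
a = anasig.time_slice(t_start=t0, t_stop=t0 + dur).time_shift(-t0)
b = anasig.time_shift(-t0).time_slice(t_start=0 * pq.s, t_stop=dur)
assert a.t_start == b.t_start and (a == b).all()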
Example #43
File: kwikio.py Project: CINPLA/python-neo
    def read_block(self,
                   lazy=False,
                   cascade=True,
                   get_waveforms=True,
                   cluster_metadata='all',
                   raw_data_units='uV',
                   get_raw_data=False,
                   ):
        """
        Reads a block with segments and channel_indexes

        Parameters:
        get_waveforms: bool, default = True
            Whether or not to load the waveforms
        get_raw_data: bool, default = False
            Whether or not to load the raw traces
        raw_data_units: str, default = "uV"
            SI units of the raw trace, according to the voltage_gain given
            to klusta
        cluster_metadata: str, default = "all"
            Which clusters to load; one of "noise", "unsorted", "good" or
            "all". If "all" is selected, noise clusters are omitted.
        """
        assert isinstance(cluster_metadata, str)
        blk = Block()
        if cascade:
            seg = Segment(file_origin=self.filename)
            blk.segments += [seg]
            for model in self.models:
                group_id = model.channel_group
                group_meta = {'group_id': group_id}
                group_meta.update(model.metadata)
                chx = ChannelIndex(name='channel group #{}'.format(group_id),
                                   index=model.channels,
                                   **group_meta)
                blk.channel_indexes.append(chx)
                clusters = model.spike_clusters
                for cluster_id in model.cluster_ids:
                    meta = model.cluster_metadata[cluster_id]
                    if cluster_metadata == 'all':
                        if meta == 'noise':
                            continue
                    elif cluster_metadata != meta:
                        continue
                    sptr = self.read_spiketrain(cluster_id=cluster_id,
                                                model=model, lazy=lazy,
                                                cascade=cascade,
                                                get_waveforms=get_waveforms)
                    sptr.annotations.update({'cluster_metadata': meta,
                                             'group_id': model.channel_group})
                    sptr.channel_index = chx
                    unit = Unit()
                    unit.spiketrains.append(sptr)
                    chx.units.append(unit)
                    unit.channel_index = chx
                    seg.spiketrains.append(sptr)
                if get_raw_data:
                    ana = self.read_analogsignal(model, raw_data_units,
                                                 lazy, cascade)
                    ana.channel_index = chx
                    seg.analogsignals.append(ana)

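            # note: the duration is taken from the last model in the loop above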
            seg.duration = model.duration * pq.s

        blk.create_many_to_one_relationship()
        return blk
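
A hedged usage sketch of this reader (the KwikIO class name and the .kwik filename are illustrative assumptions):

from neo.io import KwikIO  # class name assumed

io = KwikIO(filename='experiment.kwik')  # hypothetical file
blk = io.read_block(get_waveforms=True, cluster_metadata='good')
for chx in blk.channel_indexes:
    print(chx.name, 'units:', len(chx.units))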
Example #44
    def test__children(self):
        blk = Block(name='block1')
        blk.recordingchannelgroups = [self.rcg1]
        blk.create_many_to_one_relationship()

        self.assertEqual(self.rcg1._container_child_objects, ('Unit',))
        self.assertEqual(self.rcg1._data_child_objects, ('AnalogSignalArray',))
        self.assertEqual(self.rcg1._single_parent_objects, ('Block',))
        self.assertEqual(self.rcg1._multi_child_objects, ('RecordingChannel',))
        self.assertEqual(self.rcg1._multi_parent_objects, ())
        self.assertEqual(self.rcg1._child_properties, ())

        self.assertEqual(self.rcg1._single_child_objects,
                         ('Unit', 'AnalogSignalArray',))

        self.assertEqual(self.rcg1._container_child_containers, ('units',))
        self.assertEqual(self.rcg1._data_child_containers,
                         ('analogsignalarrays',))
        self.assertEqual(self.rcg1._single_child_containers,
                         ('units', 'analogsignalarrays'))
        self.assertEqual(self.rcg1._single_parent_containers, ('block',))
        self.assertEqual(self.rcg1._multi_child_containers,
                         ('recordingchannels',))
        self.assertEqual(self.rcg1._multi_parent_containers, ())

        self.assertEqual(self.rcg1._child_objects,
                         ('Unit', 'AnalogSignalArray', 'RecordingChannel'))
        self.assertEqual(self.rcg1._child_containers,
                         ('units', 'analogsignalarrays', 'recordingchannels'))
        self.assertEqual(self.rcg1._parent_objects, ('Block',))
        self.assertEqual(self.rcg1._parent_containers, ('block',))

        self.assertEqual(len(self.rcg1._single_children), 2*self.nchildren)
        self.assertEqual(len(self.rcg1._multi_children), self.nchildren)
        self.assertEqual(len(self.rcg1.data_children), self.nchildren)
        self.assertEqual(len(self.rcg1.data_children_recur),
                         self.nchildren + 4*self.nchildren**2)
        self.assertEqual(len(self.rcg1.container_children), 2*self.nchildren)
        self.assertEqual(len(self.rcg1.container_children_recur),
                         2*self.nchildren)
        self.assertEqual(len(self.rcg1.children), 3*self.nchildren)
        self.assertEqual(len(self.rcg1.children_recur),
                         3*self.nchildren + 4*self.nchildren**2)

        assert_same_sub_schema(list(self.rcg1._multi_children), self.rchans1)
        assert_same_sub_schema(list(self.rcg1._single_children),
                               self.units1a + self.sigarrs1a,
                               exclude=['channel_index'])

        assert_same_sub_schema(list(self.rcg1.container_children),
                               self.units1a + self.rchans1)
        assert_same_sub_schema(list(self.rcg1.container_children_recur),
                               self.units1a + self.rchans1)

        assert_same_sub_schema(list(self.rcg1.data_children), self.sigarrs1a,
                               exclude=['channel_index'])
        assert_same_sub_schema(list(self.rcg1.data_children_recur),
                               self.sigarrs1a +
                               self.spikes1[:2] + self.trains1[:2] +
                               self.spikes1[2:] + self.trains1[2:] +
                               self.sigs1[:2] + self.irsigs1[:2] +
                               self.sigs1[2:] + self.irsigs1[2:],
                               exclude=['channel_index'])

        assert_same_sub_schema(list(self.rcg1.children),
                               self.sigarrs1a + self.units1a + self.rchans1a,
                               exclude=['channel_index'])
        assert_same_sub_schema(list(self.rcg1.children_recur),
                               self.sigarrs1a +
                               self.spikes1[:2] + self.trains1[:2] +
                               self.spikes1[2:] + self.trains1[2:] +
                               self.sigs1[:2] + self.irsigs1[:2] +
                               self.sigs1[2:] + self.irsigs1[2:] +
                               self.units1a + self.rchans1a,
                               exclude=['channel_index'])

        self.assertEqual(len(self.rcg1.parents), 1)
        self.assertEqual(self.rcg1.parents[0].name, 'block1')
Example #45
File: __init__.py Project: matham/MCDConv
def create_nix_file(file_pat,
                    output=None,
                    signed=False,
                    rate=1,
                    electrode_scale=0.0104,
                    analog_scale=12.5122,
                    channels=channels,
                    chunks=256 * 1024 * 1024):
    '''Convert the raw files matching file_pat into a single NIX file.
    The default electrode_scale and analog_scale are the resolution
    (i.e. voltage per bit) in uV.
    '''
    filenames = sorted(glob(file_pat))

    dtype = np.int16 if signed else np.uint16
    N = len(channels)
    slice_size = N * 2
    # round the chunk size down to a whole number of samples across channels
    chunks = chunks - chunks % slice_size

    reader = read_files(filenames, dtype, chunks, slice_size)
    data = next(reader)

    if signed:
        af = lambda x: x.astype(np.float32) * analog_scale
        ef = lambda x: x.astype(np.float32) * electrode_scale
        df = lambda x: np.array(x, dtype=np.bool_)
    else:
        af = lambda x: (x.astype(np.float32) - 2**15) * analog_scale
        ef = lambda x: (x.astype(np.float32) - 2**15) * electrode_scale
        df = lambda x: np.array(x.astype(np.int32) - 2**15, dtype=np.bool_)

    analogs = [(i, ch) for i, ch in enumerate(channels) if ch.startswith('A')]
    digitals = [(i, ch) for i, ch in enumerate(channels) if ch.startswith('D')]
    electrodes = [(i, ch) for i, ch in enumerate(channels)
                  if not ch.startswith('D') and not ch.startswith('A')]

    groups = (('Analog', analogs, af, np.float32),
              ('Digital', digitals, df, np.bool_), ('Electrodes', electrodes,
                                                    ef, np.float32))

    if output is None:
        output = '{}.h5'.format(splitext(filenames[0])[0])
    ofile = NixIO(output, mode='ow')

    blk = Block()
    for group_name, channels, f, dtype in groups:
        seg = Segment(name=group_name)
        for slice_idx, chan_name in channels:
            seg.analogsignals.append(
                AnalogSignal(f(data[slice_idx::N]),
                             dtype=dtype,
                             units=uV,
                             sampling_rate=rate * Hz,
                             name=chan_name))
        blk.segments.append(seg)

    ofile.write_block(blk)
    nix_file = ofile.nix_file
    nix_groups = nix_file.blocks[0].groups

    for data in reader:
        for k, (group_name, channels, f, _) in enumerate(groups):
            data_arrays = nix_groups[k].data_arrays
            for i, (slice_idx, _) in enumerate(channels):
                # nix_file._h5file.flush()
                data_arrays[i].append(f(data[slice_idx::N]))

    nix_file.close()
    return output
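
A hedged call sketch; the file pattern and sampling rate are illustrative, and channels falls back to the module-level default seen in the signature:

# convert all matching raw files into one NIX/HDF5 file at 25 kHz,
# reading unsigned 16-bit samples (the default)
output_path = create_nix_file('session_*.raw', rate=25000, signed=False)
print(output_path)  # defaults to '<first matching file>.h5'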
Example #46
    def test__get_events(self):
        starts_1 = Event(times=[0.5, 10.0, 25.2] * pq.s,
                         labels=['label1', 'label2', 'label3'],
                         name='pick_me')
        starts_1.annotate(event_type='trial start')
        starts_1.array_annotate(trial_id=[1, 2, 3])

        stops_1 = Event(times=[5.5, 14.9, 30.1] * pq.s)
        stops_1.annotate(event_type='trial stop')
        stops_1.array_annotate(trial_id=[1, 2, 3])

        starts_2 = Event(times=[33.2, 41.7, 52.4] * pq.s)
        starts_2.annotate(event_type='trial start')
        starts_2.array_annotate(trial_id=[4, 5, 6])

        stops_2 = Event(times=[37.6, 46.1, 57.0] * pq.s)
        stops_2.annotate(event_type='trial stop')
        stops_2.array_annotate(trial_id=[4, 5, 6])

        seg = Segment()
        seg2 = Segment()
        seg.events = [starts_1, stops_1]
        seg2.events = [starts_2, stops_2]

        block = Block()
        block.segments = [seg, seg2]

        # test getting one whole event via annotation or attribute
        extracted_starts1 = get_events(seg, event_type='trial start')
        extracted_starts1b = get_events(block, name='pick_me')

        self.assertEqual(len(extracted_starts1), 1)
        self.assertEqual(len(extracted_starts1b), 1)

        extracted_starts1 = extracted_starts1[0]
        extracted_starts1b = extracted_starts1b[0]

        assert_same_attributes(extracted_starts1, starts_1)
        assert_same_attributes(extracted_starts1b, starts_1)

        # test getting an empty list by searching for a non-existent property
        empty1 = get_events(seg, foo='bar')

        self.assertEqual(len(empty1), 0)

        # test getting an empty list by searching for a non-existent property value
        empty2 = get_events(seg, event_type='undefined')

        self.assertEqual(len(empty2), 0)

        # test getting only one event time of one event
        trial_2 = get_events(block, trial_id=2, event_type='trial start')

        self.assertEqual(len(trial_2), 1)

        trial_2 = trial_2[0]

        self.assertEqual(starts_1.name, trial_2.name)
        self.assertEqual(starts_1.description, trial_2.description)
        self.assertEqual(starts_1.file_origin, trial_2.file_origin)
        self.assertEqual(starts_1.annotations['event_type'],
                         trial_2.annotations['event_type'])
        assert_arrays_equal(trial_2.array_annotations['trial_id'],
                            np.array([2]))
        self.assertIsInstance(trial_2.array_annotations, ArrayDict)

        # test getting only one event time of more than one event
        trial_2b = get_events(block, trial_id=2)

        self.assertEqual(len(trial_2b), 2)

        start_idx = np.where(
            np.array([ev.annotations['event_type']
                      for ev in trial_2b]) == 'trial start')[0][0]

        trial_2b_start = trial_2b[start_idx]
        trial_2b_stop = trial_2b[start_idx - 1]

        assert_same_attributes(trial_2b_start, trial_2)

        self.assertEqual(stops_1.name, trial_2b_stop.name)
        self.assertEqual(stops_1.description, trial_2b_stop.description)
        self.assertEqual(stops_1.file_origin, trial_2b_stop.file_origin)
        self.assertEqual(stops_1.annotations['event_type'],
                         trial_2b_stop.annotations['event_type'])
        assert_arrays_equal(trial_2b_stop.array_annotations['trial_id'],
                            np.array([2]))
        self.assertIsInstance(trial_2b_stop.array_annotations, ArrayDict)

        # test getting more than one event time of one event
        trials_1_2 = get_events(block,
                                trial_id=[1, 2],
                                event_type='trial start')

        self.assertEqual(len(trials_1_2), 1)

        trials_1_2 = trials_1_2[0]

        self.assertEqual(starts_1.name, trials_1_2.name)
        self.assertEqual(starts_1.description, trials_1_2.description)
        self.assertEqual(starts_1.file_origin, trials_1_2.file_origin)
        self.assertEqual(starts_1.annotations['event_type'],
                         trials_1_2.annotations['event_type'])
        assert_arrays_equal(trials_1_2.array_annotations['trial_id'],
                            np.array([1, 2]))
        self.assertIsInstance(trials_1_2.array_annotations, ArrayDict)

        # test selecting event times by label
        trials_1_2 = get_events(block, labels=['label1', 'label2'])

        self.assertEqual(len(trials_1_2), 1)

        trials_1_2 = trials_1_2[0]

        self.assertEqual(starts_1.name, trials_1_2.name)
        self.assertEqual(starts_1.description, trials_1_2.description)
        self.assertEqual(starts_1.file_origin, trials_1_2.file_origin)
        self.assertEqual(starts_1.annotations['event_type'],
                         trials_1_2.annotations['event_type'])
        assert_arrays_equal(trials_1_2.array_annotations['trial_id'],
                            np.array([1, 2]))
        self.assertIsInstance(trials_1_2.array_annotations, ArrayDict)

        # test getting more than one event time of more than one event
        trials_1_2b = get_events(block, trial_id=[1, 2])

        self.assertEqual(len(trials_1_2b), 2)

        start_idx = np.where(
            np.array([ev.annotations['event_type']
                      for ev in trials_1_2b]) == 'trial start')[0][0]

        trials_1_2b_start = trials_1_2b[start_idx]
        trials_1_2b_stop = trials_1_2b[start_idx - 1]

        assert_same_attributes(trials_1_2b_start, trials_1_2)

        self.assertEqual(stops_1.name, trials_1_2b_stop.name)
        self.assertEqual(stops_1.description, trials_1_2b_stop.description)
        self.assertEqual(stops_1.file_origin, trials_1_2b_stop.file_origin)
        self.assertEqual(stops_1.annotations['event_type'],
                         trials_1_2b_stop.annotations['event_type'])
        assert_arrays_equal(trials_1_2b_stop.array_annotations['trial_id'],
                            np.array([1, 2]))
        self.assertIsInstance(trials_1_2b_stop.array_annotations, ArrayDict)
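
The rule the test exercises is compact: plain annotations select whole Event objects, while array annotations additionally mask individual event times. A minimal, self-contained sketch (import location neo.utils assumed):

import quantities as pq
from neo import Block, Segment, Event
from neo.utils import get_events  # import location assumed

ev = Event(times=[0.5, 10.0] * pq.s)
ev.annotate(event_type='trial start')
ev.array_annotate(trial_id=[1, 2])

seg = Segment()
seg.events = [ev]
blk = Block()
blk.segments = [seg]

assert len(get_events(blk, event_type='trial start')) == 1  # whole event selected
assert len(get_events(blk, trial_id=2)[0]) == 1             # masked to one time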
Example #47
    def read_block(self, lazy=False):
        """Returns a Block containing spike information.

        There is no obvious way to infer the segment boundaries from
        raw spike times, so for now all spike times are returned in one
        big segment. The way around this would be to specify the segment
        boundaries, and then change this code to put the spikes in the right
        segments.
        """
        assert not lazy, 'Do not support lazy'

        # Create block and segment to hold all the data
        block = Block()
        # Search data directory for KlustaKwik files.
        # If nothing found, return empty block
        self._fetfiles = self._fp.read_filenames('fet')
        self._clufiles = self._fp.read_filenames('clu')
        if len(self._fetfiles) == 0:
            return block

        # Create a single segment to hold all of the data
        seg = Segment(name='seg0', index=0, file_origin=self.filename)
        block.segments.append(seg)

        # Load spike times from each group and store in a dict, keyed
        # by group number
        self.spiketrains = dict()
        for group in sorted(self._fetfiles.keys()):
            # Load spike times
            fetfile = self._fetfiles[group]
            spks, features = self._load_spike_times(fetfile)

            # Load cluster ids or generate
            if group in self._clufiles:
                clufile = self._clufiles[group]
                uids = self._load_unit_id(clufile)
            else:
                # unclustered data, assume all zeros
                uids = np.zeros(spks.shape, dtype=np.int32)

            # error check
            if len(spks) != len(uids):
                raise ValueError("lengths of fet and clu files are different")

            # Create Unit for each cluster
            unique_unit_ids = np.unique(uids)
            for unit_id in sorted(unique_unit_ids):
                # Initialize the unit
                u = Unit(name=('unit %d from group %d' % (unit_id, group)),
                         index=unit_id, group=group)

                # Initialize a new SpikeTrain for the spikes from this unit
                st = SpikeTrain(
                    times=spks[uids == unit_id] / self.sampling_rate,
                    units='sec', t_start=0.0,
                    t_stop=spks.max() / self.sampling_rate,
                    name=('unit %d from group %d' % (unit_id, group)))
                st.annotations['cluster'] = unit_id
                st.annotations['group'] = group

                # put features in
                if len(features) != 0:
                    st.annotations['waveform_features'] = features

                # Link
                u.spiketrains.append(st)
                seg.spiketrains.append(st)

        block.create_many_to_one_relationship()
        return block
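
A hedged usage sketch (the KlustaKwikIO class name, basename layout, and sampling rate are assumptions for illustration):

from neo.io import KlustaKwikIO  # class name assumed

io = KlustaKwikIO(filename='data/session', sampling_rate=30000.)  # hypothetical basename
blk = io.read_block()
for st in blk.segments[0].spiketrains:
    print(st.annotations['group'], st.annotations['cluster'], len(st))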
Example #48
    def read_block(self, lazy=False, cascade=True,
        n_starts=None, n_stops=None, channel_list=None):
        """Reads the file and returns contents as a Block.

        The Block contains one Segment for each entry in zip(n_starts,
        n_stops). If these parameters are not specified, the default is
        to store all data in one Segment.

        The Block also contains one RecordingChannelGroup for all channels.

        n_starts: list or array of starting times of each Segment, in
            samples from the beginning of the file.
        n_stops: list or array of corresponding stopping times of each
            Segment, in the same units.
        channel_list: list of channel numbers to get. The neural data
            channels are 1 - 128. The analog inputs are 129 - 144. The
            default is to acquire all channels.

        Returns: Block object containing the data.
        """


        # Create block
        block = Block(file_origin=self.filename)

        if not cascade:
            return block

        self.loader = Loader(self.filename)
        self.loader.load_file()
        self.header = self.loader.header

        # If channels not specified, get all
        if channel_list is None:
            channel_list = self.loader.get_neural_channel_numbers()

        # If not specified, load all as one Segment
        if n_starts is None:
            n_starts = [0]
            n_stops = [self.loader.header.n_samples]

        #~ # Add channel hierarchy
        #~ rcg = RecordingChannelGroup(name='allchannels',
            #~ description='group of all channels', file_origin=self.filename)
        #~ block.recordingchannelgroups.append(rcg)
        #~ self.channel_number_to_recording_channel = {}

        #~ # Add each channel at a time to hierarchy
        #~ for ch in channel_list:
            #~ ch_object = RecordingChannel(name='channel%d' % ch,
                #~ file_origin=self.filename, index=ch)
            #~ rcg.channel_indexes.append(ch_object.index)
            #~ rcg.channel_names.append(ch_object.name)
            #~ rcg.recordingchannels.append(ch_object)
            #~ self.channel_number_to_recording_channel[ch] = ch_object

        # Iterate through n_starts and n_stops and add one Segment
        # per each.
        for n, (t1, t2) in enumerate(zip(n_starts, n_stops)):
            # Create segment and add metadata
            seg = self.read_segment(n_start=t1, n_stop=t2, chlist=channel_list,
                lazy=lazy, cascade=cascade)
            seg.name = 'Segment %d' % n
            seg.index = n
            t1sec = t1 / self.loader.header.f_samp
            t2sec = t2 / self.loader.header.f_samp
            seg.description = 'Segment %d from %f to %f' % (n, t1sec, t2sec)

            # Link to block
            block.segments.append(seg)

        # Create hardware view, and bijectivity
        tools.populate_RecordingChannel(block)
        block.create_many_to_one_relationship()

        return block
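
The mapping from n_starts/n_stops to Segments is plain zip-pairing; a self-contained sketch of the naming logic used above (30 kHz sampling assumed for illustration):

f_samp = 30000.0  # sampling rate in Hz, assumed
n_starts = [0, 30000]
n_stops = [30000, 60000]
for n, (t1, t2) in enumerate(zip(n_starts, n_stops)):
    print('Segment %d from %f to %f' % (n, t1 / f_samp, t2 / f_samp))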
Example #49
    def test__children(self):
        blk = Block(name='block1')
        blk.segments = [self.segment1]
        blk.create_many_to_one_relationship()

        self.assertEqual(self.segment1._container_child_objects, ())
        self.assertEqual(self.segment1._data_child_objects,
                         ('AnalogSignal', 'AnalogSignalArray',
                          'Epoch', 'EpochArray',
                          'Event', 'EventArray',
                          'IrregularlySampledSignal',
                          'Spike', 'SpikeTrain'))
        self.assertEqual(self.segment1._single_parent_objects, ('Block',))
        self.assertEqual(self.segment1._multi_child_objects, ())
        self.assertEqual(self.segment1._multi_parent_objects, ())
        self.assertEqual(self.segment1._child_properties, ())

        self.assertEqual(self.segment1._single_child_objects,
                         ('AnalogSignal', 'AnalogSignalArray',
                          'Epoch', 'EpochArray',
                          'Event', 'EventArray',
                          'IrregularlySampledSignal',
                          'Spike', 'SpikeTrain'))

        self.assertEqual(self.segment1._container_child_containers, ())
        self.assertEqual(self.segment1._data_child_containers,
                         ('analogsignals', 'analogsignalarrays',
                          'epochs', 'epocharrays',
                          'events', 'eventarrays',
                          'irregularlysampledsignals',
                          'spikes', 'spiketrains'))
        self.assertEqual(self.segment1._single_child_containers,
                         ('analogsignals', 'analogsignalarrays',
                          'epochs', 'epocharrays',
                          'events', 'eventarrays',
                          'irregularlysampledsignals',
                          'spikes', 'spiketrains'))
        self.assertEqual(self.segment1._single_parent_containers, ('block',))
        self.assertEqual(self.segment1._multi_child_containers, ())
        self.assertEqual(self.segment1._multi_parent_containers, ())

        self.assertEqual(self.segment1._child_objects,
                         ('AnalogSignal', 'AnalogSignalArray',
                          'Epoch', 'EpochArray',
                          'Event', 'EventArray',
                          'IrregularlySampledSignal',
                          'Spike', 'SpikeTrain'))
        self.assertEqual(self.segment1._child_containers,
                         ('analogsignals', 'analogsignalarrays',
                          'epochs', 'epocharrays',
                          'events', 'eventarrays',
                          'irregularlysampledsignals',
                          'spikes', 'spiketrains'))
        self.assertEqual(self.segment1._parent_objects, ('Block',))
        self.assertEqual(self.segment1._parent_containers, ('block',))

        self.assertEqual(len(self.segment1.children),
                         (len(self.sig1) +
                          len(self.sigarr1) +
                          len(self.epoch1) +
                          len(self.epocharr1) +
                          len(self.event1) +
                          len(self.eventarr1) +
                          len(self.irsig1) +
                          len(self.spike1) +
                          len(self.train1)))
        self.assertEqual(self.segment1.children[0].name, self.signames1[0])
        self.assertEqual(self.segment1.children[1].name, self.signames1[1])
        self.assertEqual(self.segment1.children[2].name, self.sigarrnames1[0])
        self.assertEqual(self.segment1.children[3].name, self.sigarrnames1[1])
        self.assertEqual(self.segment1.children[4].name, self.epochnames1[0])
        self.assertEqual(self.segment1.children[5].name, self.epochnames1[1])
        self.assertEqual(self.segment1.children[6].name,
                         self.epocharrnames1[0])
        self.assertEqual(self.segment1.children[7].name,
                         self.epocharrnames1[1])
        self.assertEqual(self.segment1.children[8].name, self.eventnames1[0])
        self.assertEqual(self.segment1.children[9].name, self.eventnames1[1])
        self.assertEqual(self.segment1.children[10].name,
                         self.eventarrnames1[0])
        self.assertEqual(self.segment1.children[11].name,
                         self.eventarrnames1[1])
        self.assertEqual(self.segment1.children[12].name, self.irsignames1[0])
        self.assertEqual(self.segment1.children[13].name, self.irsignames1[1])
        self.assertEqual(self.segment1.children[14].name, self.spikenames1[0])
        self.assertEqual(self.segment1.children[15].name, self.spikenames1[1])
        self.assertEqual(self.segment1.children[16].name, self.trainnames1[0])
        self.assertEqual(self.segment1.children[17].name, self.trainnames1[1])
        self.assertEqual(len(self.segment1.parents), 1)
        self.assertEqual(self.segment1.parents[0].name, 'block1')

        self.segment1.create_many_to_one_relationship()
        self.segment1.create_many_to_many_relationship()
        self.segment1.create_relationship()
        assert_neo_object_is_compliant(self.segment1)
Example #50
    def test__time_slice(self):
        time_slice = [.5, 5.6] * pq.s

        epoch2 = Epoch([0.6, 9.5, 16.8, 34.1] * pq.s, durations=[4.5, 4.8, 5.0, 5.0] * pq.s,
                       t_start=.1 * pq.s)
        epoch2.annotate(epoch_type='b')
        epoch2.array_annotate(trial_id=[1, 2, 3, 4])

        event = Event(times=[0.5, 10.0, 25.2] * pq.s, t_start=.1 * pq.s)
        event.annotate(event_type='trial start')
        event.array_annotate(trial_id=[1, 2, 3])

        anasig = AnalogSignal(np.arange(50.0) * pq.mV, t_start=.1 * pq.s,
                              sampling_rate=1.0 * pq.Hz)
        irrsig = IrregularlySampledSignal(signal=np.arange(50.0) * pq.mV,
                                          times=anasig.times, t_start=.1 * pq.s)
        st = SpikeTrain(np.arange(0.5, 50, 7) * pq.s, t_start=.1 * pq.s, t_stop=50.0 * pq.s,
                        waveforms=np.array([[[0., 1.], [0.1, 1.1]], [[2., 3.], [2.1, 3.1]],
                                            [[4., 5.], [4.1, 5.1]], [[6., 7.], [6.1, 7.1]],
                                            [[8., 9.], [8.1, 9.1]], [[12., 13.], [12.1, 13.1]],
                                            [[14., 15.], [14.1, 15.1]],
                                            [[16., 17.], [16.1, 17.1]]]) * pq.mV,
                        array_annotations={'spikenum': np.arange(1, 9)})

        seg = Segment()
        seg.epochs = [epoch2]
        seg.events = [event]
        seg.analogsignals = [anasig]
        seg.irregularlysampledsignals = [irrsig]
        seg.spiketrains = [st]

        block = Block()
        block.segments = [seg]
        block.create_many_to_one_relationship()

        # test without resetting the time
        sliced = seg.time_slice(time_slice[0], time_slice[1])

        assert_neo_object_is_compliant(sliced)

        self.assertEqual(len(sliced.events), 1)
        self.assertEqual(len(sliced.spiketrains), 1)
        self.assertEqual(len(sliced.analogsignals), 1)
        self.assertEqual(len(sliced.irregularlysampledsignals), 1)
        self.assertEqual(len(sliced.epochs), 1)

        assert_same_attributes(sliced.spiketrains[0],
                               st.time_slice(t_start=time_slice[0],
                                             t_stop=time_slice[1]))
        assert_same_attributes(sliced.analogsignals[0],
                               anasig.time_slice(t_start=time_slice[0],
                                                 t_stop=time_slice[1]))
        assert_same_attributes(sliced.irregularlysampledsignals[0],
                               irrsig.time_slice(t_start=time_slice[0],
                                                 t_stop=time_slice[1]))
        assert_same_attributes(sliced.events[0],
                               event.time_slice(t_start=time_slice[0],
                                                t_stop=time_slice[1]))
        assert_same_attributes(sliced.epochs[0],
                               epoch2.time_slice(t_start=time_slice[0],
                                                 t_stop=time_slice[1]))

        seg = Segment()
        seg.epochs = [epoch2]
        seg.events = [event]
        seg.analogsignals = [anasig]
        seg.irregularlysampledsignals = [irrsig]
        seg.spiketrains = [st]

        block = Block()
        block.segments = [seg]
        block.create_many_to_one_relationship()

        # test with resetting the time
        sliced = seg.time_slice(time_slice[0], time_slice[1], reset_time=True)

        assert_neo_object_is_compliant(sliced)

        self.assertEqual(len(sliced.events), 1)
        self.assertEqual(len(sliced.spiketrains), 1)
        self.assertEqual(len(sliced.analogsignals), 1)
        self.assertEqual(len(sliced.irregularlysampledsignals), 1)
        self.assertEqual(len(sliced.epochs), 1)

        assert_same_attributes(sliced.spiketrains[0],
                               st.time_shift(- time_slice[0]).time_slice(
                                   t_start=0 * pq.s, t_stop=time_slice[1] - time_slice[0]))

        anasig_target = anasig.copy()
        anasig_target = anasig_target.time_shift(- time_slice[0]).time_slice(
            t_start=0 * pq.s, t_stop=time_slice[1] - time_slice[0])
        assert_same_attributes(sliced.analogsignals[0], anasig_target)
        irrsig_target = irrsig.copy()
        irrsig_target = irrsig_target.time_shift(- time_slice[0]).time_slice(
            t_start=0 * pq.s, t_stop=time_slice[1] - time_slice[0])
        assert_same_attributes(sliced.irregularlysampledsignals[0], irrsig_target)
        assert_same_attributes(sliced.events[0],
                               event.time_shift(- time_slice[0]).time_slice(
                                   t_start=0 * pq.s, t_stop=time_slice[1] - time_slice[0]))
        assert_same_attributes(sliced.epochs[0],
                               epoch2.time_shift(- time_slice[0]).time_slice(
                                   t_start=0 * pq.s, t_stop=time_slice[1] - time_slice[0]))

        seg = Segment()

        reader = ExampleRawIO(filename='my_filename.fake')
        reader.parse_header()

        proxy_anasig = AnalogSignalProxy(rawio=reader,
                                         global_channel_indexes=None,
                                         block_index=0, seg_index=0)
        seg.analogsignals.append(proxy_anasig)

        proxy_st = SpikeTrainProxy(rawio=reader, unit_index=0,
                                     block_index=0, seg_index=0)
        seg.spiketrains.append(proxy_st)

        proxy_event = EventProxy(rawio=reader, event_channel_index=0,
                                 block_index=0, seg_index=0)
        seg.events.append(proxy_event)

        proxy_epoch = EpochProxy(rawio=reader, event_channel_index=1,
                                 block_index=0, seg_index=0)
        proxy_epoch.annotate(pick='me')
        seg.epochs.append(proxy_epoch)

        loaded_epoch = proxy_epoch.load()
        loaded_event = proxy_event.load()
        loaded_st = proxy_st.load()
        loaded_anasig = proxy_anasig.load()

        block = Block()
        block.segments = [seg]
        block.create_many_to_one_relationship()

        # test with proxy objects
        sliced = seg.time_slice(time_slice[0], time_slice[1])

        assert_neo_object_is_compliant(sliced)

        sliced_event = loaded_event.time_slice(t_start=time_slice[0],
                                               t_stop=time_slice[1])
        has_event = len(sliced_event) > 0

        sliced_anasig = loaded_anasig.time_slice(t_start=time_slice[0],
                                                 t_stop=time_slice[1])

        sliced_st = loaded_st.time_slice(t_start=time_slice[0],
                                         t_stop=time_slice[1])

        self.assertEqual(len(sliced.events), int(has_event))
        self.assertEqual(len(sliced.spiketrains), 1)
        self.assertEqual(len(sliced.analogsignals), 1)

        self.assertTrue(isinstance(sliced.spiketrains[0], SpikeTrain))
        assert_same_attributes(sliced.spiketrains[0], sliced_st)

        self.assertTrue(isinstance(sliced.analogsignals[0], AnalogSignal))
        assert_same_attributes(sliced.analogsignals[0], sliced_anasig)

        if has_event:
            self.assertTrue(isinstance(sliced.events[0], Event))
            assert_same_attributes(sliced.events[0], sliced_event)
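
Stripped of proxies, the core behaviour under test reduces to a few lines; a minimal, self-contained sketch of Segment.time_slice:

import numpy as np
import quantities as pq
from neo import Segment, AnalogSignal

seg = Segment()
seg.analogsignals.append(AnalogSignal(np.arange(50.0) * pq.mV,
                                      t_start=0.1 * pq.s,
                                      sampling_rate=1.0 * pq.Hz))

sliced = seg.time_slice(0.5 * pq.s, 5.6 * pq.s)
print(sliced.analogsignals[0].t_start, sliced.analogsignals[0].t_stop)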