def test__children(self):
        """Verify parent metadata and live parent links of an analog signal
        array attached to one Segment and one RecordingChannelGroup."""
        sig = self.signals[0]

        # Attach the signal to a segment and to a channel group.
        seg = Segment(name='seg1')
        seg.analogsignalarrays = [sig]
        seg.create_many_to_one_relationship()

        grp = RecordingChannelGroup(name='rcg1')
        grp.analogsignalarrays = [sig]
        grp.create_many_to_one_relationship()

        parent_objs = ('Segment', 'RecordingChannelGroup')
        parent_conts = ('segment', 'recordingchannelgroup')

        self.assertEqual(sig._single_parent_objects, parent_objs)
        self.assertEqual(sig._multi_parent_objects, ())

        self.assertEqual(sig._single_parent_containers, parent_conts)
        self.assertEqual(sig._multi_parent_containers, ())

        self.assertEqual(sig._parent_objects, parent_objs)
        self.assertEqual(sig._parent_containers, parent_conts)

        # Exactly two parents, in attachment order.
        self.assertEqual(len(sig.parents), 2)
        self.assertEqual(sig.parents[0].name, 'seg1')
        self.assertEqual(sig.parents[1].name, 'rcg1')

        assert_neo_object_is_compliant(sig)
# Example #2
    def test__construct_subsegment_by_unit(self):
        """Build a block of segments, units, spike trains and signal arrays,
        then extract a sub-segment restricted to the first four units.

        Fix: the segments were never appended to the block and the spike
        trains were never appended to their segment, so the sub-segment was
        built from an unattached, empty segment.
        """
        nb_seg = 3
        nb_unit = 7
        unit_with_sig = [0, 2, 5]
        signal_types = ['Vm', 'Conductances']
        sig_len = 100

        # recordingchannelgroups
        rcgs = [ RecordingChannelGroup(name = 'Vm', channel_indexes = unit_with_sig),
                        RecordingChannelGroup(name = 'Conductance', channel_indexes = unit_with_sig), ]

        # Unit
        all_unit = [ ]
        for u in range(nb_unit):
            un = Unit(name = 'Unit #%d' % u, channel_indexes = [u])
            all_unit.append(un)

        bl = Block()
        for s in range(nb_seg):
            seg = Segment(name = 'Simulation %s' % s)
            # was missing: attach the segment to the block
            bl.segments.append(seg)
            for j in range(nb_unit):
                st = SpikeTrain([1, 2, 3], units = 'ms', t_start = 0., t_stop = 10)
                st.unit = all_unit[j]
                # was missing: attach the spike train to the segment
                seg.spiketrains.append(st)

            for t in signal_types:
                anasigarr = AnalogSignalArray( np.zeros((sig_len, len(unit_with_sig)) ), units = 'nA',
                                sampling_rate = 1000.*pq.Hz, channel_indexes = unit_with_sig )
                seg.analogsignalarrays.append(anasigarr)

        # what you want
        subseg = seg.construct_subsegment_by_unit(all_unit[:4])
    def test__children(self):
        """A signal attached to both a Segment and a RecordingChannelGroup
        must report both as single parents, in attachment order."""
        signal = self.signals[0]

        # Attach the same signal to each container type in turn.
        for container in (Segment(name='seg1'),
                          RecordingChannelGroup(name='rcg1')):
            container.analogsignalarrays = [signal]
            container.create_many_to_one_relationship()

        self.assertEqual(signal._single_parent_objects,
                         ('Segment', 'RecordingChannelGroup'))
        self.assertEqual(signal._multi_parent_objects, ())

        self.assertEqual(signal._single_parent_containers,
                         ('segment', 'recordingchannelgroup'))
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._parent_objects,
                         ('Segment', 'RecordingChannelGroup'))
        self.assertEqual(signal._parent_containers,
                         ('segment', 'recordingchannelgroup'))

        self.assertEqual(len(signal.parents), 2)
        self.assertEqual(signal.parents[0].name, 'seg1')
        self.assertEqual(signal.parents[1].name, 'rcg1')

        assert_neo_object_is_compliant(signal)
# Example #4
    def read_RCG(fh, block_id, rcg_id):
        # Read one RecordingChannelGroup (NIX source ``rcg_id``) from NIX
        # block ``block_id`` via the open file handle ``fh``.  Children
        # (signals, units) are wrapped in ProxyList so they are only read
        # from the file when first accessed.
        def read_multiple(nix_file, obj_type):
            # Select data arrays of the requested type that reference this
            # group's source (``nsn`` is captured from the enclosing scope,
            # assigned below before these closures are ever called), then
            # read each with the matching Reader.read_<obj_type> function.
            signals = filter(lambda x: x.type == obj_type, nix_file.blocks[block_id].data_arrays)
            signals = [x for x in signals if nsn in [y.name for y in x.sources]]
            read_func = getattr(Reader, 'read_' + obj_type)
            return [read_func(fh, block_id, da.name) for da in signals]

        def read_units(nix_file):
            # Units live as nested sources under this group's source.
            units = filter(lambda x: x.type == 'unit', nix_file.blocks[block_id].sources[nsn].sources)
            return [Reader.read_unit(fh, block_id, nsn, unit.name) for unit in units]

        nix_block = fh.handle.blocks[block_id]
        nix_source = nix_block.sources[rcg_id]
        nsn = nix_source.name  # source name used by the closures above

        params = {
            'name': nix_source.name,
            'channel_indexes': nix_source.metadata['channel_indexes']
        }
        rcg = RecordingChannelGroup(**params)

        # Copy plain attributes and annotations out of the NIX metadata.
        for key, value in Reader.Help.read_attributes(nix_source.metadata, 'recordingchannelgroup').items():
            setattr(rcg, key, value)

        rcg.annotations = Reader.Help.read_annotations(nix_source.metadata, 'recordingchannelgroup')

        # Lazy child collections, resolved against ``fh`` on first access.
        setattr(rcg, 'analogsignals', ProxyList(fh, lambda f: read_multiple(f, 'analogsignal')))
        setattr(rcg, 'irregularlysampledsignals', ProxyList(fh, lambda f: read_multiple(f, 'irregularlysampledsignal')))
        setattr(rcg, 'units', ProxyList(fh, read_units))

        return rcg
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the simple spike data file "fname" generated
        with BrainWare

        lazy    -- if True, data loading is deferred (flag consumed by the
                   per-record reader methods)
        cascade -- if False, return an empty Block with only file_origin set
        kargs   -- must be empty; no extra arguments are implemented

        Returns a neo Block with one RecordingChannelGroup containing a
        single Unit; Segments are filled in by the __read_id loop.
        '''

        # there are no keyargs implemented to so far.  If someone tries to pass
        # them they are expecting them to do something or making a mistake,
        # neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'argument implemented yet')
        self._fsrc = None
        self.__lazy = lazy

        self._blk = Block(file_origin=self._filename)
        block = self._blk

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        rcg = RecordingChannelGroup(file_origin=self._filename)
        # np.int was a deprecated alias for the builtin int and was removed
        # in NumPy 1.24 -- use the builtin directly (identical dtype).
        rcg.channel_indexes = np.array([], dtype=int)
        rcg.channel_names = np.array([], dtype='S')
        self.__unit = Unit(file_origin=self._filename)

        # load objects into their containers
        block.recordingchannelgroups.append(rcg)
        rcg.units.append(self.__unit)

        # initialize per-file parser state shared with __read_id
        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        # open the file
        with open(self._path, 'rb') as self._fsrc:
            res = True
            # while the file is not done keep reading segments
            while res:
                res = self.__read_id()

        create_many_to_one_relationship(block)

        # cleanup attributes so the reader holds no stale state
        self._fsrc = None
        self.__lazy = False

        self._blk = None

        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        return block
def proc_dam(filename):
    '''Load an dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        # NpzFile.items() returns a non-subscriptable mapping view on
        # Python 3 -- materialize it before indexing the first entry.
        damfile = list(damobj.items())[0][1].flatten()

    # strip the '_dam_py?.npz' suffix (12 chars) and restore '.dam'
    filename = os.path.basename(filename[:-12]+'.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    rcg = RecordingChannelGroup(file_origin=filename)
    chan = RecordingChannel(file_origin=filename, index=0, name='Chan1')
    rcg.channel_indexes = np.array([1])
    rcg.channel_names = np.array(['Chan1'], dtype='S')

    block.recordingchannelgroups.append(rcg)
    rcg.recordingchannels.append(chan)

    # rebuild the per-stimulus parameter dicts from the matlab structs
    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    # one Segment with one AnalogSignal per stimulus presentation
    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal*pq.mV,
                           t_start=timestamp*pq.d,
                           file_origin=filename,
                           sampling_period=1.*pq.s)
        segment = Segment(file_origin=filename,
                          index=stimIndex,
                          **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    create_many_to_one_relationship(block)

    return block
def proc_dam(filename):
    '''Load an dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        # NpzFile.items() returns a non-subscriptable mapping view on
        # Python 3 -- materialize it before indexing the first entry.
        damfile = list(damobj.items())[0][1].flatten()

    # strip the '_dam_py?.npz' suffix (12 chars) and restore '.dam'
    filename = os.path.basename(filename[:-12] + '.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    rcg = RecordingChannelGroup(file_origin=filename)
    chan = RecordingChannel(file_origin=filename, index=0, name='Chan1')
    rcg.channel_indexes = np.array([1])
    rcg.channel_names = np.array(['Chan1'], dtype='S')

    block.recordingchannelgroups.append(rcg)
    rcg.recordingchannels.append(chan)

    # rebuild the per-stimulus parameter dicts from the matlab structs
    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    # one Segment with one AnalogSignal per stimulus presentation
    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal * pq.mV,
                           t_start=timestamp * pq.d,
                           file_origin=filename,
                           sampling_period=1. * pq.s)
        segment = Segment(file_origin=filename, index=stimIndex, **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    create_many_to_one_relationship(block)

    return block
    def read_block(self,
                     lazy = False,
                     cascade = True,
                    ):
        """
        Read a Neuroscope session into a Block.

        Parses the XML parameter file (self.filename) for acquisition
        settings and anatomical channel groups, then loads the raw signals
        from the companion '.dat' file via RawBinarySignalIO.

        lazy    -- if True, signal data are not loaded (and not rescaled)
        cascade -- if False, return the Block without segments/channels
        """

        
        # acquisition parameters from the XML header
        tree = ElementTree.parse(self.filename)
        root = tree.getroot()
        acq = root.find('acquisitionSystem')
        nbits = int(acq.find('nBits').text)
        nbchannel = int(acq.find('nChannels').text)
        sampling_rate = float(acq.find('samplingRate').text)*pq.Hz
        voltage_range = float(acq.find('voltageRange').text)
        #offset = int(acq.find('offset').text)
        amplification = float(acq.find('amplification').text)
        
        bl = Block(file_origin = os.path.basename(self.filename).replace('.xml', ''))
        if cascade:
            seg = Segment()
            bl.segments.append(seg)
            
            # RC and RCG: one group per anatomical <group> element, with
            # many-to-many links between channels and groups
            rc_list = [ ]
            for i, xml_rcg in  enumerate(root.find('anatomicalDescription').find('channelGroups').findall('group')):
                rcg = RecordingChannelGroup(name = 'Group {0}'.format(i))
                bl.recordingchannelgroups.append(rcg)
                for xml_rc in xml_rcg:
                    rc = RecordingChannel(index = int(xml_rc.text))
                    rc_list.append(rc)
                    rcg.recordingchannels.append(rc)
                    rc.recordingchannelgroups.append(rcg)
                rcg.channel_indexes = np.array([rc.index for rc in rcg.recordingchannels], dtype = int)
                rcg.channel_names = np.array(['Channel{0}'.format(rc.index) for rc in rcg.recordingchannels], dtype = 'S')
        
            # AnalogSignals: raw binary '.dat' alongside the XML file;
            # sample width follows nBits, full scale follows voltageRange
            reader = RawBinarySignalIO(filename = self.filename.replace('.xml', '.dat'))
            seg2 = reader.read_segment(cascade = True, lazy = lazy,
                                                        sampling_rate = sampling_rate,
                                                        t_start = 0.*pq.s,
                                                        unit = pq.V, nbchannel = nbchannel,
                                                        bytesoffset = 0,
                                                        dtype = np.int16 if nbits<=16 else np.int32,
                                                        rangemin = -voltage_range/2.,
                                                        rangemax = voltage_range/2.,)
            for s, sig in enumerate(seg2.analogsignals):
                if not lazy:
                    # undo the hardware amplification to get true voltages
                    sig /= amplification
                sig.segment = seg
                seg.analogsignals.append(sig)
                # rc_list order matches the channel order of the .dat file
                rc_list[s].analogsignals.append(sig)
            
        bl.create_many_to_one_relationship()
        return bl
    def test__children(self):
        """RecordingChannel linked into one group: check the class-level
        child/parent descriptors and the live children/parents lists."""
        rcg = RecordingChannelGroup(name='rcg1')
        rcg.recordingchannels = [self.rchan1]
        rcg.create_many_to_many_relationship()

        rchan = self.rchan1
        sig_objs = ('AnalogSignal', 'IrregularlySampledSignal')
        sig_conts = ('analogsignals', 'irregularlysampledsignals')

        self.assertEqual(rchan._container_child_objects, ())
        self.assertEqual(rchan._data_child_objects, sig_objs)
        self.assertEqual(rchan._single_parent_objects, ())
        self.assertEqual(rchan._multi_child_objects, ())
        self.assertEqual(rchan._multi_parent_objects,
                         ('RecordingChannelGroup', ))
        self.assertEqual(rchan._child_properties, ())

        self.assertEqual(rchan._single_child_objects, sig_objs)

        self.assertEqual(rchan._container_child_containers, ())
        self.assertEqual(rchan._data_child_containers, sig_conts)
        self.assertEqual(rchan._single_child_containers, sig_conts)
        self.assertEqual(rchan._single_parent_containers, ())
        self.assertEqual(rchan._multi_child_containers, ())
        self.assertEqual(rchan._multi_parent_containers,
                         ('recordingchannelgroups', ))

        self.assertEqual(rchan._child_objects, sig_objs)
        self.assertEqual(rchan._child_containers, sig_conts)
        self.assertEqual(rchan._parent_objects,
                         ('RecordingChannelGroup', ))
        self.assertEqual(rchan._parent_containers,
                         ('recordingchannelgroups', ))

        # Children: both analog signals first, then both irregular signals.
        self.assertEqual(len(rchan.children),
                         len(self.sig1) + len(self.irsig1))
        self.assertEqual(rchan.children[0].name, self.signames1[0])
        self.assertEqual(rchan.children[1].name, self.signames1[1])
        self.assertEqual(rchan.children[2].name, self.irsignames1[0])
        self.assertEqual(rchan.children[3].name, self.irsignames1[1])
        self.assertEqual(len(rchan.parents), 1)
        self.assertEqual(rchan.parents[0].name, 'rcg1')

        rchan.create_many_to_one_relationship()
        rchan.create_many_to_many_relationship()
        rchan.create_relationship()
        assert_neo_object_is_compliant(rchan)
# Example #10
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the raw data file "fname" generated
        with BrainWare
        '''
        # No keyword arguments are supported yet; refuse them loudly rather
        # than silently ignoring what the caller asked for.
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'argument implemented yet')
        self._fsrc = None

        block = Block(file_origin=self._filename)

        # Without cascade, return the bare block.
        if not cascade:
            return block

        # One channel group holding the single recording channel.
        group = RecordingChannelGroup(file_origin=self._filename)
        channel = RecordingChannel(file_origin=self._filename,
                                   index=1,
                                   name='Chan1')

        group.recordingchannels.append(channel)
        block.recordingchannelgroups.append(group)
        group.channel_indexes = np.array([1])
        group.channel_names = np.array(['Chan1'], dtype='S')

        # Read segments until the file runs out.
        with open(self._path, 'rb') as srcfile:
            while True:
                seg = self._read_segment(srcfile, lazy)
                if not seg:
                    break

                # Store the segment and its signal.
                block.segments.append(seg)
                channel.analogsignals.append(seg.analogsignals[0])

        # Drop any reference to the (now closed) file object.
        self._fsrc = None

        create_many_to_one_relationship(block)
        return block
    def test__children(self):
        """A RecordingChannel placed in a single group exposes signal
        children and group parents through the class descriptors."""
        rcg = RecordingChannelGroup(name='rcg1')
        rcg.recordingchannels = [self.rchan1]
        rcg.create_many_to_many_relationship()

        sig_objs = ('AnalogSignal', 'IrregularlySampledSignal')
        sig_conts = ('analogsignals', 'irregularlysampledsignals',)

        # Descriptors that must be empty tuples for a RecordingChannel.
        for attr in ('_container_child_objects', '_single_parent_objects',
                     '_multi_child_objects', '_child_properties',
                     '_container_child_containers',
                     '_single_parent_containers',
                     '_multi_child_containers'):
            self.assertEqual(getattr(self.rchan1, attr), ())

        self.assertEqual(self.rchan1._data_child_objects, sig_objs)
        self.assertEqual(self.rchan1._multi_parent_objects,
                         ('RecordingChannelGroup',))

        self.assertEqual(self.rchan1._single_child_objects, sig_objs)

        self.assertEqual(self.rchan1._data_child_containers, sig_conts)
        self.assertEqual(self.rchan1._single_child_containers, sig_conts)
        self.assertEqual(self.rchan1._multi_parent_containers,
                         ('recordingchannelgroups',))

        self.assertEqual(self.rchan1._child_objects, sig_objs)
        self.assertEqual(self.rchan1._child_containers, sig_conts)
        self.assertEqual(self.rchan1._parent_objects,
                         ('RecordingChannelGroup',))
        self.assertEqual(self.rchan1._parent_containers,
                         ('recordingchannelgroups',))

        # Children: the two analog signals, then the two irregular signals.
        self.assertEqual(len(self.rchan1.children),
                         len(self.sig1) + len(self.irsig1))
        self.assertEqual(self.rchan1.children[0].name, self.signames1[0])
        self.assertEqual(self.rchan1.children[1].name, self.signames1[1])
        self.assertEqual(self.rchan1.children[2].name, self.irsignames1[0])
        self.assertEqual(self.rchan1.children[3].name, self.irsignames1[1])
        self.assertEqual(len(self.rchan1.parents), 1)
        self.assertEqual(self.rchan1.parents[0].name, 'rcg1')

        self.rchan1.create_many_to_one_relationship()
        self.rchan1.create_many_to_many_relationship()
        self.rchan1.create_relationship()
        assert_neo_object_is_compliant(self.rchan1)
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the raw data file "fname" generated
        with BrainWare

        lazy    -- passed through to _read_segment for deferred loading
        cascade -- if False, return an empty Block with only file_origin
        kargs   -- must be empty; no extra arguments are implemented

        Returns a neo Block with one RecordingChannelGroup containing a
        single RecordingChannel; one Segment per record in the file.
        '''

        # there are no keyargs implemented to so far.  If someone tries to pass
        # them they are expecting them to do something or making a mistake,
        # neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'argument implemented yet')
        self._fsrc = None

        block = Block(file_origin=self._filename)

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        rcg = RecordingChannelGroup(file_origin=self._filename)
        rchan = RecordingChannel(file_origin=self._filename,
                                 index=1, name='Chan1')

        # load objects into their containers
        rcg.recordingchannels.append(rchan)
        block.recordingchannelgroups.append(rcg)
        rcg.channel_indexes = np.array([1])
        rcg.channel_names = np.array(['Chan1'], dtype='S')

        # open the file
        with open(self._path, 'rb') as fobject:
            # while the file is not done keep reading segments
            while True:
                seg = self._read_segment(fobject, lazy)
                # if there are no more Segments, stop
                if not seg:
                    break

                # store the segment and signals; each segment carries one
                # analog signal, which also belongs to the single channel
                block.segments.append(seg)
                rchan.analogsignals.append(seg.analogsignals[0])

        # remove the file object
        self._fsrc = None

        create_many_to_one_relationship(block)
        return block
def proc_src_units(srcfile, filename):
    '''Get the units in an src file that has been processed by the official
    matlab function.  See proc_src for details'''
    rcg = RecordingChannelGroup(file_origin=filename)

    # Unit collecting the spikes that were not assigned to any cluster.
    unassigned = Unit(name='UnassignedSpikes',
                      file_origin=filename,
                      elliptic=[],
                      boundaries=[],
                      timestamp=[],
                      max_valid=[])
    rcg.units.append(unassigned)

    # Drill into the matlab struct hierarchy for the sorting results.
    sortInfo = srcfile['sortInfo'][0, 0]
    timeslice = sortInfo['timeslice'][0, 0]
    maxValid = timeslice['maxValid'][0, 0]
    cluster = timeslice['cluster'][0, 0]
    if len(cluster):
        maxValid = maxValid[0, 0]
        elliptic = [res.flatten() for res in cluster['elliptic'].flatten()]
        boundaries = [res.flatten() for res in cluster['boundaries'].flatten()]
        # One Unit per sorted cluster, carrying its sorting parameters.
        for ell, bound in zip(elliptic, boundaries):
            rcg.units.append(Unit(file_origin=filename,
                                  boundaries=[bound],
                                  elliptic=[ell],
                                  timeStamp=[],
                                  max_valid=[maxValid]))
    return rcg
# Example #14
    def test__children(self):
        """Unit attached to a RecordingChannelGroup: verify the class-level
        child/parent descriptors and the live children/parents lists."""
        rcg = RecordingChannelGroup(name='rcg1')
        rcg.units = [self.unit1]
        rcg.create_many_to_one_relationship()

        unit = self.unit1
        child_objs = ('Spike', 'SpikeTrain')
        child_conts = ('spikes', 'spiketrains')

        self.assertEqual(unit._container_child_objects, ())
        self.assertEqual(unit._data_child_objects, child_objs)
        self.assertEqual(unit._single_parent_objects,
                         ('RecordingChannelGroup',))
        self.assertEqual(unit._multi_child_objects, ())
        self.assertEqual(unit._multi_parent_objects, ())
        self.assertEqual(unit._child_properties, ())

        self.assertEqual(unit._single_child_objects, child_objs)

        self.assertEqual(unit._container_child_containers, ())
        self.assertEqual(unit._data_child_containers, child_conts)
        self.assertEqual(unit._single_child_containers, child_conts)
        self.assertEqual(unit._single_parent_containers,
                         ('recordingchannelgroup',))
        self.assertEqual(unit._multi_child_containers, ())
        self.assertEqual(unit._multi_parent_containers, ())

        self.assertEqual(unit._child_objects, child_objs)
        self.assertEqual(unit._child_containers, child_conts)
        self.assertEqual(unit._parent_objects,
                         ('RecordingChannelGroup',))
        self.assertEqual(unit._parent_containers,
                         ('recordingchannelgroup',))

        # Children: both spikes first, then both spike trains.
        self.assertEqual(len(unit.children),
                         len(self.spike1) + len(self.train1))
        self.assertEqual(unit.children[0].name, self.spikenames1[0])
        self.assertEqual(unit.children[1].name, self.spikenames1[1])
        self.assertEqual(unit.children[2].name, self.trainnames1[0])
        self.assertEqual(unit.children[3].name, self.trainnames1[1])
        self.assertEqual(len(unit.parents), 1)
        self.assertEqual(unit.parents[0].name, 'rcg1')

        unit.create_many_to_one_relationship()
        assert_neo_object_is_compliant(unit)
# Example #15
    def test__construct_subsegment_by_unit(self):
        """Build a compliant block of segments, units, spike trains and
        signal arrays, then extract a sub-segment for the first four units.

        Fix: the segments were never appended to the block and the spike
        trains were never appended to their segment, so the relationship
        checks and the sub-segment extraction ran on empty containers.
        """
        nb_seg = 3
        nb_unit = 7
        unit_with_sig = np.array([0, 2, 5])
        signal_types = ['Vm', 'Conductances']
        sig_len = 100

        #recordingchannelgroups
        rcgs = [RecordingChannelGroup(name='Vm',
                                      channel_indexes=unit_with_sig),
                RecordingChannelGroup(name='Conductance',
                                      channel_indexes=unit_with_sig)]

        # Unit
        all_unit = []
        for u in range(nb_unit):
            un = Unit(name='Unit #%d' % u, channel_indexes=np.array([u]))
            assert_neo_object_is_compliant(un)
            all_unit.append(un)

        blk = Block()
        blk.recordingchannelgroups = rcgs
        for s in range(nb_seg):
            seg = Segment(name='Simulation %s' % s)
            blk.segments.append(seg)  # was missing: attach segment to block
            for j in range(nb_unit):
                st = SpikeTrain([1, 2, 3], units='ms',
                                t_start=0., t_stop=10)
                st.unit = all_unit[j]
                seg.spiketrains.append(st)  # was missing: attach train

            for t in signal_types:
                anasigarr = AnalogSignalArray(np.zeros((sig_len,
                                                        len(unit_with_sig))),
                                              units='nA',
                                              sampling_rate=1000.*pq.Hz,
                                              channel_indexes=unit_with_sig)
                seg.analogsignalarrays.append(anasigarr)

        create_many_to_one_relationship(blk)
        for unit in all_unit:
            assert_neo_object_is_compliant(unit)
        for rcg in rcgs:
            assert_neo_object_is_compliant(rcg)
        assert_neo_object_is_compliant(blk)

        # what you want
        newseg = seg.construct_subsegment_by_unit(all_unit[:4])
        assert_neo_object_is_compliant(newseg)
# Example #16
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the simple spike data file "fname" generated
        with BrainWare

        lazy    -- if True, data loading is deferred (flag consumed by the
                   per-record reader methods)
        cascade -- if False, return an empty Block with only file_origin set
        kargs   -- must be empty; no extra arguments are implemented

        Returns a neo Block with one RecordingChannelGroup containing a
        single Unit; Segments are filled in by the __read_id loop.
        '''

        # there are no keyargs implemented to so far.  If someone tries to pass
        # them they are expecting them to do something or making a mistake,
        # neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'argument implemented yet')
        self._fsrc = None
        self.__lazy = lazy

        self._blk = Block(file_origin=self._filename)
        block = self._blk

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        rcg = RecordingChannelGroup(file_origin=self._filename)
        self.__unit = Unit(file_origin=self._filename)

        # load objects into their containers
        block.recordingchannelgroups.append(rcg)
        rcg.units.append(self.__unit)

        # initialize per-file parser state shared with __read_id
        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        # open the file
        with open(self._path, 'rb') as self._fsrc:
            res = True
            # while the file is not done keep reading segments
            while res:
                res = self.__read_id()

        block.create_many_to_one_relationship()

        # cleanup attributes so the reader holds no stale state
        self._fsrc = None
        self.__lazy = False

        self._blk = None

        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        return block
    def read_block(self, lazy=False, cascade=True, load_waveforms=False):
        """
        Read the whole file into a Block with a single Segment.

        lazy           -- forwarded to read_segment
        cascade        -- if False, return an empty Block immediately
        load_waveforms -- forwarded to read_segment

        After loading, RecordingChannels are auto-created from the signals
        (populate_RecordingChannel) and each SpikeTrain's Unit is placed in
        a per-channel RecordingChannelGroup.
        """
        # Create block
        bl = Block(file_origin=self.filename)
        if not cascade:
            return bl

        seg = self.read_segment(self.filename,
                                lazy=lazy,
                                cascade=cascade,
                                load_waveforms=load_waveforms)
        bl.segments.append(seg)
        neo.io.tools.populate_RecordingChannel(bl,
                                               remove_from_annotation=False)

        # This create rc and RCG for attaching Units
        # rcg0 is the 'all channels' group added by populate_RecordingChannel
        rcg0 = bl.recordingchannelgroups[0]

        def find_rc(chan):
            # linear search for the RecordingChannel with index == chan;
            # returns None if the channel was not created from any signal
            for rc in rcg0.recordingchannels:
                if rc.index == chan:
                    return rc

        for st in seg.spiketrains:
            chan = st.annotations['channel_index']
            rc = find_rc(chan)
            if rc is None:
                # spike train on a channel with no signal: create the channel
                rc = RecordingChannel(index=chan)
                rcg0.recordingchannels.append(rc)
                rc.recordingchannelgroups.append(rcg0)
            if len(rc.recordingchannelgroups) == 1:
                # channel only belongs to rcg0 so far: give it its own
                # per-channel group to hold the units
                rcg = RecordingChannelGroup(name='Group {}'.format(chan))
                rcg.recordingchannels.append(rc)
                rc.recordingchannelgroups.append(rcg)
                bl.recordingchannelgroups.append(rcg)
            else:
                # reuse the per-channel group created earlier
                rcg = rc.recordingchannelgroups[1]
            unit = Unit(name=st.name)
            rcg.units.append(unit)
            unit.spiketrains.append(st)
        bl.create_many_to_one_relationship()

        return bl
# Example #18
def populate_RecordingChannel(bl, remove_from_annotation=True):
    """
    When a Block is structured as Block > Segment > AnalogSignal, this
    function auto-creates all RecordingChannel objects following these
    rules:
      * when 'channel_index' is set on an AnalogSignal, the corresponding
        RecordingChannel is created.
      * 'channel_index' is then set to None if remove_from_annotation
      * only one RecordingChannelGroup is created

    It is a utility at the end of creating a Block for IO.

    Usage:
    >>> populate_RecordingChannel(a_block)
    """
    # Maps channel index -> RecordingChannel, created lazily on first use.
    recordingchannels = {}
    for seg in bl.segments:
        for anasig in seg.analogsignals:
            if getattr(anasig, 'channel_index', None) is not None:
                ind = int(anasig.channel_index)
                if ind not in recordingchannels:
                    recordingchannels[ind] = RecordingChannel(index=ind)
                    # Optional 'channel_name' annotation names the channel.
                    # Only checked when the channel is first created, so
                    # later signals on the same index cannot rename it.
                    if 'channel_name' in anasig.annotations:
                        channel_name = anasig.annotations['channel_name']
                        recordingchannels[ind].name = channel_name
                        if remove_from_annotation:
                            anasig.annotations.pop('channel_name')
                recordingchannels[ind].analogsignals.append(anasig)
                anasig.recordingchannel = recordingchannels[ind]
                if remove_from_annotation:
                    anasig.channel_index = None

    # Single group covering every discovered channel, indexes sorted.
    indexes = np.sort(list(recordingchannels.keys())).astype('i')
    # NOTE(review): channels created without a 'channel_name' annotation
    # still have name == None here, which becomes b'None' in the dtype='S'
    # array -- presumably acceptable; verify against consumers.
    names = np.array([recordingchannels[idx].name for idx in indexes],
                     dtype='S')
    rcg = RecordingChannelGroup(name='all channels',
                                channel_indexes=indexes,
                                channel_names=names)
    bl.recordingchannelgroups.append(rcg)
    for ind in indexes:
        # many to many relationship
        rcg.recordingchannels.append(recordingchannels[ind])
        recordingchannels[ind].recordingchannelgroups.append(rcg)
Exemple #19
0
    def read_block(self, lazy=False, cascade=True, channel_index=None):
        """
        Read the recording into a single Block with one Segment.

        Arguments:
            channel_index: can be int, iterable or None to select one,
                many or all channel(s)

        Returns the Block, with many-to-one relationships created.
        """
        blk = Block()
        if cascade:
            seg = Segment(file_origin=self._filename)
            blk.segments += [seg]

            # Normalise channel_index to an array of channel numbers.
            # BUGFIX: test with ``is not None`` rather than truthiness --
            # a plain ``if channel_index`` treated channel 0 as "no
            # selection" and silently loaded every channel.
            if channel_index is not None:
                if isinstance(channel_index, int):
                    channel_index = [channel_index]
                if isinstance(channel_index, list):
                    channel_index = np.array(channel_index)
            else:
                channel_index = np.arange(0, self._attrs['shape'][1])

            rcg = RecordingChannelGroup(name='all channels',
                                        channel_indexes=channel_index)
            blk.recordingchannelgroups.append(rcg)

            for idx in channel_index:
                # read nested analogsignal
                ana = self.read_analogsignal(
                    channel_index=idx,
                    lazy=lazy,
                    cascade=cascade,
                )
                chan = RecordingChannel(index=int(idx))
                seg.analogsignals += [ana]
                chan.analogsignals += [ana]
                rcg.recordingchannels.append(chan)
            # Total duration: number of samples / sampling rate, in seconds.
            seg.duration = (self._attrs['shape'][0] /
                            self._attrs['kwik']['sample_rate']) * pq.s

            # neo.tools.populate_RecordingChannel(blk)
        blk.create_many_to_one_relationship()
        return blk
def proc_f32(filename):
    """Load an f32 file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareF32IO to
    make sure BrainwareF32IO is working properly

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    """

    # strip the '_f32_py?.npz' suffix (12 chars) to recover the .f32 name
    filenameorig = os.path.basename(filename[:-12] + ".f32")

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    rcg = RecordingChannelGroup(file_origin=filenameorig)
    # np.int was removed in numpy >= 1.24; the builtin int is equivalent
    rcg.channel_indexes = np.array([], dtype=int)
    rcg.channel_names = np.array([], dtype="S")
    unit = Unit(file_origin=filenameorig)

    # load objects into their containers
    block.recordingchannelgroups.append(rcg)
    rcg.units.append(unit)

    try:
        with np.load(filename) as f32obj:
            # NpzFile.items() is a non-indexable view on Python 3; fetch
            # the first stored array by name instead.
            f32file = f32obj[f32obj.files[0]].flatten()
    except IOError as exc:
        # IOError has no .message attribute on Python 3; match on str(exc)
        if "as a pickle" in str(exc):
            block.create_many_to_one_relationship()
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file["sweeplength"]]
    stims = [res.flatten().tolist() for res in f32file["stim"]]

    sweeps = [res["spikes"].flatten() for res in f32file["sweep"] if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype("float32")
            else:
                trainpts = []

            # one Segment per train; stim values become Param0, Param1, ...
            paramnames = ["Param%s" % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts, units=pq.ms, t_start=0, t_stop=sweeplength, file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            unit.spiketrains.append(train)
            block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
def proc_f32(filename):
    '''Load an f32 file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareF32IO to
    make sure BrainwareF32IO is working properly

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    '''

    # strip the '_f32_py?.npz' suffix (12 chars) to recover the .f32 name
    filenameorig = os.path.basename(filename[:-12] + '.f32')

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    rcg = RecordingChannelGroup(file_origin=filenameorig)
    # np.int was removed in numpy >= 1.24; the builtin int is equivalent
    rcg.channel_indexes = np.array([], dtype=int)
    rcg.channel_names = np.array([], dtype='S')
    unit = Unit(file_origin=filenameorig)

    # load objects into their containers
    block.recordingchannelgroups.append(rcg)
    rcg.units.append(unit)

    try:
        with np.load(filename) as f32obj:
            # NpzFile.items() is a non-indexable view on Python 3; fetch
            # the first stored array by name instead.
            f32file = f32obj[f32obj.files[0]].flatten()
    except IOError as exc:
        # IOError has no .message attribute on Python 3; match on str(exc)
        if 'as a pickle' in str(exc):
            create_many_to_one_relationship(block)
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file['sweeplength']]
    stims = [res.flatten().tolist() for res in f32file['stim']]

    sweeps = [res['spikes'].flatten() for res in f32file['sweep'] if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype('float32')
            else:
                trainpts = []

            # one Segment per train; stim values become Param0, Param1, ...
            paramnames = ['Param%s' % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts,
                               units=pq.ms,
                               t_start=0,
                               t_stop=sweeplength,
                               file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            unit.spiketrains.append(train)
            block.segments.append(segment)

    create_many_to_one_relationship(block)

    return block
Exemple #22
0
    def read_block(
        self,
        lazy=False,
        cascade=True,
    ):
        """
        Read an acquisition described by an .xml parameter file plus its
        companion '.dat' raw binary file into a single Block.

        Parameters
        ----------
        lazy : bool
            Passed through to the raw-binary segment reader; signals are
            not scaled/loaded when True.
        cascade : bool
            If False, return an empty Block carrying only ``file_origin``.

        Returns
        -------
        Block
            One Segment of analog signals plus one RecordingChannelGroup
            per anatomical channel group found in the XML description.
        """

        # Parse the XML description of the acquisition system.
        tree = ElementTree.parse(self.filename)
        root = tree.getroot()
        acq = root.find('acquisitionSystem')
        nbits = int(acq.find('nBits').text)
        nbchannel = int(acq.find('nChannels').text)
        sampling_rate = float(acq.find('samplingRate').text) * pq.Hz
        voltage_range = float(acq.find('voltageRange').text)
        #offset = int(acq.find('offset').text)
        amplification = float(acq.find('amplification').text)

        bl = Block(
            file_origin=os.path.basename(self.filename).replace('.xml', ''))
        if cascade:
            seg = Segment()
            bl.segments.append(seg)

            # RC and RCG
            # rc_list collects channels in XML order so they can later be
            # matched positionally against the signals read from the .dat.
            rc_list = []
            for i, xml_rcg in enumerate(
                    root.find('anatomicalDescription').find(
                        'channelGroups').findall('group')):
                rcg = RecordingChannelGroup(name='Group {}'.format(i))
                bl.recordingchannelgroups.append(rcg)
                for xml_rc in xml_rcg:
                    rc = RecordingChannel(index=int(xml_rc.text))
                    rc_list.append(rc)
                    rcg.recordingchannels.append(rc)
                    rc.recordingchannelgroups.append(rcg)
                rcg.channel_indexes = np.array(
                    [rc.index for rc in rcg.recordingchannels], dtype=int)
                rcg.channel_names = np.array([
                    'Channel{}'.format(rc.index)
                    for rc in rcg.recordingchannels
                ],
                                             dtype='S')

            # AnalogSignals
            # Sample width and voltage scaling come from the XML header;
            # the symmetric range spans +/- voltage_range / 2.
            reader = RawBinarySignalIO(
                filename=self.filename.replace('.xml', '.dat'))
            seg2 = reader.read_segment(
                cascade=True,
                lazy=lazy,
                sampling_rate=sampling_rate,
                t_start=0. * pq.s,
                unit=pq.V,
                nbchannel=nbchannel,
                bytesoffset=0,
                dtype=np.int16 if nbits <= 16 else np.int32,
                rangemin=-voltage_range / 2.,
                rangemax=voltage_range / 2.,
            )
            for s, sig in enumerate(seg2.analogsignals):
                if not lazy:
                    # Undo the recording amplifier gain (in-place).
                    sig /= amplification
                sig.segment = seg
                seg.analogsignals.append(sig)
                # NOTE(review): assumes signal order in the .dat matches
                # the XML channel order -- TODO confirm.
                rc_list[s].analogsignals.append(sig)

        create_many_to_one_relationship(bl)
        return bl
    def test__children(self):
        """Check RecordingChannel child/parent descriptors and children."""
        # Attach self.rchan1 to two groups; rcg2.create_relationship() runs
        # first, which fixes the order of rchan1's parent list below.
        rcg1 = RecordingChannelGroup(name='rcg1')
        rcg2 = RecordingChannelGroup(name='rcg2')
        rcg1.recordingchannels = [self.rchan1]
        rcg2.recordingchannels = [self.rchan1]
        rcg2.create_relationship()
        rcg1.create_relationship()
        for obj in (self.rchan1, rcg1, rcg2):
            assert_neo_object_is_compliant(obj)

        chan = self.rchan1
        data_classes = ('AnalogSignal', 'IrregularlySampledSignal')
        data_attrs = ('analogsignals', 'irregularlysampledsignals')

        # Class-level descriptors: data children only, multi-parent groups.
        self.assertEqual(chan._container_child_objects, ())
        self.assertEqual(chan._data_child_objects, data_classes)
        self.assertEqual(chan._single_parent_objects, ())
        self.assertEqual(chan._multi_child_objects, ())
        self.assertEqual(chan._multi_parent_objects,
                         ('RecordingChannelGroup',))
        self.assertEqual(chan._child_properties, ())

        self.assertEqual(chan._single_child_objects, data_classes)

        self.assertEqual(chan._container_child_containers, ())
        self.assertEqual(chan._data_child_containers, data_attrs)
        self.assertEqual(chan._single_child_containers, data_attrs)
        self.assertEqual(chan._single_parent_containers, ())
        self.assertEqual(chan._multi_child_containers, ())
        self.assertEqual(chan._multi_parent_containers,
                         ('recordingchannelgroups',))

        self.assertEqual(chan._child_objects, data_classes)
        self.assertEqual(chan._child_containers, data_attrs)
        self.assertEqual(chan._parent_objects, ('RecordingChannelGroup',))
        self.assertEqual(chan._parent_containers,
                         ('recordingchannelgroups',))

        # Instance-level counts: analog + irregularly-sampled signals.
        nexpected = self.nchildren * 2
        self.assertEqual(len(chan._single_children), nexpected)
        self.assertEqual(len(chan._multi_children), 0)
        self.assertEqual(len(chan.data_children), nexpected)
        self.assertEqual(len(chan.data_children_recur), nexpected)
        self.assertEqual(len(chan.container_children), 0)
        self.assertEqual(len(chan.container_children_recur), 0)
        self.assertEqual(len(chan.children), nexpected)
        self.assertEqual(len(chan.children_recur), nexpected)

        self.assertEqual(chan._multi_children, ())
        self.assertEqual(chan.container_children, ())
        self.assertEqual(chan.container_children_recur, ())

        # Every child view must expose the same signals.
        expected = self.sigs1a + self.irsigs1a
        for attr in ('_single_children', 'data_children',
                     'data_children_recur', 'children', 'children_recur'):
            assert_same_sub_schema(list(getattr(chan, attr)), expected)

        # Parent order follows the order of the create_relationship() calls.
        self.assertEqual(len(chan.parents), 2)
        self.assertEqual(chan.parents[0].name, 'rcg2')
        self.assertEqual(chan.parents[1].name, 'rcg1')
    def test__children(self):
        """Verify RecordingChannel relationship metadata and child lists."""
        rcg1 = RecordingChannelGroup(name='rcg1')
        rcg2 = RecordingChannelGroup(name='rcg2')
        rcg1.recordingchannels = [self.rchan1]
        rcg2.recordingchannels = [self.rchan1]
        # rcg2 first, rcg1 second: this ordering is asserted at the end.
        rcg2.create_relationship()
        rcg1.create_relationship()
        assert_neo_object_is_compliant(self.rchan1)
        assert_neo_object_is_compliant(rcg1)
        assert_neo_object_is_compliant(rcg2)

        rchan = self.rchan1
        sig_classes = ('AnalogSignal', 'IrregularlySampledSignal')
        sig_containers = ('analogsignals', 'irregularlysampledsignals')
        group_classes = ('RecordingChannelGroup', )
        group_containers = ('recordingchannelgroups', )

        # Child/parent class descriptors.
        self.assertEqual(rchan._container_child_objects, ())
        self.assertEqual(rchan._data_child_objects, sig_classes)
        self.assertEqual(rchan._single_parent_objects, ())
        self.assertEqual(rchan._multi_child_objects, ())
        self.assertEqual(rchan._multi_parent_objects, group_classes)
        self.assertEqual(rchan._child_properties, ())

        self.assertEqual(rchan._single_child_objects, sig_classes)

        # Container-name descriptors mirror the class descriptors.
        self.assertEqual(rchan._container_child_containers, ())
        self.assertEqual(rchan._data_child_containers, sig_containers)
        self.assertEqual(rchan._single_child_containers, sig_containers)
        self.assertEqual(rchan._single_parent_containers, ())
        self.assertEqual(rchan._multi_child_containers, ())
        self.assertEqual(rchan._multi_parent_containers, group_containers)

        self.assertEqual(rchan._child_objects, sig_classes)
        self.assertEqual(rchan._child_containers, sig_containers)
        self.assertEqual(rchan._parent_objects, group_classes)
        self.assertEqual(rchan._parent_containers, group_containers)

        # All child views contain both signal families; none are containers.
        total = self.nchildren * 2
        self.assertEqual(len(rchan._single_children), total)
        self.assertEqual(len(rchan._multi_children), 0)
        self.assertEqual(len(rchan.data_children), total)
        self.assertEqual(len(rchan.data_children_recur), total)
        self.assertEqual(len(rchan.container_children), 0)
        self.assertEqual(len(rchan.container_children_recur), 0)
        self.assertEqual(len(rchan.children), total)
        self.assertEqual(len(rchan.children_recur), total)

        self.assertEqual(rchan._multi_children, ())
        self.assertEqual(rchan.container_children, ())
        self.assertEqual(rchan.container_children_recur, ())

        wanted = self.sigs1a + self.irsigs1a
        for view in ('_single_children', 'data_children',
                     'data_children_recur', 'children', 'children_recur'):
            assert_same_sub_schema(list(getattr(rchan, view)), wanted)

        # Parents appear in create_relationship() call order: rcg2, rcg1.
        self.assertEqual(len(rchan.parents), 2)
        self.assertEqual(rchan.parents[0].name, 'rcg2')
        self.assertEqual(rchan.parents[1].name, 'rcg1')
Exemple #25
0
    def test__children(self):
        """Check Unit child/parent descriptors and attached spike data."""
        # Give self.unit1 its single parent group.
        rcg = RecordingChannelGroup(name='rcg1')
        rcg.units = [self.unit1]
        rcg.create_many_to_one_relationship()
        assert_neo_object_is_compliant(self.unit1)
        assert_neo_object_is_compliant(rcg)

        unit = self.unit1
        data_classes = ('Spike', 'SpikeTrain')
        data_attrs = ('spikes', 'spiketrains')

        # Class-level descriptors: data children only, one parent group.
        self.assertEqual(unit._container_child_objects, ())
        self.assertEqual(unit._data_child_objects, data_classes)
        self.assertEqual(unit._single_parent_objects,
                         ('RecordingChannelGroup',))
        self.assertEqual(unit._multi_child_objects, ())
        self.assertEqual(unit._multi_parent_objects, ())
        self.assertEqual(unit._child_properties, ())

        self.assertEqual(unit._single_child_objects, data_classes)

        self.assertEqual(unit._container_child_containers, ())
        self.assertEqual(unit._data_child_containers, data_attrs)
        self.assertEqual(unit._single_child_containers, data_attrs)
        self.assertEqual(unit._single_parent_containers,
                         ('recordingchannelgroup',))
        self.assertEqual(unit._multi_child_containers, ())
        self.assertEqual(unit._multi_parent_containers, ())

        self.assertEqual(unit._child_objects, data_classes)
        self.assertEqual(unit._child_containers, data_attrs)
        self.assertEqual(unit._parent_objects, ('RecordingChannelGroup',))
        self.assertEqual(unit._parent_containers, ('recordingchannelgroup',))

        # Instance-level counts: spikes plus spiketrains.
        nexpected = self.nchildren * 2
        self.assertEqual(len(unit._single_children), nexpected)
        self.assertEqual(len(unit._multi_children), 0)
        self.assertEqual(len(unit.data_children), nexpected)
        self.assertEqual(len(unit.data_children_recur), nexpected)
        self.assertEqual(len(unit.container_children), 0)
        self.assertEqual(len(unit.container_children_recur), 0)
        self.assertEqual(len(unit.children), nexpected)
        self.assertEqual(len(unit.children_recur), nexpected)

        self.assertEqual(unit._multi_children, ())
        self.assertEqual(unit.container_children, ())
        self.assertEqual(unit.container_children_recur, ())

        # Every child view exposes the same spikes and trains.
        expected = self.spikes1a + self.trains1a
        for attr in ('_single_children', 'data_children',
                     'data_children_recur', 'children', 'children_recur'):
            assert_same_sub_schema(list(getattr(unit, attr)), expected)

        self.assertEqual(len(unit.parents), 1)
        self.assertEqual(unit.parents[0].name, 'rcg1')