def setup_recordingchannels(self):
        params = {'testarg2': 'yes', 'testarg3': True}
        self.rchan1 = RecordingChannel(index=10,
                                       coordinate=[1.1, 1.5, 1.7] * pq.mm,
                                       name='test',
                                       description='tester 1',
                                       file_origin='test.file',
                                       testarg1=1,
                                       **params)
        self.rchan2 = RecordingChannel(index=100,
                                       coordinate=[11., 15., 17.] * pq.mm,
                                       name='test',
                                       description='tester 2',
                                       file_origin='test.file',
                                       testarg1=1,
                                       **params)
        self.rchan1.annotate(testarg1=1.1, testarg0=[1, 2, 3])
        self.rchan2.annotate(testarg11=1.1, testarg10=[1, 2, 3])

        self.rchan1.analogsignals = self.sig1
        self.rchan2.analogsignals = self.sig2

        self.rchan1.irregularlysampledsignals = self.irsig1
        self.rchan2.irregularlysampledsignals = self.irsig2

        create_many_to_one_relationship(self.rchan1)
        create_many_to_one_relationship(self.rchan2)
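The create_many_to_one_relationship calls above are what populate the links from the attached children back to their parent container. A minimal sketch of that effect, assuming the neo 0.2-era API used throughout these examples (the parent attribute name on the signal is an assumption):

import quantities as pq
from neo import AnalogSignal, RecordingChannel
from neo.io.tools import create_many_to_one_relationship

rchan = RecordingChannel(index=0, name='demo')
sig = AnalogSignal([1.0, 2.0, 3.0] * pq.mV, sampling_rate=1.0 * pq.kHz)
rchan.analogsignals = [sig]

# walks the child containers and sets the many-to-one link on each child
create_many_to_one_relationship(rchan)
assert sig.recordingchannel is rchan  # assumed parent attribute name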
Example #2
 def read_segment(self, lazy=False, cascade=True):
     data, metadata = self._read_file_contents()
     annotations = dict(
         (k, metadata.get(k, 'unknown'))
         for k in ("label", "variable", "first_id", "last_id"))
     seg = Segment(**annotations)
     if cascade:
         if metadata['variable'] == 'spikes':
             for i in range(metadata['first_index'],
                            metadata['last_index']):
                 spiketrain = self._extract_spikes(data, metadata, i, lazy)
                 if spiketrain is not None:
                     seg.spiketrains.append(spiketrain)
             seg.annotate(
                 dt=metadata['dt']
              )  # store dt for SpikeTrains only, as it can be retrieved from sampling_period for AnalogSignal
         else:
             for i in range(metadata['first_index'],
                            metadata['last_index']):
                 # probably slow. Replace with numpy-based version from 0.1
                 signal = self._extract_signal(data, metadata, i, lazy)
                 if signal is not None:
                     seg.analogsignals.append(signal)
         create_many_to_one_relationship(seg)
     return seg
Example #3
    def setup_units(self):
        params = {'testarg2': 'yes', 'testarg3': True}
        self.unit1 = Unit(name='test',
                          description='tester 1',
                          file_origin='test.file',
                          channel_indexes=[1],
                          testarg1=1,
                          **params)
        self.unit2 = Unit(name='test',
                          description='tester 2',
                          file_origin='test.file',
                          channel_indexes=[2],
                          testarg1=1,
                          **params)
        self.unit1.annotate(testarg1=1.1, testarg0=[1, 2, 3])
        self.unit2.annotate(testarg11=1.1, testarg10=[1, 2, 3])

        self.unit1.spiketrains = self.train1
        self.unit2.spiketrains = self.train2

        self.unit1.spikes = self.spike1
        self.unit2.spikes = self.spike2

        create_many_to_one_relationship(self.unit1)
        create_many_to_one_relationship(self.unit2)
Example #4
    def setup_units(self):
        params = {'testarg2': 'yes', 'testarg3': True}
        self.unit1 = Unit(name='test', description='tester 1',
                          file_origin='test.file',
                          channel_indexes=np.array([1]),
                          testarg1=1, **params)
        self.unit2 = Unit(name='test', description='tester 2',
                          file_origin='test.file',
                          channel_indexes=np.array([2]),
                          testarg1=1, **params)
        self.unit1.annotate(testarg1=1.1, testarg0=[1, 2, 3])
        self.unit2.annotate(testarg11=1.1, testarg10=[1, 2, 3])

        self.unit1train = [self.train1[0], self.train2[1]]
        self.unit2train = [self.train1[1], self.train2[0]]

        self.unit1.spiketrains = self.unit1train
        self.unit2.spiketrains = self.unit2train

        self.unit1spike = [self.spike1[0], self.spike2[1]]
        self.unit2spike = [self.spike1[1], self.spike2[0]]

        self.unit1.spikes = self.unit1spike
        self.unit2.spikes = self.unit2spike

        create_many_to_one_relationship(self.unit1)
        create_many_to_one_relationship(self.unit2)
Example #5
 def read_segment(self,
                 lazy = False,
                 cascade = True,
                 group = 0,
                 series = 0):
     seg = Segment( name = 'test')
     if cascade:
         tree = getbyroute(self.pul.tree,[0,group,series])
         for sw,sweep in enumerate(tree['children']):
             if sw == 0:
                 starttime = pq.Quantity(float(sweep['contents'].swTimer),'s')
             for ch,channel in enumerate(sweep['children']):
                 sig = self.read_analogsignal(group=group,
                                         series=series,
                                         sweep=sw,
                                         channel = ch)
                  annotations = list(sweep['contents'].__dict__.keys())  # list() so remove() works on Python 3
                  annotations.remove('readlist')
                 for a in annotations:
                     d = {a:str(sweep['contents'].__dict__[a])}
                     sig.annotate(**d)
                 sig.t_start = pq.Quantity(float(sig.annotations['swTimer']),'s') - starttime
                 seg.analogsignals.append(sig)
          annotations = list(tree['contents'].__dict__.keys())
          annotations.remove('readlist')
         for a in annotations:
             d = {a:str(tree['contents'].__dict__[a])}
             seg.annotate(**d)
     create_many_to_one_relationship(seg)
     return seg
Example #6
def proc_dam(filename):
    '''Load a dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        damfile = list(damobj.items())[0][1].flatten()

    filename = os.path.basename(filename[:-12]+'.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    rcg = RecordingChannelGroup(file_origin=filename)
    chan = RecordingChannel(file_origin=filename, index=0, name='Chan1')
    rcg.channel_indexes = np.array([1])
    rcg.channel_names = np.array(['Chan1'], dtype='S')

    block.recordingchannelgroups.append(rcg)
    rcg.recordingchannels.append(chan)

    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal*pq.mV,
                           t_start=timestamp*pq.d,
                           file_origin=filename,
                           sampling_period=1.*pq.s)
        segment = Segment(file_origin=filename,
                          index=stimIndex,
                          **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    create_many_to_one_relationship(block)

    return block
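As the docstring says, the block built here is meant to be checked against the one produced by BrainwareDamIO. A hedged sketch of that comparison (the file names are hypothetical):

from neo.io import BrainwareDamIO

# block rebuilt from the MATLAB-converted .npz file
block_from_npz = proc_dam('file1_dam_py2.npz')

# block read directly from the original .dam file by the IO under test
block_from_io = BrainwareDamIO(filename='file1.dam').read_block()

# both blocks should describe the same recording
assert block_from_npz.file_origin == block_from_io.file_origin
assert len(block_from_npz.segments) == len(block_from_io.segments)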
Example #7
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the simple spike data file "fname" generated
        with BrainWare
        '''

        # there are no keyword arguments implemented so far.  If someone tries
        # to pass them, they are either expecting them to do something or
        # making a mistake, neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'argument implemented yet')
        self._fsrc = None
        self.__lazy = lazy

        self._blk = Block(file_origin=self._filename)
        block = self._blk

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        rcg = RecordingChannelGroup(file_origin=self._filename)
        rcg.channel_indexes = np.array([], dtype=np.int)
        rcg.channel_names = np.array([], dtype='S')
        self.__unit = Unit(file_origin=self._filename)

        # load objects into their containers
        block.recordingchannelgroups.append(rcg)
        rcg.units.append(self.__unit)

        # initialize values
        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        # open the file
        with open(self._path, 'rb') as self._fsrc:
            res = True
            # while the file is not done keep reading segments
            while res:
                res = self.__read_id()

        create_many_to_one_relationship(block)

        # cleanup attributes
        self._fsrc = None
        self.__lazy = False

        self._blk = None

        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        return block
Example #8
 def _read_entity(self, path="/", cascade=True, lazy=False):
     """
     Wrapper for base io "reader" functions.
     """
     ob = self.get(path, cascade, lazy)
     if cascade and cascade != 'lazy':
         create_many_to_one_relationship(ob)
     return ob
Example #9
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the simple spike data file "fname" generated
        with BrainWare
        '''

        # there are no keyword arguments implemented so far.  If someone tries
        # to pass them, they are either expecting them to do something or
        # making a mistake, neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'argument implemented yet')
        self._fsrc = None
        self.__lazy = lazy

        self._blk = Block(file_origin=self._filename)
        block = self._blk

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        rcg = RecordingChannelGroup(file_origin=self._filename)
        self.__unit = Unit(file_origin=self._filename)

        # load objects into their containers
        block.recordingchannelgroups.append(rcg)
        rcg.units.append(self.__unit)

        # initialize values
        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        # open the file
        with open(self._path, 'rb') as self._fsrc:
            res = True
            # while the file is not done keep reading segments
            while res:
                res = self.__read_id()

        create_many_to_one_relationship(block)

        # cleanup attributes
        self._fsrc = None
        self.__lazy = False

        self._blk = None

        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        return block
Example #10
def proc_dam(filename):
    '''Load a dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        damfile = list(damobj.items())[0][1].flatten()

    filename = os.path.basename(filename[:-12] + '.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    rcg = RecordingChannelGroup(file_origin=filename)
    chan = RecordingChannel(file_origin=filename, index=0, name='Chan1')
    rcg.channel_indexes = np.array([1])
    rcg.channel_names = np.array(['Chan1'], dtype='S')

    block.recordingchannelgroups.append(rcg)
    rcg.recordingchannels.append(chan)

    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal * pq.mV,
                           t_start=timestamp * pq.d,
                           file_origin=filename,
                           sampling_period=1. * pq.s)
        segment = Segment(file_origin=filename, index=stimIndex, **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    create_many_to_one_relationship(block)

    return block
Example #11
def proc_src(filename):
    '''Load an src file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareSrcIO to
    make sure BrainwareSrcIO is working properly

    block = proc_src(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_src_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.src', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_src_py2.npz'
             src file name = 'file1.src'
    '''
    with np.load(filename) as srcobj:
        srcfile = list(srcobj.items())[0][1]

    filename = os.path.basename(filename[:-12]+'.src')

    block = Block(file_origin=filename)

    NChannels = srcfile['NChannels'][0, 0][0, 0]
    side = str(srcfile['side'][0, 0][0])
    ADperiod = srcfile['ADperiod'][0, 0][0, 0]

    comm_seg = proc_src_comments(srcfile, filename)
    block.segments.append(comm_seg)

    rcg = proc_src_units(srcfile, filename)
    chan_nums = np.arange(NChannels, dtype='int')
    chan_names = []
    for i in chan_nums:
        name = 'Chan'+str(i)
        chan_names.append(name)
        chan = RecordingChannel(file_origin=filename,
                                name=name,
                                index=i)
        rcg.recordingchannels.append(chan)
    rcg.channel_indexes = chan_nums
    rcg.channel_names = np.array(chan_names, dtype='string_')
    block.recordingchannelgroups.append(rcg)

    for rep in srcfile['sets'][0, 0].flatten():
        proc_src_condition(rep, filename, ADperiod, side, block)

    create_many_to_one_relationship(block)

    return block
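The same comparison pattern applies here against BrainwareSrcIO; a hedged sketch (file names hypothetical):

from neo.io import BrainwareSrcIO

block_from_npz = proc_src('file1_src_py2.npz')
block_from_io = BrainwareSrcIO(filename='file1.src').read_block()

# the reference block and the IO's block should agree on the basic layout
assert block_from_npz.file_origin == block_from_io.file_origin
assert len(block_from_npz.segments) == len(block_from_io.segments)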
Example #12
    def test_block_list_units(self):
        blk = Block(name='a block')
        blk.recordingchannelgroups = [self.rcg1, self.rcg2]
        create_many_to_one_relationship(blk)
        #assert_neo_object_is_compliant(blk)

        unitres1 = [unit.name for unit in blk.recordingchannelgroups[0].units]
        unitres2 = [unit.name for unit in blk.recordingchannelgroups[1].units]
        unitres = [unit.name for unit in blk.list_units]

        self.assertEqual(self.unitnames1, unitres1)
        self.assertEqual(self.unitnames2, unitres2)
        self.assertEqual(self.unitnames, unitres)
Example #13
def proc_src(filename):
    '''Load an src file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareSrcIO to
    make sure BrainwareSrcIO is working properly

    block = proc_src(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_src_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.src', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_src_py2.npz'
             src file name = 'file1.src'
    '''
    with np.load(filename) as srcobj:
        srcfile = list(srcobj.items())[0][1]

    filename = os.path.basename(filename[:-12] + '.src')

    block = Block(file_origin=filename)

    NChannels = srcfile['NChannels'][0, 0][0, 0]
    side = str(srcfile['side'][0, 0][0])
    ADperiod = srcfile['ADperiod'][0, 0][0, 0]

    comm_seg = proc_src_comments(srcfile, filename)
    block.segments.append(comm_seg)

    rcg = proc_src_units(srcfile, filename)
    chan_nums = np.arange(NChannels, dtype='int')
    chan_names = []
    for i in chan_nums:
        name = 'Chan' + str(i)
        chan_names.append(name)
        chan = RecordingChannel(file_origin=filename, name=name, index=i)
        rcg.recordingchannels.append(chan)
    rcg.channel_indexes = chan_nums
    rcg.channel_names = np.array(chan_names, dtype='string_')
    block.recordingchannelgroups.append(rcg)

    for rep in srcfile['sets'][0, 0].flatten():
        proc_src_condition(rep, filename, ADperiod, side, block)

    create_many_to_one_relationship(block)

    return block
Example #14
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the raw data file "fname" generated
        with BrainWare
        '''

        # there are no keyword arguments implemented so far.  If someone tries
        # to pass them, they are either expecting them to do something or
        # making a mistake, neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'argument implemented yet')
        self._fsrc = None

        block = Block(file_origin=self._filename)

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        rcg = RecordingChannelGroup(file_origin=self._filename)
        rchan = RecordingChannel(file_origin=self._filename,
                                 index=1,
                                 name='Chan1')

        # load objects into their containers
        rcg.recordingchannels.append(rchan)
        block.recordingchannelgroups.append(rcg)
        rcg.channel_indexes = np.array([1])
        rcg.channel_names = np.array(['Chan1'], dtype='S')

        # open the file
        with open(self._path, 'rb') as fobject:
            # while the file is not done keep reading segments
            while True:
                seg = self._read_segment(fobject, lazy)
                # if there are no more Segments, stop
                if not seg:
                    break

                # store the segment and signals
                block.segments.append(seg)
                rchan.analogsignals.append(seg.analogsignals[0])

        # remove the file object
        self._fsrc = None

        create_many_to_one_relationship(block)
        return block
Example #15
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the raw data file "fname" generated
        with BrainWare
        '''

        # there are no keyword arguments implemented so far.  If someone tries
        # to pass them, they are either expecting them to do something or
        # making a mistake, neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'argument implemented yet')
        self._fsrc = None

        block = Block(file_origin=self._filename)

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        rcg = RecordingChannelGroup(file_origin=self._filename)
        rchan = RecordingChannel(file_origin=self._filename,
                                 index=1, name='Chan1')

        # load objects into their containers
        rcg.recordingchannels.append(rchan)
        block.recordingchannelgroups.append(rcg)
        rcg.channel_indexes = np.array([1])
        rcg.channel_names = np.array(['Chan1'], dtype='S')

        # open the file
        with open(self._path, 'rb') as fobject:
            # while the file is not done keep reading segments
            while True:
                seg = self._read_segment(fobject, lazy)
                # if there are no more Segments, stop
                if not seg:
                    break

                # store the segment and signals
                block.segments.append(seg)
                rchan.analogsignals.append(seg.analogsignals[0])

        # remove the file object
        self._fsrc = None

        create_many_to_one_relationship(block)
        return block
Example #16
 def read_segment(self,
                 lazy = False,
                 cascade = True,
                 group = 0,
                 series = 0):
     seg = Segment( name = 'test')
     if cascade:
         tree = getbyroute(self.pul.tree,[0,group,series])
         for sw,sweep in enumerate(tree['children']):
             if sw == 0:
                 starttime = pq.Quantity(float(sweep['contents'].swTimer),'s')
             for ch,channel in enumerate(sweep['children']):
                 sig = self.read_analogsignal(group=group,
                                         series=series,
                                         sweep=sw,
                                         channel = ch)
                  annotations = list(sweep['contents'].__dict__.keys())
                  annotations.remove('readlist')
                 for a in annotations:
                     d = {a:str(sweep['contents'].__dict__[a])}
                     sig.annotate(**d)
                 sig.t_start = pq.Quantity(float(sig.annotations['swTimer']),'s') - starttime
                 seg.analogsignals.append(sig)
          annotations = list(tree['contents'].__dict__.keys())
          annotations.remove('readlist')
         for a in annotations:
             d = {a:str(tree['contents'].__dict__[a])}
             seg.annotate(**d)
     create_many_to_one_relationship(seg)
     ### add protocols to signals
     for sig_index,sig in enumerate(seg.analogsignals):
         pgf_index = sig.annotations['pgf_index']
         st_rec = self.pgf.tree['children'][pgf_index]['contents']
         chnls = [ch for ch in self.pgf.tree['children'][pgf_index]['children']]
         for ch_index, chnl in enumerate(chnls):
             ep_start = sig.t_start
             for se_epoch_index, se_epoch in enumerate(chnl['children']):
                 se_rec = se_epoch['contents']
                 se_duration = pq.Quantity(float(se_rec.seDuration),'s')
                 if not(int(se_rec.seVoltageSource)):
                     se_voltage = pq.Quantity(float(se_rec.seVoltage),'V')
                 else:
                     se_voltage = pq.Quantity(float(chnl['contents'].chHolding),'V')
                 epoch = neo.Epoch(ep_start,se_duration,'protocol_epoch',value=se_voltage,channel_index=ch_index)
                 fully_annototate(chnl,epoch)
                 epoch.annotations['sig_index'] = sig_index
                 ep_start = ep_start + se_duration
                 seg.epochs.append(epoch)
     return seg
Example #17
 def read_block(self,
                lazy = False,
                cascade = True,
                group = 0):
     blo = Block(name = 'test')
     if cascade:
         tree = getbyroute(self.pul.tree,[0,group])
         for i,child in enumerate(tree['children']):
             blo.segments.append(self.read_segment(group=group,series = i))
          annotations = list(tree['contents'].__dict__.keys())
          annotations.remove('readlist')
         for a in annotations:
             d = {a:str(tree['contents'].__dict__[a])}
             blo.annotate(**d)
     create_many_to_one_relationship(blo)
     return blo
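A usage sketch for the two HEKA-style readers above. The IO class name and file name are hypothetical; only the read_block/read_segment signatures shown in these examples are assumed:

io = HekaIO(filename='cell1.dat')          # hypothetical class and file name
blo = io.read_block(group=0)               # one Segment per series in the group, as above
seg = io.read_segment(group=0, series=1)   # or read a single series directly
print(blo.annotations)                     # the tree 'contents' fields copied in by annotate()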
Example #18
    def test__construct_subsegment_by_unit(self):
        nb_seg = 3
        nb_unit = 7
        unit_with_sig = np.array([0, 2, 5])
        signal_types = ['Vm', 'Conductances']
        sig_len = 100

        #recordingchannelgroups
        rcgs = [RecordingChannelGroup(name='Vm',
                                      channel_indexes=unit_with_sig),
                RecordingChannelGroup(name='Conductance',
                                      channel_indexes=unit_with_sig)]

        # Unit
        all_unit = []
        for u in range(nb_unit):
            un = Unit(name='Unit #%d' % u, channel_indexes=np.array([u]))
            assert_neo_object_is_compliant(un)
            all_unit.append(un)

        blk = Block()
        blk.recordingchannelgroups = rcgs
        for s in range(nb_seg):
            seg = Segment(name='Simulation %s' % s)
            for j in range(nb_unit):
                st = SpikeTrain([1, 2, 3], units='ms',
                                t_start=0., t_stop=10)
                st.unit = all_unit[j]

            for t in signal_types:
                anasigarr = AnalogSignalArray(np.zeros((sig_len,
                                                        len(unit_with_sig))),
                                              units='nA',
                                              sampling_rate=1000.*pq.Hz,
                                              channel_indexes=unit_with_sig)
                seg.analogsignalarrays.append(anasigarr)

        create_many_to_one_relationship(blk)
        for unit in all_unit:
            assert_neo_object_is_compliant(unit)
        for rcg in rcgs:
            assert_neo_object_is_compliant(rcg)
        assert_neo_object_is_compliant(blk)

        # what you want
        newseg = seg.construct_subsegment_by_unit(all_unit[:4])
        assert_neo_object_is_compliant(newseg)
Example #19
 def read(self, lazy=False, cascade=True,  **kargs):
     if Block in self.readable_objects:
         if (hasattr(self, 'read_all_blocks') and
                 callable(getattr(self, 'read_all_blocks'))):
             return self.read_all_blocks(lazy=lazy, cascade=cascade,
                                         **kargs)
         return [self.read_block(lazy=lazy, cascade=cascade, **kargs)]
     elif Segment in self.readable_objects:
         bl = Block(name='One segment only')
         if not cascade:
             return bl
         seg = self.read_segment(lazy=lazy, cascade=cascade,  **kargs)
         bl.segments.append(seg)
         create_many_to_one_relationship(bl)
         return [bl]
     else:
         raise NotImplementedError
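Because this dispatch lives in the IO base class, any IO can be driven the same way; for instance with neo's bundled ExampleIO (a fake-data IO, so the file name is ignored):

from neo.io import ExampleIO

io = ExampleIO(filename='anything.fake')
blocks = io.read(lazy=False, cascade=True)   # always a list of Blocks
# a segment-only IO gets wrapped in a one-segment Block, as shown above
for block in blocks:
    for seg in block.segments:
        print(seg.name, len(seg.analogsignals), len(seg.spiketrains))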
Example #20
def generate_from_supported_objects( supported_objects ):
    #~ create_many_to_one_relationship
    if Block in supported_objects:
        higher = generate_one_simple_block(supported_objects= supported_objects)

        # We do not create RC and RCG if they are not in supported_objects;
        # there is already a test for this in generate_one_simple_block, so
        # the following call was removed:
        #finalize_block(higher)

    elif Segment in supported_objects:
        higher = generate_one_simple_segment(supported_objects= supported_objects)
    else:
        #TODO
        return None

    create_many_to_one_relationship(higher)
    return higher
Example #21
def generate_from_supported_objects(supported_objects):
    #~ create_many_to_one_relationship
    objects = supported_objects
    if Block in objects:
        higher = generate_one_simple_block(supported_objects=objects)

        # We do not create RC and RCG if they are not in objects;
        # there is already a test for this in generate_one_simple_block, so
        # the following call was removed:
        #finalize_block(higher)

    elif Segment in objects:
        higher = generate_one_simple_segment(supported_objects=objects)
    else:
        #TODO
        return None

    create_many_to_one_relationship(higher)
    return higher
Example #22
    def test_block_list_recordingchannel(self):
        blk = Block(name='a block')
        blk.recordingchannelgroups = [self.rcg1, self.rcg2]
        create_many_to_one_relationship(blk)
        #assert_neo_object_is_compliant(blk)

        chanres1 = [
            chan.name
            for chan in blk.recordingchannelgroups[0].recordingchannels
        ]
        chanres2 = [
            chan.name
            for chan in blk.recordingchannelgroups[1].recordingchannels
        ]
        chanres = [chan.name for chan in blk.list_recordingchannels]

        self.assertEqual(self.channames1, chanres1)
        self.assertEqual(self.channames2, chanres2)
        self.assertEqual(self.channames, chanres)
Example #23
    def read_block(
        self,
        cascade=True,
        lazy=False,
    ):
        """
        Arguments:

        """
        d = scipy.io.loadmat(self.filename,
                             struct_as_record=False,
                             squeeze_me=True)
        assert 'block' in d, 'no block in ' + self.filename
        bl_struct = d['block']
        bl = self.create_ob_from_struct(bl_struct,
                                        'Block',
                                        cascade=cascade,
                                        lazy=lazy)
        create_many_to_one_relationship(bl)
        return bl
Example #24
    def read_segment(self,
                            lazy = False,
                            cascade = True,
                            delimiter = '\t',
                            t_start = 0.*pq.s,
                            unit = pq.s,
                            ):
        """
        Arguments:
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
            t_start : start time of all spiketrains, 0 by default
            unit : unit of the spike times, can be a str or a Quantity directly
        """
        unit = pq.Quantity(1, unit)

        seg = Segment(file_origin = os.path.basename(self.filename))
        if not cascade:
            return seg

        f = open(self.filename, 'Ur')
        for i,line in enumerate(f) :
            alldata = line[:-1].split(delimiter)
            if alldata[-1] == '': alldata = alldata[:-1]
            if alldata[0] == '': alldata = alldata[1:]
            if lazy:
                spike_times = [ ]
                t_stop = t_start
            else:
                spike_times = np.array(alldata).astype('f')
                t_stop = spike_times.max()*unit

            sptr = SpikeTrain(spike_times*unit, t_start=t_start, t_stop=t_stop)
            if lazy:
                sptr.lazy_shape = len(alldata)

            sptr.annotate(channel_index = i)
            seg.spiketrains.append(sptr)
        f.close()

        create_many_to_one_relationship(seg)
        return seg
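A hedged usage sketch of the reader above; the pattern matches neo's AsciiSpikeTrainIO (one line of spike times per unit), and the file name is hypothetical:

import quantities as pq
from neo.io import AsciiSpikeTrainIO  # assumption: the code above is this IO

io = AsciiSpikeTrainIO(filename='spikes.txt')
seg = io.read_segment(delimiter='\t', t_start=0. * pq.s, unit=pq.s)
for st in seg.spiketrains:
    print(st.annotations['channel_index'], len(st), st.t_stop)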
Example #25
    def setup_segments(self):
        params = {'testarg2': 'yes', 'testarg3': True}
        self.segment1 = Segment(name='test', description='tester 1',
                                file_origin='test.file',
                                testarg1=1, **params)
        self.segment2 = Segment(name='test', description='tester 2',
                                file_origin='test.file',
                                testarg1=1, **params)
        self.segment1.annotate(testarg1=1.1, testarg0=[1, 2, 3])
        self.segment2.annotate(testarg11=1.1, testarg10=[1, 2, 3])

        self.segment1.analogsignals = self.sig1
        self.segment2.analogsignals = self.sig2

        self.segment1.analogsignalarrays = self.sigarr1
        self.segment2.analogsignalarrays = self.sigarr2

        self.segment1.epochs = self.epoch1
        self.segment2.epochs = self.epoch2

        self.segment1.epocharrays = self.epocharr1
        self.segment2.epocharrays = self.epocharr2

        self.segment1.events = self.event1
        self.segment2.events = self.event2

        self.segment1.eventarrays = self.eventarr1
        self.segment2.eventarrays = self.eventarr2

        self.segment1.irregularlysampledsignals = self.irsig1
        self.segment2.irregularlysampledsignals = self.irsig2

        self.segment1.spikes = self.spike1
        self.segment2.spikes = self.spike2

        self.segment1.spiketrains = self.train1
        self.segment2.spiketrains = self.train2

        create_many_to_one_relationship(self.segment1)
        create_many_to_one_relationship(self.segment2)
Example #26
    def setup_recordingchannelgroups(self):
        params = {'testarg2': 'yes', 'testarg3': True}
        self.rcg1 = RecordingChannelGroup(name='test',
                                          description='tester 1',
                                          file_origin='test.file',
                                          testarg1=1,
                                          **params)
        self.rcg2 = RecordingChannelGroup(name='test',
                                          description='tester 2',
                                          file_origin='test.file',
                                          testarg1=1,
                                          **params)
        self.rcg1.annotate(testarg1=1.1, testarg0=[1, 2, 3])
        self.rcg2.annotate(testarg11=1.1, testarg10=[1, 2, 3])

        self.rcg1.units = self.units1
        self.rcg2.units = self.units2
        self.rcg1.recordingchannels = self.rchan1
        self.rcg2.recordingchannels = self.rchan2
        self.rcg1.analogsignalarrays = self.sigarr1
        self.rcg2.analogsignalarrays = self.sigarr2

        create_many_to_one_relationship(self.rcg1)
        create_many_to_one_relationship(self.rcg2)
Example #27
    def read_block(
        self,
        lazy=False,
        cascade=True,
    ):
        """
        """

        tree = ElementTree.parse(self.filename)
        root = tree.getroot()
        acq = root.find('acquisitionSystem')
        nbits = int(acq.find('nBits').text)
        nbchannel = int(acq.find('nChannels').text)
        sampling_rate = float(acq.find('samplingRate').text) * pq.Hz
        voltage_range = float(acq.find('voltageRange').text)
        #offset = int(acq.find('offset').text)
        amplification = float(acq.find('amplification').text)

        bl = Block(
            file_origin=os.path.basename(self.filename).replace('.xml', ''))
        if cascade:
            seg = Segment()
            bl.segments.append(seg)

            # RC and RCG
            rc_list = []
            for i, xml_rcg in enumerate(
                    root.find('anatomicalDescription').find(
                        'channelGroups').findall('group')):
                rcg = RecordingChannelGroup(name='Group {}'.format(i))
                bl.recordingchannelgroups.append(rcg)
                for xml_rc in xml_rcg:
                    rc = RecordingChannel(index=int(xml_rc.text))
                    rc_list.append(rc)
                    rcg.recordingchannels.append(rc)
                    rc.recordingchannelgroups.append(rcg)
                rcg.channel_indexes = np.array(
                    [rc.index for rc in rcg.recordingchannels], dtype=int)
                rcg.channel_names = np.array([
                    'Channel{}'.format(rc.index)
                    for rc in rcg.recordingchannels
                ],
                                             dtype='S')

            # AnalogSignals
            reader = RawBinarySignalIO(
                filename=self.filename.replace('.xml', '.dat'))
            seg2 = reader.read_segment(
                cascade=True,
                lazy=lazy,
                sampling_rate=sampling_rate,
                t_start=0. * pq.s,
                unit=pq.V,
                nbchannel=nbchannel,
                bytesoffset=0,
                dtype=np.int16 if nbits <= 16 else np.int32,
                rangemin=-voltage_range / 2.,
                rangemax=voltage_range / 2.,
            )
            for s, sig in enumerate(seg2.analogsignals):
                if not lazy:
                    sig /= amplification
                sig.segment = seg
                seg.analogsignals.append(sig)
                rc_list[s].analogsignals.append(sig)

        create_many_to_one_relationship(bl)
        return bl
Example #28
    def read_segment(
        self,
        lazy=False,
        cascade=True,
        delimiter='\t',
        usecols=None,
        skiprows=0,
        timecolumn=None,
        sampling_rate=1. * pq.Hz,
        t_start=0. * pq.s,
        unit=pq.V,
        method='genfromtxt',
    ):
        """
        Arguments:
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
            usecols : if None take all columns, otherwise a list of selected columns
            skiprows : skip the first n lines in case they contain header information
            timecolumn : None or a valid int pointing to the time column
            sampling_rate : the sampling rate of the signals; not taken into account if timecolumn is not None
            t_start : time of the first sample
            unit : unit of the AnalogSignal, can be a str or a Quantity directly

            method : 'genfromtxt', 'csv' or 'homemade'
                        in case of bugs you can try one of these methods

                        'genfromtxt' uses numpy.genfromtxt
                        'csv' uses the csv module
                        'homemade' uses an intuitive, more robust but slower method

        """
        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        if type(sampling_rate) == float or type(sampling_rate) == int:
            # if not a quantity, Hz by default
            sampling_rate = sampling_rate * pq.Hz

        if type(t_start) == float or type(t_start) == int:
            # if not a quantity, s by default
            t_start = t_start * pq.s

        unit = pq.Quantity(1, unit)

        #loadtxt
        if method == 'genfromtxt':
            sig = np.genfromtxt(self.filename,
                                delimiter=delimiter,
                                usecols=usecols,
                                skiprows=skiprows,
                                dtype='f')
            if len(sig.shape) == 1:
                sig = sig[:, np.newaxis]
        elif method == 'csv':
            tab = [
                l for l in csv.reader(open(self.filename, 'rU'),
                                      delimiter=delimiter)
            ]
            tab = tab[skiprows:]
            sig = np.array(tab, dtype='f')
        elif method == 'homemade':
            fid = open(self.filename, 'rU')
            for l in range(skiprows):
                fid.readline()
            tab = []
            for line in fid.readlines():
                line = line.replace('\r', '')
                line = line.replace('\n', '')
                l = line.split(delimiter)
                while '' in l:
                    l.remove('')
                tab.append(l)
            sig = np.array(tab, dtype='f')

        if timecolumn is not None:
            sampling_rate = 1. / np.mean(np.diff(sig[:, timecolumn])) * pq.Hz
            t_start = sig[0, timecolumn] * pq.s

        for i in range(sig.shape[1]):
            if timecolumn == i: continue
            if usecols is not None and i not in usecols: continue

            if lazy:
                signal = [] * unit
            else:
                signal = sig[:, i] * unit

            anaSig = AnalogSignal(signal,
                                  sampling_rate=sampling_rate,
                                  t_start=t_start,
                                  channel_index=i,
                                  name='Column %d' % i)
            if lazy:
                anaSig.lazy_shape = sig.shape
            seg.analogsignals.append(anaSig)

        create_many_to_one_relationship(seg)
        return seg
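A hedged sketch of driving the column reader above; this matches neo's AsciiSignalIO, and the file name and column layout are hypothetical:

import quantities as pq
from neo.io import AsciiSignalIO  # assumption: the code above is this IO

io = AsciiSignalIO(filename='signals.csv')
# column 0 holds the time vector, so sampling_rate and t_start are derived from it
seg = io.read_segment(delimiter=',', timecolumn=0, unit=pq.mV, method='genfromtxt')
for sig in seg.analogsignals:
    print(sig.name, sig.sampling_rate, sig.t_start)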
Example #29
    def read_segment(
        self,
        # the first 2 keyword arguments are imposed by the neo.io API
        lazy=False,
        cascade=True,
        # all following arguments are decided by this IO and are free
        segment_duration=15.,
        num_analogsignal=4,
        num_spiketrain_by_channel=3,
    ):
        """
        Return a fake Segment.

        The self.filename does not matter.

        This IO reads a Segment by default.

        This is just an example to be adapted to each IO class.
        In this case these 3 parameters are taken into account because this
        function returns a generated segment with fake AnalogSignals and fake
        SpikeTrains.

        Parameters:
            segment_duration : the duration of the segment in seconds
            num_analogsignal : number of AnalogSignals in this segment
            num_spiketrain : number of SpikeTrains in this segment

        """

        sampling_rate = 10000.  #Hz
        t_start = -1.

        #time vector for generated signal
        timevect = np.arange(t_start, t_start + segment_duration,
                             1. / sampling_rate)

        # create an empty segment
        seg = Segment(name='it is a seg from exampleio')

        if cascade:
            # read nested analogsignals
            for i in range(num_analogsignal):
                ana = self.read_analogsignal(lazy=lazy,
                                             cascade=cascade,
                                             channel_index=i,
                                             segment_duration=segment_duration,
                                             t_start=t_start)
                seg.analogsignals += [ana]

            # read nested spiketrains
            for i in range(num_analogsignal):
                for _ in range(num_spiketrain_by_channel):
                    sptr = self.read_spiketrain(
                        lazy=lazy,
                        cascade=cascade,
                        segment_duration=segment_duration,
                        t_start=t_start,
                        channel_index=i)
                    seg.spiketrains += [sptr]

            # create an EventArray that mimics triggers.
            # note that ExampleIO does not allow direct access to EventArrays;
            # for that you need read_segment(cascade=True)
            eva = EventArray()
            if lazy:
                # in the lazy case no data are read,
                # so eva stays empty
                pass
            else:
                # otherwise it really contains data
                n = 1000

                # neo.io supports quantities; the time vector uses seconds as its unit
                eva.times = timevect[(np.random.rand(n) *
                                      timevect.size).astype('i')] * pq.s
                # all durations are the same
                eva.durations = np.ones(n) * 500 * pq.ms
                # label
                l = []
                for i in range(n):
                    if np.random.rand() > .6: l.append('TriggerA')
                    else: l.append('TriggerB')
                eva.labels = np.array(l)

            seg.eventarrays += [eva]

        create_many_to_one_relationship(seg)
        return seg
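ExampleIO ships with neo, so the fake segment above can be generated directly; a short sketch:

from neo.io import ExampleIO

io = ExampleIO(filename='anything.fake')   # the file name does not matter, per the docstring
seg = io.read_segment(segment_duration=5., num_analogsignal=2,
                      num_spiketrain_by_channel=2)
print(len(seg.analogsignals), len(seg.spiketrains), len(seg.eventarrays))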
Example #30
    def read_segment(self, cascade = True, lazy = False,):
        """
        Arguments:
        """
        f = struct_file(self.filename, 'rb')

        #Name
        f.seek(64,0)
        surname = f.read(22)
        while surname[-1] == ' ' :
            if len(surname) == 0 :break
            surname = surname[:-1]
        firstname = f.read(20)
        while firstname[-1] == ' ' :
            if len(firstname) == 0 :break
            firstname = firstname[:-1]

        #Date
        f.seek(128,0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year+1900 , month , day, hour, minute, sec)

        f.seek(138,0)
        Data_Start_Offset , Num_Chan , Multiplexer , Rate_Min , Bytes = f.read_f('IHHHH')
        #~ print Num_Chan, Bytes

        #header version
        f.seek(175,0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(  name = firstname+' '+surname,
                                    file_origin = os.path.basename(self.filename),
                                    )
        seg.annotate(surname = surname)
        seg.annotate(firstname = firstname)
        seg.annotate(rec_datetime = rec_datetime)

        if not cascade:
            return seg

        # area
        f.seek(176,0)
        zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B', 'IMPED_E', 'MONTAGE',
                'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A', 'EVENT B', 'TRIGGER']
        zones = { }
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset,0)
            rawdata = np.fromstring(f.read() , dtype = 'u'+str(Bytes))
            rawdata = rawdata.reshape(( rawdata.size/Num_Chan , Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos,0)
        code = np.fromfile(f, dtype='u2', count=Num_Chan)

        units = {-1: pq.nano*pq.V, 0:pq.uV, 1:pq.mV, 2:1, 100: pq.percent,  101:pq.dimensionless, 102:pq.dimensionless}

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos+code[c]*128+2,0)

            label = f.read(6).strip("\x00")
            ground = f.read(6).strip("\x00")
            logical_min , logical_max, logical_ground, physical_min, physical_max = f.read_f('iiiii')
            k, = f.read_f('h')
            if k in units.keys() :
                unit = units[k]
            else :
                unit = pq.uV

            f.seek(8,1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min

            if lazy:
                signal = [ ]*unit
            else:
                factor = float(physical_max - physical_min) / float(logical_max-logical_min+1)
                signal = ( rawdata[:,c].astype('f') - logical_ground )* factor*unit

            anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                  name=label, channel_index=c)
            if lazy:
                anaSig.lazy_shape = None
            anaSig.annotate(ground = ground)

            seg.analogsignals.append( anaSig )


        sampling_rate = np.mean([ anaSig.sampling_rate for anaSig in seg.analogsignals ])*pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [ ('TRIGGER', 'u2'), ('NOTE', 'S40') ]:
            zname2, pos, length = zones[zname]
            f.seek(pos,0)
            triggers = np.fromstring(f.read(length) , dtype = [('pos','u4'), ('label', label_dtype)] ,  )
            ea = EventArray(name =zname[0]+zname[1:].lower())
            if not lazy:
                keep = (triggers['pos']>=triggers['pos'][0]) & (triggers['pos']<rawdata.shape[0]) & (triggers['pos']!=0)
                triggers = triggers[keep]
                ea.labels = triggers['label'].astype('S')
                ea.times = (triggers['pos']/sampling_rate).rescale('s')
            else:
                ea.lazy_shape = triggers.size
            seg.eventarrays.append(ea)
        
        # Read Event A and B
        # Not so well  tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos,0)
            epochs = np.fromstring(f.read(length) , 
                            dtype = [('label','u4'),('start','u4'),('stop','u4'),]  )
            ep = EpochArray(name =zname[0]+zname[1:].lower())
            if not lazy:
                keep = (epochs['start']>0) & (epochs['start']<rawdata.shape[0]) & (epochs['stop']<rawdata.shape[0])
                epochs = epochs[keep]
                ep.labels = epochs['label'].astype('S')
                ep.times = (epochs['start']/sampling_rate).rescale('s')
                ep.durations = ((epochs['stop'] - epochs['start'])/sampling_rate).rescale('s')
            else:
                ep.lazy_shape = triggers.size
            seg.epocharrays.append(ep)
        
        
        create_many_to_one_relationship(seg)
        return seg
Example #31
    def read_block(self,
                   lazy=False,
                   cascade=True,
                   n_starts=None,
                   n_stops=None,
                   channel_list=None):
        """Reads the file and returns contents as a Block.

        The Block contains one Segment for each entry in zip(n_starts,
        n_stops). If these parameters are not specified, the default is
        to store all data in one Segment.

        The Block also contains one RecordingChannelGroup for all channels.

        n_starts: list or array of starting times of each Segment in
            samples from the beginning of the file.
        n_stops: similar, stopping times of each Segment
        channel_list: list of channel numbers to get. The neural data channels
            are 1 - 128. The analog inputs are 129 - 144. The default
            is to acquire all channels.

        Returns: Block object containing the data.
        """

        # Create block
        block = Block(file_origin=self.filename)

        if not cascade:
            return block

        self.loader = Loader(self.filename)
        self.loader.load_file()
        self.header = self.loader.header

        # If channels not specified, get all
        if channel_list is None:
            channel_list = self.loader.get_neural_channel_numbers()

        # If not specified, load all as one Segment
        if n_starts is None:
            n_starts = [0]
            n_stops = [self.loader.header.n_samples]

        #~ # Add channel hierarchy
        #~ rcg = RecordingChannelGroup(name='allchannels',
        #~ description='group of all channels', file_origin=self.filename)
        #~ block.recordingchannelgroups.append(rcg)
        #~ self.channel_number_to_recording_channel = {}

        #~ # Add each channel at a time to hierarchy
        #~ for ch in channel_list:
        #~ ch_object = RecordingChannel(name='channel%d' % ch,
        #~ file_origin=self.filename, index=ch)
        #~ rcg.channel_indexes.append(ch_object.index)
        #~ rcg.channel_names.append(ch_object.name)
        #~ rcg.recordingchannels.append(ch_object)
        #~ self.channel_number_to_recording_channel[ch] = ch_object

        # Iterate through n_starts and n_stops and add one Segment
        # per each.
        for n, (t1, t2) in enumerate(zip(n_starts, n_stops)):
            # Create segment and add metadata
            seg = self.read_segment(n_start=t1,
                                    n_stop=t2,
                                    chlist=channel_list,
                                    lazy=lazy,
                                    cascade=cascade)
            seg.name = 'Segment %d' % n
            seg.index = n
            t1sec = t1 / self.loader.header.f_samp
            t2sec = t2 / self.loader.header.f_samp
            seg.description = 'Segment %d from %f to %f' % (n, t1sec, t2sec)

            # Link to block
            block.segments.append(seg)

        # Create hardware view, and bijectivity
        tools.populate_RecordingChannel(block)
        tools.create_many_to_one_relationship(block)

        return block
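A hedged sketch of splitting a recording into two Segments with the reader above. The IO instance and sample counts are hypothetical; only the read_block signature documented above is assumed:

io = SomeBlackrockLikeIO(filename='rec.ns5')   # hypothetical IO exposing the read_block above
# two Segments: samples 0-30000 and 30000-60000, neural channels 1-4 only
block = io.read_block(n_starts=[0, 30000], n_stops=[30000, 60000],
                      channel_list=[1, 2, 3, 4])
for seg in block.segments:
    print(seg.name, seg.description)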
Example #32
def proc_f32(filename):
    '''Load an f32 file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareF32IO to
    make sure BrainwareF32IO is working properly

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    '''

    filenameorig = os.path.basename(filename[:-12] + '.f32')

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    rcg = RecordingChannelGroup(file_origin=filenameorig)
    rcg.channel_indexes = np.array([], dtype=np.int)
    rcg.channel_names = np.array([], dtype='S')
    unit = Unit(file_origin=filenameorig)

    # load objects into their containers
    block.recordingchannelgroups.append(rcg)
    rcg.units.append(unit)

    try:
        with np.load(filename) as f32obj:
            f32file = list(f32obj.items())[0][1].flatten()
    except IOError as exc:
        if 'as a pickle' in str(exc):
            create_many_to_one_relationship(block)
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file['sweeplength']]
    stims = [res.flatten().tolist() for res in f32file['stim']]

    sweeps = [res['spikes'].flatten() for res in f32file['sweep'] if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype('float32')
            else:
                trainpts = []

            paramnames = ['Param%s' % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts,
                               units=pq.ms,
                               t_start=0,
                               t_stop=sweeplength,
                               file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            unit.spiketrains.append(train)
            block.segments.append(segment)

    create_many_to_one_relationship(block)

    return block
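
# A minimal, self-contained sketch of the filename convention described in the
# docstring above (the path below is an assumption used purely for illustration):
# the trailing '_f32_pyX.npz' suffix is 12 characters long, so filename[:-12]
# strips it before the '.f32' extension is re-appended.
import os

npz_name = '/some/dir/file1_f32_py2.npz'              # hypothetical input path
f32_name = os.path.basename(npz_name[:-12] + '.f32')
print(f32_name)                                        # -> 'file1.f32'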
Ejemplo n.º 33
    def read_block(self, lazy=False, cascade=True, 
        n_starts=None, n_stops=None, channel_list=None):
        """Reads the file and returns contents as a Block.
        
        The Block contains one Segment for each entry in zip(n_starts,
        n_stops). If these parameters are not specified, the default is
        to store all data in one Segment.
        
        The Block also contains one RecordingChannelGroup for all channels.
        
        n_starts: list or array of starting times of each Segment in
            samples from the beginning of the file.
        n_stops: similar, stopping times of each Segment
        channel_list: list of channel numbers to get. The neural data channels
            are 1 - 128. The analog inputs are 129 - 144. The default
            is to acquire all channels.
        
        Returns: Block object containing the data.
        """


        # Create block
        block = Block(file_origin=self.filename)
        
        if not cascade:
            return block
        
        self.loader = Loader(self.filename)
        self.loader.load_file()
        self.header = self.loader.header
        
        # If channels not specified, get all
        if channel_list is None:
            channel_list = self.loader.get_neural_channel_numbers()
        
        # If not specified, load all as one Segment
        if n_starts is None:
            n_starts = [0]
            n_stops = [self.loader.header.n_samples]
        
        #~ # Add channel hierarchy
        #~ rcg = RecordingChannelGroup(name='allchannels',
            #~ description='group of all channels', file_origin=self.filename)
        #~ block.recordingchannelgroups.append(rcg)
        #~ self.channel_number_to_recording_channel = {}

        #~ # Add each channel at a time to hierarchy
        #~ for ch in channel_list:            
            #~ ch_object = RecordingChannel(name='channel%d' % ch,
                #~ file_origin=self.filename, index=ch)
            #~ rcg.channel_indexes.append(ch_object.index)
            #~ rcg.channel_names.append(ch_object.name)
            #~ rcg.recordingchannels.append(ch_object)
            #~ self.channel_number_to_recording_channel[ch] = ch_object

        # Iterate through n_starts and n_stops and add one Segment
        # per each.
        for n, (t1, t2) in enumerate(zip(n_starts, n_stops)):
            # Create segment and add metadata
            seg = self.read_segment(n_start=t1, n_stop=t2, chlist=channel_list,
                lazy=lazy, cascade=cascade)
            seg.name = 'Segment %d' % n
            seg.index = n
            t1sec = t1 / self.loader.header.f_samp
            t2sec = t2 / self.loader.header.f_samp
            seg.description = 'Segment %d from %f to %f' % (n, t1sec, t2sec)
            
            # Link to block
            block.segments.append(seg)
        
        # Create hardware view, and bijectivity
        tools.populate_RecordingChannel(block)
        tools.create_many_to_one_relationship(block)        
        
        return block
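
# A self-contained sketch (an assumption, not part of the IO) of how the
# zip(n_starts, n_stops) pairs above become one Segment per sample range, with
# the description computed from the sampling rate exactly as in read_block.
f_samp = 30000.0                                       # assumed sampling rate (Hz)
n_starts, n_stops = [0, 30000], [30000, 60000]         # two one-second ranges
for n, (t1, t2) in enumerate(zip(n_starts, n_stops)):
    print('Segment %d from %f to %f' % (n, t1 / f_samp, t2 / f_samp))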
Ejemplo n.º 34
    def read_block(self, lazy=False, cascade=True, waveform=False):
        """Returns a Block containing spike information.

        There is no obvious way to infer the segment boundaries from
        raw spike times, so for now all spike times are returned in one
        big segment. The way around this would be to specify the segment
        boundaries, and then change this code to put the spikes in the right
        segments.
        """
        # Create block and segment to hold all the data
        block = Block()
        # Search data directory for KlustaKwik files.
        # If nothing found, return empty block
        self._fetfiles = self._fp.read_filenames("fet")
        self._clufiles = self._fp.read_filenames("clu")
        if len(self._fetfiles) == 0 or not cascade:
            return block

        # Create segments to hold all of the data
        segs = {}
        for rec_name, rec_offset in self.split_table:
            seg = Segment(name=(rec_name), index=int(rec_name), file_origin=self.filename)
            block.segments.append(seg)
            segs[rec_name] = seg

        # Load spike times from each group and store in a dict, keyed
        # by group number
        self.spiketrains = dict()
        for group in sorted(self._fetfiles.keys()):
            # Load spike times
            fetfile = self._fetfiles[group]
            spks, features = self._load_spike_times(fetfile)

            # Load cluster ids or generate
            if group in self._clufiles:
                clufile = self._clufiles[group]
                uids = self._load_unit_id(clufile)
            else:
                # unclustered data, assume all zeros
                uids = np.zeros(spks.shape, dtype=np.int32)

            # error check
            if len(spks) != len(uids):
                raise ValueError("lengths of fet and clu files are different")

            # Create a recording channel group
            rcg = RecordingChannelGroup(name=group)
            block.recordingchannelgroups.append(rcg)
            # Create Unit for each cluster
            unique_unit_ids = np.unique(uids)
            for unit_id in sorted(unique_unit_ids):
                if unit_id > 1:
                    # Initialize the unit
                    u = Unit(name=("unit %d from group %d" % (unit_id, group)), index=unit_id, group=group)
                    offset = 0
                    for rec_name, rec_offset in self.split_table:
                        idx = (spks > offset) & (spks <= (offset + rec_offset))
                        # print str(offset) + ' : ' + str(rec_offset)
                        tmp_spks = spks[idx] - offset
                        offset += rec_offset
                        tmp_uids = uids[idx]
                        if len(tmp_spks) > 0:
                            tmp_t_stop = tmp_spks.max() / self.sampling_rate
                        else:
                            tmp_t_stop = 0
                        # Initialize a new SpikeTrain for the spikes from this unit
                        if lazy:
                            st = SpikeTrain(
                                times=[],
                                units="sec",
                                t_start=0.0,
                                t_stop=tmp_t_stop,
                                name=("unit %d from group %d" % (unit_id, group)),
                            )
                            st.lazy_shape = len(tmp_spks[tmp_uids == unit_id])
                        else:
                            st = SpikeTrain(
                                times=tmp_spks[tmp_uids == unit_id] / self.sampling_rate,
                                units="sec",
                                t_start=0.0,
                                t_stop=tmp_t_stop,
                                name=("unit %d from group %d" % (unit_id, group)),
                            )
                        st.annotations["cluster"] = unit_id
                        st.annotations["group"] = group

                        # put features in
                        if waveform:
                            st.waveforms = features
                            # .spk.n (spike waveforms)
                            # .upsk.n (unfiltered spike waveforms)

                        # Link
                        u.spiketrains.append(st)
                        # TODO add u to block!!!
                        tmp_seg = segs[rec_name]
                        tmp_seg.spiketrains.append(st)
                    rcg.units.append(u)
        print(block.recordingchannelgroups)
        create_many_to_one_relationship(block)
        return block
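
# A minimal, self-contained sketch (assumed values, not the IO's own data) of the
# offset bookkeeping used above: spike samples are concatenated across recordings,
# each recording claims the window (offset, offset + length], and its spikes are
# shifted back to recording-local sample numbers.
import numpy as np

split_table = [('rec0', 10000), ('rec1', 20000)]       # hypothetical (name, n_samples)
spks = np.array([100, 9999, 10050, 25000])             # concatenated spike samples
offset = 0
for rec_name, rec_len in split_table:
    idx = (spks > offset) & (spks <= offset + rec_len)
    print(rec_name, spks[idx] - offset)                # recording-local samples
    offset += rec_len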
Ejemplo n.º 35
    def test1(self):
        """Write data to binary file, then read it back in and verify"""
        # delete temporary file before trying to write to it
        if os.path.exists(self.fn):
            os.remove(self.fn)

        block = neo.Block()
        full_range = 234 * pq.mV

        # Create segment1 with analogsignals
        segment1 = neo.Segment()
        sig1 = neo.AnalogSignal([3, 4, 5],
                                units='mV',
                                channel_index=3,
                                sampling_rate=30000. * pq.Hz)
        sig2 = neo.AnalogSignal([6, -4, -5],
                                units='mV',
                                channel_index=4,
                                sampling_rate=30000. * pq.Hz)
        segment1.analogsignals.append(sig1)
        segment1.analogsignals.append(sig2)

        # Create segment2 with analogsignals
        segment2 = neo.Segment()
        sig3 = neo.AnalogSignal([-3, -4, -5],
                                units='mV',
                                channel_index=3,
                                sampling_rate=30000. * pq.Hz)
        sig4 = neo.AnalogSignal([-6, 4, 5],
                                units='mV',
                                channel_index=4,
                                sampling_rate=30000. * pq.Hz)
        segment2.analogsignals.append(sig3)
        segment2.analogsignals.append(sig4)

        # Link segments to block
        block.segments.append(segment1)
        block.segments.append(segment2)

        # Create hardware view, and bijectivity
        #tools.populate_RecordingChannel(block)
        #print "problem happening"
        #print block.recordingchannelgroups[0].recordingchannels
        #chan = block.recordingchannelgroups[0].recordingchannels[0]
        #print chan.analogsignals
        #tools.create_many_to_one_relationship(block)
        #print "here: "
        #print block.segments[0].analogsignals[0].recordingchannel

        # Chris I prefer that:
        #tools.finalize_block(block)
        tools.populate_RecordingChannel(block)
        tools.create_many_to_one_relationship(block)

        # Check that blackrockio is correctly extracting channel indexes
        self.assertEqual(
            neo.io.blackrockio.channel_indexes_in_segment(segment1), [3, 4])
        self.assertEqual(
            neo.io.blackrockio.channel_indexes_in_segment(segment2), [3, 4])

        # Create writer. Write block, then read back in.
        bio = neo.io.BlackrockIO(filename=self.fn, full_range=full_range)
        bio.write_block(block)
        fi = open(self.fn, 'rb')

        # Text header
        self.assertEqual(fi.read(16), b'NEURALSG30 kS/s\x00')
        self.assertEqual(fi.read(8), b'\x00\x00\x00\x00\x00\x00\x00\x00')

        # Integers: period, channel count, channel index1, channel index2
        self.assertEqual(struct.unpack('<4I', fi.read(16)), (1, 2, 3, 4))

        # What should the signals be after conversion?
        conv = float(full_range) / 2**16
        sigs = np.array(
            [np.concatenate((sig1, sig3)),
             np.concatenate((sig2, sig4))])
        sigs_converted = np.rint(sigs / conv).astype(int)

        # Check that each time point is the same
        for time_slc in sigs_converted.transpose():
            written_data = struct.unpack('<2h', fi.read(4))
            self.assertEqual(list(time_slc), list(written_data))

        # Check that we read to the end
        currentpos = fi.tell()
        fi.seek(0, 2)
        truelen = fi.tell()
        self.assertEqual(currentpos, truelen)
        fi.close()
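
# A small sketch (an assumption based only on the assertions in the test above) of
# how the written binary header could be parsed back: 16 bytes of text, 8 reserved
# null bytes, then four little-endian uint32 values (period, channel count, and the
# two channel indexes).
import struct

header = b'NEURALSG30 kS/s\x00' + b'\x00' * 8 + struct.pack('<4I', 1, 2, 3, 4)
text = header[:16].decode('ascii')
period, n_channels, ch1, ch2 = struct.unpack('<4I', header[24:40])
print(text, period, n_channels, ch1, ch2)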
Ejemplo n.º 37
    def read_segment(self,
                     # the 2 first keyword arguments are imposed by neo.io API
                     lazy = False,
                     cascade = True,
                     # all following arguments are decided by this IO and are free
                     t_start = 0.,
                     segment_duration = 0.,
                    ):
        """
        Return a Segment containing all analog and spike channels, as well as
        all trigger events.

        Parameters:
            t_start : start time of the segment, in seconds.
            segment_duration : length of the segment, in seconds
                (0 means the whole file).
        """
        #if no segment duration is given, use the complete file
        if segment_duration == 0. :
            segment_duration=float(self.metadata["TimeSpan"])
        #if the segment duration is bigger than file, use the complete file
        if segment_duration >=float(self.metadata["TimeSpan"]):
            segment_duration=float(self.metadata["TimeSpan"])
        #if the time sum of start point and segment duration is bigger than
        #the file time span, cap it at the end
        if segment_duration+t_start>float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])-t_start
        
        # create an empty segment
        seg = Segment( name = "segment from the NeuroshareapiIO")

        if cascade:
            # read nested analogsignals
            
            if self.metadata["num_analogs"] == 0:
                print ("no analog signals in this file!")
            else:
                #run through the number of analog channels found at the __init__ function
                for i in range(self.metadata["num_analogs"]):
                    #create an analog signal object for each channel found
                    ana = self.read_analogsignal( lazy = lazy , cascade = cascade ,
                                             channel_index = self.metadata["elecChanId"][i],
                                            segment_duration = segment_duration, t_start=t_start)
                    #add analog signal read to segment object
                    seg.analogsignals += [ ana ]
            
            # read triggers (in this case without any duration)
            for i in range(self.metadata["num_trigs"]):
                #create event object for each trigger/bit found
                eva = self.read_eventarray(lazy = lazy , 
                                           cascade = cascade,
                                           channel_index = self.metadata["triggersId"][i],
                                           segment_duration = segment_duration,
                                           t_start = t_start,)
                #add event object to segment
                seg.eventarrays +=  [eva]
            #read epochs (digital events with duration)
            for i in range(self.metadata["num_digiEpochs"]):
                #create event object for each trigger/bit found
                epa = self.read_epocharray(lazy = lazy, 
                                           cascade = cascade,
                                           channel_index = self.metadata["digiEpochId"][i],
                                            segment_duration = segment_duration,
                                            t_start = t_start,)
                #add event object to segment
                seg.epocharrays +=  [epa]
            # read nested spiketrain
            #run through all spike channels found
            for i in range(self.metadata["num_spkChans"]):
                #create spike object
                sptr = self.read_spiketrain(lazy = lazy, cascade = cascade,
                        channel_index = self.metadata["spkChanId"][i],
                        segment_duration = segment_duration,
                        t_start = t_start)
                #add the spike object to segment
                seg.spiketrains += [sptr]

        create_many_to_one_relationship(seg)
        
        return seg
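
# A self-contained sketch (a hypothetical helper, not part of the IO) of the
# duration clamping performed at the top of read_segment: the requested window is
# capped so that it never extends past the total time span of the file.
def clamp_duration(t_start, segment_duration, time_span):
    if segment_duration == 0. or segment_duration >= time_span:
        segment_duration = time_span
    if segment_duration + t_start > time_span:
        segment_duration = time_span - t_start
    return segment_duration

print(clamp_duration(t_start=10., segment_duration=0., time_span=60.))   # 50.0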
Ejemplo n.º 38
def test3():
    """
    With no db : just a file
    """
    
    url = 'sqlite://'
    
    
    
    dbinfo = open_db(url, 
                        object_number_in_cache = 3000,
                        use_global_session = False,
                        compress = None,
                        )
    session = dbinfo.Session()
    
    #~ bl = neo.AxonIO(filename = 'File_axon_1.abf').read()
    #~ print bl.segments
    #~ bl2 = OEBase.from_neo(bl, generic_classes, cascade = True)
    
    from neo.test.io.generate_datasets import generate_one_simple_block
    from neo.io.tools import create_many_to_one_relationship, populate_RecordingChannel
    bl = generate_one_simple_block(supported_objects = [neo.Segment, neo.AnalogSignal, ])
    create_many_to_one_relationship(bl)
    populate_RecordingChannel(bl)
    bl2 = OEBase.from_neo(bl, dbinfo.mapped_classes, cascade = True)
    session.add(bl2)
    session.commit()
    print(bl2)
    
    treedescription1 = TreeDescription(
                            dbinfo =  dbinfo,
                            table_children = { 
                                                    'Block' : ['Segment' ],
                                                    'Segment' : [ 'AnalogSignal'],
                                                    },
                            columns_to_show = { },
                            table_on_top = 'Block',
                            #~ table_order = None,
                            )
    treedescription2 = TreeDescription(
                            dbinfo =  dbinfo,
                            table_children = { 
                                                    'Block' : ['RecordingChannelGroup' ],
                                                    'RecordingChannelGroup' : [ 'RecordingChannel', ],
                                                    'RecordingChannel' : [ 'AnalogSignal'],
                                                    },
                            columns_to_show = { },
                            table_on_top = 'Block',
                            #~ table_order = None,
                            )
    
    app = QApplication([ ])
    
    from OpenElectrophy.gui.contextmenu import context_menu
    
    w1 = QtSqlTreeView(session = session, treedescription = treedescription1,  context_menu = context_menu)
    w2 = QtSqlTreeView(session = session, treedescription = treedescription2,  context_menu = context_menu)
    w1.show()
    w2.show()
    sys.exit(app.exec_())
Ejemplo n.º 39
    def read_segment(self,
                                        lazy = False,
                                        cascade = True,
                                        load_spike_waveform = True,
                                            ):
        """

        """

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid , GlobalHeader ).read_f(offset = 0)

        # metadatas
        seg = Segment()
        seg.rec_datetime = datetime.datetime(  globalHeader['Year'] , globalHeader['Month']  , globalHeader['Day'] ,
                    globalHeader['Hour'] , globalHeader['Minute'] , globalHeader['Second'] )
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(plexon_version = globalHeader['Version'])

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channel headers = spikes and waveforms
        dspChannelHeaders = { }
        maxunit=0
        maxchan = 0
        for _ in range(globalHeader['NumDSPChannels']):
            # channel is 1 based
            channelHeader = HeaderReader(fid , ChannelHeader ).read_f(offset = None)
            channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
            channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
            dspChannelHeaders[channelHeader['Channel']]=channelHeader
            maxunit = max(channelHeader['NUnits'],maxunit)
            maxchan = max(channelHeader['Channel'],maxchan)

        # event channel header
        eventHeaders = { }
        for _ in range(globalHeader['NumEventChannels']):
            eventHeader = HeaderReader(fid , EventHeader ).read_f(offset = None)
            eventHeaders[eventHeader['Channel']] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = { }
        for _ in range(globalHeader['NumSlowChannels']):
            slowChannelHeader = HeaderReader(fid , SlowChannelHeader ).read_f(offset = None)
            slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders))
        sample_positions = np.zeros(len(slowChannelHeaders))
        t_starts = np.zeros(len(slowChannelHeaders), dtype = 'f')

        #spiketimes and waveform
        nb_spikes = np.zeros((maxchan+1, maxunit+1) ,dtype='i')
        wf_sizes = np.zeros((maxchan+1, maxunit+1, 2) ,dtype='i')

        # eventarrays
        nb_events = { }
        #maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            #maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() !=-1 :
            # read block header
            dataBlockHeader = HeaderReader(fid , DataBlockHeader ).read_f(offset = None)
            if dataBlockHeader is None : break
            chan = dataBlockHeader['Channel']
            unit = dataBlockHeader['Unit']
            n1,n2 = dataBlockHeader['NumberOfWaveforms'] , dataBlockHeader['NumberOfWordsInWaveform']
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp']*2.**32 +
                    dataBlockHeader['TimeStamp'])

            if dataBlockHeader['Type'] == 1:
                nb_spikes[chan,unit] +=1
                wf_sizes[chan,unit,:] = [n1,n2]
                fid.seek(n1*n2*2,1)
            elif dataBlockHeader['Type'] ==4:
                #event
                nb_events[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                #continuous signal
                fid.seek(n2*2, 1)
                if n2> 0:
                    nb_samples[chan] += n2
                if nb_samples[chan] ==0:
                    t_starts[chan] = time
                    

        ## Step 3 : allocate memory, then a second loop for reading (if not lazy)
        if not lazy:
            # allocating mem for signal
            sigarrays = { }
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])
                
            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan+1, maxunit+1) ,dtype=object)
            swfarrays = np.zeros((maxchan+1, maxunit+1) ,dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan,unit] = np.zeros(nb_spikes[chan,unit], dtype = 'f')
                if load_spike_waveform:
                    n1,n2 = wf_sizes[chan, unit,:]
                    swfarrays[chan, unit] = np.zeros( (nb_spikes[chan, unit], n1, n2 ) , dtype = 'f4' )
            pos_spikes = np.zeros(nb_spikes.shape, dtype = 'i')
                    
            # allocating mem for event
            eventpositions = { }
            evarrays = { }
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = np.zeros(nb, dtype = 'f' )
                eventpositions[chan]=0 
                
            fid.seek(start)
            while fid.tell() !=-1 :
                dataBlockHeader = HeaderReader(fid , DataBlockHeader ).read_f(offset = None)
                if dataBlockHeader is None : break
                chan = dataBlockHeader['Channel']
                n1,n2 = dataBlockHeader['NumberOfWaveforms'] , dataBlockHeader['NumberOfWordsInWaveform']
                time = dataBlockHeader['UpperByteOf5ByteTimestamp']*2.**32 + dataBlockHeader['TimeStamp']
                time/= globalHeader['ADFrequency']

                if n2 <0: break
                if dataBlockHeader['Type'] == 1:
                    #spike
                    unit = dataBlockHeader['Unit']
                    pos = pos_spikes[chan,unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1*n2 != 0 :
                        swfarrays[chan,unit][pos,:,:] = np.fromstring( fid.read(n1*n2*2) , dtype = 'i2').reshape(n1,n2).astype('f4')
                    else:
                        fid.seek(n1*n2*2,1)
                    pos_spikes[chan,unit] +=1
                
                elif dataBlockHeader['Type'] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan][pos] = time
                    eventpositions[chan]+= 1

                elif dataBlockHeader['Type'] == 5:
                    #signal
                    data = np.fromstring( fid.read(n2*2) , dtype = 'i2').astype('f4')
                    sigarrays[chan][sample_positions[chan] : sample_positions[chan]+data.size] = data
                    sample_positions[chan] += data.size


        ## Step 4 : create neo objects
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = [ ]
            else:
                times = evarrays[chan]
            ea = EventArray(times*pq.s,
                                            channel_name= eventHeaders[chan]['Name'],
                                            channel_index = chan)
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)
            
        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = [ ]
            else:
                if globalHeader['Version'] ==100 or globalHeader['Version'] ==101 :
                    gain = 5000./(2048*slowChannelHeaders[chan]['Gain']*1000.)
                elif globalHeader['Version'] ==102 :
                    gain = 5000./(2048*slowChannelHeaders[chan]['Gain']*slowChannelHeaders[chan]['PreampGain'])
                elif globalHeader['Version'] >= 103:
                    gain = globalHeader['SlowMaxMagnitudeMV']/(.5*(2**globalHeader['BitsPerSpikeSample'])*\
                                                        slowChannelHeaders[chan]['Gain']*slowChannelHeaders[chan]['PreampGain'])
                signal = sigarrays[chan]*gain
            anasig =  AnalogSignal(signal*pq.V,
                                                        sampling_rate = float(slowChannelHeaders[chan]['ADFreq'])*pq.Hz,
                                                        t_start = t_starts[chan]*pq.s,
                                                        channel_index = slowChannelHeaders[chan]['Channel'],
                                                        channel_name = slowChannelHeaders[chan]['Name'],
                                                        )
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)
            
        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0: continue
            if lazy:
                times = [ ]
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan,unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader['Version'] <103:
                        gain = 3000./(2048*dspChannelHeaders[chan]['Gain']*1000.)
                    elif globalHeader['Version'] >=103 and globalHeader['Version'] <105:
                        gain = globalHeader['SpikeMaxMagnitudeMV']/(.5*2.**(globalHeader['BitsPerSpikeSample'])*1000.)
                    elif globalHeader['Version'] >105:
                        gain = globalHeader['SpikeMaxMagnitudeMV']/(.5*2.**(globalHeader['BitsPerSpikeSample'])*globalHeader['SpikePreAmpGain'])                    
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(times,
                                            units='s', t_stop=t_stop*pq.s,
                                            waveforms = waveforms,
                                            )
            sptr.annotate(unit_name = dspChannelHeaders[chan]['Name'])
            sptr.annotate(channel_index = chan)
            if lazy:
                sptr.lazy_shape = nb_spikes[chan,unit]
            seg.spiketrains.append(sptr)
        
        create_many_to_one_relationship(seg)
        return seg                          
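
# The reader above is organised as a two-pass scan of the data blocks: a first
# pass only counts items per channel, memory is allocated once, and a second pass
# fills the preallocated arrays. A generic, self-contained sketch of that pattern
# (assumed toy data, not Plexon-specific):
import numpy as np

blocks = [('spike', 0), ('spike', 1), ('spike', 0)]    # stand-in for block headers

counts = {}                                            # pass 1: count per channel
for kind, chan in blocks:
    counts[chan] = counts.get(chan, 0) + 1

arrays = {chan: np.zeros(n) for chan, n in counts.items()}   # allocate once
pos = {chan: 0 for chan in counts}

for t, (kind, chan) in enumerate(blocks):              # pass 2: fill
    arrays[chan][pos[chan]] = t                        # toy 'timestamp' = block index
    pos[chan] += 1
print(arrays)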
Ejemplo n.º 40
    def read_segment(self, lazy=False, cascade=True):
        seg = Segment(file_origin=os.path.basename(self.filename), )

        if not cascade:
            return seg

        fid = open(self.filename, 'rb')

        headertext = fid.read(2048)
        if PY3K:
            headertext = headertext.decode('ascii')
        header = {}
        for line in headertext.split('\r\n'):
            if '=' not in line: continue
            #print '#' , line , '#'
            key, val = line.split('=')
            if key in [
                    'NC', 'NR', 'NBH', 'NBA', 'NBD', 'ADCMAX', 'NP', 'NZ'
            ]:
                val = int(val)
            elif key in [
                    'AD',
                    'DT',
            ]:
                val = val.replace(',', '.')
                val = float(val)
            header[key] = val

        if not lazy:
            data = np.memmap(
                self.filename,
                np.dtype('i2'),
                'r',
                #shape = (header['NC'], header['NP']) ,
                shape=(
                    header['NP'] // header['NC'],  # integer division keeps the shape an int
                    header['NC'],
                ),
                offset=header['NBH'])

        for c in range(header['NC']):

            YCF = float(header['YCF%d' % c].replace(',', '.'))
            YAG = float(header['YAG%d' % c].replace(',', '.'))
            YZ = float(header['YZ%d' % c].replace(',', '.'))

            ADCMAX = header['ADCMAX']
            AD = header['AD']
            DT = header['DT']

            if 'TU' in header:
                if header['TU'] == 'ms':
                    DT *= .001

            unit = header['YU%d' % c]
            try:
                unit = pq.Quantity(1., unit)
            except:
                unit = pq.Quantity(1., '')

            if lazy:
                signal = [] * unit
            else:
                signal = (data[:, header['YO%d' % c]].astype('f4') -
                          YZ) * AD / (YCF * YAG * (ADCMAX + 1)) * unit

            ana = AnalogSignal(signal,
                               sampling_rate=pq.Hz / DT,
                               t_start=0. * pq.s,
                               name=header['YN%d' % c],
                               channel_index=c)
            if lazy:
                ana.lazy_shape = header['NP'] // header['NC']

            seg.analogsignals.append(ana)

        create_many_to_one_relationship(seg)
        return seg
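
# A small worked example (all values are assumptions, not from any real file) of
# the ADC-to-physical-units conversion used above:
#     value = (raw - YZ) * AD / (YCF * YAG * (ADCMAX + 1))
raw, YZ = 2048.0, 0.0          # raw ADC sample and zero offset
AD = 10.0                      # A/D converter input range
YCF, YAG = 0.001, 1.0          # calibration factor and amplifier gain
ADCMAX = 4095                  # full-scale ADC count
print((raw - YZ) * AD / (YCF * YAG * (ADCMAX + 1)))   # -> 5000.0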
Ejemplo n.º 41
    def read_segment(self, lazy=False, cascade=True):

        ## Read header file (vhdr)
        header = readBrainSoup(self.filename)

        assert header['Common Infos'][
            'DataFormat'] == 'BINARY', NotImplementedError
        assert header['Common Infos'][
            'DataOrientation'] == 'MULTIPLEXED', NotImplementedError
        nb_channel = int(header['Common Infos']['NumberOfChannels'])
        sampling_rate = 1.e6 / float(
            header['Common Infos']['SamplingInterval']) * pq.Hz

        fmt = header['Binary Infos']['BinaryFormat']
        fmts = {
            'INT_16': np.int16,
            'IEEE_FLOAT_32': np.float32,
        }
        assert fmt in fmts, NotImplementedError
        dt = fmts[fmt]

        seg = Segment(file_origin=os.path.basename(self.filename), )
        if not cascade: return seg

        # read binary
        if not lazy:
            binary_file = os.path.splitext(self.filename)[0] + '.eeg'
            sigs = np.memmap(
                binary_file,
                dt,
                'r',
            ).astype('f')

            n = int(sigs.size / nb_channel)
            sigs = sigs[:n * nb_channel]
            sigs = sigs.reshape(n, nb_channel)

        for c in range(nb_channel):
            name, ref, res, units = header['Channel Infos']['Ch%d' %
                                                            (c +
                                                             1, )].split(',')
            units = pq.Quantity(1, units.replace('µ', 'u'))
            if lazy:
                signal = [] * units
            else:
                signal = sigs[:, c] * units
            anasig = AnalogSignal(
                signal=signal,
                channel_index=c,
                name=name,
                sampling_rate=sampling_rate,
            )
            if lazy:
                anasig.lazy_shape = -1
            seg.analogsignals.append(anasig)

        # read marker
        marker_file = os.path.splitext(self.filename)[0] + '.vmrk'
        all_info = readBrainSoup(marker_file)['Marker Infos']
        all_types = []
        times = []
        labels = []
        for i in range(len(all_info)):
            type_, label, pos, size, channel = all_info['Mk%d' %
                                                        (i +
                                                         1, )].split(',')[:5]
            all_types.append(type_)
            times.append(float(pos) / sampling_rate.magnitude)
            labels.append(label)
        all_types = np.array(all_types)
        times = np.array(times) * pq.s
        labels = np.array(labels, dtype='S')
        for type_ in np.unique(all_types):
            ind = type_ == all_types
            if lazy:
                ea = EventArray(name=str(type_))
                ea.lazy_shape = -1
            else:
                ea = EventArray(
                    times=times[ind],
                    labels=labels[ind],
                    name=str(type_),
                )
            seg.eventarrays.append(ea)

        create_many_to_one_relationship(seg)
        return seg
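
# A self-contained sketch (assumed toy data) of the MULTIPLEXED layout handled
# above: samples are interleaved channel by channel, so the flat array is trimmed
# to a whole number of frames and reshaped to (n_samples, n_channels).
import numpy as np

nb_channel = 3
flat = np.arange(10, dtype=np.float32)    # stand-in for the memmapped .eeg data
n = int(flat.size / nb_channel)           # keep whole frames only
sigs = flat[:n * nb_channel].reshape(n, nb_channel)
print(sigs[:, 0])                         # samples of the first channel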
Ejemplo n.º 42
    def read_block(
        self,
        lazy=False,
        cascade=True,
    ):
        bl = Block(file_origin=os.path.basename(self.filename), )
        if not cascade:
            return bl

        fid = open(self.filename, 'rb')

        headertext = fid.read(1024)
        if PY3K:
            headertext = headertext.decode('ascii')
        header = {}
        for line in headertext.split('\r\n'):
            if '=' not in line: continue
            #print '#' , line , '#'
            key, val = line.split('=')
            if key in [
                    'NC',
                    'NR',
                    'NBH',
                    'NBA',
                    'NBD',
                    'ADCMAX',
                    'NP',
                    'NZ',
            ]:
                val = int(val)
            elif key in [
                    'AD',
                    'DT',
            ]:
                val = val.replace(',', '.')
                val = float(val)
            header[key] = val

        #print header

        SECTORSIZE = 512
        # loop for record number
        for i in range(header['NR']):
            #print 'record ',i
            offset = 1024 + i * (SECTORSIZE * header['NBD'] + 1024)

            # read analysis zone
            analysisHeader = HeaderReader(
                fid, AnalysisDescription).read_f(offset=offset)
            #print analysisHeader

            # read data
            NP = (SECTORSIZE * header['NBD']) // 2
            NP = NP - NP % header['NC']
            NP = NP // header['NC']
            if not lazy:
                data = np.memmap(
                    self.filename,
                    np.dtype('i2'),
                    'r',
                    #shape = (header['NC'], header['NP']) ,
                    shape=(
                        NP,
                        header['NC'],
                    ),
                    offset=offset + header['NBA'] * SECTORSIZE)

            # create a segment
            seg = Segment()
            bl.segments.append(seg)

            for c in range(header['NC']):

                unit = header['YU%d' % c]
                try:
                    unit = pq.Quantity(1., unit)
                except:
                    unit = pq.Quantity(1., '')

                if lazy:
                    signal = [] * unit
                else:
                    YG = float(header['YG%d' % c].replace(',', '.'))
                    ADCMAX = header['ADCMAX']
                    VMax = analysisHeader['VMax'][c]
                    signal = data[:, header['YO%d' % c]].astype(
                        'f4') * VMax / ADCMAX / YG * unit
                anaSig = AnalogSignal(
                    signal,
                    sampling_rate=pq.Hz / analysisHeader['SamplingInterval'],
                    t_start=analysisHeader['TimeRecorded'] * pq.s,
                    name=header['YN%d' % c],
                    channel_index=c)

                if lazy:
                    anaSig.lazy_shape = NP
                seg.analogsignals.append(anaSig)

        fid.close()

        create_many_to_one_relationship(bl)
        return bl
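
# A small sketch (sector counts are assumptions) of the record layout used above:
# record i starts at 1024 + i * (SECTORSIZE * NBD + 1024), and its data zone
# begins NBA sectors after the start of the record.
SECTORSIZE = 512
NBD, NBA = 4, 2                            # hypothetical header values
for i in range(3):
    record_offset = 1024 + i * (SECTORSIZE * NBD + 1024)
    data_offset = record_offset + NBA * SECTORSIZE
    print(i, record_offset, data_offset)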
Ejemplo n.º 43
    def read_segment(
        self,
        lazy=False,
        cascade=True,
    ):

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        #~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=globalHeader['version'])
        seg.annotate(comment=globalHeader['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(globalHeader['nvar']):
            entityHeader = HeaderReader(
                fid, EntityHeader).read_f(offset=offset + i * 208)
            entityHeader['name'] = entityHeader['name'].replace('\x00', '')

            #print 'i',i, entityHeader['type']

            if entityHeader['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    spike_times = spike_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                    t_stop=globalHeader['tend'] / globalHeader['freq'] * pq.s,
                    name=entityHeader['name'],
                )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index=entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    event_times = event_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = EventArray(times=event_times,
                                  labels=labels,
                                  channel_name=entityHeader['name'])
                if lazy:
                    evar.lazy_shape = entityHeader['n']
                seg.eventarrays.append(evar)

            if entityHeader['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    start_times = start_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                    stop_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4,
                    )
                    stop_times = stop_times.astype(
                        'f') / globalHeader['freq'] * pq.s
                epar = EpochArray(times=start_times,
                                  durations=stop_times - start_times,
                                  labels=np.array([''] * start_times.size,
                                                  dtype='S'),
                                  channel_name=entityHeader['name'])
                if lazy:
                    epar.lazy_shape = entityHeader['n']
                seg.epocharrays.append(epar)

            if entityHeader['type'] == 3:
                # spiketrain and waveforms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    spike_times = spike_times.astype(
                        'f8') / globalHeader['freq'] * pq.s

                    waveforms = np.memmap(
                        self.filename,
                        np.dtype('i2'),
                        'r',
                        shape=(entityHeader['n'], 1,
                               entityHeader['NPointsWave']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4,
                    )
                    waveforms = (waveforms.astype('f') * entityHeader['ADtoMV']
                                 + entityHeader['MVOffset']) * pq.mV
                t_stop = globalHeader['tend'] / globalHeader['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                    #~ t_stop = max(globalHeader['tend']/globalHeader['freq']*pq.s,max(spike_times)),
                    t_stop=t_stop,
                    name=entityHeader['name'],
                    waveforms=waveforms,
                    sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms,
                )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index=entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 4:
                # popvectors
                pass

            if entityHeader['type'] == 5:
                # analog

                timestamps = np.memmap(
                    self.filename,
                    np.dtype('i4'),
                    'r',
                    shape=(entityHeader['n']),
                    offset=entityHeader['offset'],
                )
                timestamps = timestamps.astype('f8') / globalHeader['freq']
                fragmentStarts = np.memmap(
                    self.filename,
                    np.dtype('i4'),
                    'r',
                    shape=(entityHeader['n']),
                    offset=entityHeader['offset'],
                )
                fragmentStarts = fragmentStarts.astype(
                    'f8') / globalHeader['freq']
                t_start = timestamps[0] - fragmentStarts[0] / float(
                    entityHeader['WFrequency'])
                del timestamps, fragmentStarts

                if lazy:
                    signal = [] * pq.mV
                else:
                    signal = np.memmap(
                        self.filename,
                        np.dtype('i2'),
                        'r',
                        shape=(entityHeader['NPointsWave']),
                        offset=entityHeader['offset'],
                    )
                    signal = signal.astype('f')
                    signal *= entityHeader['ADtoMV']
                    signal += entityHeader['MVOffset']
                    signal = signal * pq.mV

                anaSig = AnalogSignal(
                    signal=signal,
                    t_start=t_start * pq.s,
                    sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                    name=entityHeader['name'],
                    channel_index=entityHeader['WireNumber'])
                if lazy:
                    anaSig.lazy_shape = entityHeader['NPointsWave']
                seg.analogsignals.append(anaSig)

            if entityHeader['type'] == 6:
                # markers  : TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    times = times.astype('f8') / globalHeader['freq'] * pq.s
                    fid.seek(entityHeader['offset'] + entityHeader['n'] * 4)
                    markertype = fid.read(64).replace('\x00', '')
                    labels = np.memmap(
                        self.filename,
                        np.dtype('S' + str(entityHeader['MarkerLength'])),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4 +
                        64)
                ea = EventArray(times=times,
                                labels=labels.view(np.ndarray),
                                name=entityHeader['name'],
                                channel_index=entityHeader['WireNumber'],
                                marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entityHeader['n']
                seg.eventarrays.append(ea)

        create_many_to_one_relationship(seg)
        return seg
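
# A small reference sketch, derived only from the comments in the reader above, of
# how the entity 'type' field is dispatched to neo objects.
NEX_ENTITY_TYPES = {
    0: 'neuron spike times      -> SpikeTrain',
    1: 'event timestamps        -> EventArray',
    2: 'intervals               -> EpochArray',
    3: 'spike times + waveforms -> SpikeTrain with waveforms',
    4: 'population vectors      -> (skipped)',
    5: 'continuous analog data  -> AnalogSignal',
    6: 'markers                 -> EventArray',
}
for code, meaning in sorted(NEX_ENTITY_TYPES.items()):
    print(code, meaning)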