Example #1
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the simple spike data file "fname" generated
        with BrainWare
        '''

        # no keyword arguments are implemented so far.  If someone tries to
        # pass them, they either expect them to do something or are making a
        # mistake, neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'arguments implemented yet')
        self._fsrc = None
        self.__lazy = lazy

        self._blk = Block(file_origin=self._filename)
        block = self._blk

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        rcg = RecordingChannelGroup(file_origin=self._filename)
        rcg.channel_indexes = np.array([], dtype=int)
        rcg.channel_names = np.array([], dtype='S')
        self.__unit = Unit(file_origin=self._filename)

        # load objects into their containers
        block.recordingchannelgroups.append(rcg)
        rcg.units.append(self.__unit)

        # initialize values
        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        # open the file
        with open(self._path, 'rb') as self._fsrc:
            res = True
            # while the file is not done keep reading segments
            while res:
                res = self.__read_id()

        create_many_to_one_relationship(block)

        # cleanup attributes
        self._fsrc = None
        self.__lazy = False

        self._blk = None

        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        return block
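
A minimal usage sketch for this reader, assuming (as the docstring suggests) that the method belongs to neo's BrainwareF32IO in the 0.3-era API; the file name is hypothetical:

    from neo.io import BrainwareF32IO

    # read the whole file eagerly; lazy=True would defer loading the spike data
    reader = BrainwareF32IO(filename='data.f32')
    block = reader.read_block(lazy=False, cascade=True)
    for seg in block.segments:
        print(seg.spiketrains)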
Example #2
def proc_dam(filename):
    '''Load a dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly.

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        damfile = list(damobj.items())[0][1].flatten()  # items() is a view in Python 3

    filename = os.path.basename(filename[:-12] + '.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    rcg = RecordingChannelGroup(file_origin=filename)
    chan = RecordingChannel(file_origin=filename, index=0, name='Chan1')
    rcg.channel_indexes = np.array([1])
    rcg.channel_names = np.array(['Chan1'], dtype='S')

    block.recordingchannelgroups.append(rcg)
    rcg.recordingchannels.append(chan)

    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal * pq.mV,
                           t_start=timestamp * pq.d,
                           file_origin=filename,
                           sampling_period=1. * pq.s)
        segment = Segment(file_origin=filename, index=stimIndex, **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    create_many_to_one_relationship(block)

    return block
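
A hedged sketch of the comparison workflow the docstring describes; the file names are hypothetical and follow the '*_dam_py?.npz' pattern explained above:

    from neo.io import BrainwareDamIO

    npz_block = proc_dam('file1_dam_py3.npz')
    io_block = BrainwareDamIO(filename='file1.dam').read_block()
    # both blocks should now describe the same recording
    assert npz_block.file_origin == io_block.file_origin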
Example #5
    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the raw data file "fname" generated
        with BrainWare
        '''

        # no keyword arguments are implemented so far.  If someone tries to
        # pass them, they either expect them to do something or are making a
        # mistake, neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'arguments implemented yet')
        self._fsrc = None

        block = Block(file_origin=self._filename)

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        rcg = RecordingChannelGroup(file_origin=self._filename)
        rchan = RecordingChannel(file_origin=self._filename,
                                 index=1,
                                 name='Chan1')

        # load objects into their containers
        rcg.recordingchannels.append(rchan)
        block.recordingchannelgroups.append(rcg)
        rcg.channel_indexes = np.array([1])
        rcg.channel_names = np.array(['Chan1'], dtype='S')

        # open the file
        with open(self._path, 'rb') as fobject:
            # while the file is not done keep reading segments
            while True:
                seg = self._read_segment(fobject, lazy)
                # if there are no more Segments, stop
                if not seg:
                    break

                # store the segment and signals
                block.segments.append(seg)
                rchan.analogsignals.append(seg.analogsignals[0])

        # remove the file object
        self._fsrc = None

        create_many_to_one_relationship(block)
        return block
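
A minimal usage sketch, assuming (per the docstring and the single 'Chan1' channel) that this method belongs to neo's BrainwareDamIO; the path is hypothetical:

    from neo.io import BrainwareDamIO

    reader = BrainwareDamIO(filename='file1.dam')
    block = reader.read_block()
    # one Segment per stimulus presentation, each holding one AnalogSignal
    print(len(block.segments), 'segments read')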
Example #7
def proc_f32(filename):
    """Load an f32 file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareF32IO to
    make sure BrainwareF32IO is working properly.

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    """

    filenameorig = os.path.basename(filename[:-12] + ".f32")

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    rcg = RecordingChannelGroup(file_origin=filenameorig)
    rcg.channel_indexes = np.array([], dtype=int)
    rcg.channel_names = np.array([], dtype="S")
    unit = Unit(file_origin=filenameorig)

    # load objects into their containers
    block.recordingchannelgroups.append(rcg)
    rcg.units.append(unit)

    try:
        with np.load(filename) as f32obj:
            f32file = list(f32obj.items())[0][1].flatten()
    except IOError as exc:
        if "as a pickle" in exc.message:
            block.create_many_to_one_relationship()
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file["sweeplength"]]
    stims = [res.flatten().tolist() for res in f32file["stim"]]

    sweeps = [res["spikes"].flatten() for res in f32file["sweep"] if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype("float32")
            else:
                trainpts = []

            paramnames = ["Param%s" % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts, units=pq.ms, t_start=0,
                               t_stop=sweeplength, file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            unit.spiketrains.append(train)
            block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
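
The same comparison pattern can be sketched for this converter as well; BrainwareF32IO and the file names are assumptions following the '*_f32_py?.npz' convention above:

    from neo.io import BrainwareF32IO

    npz_block = proc_f32('file1_f32_py3.npz')
    io_block = BrainwareF32IO(filename='file1.f32').read_block()
    # both blocks should contain the same spike trains
    assert npz_block.file_origin == io_block.file_origin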
Example #9
    def read_block(
        self,
        lazy=False,
        cascade=True,
    ):
        """
        """

        tree = ElementTree.parse(self.filename)
        root = tree.getroot()
        acq = root.find('acquisitionSystem')
        nbits = int(acq.find('nBits').text)
        nbchannel = int(acq.find('nChannels').text)
        sampling_rate = float(acq.find('samplingRate').text) * pq.Hz
        voltage_range = float(acq.find('voltageRange').text)
        #offset = int(acq.find('offset').text)
        amplification = float(acq.find('amplification').text)

        bl = Block(
            file_origin=os.path.basename(self.filename).replace('.xml', ''))
        if cascade:
            seg = Segment()
            bl.segments.append(seg)

            # RC and RCG
            rc_list = []
            for i, xml_rcg in enumerate(
                    root.find('anatomicalDescription').find(
                        'channelGroups').findall('group')):
                rcg = RecordingChannelGroup(name='Group {}'.format(i))
                bl.recordingchannelgroups.append(rcg)
                for xml_rc in xml_rcg:
                    rc = RecordingChannel(index=int(xml_rc.text))
                    rc_list.append(rc)
                    rcg.recordingchannels.append(rc)
                    rc.recordingchannelgroups.append(rcg)
                rcg.channel_indexes = np.array(
                    [rc.index for rc in rcg.recordingchannels], dtype=int)
                rcg.channel_names = np.array(
                    ['Channel{}'.format(rc.index)
                     for rc in rcg.recordingchannels], dtype='S')

            # AnalogSignals
            reader = RawBinarySignalIO(
                filename=self.filename.replace('.xml', '.dat'))
            seg2 = reader.read_segment(
                cascade=True,
                lazy=lazy,
                sampling_rate=sampling_rate,
                t_start=0. * pq.s,
                unit=pq.V,
                nbchannel=nbchannel,
                bytesoffset=0,
                dtype=np.int16 if nbits <= 16 else np.int32,
                rangemin=-voltage_range / 2.,
                rangemax=voltage_range / 2.,
            )
            for s, sig in enumerate(seg2.analogsignals):
                if not lazy:
                    sig /= amplification
                sig.segment = seg
                seg.analogsignals.append(sig)
                rc_list[s].analogsignals.append(sig)

        create_many_to_one_relationship(bl)
        return bl
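
A minimal usage sketch; this method appears to come from neo's NeuroScope-style XML reader, so the class name and file name below are assumptions, and the matching .dat file must sit next to the .xml file:

    from neo.io import NeuroScopeIO

    reader = NeuroScopeIO(filename='session.xml')
    block = reader.read_block(lazy=False, cascade=True)
    for sig in block.segments[0].analogsignals:
        print(sig.shape, sig.sampling_rate)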