Example #1
def proc_src_units(srcfile, filename):
    '''Get the units in an src file that has been processed by the official
    matlab function.  See proc_src for details'''
    all_units = []
    un_unit = Group(name='UnassignedSpikes', file_origin=filename,
                    elliptic=[], boundaries=[], timestamp=[], max_valid=[])

    all_units.append(un_unit)

    sortInfo = srcfile['sortInfo'][0, 0]
    timeslice = sortInfo['timeslice'][0, 0]
    maxValid = timeslice['maxValid'][0, 0]
    cluster = timeslice['cluster'][0, 0]
    if len(cluster):
        maxValid = maxValid[0, 0]
        elliptic = [res.flatten() for res in cluster['elliptic'].flatten()]
        boundaries = [res.flatten() for res in cluster['boundaries'].flatten()]
        fullclust = zip(elliptic, boundaries)
        for ielliptic, iboundaries in fullclust:
            unit = Group(file_origin=filename,
                         boundaries=[iboundaries],
                         elliptic=[ielliptic], timeStamp=[],
                         max_valid=[maxValid])
            all_units.append(unit)
    return all_units
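
A minimal usage sketch, assuming the .src data was saved to a numpy .npz file in the same way as the other proc_* helpers here; the file name and the exact layout of the saved MATLAB struct are assumptions.

import numpy as np

with np.load('file1_src_py3.npz', allow_pickle=True) as srcobj:   # placeholder path
    srcfile = list(srcobj.items())[0][1]                          # assumed npz layout

units = proc_src_units(srcfile, 'file1.src')
print(units[0].name, len(units) - 1)   # 'UnassignedSpikes' plus any sorted units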
Example #2
def proc_dam(filename):
    '''Load a dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename, allow_pickle=True) as damobj:
        damfile = list(damobj.items())[0][1].flatten()

    filename = os.path.basename(filename[:-12] + '.dam')

    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    gr = Group(file_origin=filename)

    block.groups.append(gr)

    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal * pq.mV,
                           t_start=timestamp * pq.d,
                           file_origin=filename,
                           sampling_period=1. * pq.s)
        segment = Segment(file_origin=filename, index=stimIndex, **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)
        gr.analogsignals.append(sig)
        sig.group = gr

    block.create_many_to_one_relationship()

    return block
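
A minimal usage sketch for proc_dam; 'file1_dam_py3.npz' is a placeholder name matching the pattern described in the docstring.

ref_block = proc_dam('file1_dam_py3.npz')          # placeholder file name
print(ref_block.file_origin)                       # expected: 'file1.dam'
print(len(ref_block.segments), len(ref_block.groups[0].analogsignals))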
Example #3
def random_group(candidates):
    if len(candidates) == 0:
        return None
    elif len(candidates) == 1:
        objects = candidates
    else:
        k = random.randint(1, len(candidates))
        objects = random.sample(candidates, k)
    obj = Group(objects=objects,
                name=random_string(),
                **random_annotations(5))
    return obj
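
An illustrative call, assuming random_string and random_annotations come from the same test-helper module; the candidate SpikeTrains below are made up for the example.

import quantities as pq
from neo.core import SpikeTrain

candidates = [SpikeTrain([1, 2, 3] * pq.s, t_stop=10.0 * pq.s) for _ in range(4)]
grp = random_group(candidates)
if grp is not None:
    print(grp.name, len(grp.spiketrains))   # the group holds between 1 and 4 trains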
Example #4
    def read_block(self, lazy=False, **kargs):
        '''
        Reads a block from the simple spike data file "fname" generated
        with BrainWare
        '''
        assert not lazy, 'Do not support lazy'

        # there are no kwargs implemented so far.  If someone tries to pass
        # them they are expecting them to do something or making a mistake,
        # neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'arguments implemented yet')
        self._fsrc = None

        self._blk = Block(file_origin=self._filename)
        block = self._blk

        # create the objects to store other objects
        self.__unit_group = Group(file_origin=self._filename)
        block.groups.append(self.__unit_group)

        # initialize values
        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        # open the file
        with open(self._path, 'rb') as self._fsrc:
            res = True
            # while the file is not done keep reading segments
            while res:
                res = self.__read_id()

        block.create_many_to_one_relationship()

        # cleanup attributes
        self._fsrc = None

        self._blk = None

        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        return block
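
A hedged usage sketch: this read_block looks like it belongs to neo's BrainwareSrcIO, so a call through that class would be along these lines ('file1.src' is a placeholder path).

from neo.io import BrainwareSrcIO

block = BrainwareSrcIO(filename='file1.src').read_block()
print(len(block.segments), len(block.groups))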
Example #5
    def read_block(self, lazy=False, **kargs):
        '''
        Reads a block from the raw data file "fname" generated
        with BrainWare
        '''
        assert not lazy, 'Do not support lazy'

        # there are no kwargs implemented so far.  If someone tries to pass
        # them they are expecting them to do something or making a mistake,
        # neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'arguments implemented yet')
        self._fsrc = None

        block = Block(file_origin=self._filename)

        # create the objects to store other objects
        gr = Group(file_origin=self._filename)

        # load objects into their containers
        block.groups.append(gr)

        # open the file
        with open(self._path, 'rb') as fobject:
            # while the file is not done keep reading segments
            while True:
                seg = self._read_segment(fobject)
                # if there are no more Segments, stop
                if not seg:
                    break

                # store the segment and signals
                block.segments.append(seg)
                gr.analogsignals.append(seg.analogsignals[0])

        # remove the file object
        self._fsrc = None

        block.create_many_to_one_relationship()
        return block
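
A comparison sketch in the spirit of the proc_dam docstring: the block read by BrainwareDamIO against the reference block built from the MATLAB conversion (both file names are placeholders).

from neo.io import BrainwareDamIO

io_block = BrainwareDamIO(filename='file1.dam').read_block()
ref_block = proc_dam('file1_dam_py3.npz')
assert len(io_block.segments) == len(ref_block.segments)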
Example #6
    def read_block(self, lazy=False):
        """Returns a Block containing spike information.

        There is no obvious way to infer the segment boundaries from
        raw spike times, so for now all spike times are returned in one
        big segment. The way around this would be to specify the segment
        boundaries, and then change this code to put the spikes in the right
        segments.
        """
        assert not lazy, 'Do not support lazy'

        # Create block and segment to hold all the data
        block = Block()
        # Search data directory for KlustaKwik files.
        # If nothing found, return empty block
        self._fetfiles = self._fp.read_filenames('fet')
        self._clufiles = self._fp.read_filenames('clu')
        if len(self._fetfiles) == 0:
            return block

        # Create a single segment to hold all of the data
        seg = Segment(name='seg0', index=0, file_origin=self.filename)
        block.segments.append(seg)

        # Load spike times from each group and store in a dict, keyed
        # by group number
        self.spiketrains = dict()
        for group in sorted(self._fetfiles.keys()):
            # Load spike times
            fetfile = self._fetfiles[group]
            spks, features = self._load_spike_times(fetfile)

            # Load cluster ids or generate
            if group in self._clufiles:
                clufile = self._clufiles[group]
                uids = self._load_unit_id(clufile)
            else:
                # unclustered data, assume all zeros
                uids = np.zeros(spks.shape, dtype=np.int32)

            # error check
            if len(spks) != len(uids):
                raise ValueError("lengths of fet and clu files are different")

            # Create Group for each cluster
            unique_unit_ids = np.unique(uids)
            for unit_id in sorted(unique_unit_ids):
                # Initialize the unit
                u = Group(name=('unit %d from group %d' % (unit_id, group)),
                          index=unit_id, group=group)

                # Initialize a new SpikeTrain for the spikes from this unit
                st = SpikeTrain(
                    times=spks[uids == unit_id] / self.sampling_rate,
                    units='sec', t_start=0.0,
                    t_stop=spks.max() / self.sampling_rate,
                    name=('unit %d from group %d' % (unit_id, group)))
                st.annotations['cluster'] = unit_id
                st.annotations['group'] = group

                # put features in
                if len(features) != 0:
                    st.annotations['waveform_features'] = features

                # Link
                u.add(st)
                seg.spiketrains.append(st)

        block.create_many_to_one_relationship()
        return block
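
A hedged usage sketch: this reader appears to be neo's KlustaKwikIO, so loading would look roughly like this ('mydata' stands in for the basename shared by the .fet/.clu files, and the sampling rate is an assumption).

from neo.io import KlustaKwikIO

io = KlustaKwikIO(filename='mydata', sampling_rate=30000.)
block = io.read_block()
for st in block.segments[0].spiketrains:
    print(st.name, st.annotations['cluster'], st.annotations['group'])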
Example #7
def proc_f32(filename):
    '''Load an f32 file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareF32IO to
    make sure BrainwareF32IO is working properly

    block = proc_f32(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_f32_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.f32', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_f32_py2.npz'
             f32 file name = 'file1.f32'
    '''

    filenameorig = os.path.basename(filename[:-12] + '.f32')

    # create the objects to store other objects
    block = Block(file_origin=filenameorig)
    gr = Group(file_origin=filenameorig)
    block.groups.append(gr)

    try:
        with np.load(filename, allow_pickle=True) as f32obj:
            f32file = list(f32obj.items())[0][1].flatten()
    except OSError as exc:
        if 'as a pickle' in str(exc):
            block.create_many_to_one_relationship()
            return block
        else:
            raise

    sweeplengths = [res[0, 0].tolist() for res in f32file['sweeplength']]
    stims = [res.flatten().tolist() for res in f32file['stim']]

    sweeps = [res['spikes'].flatten() for res in f32file['sweep'] if res.size]

    fullf32 = zip(sweeplengths, stims, sweeps)
    for sweeplength, stim, sweep in fullf32:
        for trainpts in sweep:
            if trainpts.size:
                trainpts = trainpts.flatten().astype('float32')
            else:
                trainpts = []

            paramnames = ['Param%s' % i for i in range(len(stim))]
            params = dict(zip(paramnames, stim))
            train = SpikeTrain(trainpts,
                               units=pq.ms,
                               t_start=0,
                               t_stop=sweeplength,
                               file_origin=filenameorig)

            segment = Segment(file_origin=filenameorig, **params)
            segment.spiketrains = [train]
            gr.spiketrains.append(train)
            block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
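
A comparison sketch following the docstring: the reference block from the MATLAB conversion against the block read by BrainwareF32IO (both paths are placeholders).

from neo.io import BrainwareF32IO

ref_block = proc_f32('file1_f32_py3.npz')
io_block = BrainwareF32IO(filename='file1.f32').read_block()
assert len(io_block.segments) == len(ref_block.segments)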
Example #8
    def read_block(
        self,
        lazy=False,
        get_waveforms=True,
        cluster_group=None,
        raw_data_units='uV',
        get_raw_data=False,
    ):
        """
        Reads a block with segments and groups

        Parameters:
        get_waveforms: bool, default = True
            Whether or not to get the waveforms
        get_raw_data: bool, default = False
            Whether or not to get the raw traces
        raw_data_units: str, default = "uV"
            SI units of the raw trace according to the voltage_gain given to klusta
        cluster_group: str, default = None
            Which clusters to load; the possibilities are "noise", "unsorted"
            and "good". If None, all clusters are loaded.
        """
        assert not lazy, 'Do not support lazy'

        blk = Block()
        seg = Segment(file_origin=self.filename)
        blk.segments += [seg]
        for model in self.models:
            group_id = model.channel_group
            group_meta = {'group_id': group_id}
            group_meta.update(model.metadata)
            chx = Group(name='channel group #{}'.format(group_id),
                        index=model.channels,
                        **group_meta)
            blk.groups.append(chx)
            clusters = model.spike_clusters
            for cluster_id in model.cluster_ids:
                meta = model.cluster_metadata[cluster_id]
                if cluster_group is None:
                    pass
                elif cluster_group != meta:
                    continue
                sptr = self.read_spiketrain(cluster_id=cluster_id,
                                            model=model,
                                            get_waveforms=get_waveforms,
                                            raw_data_units=raw_data_units)
                sptr.annotations.update({
                    'cluster_group': meta,
                    'group_id': model.channel_group
                })
                unit = Group(cluster_group=meta,
                             group_id=model.channel_group,
                             name='unit #{}'.format(cluster_id))
                unit.add(sptr)
                chx.add(unit)
                seg.spiketrains.append(sptr)
            if get_raw_data:
                ana = self.read_analogsignal(model, units=raw_data_units)
                chx.add(ana)
                seg.analogsignals.append(ana)

        seg.duration = model.duration * pq.s

        blk.create_many_to_one_relationship()
        return blk
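
A hedged usage sketch: this reader matches neo's KwikIO (which needs the klusta package installed); 'experiment.kwik' is a placeholder path.

from neo.io import KwikIO

io = KwikIO(filename='experiment.kwik')
block = io.read_block(get_waveforms=False, cluster_group='good')
print(len(block.groups), len(block.segments[0].spiketrains))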
Example #9
    def read_block(self, block_index=0, lazy=False,
                   create_group_across_segment=None,
                   signal_group_mode=None, load_waveforms=False):
        """
        :param block_index: int, default 0. In the case of several blocks, block_index can be specified.

        :param lazy: False by default.

        :param create_group_across_segment: bool or dict
            If True :
              * Create a neo.Group to group AnalogSignal segments
              * Create a neo.Group to group SpikeTrain across segments
              * Create a neo.Group to group Event across segments
              * Create a neo.Group to group Epoch across segments
            With a dict the behavior can be controlled more finely
            create_group_across_segment = { 'AnalogSignal': True, 'SpikeTrain': False, ...}

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depends on the IO):
        This controls the behavior for grouping channels into AnalogSignal objects.
            * 'split-all': each channel gives its own AnalogSignal
            * 'group-by-same-units': all channels sharing the same quantity units are grouped
              into a 2D AnalogSignal

        :param load_waveforms: False by default. Controls whether SpikeTrain.waveforms is None or not.

        """

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode
            if self._default_group_mode_have_change_in_0_9:
                warnings.warn('default "signal_group_mode" has changed in version 0.9: '
                              'now all channels are grouped together in AnalogSignal')

        obj_types = ['AnalogSignal', 'SpikeTrain', 'Event', 'Epoch']
        if create_group_across_segment is None:
            # @andrew @ julia @michael ?
            # I think here the default None could give this
            create_group_across_segment = {
                'AnalogSignal': True,  # because this mimics the old ChannelIndex for AnalogSignals
                'SpikeTrain': False,  # False by default because it can create too many objects for simulations
                'Event': False,  # not implemented yet
                'Epoch': False,  # not implemented yet
            }
        elif isinstance(create_group_across_segment, bool):
            # bool to dict
            v = create_group_across_segment
            create_group_across_segment = {k: v for k in obj_types}
        elif isinstance(create_group_across_segment, dict):
            # put False to missing keys
            create_group_across_segment = {k: create_group_across_segment.get(k, False) for k in obj_types}
        else:
            raise ValueError('create_group_across_segment must be bool or dict')

        # annotations
        bl_annotations = dict(self.raw_annotations['blocks'][block_index])
        bl_annotations.pop('segments')
        bl_annotations = check_annotations(bl_annotations)

        bl = Block(**bl_annotations)

        # Group for AnalogSignals
        if create_group_across_segment['AnalogSignal']:
            all_channels = self.header['signal_channels']
            channel_indexes_list = self.get_group_signal_channel_indexes()
            sig_groups = []
            for channel_index in channel_indexes_list:
                subgroups = self._make_signal_channel_subgroups(
                    channel_index, signal_group_mode=signal_group_mode)
                for i, (ind_within, ind_abs) in subgroups.items():
                    group = Group(name='AnalogSignal group {}'.format(i))
                    # @andrew @ julia @michael : do we annotate groups across segments with these arrays ?
                    group.annotate(ch_names=all_channels[ind_abs]['name'].astype('U'))  # ??
                    group.annotate(channel_ids=all_channels[ind_abs]['id'])  # ??
                    bl.groups.append(group)
                    sig_groups.append(group)

        if create_group_across_segment['SpikeTrain']:
            unit_channels = self.header['unit_channels']
            st_groups = []
            for c in range(unit_channels.size):
                group = Group(name='SpikeTrain group {}'.format(c))
                group.annotate(unit_name=unit_channels[c]['name'])
                group.annotate(unit_id=unit_channels[c]['id'])
                unit_annotations = self.raw_annotations['unit_channels'][c]
                unit_annotations = check_annotations(unit_annotations)
                group.annotate(**unit_annotations)
                bl.groups.append(group)
                st_groups.append(group)

        if create_group_across_segment['Event']:
            # @andrew @ julia @michael :
            # Do we need this ? I guess yes
            raise NotImplementedError()

        if create_group_across_segment['Epoch']:
            # @andrew @ julia @michael :
            # Do we need this ? I guess yes
            raise NotImplementedError()

        # Read all segments
        for seg_index in range(self.segment_count(block_index)):
            seg = self.read_segment(block_index=block_index, seg_index=seg_index,
                                    lazy=lazy, signal_group_mode=signal_group_mode,
                                    load_waveforms=load_waveforms)
            bl.segments.append(seg)

        # create links between the groups (across segments) and the data objects
        for seg in bl.segments:
            if create_group_across_segment['AnalogSignal']:
                for c, anasig in enumerate(seg.analogsignals):
                    sig_groups[c].add(anasig)

            if create_group_across_segment['SpikeTrain']:
                for c, sptr in enumerate(seg.spiketrains):
                    st_groups[c].add(sptr)

        bl.create_many_to_one_relationship()

        return bl
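
An illustrative call against a RawIO-based reader; Spike2IO is just one such IO and the file path is a placeholder.

from neo.io import Spike2IO

io = Spike2IO(filename='recording.smr')
block = io.read_block(
    signal_group_mode='group-by-same-units',
    create_group_across_segment={'AnalogSignal': True, 'SpikeTrain': True},
)
print([g.name for g in block.groups])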
Example #10
    def test__issue_285(self):
        # SpikeTrain
        train = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
        unit = Group()
        unit.add(train)

        epoch = Epoch(np.array([0, 10, 20]),
                      np.array([2, 2, 2]),
                      np.array(["a", "b", "c"]),
                      units="ms")

        blk = Block()
        seg = Segment()
        seg.spiketrains.append(train)
        seg.epochs.append(epoch)
        epoch.segment = seg
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]

        self.assertIsInstance(r_seg.epochs[0], Epoch)
        os.remove('blk.pkl')

        # Epoch
        epoch = Epoch(times=np.arange(0, 30, 10) * pq.s,
                      durations=[10, 5, 7] * pq.ms,
                      labels=np.array(['btn0', 'btn1', 'btn2'], dtype='U'))
        epoch.segment = Segment()
        blk = Block()
        seg = Segment()
        seg.epochs.append(epoch)
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.epochs[0].segment, Segment)
        os.remove('blk.pkl')

        # Event
        event = Event(np.arange(0, 30, 10) * pq.s,
                      labels=np.array(['trig0', 'trig1', 'trig2'], dtype='U'))
        event.segment = Segment()

        blk = Block()
        seg = Segment()
        seg.events.append(event)
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.events[0].segment, Segment)
        os.remove('blk.pkl')

        # IrregularlySampledSignal
        signal = IrregularlySampledSignal(
            [0.0, 1.23, 6.78], [1, 2, 3], units='mV', time_units='ms')
        signal.segment = Segment()

        blk = Block()
        seg = Segment()
        seg.irregularlysampledsignals.append(signal)
        blk.segments.append(seg)
        blk.segments[0].block = blk

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.irregularlysampledsignals[0].segment, Segment)
        os.remove('blk.pkl')
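
A small round-trip helper, sketched to factor out the write/read/remove pattern the test repeats; 'blk.pkl' stays as the temporary path.

import os
from neo.io import PickleIO

def pickle_roundtrip(block, path="blk.pkl"):
    # write the block, read it back, and clean up the temporary file
    PickleIO(filename=path).write(block)
    try:
        return PickleIO(filename=path).read_block()
    finally:
        os.remove(path)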