def setUp(self):
    spike_time_array = np.array([0.5, 0.6, 0.7, 1.1, 11.2, 23.6, 88.5, 99.2])
    channel_id_array = np.array([0, 0, 1, 2, 1, 0, 2, 0])
    all_channel_ids = (0, 1, 2, 3)
    self.stl_from_array = SpikeTrainList.from_spike_time_array(
        spike_time_array,
        channel_id_array,
        all_channel_ids=all_channel_ids,
        units='ms',
        t_start=0 * pq.ms,
        t_stop=100.0 * pq.ms,
        identifier=["A", "B", "C", "D"]  # annotation
    )
    self.stl_from_obj_list = SpikeTrainList(items=(
        SpikeTrain([0.5, 0.6, 23.6, 99.2], units="ms",
                   t_start=0 * pq.ms, t_stop=100.0 * pq.ms, channel_id=101),
        SpikeTrain([0.0007, 0.0112], units="s",
                   t_start=0 * pq.ms, t_stop=100.0 * pq.ms, channel_id=102),
        SpikeTrain([1100, 88500], units="us",
                   t_start=0 * pq.ms, t_stop=100.0 * pq.ms, channel_id=103),
        SpikeTrain([], units="ms",
                   t_start=0 * pq.ms, t_stop=100.0 * pq.ms, channel_id=104),
    ))
    self.stl_from_obj_list_incl_proxy = SpikeTrainList(items=(
        SpikeTrain([0.5, 0.6, 23.6, 99.2], units="ms",
                   t_start=0 * pq.ms, t_stop=100.0 * pq.ms),
        SpikeTrain([0.0007, 0.0112], units="s",
                   t_start=0 * pq.ms, t_stop=100.0 * pq.ms),
        SpikeTrainProxy(rawio=MockRawIO(), spike_channel_index=0),
        SpikeTrain([], units="ms",
                   t_start=0 * pq.ms, t_stop=100.0 * pq.ms),
    ))
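# A minimal sketch (assuming neo with the neo.core.spiketrainlist module,
# not part of the test suite) of what setUp builds: from_spike_time_array
# groups the flat spike_time_array by channel_id_array, yielding one
# SpikeTrain per entry in all_channel_ids, including an empty train for
# channel 3, which has no spikes.
import numpy as np
import quantities as pq
from neo.core.spiketrainlist import SpikeTrainList

stl = SpikeTrainList.from_spike_time_array(
    np.array([0.5, 0.6, 0.7, 1.1, 11.2, 23.6, 88.5, 99.2]),
    np.array([0, 0, 1, 2, 1, 0, 2, 0]),
    all_channel_ids=(0, 1, 2, 3),
    units='ms', t_start=0 * pq.ms, t_stop=100.0 * pq.ms)

# iterating materializes the per-channel trains; channel 0 is expected
# to give [0.5, 0.6, 23.6, 99.2] ms and channel 3 an empty train
for st in stl:
    print(st.times)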
def test_spade_msip_spiketrainlist(self):
    output_msip = spade.spade(
        SpikeTrainList(self.msip), self.bin_size, self.winlen,
        approx_stab_pars=dict(n_subsets=self.n_subset,
                              stability_thresh=self.stability_thresh),
        n_surr=self.n_surr, alpha=self.alpha, psr_param=self.psr_param,
        stat_corr='no', output_format='patterns')['patterns']
    elements_msip = []
    occ_msip = []
    lags_msip = []
    # collect the spade output
    for out in output_msip:
        elements_msip.append(out['neurons'])
        occ_msip.append(list(out['times'].magnitude))
        lags_msip.append(list(out['lags'].magnitude))
    elements_msip = sorted(elements_msip, key=len)
    occ_msip = sorted(occ_msip, key=len)
    lags_msip = sorted(lags_msip, key=len)
    # check the neurons in the patterns
    assert_array_equal(elements_msip, self.elements_msip)
    # check the occurrence times of the patterns
    assert_array_equal(occ_msip, self.occ_msip)
    # check the lags
    assert_array_equal(lags_msip, self.lags_msip)
def __init__(self, name=None, description=None, file_origin=None,
             file_datetime=None, rec_datetime=None, index=None,
             **annotations):
    '''
    Initialize a new :class:`Segment` instance.
    '''
    super().__init__(name=name, description=description,
                     file_origin=file_origin, **annotations)
    self.spiketrains = SpikeTrainList(segment=self)
    self.file_datetime = file_datetime
    self.rec_datetime = rec_datetime
    self.index = index
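# Usage sketch, assuming the standard neo API: Segment.__init__ binds an
# empty SpikeTrainList to the segment, so seg.spiketrains behaves like a
# list but remains a SpikeTrainList as trains are appended.
import quantities as pq
import neo

seg = neo.Segment(name="trial 1", index=0)
seg.spiketrains.append(
    neo.SpikeTrain([1.0, 5.0, 9.0], units="ms",
                   t_start=0 * pq.ms, t_stop=10 * pq.ms))
print(type(seg.spiketrains).__name__)  # expected: SpikeTrainList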
def test_regression_431(self):
    """
    Addresses issue 431

    This unit test addresses an issue where a SpikeTrainList object was
    not correctly handled by the BinnedSpikeTrain constructor.
    """
    st1 = neo.SpikeTrain(times=np.array([1, 2, 3]) * pq.ms,
                         t_start=0 * pq.ms, t_stop=10 * pq.ms)
    st2 = neo.SpikeTrain(times=np.array([4, 5, 6]) * pq.ms,
                         t_start=0 * pq.ms, t_stop=10 * pq.ms)
    real_list = [st1, st2]
    spiketrainlist = SpikeTrainList([st1, st2])

    real_list_binary = cv.BinnedSpikeTrain(real_list, bin_size=1 * pq.ms)
    spiketrainlist_binary = cv.BinnedSpikeTrain(spiketrainlist,
                                                bin_size=1 * pq.ms)

    assert_array_equal(real_list_binary.to_array(),
                       spiketrainlist_binary.to_array())
def test__filter_none(self):
    for segment in self.segments:
        targ = []
        # collect all data objects in the target segment
        targ.extend(segment.analogsignals)
        targ.extend(segment.epochs)
        targ.extend(segment.events)
        targ.extend(segment.irregularlysampledsignals)
        targ.extend(segment.spiketrains)
        targ.extend(segment.imagesequences)

        # occasionally the random data contain only spike trains;
        # in that case convert the target to a SpikeTrainList
        # to match the output of segment.filter
        if all(isinstance(obj, SpikeTrain) for obj in targ):
            targ = SpikeTrainList(items=targ, segment=segment)

        res0 = segment.filter()
        res1 = segment.filter({})
        res2 = segment.filter([])
        res3 = segment.filter([{}])
        res4 = segment.filter([{}, {}])
        res5 = segment.filter([{}, {}])
        res6 = segment.filter(targdict={})
        res7 = segment.filter(targdict=[])
        res8 = segment.filter(targdict=[{}])
        res9 = segment.filter(targdict=[{}, {}])

        assert_same_sub_schema(res0, targ)
        assert_same_sub_schema(res1, targ)
        assert_same_sub_schema(res2, targ)
        assert_same_sub_schema(res3, targ)
        assert_same_sub_schema(res4, targ)
        assert_same_sub_schema(res5, targ)
        assert_same_sub_schema(res6, targ)
        assert_same_sub_schema(res7, targ)
        assert_same_sub_schema(res8, targ)
        assert_same_sub_schema(res9, targ)
def get_all_spiketrains(container):
    """
    Get all `neo.SpikeTrain` objects from a container.

    The objects can be any list, dict, or other iterable or mapping
    containing spiketrains, as well as any Neo object that can hold
    spiketrains: `neo.Block`, `neo.ChannelIndex`, `neo.Unit`, and
    `neo.Segment`.

    Containers are searched recursively, so the objects can be nested
    (such as a list of blocks).

    Parameters
    ----------
    container : list, tuple, iterable, dict, neo.Block, neo.Segment,
                neo.Unit, neo.ChannelIndex
        The container for the spiketrains.

    Returns
    -------
    neo.SpikeTrainList
        A `neo.SpikeTrainList` object of the unique `neo.SpikeTrain`
        objects in `container`.
    """
    return SpikeTrainList(_get_all_objs(container, 'SpikeTrain'))
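# Hedged usage sketch (block1 and block2 are hypothetical neo.Block
# objects loaded elsewhere): containers may be arbitrarily nested, and
# the result is a flat SpikeTrainList of the unique spike trains.
all_sts = get_all_spiketrains([block1, block2])
print(len(all_sts))  # total number of unique SpikeTrain objects found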
def filter(self, targdict=None, data=True, container=False, recursive=True,
           objects=None, **kwargs):
    """
    Return a list of child objects matching *any* of the search terms
    in either their attributes or annotations.

    Search terms can be provided as keyword arguments or a dictionary,
    either as a positional argument after data or to the argument
    targdict. targdict can also be a list of dictionaries, in which
    case the filters are applied sequentially. If targdict and kwargs
    are both supplied, the targdict filters are applied first, followed
    by the kwarg filters. A targdict of None or {} corresponds to no
    filters applied, therefore returning all child objects.
    Default targdict is None.

    If data is True (default), include data objects.
    If container is True (default False), include container objects.
    If recursive is True (default), descend into child containers for
    objects.

    objects (optional) should be the name of a Neo object type, a neo
    object class, or a list of one or both of these. If specified,
    only these objects will be returned. If not specified, any type of
    object is returned. Default is None.
    Note that if recursive is True, containers not in objects will
    still be descended into. This overrides data and container.

    Examples::

        >>> obj.filter(name="Vm")
        >>> obj.filter(objects=neo.SpikeTrain)
        >>> obj.filter(targdict={'myannotation': 3})
    """
    if isinstance(targdict, str):
        raise TypeError("filtering is based on key-value pairs."
                        " Only a single string was provided.")

    # if objects are specified, get the classes
    if objects:
        data = True
        container = True

    if objects == SpikeTrain:
        children = SpikeTrainList()
    else:
        children = []

    # get the objects we want
    if data:
        if recursive:
            children.extend(self.data_children_recur)
        else:
            children.extend(self.data_children)
    if container:
        if recursive:
            children.extend(self.container_children_recur)
        else:
            children.extend(self.container_children)

    return filterdata(children, objects=objects, targdict=targdict, **kwargs)
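# Sketch of the SpikeTrain-specific branch above: when objects is the
# SpikeTrain class, the children accumulator starts as a SpikeTrainList,
# so a filter restricted to spike trains should come back as a
# SpikeTrainList rather than a plain list (blk is a hypothetical
# neo.Block with spike trains in its segments).
sts = blk.filter(objects=SpikeTrain)
print(type(sts).__name__)  # expected: SpikeTrainList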
def filterdata(data, targdict=None, objects=None, **kwargs):
    """
    Return a list of the objects in data matching *any* of the search
    terms in either their attributes or annotations.

    Search terms can be provided as keyword arguments or a dictionary,
    either as a positional argument after data or to the argument
    targdict. targdict can also be a list of dictionaries, in which
    case the filters are applied sequentially. If targdict and kwargs
    are both supplied, the targdict filters are applied first, followed
    by the kwarg filters. A targdict of None or {} together with
    objects=None corresponds to no filters applied, therefore returning
    all child objects. The defaults for targdict and objects are None.

    objects (optional) should be the name of a Neo object type, a neo
    object class, or a list of one or both of these. If specified,
    only these objects will be returned.
    """
    # if objects are specified, get the classes
    if objects:
        if hasattr(objects, 'lower') or isinstance(objects, type):
            objects = [objects]
    elif objects is not None:
        return []

    # handle cases with targdict
    if targdict is None:
        targdict = kwargs
    elif not kwargs:
        pass
    elif hasattr(targdict, 'keys'):
        targdict = [targdict, kwargs]
    else:
        targdict += [kwargs]

    if not targdict:
        results = data

    # if multiple dicts are provided, apply each filter sequentially
    elif not hasattr(targdict, 'keys'):
        # for performance reasons, only do the object filtering on the
        # first iteration
        results = filterdata(data, targdict=targdict[0], objects=objects)
        for targ in targdict[1:]:
            results = filterdata(results, targdict=targ)
        return results
    else:
        # do the actual filtering
        results = []
        for key, value in sorted(targdict.items()):
            for obj in data:
                if (hasattr(obj, key) and getattr(obj, key) == value
                        and all([obj is not res for res in results])):
                    results.append(obj)
                elif (key in obj.annotations
                        and obj.annotations[key] == value
                        and all([obj is not res for res in results])):
                    results.append(obj)

    # keep only objects of the correct classes
    if objects:
        results = [
            result for result in results
            if result.__class__ in objects
            or result.__class__.__name__ in objects
        ]

    if results and all(isinstance(obj, SpikeTrain) for obj in results):
        return SpikeTrainList(results)
    else:
        return results
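# A short sketch of the sequential-filter behaviour documented above
# ('identifier' and 'quality' are hypothetical annotation keys, and seg
# is a hypothetical neo.Segment): a list of targdicts narrows the
# results one dict at a time.
sts = filterdata(seg.children,
                 targdict=[{'identifier': 'A'}, {'quality': 'good'}])
# equivalent to:
#   filterdata(filterdata(seg.children, {'identifier': 'A'}),
#              {'quality': 'good'})
# and, if every match is a SpikeTrain, the result is a SpikeTrainList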