def merge(self, other):
    '''
    Merge another signal into this one.

    The signal objects are concatenated horizontally (column-wise,
    :func:`np.hstack`). If the attributes of the two signals are not
    compatible, an Exception is raised.

    Required attributes of the signal are used.
    '''
    # The two signals must share the same time base and parent segment.
    if not np.array_equal(self.times, other.times):
        raise MergeError(
            "Cannot merge these two signals as the sample times differ.")
    if self.segment != other.segment:
        raise MergeError(
            "Cannot merge these two signals as they belong to different segments.")

    # A lazily-loaded signal may only be merged with another lazy one;
    # remember the combined (rows, columns) shape for the result.
    merged_lazy_shape = None
    if hasattr(self, "lazy_shape"):
        if not hasattr(other, "lazy_shape"):
            raise MergeError(
                "Cannot merge a lazy object with a real object.")
        if self.lazy_shape[0] != other.lazy_shape[0]:
            raise MergeError(
                "Cannot merge signals of different length.")
        merged_lazy_shape = (self.lazy_shape[0],
                             self.lazy_shape[1] + other.lazy_shape[1])

    # Bring both operands to a common unit before stacking columns.
    if other.units != self.units:
        other = other.rescale(self.units)
    combined = np.hstack((self.magnitude, other.magnitude))

    # Identical metadata is kept as-is; diverging values are recorded
    # explicitly as "merge(a, b)".
    attrs = {}
    for field in ("name", "description", "file_origin"):
        mine = getattr(self, field)
        theirs = getattr(other, field)
        attrs[field] = mine if mine == theirs else "merge({}, {})".format(mine, theirs)
    attrs.update(merge_annotations(self.annotations, other.annotations))

    signal = self.__class__(self.times, combined, units=self.units,
                            dtype=self.dtype, copy=False, **attrs)
    signal.segment = self.segment
    signal.array_annotate(**self._merge_array_annotations(other))
    if merged_lazy_shape is not None:
        signal.lazy_shape = merged_lazy_shape
    return signal
def merge(self, other):
    '''
    Merge another :class:`AnalogSignal` into this one.

    The :class:`AnalogSignal` objects are concatenated horizontally
    (column-wise, :func:`np.hstack`).

    If the attributes of the two :class:`AnalogSignal` are not
    compatible, an Exception is raised.
    '''
    # Compatibility: same clock, same start time, same parent segment.
    if self.sampling_rate != other.sampling_rate:
        raise MergeError("Cannot merge, different sampling rates")
    if self.t_start != other.t_start:
        raise MergeError("Cannot merge, different t_start")
    if self.segment != other.segment:
        raise MergeError("Cannot merge these two signals as they belong to different segments.")

    # Lazy signals only merge with lazy signals; rows must match and the
    # column counts add up.
    merged_lazy_shape = None
    if hasattr(self, "lazy_shape"):
        if not hasattr(other, "lazy_shape"):
            raise MergeError("Cannot merge a lazy object with a real object.")
        if self.lazy_shape[0] != other.lazy_shape[0]:
            raise MergeError("Cannot merge signals of different length.")
        merged_lazy_shape = (self.lazy_shape[0],
                             self.lazy_shape[1] + other.lazy_shape[1])

    if other.units != self.units:
        other = other.rescale(self.units)
    stacked = np.hstack(map(np.array, (self, other)))

    # Shared metadata carries over directly; differing values are tagged.
    attrs = {}
    for field in ("name", "description", "file_origin"):
        mine = getattr(self, field)
        theirs = getattr(other, field)
        attrs[field] = mine if mine == theirs else "merge(%s, %s)" % (mine, theirs)
    attrs.update(merge_annotations(self.annotations, other.annotations))

    signal = AnalogSignal(stacked, units=self.units, dtype=self.dtype,
                          copy=False, t_start=self.t_start,
                          sampling_rate=self.sampling_rate, **attrs)
    signal.segment = self.segment

    # merge channel_index (move to ChannelIndex.merge()?)
    column_index = np.arange(signal.shape[1])
    if self.channel_index and other.channel_index:
        signal.channel_index = ChannelIndex(
            index=column_index,
            channel_ids=np.hstack([self.channel_index.channel_ids,
                                   other.channel_index.channel_ids]),
            channel_names=np.hstack([self.channel_index.channel_names,
                                     other.channel_index.channel_names]))
    else:
        signal.channel_index = ChannelIndex(index=column_index)

    if merged_lazy_shape is not None:
        signal.lazy_shape = merged_lazy_shape
    return signal
def merge(self, other):
    '''
    Merge another :class:`IrregularlySampledSignal` with this one, and
    return the merged signal.

    The :class:`IrregularlySampledSignal` objects are concatenated
    horizontally (column-wise, :func:`np.hstack`).

    If the attributes of the two :class:`IrregularlySampledSignal` are
    not compatible, a :class:`MergeError` is raised.
    '''
    # Both signals must be sampled at exactly the same instants and
    # belong to the same segment.
    if not np.array_equal(self.times, other.times):
        raise MergeError(
            "Cannot merge these two signals as the sample times differ.")
    if self.segment != other.segment:
        raise MergeError(
            "Cannot merge these two signals as they belong to different segments.")

    # Lazy signals only merge with lazy signals of the same length.
    merged_lazy_shape = None
    if hasattr(self, "lazy_shape"):
        if not hasattr(other, "lazy_shape"):
            raise MergeError(
                "Cannot merge a lazy object with a real object.")
        if self.lazy_shape[0] != other.lazy_shape[0]:
            raise MergeError(
                "Cannot merge signals of different length.")
        merged_lazy_shape = (self.lazy_shape[0],
                             self.lazy_shape[1] + other.lazy_shape[1])

    if other.units != self.units:
        other = other.rescale(self.units)
    stacked = np.hstack(map(np.array, (self, other)))

    # Metadata that agrees is kept; disagreements are recorded explicitly.
    attrs = {}
    for field in ("name", "description", "file_origin"):
        mine = getattr(self, field)
        theirs = getattr(other, field)
        attrs[field] = mine if mine == theirs else "merge(%s, %s)" % (mine, theirs)
    attrs.update(merge_annotations(self.annotations, other.annotations))

    signal = IrregularlySampledSignal(self.times, stacked, units=self.units,
                                      dtype=self.dtype, copy=False, **attrs)
    signal.segment = self.segment
    if merged_lazy_shape is not None:
        signal.lazy_shape = merged_lazy_shape
    return signal
def _read_recordingchannelgroup(self, node, parent):
    # todo: handle Units
    attributes = self._get_standard_attributes(node)
    channel_indexes = node["channel_indexes"].value
    channel_names = node["channel_names"].value

    if channel_indexes.size:
        # A group carrying both an explicit index array and child
        # RecordingChannel nodes is ambiguous — refuse it outright.
        if len(node['recordingchannels']):
            raise MergeError(
                "Cannot handle a RecordingChannelGroup which both has a "
                "'channel_indexes' attribute and contains "
                "RecordingChannel objects")
        raise NotImplementedError("todo")  # need to handle node['analogsignalarrays']

    # No index array: rebuild the channel list from the child nodes.
    channels = [self._read_recordingchannel(child)
                for name, child in node['recordingchannels'].items()
                if "RecordingChannel" in name]
    channel_index = ChannelIndex(None, **attributes)
    channel_index._channels = channels
    # construction of the index is deferred until we have processed
    # all RecordingChannelGroup nodes
    channel_index.units = [self._read_unit(child, parent=channel_index)
                           for name, child in node['units'].items()
                           if "Unit" in name]
    channel_index.block = parent
    return channel_index
def merge(self, other):
    '''
    Merge another :class:`SpikeTrain` into this one.

    The times of the :class:`SpikeTrain` objects combined in one array
    and sorted.

    If the attributes of the two :class:`SpikeTrain` are not
    compatible, an Exception is raised.

    Raises
    ------
    MergeError
        If sampling rates, t_start, t_stop, left_sweep or parent
        segments differ, if only one of the trains carries waveforms,
        or if a lazy-loaded train is merged with a fully loaded one.
    '''
    if self.sampling_rate != other.sampling_rate:
        raise MergeError("Cannot merge, different sampling rates")
    if self.t_start != other.t_start:
        raise MergeError("Cannot merge, different t_start")
    if self.t_stop != other.t_stop:
        # BUG FIX: previously raised MemoryError here, which callers
        # catching MergeError would never see.
        raise MergeError("Cannot merge, different t_stop")
    if self.left_sweep != other.left_sweep:
        # BUG FIX: was MemoryError as well.
        raise MergeError("Cannot merge, different left_sweep")
    if self.segment != other.segment:
        raise MergeError("Cannot merge these two signals as they belong to"
                         " different segments.")
    if hasattr(self, "lazy_shape"):
        if hasattr(other, "lazy_shape"):
            # NOTE(review): this is a plain int, not a 1-tuple — confirm
            # consumers of lazy_shape expect a scalar here.
            merged_lazy_shape = (self.lazy_shape[0] + other.lazy_shape[0])
        else:
            raise MergeError("Cannot merge a lazy object with a real"
                             " object.")
    if other.units != self.units:
        other = other.rescale(self.units)
    # Either both trains carry waveforms or neither does.
    wfs = [self.waveforms is not None, other.waveforms is not None]
    if any(wfs) and not all(wfs):
        raise MergeError("Cannot merge signal with waveform and signal "
                         "without waveform.")
    # Combine and time-sort the spike times; keep the permutation so the
    # waveforms can be reordered consistently below.
    stack = np.concatenate((np.asarray(self), np.asarray(other)))
    sorting = np.argsort(stack)
    stack = stack[sorting]
    # Metadata that agrees carries over; disagreements are tagged.
    kwargs = {}
    for name in ("name", "description", "file_origin"):
        attr_self = getattr(self, name)
        attr_other = getattr(other, name)
        if attr_self == attr_other:
            kwargs[name] = attr_self
        else:
            kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
    merged_annotations = merge_annotations(self.annotations,
                                           other.annotations)
    kwargs.update(merged_annotations)
    train = SpikeTrain(stack, units=self.units, dtype=self.dtype,
                       copy=False, t_start=self.t_start,
                       t_stop=self.t_stop,
                       sampling_rate=self.sampling_rate,
                       left_sweep=self.left_sweep, **kwargs)
    if all(wfs):
        # Reorder waveforms with the same permutation as the spike times.
        wfs_stack = np.vstack((self.waveforms, other.waveforms))
        wfs_stack = wfs_stack[sorting]
        train.waveforms = wfs_stack
    train.segment = self.segment
    if train.segment is not None:
        self.segment.spiketrains.append(train)
    if hasattr(self, "lazy_shape"):
        train.lazy_shape = merged_lazy_shape
    return train
def merge(self, other):
    '''
    Merge another signal into this one.

    The signal objects are concatenated horizontally (column-wise,
    :func:`np.hstack`). If the attributes of the two signal are not
    compatible, an Exception is raised.

    Required attributes of the signal are used.
    '''
    # Every required attribute except the raw data must agree.
    for req in self._necessary_attrs:
        field = req[0]
        if field == 'signal':
            continue
        if getattr(self, field, None) != getattr(other, field, None):
            raise MergeError(
                "Cannot merge these two signals as the %s differ." % field)

    if self.segment != other.segment:
        raise MergeError(
            "Cannot merge these two signals as they belong to different segments.")

    # Lazy signals only merge with lazy signals of the same length;
    # the resulting lazy shape gains the extra columns.
    merged_lazy_shape = None
    if hasattr(self, "lazy_shape"):
        if not hasattr(other, "lazy_shape"):
            raise MergeError(
                "Cannot merge a lazy object with a real object.")
        if self.lazy_shape[0] != other.lazy_shape[0]:
            raise MergeError(
                "Cannot merge signals of different length.")
        merged_lazy_shape = (self.lazy_shape[0],
                             self.lazy_shape[1] + other.lazy_shape[1])

    if other.units != self.units:
        other = other.rescale(self.units)
    data = np.hstack((self.magnitude, other.magnitude))

    # Metadata that agrees carries over; disagreements are tagged.
    attrs = {}
    for field in ("name", "description", "file_origin"):
        mine = getattr(self, field)
        theirs = getattr(other, field)
        attrs[field] = mine if mine == theirs else "merge({}, {})".format(mine, theirs)
    attrs.update(merge_annotations(self.annotations, other.annotations))
    attrs['array_annotations'] = self._merge_array_annotations(other)

    signal = self.__class__(data, units=self.units, dtype=self.dtype,
                            copy=False, t_start=self.t_start,
                            sampling_rate=self.sampling_rate, **attrs)
    signal.segment = self.segment
    if merged_lazy_shape is not None:
        signal.lazy_shape = merged_lazy_shape

    # merge channel_index (move to ChannelIndex.merge()?)
    column_index = np.arange(signal.shape[1])
    if self.channel_index and other.channel_index:
        signal.channel_index = ChannelIndex(
            index=column_index,
            channel_ids=np.hstack([self.channel_index.channel_ids,
                                   other.channel_index.channel_ids]),
            channel_names=np.hstack([self.channel_index.channel_names,
                                     other.channel_index.channel_names]))
    else:
        signal.channel_index = ChannelIndex(index=column_index)

    return signal
def concatenate(self, other, allow_overlap=False):
    '''
    Combine this and another signal along the time axis.

    The signal objects are concatenated vertically (row-wise,
    :func:`np.vstack`). Patching can be used to combine signals across
    segments. Note: Only array annotations common to both signals are
    attached to the concatenated signal.

    If the attributes of the two signal are not compatible, an Exception
    is raised.

    Required attributes of the signal are used.

    Parameters
    ----------
    other : neo.BaseSignal
        The object that is merged into this one.
    allow_overlap : bool
        If false, overlapping samples between the two signals are not
        permitted and a ValueError is raised. If true, no check for
        overlapping samples is performed and all samples are combined.

    Returns
    -------
    signal : neo.IrregularlySampledSignal
        Signal containing all non-overlapping samples of both source
        signals.

    Raises
    ------
    MergeError
        If `other` object has incompatible attributes.
    '''
    # All required attributes except the data and the time-related ones
    # (which may legitimately differ between the two signals) must agree.
    # (Removed a duplicate 'times' entry from the exclusion list.)
    for attr in self._necessary_attrs:
        if attr[0] not in ('signal', 'times', 't_start', 't_stop'):
            if getattr(self, attr[0], None) != getattr(other, attr[0], None):
                raise MergeError(
                    "Cannot concatenate these two signals as the %s differ."
                    % attr[0])

    # Lazy signals only concatenate with lazy signals carrying the same
    # number of traces; rows (samples) add up.
    if hasattr(self, "lazy_shape"):
        if hasattr(other, "lazy_shape"):
            if self.lazy_shape[-1] != other.lazy_shape[-1]:
                raise MergeError(
                    "Cannot concatenate signals as they contain"
                    " different numbers of traces.")
            merged_lazy_shape = (self.lazy_shape[0] + other.lazy_shape[0],
                                 self.lazy_shape[-1])
        else:
            raise MergeError(
                "Cannot concatenate a lazy object with a real object.")

    if other.units != self.units:
        other = other.rescale(self.units)

    # Combine and time-sort the samples of both signals.
    new_times = np.hstack((self.times, other.times))
    sorting = np.argsort(new_times)
    new_samples = np.vstack((self.magnitude, other.magnitude))

    # Metadata that agrees carries over; disagreements are tagged.
    kwargs = {}
    for name in ("name", "description", "file_origin"):
        attr_self = getattr(self, name)
        attr_other = getattr(other, name)
        if attr_self == attr_other:
            kwargs[name] = attr_self
        else:
            kwargs[name] = "merge({}, {})".format(attr_self, attr_other)
    merged_annotations = merge_annotations(self.annotations,
                                           other.annotations)
    kwargs.update(merged_annotations)
    kwargs['array_annotations'] = intersect_annotations(
        self.array_annotations, other.array_annotations)

    if not allow_overlap:
        # Two intervals overlap iff the later start precedes (or equals)
        # the earlier stop.
        if max(self.t_start, other.t_start) <= min(self.t_stop, other.t_stop):
            # BUG FIX: the message previously referred to a non-existent
            # "no_overlap" parameter.
            raise ValueError(
                'Can not combine signals that overlap in time. Allow for '
                'overlapping samples using the "allow_overlap" parameter.')

    t_start = min(self.t_start, other.t_start)
    # BUG FIX: t_stop was previously computed as
    # max(self.t_start, other.t_start), i.e. from the start times, which
    # yields a stop time earlier than the last sample.
    t_stop = max(self.t_stop, other.t_stop)

    signal = IrregularlySampledSignal(signal=new_samples[sorting],
                                      times=new_times[sorting],
                                      units=self.units, dtype=self.dtype,
                                      copy=False, t_start=t_start,
                                      t_stop=t_stop, **kwargs)
    signal.segment = None
    signal.channel_index = None

    if hasattr(self, "lazy_shape"):
        signal.lazy_shape = merged_lazy_shape

    return signal
def concatenate(self, *signals, overwrite=False, padding=False):
    """
    Concatenate multiple neo.AnalogSignal objects across time.

    Units, sampling_rate and number of signal traces must be the same
    for all signals. Otherwise a ValueError is raised.
    Note that timestamps of concatenated signals might shift in order to
    align the sampling times of all signals.

    Parameters
    ----------
    signals: neo.AnalogSignal objects
        AnalogSignals that will be concatenated
    overwrite : bool
        If True, samples of the earlier (lower index in `signals`)
        signals are overwritten by that of later (higher index in
        `signals`) signals.
        If False, samples of the later are overwritten by earlier
        signal.
        Default: False
    padding : bool, scalar quantity
        Sampling values to use as padding in case signals do not
        overlap.
        If False, do not apply padding. Signals have to align or
        overlap.
        If True, signals will be padded using np.NaN as pad values.
        If a scalar quantity is provided, this will be used for padding.
        The other signal is moved forward in time by maximum one
        sampling period to align the sampling times of both signals.
        Default: False

    Returns
    -------
    signal: neo.AnalogSignal
        concatenated output signal
    """
    # Sanity of inputs
    if not hasattr(signals, '__iter__'):
        raise TypeError('signals must be iterable')
    if not all([isinstance(a, AnalogSignal) for a in signals]):
        raise TypeError(
            'Entries of anasiglist have to be of type neo.AnalogSignal')
    if len(signals) == 0:
        return self

    # This signal is always the first operand of the concatenation.
    signals = [self] + list(signals)

    # Check required common attributes: units, sampling_rate and shape[-1]
    shared_attributes = ['units', 'sampling_rate']
    attribute_values = [
        tuple((getattr(anasig, attr) for attr in shared_attributes))
        for anasig in signals
    ]
    # add shape dimensions that do not relate to time
    attribute_values = [(attribute_values[i] + (signals[i].shape[1:], ))
                        for i in range(len(signals))]
    if not all(
            [attrs == attribute_values[0] for attrs in attribute_values]):
        raise MergeError(
            f'AnalogSignals have to share {shared_attributes} attributes to be concatenated.'
        )
    units, sr, shape = attribute_values[0]

    # find gaps between Analogsignals
    combined_time_ranges = self._concatenate_time_ranges([
        (s.t_start, s.t_stop) for s in signals
    ])
    missing_time_ranges = self._invert_time_ranges(combined_time_ranges)
    if len(missing_time_ranges):
        # Width of each uncovered time gap, one row per gap.
        diffs = np.diff(np.asarray(missing_time_ranges), axis=1)
    else:
        diffs = []

    # Gaps wider than one sampling period cannot be bridged without padding.
    if padding is False and any(diffs > signals[0].sampling_period):
        raise MergeError(
            f'Signals are not continuous. Can not concatenate signals with gaps. '
            f'Please provide a padding value.')
    if padding is not False:
        logger.warning('Signals will be padded using {}.'.format(padding))
        if padding is True:
            # Default pad value: NaN in the signals' common units.
            padding = np.NaN * units
        if isinstance(padding, pq.Quantity):
            padding = padding.rescale(units).magnitude
        else:
            # Any padding that is neither True nor a Quantity is rejected.
            raise MergeError(
                'Invalid type of padding value. Please provide a bool value '
                'or a quantities object.')

    # Overall time span of the result; sample count is rounded to the
    # nearest integer number of sampling periods.
    t_start = min([a.t_start for a in signals])
    t_stop = max([a.t_stop for a in signals])
    n_samples = int(
        np.rint(
            ((t_stop - t_start) * sr).rescale('dimensionless').magnitude))
    shape = (n_samples, ) + shape

    # Collect attributes and annotations across all concatenated signals
    kwargs = {}
    common_annotations = signals[0].annotations
    common_array_annotations = signals[0].array_annotations
    for anasig in signals[1:]:
        # Keep only annotations shared by every input signal.
        common_annotations = intersect_annotations(common_annotations,
                                                   anasig.annotations)
        common_array_annotations = intersect_annotations(
            common_array_annotations, anasig.array_annotations)

    kwargs['annotations'] = common_annotations
    kwargs['array_annotations'] = common_array_annotations

    for name in ("name", "description", "file_origin"):
        attr = [getattr(s, name) for s in signals]
        if all([a == attr[0] for a in attr]):
            kwargs[name] = attr[0]
        else:
            kwargs[name] = f'concatenation ({attr})'

    # Pre-fill the output with the pad value, then splice each input in.
    conc_signal = AnalogSignal(np.full(shape=shape,
                                       fill_value=padding,
                                       dtype=signals[0].dtype),
                               sampling_rate=sr,
                               t_start=t_start,
                               units=units,
                               **kwargs)

    # Splice order decides which signal wins on overlap: the last one
    # spliced overwrites earlier samples, so reverse when overwrite=False.
    if not overwrite:
        signals = signals[::-1]
    while len(signals) > 0:
        conc_signal.splice(signals.pop(0), copy=False)

    return conc_signal
def merge(self, *others):
    '''
    Merge other :class:`SpikeTrain` objects into this one.

    The times of the :class:`SpikeTrain` objects combined in one array
    and sorted.

    If the attributes of the :class:`SpikeTrain` objects are not
    compatible, an Exception is raised.
    '''
    # Validate every operand before touching any data.
    for other in others:
        if isinstance(other, neo.io.proxyobjects.SpikeTrainProxy):
            raise MergeError("Cannot merge, SpikeTrainProxy objects cannot be merged"
                             "into regular SpikeTrain objects, please load them first.")
        elif not isinstance(other, SpikeTrain):
            raise MergeError("Cannot merge, only SpikeTrain"
                             "can be merged into a SpikeTrain.")
        if self.sampling_rate != other.sampling_rate:
            raise MergeError("Cannot merge, different sampling rates")
        if self.t_start != other.t_start:
            raise MergeError("Cannot merge, different t_start")
        if self.t_stop != other.t_stop:
            raise MergeError("Cannot merge, different t_stop")
        if self.left_sweep != other.left_sweep:
            raise MergeError("Cannot merge, different left_sweep")
        if self.segment != other.segment:
            raise MergeError("Cannot merge these signals as they belong to"
                             " different segments.")

    # All trains expressed in this train's units, self first.
    all_spiketrains = [self]
    all_spiketrains.extend([st.rescale(self.units) for st in others])

    # Either every train carries waveforms or none does.
    wfs = [st.waveforms is not None for st in all_spiketrains]
    if any(wfs) and not all(wfs):
        raise MergeError("Cannot merge signal with waveform and signal "
                         "without waveform.")

    # Combine and time-sort the spike times; keep the permutation so
    # array annotations and waveforms can be reordered consistently.
    stack = np.concatenate([np.asarray(st) for st in all_spiketrains])
    sorting = np.argsort(stack)
    stack = stack[sorting]

    kwargs = {}
    kwargs['array_annotations'] = self._merge_array_annotations(others,
                                                                sorting=sorting)

    # Build a combined "merge(a; b; ...)" value for each metadata field,
    # de-duplicating entries from trains that are themselves merges.
    for name in ("name", "description", "file_origin"):
        attr = getattr(self, name)
        # check if self is already a merged spiketrain
        # if it is, get rid of the bracket at the end to append more attributes
        if attr is not None:
            if attr.startswith('merge(') and attr.endswith(')'):
                attr = attr[:-1]
        for other in others:
            attr_other = getattr(other, name)

            # both attributes are None --> nothing to do
            if attr is None and attr_other is None:
                continue

            # one of the attributes is None --> convert to string in order to merge them
            elif attr is None or attr_other is None:
                attr = str(attr)
                attr_other = str(attr_other)

            # check if the other spiketrain is already a merged spiketrain
            # if it is, append all of its merged attributes that aren't already in attr
            if attr_other.startswith('merge(') and attr_other.endswith(')'):
                for subattr in attr_other[6:-1].split('; '):
                    if subattr not in attr:
                        attr += '; ' + subattr
                        if not attr.startswith('merge('):
                            attr = 'merge(' + attr

            # if the other attribute is not in the list --> append
            # if attr doesn't already start with merge add merge( in the beginning
            elif attr_other not in attr:
                attr += '; ' + attr_other
                if not attr.startswith('merge('):
                    attr = 'merge(' + attr

        # close the bracket of merge(...) if necessary
        if attr is not None:
            if attr.startswith('merge('):
                attr += ')'

        # write attr into kwargs dict
        kwargs[name] = attr

    merged_annotations = merge_annotations(*(st.annotations
                                             for st in all_spiketrains))
    kwargs.update(merged_annotations)

    train = SpikeTrain(stack, units=self.units, dtype=self.dtype,
                       copy=False, t_start=self.t_start,
                       t_stop=self.t_stop,
                       sampling_rate=self.sampling_rate,
                       left_sweep=self.left_sweep, **kwargs)
    if all(wfs):
        # Reorder the stacked waveforms with the same permutation as the
        # spike times, restoring the original waveform units.
        wfs_stack = np.vstack([st.waveforms.rescale(self.waveforms.units)
                               for st in all_spiketrains])
        wfs_stack = wfs_stack[sorting] * self.waveforms.units
        train.waveforms = wfs_stack
    train.segment = self.segment
    if train.segment is not None:
        self.segment.spiketrains.append(train)

    return train