def scale(self, peaks: Peaks, time: float, corresponding_events: List[CorpusEvent],
          corresponding_transforms: List[AbstractTransform], corpus: Corpus = None, **kwargs) -> Peaks:
    """Scale each peak by the inverse euclidean distance between its event's octave-band
    distribution and this action's target distribution (closer spectra -> larger factor).

    Fixes: removed the leftover debug `print` (marked TODO for removal) and guarded the
    division so an exact spectral match (distance 0) no longer produces an infinite factor.
    """
    events_band_distribution: np.ndarray = np.array([event.get_feature(OctaveBands).value()
                                                     for event in corresponding_events])
    # Per-event euclidean distance between the event's band distribution and the target.
    distances: np.ndarray = np.sqrt(np.sum(np.power(events_band_distribution - self.band_distribution, 2), axis=1))
    # Clamp to machine epsilon: a zero distance would otherwise yield factor = inf.
    factor: np.ndarray = 1 / np.maximum(distances, np.finfo(float).eps)
    peaks.scale(factor)
    return peaks
def scale(self, peaks: Peaks, time: float, corresponding_events: List[CorpusEvent],
          corresponding_transforms: List[AbstractTransform], corpus: Corpus = None, **kwargs) -> Peaks:
    """Mute (scale to 0) every peak whose corresponding event index is currently taboo.

    Improvement: the original looped once per taboo index and accumulated a boolean mask
    with `+=` (implicit logical-or on bool arrays); a single vectorized `np.isin` membership
    test produces the identical mask in one pass.
    """
    event_indices: np.ndarray = np.array([e.state_index for e in corresponding_events], dtype=int)
    # list(...) ensures np.isin receives a sequence even if _taboo_indices is a set.
    matching_indices: np.ndarray = np.isin(event_indices, list(self._taboo_indices))
    peaks.scale(0, matching_indices)
    return peaks
def scale(self, peaks: Peaks, _time: float, corresponding_events: List[CorpusEvent],
          corresponding_transforms: List[AbstractTransform], corpus: Corpus = None, **_kwargs) -> Peaks:
    """Zero out peaks whose event index lies outside the configured relative
    [low, high] region of the corpus; peaks inside the region are kept (scaled by 1)."""
    # TODO: This could be optimized and stored if ScaleAction had direct access to Corpus
    num_states: int = corpus.length()
    lo: int = int(self._low_thresh.value * num_states)
    hi: int = int(self._high_thresh.value * num_states)
    indices: np.ndarray = np.array([event.state_index for event in corresponding_events], dtype=int)
    # 1 where lo <= index <= hi, 0 elsewhere.
    in_range: np.ndarray = np.where((indices >= lo) & (indices <= hi), 1, 0)
    peaks.scale(in_range)
    return peaks
def scale(self, peaks: Peaks, time: float, corresponding_events: List[CorpusEvent],
          _corresponding_transforms: List[AbstractTransform], corpus: Corpus = None, **_kwargs) -> Peaks:
    """Scale by `self.factor` the peaks whose event directly follows the previously
    output event (state_index == previous + 1); no-op before any output exists."""
    if self._previous_output_index is None:
        return peaks
    continuation_index: int = self._previous_output_index + 1
    candidate_indices: np.ndarray = np.array([event.state_index for event in corresponding_events], dtype=int)
    peaks.scale(self.factor, candidate_indices == continuation_index)
    return peaks
def scale(self, peaks: Peaks, time: float, corresponding_events: List[CorpusEvent],
          corresponding_transforms: List[AbstractTransform], corpus: Corpus = None, **kwargs) -> Peaks:
    """Scale by `self.factor` the peaks whose transform differs from the previously
    output transform; no-op until both a previous transform and a handler exist.

    Bug fix: the original condition read `... or self._transform_handler`, which returned
    early whenever a handler WAS present and, with no handler, fell through to dereference
    `self._transform_handler.get_id(...)` on None. Corrected to `is None`.
    """
    if self._previous_transform is None or self._transform_handler is None:
        return peaks
    peak_transform_ids: np.ndarray = np.array(peaks.transform_ids)
    previous_transform_id: int = self._transform_handler.get_id(self._previous_transform)
    not_matching: np.ndarray = peak_transform_ids != previous_transform_id
    peaks.scale(self.factor, not_matching)
    return peaks
def scale(self, peaks: Peaks, time: float, corresponding_events: List[CorpusEvent],
          corresponding_transforms: List[AbstractTransform], corpus: Corpus = None, **kwargs) -> Peaks:
    """Dampen continuation peaks after too many consecutive steps: count the run of
    strictly consecutive outputs in recent history (successive index differences of -1),
    then scale peaks at index previous+1 by 1.0 (below activation), 0.0 (at/above the
    jump threshold), or a halving factor in between."""
    recent_indices: List[int] = [entry[0].state_index for entry in self._history.get_n_last(self.jump_threshold + 1)]
    if not recent_indices:
        return peaks
    last_output_index: int = recent_indices[0]
    # Length of the leading run where each history step differs by exactly -1
    # (equivalent to itertools.takewhile over np.diff).
    run_length: int = 0
    for step in np.diff(recent_indices):
        if step != -1:
            break
        run_length += 1
    if run_length <= self.activation_threshold:
        damping: float = 1.0
    elif run_length >= self.jump_threshold:
        damping: float = 0.0
    else:
        damping: float = 0.5 ** (run_length - self.activation_threshold)
    candidate_indices: np.ndarray = np.array([event.state_index for event in corresponding_events], dtype=int)
    peaks.scale(damping, candidate_indices == last_output_index + 1)
    return peaks