Example #1
 def scale(self, peaks: Peaks, _time: float, corresponding_events: List[CorpusEvent],
           corresponding_transforms: List[AbstractTransform], corpus: Corpus = None, **_kwargs) -> Peaks:
     # TODO: This could be optimized and stored if ScaleAction had direct access to Corpus
     low_index: int = int(self._low_thresh.value * corpus.length())
     high_index: int = int(self._high_thresh.value * corpus.length())
     corresponding_indices: np.ndarray = np.array([e.state_index for e in corresponding_events], dtype=int)
     mask: np.ndarray = ((low_index <= corresponding_indices) & (corresponding_indices <= high_index)).astype(int)
     peaks.scale(mask)
     return peaks
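
This scale action keeps only the peaks whose corresponding events lie within a normalized index range of the corpus and zeroes out the rest. A minimal stand-alone sketch of the same masking idea, assuming made-up threshold values and a plain score array in place of the Peaks class:

 import numpy as np

 # Assumed values for illustration: thresholds in [0, 1], a corpus of 10 events.
 low_thresh, high_thresh, corpus_length = 0.2, 0.6, 10
 low_index = int(low_thresh * corpus_length)    # 2
 high_index = int(high_thresh * corpus_length)  # 6

 corresponding_indices = np.array([0, 3, 5, 8])  # event index behind each peak
 scores = np.array([1.0, 0.5, 0.8, 0.3])         # one score per peak

 # Peaks whose event index falls outside [low_index, high_index] are scaled to zero.
 mask = ((low_index <= corresponding_indices) & (corresponding_indices <= high_index)).astype(float)
 print(scores * mask)  # [0.  0.5 0.8 0. ]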
Example #2
 def _decide_fallback(
         self, peaks: Peaks, corpus: Corpus,
         transform_handler: TransformHandler,
         **kwargs) -> Optional[Tuple[CorpusEvent, AbstractTransform]]:
     self.logger.debug("[decide] _decide_fallback called.")
     try:
         last_entry: Optional[Tuple[
             CorpusEvent, float, AbstractTransform]] = self._history.last()
         last_event, _, last_transform = last_entry
         next_state_idx: int = (last_event.state_index +
                                1) % corpus.length()
         return corpus.event_at(next_state_idx), last_transform
     except IndexError:
         # If history is empty: play the first event in the corpus
         return corpus.event_at(0), NoTransform()
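
The fallback mirrors the try/except pattern above: if there is a generation history, step to the next state index (wrapping at the end of the corpus); if the history is empty, an IndexError routes the decision to the first event. A minimal sketch with a plain list standing in for the history:

 corpus_length = 5  # assumed corpus length for illustration
 history = []       # no events generated yet

 try:
     last_state_index = history[-1]  # raises IndexError when the history is empty
     next_state_idx = (last_state_index + 1) % corpus_length
 except IndexError:
     next_state_idx = 0              # fall back to the first event in the corpus
 print(next_state_idx)  # 0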
Example #3
    def merge(self,
              peaks: Peaks,
              _time: float,
              corpus: Corpus = None,
              **_kwargs) -> Peaks:
        if peaks.size() <= 1:
            return peaks
        self.logger.debug(
            f"[merge] Merging activity with {peaks.size()} peaks.")

        duration: float = corpus.duration()
        inv_duration: float = 1 / duration
        num_rows: int = int(duration / self._t_width.value)

        peaks_list: List[Peaks] = []
        for transform_hash in np.unique(peaks.transform_ids):
            indices: np.ndarray = np.argwhere(
                peaks.transform_ids == transform_hash)
            indices = indices.reshape((indices.size, ))
            scores: np.ndarray = peaks.scores[indices]
            times: np.ndarray = peaks.times[indices]
            num_cols: int = scores.size

            row_indices: np.ndarray = np.floor(times * inv_duration *
                                               num_rows).astype(np.int32)
            interp_matrix: sparse.coo_matrix = sparse.coo_matrix(
                (np.ones(num_cols), (row_indices, np.arange(num_cols))),
                shape=(num_rows + 1, num_cols))
            interp_matrix: sparse.csc_matrix = interp_matrix.tocsc()

            interpolated_scores: np.ndarray = interp_matrix.dot(scores)
            interpolated_times: np.ndarray = interp_matrix.dot(times)
            num_peaks_per_index: np.ndarray = np.array(
                interp_matrix.sum(axis=1)).reshape(interp_matrix.shape[0])
            peak_indices: np.ndarray = interpolated_scores.nonzero()[0]

            scores: np.ndarray = interpolated_scores[peak_indices]
            times: np.ndarray = np.divide(interpolated_times[peak_indices],
                                          num_peaks_per_index[peak_indices])
            transforms: np.ndarray = np.ones(peak_indices.size,
                                             dtype=np.int32) * transform_hash
            # print("After merge:", scores.shape, times.shape, transforms.shape)

            peaks_list.append(Peaks(scores, times, transforms))

        merged_peaks: Peaks = Peaks.concatenate(peaks_list)
        self.logger.debug(
            f"[merge] Merge successful. Number of peaks after merge: {merged_peaks.size()}."
        )
        return merged_peaks
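
The merge buckets peaks into time bins of width `t_width` and, per transform, sums the scores of all peaks that land in the same bin, using a sparse matrix product as a scatter-add; the merged time is the mean of the contributing peak times. A minimal sketch of that aggregation with scipy.sparse, using made-up scores and times:

 import numpy as np
 from scipy import sparse

 # Made-up peaks for a single transform: one score and one time (in beats) per peak.
 scores = np.array([1.0, 0.5, 0.8, 0.2])
 times = np.array([0.1, 0.2, 2.3, 2.4])
 duration, t_width = 4.0, 1.0            # assumed corpus duration and bin width
 num_rows = int(duration / t_width)

 # Row = target time bin, column = original peak; each peak contributes once.
 row_indices = np.floor(times / duration * num_rows).astype(np.int32)
 interp = sparse.coo_matrix((np.ones(scores.size), (row_indices, np.arange(scores.size))),
                            shape=(num_rows + 1, scores.size)).tocsc()

 summed_scores = interp.dot(scores)                      # score sum per bin
 counts = np.maximum(np.array(interp.sum(axis=1)).ravel(), 1)
 mean_times = interp.dot(times) / counts                 # mean time per bin
 nonzero = summed_scores.nonzero()[0]
 print(summed_scores[nonzero])  # [1.5 1. ]
 print(mean_times[nonzero])     # [0.15 2.35]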
Example #4
 def _decide_default(
         self, peaks: Peaks, corpus: Corpus,
         transform_handler: TransformHandler,
         **kwargs) -> Optional[Tuple[CorpusEvent, AbstractTransform]]:
     if peaks.is_empty():
         return None
     score_cumsum: np.ndarray = np.cumsum(peaks.scores)
     # slight offset to avoid an extremely rare case of a fp error
     max_value: float = score_cumsum[-1] - 1e-5
     output_target_score: float = float(np.random.random(1) * max_value)
     peak_idx: int = np.argwhere(score_cumsum > output_target_score)[0]
     transform_hash: int = int(peaks.transform_ids[peak_idx])
     return corpus.event_around(
         peaks.times[peak_idx]), transform_handler.get_transform(
             transform_hash)
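
This decision rule is a weighted random draw (roulette-wheel selection): the cumulative sum of the peak scores maps a uniform random number to a peak with probability proportional to its score. A minimal sketch of the selection step, with made-up scores:

 import numpy as np

 scores = np.array([0.1, 0.6, 0.3])               # made-up peak scores
 score_cumsum = np.cumsum(scores)                 # [0.1, 0.7, 1.0]
 max_value = score_cumsum[-1] - 1e-5              # small offset guards against fp edge cases
 target = float(np.random.random(1) * max_value)  # uniform draw in [0, max_value)
 peak_idx = int(np.argwhere(score_cumsum > target)[0, 0])
 print(peak_idx)  # 0, 1 or 2 with probability roughly 0.1, 0.6 and 0.3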
Example #5
File: player.py  Project: DYCI2/Somax2
 def _scale_peaks(self, peaks: Peaks, scheduler_time: float, corpus: Corpus,
                  **kwargs):
     if peaks.is_empty():
         return peaks
     corresponding_events: List[CorpusEvent] = corpus.events_around(
         peaks.times)
     corresponding_transforms: List[AbstractTransform] = [
         self._transform_handler.get_transform(t)
         for t in np.unique(peaks.transform_ids)
     ]
     for scale_action in self.scale_actions.values():
         if scale_action.is_enabled_and_eligible():
             peaks = scale_action.scale(peaks, scheduler_time,
                                        corresponding_events,
                                        corresponding_transforms, corpus,
                                        **kwargs)
     return peaks
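
The player threads the same Peaks object through every enabled and eligible scale action in turn, so the actions compose. A minimal sketch of that chaining, with plain functions on a score array standing in for the scale actions:

 import numpy as np

 # Stand-in scale actions: each takes an array of peak scores and returns a scaled copy.
 def halve(scores):
     return scores * 0.5

 def clip_low(scores):
     return np.where(scores < 0.2, 0.0, scores)

 scores = np.array([1.0, 0.3, 0.1])
 for action in (halve, clip_low):  # applied in sequence, like scale_actions.values()
     scores = action(scores)
 print(scores)  # [0.5 0.  0. ]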
Example #6
 def _decide_default(
         self, peaks: Peaks, corpus: Corpus,
         transform_handler: TransformHandler,
         **kwargs) -> Optional[Tuple[CorpusEvent, AbstractTransform]]:
     self.logger.debug("[decide] _decide_default called.")
     if peaks.is_empty():
         return None
     max_peak_value: float = np.max(peaks.scores)
     self.logger.debug(
         f"[decide_default] Max peak value is {max_peak_value}.")
     max_peaks_idx: List[int] = np.argwhere(
         np.abs(peaks.scores - max_peak_value) < 0.001)
     peak_idx: int = random.choice(max_peaks_idx)
     transform_hash: int = int(peaks.transform_ids[peak_idx])
     return corpus.event_around(
         peaks.times[peak_idx]), transform_handler.get_transform(
             transform_hash)
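
Unlike the probabilistic rule in Example #4, this variant always returns one of the highest-scoring peaks and breaks ties (within a 0.001 tolerance) at random. A minimal sketch of the tie-breaking step, with made-up scores:

 import random
 import numpy as np

 scores = np.array([0.2, 0.9, 0.9, 0.5])  # made-up peak scores
 max_peak_value = np.max(scores)
 max_peaks_idx = np.argwhere(np.abs(scores - max_peak_value) < 0.001).ravel()
 peak_idx = int(random.choice(max_peaks_idx))  # index 1 or 2, chosen uniformly
 print(peak_idx)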
Example #7
 def cluster(self, corpus: Corpus) -> None:
     """ :raises InvalidCorpus if number of events in corpus is lower than `self.num_components`."""
     chromas: List[np.ndarray] = [event.get_feature(OnsetChroma).value() for event in corpus.events]
     gmm_data: np.ndarray = np.row_stack(chromas)
     max_per_row: np.ndarray = np.max(gmm_data, axis=1)
     max_per_row[max_per_row == 0] = 1  # don't normalize empty vectors - avoid div0 error
     gmm_data /= max_per_row[:, np.newaxis]
     try:
         self.gmm = GaussianMixture(n_components=self.num_components, max_iter=self.max_iter).fit(gmm_data)
     except ValueError as e:
         if self.num_components > corpus.length():
             raise InvalidCorpus(f"{self.__class__.__name__} could not classify corpus '{str(corpus)}' since corpus "
                                 f"length ({corpus.length()}) is lower than number of requested clusters "
                                 f"({self.num_components}). "
                                 f"Reduce the number of clusters or select another classifier")
         else:
             raise InvalidCorpus(f"Unknown error encountered in {self.__class__.__name__}. Error: {repr(e)}.")
Example #8
 def _is_eligible_for(self, corpus: Corpus) -> bool:
     return corpus.has_feature(OctaveBands)
Example #9
 def _is_eligible_for(self, corpus: Corpus) -> bool:
     return corpus.has_feature(VerticalDensity)
Example #10
 def _is_eligible_for(self, corpus: Corpus) -> bool:
     return corpus.has_feature(TotalEnergyDb)
Example #11
 def _is_eligible_for(self, corpus: Corpus) -> bool:
     return corpus.has_feature(Tempo)
Example #12
 def _is_eligible_for(self, corpus: Corpus) -> bool:
     return corpus.has_feature(OnsetChroma)
Example #13
 def export(self,
            corpus: Corpus,
            file_path: str,
            time_signature: Tuple[int, int] = (4, 4)):
     note_matrix: NoteMatrix = corpus.to_note_matrix()
     note_matrix.to_midi_file()
Example #14
 def _is_eligible_for(self, corpus: Corpus) -> bool:
     return (corpus.has_feature(self.midi_pitch_type) and isinstance(corpus, MidiCorpus)) or \
            (corpus.has_feature(self.audio_pitch_type) and isinstance(corpus, AudioCorpus))