Example #1
0
    def process(self):
        """Render bass and melody spectral bands onto the pixel strip.

        Reads an audio chunk from input channel 0 and optional color
        arrays from channel 1 (melody) and channel 2 (bass), splits the
        warped power spectrum into a bass band and a melody band, and
        writes the blended pixel array to output channel 0.
        """
        if self._inputBuffer is None or self._outputBuffer is None:
            return
        audio = self._inputBuffer[0]
        melody_color = self._inputBuffer[1]
        bass_color = self._inputBuffer[2]
        # Shared default: a column vector broadcast to all-white pixels.
        white = np.array([[255.0], [255.0], [255.0]])
        if melody_color is None:
            # default color: all white
            melody_color = np.ones(self.num_pixels) * white
        if bass_color is None:
            # default color: all white
            bass_color = np.ones(self.num_pixels) * white
        if audio is None:
            return
        if self._gen is None:
            # Lazily prime the buffering coroutine and audio generator.
            coroutine = self.buffer_coroutine()
            next(coroutine)
            self._lastAudioChunk = audio
            self._gen = self._audio_gen(coroutine)
        self._lastAudioChunk = audio
        y = next(self._gen)
        # Bark-warped PSD split at ~261 Hz (middle C) into bass / melody.
        bass = dsp.warped_psd(y, self.fft_bins, self._fs_ds,
                              [32.7, 261.0], 'bark')
        melody = dsp.warped_psd(y, self.fft_bins, self._fs_ds,
                                [261.0, self.fmax], 'bark')
        bass = self.process_line(bass, self._bass_rms)
        melody = self.process_line(melody, self._melody_rms)
        # Scale colors to [0, 1] per band level, then blend the two layers.
        scaled_bass = 1. / 255.0 * np.multiply(bass_color, bass)
        scaled_melody = 1. / 255. * np.multiply(melody_color, melody)
        pixels = colors.blend(scaled_bass, scaled_melody, self.col_blend)
        self._outputBuffer[0] = pixels.clip(0, 255).astype(int)
Example #2
0
 def process(self):
     """Render bass and melody spectral bands onto the pixel strip.

     Validates that channel 0 carries an AudioBuffer, reads optional
     color arrays from channel 1 (melody) and channel 2 (bass), splits
     the warped power spectrum into a bass band and a melody band, and
     writes the blended pixel array to output channel 0.
     """
     if self._inputBuffer is None or self._outputBuffer is None:
         return
     if not self._inputBufferValid(0, buffer_type=effect.AudioBuffer.__name__):
         # No valid audio input: clear the output channel.
         self._outputBuffer[0] = None
         return
     audio_input = self._inputBuffer[0]
     audio = audio_input.audio
     self._fs = audio_input.sample_rate
     melody_color = self._inputBuffer[1]
     bass_color = self._inputBuffer[2]
     # Shared default: a column vector broadcast to all-white pixels.
     all_white = np.array([[255.0], [255.0], [255.0]])
     if melody_color is None:
         # default color: all white
         melody_color = np.ones(self._num_pixels) * all_white
     if bass_color is None:
         # default color: all white
         bass_color = np.ones(self._num_pixels) * all_white
     if audio is None:
         return
     if self._gen is None:
         # Lazily prime the buffering coroutine and audio generator.
         coroutine = self.buffer_coroutine()
         next(coroutine)
         self._lastAudioChunk = audio
         self._gen = self._audio_gen(coroutine)
     self._lastAudioChunk = audio
     y = next(self._gen)
     # Bark-warped PSD split at ~261 Hz (middle C) into bass / melody.
     bass = self.process_line(
         dsp.warped_psd(y, self.fft_bins, self._fs_ds, [32.7, 261.0], 'bark'))
     melody = self.process_line(
         dsp.warped_psd(y, self.fft_bins, self._fs_ds, [261.0, self.fmax], 'bark'))
     pixels = colors.blend(
         1. / 255.0 * np.multiply(bass_color, bass),
         1. / 255. * np.multiply(melody_color, melody),
         self.col_blend,
     )
     self._outputBuffer[0] = pixels.clip(0, 255).astype(int)
 def process(self):
     """Blend the two input channels onto output channel 0.

     When both channels are valid their buffers are blended with
     ``self.mode``; when only one channel is valid it is passed through
     unchanged; when neither is valid the output is cleared to None.
     """
     if self._inputBuffer is None or self._outputBuffer is None:
         return
     valid0 = self._inputBufferValid(0)
     valid1 = self._inputBufferValid(1)
     if valid0 and valid1:
         # input on both channels: blend them
         self._outputBuffer[0] = colors.blend(
             self._inputBuffer[0], self._inputBuffer[1], self.mode)
     elif valid0:
         # only channel 0 valid: pass it through
         self._outputBuffer[0] = self._inputBuffer[0]
     elif valid1:
         # only channel 1 valid: pass it through
         # (bug fix: this branch previously forwarded channel 0)
         self._outputBuffer[0] = self._inputBuffer[1]
     else:
         # no input on any channel
         self._outputBuffer[0] = None