Example no. 1
    def process(self):
        if self._inputBuffer is None or self._outputBuffer is None:
            return
        if not self._inputBufferValid(0, buffer_type=effect.AudioBuffer.__name__):
            self._outputBuffer[0] = None
            return
        color = self._inputBuffer[1]
        if color is None:
            color = self._default_color

        y = self._inputBuffer[0].audio
        fs = self._inputBuffer[0].sample_rate

        if self.lowcut_hz > 0 or self.highcut_hz < 20000:
            # construct filter if needed
            if self._bandpass is None:
                self._bandpass = dsp.Bandpass(self.lowcut_hz, self.highcut_hz, fs, 3)
            # update bandpass
            self._bandpass.updateParams(self.lowcut_hz, self.highcut_hz, fs, 3)
            # process audio
            y = self._bandpass.filter(np.array(y), fs)

        rms = dsp.rms(y)
        # calculate rms over hold_time
        while len(self._hold_values) > self.n_overlaps:
            self._hold_values.pop()
        self._hold_values.insert(0, rms)
        rms = dsp.rms(self._hold_values)
        db = 20 * math.log10(max(rms, 1e-16))
        scal_value = (self.db_range + db) / self.db_range
        bar = np.zeros((3, self._num_pixels))
        index = int(self._num_pixels * scal_value)
        index = np.clip(index, 0, self._num_pixels - 1)
        bar[0:3, 0:index] = color[0:3, 0:index]
        self._outputBuffer[0] = bar
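
A note on the snippet above: the bar length follows directly from the dB scaling of the smoothed RMS. Below is a standalone, worked version of that arithmetic with hypothetical values standing in for self.db_range and self._num_pixels:

    import math

    import numpy as np

    db_range = 60.0   # hypothetical; stands in for self.db_range
    num_pixels = 300  # hypothetical; stands in for self._num_pixels

    rms = 0.1                                # smoothed RMS of the (band-passed) audio block
    db = 20 * math.log10(max(rms, 1e-16))    # -> -20 dB
    scal_value = (db_range + db) / db_range  # -> (60 - 20) / 60 ~= 0.67
    index = int(np.clip(int(num_pixels * scal_value), 0, num_pixels - 1))  # -> 199 of 300 pixels

    bar = np.zeros((3, num_pixels))          # RGB rows, one column per pixel
    bar[:, :index] = 255.0                   # light the first `index` pixels white

At full scale (rms = 1.0) db is 0 and essentially the whole strip lights up; at or below -60 dB the bar collapses to zero.
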
Example no. 2
    def process(self):
        if self._inputBuffer is None or self._outputBuffer is None:
            return
        if not self._inputBufferValid(0, buffer_type=effect.AudioBuffer.__name__):
            self._outputBuffer[0] = None
            return
        if self._inputBufferValid(1):
            color = self._inputBuffer[1]
        else:
            color = np.ones(self._num_pixels) * np.array([[255.0], [255.0], [255.0]])

        audio = self._inputBuffer[0].audio
        fs = self._inputBuffer[0].sample_rate

        # construct filter if needed
        if self._bandpass is None:
            self._bandpass = dsp.Bandpass(self.lowcut_hz, self.highcut_hz, fs, 3)
        # update bandpass
        self._bandpass.updateParams(self.lowcut_hz, self.highcut_hz, fs, 3)
        # apply bandpass to audio
        y = self._bandpass.filter(np.array(audio), fs)

        # adjust probability according to peak of audio
        peak = np.max(y) * 1.0
        # apply peak filter; if the exponent is invalid, keep the raw peak
        try:
            peak = peak**self.peak_filter
        except Exception:
            peak = peak
        prob = min(self.probability + peak, 1.0)
        if self._outputBuffer is not None:
            self._output = np.multiply(
                color,
                self.starControl(prob, peak)
                * np.array([[self.peak_scale * 1.0], [self.peak_scale * 1.0], [self.peak_scale * 1.0]]))
        self._outputBuffer[0] = self._output.clip(0.0, 255.0)
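
dsp.Bandpass and dsp.rms are project helpers whose implementation is not shown in these examples; every snippet constructs the filter once and then calls updateParams and filter on each frame. The following is only a minimal stand-in sketch, assuming scipy.signal and a Butterworth design, so the pattern can be reproduced outside the project:

    import numpy as np
    import scipy.signal as signal

    class Bandpass:
        """Hypothetical stand-in for dsp.Bandpass: an order-N Butterworth band-pass."""

        def __init__(self, lowcut_hz, highcut_hz, fs, order):
            self.updateParams(lowcut_hz, highcut_hz, fs, order)

        def updateParams(self, lowcut_hz, highcut_hz, fs, order):
            nyq = 0.5 * fs
            low = max(lowcut_hz / nyq, 1e-6)
            high = min(highcut_hz / nyq, 0.999)
            # Recompute the coefficients; the effects call this on every frame.
            self._sos = signal.butter(order, [low, high], btype='bandpass', output='sos')

        def filter(self, y, fs):
            # Stateless per-block filtering; the real helper may keep filter state across blocks.
            return signal.sosfilt(self._sos, np.asarray(y, dtype=float))

    def rms(y):
        """Hypothetical stand-in for dsp.rms: root-mean-square of a sample block."""
        y = np.asarray(y, dtype=float)
        return float(np.sqrt(np.mean(np.square(y))))
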
Example no. 3
    def process(self):
        if self._inputBuffer is None or self._outputBuffer is None:
            return
        if not self._inputBufferValid(0, buffer_type=effect.AudioBuffer.__name__):
            self._outputBuffer[0] = None
            return
        audio = self._inputBuffer[0].audio
        fs = self._inputBuffer[0].sample_rate
        color = self._inputBuffer[1]
        if color is None:
            # default color: all white
            color = np.ones(self._num_pixels) * np.array([[255.0], [255.0], [255.0]])
        # construct filter if needed
        if self._bandpass is None:
            self._bandpass = dsp.Bandpass(self.lowcut_hz, self.highcut_hz, fs, 3)
        # update bandpass
        self._bandpass.updateParams(self.lowcut_hz, self.highcut_hz, fs, 3)
        # apply bandpass to audio
        y = self._bandpass.filter(np.array(audio), fs)
        # move in speed
        dt_move = self._t - self._last_move_t
        # calculate number of pixels to shift
        shift_pixels = int(dt_move * self.speed)
        shift_pixels = np.clip(shift_pixels, 1, self._num_pixels - 1)
        if dt_move * self.speed > 1:
            self._pixel_state[:, shift_pixels:] = self._pixel_state[:, :-shift_pixels]
            self._pixel_state[:, 0:shift_pixels] = self._pixel_state[:, shift_pixels:shift_pixels + 1]
            # convolve to smooth edges
            self._pixel_state[:, 0:2 * shift_pixels] = gaussian_filter1d(
                self._pixel_state[:, 0:2 * shift_pixels], sigma=0.5, axis=1)
            self._last_move_t = self._t
        # dim with time
        dt = self._t - self._last_t
        self._last_t = self._t
        self._pixel_state *= (1.0 - dt / self.dim_time)
        self._pixel_state = gaussian_filter1d(self._pixel_state, sigma=0.5, axis=1)
        self._pixel_state = gaussian_filter1d(self._pixel_state, sigma=0.5, axis=1)
        # calculate current peak
        peak = np.max(y) * 1.0
        while len(self._hold_values) > 20 * self.smoothing:
            self._hold_values.pop()
        self._hold_values.insert(0, peak)
        peak = np.max(self._hold_values)
        # apply peak filter and scale
        try:
            peak = peak**self.peak_filter
        except Exception:
            peak = peak
        peak = peak * self.peak_scale
        # new pixel at origin with peak
        r, g, b = color[0, 0], color[1, 0], color[2, 0]
        self._pixel_state[0][0:shift_pixels] = r * peak + self.highlight * peak * 255.0
        self._pixel_state[1][0:shift_pixels] = g * peak + self.highlight * peak * 255.0
        self._pixel_state[2][0:shift_pixels] = b * peak + self.highlight * peak * 255.0
        self._pixel_state = np.nan_to_num(self._pixel_state).clip(0.0, 255.0)
        self._outputBuffer[0] = self._pixel_state
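
The scrolling step in the example above is plain array slicing plus a Gaussian blur over the seam. A standalone illustration of just that step, assuming scipy is available:

    import numpy as np
    from scipy.ndimage import gaussian_filter1d

    num_pixels = 12
    shift_pixels = 3

    # 3 x num_pixels RGB state with a bright blob at the origin.
    pixel_state = np.zeros((3, num_pixels))
    pixel_state[:, 0:2] = 255.0

    # Shift everything shift_pixels to the right...
    pixel_state[:, shift_pixels:] = pixel_state[:, :-shift_pixels]
    # ...fill the vacated columns with the value now sitting at the boundary...
    pixel_state[:, 0:shift_pixels] = pixel_state[:, shift_pixels:shift_pixels + 1]
    # ...and smooth the seam so the scroll does not leave a hard edge.
    pixel_state[:, 0:2 * shift_pixels] = gaussian_filter1d(
        pixel_state[:, 0:2 * shift_pixels], sigma=0.5, axis=1)

    print(pixel_state[0])  # red channel after one scroll step
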
Example no. 4
    def process(self):
        def jrange(value0To1, minRange, maxRange):
            value0To1 = min(max(value0To1, 0), 1)
            return (maxRange-minRange)*value0To1 + minRange

        def jvalue(minRange, maxRange, value):
            if maxRange - minRange == 0:
                return 0
            return (value - minRange) / (maxRange - minRange)
            
        if self._inputBuffer is None or self._outputBuffer is None:
            return
        if not self._inputBufferValid(0, buffer_type=effect.AudioBuffer.__name__):
            self._outputBuffer[0] = None
            return
        if self._inputBufferValid(1):
            color = self._inputBuffer[1]
        else:
            color = np.ones(self._num_pixels) * np.array([[255.0], [255.0], [255.0]])

        audio = self._inputBuffer[0].audio
        fs = self._inputBuffer[0].sample_rate

        # construct filter if needed
        if self._bandpass is None:
            self._bandpass = dsp.Bandpass(self.lowcut_hz, self.highcut_hz, fs, 3)
        # update bandpass
        self._bandpass.updateParams(self.lowcut_hz, self.highcut_hz, fs, 3)
        # apply bandpass to audio
        y = self._bandpass.filter(np.array(audio), fs)

        # adjust probability according to peak of audio
        peak = np.max(y) * 1.0
        maxpeak = 1
        try:
            # apply peak filter to the peak and to the full-scale reference
            peak = peak**self.peak_filter
            maxpeak = maxpeak**self.peak_filter
        except Exception:
            # if the exponent fails, fall back to the raw peak for both values
            peak = peak
            maxpeak = peak
        prob = min(jvalue(0, maxpeak, self.probability) + peak, 1.0)
        # logger.debug("spawn start {}".format(prob))
        if self._outputBuffer is not None:
            self._output = np.multiply(
                color,
                self.starControl(prob, peak)
                * np.array([[self.peak_scale * 1.0], [self.peak_scale * 1.0], [self.peak_scale * 1.0]]))
        self._outputBuffer[0] = self._output.clip(0.0, 255.0)
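
jrange and jvalue in the example above are inverse mappings between a normalized 0-1 control value and an arbitrary range (note that jvalue takes the range limits first and the value last). A short usage check with arbitrary numbers:

    def jrange(value0To1, minRange, maxRange):
        """Map a 0-1 value onto [minRange, maxRange]."""
        value0To1 = min(max(value0To1, 0), 1)
        return (maxRange - minRange) * value0To1 + minRange

    def jvalue(minRange, maxRange, value):
        """Inverse of jrange: where does `value` sit inside [minRange, maxRange]?"""
        if maxRange - minRange == 0:
            return 0
        return (value - minRange) / (maxRange - minRange)

    print(jrange(0.25, 100.0, 200.0))   # 125.0
    print(jvalue(100.0, 200.0, 125.0))  # 0.25
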
Example no. 5
    def process(self):
        if self._inputBuffer is None or self._outputBuffer is None:
            return
        if not self._inputBufferValid(0, buffer_type=effect.AudioBuffer.__name__):
            return
        if not self._inputBufferValid(1):
            self._outputBuffer[0] = None
            return

        # Init audio
        audio = self._inputBuffer[0].audio
        fs = self._inputBuffer[0].sample_rate

        # construct filter if needed
        if self._bandpass is None:
            self._bandpass = dsp.Bandpass(self.lowcut_hz, self.highcut_hz, fs, 3)
        # update bandpass
        self._bandpass.updateParams(self.lowcut_hz, self.highcut_hz, fs, 3)
        # apply bandpass to audio
        y = self._bandpass.filter(np.array(audio), fs)

        x = self._inputBuffer[1]
        rms = dsp.rms(y)
        # calculate rms over hold_time
        while len(self._hold_values) > 20 * self.smoothing:
            self._hold_values.pop()
        self._hold_values.insert(0, rms)
        rms = dsp.rms(self._hold_values)
        db = 20 * math.log10(max(rms, 1e-16))
        db = max(db, -self.db_range)

        scal_value = (self.db_range + db) / self.db_range
        try:
            scal_value = scal_value**self.peak_filter
        except Exception:
            scal_value = scal_value
        scal_value = scal_value * self.peak_scale

        dt_move = self._t - self._last_t
        shift = dt_move * self.speed * 0.1 * scal_value
        self._shift_pixels = math.fmod((self._shift_pixels + shift), np.size(x, axis=1))
        self._last_t = self._t
        self._outputBuffer[0] = sp.ndimage.shift(x, [0, self._shift_pixels], mode='wrap', prefilter=True)
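
The final line scrolls the incoming pixel buffer by a fractional number of pixels with wrap-around. A standalone example of the same scipy.ndimage call on a toy buffer:

    import numpy as np
    import scipy.ndimage

    # 3 x 8 RGB buffer with a single lit pixel per channel.
    x = np.zeros((3, 8))
    x[:, 0] = 255.0

    # Shift only along the pixel axis (axis 1) by 2.5 pixels, wrapping around the strip.
    shifted = scipy.ndimage.shift(x, [0, 2.5], mode='wrap', prefilter=True)
    print(np.round(shifted[0], 1))
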
Example no. 6
    def process(self):
        if self._inputBuffer is None or self._outputBuffer is None:
            return
        if not self._inputBufferValid(0, buffer_type=effect.AudioBuffer.__name__):
            self._outputBuffer[0] = None
            return
        if self._inputBufferValid(1):
            pixelbuffer = np.array(self._inputBuffer[1])
        else:
            # default color: all white
            pixelbuffer = np.ones(self._num_pixels) * np.array([[255.0], [255.0], [255.0]])

        audio = self._inputBuffer[0].audio
        fs = self._inputBuffer[0].sample_rate

        # construct filter if needed
        if self._bandpass is None:
            self._bandpass = dsp.Bandpass(self.lowcut_hz, self.highcut_hz, fs, 3)
        # update bandpass
        self._bandpass.updateParams(self.lowcut_hz, self.highcut_hz, fs, 3)
        # apply bandpass to audio
        y = self._bandpass.filter(np.array(audio), fs)
        peak = np.max(y) * 1.0
        while len(self._hold_values) > 20 * self.smoothing:
            self._hold_values.pop()
        self._hold_values.insert(0, peak)
        peak = np.max(self._hold_values)
        # apply peak filter and scale
        try:
            peak = peak**self.peak_filter
        except Exception:
            peak = peak
        peak = peak * self.peak_scale

        pixelbuffer[0] = sp.ndimage.shift(pixelbuffer[0], -self.spread * peak, mode='wrap', prefilter=True)
        pixelbuffer[2] = sp.ndimage.shift(pixelbuffer[2], self.spread * peak, mode='wrap', prefilter=True)
        self._outputBuffer[0] = pixelbuffer
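
Several of these effects share the same peak-hold pattern: push the current block peak to the front of a short list, drop old entries, and report the maximum, so the displayed value is held briefly instead of flickering. A minimal standalone version, with the window length as a hypothetical parameter (the effects use 20 * self.smoothing or self.n_overlaps):

    import numpy as np

    hold_values = []
    hold_length = 10  # hypothetical window length

    def held_peak(block):
        """Push the current block peak and return the maximum over the hold window."""
        while len(hold_values) > hold_length:
            hold_values.pop()
        hold_values.insert(0, float(np.max(block)))
        return max(hold_values)

    print(held_peak(np.array([0.1, 0.9, 0.2])))    # 0.9
    print(held_peak(np.array([0.05, 0.1, 0.02])))  # still 0.9: the earlier peak is held
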
Example no. 7
    def process(self):
        if self._inputBuffer is None or self._outputBuffer is None:
            return
        if not self._inputBufferValid(0, buffer_type=effect.AudioBuffer.__name__):
            return

        # Process every now and then (speed_fps)
        dt = self._t - self._last_process_dt
        # Prevent division by zero
        if dt == 0.0:
            return
        cur_fps = 1.0 / dt
        if cur_fps > self.speed_fps:
            # Return to exit
            # logger.info("Met t= {}".format(self._t))
            return

        # logger.info("Process")

        # Init color input
        cols = int(self._num_pixels / self._num_rows)
        if self._inputBufferValid(1):
            color = self._inputBuffer[1]
        else:
            color = np.ones(cols) * np.array([[255], [255], [255]])

        # Init audio
        audio = self._inputBuffer[0].audio * self.gain
        fs = self._inputBuffer[0].sample_rate

        # construct filter if needed
        if self._bandpass is None:
            self._bandpass = dsp.Bandpass(self.lowcut_hz, self.highcut_hz, fs, 3)
        # update bandpass
        self._bandpass.updateParams(self.lowcut_hz, self.highcut_hz, fs, 3)

        # apply bandpass to audio
        y = self._bandpass.filter(np.array(audio), fs)

        # adjust number of samples to respect window_fq_hz.
        # if we have 440 samples @ 44000 Hz -> 440/44000 = 0.01 s of data -> 100 Hz
        # if we have 880 samples @ 44000 Hz -> 880/44000 = 0.02 s of data -> 50 Hz
        # to display 100 Hz across the entire window: 1 / 100 Hz * 44000 -> 440 samples
        # (the code below keeps half of that, i.e. half a period of window_fq_hz)

        adjusted_window = int(1.0 / self.window_fq_hz * fs / 2.0)
        # update audio buffer
        if self._audioBuffer is None:
            self._audioBuffer = y
        elif self._audioBuffer is not None and (len(self._audioBuffer) + len(y)) > adjusted_window * 10:
            # audio buffer contains more samples than we need
            self._audioBuffer = self._audioBuffer[len(y):]
            self._audioBuffer = np.append(self._audioBuffer, y)
        else:
            self._audioBuffer = np.append(self._audioBuffer, y)

        y = self._audioBuffer
        adjusted_window = int(min(len(y), adjusted_window))

        # Find zero crossings to stabilize output
        zero_crossings = np.where(np.diff(np.sign(y)))[0]
        start_idx = 0
        if len(zero_crossings) > 1:
            if y[zero_crossings[0]] < 0 and y[zero_crossings[0] + 1] > 0:
                start_idx = zero_crossings[0]
            else:
                start_idx = zero_crossings[1]

        y = y[start_idx:start_idx + adjusted_window]

        output = np.zeros((3, self._num_rows, cols))
        # Downsample so that roughly one sample remains per column
        decimation_ratio = max(1, int(np.round(len(y) / (cols + 1))))
        downsampled_audio = sp.signal.decimate(y, decimation_ratio, ftype='fir', zero_phase=True)
        # Map one downsampled value to a row per column -> prevents jumping between positive and negative values
        for i in range(0, cols):
            if i >= len(downsampled_audio):
                continue
            # determine index in audio array
            valIdx = i
            # get value
            val = downsampled_audio[valIdx]
            # convert to row idx
            rowIdx = max(0, min(int(self._num_rows / 2 + val * self._num_rows / 2), self._num_rows - 1))
            # set value for this col
            output[:, rowIdx, i] = color[:, i]
        self._outputBuffer[0] = output.reshape((3, -1))
        # Update timer
        self._last_process_dt = self._t
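
For reference, the window arithmetic from the comments in this last example, worked through with the same numbers (44000 Hz sample rate, 100 Hz target) and including the halving that the code applies:

    fs = 44000            # sample rate used in the comments above
    window_fq_hz = 100.0  # lowest frequency that should fill the whole window

    samples_per_period = fs / window_fq_hz                 # 440 samples = one full 100 Hz period
    adjusted_window = int(1.0 / window_fq_hz * fs / 2.0)   # 220 samples = half a period, as in the code
    print(samples_per_period, adjusted_window)             # 440.0 220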