    def __init__(self, desired_framerate):
        self.desired_framerate = desired_framerate
        self.previous_time = time.time() * 1000.0
        """The previous time that the frames_per_second() function was called"""
        self.fps = ExpFilter(val=self.desired_framerate,
                             alpha_decay=0.2,
                             alpha_rise=0.2)
        """The low-pass filter used to estimate frames-per-second"""
        self.prev_fps_update = time.time()
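Note: every example in this listing relies on an ExpFilter class that is not shown here. As a point of reference, a minimal sketch of an asymmetric one-pole exponential smoothing filter with separate rise and decay coefficients (the argument names mirror how ExpFilter is called in these snippets; the project's actual implementation may differ) could look like this:

import numpy as np


class ExpFilter:
    """Exponential smoothing filter with separate rise/decay smoothing factors."""

    def __init__(self, val=0.0, alpha_decay=0.5, alpha_rise=0.5):
        assert 0.0 < alpha_decay < 1.0, 'Invalid decay smoothing factor'
        assert 0.0 < alpha_rise < 1.0, 'Invalid rise smoothing factor'
        self.alpha_decay = alpha_decay
        self.alpha_rise = alpha_rise
        self.value = val

    def update(self, value):
        # Use the rise coefficient when the input grows and the decay
        # coefficient when it falls; this works element-wise for arrays.
        if isinstance(self.value, (list, np.ndarray, tuple)):
            alpha = value - self.value
            alpha[alpha > 0.0] = self.alpha_rise
            alpha[alpha <= 0.0] = self.alpha_decay
        else:
            alpha = self.alpha_rise if value > self.value else self.alpha_decay
        self.value = alpha * value + (1.0 - alpha) * self.value
        return self.value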
Example #2
class Spectrum():
    def initSpectrum(self):

        # if(self.strip_config.is_mirror):
        #     new_length = self._number_of_pixels // 2
        # else:
        new_length = self._number_of_pixels

        self.prev_spectrum = np.tile(0.01, new_length)

        self.r_filt = ExpFilter(np.tile(0.01, new_length),
                                alpha_decay=0.2,
                                alpha_rise=0.99)
        self.g_filt = ExpFilter(np.tile(0.01, new_length),
                                alpha_decay=0.05,
                                alpha_rise=0.3)
        self.b_filt = ExpFilter(np.tile(0.01, new_length),
                                alpha_decay=0.1,
                                alpha_rise=0.5)
        self.common_mode = ExpFilter(np.tile(0.01, new_length),
                                     alpha_decay=0.99,
                                     alpha_rise=0.01)

    def visualizeSpectrum(self):
        """Effect that maps the Mel filterbank frequencies onto the LED strip"""

        active_color_scheme = self.active_state._formatted_color_schemes[
            self.active_state.active_color_scheme_index]
        new_length = self._number_of_pixels

        audio_data = np.copy(interpolate(self.audio_data, new_length))

        self.common_mode.update(audio_data)
        diff = audio_data - self.prev_spectrum
        self.prev_spectrum = np.copy(audio_data)

        # Color channel mappings

        r = self.r_filt.update(audio_data - self.common_mode.value) * \
            active_color_scheme[0][0] / 100
        g = np.abs(diff) * active_color_scheme[0][1] / 1000
        b = self.b_filt.update(np.copy(audio_data)) * \
            active_color_scheme[0][2] / 100

        # r = self.r_filt.update(
        #     audio_data - self.common_mode.value) * active_color_scheme[0][0]
        # g = self.g_filt.update(
        #     np.copy(audio_data - self.common_mode.value)) * active_color_scheme[0][1]
        # b = self.b_filt.update(
        #     np.copy(audio_data - self.common_mode.value)) * active_color_scheme[0][2]

        self.pixels = np.array([r, g, b]) * 255

        return self.pixelReshaper.reshapeFromPixels(self.pixels)
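visualizeSpectrum resamples the Mel-band values onto the pixel count through interpolate(), which is not included in this listing. A minimal sketch, assuming it simply performs linear interpolation between the two lengths:

import numpy as np


def interpolate(y, new_length):
    """Linearly resample a 1-D array to new_length points (assumed behaviour)."""
    if len(y) == new_length:
        return y
    x_old = np.linspace(0, 1, len(y))
    x_new = np.linspace(0, 1, new_length)
    return np.interp(x_new, x_old, y)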
class Energy():
    def initEnergy(self):
        self.p_filt = ExpFilter(np.tile(1, (3, self._number_of_pixels)),
                                alpha_decay=0.1,
                                alpha_rise=0.99)

        # 0.01 to 0.99 for speed setting

    def visualizeEnergy(self):
        """Effect that expands from the center with increasing sound energy"""
        self.audio_data = np.copy(self.audio_data)
        self.gain.update(self.audio_data)
        self.audio_data /= self.gain.value
        # Scale by the width of the LED strip
        self.audio_data *= float((self._number_of_pixels // 2) - 1)
        # Map color channels according to energy in the different freq bands
        scale = 0.9
        r = 0
        g = 0
        b = 0

        # r = int(np.mean(self.audio_data[:len(self.audio_data) // 3]**scale))
        # g = int(np.mean(self.audio_data[len(self.audio_data) // 3: 2 * len(self.audio_data) // 3]**scale))
        # b = int(np.mean(self.audio_data[2 * len(self.audio_data) // 3:]**scale))

        active_color_scheme = self.active_state._formatted_color_schemes[
            self.active_state.active_color_scheme_index]
        chunk_size = len(self.audio_data) // len(active_color_scheme)
        for i in range(len(active_color_scheme)):
            x = chunk_size * i
            y = chunk_size * (i + 1)
            value = int(np.mean(self.audio_data[x:y]**scale))
            r += int(value * active_color_scheme[i][0] / 100)
            g += int(value * active_color_scheme[i][1] / 100)
            b += int(value * active_color_scheme[i][2] / 100)

        self.pixels[0, :r] = 255.0
        self.pixels[0, r:] = 0.0
        self.pixels[1, :g] = 255.0
        self.pixels[1, g:] = 0.0
        self.pixels[2, :b] = 255.0
        self.pixels[2, b:] = 0.0

        self.p_filt.update(self.pixels)
        self.pixels = np.round(self.p_filt.value)

        self.pixels = self.blurFrame(self.pixels, self.active_state.blur_value)

        return self.pixelReshaper.reshapeFromPixels(self.pixels)
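visualizeEnergy smooths the final frame with self.blurFrame, which is also not defined in this listing. Assuming it applies a Gaussian blur along the strip with the state's blur value as the standard deviation, a minimal sketch of such a helper could be:

from scipy.ndimage import gaussian_filter1d


def blurFrame(self, pixels, blur_value):
    # pixels has shape (3, number_of_pixels); blur each color row along the strip.
    # Using gaussian_filter1d with blur_value as sigma is an assumption about the real helper.
    return gaussian_filter1d(pixels, sigma=blur_value, axis=1)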
Example #4
    def initVizualiser(self):

        self.active_state = self.strip_config.active_state
        self.number_of_pixels = self.active_state.shapes[
            self.active_state.active_shape_index].number_of_pixels

        self.timeSinceStart = self.config.timeSinceStart
        self.number_of_audio_samples = self.config.audio_ports[
            self.active_state.active_audio_channel_index
        ].number_of_audio_samples

        self.gain = ExpFilter(np.tile(0.01, self.number_of_audio_samples),
                              alpha_decay=0.001,
                              alpha_rise=0.99)

        self.initEnergy()
        self.initSpectrum()
        self.initChannelIntensity()
        self.initChannelFlash()

        self.initPianoNote()
        self.initPianoScroll()
        self.initPitchwheelFlash()

        self.initAlternateColors()
        self.initTransitionColorShapes()

        self.initFullColor()
        self.initFadeOut()
        self.initFire()
        self.resetFrame()

        self.pixelReshaper.initActiveShape()
Example #5
class Spectrum():
    def initSpectrum(self):

        # if(self.strip_config.is_mirror):
        #     new_length = self.number_of_pixels // 2
        # else:
        new_length = self.number_of_pixels

        self.prev_spectrum = np.tile(0.01, new_length)

        self.r_filt = ExpFilter(np.tile(0.01, new_length),
                                alpha_decay=0.2,
                                alpha_rise=0.99)
        self.g_filt = ExpFilter(np.tile(0.01, new_length),
                                alpha_decay=0.05,
                                alpha_rise=0.3)
        self.b_filt = ExpFilter(np.tile(0.01, new_length),
                                alpha_decay=0.1,
                                alpha_rise=0.5)
        self.common_mode = ExpFilter(np.tile(0.01, new_length),
                                     alpha_decay=0.99,
                                     alpha_rise=0.01)

    def visualizeSpectrum(self):
        """Effect that maps the Mel filterbank frequencies onto the LED strip"""

        # if(self.strip_config.is_mirror):
        #     new_length = self.number_of_pixels // 2
        # else:
        new_length = self.number_of_pixels

        audio_data = np.copy(interpolate(self.audio_data, new_length))

        self.common_mode.update(audio_data)
        diff = audio_data - self.prev_spectrum
        self.prev_spectrum = np.copy(audio_data)

        # Color channel mappings
        r = self.r_filt.update(audio_data - self.common_mode.value)
        g = self.g_filt.update(np.copy(audio_data - self.common_mode.value))
        b = self.b_filt.update(np.copy(audio_data - self.common_mode.value))

        self.pixels = np.array([r, g, b]) * 255

        return self.pixelReshaper.reshapeFromPixels(self.pixels)
Example #6
    def __init__(
        self,
        min_frequency=200,
        max_frequency=12000,
        sampling_rate=44100,
        number_of_audio_samples=24,
        min_volume_threshold=1e-7,
        n_rolling_history=4,
        framerate=60
    ):

        self.samples_per_frame = int(sampling_rate / framerate)
        self.y_roll = np.random.rand(
            n_rolling_history, self.samples_per_frame) / 1e16
        self.min_volume_threshold = min_volume_threshold
        self.number_of_audio_samples = number_of_audio_samples
        self.melBank = MelBank(framerate, min_frequency, max_frequency,
                               sampling_rate, number_of_audio_samples, min_volume_threshold)

        self.fft_plot_filter = ExpFilter(
            np.tile(1e-1, number_of_audio_samples),
            alpha_decay=0.5,
            alpha_rise=0.99
        )

        self.mel_gain = ExpFilter(
            np.tile(1e-1, number_of_audio_samples),
            alpha_decay=0.01,
            alpha_rise=0.99
        )

        self.mel_smoothing = ExpFilter(
            np.tile(1e-1, number_of_audio_samples),
            alpha_decay=0.5,
            alpha_rise=0.99
        )

        self.fft_window = np.hamming(
            self.samples_per_frame * n_rolling_history
        )
class FramerateCalculator:
    def __init__(self, desired_framerate):
        self.desired_framerate = desired_framerate
        self.previous_time = time.time() * 1000.0
        """The previous time that the frames_per_second() function was called"""
        self.fps = ExpFilter(val=self.desired_framerate,
                             alpha_decay=0.2,
                             alpha_rise=0.2)
        """The low-pass filter used to estimate frames-per-second"""
        self.prev_fps_update = time.time()

    def frames_per_second(self):
        """Return the estimated frames per second

        Returns the current estimate for frames-per-second (FPS).
        FPS is estimated by measuring the amount of time that has elapsed since
        this function was previously called. The FPS estimate is low-pass filtered
        to reduce noise.

        This function is intended to be called one time for every iteration of
        the program's main loop.

        Returns
        -------
        fps : float
            Estimated frames-per-second. This value is low-pass filtered
            to reduce noise.
        """
        time_now = time.time() * 1000.0
        dt = time_now - self.previous_time
        self.previous_time = time_now
        if dt == 0.0:
            return self.fps.value
        return self.fps.update(1000.0 / dt)

    def forceFrameDelay(self):
        time.sleep(1 / self.desired_framerate)

    def getFps(self):
        fps = self.frames_per_second()
        if time.time() - 0.5 > self.prev_fps_update:
            self.prev_fps_update = time.time()
        return '{:.0f}'.format(fps)
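A short usage sketch for FramerateCalculator, assuming it is ticked once per iteration of the render loop (render_one_frame is a placeholder, not part of the project):

frc = FramerateCalculator(desired_framerate=60)

for _ in range(120):           # roughly two seconds of frames at 60 FPS
    # render_one_frame()       # placeholder for the real per-frame work
    frc.forceFrameDelay()      # coarse sleep towards the target framerate
    print(frc.getFps())        # smoothed FPS estimate, e.g. '60'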
Example #8
    def initSpectrum(self):

        # if(self.strip_config.is_mirror):
        #     new_length = self._number_of_pixels // 2
        # else:
        new_length = self._number_of_pixels

        self.prev_spectrum = np.tile(0.01, new_length)

        self.r_filt = ExpFilter(np.tile(0.01, new_length),
                                alpha_decay=0.2,
                                alpha_rise=0.99)
        self.g_filt = ExpFilter(np.tile(0.01, new_length),
                                alpha_decay=0.05,
                                alpha_rise=0.3)
        self.b_filt = ExpFilter(np.tile(0.01, new_length),
                                alpha_decay=0.1,
                                alpha_rise=0.5)
        self.common_mode = ExpFilter(np.tile(0.01, new_length),
                                     alpha_decay=0.99,
                                     alpha_rise=0.01)
Example #9
class AudioProcessing():
    def __init__(
        self,
        min_frequency=200,
        max_frequency=12000,
        sampling_rate=44100,
        number_of_audio_samples=24,
        min_volume_threshold=1e-7,
        n_rolling_history=4,
        framerate=60
    ):

        self.sampling_rate = sampling_rate  # stored for rfft()/fft() below
        self.samples_per_frame = int(sampling_rate / framerate)
        self.y_roll = np.random.rand(
            n_rolling_history, self.samples_per_frame) / 1e16
        self.min_volume_threshold = min_volume_threshold
        self.number_of_audio_samples = number_of_audio_samples
        self.melBank = MelBank(framerate, min_frequency, max_frequency,
                               sampling_rate, number_of_audio_samples, min_volume_threshold)

        self.fft_plot_filter = ExpFilter(
            np.tile(1e-1, number_of_audio_samples),
            alpha_decay=0.5,
            alpha_rise=0.99
        )

        self.mel_gain = ExpFilter(
            np.tile(1e-1, number_of_audio_samples),
            alpha_decay=0.01,
            alpha_rise=0.99
        )

        self.mel_smoothing = ExpFilter(
            np.tile(1e-1, number_of_audio_samples),
            alpha_decay=0.5,
            alpha_rise=0.99
        )

        self.fft_window = np.hamming(
            self.samples_per_frame * n_rolling_history
        )

    def rfft(self, data, window=None):
        """Real-Valued Fast Fourier Transform"""
        window = 1.0 if window is None else window(len(data))
        ys = np.abs(np.fft.rfft(data * window))
        xs = np.fft.rfftfreq(len(data), 1.0 / self.sampling_rate)
        return xs, ys

    def fft(self, data, window=None):
        """Fast Fourier Transform"""
        window = 1.0 if window is None else window(len(data))
        ys = np.fft.fft(data * window)
        xs = np.fft.fftfreq(len(data), 1.0 / self.sampling_rate)
        return xs, ys

    def render(self, audio_samples):
        # Sound case
        # Normalize samples between 0 and 1
        y = audio_samples / 2.0**15
        # Construct a rolling window of audio samples
        self.y_roll[:-1] = self.y_roll[1:]
        self.y_roll[-1, :] = np.copy(y)
        y_data = np.concatenate(self.y_roll, axis=0).astype(np.float32)
        vol = np.max(np.abs(y_data))

        if vol < self.min_volume_threshold:
            # print('No audio input. Volume below threshold. Volume:', vol)
            return np.tile(0., self.number_of_audio_samples)
        else:
            # Transform audio input into the frequency domain
            N = len(y_data)
            N_zeros = 2**int(np.ceil(np.log2(N))) - N
            # Pad with zeros until the next power of two
            y_data *= self.fft_window
            y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
            YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
            # Construct a Mel filterbank from the FFT data
            mel = np.atleast_2d(YS).T * self.melBank.mel_y.T
            # Scale data to values more suitable for visualization
            mel = np.sum(mel, axis=0)
            mel = mel**2.0
            # Gain normalization
            self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
            mel /= self.mel_gain.value
            mel = self.mel_smoothing.update(mel)
            return mel
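render() expects one frame of signed 16-bit samples per call (hence the division by 2**15), keeps a rolling window of the last n_rolling_history frames, and returns one smoothed value per Mel band. A minimal usage sketch with a synthetic test tone (the sine-wave input is an assumption for illustration, and MelBank must be importable from the project):

import numpy as np

processing = AudioProcessing(sampling_rate=44100, framerate=60,
                             number_of_audio_samples=24)

# One frame of a 440 Hz test tone as 16-bit samples
t = np.arange(processing.samples_per_frame) / 44100.0
frame = (0.5 * np.sin(2 * np.pi * 440.0 * t) * 2**15).astype(np.int16)

mel = processing.render(frame)
print(mel.shape)   # (24,) -> one value per Mel band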
Example #10
    def initEnergy(self):
        self.p_filt = ExpFilter(np.tile(1, (3, self._number_of_pixels)),
                                alpha_decay=0.1,
                                alpha_rise=0.99)