Ejemplo n.º 1
0
    def audio_data_updated(self, data):
        """Spawn a new colour drop whenever a band's intensity jumps past
        its configured sensitivity relative to the smoothed level."""
        # Mean energy for each melbank band: [lows, mids, highs].
        band_levels = np.array([
            np.mean(data.melbank_lows()),
            np.mean(data.melbank_mids()),
            np.mean(data.melbank_highs()),
        ])

        self.update_drop_frames()

        # A drop fires when the raw band level exceeds the filtered
        # (smoothed) level by more than the configured sensitivity.
        triggers = (
            ("lows_sensitivity", "lows_colour"),
            ("mids_sensitivity", "mids_colour"),
            ("high_sensitivity", "high_colour"),
        )
        for band, (sens_key, colour_key) in enumerate(triggers):
            delta = band_levels[band] - self.filtered_intensities[band]
            if delta > self._config[sens_key]:
                self.new_drop(randint(0, self.pixel_count - 1),
                              COLORS.get(self._config[colour_key]))

        # Smooth the per-band levels for the next comparison.
        self.filtered_intensities = self.intensity_filter.update(band_levels)

        self.pixels = self.get_drops()
Ejemplo n.º 2
0
    def _generate_bezier_curve(self, gradient_colors, gradient_length):
        """Build a (3, gradient_length) Bezier-blended RGB gradient and
        store it on the instance as ``self._gradient_curve``."""
        # A string argument is either a predefined gradient name or a
        # single colour name; resolve it to a list of colour names.
        if isinstance(gradient_colors, str):
            name = gradient_colors.lower()
            if GRADIENTS.get(name):
                gradient_colors = GRADIENTS.get(name)
            elif COLORS.get(name):
                gradient_colors = [name]
            else:
                gradient_colors = []

        # Fall back to the default gradient when nothing resolved.
        if not gradient_colors:
            gradient_colors = GRADIENTS.get('spectral')

        rgb_list = np.array(
            [COLORS[color.lower()] for color in gradient_colors]).T
        n_colors = len(rgb_list[0])

        # Evaluate each colour's Bernstein basis polynomial over [0, 1] ...
        t = np.linspace(0.0, 1.0, gradient_length)
        polynomial_array = np.array([
            self._bernstein_poly(i, n_colors - 1, t)
            for i in range(n_colors)
        ])
        # ... then blend each RGB channel through that basis.
        gradient = np.array([
            np.dot(rgb_list[channel], polynomial_array)
            for channel in range(3)
        ])

        _LOGGER.info(
            ('Generating new gradient curve for {}'.format(gradient_colors)))
        self._gradient_curve = gradient
Ejemplo n.º 3
0
    def audio_data_updated(self, data):
        """Render per-band energy bars from the strip head, optionally
        cycling each band's colour on detected beats."""
        # Scale each band's mean melbank value to a pixel index.
        idx_lows = int(np.mean(self.pixel_count * data.melbank_lows()))
        idx_mids = int(np.mean(self.pixel_count * data.melbank_mids()))
        idx_highs = int(np.mean(self.pixel_count * data.melbank_highs()))

        if self._config["color_cycler"]:
            _, beat_now = data.oscillator()
            if beat_now:
                # Rotate through lows -> mids -> highs, giving the current
                # band a freshly picked random colour on each beat.
                self.color_cycler = (self.color_cycler + 1) % 3
                new_colour = COLORS[np.random.choice(list(COLORS.keys()))]

                if self.color_cycler == 0:
                    self.lows_colour = new_colour
                elif self.color_cycler == 1:
                    self.mids_colour = new_colour
                else:
                    self.high_colour = new_colour

        # Paint the energy profile: "additive" sums overlapping bands,
        # "overlap" lets later bands overwrite earlier ones.
        frame = np.zeros(np.shape(self.pixels))
        mode = self._config["mixing_mode"]
        if mode == "additive":
            frame[:idx_lows] = self.lows_colour
            frame[:idx_mids] += self.mids_colour
            frame[:idx_highs] += self.high_colour
        elif mode == "overlap":
            frame[:idx_lows] = self.lows_colour
            frame[:idx_mids] = self.mids_colour
            frame[:idx_highs] = self.high_colour

        # Smooth frame-to-frame transitions through the pixel filter.
        self.pixels = self._p_filter.update(frame)
Ejemplo n.º 4
0
class SingleColorEffect(TemporalEffect, ModulateEffect):
    """Fill the entire strip with one configurable colour, run through
    the modulation mixin each frame."""

    NAME = "Single Color"
    CONFIG_SCHEMA = vol.Schema({
        vol.Optional('color', description='Color of strip', default="red"):
        vol.In(list(COLORS.keys())),
    })

    def config_updated(self, config):
        # Cache the configured colour as a float RGB triplet.
        self.color = np.array(COLORS[self._config['color']], dtype=float)

    def effect_loop(self):
        # Repeat the colour across every pixel, then apply modulation.
        solid = np.tile(self.color, (self.pixel_count, 1))
        self.pixels = self.modulate(solid)
Ejemplo n.º 5
0
class SingleColorEffect(TemporalEffect, ModulateEffect):
    """Render a solid-colour image covering the configured dimensions."""

    NAME = "Single Color"
    CONFIG_SCHEMA = vol.Schema({
        vol.Optional('color', description='Color of strip', default="red"):
        vol.In(list(COLORS.keys())),
    })

    def config_updated(self, config):
        # Store the colour as unsigned bytes; PIL expects 0-255 channels.
        self.color = np.array(COLORS[self._config['color']], dtype='B')

    def effect_loop(self):
        # A fresh solid RGB image each frame, sized to the display.
        fill = tuple(self.color)
        self.image = Image.new("RGB", self._dimensions, color=fill)
Ejemplo n.º 6
0
    def _generate_gradient_curve(self, gradient_colors, gradient_length,
                                 repeat):
        """Build a (3, gradient_length) eased RGB gradient, repeated
        ``repeat`` times along its length, storing it on the instance.

        gradient_colors may be a predefined gradient name, a single colour
        name, or an explicit list of colour names.
        """
        # Resolve a string argument: predefined gradient or single colour.
        if isinstance(gradient_colors, str):
            gradient_name = gradient_colors
            gradient_colors = []
            if GRADIENTS.get(gradient_name):
                gradient_colors = GRADIENTS.get(gradient_name).get("colors")
            elif COLORS.get(gradient_name):
                gradient_colors = [gradient_name]

        if not gradient_colors:
            # Fall back to the default gradient's colour list, matching the
            # named-gradient lookup above (GRADIENTS entries are dicts with
            # a "colors" key; the bare dict would break the comprehension).
            gradient_colors = GRADIENTS.get('spectral').get("colors")

        self.rgb_list = np.array(
            [COLORS[color.lower()] for color in gradient_colors]).T
        n_colors = len(self.rgb_list[0])

        # Adjacent colour pairs to ease between; invariant across segments,
        # so compute them once outside the loop.
        color_pairs = np.array([(self.rgb_list.T[i], self.rgb_list.T[i + 1])
                                for i in range(n_colors - 1)])

        # Split the gradient into `repeat` segments and fill each one with
        # the full eased colour sequence.
        gradient = np.zeros((3, gradient_length))
        gradient_split = np.array_split(gradient, repeat, axis=1)
        for seg_idx in range(len(gradient_split)):
            segment_length = len(gradient_split[seg_idx][0])
            ease_chunks = np.array_split(np.zeros(segment_length),
                                         n_colors - 1)
            # np.hstack requires a real sequence; a generator expression is
            # rejected by modern NumPy.
            gradient_split[seg_idx] = np.hstack([
                self._color_ease(len(ease_chunks[i]), *color_pairs[i])
                for i in range(n_colors - 1)
            ])
        _LOGGER.info(
            ('Generating new gradient curve for {}'.format(gradient_colors)))
        self._gradient_curve = np.hstack(gradient_split)
Ejemplo n.º 7
0
class ColorChordAudioEffect(AudioReactiveEffect, GradientEffect):
    """Light one strip segment per tracked note, scaled by note amplitude."""

    NAME = "ColorChord"
    CONFIG_SCHEMA = vol.Schema({
        vol.Optional('sensitivity',
                     description='Responsiveness to changes in sound',
                     default=0.7):
        vol.All(vol.Coerce(float), vol.Range(min=0.2, max=0.99)),
        vol.Optional('color_lows',
                     description='Color of low, bassy sounds',
                     default="red"):
        vol.In(list(COLORS.keys())),
    })

    # Note finder instance; created on the first config update.
    _nf = None

    def config_updated(self, config):
        # TODO: pass the real sample rate instead of hard-coding 48 kHz
        self._nf = NoteFinder(48000)
        # Map sensitivity [0.2, 0.99] to a decay alpha of roughly [0, 0.2]
        # so more sensitive settings also decay faster.
        decay_sensitivity = (self._config["sensitivity"] - 0.2) * 0.25
        self._p_filter = self.create_filter(
            alpha_decay=decay_sensitivity,
            alpha_rise=self._config["sensitivity"])
        self.lows_colour = np.array(COLORS[self._config['color_lows']],
                                    dtype=float)

    def audio_data_updated(self, data):
        # The note finder is only created in config_updated; bail out
        # rather than crash if audio arrives before the first update.
        # (The original guarded samples_updated() but then dereferenced
        # self._nf unconditionally.)
        if self._nf is None:
            return
        self._nf.samples_updated(data.audio_sample(True))

        amps = self._nf.get_amplitudes()
        # No tracked notes yet: avoid dividing the strip by zero.
        if not amps:
            return

        # Divide the strip into one segment per note and add each note's
        # amplitude-scaled colour contribution to its segment.
        segment_length = int(self.pixel_count / len(amps))
        p = np.zeros(np.shape(self.pixels))
        for freq, amp in amps.items():
            start = int(freq % len(amps)) * segment_length
            p[start:start + segment_length] += self.lows_colour * amp * 20

        self.pixels = self._p_filter.update(p)
Ejemplo n.º 8
0
    def _generate_bezier_curve(self, gradient_colors, gradient_length):
        """Build a (3, gradient_length) gradient curve and store it on the
        instance.

        The method comes from the gradient definition ("bezier" by
        default): bezier blends colours smoothly; anything else cycles the
        raw colours along the strip.
        """
        gradient_method = "bezier"

        # Resolve a string argument: predefined gradient or single colour.
        if isinstance(gradient_colors, str):
            gradient_name = gradient_colors
            gradient_colors = []
            if GRADIENTS.get(gradient_name):
                gradient_colors = GRADIENTS.get(gradient_name).get("colors")
                gradient_method = GRADIENTS.get(gradient_name).get("method", "bezier")
            elif COLORS.get(gradient_name):
                gradient_colors = [gradient_name]

        if not gradient_colors:
            # Use the fallback gradient's colour list, consistent with the
            # named-gradient lookup above (GRADIENTS entries are dicts with
            # a "colors" key; the bare dict would break the comprehension).
            gradient_colors = GRADIENTS.get('spectral').get("colors")

        self.rgb_list = np.array(
            [COLORS[color.lower()] for color in gradient_colors]).T
        n_colors = len(self.rgb_list[0])

        if gradient_method == "bezier":
            # Blend via Bezier: weight each colour by its Bernstein basis
            # polynomial evaluated across [0, 1].
            t = np.linspace(0.0, 1.0, gradient_length)
            polynomial_array = np.array([
                self._bernstein_poly(i, n_colors - 1, t)
                for i in range(0, n_colors)
            ])
            gradient = np.array([
                np.dot(self.rgb_list[0], polynomial_array),
                np.dot(self.rgb_list[1], polynomial_array),
                np.dot(self.rgb_list[2], polynomial_array),
            ])

            _LOGGER.info(('Generating new gradient curve for {}'.format(gradient_colors)))
            self._gradient_curve = gradient
        else:
            # Non-bezier: repeat the raw colours pixel by pixel.
            gradient = np.zeros((gradient_length, 3))
            for i in range(gradient_length):
                rgb_i = i % n_colors
                gradient[i] = (self.rgb_list[0][rgb_i],
                               self.rgb_list[1][rgb_i],
                               self.rgb_list[2][rgb_i])
            self._gradient_curve = gradient.T
Ejemplo n.º 9
0
class Strobe(AudioReactiveEffect):
    """Strobe the whole strip at a beat-synchronised frequency."""

    NAME = "Strobe"
    CONFIG_SCHEMA = vol.Schema({
        vol.Optional("color", description="Strobe colour", default="white"):
        vol.In(list(COLORS.keys())),
        vol.Optional(
            "frequency",
            description="Strobe frequency",
            default="1/16 (◉﹏◉ )",
        ):
        vol.In(
            list([
                "1/2 (.-. )",
                "1/4 (.o. )",
                "1/8 (◉◡◉ )",
                "1/16 (◉﹏◉ )",
                "1/32 (⊙▃⊙ )",
            ])),
    })

    def config_updated(self, config):
        # Translate the human-readable frequency label into the number of
        # strobes per beat.
        strobes_per_beat = {
            "1/2 (.-. )": 2,
            "1/4 (.o. )": 4,
            "1/8 (◉◡◉ )": 8,
            "1/16 (◉﹏◉ )": 16,
            "1/32 (⊙▃⊙ )": 32,
        }
        self.f = strobes_per_beat[self._config["frequency"]]
        self.color = np.array(COLORS[self._config["color"]], dtype=float)

    def audio_data_updated(self, data):
        # Sawtooth brightness: full at each strobe onset, falling to zero
        # just before the next, with self.f strobes per beat oscillation.
        phase = data.oscillator()[0]
        level = (-phase % (2 / self.f)) * (self.f / 2)
        self.pixels = np.tile(self.color * level, (self.pixel_count, 1))
Ejemplo n.º 10
0
class Strobe(AudioReactiveEffect, GradientEffect):
    """Two-layer strobe effect.

    A full-strip "bass strobe" flashes on loud low-frequency energy with a
    colour that steps through a gradient over time, and a short "note
    strobe" of fixed colour flashes at a random position on high-frequency
    onsets. Both layers decay toward black each rendered frame.
    """

    NAME = "Real Strobe"
    CONFIG_SCHEMA = vol.Schema({
        vol.Optional(
            "gradient_name",
            description="Color scheme for bass strobe to cycle through",
            default="Dancefloor",
        ):
        vol.In(list(GRADIENTS.keys())),
        vol.Optional(
            "color_step",
            description="Amount of color change per bass strobe",
            default=0.0625,
        ):
        vol.All(vol.Coerce(float), vol.Range(min=0, max=0.25)),
        vol.Optional(
            "bass_threshold",
            description="Cutoff for quiet sounds. Higher -> only loud sounds are detected",
            default=0.4,
        ):
        vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
        vol.Optional(
            "bass_strobe_decay_rate",
            description="Bass strobe decay rate. Higher -> decays faster.",
            default=0.5,
        ):
        vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
        vol.Optional(
            "strobe_color",
            description="Colour for note strobes",
            default="white",
        ):
        vol.In(list(COLORS.keys())),
        vol.Optional(
            "strobe_width",
            description="Note strobe width, in pixels",
            default=10,
        ):
        vol.All(vol.Coerce(int), vol.Range(min=0, max=1000)),
        vol.Optional(
            "strobe_decay_rate",
            description="Note strobe decay rate. Higher -> decays faster.",
            default=0.5,
        ):
        vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
    })

    def activate(self, pixel_count):
        """Allocate the per-pixel overlay buffers and the onset queue."""
        super().activate(pixel_count)
        # One overlay per strobe layer; both are faded every frame.
        self.strobe_overlay = np.zeros(np.shape(self.pixels))
        self.bass_strobe_overlay = np.zeros(np.shape(self.pixels))
        # Onsets detected in audio_data_updated are queued and consumed
        # later, when get_pixels() renders a frame.
        self.onsets_queue = queue.Queue()

    def config_updated(self, config):
        """Cache config values and reset the strobe timing state."""
        self.bass_threshold = self._config["bass_threshold"]
        self.color_shift_step = self._config["color_step"]

        self.strobe_color = np.array(COLORS[self._config["strobe_color"]],
                                     dtype=float)
        self.last_color_shift_time = 0
        self.strobe_width = self._config["strobe_width"]
        self.color_shift_delay_in_seconds = 1
        # Position along the gradient [0, 1) used for the bass strobe colour.
        self.color_idx = 0

        self.last_strobe_time = 0
        self.strobe_wait_time = 0
        # Configured "decay rate" is inverted into a per-frame multiplier:
        # higher setting -> smaller multiplier -> faster fade.
        self.strobe_decay_rate = 1 - self._config["strobe_decay_rate"]

        self.last_bass_strobe_time = 0
        self.bass_strobe_wait_time = 0
        self.bass_strobe_decay_rate = (1 -
                                       self._config["bass_strobe_decay_rate"])

    def get_pixels(self):
        """Compose one frame from both overlays and decay them."""
        pixels = np.copy(self.bass_strobe_overlay)

        # Consume one queued onset per frame, painting a fixed-colour
        # strobe of strobe_width pixels at a random position.
        if not self.onsets_queue.empty():
            self.onsets_queue.get()
            strobe_width = min(self.strobe_width, self.pixel_count)
            length_diff = self.pixel_count - strobe_width
            position = (0 if length_diff == 0 else
                        np.random.randint(self.pixel_count - strobe_width))
            self.strobe_overlay[position:position +
                                strobe_width] = self.strobe_color

        pixels += self.strobe_overlay

        # Fade both layers toward black for the next frame.
        self.strobe_overlay *= self.strobe_decay_rate
        self.bass_strobe_overlay *= self.bass_strobe_decay_rate
        self.pixels = pixels
        return self.pixels

    def audio_data_updated(self, data):
        """Advance the gradient colour and trigger bass/note strobes."""
        self._dirty = True

        currentTime = time.time()

        # Step the bass strobe colour along the gradient at most once per
        # color_shift_delay_in_seconds, wrapping at 1.
        # NOTE(review): bass_strobe_color is first assigned inside this
        # branch; it appears to always run before first use since the
        # timers start at 0 — confirm.
        if (currentTime - self.last_color_shift_time >
                self.color_shift_delay_in_seconds):
            self.color_idx += self.color_shift_step
            self.color_idx = self.color_idx % 1
            self.bass_strobe_color = self.get_gradient_color(self.color_idx)
            self.last_color_shift_time = currentTime

        # Loud lows (above threshold) flash the whole strip, rate-limited
        # by bass_strobe_wait_time.
        lows_intensity = np.mean(data.melbank_lows())
        if (lows_intensity > self.bass_threshold
                and currentTime - self.last_bass_strobe_time >
                self.bass_strobe_wait_time):
            self.bass_strobe_overlay = np.tile(self.bass_strobe_color,
                                               (self.pixel_count, 1))
            self.last_bass_strobe_time = currentTime

        # High-frequency onsets enqueue a note strobe for get_pixels(),
        # rate-limited by strobe_wait_time.
        onsets = data.onset()
        if (onsets["high"] and
                currentTime - self.last_strobe_time > self.strobe_wait_time):
            self.onsets_queue.put(True)
            self.last_strobe_time = currentTime
Ejemplo n.º 11
0
class ScrollAudioEffect(AudioReactiveEffect, Effect1D):
    """Scrolling band-energy effect.

    Each frame, the per-band maxima inject colour into the first `speed`
    pixels, and the whole buffer is rolled along the strip and decayed, so
    louder moments travel away from the strip head as fading streaks.
    """

    NAME = "Scroll"

    CONFIG_SCHEMA = vol.Schema({
        vol.Optional('blur', description='Amount to blur the effect', default = 3.0): vol.All(vol.Coerce(float), vol.Range(min=0.0, max=10)),
        vol.Optional('mirror', description='Mirror the effect', default = True): bool,
        vol.Optional('speed', description='Speed of the effect', default = 5):  vol.All(vol.Coerce(int), vol.Range(min=1, max=10)),
        vol.Optional('decay', description='Decay rate of the scroll', default = 0.97):  vol.All(vol.Coerce(float), vol.Range(min=0.8, max=1.0)),
        vol.Optional('threshold', description='Cutoff for quiet sounds. Higher -> only loud sounds are detected', default = 0.0):  vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
        vol.Optional('color_lows', description='Color of low, bassy sounds', default = "red"): vol.In(list(COLORS.keys())),
        vol.Optional('color_mids', description='Color of midrange sounds', default = "green"): vol.In(list(COLORS.keys())),
        vol.Optional('color_high', description='Color of high sounds', default = "blue"): vol.In(list(COLORS.keys())),
    })

    # Rolling (pixel_count, 3) buffer; allocated in activated().
    lastValues = None

    def activated(self):
        # TODO: Determine how buffers based on the pixels should be
        # allocated. Technically there is no guarantee that the effect
        # is bound to a device while the config gets updated. Might need
        # to move to a model where effects are created for a device and
        # must be destroyed and recreated to be moved to another device.
        self.lows_colour = np.array(COLORS[self._config['color_lows']], dtype=float)
        self.mids_colour = np.array(COLORS[self._config['color_mids']], dtype=float)
        self.high_colour = np.array(COLORS[self._config['color_high']], dtype=float)

        # A single threshold knob gates all three bands; mids and highs
        # use progressively smaller cutoffs.
        self.lows_cutoff = self._config['threshold']
        self.mids_cutoff = self._config['threshold'] / 4
        self.high_cutoff = self._config['threshold'] / 7

        self.lastValues = np.zeros((self.pixel_count, 3))

    def audio_data_updated(self, data):
        # Divide the melbank into lows, mids and highs.
        # Squaring emphasises peaks; clip keeps the value in [0, 1].
        lows_max = np.clip(np.max(data.melbank_lows() ** 2), 0, 1)
        mids_max = np.clip(np.max(data.melbank_mids() ** 2), 0, 1)
        highs_max = np.clip(np.max(data.melbank_highs() ** 2), 0, 1)

        # Gate out bands that are below their cutoff entirely.
        if lows_max < self.lows_cutoff:
            lows_max = 0
        if mids_max < self.mids_cutoff:
            mids_max = 0
        if highs_max < self.high_cutoff:
            highs_max = 0

        # Compute the value for each range based on the max
        #lows_val = (np.array((255,0,0)) * lows_max)
        #mids_val = (np.array((0,255,0)) * mids_max)
        #high_val = (np.array((0,0,255)) * highs_max)

        # Roll the effect and apply the decay.
        # NOTE(review): `self.config` here vs `self._config` in activated()
        # — presumably an alias property on the base class; confirm both
        # refer to the same validated config.
        speed = self.config['speed']
        self.lastValues[speed:,:] = self.lastValues[:-speed,:]
        self.lastValues = (self.lastValues * self.config['decay'])

        # Add in the new color from the signal maxes
        #self.output[:speed, 0] = lows_val[0] + mids_val[0] + high_val[0]
        #self.output[:speed, 1] = lows_val[1] + mids_val[1] + high_val[1]
        #self.output[:speed, 2] = lows_val[2] + mids_val[2] + high_val[2]

        self.lastValues[:speed] = self.lows_colour * lows_max
        self.lastValues[:speed] += self.mids_colour * mids_max
        self.lastValues[:speed] += self.high_colour * highs_max

        # Set the pixels as a 1xN RGB image (Effect1D consumes an image);
        # float values are truncated to unsigned bytes.
        self.pixels = Image.fromarray(self.lastValues.reshape((1, -1, 3)).astype(np.dtype('B')))
Ejemplo n.º 12
0
class Effect(BaseRegistry):
    """
    Manages an effect: holds the validated configuration and the pixel
    buffer, and applies the base output filters (flip, mirror, background
    colour, brightness, blur) whenever pixels are set.
    """

    NAME = ""
    # Pixel buffer; allocated in activate(), released in deactivate().
    _pixels = None
    # Set whenever the pixels change, so consumers know to re-render.
    _dirty = False
    _config = None
    _active = False

    # Basic effect properties that can be applied to all effects
    CONFIG_SCHEMA = vol.Schema({
        vol.Optional(
            "blur",
            description="Amount to blur the effect",
            default=0.0,
        ):
        vol.All(vol.Coerce(float), vol.Range(min=0.0, max=10)),
        vol.Optional("flip", description="Flip the effect", default=False):
        bool,
        vol.Optional(
            "mirror",
            description="Mirror the effect",
            default=False,
        ):
        bool,
        vol.Optional(
            "brightness",
            description="Brightness of strip",
            default=1.0,
        ):
        vol.All(vol.Coerce(float), vol.Range(min=0.0, max=1.0)),
        vol.Optional(
            "background_color",
            description="Apply a background colour",
            default="black",
        ):
        vol.In(list(COLORS.keys())),
    })

    def __init__(self, ledfx, config):
        self._ledfx = ledfx
        self._dirty_callback = None
        self.update_config(config)

    def __del__(self):
        # Make sure the output channel is detached when the effect is
        # garbage collected.
        if self._active:
            self.deactivate()

    def activate(self, pixel_count):
        """Attaches an output channel to the effect"""
        self._pixels = np.zeros((pixel_count, 3))
        self._active = True

        _LOGGER.info(f"Effect {self.NAME} activated.")

    def deactivate(self):
        """Detaches an output channel from the effect"""
        self._pixels = None
        self._active = False

        _LOGGER.info(f"Effect {self.NAME} deactivated.")

    def update_config(self, config):
        """Validate `config` against the schema, apply it, and notify every
        class in the hierarchy that overrides config_updated().

        Raises whatever the voluptuous schema raises on invalid config.
        """
        # TODO: Sync locks to ensure everything is thread safe
        validated_config = type(self).schema()(config)
        self._config = validated_config

        self._bg_color = np.array(COLORS[self._config["background_color"]],
                                  dtype=float)

        # Iterate all the base classes and check to see if there is a custom
        # implementation of config updates. If so, notify that class.
        # (A broken, never-called `inherited()` helper that referenced a
        # nonexistent attribute was removed here.)
        valid_classes = list(type(self).__bases__)
        valid_classes.append(type(self))
        for base in valid_classes:
            if base.config_updated != super(base, base).config_updated:
                base.config_updated(self, self._config)

        _LOGGER.info(
            f"Effect {self.NAME} config updated to {validated_config}.")

        self.configured_blur = self._config["blur"]

    def config_updated(self, config):
        """
        Optional event for when an effect's config is updated. This
        should be used by the subclass only if they need to build up
        complex properties off the configuration, otherwise the config
        should just be referenced in the effect's loop directly
        """
        self.configured_blur = self._config["blur"]

    @property
    def is_active(self):
        """Return if the effect is currently active"""
        return self._active

    def get_pixels(self):
        return self.pixels

    @property
    def pixels(self):
        """Returns the pixels for the channel"""
        if not self._active:
            raise Exception(
                "Attempting to access pixels before effect is active")

        return np.copy(self._pixels)

    @pixels.setter
    def pixels(self, pixels):
        """Sets the pixels for the channel, applying the base filters"""
        if not self._active:
            _LOGGER.warning(
                "Attempting to set pixels before effect is active. Dropping.")
            return

        if isinstance(pixels, tuple):
            self._pixels = np.copy(pixels)
        elif isinstance(pixels, np.ndarray):

            # Apply some of the base output filters if necessary
            if self._config["flip"]:
                pixels = flip_pixels(pixels)
            if self._config["mirror"]:
                pixels = mirror_pixels(pixels)
            if self._config["background_color"]:
                # TODO: colours in future should have an alpha value, which would work nicely to apply to dim the background colour
                # for now, just set it a bit less bright.
                bg_brightness = np.max(pixels, axis=1)
                bg_brightness = (255 - bg_brightness) / 510
                _bg_color_array = np.tile(self._bg_color, (len(pixels), 1))
                pixels += np.multiply(_bg_color_array.T, bg_brightness).T
            if self._config["brightness"] is not None:
                pixels = brightness_pixels(pixels, self._config["brightness"])
            # If the configured blur is greater than 0 we need to blur it
            if self.configured_blur != 0.0:
                pixels = blur_pixels(pixels=pixels, sigma=self.configured_blur)
            self._pixels = np.copy(pixels)
        else:
            raise TypeError()

        self._dirty = True

        if self._dirty_callback:
            self._dirty_callback()

    def setDirtyCallback(self, callback):
        self._dirty_callback = callback

    @property
    def pixel_count(self):
        """Returns the number of pixels for the channel"""
        return len(self.pixels)

    @property
    def name(self):
        return self.NAME
Ejemplo n.º 13
0
class Text(TemporalEffect):
    """Scroll a line of rendered text horizontally across the display."""

    NAME = "Text"
    CONFIG_SCHEMA = vol.Schema(
        {
            vol.Required('text',
                         description='Text to display',
                         default="Lorem Ipsum"):
            vol.Coerce(str),
            vol.Optional('font_size',
                         description='Size of the font',
                         default=6):
            vol.All(vol.Coerce(int), vol.Range(min=1, max=20)),
            vol.Optional('font_name',
                         description='Font',
                         default="Berkelium1541.ttf"):
            vol.In(list(FONT_LIST)),
            vol.Optional('font_antialias',
                         description='Use antialising',
                         default=False):
            bool,
            vol.Optional('text_color',
                         description='Text color',
                         default='white'):
            vol.In(list(COLORS.keys())),
            vol.Optional('bg_color',
                         description='Background color',
                         default='red'):
            vol.In(list(COLORS.keys())),
        })

    # Scroll clock, advanced each effect_loop tick.
    _time = 0.0

    def config_updated(self, config):
        """Render the configured text to an RGBA image and reset the scroll."""
        fontpath = os.path.join(os.path.dirname(__file__),
                                "fonts/" + config['font_name'])
        font = ImageFont.truetype(fontpath, config['font_size'])

        # determine pixel size of our text
        self._size = font.getsize(config['text'])

        # render text to a transparent image that may be larger than the display
        self._fontimage = Image.new("RGBA", self._size)

        draw = ImageDraw.Draw(self._fontimage)
        if not config['font_antialias']:
            draw.fontmode = "1"  # disable antialiasing

        draw.text((0, 0), config['text'], font=font, fill=config['text_color'])
        # Bug fix: the original assigned a local `_time = 0`, so the scroll
        # position was never actually reset on a config change.
        self._time = 0.0

    def effect_loop(self):
        self._time = self._time + 0.01

        # background color
        _image = Image.new("RGB", self._dimensions, self.config['bg_color'])

        # Wrap the horizontal offset so the text loops, with a 14px margin
        # on each side of the scroll range.
        xoffset = int(self._time) % (self._size[0] + 2 * 14)

        _image.paste(self._fontimage, (14 - xoffset, 0),
                     self._fontimage.convert('RGBA'))

        self.image = _image
Ejemplo n.º 14
0
class ScrollAudioEffect(AudioReactiveEffect):
    """Scrolling band-energy effect.

    Each frame, the per-band maxima inject colour into the first `speed`
    pixels of a persistent buffer, which is rolled along the strip and
    decayed, so louder moments travel away from the strip head as fading
    streaks.
    """

    NAME = "Scroll"

    CONFIG_SCHEMA = vol.Schema(
        {
            vol.Optional(
                "blur",
                description="Amount to blur the effect",
                default=3.0,
            ): vol.All(vol.Coerce(float), vol.Range(min=0.0, max=10)),
            vol.Optional(
                "mirror",
                description="Mirror the effect",
                default=True,
            ): bool,
            vol.Optional(
                "speed", description="Speed of the effect", default=5
            ): vol.All(vol.Coerce(int), vol.Range(min=1, max=10)),
            vol.Optional(
                "decay",
                description="Decay rate of the scroll",
                default=0.97,
            ): vol.All(vol.Coerce(float), vol.Range(min=0.8, max=1.0)),
            vol.Optional(
                "threshold",
                description="Cutoff for quiet sounds. Higher -> only loud sounds are detected",
                default=0.0,
            ): vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
            vol.Optional(
                "color_lows",
                description="Color of low, bassy sounds",
                default="red",
            ): vol.In(list(COLORS.keys())),
            vol.Optional(
                "color_mids",
                description="Color of midrange sounds",
                default="green",
            ): vol.In(list(COLORS.keys())),
            vol.Optional(
                "color_high",
                description="Color of high sounds",
                default="blue",
            ): vol.In(list(COLORS.keys())),
        }
    )

    def config_updated(self, config):
        """Reset the scroll buffer and cache colours and cutoffs."""
        # TODO: Determine how buffers based on the pixels should be
        # allocated. Technically there is no guarantee that the effect
        # is bound to a device while the config gets updated. Might need
        # to move to a model where effects are created for a device and
        # must be destroyed and recreated to be moved to another device.
        # Buffer is lazily (re)initialised in audio_data_updated.
        self.output = None

        self.lows_colour = np.array(
            COLORS[self._config["color_lows"]], dtype=float
        )
        self.mids_colour = np.array(
            COLORS[self._config["color_mids"]], dtype=float
        )
        self.high_colour = np.array(
            COLORS[self._config["color_high"]], dtype=float
        )

        # One threshold knob gates all three bands; mids and highs use
        # progressively smaller cutoffs.
        self.lows_cutoff = self._config["threshold"]
        self.mids_cutoff = self._config["threshold"] / 4
        self.high_cutoff = self._config["threshold"] / 7

    def audio_data_updated(self, data):

        # Lazily seed the scroll buffer from the current pixel frame.
        if self.output is None:
            self.output = self.pixels

        # Divide the melbank into lows, mids and highs.
        # Squaring emphasises peaks; clip keeps the value in [0, 1].
        lows_max = np.clip(np.max(data.melbank_lows() ** 2), 0, 1)
        mids_max = np.clip(np.max(data.melbank_mids() ** 2), 0, 1)
        highs_max = np.clip(np.max(data.melbank_highs() ** 2), 0, 1)

        # Gate out bands below their cutoff entirely.
        if lows_max < self.lows_cutoff:
            lows_max = 0
        if mids_max < self.mids_cutoff:
            mids_max = 0
        if highs_max < self.high_cutoff:
            highs_max = 0

        # Compute the value for each range based on the max
        # lows_val = (np.array((255,0,0)) * lows_max)
        # mids_val = (np.array((0,255,0)) * mids_max)
        # high_val = (np.array((0,0,255)) * highs_max)

        # Roll the effect and apply the decay
        # NOTE(review): `self.config` here vs `self._config` above —
        # presumably an alias property on the base class; confirm.
        speed = self.config["speed"]
        self.output[speed:, :] = self.output[:-speed, :]
        self.output = self.output * self.config["decay"]

        # Add in the new color from the signal maxes
        # self.output[:speed, 0] = lows_val[0] + mids_val[0] + high_val[0]
        # self.output[:speed, 1] = lows_val[1] + mids_val[1] + high_val[1]
        # self.output[:speed, 2] = lows_val[2] + mids_val[2] + high_val[2]

        self.output[:speed] = self.lows_colour * lows_max
        self.output[:speed] += self.mids_colour * mids_max
        self.output[:speed] += self.high_colour * highs_max

        # Set the pixels
        self.pixels = self.output
Ejemplo n.º 15
0
class BladePowerAudioEffect(AudioReactiveEffect):
    """Displays a single bar whose length follows the smoothed power of a
    configurable frequency band."""

    NAME = "Blade Power"

    CONFIG_SCHEMA = vol.Schema({
        vol.Optional(
            "mirror",
            description="Mirror the effect",
            default=False,
        ): bool,
        vol.Optional(
            "blur",
            description="Amount to blur the effect",
            default=2,
        ): vol.All(vol.Coerce(float), vol.Range(min=0.0, max=10)),
        vol.Optional(
            "multiplier",
            description="Make the reactive bar bigger/smaller",
            default=0.5,
        ): vol.All(vol.Coerce(float), vol.Range(min=0.0, max=1.0)),
        vol.Optional(
            "background_color",
            description="Color of Background",
            default="orange",
        ): vol.In(list(COLORS.keys())),
        vol.Optional(
            "color",
            description="Color of bar",
            default="brown",
        ): vol.In(list(COLORS.keys())),
        vol.Optional(
            "frequency_range",
            description="Frequency range for the beat detection",
            default="Bass (60-250Hz)",
        ): vol.In(list(FREQUENCY_RANGES.keys())),
    })

    def config_updated(self, config):
        """Build the smoothing filter, bar colour and sample frequencies."""
        self._bar_filter = self.create_filter(alpha_decay=0.1, alpha_rise=0.99)
        self.bar_color = np.array(COLORS[self._config["color"]], dtype=float)
        # 20 sample points spanning the configured frequency band.
        selected_range = FREQUENCY_RANGES[self.config["frequency_range"]]
        self._frequency_range = np.linspace(
            selected_range.min,
            selected_range.max,
            20,
        )

    def audio_data_updated(self, data):
        """Light the leading portion of the strip in proportion to the
        filtered power of the configured frequency band."""
        frame = np.zeros(np.shape(self.pixels))
        # Peak melbank power within the selected band, scaled by the user
        # multiplier, then smoothed through the bar filter.
        power = np.max(data.sample_melbank(list(self._frequency_range)))
        level = self._bar_filter.update(power * self.config["multiplier"])
        # Convert the normalised level into a pixel count and paint the bar.
        frame[:int(level * self.pixel_count)] = self.bar_color
        self.pixels = frame
Ejemplo n.º 16
0
class RainAudioEffect(AudioReactiveEffect):
    """Spawns raindrop animations at random pixels when the low, mid or
    high band rises sharply above its smoothed baseline level."""

    NAME = "Rain"
    CONFIG_SCHEMA = vol.Schema({
        vol.Optional('mirror', description='Mirror the effect', default=True):
        bool,
        # TODO drops should be controlled by some sort of effectlet class, which will provide a list of available drop names rather than just this static range
        vol.Optional('lows_colour',
                     description='Colour for low sounds, ie beats',
                     default='white'):
        vol.In(list(COLORS.keys())),
        vol.Optional('mids_colour',
                     description='Colour for mid sounds, ie vocals',
                     default='red'):
        vol.In(list(COLORS.keys())),
        vol.Optional('high_colour',
                     description='Colour for high sounds, ie hi hat',
                     default='blue'):
        vol.In(list(COLORS.keys())),
        vol.Optional('lows_sensitivity',
                     description='Sensitivity to low sounds',
                     default=0.1):
        vol.All(vol.Coerce(float), vol.Range(min=0.03, max=0.3)),
        vol.Optional('mids_sensitivity',
                     description='Sensitivity to mid sounds',
                     default=0.05):
        vol.All(vol.Coerce(float), vol.Range(min=0.03, max=0.3)),
        vol.Optional('high_sensitivity',
                     description='Sensitivity to high sounds',
                     default=0.1):
        vol.All(vol.Coerce(float), vol.Range(min=0.03, max=0.3)),
        vol.Optional('raindrop_animation',
                     description='Droplet animation style',
                     default=EFFECTLET_LIST[0]):
        vol.In(list(EFFECTLET_LIST)),
    })

    def config_updated(self, config):
        """Load the selected drop animation and reset the band filters."""
        # this could be cleaner but it's temporary, until an effectlet class is made to handle this stuff
        self.drop_animation = np.load(
            os.path.join(os.path.dirname(__file__),
                         "effectlets/" + config['raindrop_animation']))

        # Each row of the loaded array is one frame of a single drop;
        # columns are per-pixel intensities centred on the drop location.
        self.n_frames, self.frame_width = np.shape(self.drop_animation)
        self.frame_centre_index = self.frame_width // 2
        self.frame_side_lengths = self.frame_centre_index - 1

        # Smooths the band intensities; a drop fires when the raw level
        # jumps above this baseline by more than a band's sensitivity.
        self.intensity_filter = self.create_filter(alpha_decay=0.5,
                                                   alpha_rise=0.99)
        self.filtered_intensities = np.zeros(3)

        # Drop state buffers are allocated lazily in update_drop_frames()
        # because pixel_count is not available here yet.
        self.first_call = True

    def new_drop(self, location, colour):
        """
        Add a new drop animation
        TODO (?) this method overwrites a running drop animation in the same location
        would need a significant restructure to fix
        """
        self.drop_frames[location] = 1
        self.drop_colours[:, location] = colour

    def update_drop_frames(self):
        """Advance every active drop by one frame, retiring finished ones."""
        # TODO these should be made in config_updated or __init__ when pixel count is available there
        if self.first_call:
            # drop_frames[i]: current frame index of the drop at pixel i (0 = inactive).
            self.drop_frames = np.zeros(self.pixel_count, dtype=int)
            # drop_colours[:, i]: RGB colour of the drop at pixel i.
            self.drop_colours = np.zeros((3, self.pixel_count))
            self.first_call = False

        # Set any drops at final frame back to 0 and remove colour data
        finished_drops = self.drop_frames >= self.n_frames - 1
        self.drop_frames[finished_drops] = 0
        self.drop_colours[:, finished_drops] = 0
        # Add one to any running frames
        self.drop_frames[self.drop_frames > 0] += 1

    def get_drops(self):
        """
        Get coloured pixel data of all drops overlaid
        """
        # 2d array containing colour intensity data
        # (padded by frame_width so drops near either end can overflow).
        overlaid_frames = np.zeros((3, self.pixel_count + self.frame_width))
        # Indexes of active drop animations
        drop_indices = np.flatnonzero(self.drop_frames)
        # TODO vectorize this to remove for loop
        for index in drop_indices:
            # Current animation frame of this drop, tinted by its colour
            # channel-by-channel.
            coloured_frame = [
                self.drop_animation[self.drop_frames[index]] *
                self.drop_colours[colour, index] for colour in range(3)
            ]
            overlaid_frames[:,
                            index:index + self.frame_width] += coloured_frame

        np.clip(overlaid_frames, 0, 255, out=overlaid_frames)
        # Trim the padding so the slice lines up with the strip, then
        # transpose to the (pixel, colour) layout used by self.pixels.
        return overlaid_frames[:, self.
                               frame_side_lengths:self.frame_side_lengths +
                               self.pixel_count].T

    def audio_data_updated(self, data):
        """Fire new drops on sharp band rises, then render all drops."""
        # Calculate the low, mids, and high indexes scaling based on the pixel count
        intensities = np.array([
            np.mean(data.melbank_lows()),
            np.mean(data.melbank_mids()),
            np.mean(data.melbank_highs())
        ])

        self.update_drop_frames()

        # A drop spawns at a random pixel when the instantaneous band level
        # exceeds the filtered baseline by that band's sensitivity.
        if intensities[0] - self.filtered_intensities[0] > self._config[
                "lows_sensitivity"]:
            self.new_drop(randint(0, self.pixel_count - 1),
                          COLORS.get(self._config['lows_colour']))
        if intensities[1] - self.filtered_intensities[1] > self._config[
                "mids_sensitivity"]:
            self.new_drop(randint(0, self.pixel_count - 1),
                          COLORS.get(self._config['mids_colour']))
        if intensities[2] - self.filtered_intensities[2] > self._config[
                "high_sensitivity"]:
            self.new_drop(randint(0, self.pixel_count - 1),
                          COLORS.get(self._config['high_colour']))

        self.filtered_intensities = self.intensity_filter.update(intensities)

        self.pixels = self.get_drops()
Ejemplo n.º 17
0
class EnergyAudioEffect(AudioReactiveEffect):
    """Paints the strip from its start with bass/mid/high colours whose
    reach tracks the mean melbank energy of each band."""

    NAME = "Energy"
    CONFIG_SCHEMA = vol.Schema({
        vol.Optional('blur',
                     description='Amount to blur the effect',
                     default=4.0):
        vol.All(vol.Coerce(float), vol.Range(min=0.0, max=10)),
        vol.Optional('mirror', description='Mirror the effect', default=True):
        bool,
        vol.Optional('color_lows',
                     description='Color of low, bassy sounds',
                     default="red"):
        vol.In(list(COLORS.keys())),
        vol.Optional('color_mids',
                     description='Color of midrange sounds',
                     default="green"):
        vol.In(list(COLORS.keys())),
        vol.Optional('color_high',
                     description='Color of high sounds',
                     default="blue"):
        vol.In(list(COLORS.keys())),
        vol.Optional('sensitivity',
                     description='Responsiveness to changes in sound',
                     default=0.7):
        vol.All(vol.Coerce(float), vol.Range(min=0.2, max=0.99)),
        vol.Optional('mixing_mode',
                     description='Mode of combining colours',
                     default="overlap"):
        vol.In(["additive", "overlap"]),
    })

    def config_updated(self, config):
        """Build the pixel filter and pre-compute the three band colours."""
        # Map sensitivity (0.2-0.99) onto the filter's decay constant.
        sensitivity = self._config["sensitivity"]
        self._p_filter = self.create_filter(
            alpha_decay=(sensitivity - 0.2) * 0.25,
            alpha_rise=sensitivity)

        self.lows_colour = np.array(
            COLORS[self._config['color_lows']], dtype=float)
        self.mids_colour = np.array(
            COLORS[self._config['color_mids']], dtype=float)
        self.high_colour = np.array(
            COLORS[self._config['color_high']], dtype=float)

    def audio_data_updated(self, data):
        """Scale each band's mean melbank level to a pixel span, colour the
        spans, and push the filtered result to the strip."""
        band_extents = (
            int(np.mean(self.pixel_count * data.melbank_lows())),
            int(np.mean(self.pixel_count * data.melbank_mids())),
            int(np.mean(self.pixel_count * data.melbank_highs())),
        )
        band_colours = (self.lows_colour, self.mids_colour, self.high_colour)

        frame = np.zeros(np.shape(self.pixels))
        mode = self._config["mixing_mode"]
        if mode == "additive":
            # Lows overwrite, mids and highs accumulate on top.
            frame[:band_extents[0]] = band_colours[0]
            frame[:band_extents[1]] += band_colours[1]
            frame[:band_extents[2]] += band_colours[2]
        elif mode == "overlap":
            # Later bands overwrite earlier ones where they overlap.
            for extent, colour in zip(band_extents, band_colours):
                frame[:extent] = colour

        # Filter and update the pixel values
        self.pixels = self._p_filter.update(frame)
Ejemplo n.º 18
0
class EnergyAudioEffect(AudioReactiveEffect):
    """Colours the strip from its start with lows/mids/highs bands whose
    reach tracks each band's melbank energy; optionally swaps one band's
    colour for a random one on every detected beat."""

    NAME = "Energy"
    CONFIG_SCHEMA = vol.Schema({
        vol.Optional(
            "blur",
            description="Amount to blur the effect",
            default=4.0,
        ):
        vol.All(vol.Coerce(float), vol.Range(min=0.0, max=10)),
        vol.Optional(
            "mirror",
            description="Mirror the effect",
            default=True,
        ):
        bool,
        vol.Optional(
            "color_cycler",
            description="Change colors in time with the beat",
            default=False,
        ):
        bool,
        vol.Optional(
            "color_lows",
            description="Color of low, bassy sounds",
            default="red",
        ):
        vol.In(list(COLORS.keys())),
        vol.Optional(
            "color_mids",
            description="Color of midrange sounds",
            default="green",
        ):
        vol.In(list(COLORS.keys())),
        vol.Optional(
            "color_high",
            description="Color of high sounds",
            default="blue",
        ):
        vol.In(list(COLORS.keys())),
        vol.Optional(
            "sensitivity",
            description="Responsiveness to changes in sound",
            default=0.85,
        ):
        vol.All(vol.Coerce(float), vol.Range(min=0.3, max=0.99)),
        vol.Optional(
            "mixing_mode",
            description="Mode of combining each frequencies' colours",
            default="overlap",
        ):
        vol.In(["additive", "overlap"]),
    })

    def config_updated(self, config):
        """Build the pixel filter and pre-compute the three band colours."""
        # Map sensitivity (0.3-0.99) onto the filter's decay constant
        # (roughly 0.025-0.2).
        decay_sensitivity = (self._config["sensitivity"] - 0.2) * 0.25
        self._p_filter = self.create_filter(
            alpha_decay=decay_sensitivity,
            alpha_rise=self._config["sensitivity"],
        )

        # Index of the band (0=lows, 1=mids, 2=highs) whose colour the
        # beat-driven cycler replaces next.
        self.color_cycler = 0

        self.lows_colour = np.array(COLORS[self._config["color_lows"]],
                                    dtype=float)
        self.mids_colour = np.array(COLORS[self._config["color_mids"]],
                                    dtype=float)
        self.high_colour = np.array(COLORS[self._config["color_high"]],
                                    dtype=float)

    def audio_data_updated(self, data):
        """Render one frame from the current melbank data."""
        # Calculate the low, mids, and high indexes scaling based on the pixel
        # count
        lows_idx = int(np.mean(self.pixel_count * data.melbank_lows()))
        mids_idx = int(np.mean(self.pixel_count * data.melbank_mids()))
        highs_idx = int(np.mean(self.pixel_count * data.melbank_highs()))

        if self._config["color_cycler"]:
            # NOTE(review): oscillator() is assumed to be truthy on a beat —
            # confirm against the audio analysis source.
            beat_now = data.oscillator()
            if beat_now:
                # Cycle between 0,1,2 for lows, mids and highs
                self.color_cycler = (self.color_cycler + 1) % 3
                # Fix: store as a float ndarray for consistency with
                # config_updated — raw COLORS entries are plain sequences.
                color = np.array(
                    COLORS[np.random.choice(list(COLORS.keys()))],
                    dtype=float,
                )

                if self.color_cycler == 0:
                    self.lows_colour = color
                elif self.color_cycler == 1:
                    self.mids_colour = color
                elif self.color_cycler == 2:
                    self.high_colour = color

        # Build the new energy profile based on the mids, highs and lows setting
        # the colors as red, green, and blue channel respectively
        p = np.zeros(np.shape(self.pixels))
        if self._config["mixing_mode"] == "additive":
            p[:lows_idx] = self.lows_colour
            p[:mids_idx] += self.mids_colour
            p[:highs_idx] += self.high_colour
        elif self._config["mixing_mode"] == "overlap":
            p[:lows_idx] = self.lows_colour
            p[:mids_idx] = self.mids_colour
            p[:highs_idx] = self.high_colour

        # Filter and update the pixel values
        self.pixels = self._p_filter.update(p)