Example #1
def getRGB(h,l,s):
    """
    Hacking some color conversion here for Firefly
    This wraps colorsys's conversion with some type conversion and caching
    It's not nice enough to keep.
    """
    color = cache.get((h,l,s), None)

    if color is None:
        color = colorsys.hls_to_rgb(float(h) / cache_steps, float(l) / cache_steps, float(s) / cache_steps)
        color = (clip(0, int(color[0]*255), 255), clip(0, int(color[1]*255), 255), clip(0, int(color[2]*255), 255))
        cache[(h,l,s)] = color
        #print "cache miss", h,l,s,color

    return color
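
Every example on this page relies on a clip(minimum, value, maximum) helper from the surrounding project that the snippets do not show. Judging from the call sites (for instance clip(0.0, progress, 1.0) and clip(0, int(color[0]*255), 255) above), a minimal stand-in could look like the following; this is an assumed re-implementation, not the project's own helper:

def clip(low, value, high):
    """Clamp value into the closed range [low, high].

    Assumed helper, reconstructed from how the examples call it.
    """
    return max(low, min(value, high))
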
Example #2
    def get_color(self, progress):
        """
        Given a progress value between 0 and 1, returns the color for that
        progress as a (h, l, s) tuple with float values
        """

        progress = clip(0.0, progress, 1.0)

        color = self.color_cache.get(progress, None)
        if color is None:
            if progress > 1.0:
                progress = 1.0
            
            overall_progress = progress * (len(self.keyframes)-1)
            stage = int(overall_progress)
            stage_progress = overall_progress - stage

            # special case stage_progress=0, so if progress=1, we don't get
            # an IndexError
            if stage_progress == 0:
                return self.keyframes[stage]
                
            frame1 = self.keyframes[stage]
            frame1_weight = 1 - stage_progress

            frame2 = self.keyframes[stage + 1]
            frame2_weight = stage_progress

            color = tuple([c1 * frame1_weight + c2 * frame2_weight for c1, c2 in zip(frame1, frame2)])
            self.color_cache[progress] = color
        return color
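
get_color() above caches a linear interpolation between HLS keyframes. Since the class attributes (self.keyframes, self.color_cache) are not shown, here is a standalone, stateless sketch of the same interpolation logic; the function name and the inline clamp are ours, for illustration only:

def interpolate_keyframes(keyframes, progress):
    """Linearly interpolate an (h, l, s) color from a list of keyframe tuples."""
    progress = max(0.0, min(progress, 1.0))  # stands in for clip(0.0, progress, 1.0)
    overall_progress = progress * (len(keyframes) - 1)
    stage = int(overall_progress)
    stage_progress = overall_progress - stage

    if stage_progress == 0:
        # Exactly on a keyframe (also avoids an IndexError at progress == 1.0)
        return keyframes[stage]

    frame1, frame2 = keyframes[stage], keyframes[stage + 1]
    return tuple(c1 * (1 - stage_progress) + c2 * stage_progress
                 for c1, c2 in zip(frame1, frame2))

# interpolate_keyframes([(0.0, 0.5, 1.0), (0.66, 0.5, 1.0)], 0.5) -> (0.33, 0.5, 1.0)
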
Example #3
    def draw(self, dt):
        if self._mixer.is_onset():
            self._offset_z = -self._offset_z
            
        self._setup_pars()
        self._offset_x += dt * self.parameter('speed').get() * math.cos(self.parameter('angle').get() * 2 * math.pi)
        self._offset_y += dt * self.parameter('speed').get() * math.sin(self.parameter('angle').get() * 2 * math.pi)
        self._offset_z += dt * self.parameter('color-speed').get()
        if self._mixer.is_onset():
            posterization = 2
        else:
            posterization = self.parameter('resolution').get()

        luminance_table = []
        luminance = 0.0
        for input in range(self._luminance_steps):
            if input > self.parameter('blackout').get() * self._luminance_steps:
                luminance -= 0.01
            elif input < self.parameter('whiteout').get() * self._luminance_steps:
                luminance += 0.01
            else:
                luminance = 0.5
            luminance = clip(0, luminance, 1.0)
            luminance_table.append(math.floor(luminance * posterization) / posterization)
            
        
        for pixel, location in self.pixel_locations:
            hue = (1.0 + snoise3(self.scale * location[0] + self._offset_x, self.scale * location[1] + self._offset_y, self._offset_z, 1, 0.5, 0.5)) / 2
            hue = self.hue_min + ((math.floor(hue * posterization) / posterization) * (self.hue_max - self.hue_min))
            
            brightness = self._luminance_steps * (1.0 + snoise3(self.luminance_scale * location[0] + self._offset_x, self.luminance_scale * location[1] + self._offset_y, self._offset_z, 1, 0.5, 0.5)) / 2
            
            self.setPixelHLS(pixel, (hue, luminance_table[int(brightness)], 1.0))
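
Both the hue and the luminance table in this draw() are quantised with the expression math.floor(value * posterization) / posterization, which snaps a continuous value in [0, 1) onto a fixed number of discrete levels. In isolation (the function name is ours, for illustration):

import math

def posterize(value, levels):
    """Quantise a value in [0, 1) onto `levels` discrete steps."""
    return math.floor(value * levels) / levels

# posterize(0.37, 4) -> 0.25    posterize(0.9, 4) -> 0.75
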
Example #4
    def get_color(self, progress):
        """
        Given a progress value between 0 and steps, returns the color for that
        progress as a (h, l, s) tuple with float values
        """

        progress = clip(0, int(progress), self._steps)

        return self.color_cache[progress]
Example #5
    def draw(self, dt):
        if self._mixer.is_onset():
            self.hue_inner = math.fmod(self.hue_inner + self.parameter("hue-step").get(), 1.0)
            self.luminance_offset += self.parameter("hue-step").get()

        self.hue_inner += dt * self.parameter("speed").get()
        self.wave1_offset += self.parameter("wave1-speed").get() * dt
        self.wave2_offset += self.parameter("wave2-speed").get() * dt
        self.luminance_offset += self.parameter("luminance-speed").get() * dt

        luminance_table = []
        luminance = 0.0
        for input in range(self._luminance_steps):
            if input > self.parameter("blackout").get() * self._luminance_steps:
                luminance -= 0.01
                luminance = clip(0, luminance, 1.0)
            elif input < self.parameter("whiteout").get() * self._luminance_steps:
                luminance += 0.1
                luminance = clip(0, luminance, 1.0)
            else:
                luminance -= 0.01
                luminance = clip(0.5, luminance, 1.0)
            luminance_table.append(luminance)

        wave1_period = self.parameter("wave1-period").get()
        wave1_amplitude = self.parameter("wave1-amplitude").get()
        wave2_period = self.parameter("wave2-period").get()
        wave2_amplitude = self.parameter("wave2-amplitude").get()
        luminance_scale = self.parameter("luminance-scale").get()

        for pixel in self.pixels:
            wave1 = abs(math.cos(self.wave1_offset + self.pixel_angles[pixel] * wave1_period) * wave1_amplitude)
            wave2 = abs(math.cos(self.wave2_offset + self.pixel_angles[pixel] * wave2_period) * wave2_amplitude)
            hue = self.pixel_distances[pixel] + wave1 + wave2
            luminance = (
                abs(int((self.luminance_offset + hue * luminance_scale) * self._luminance_steps))
                % self._luminance_steps
            )
            hue = math.fmod(self.hue_inner + hue * self.parameter("hue-width").get(), 1.0)
            self.setPixelHLS(pixel, (hue, luminance_table[luminance], 1.0))
Example #6
    def draw(self, dt):
        if self._mixer.is_onset():
            self.hue_inner = math.fmod(self.hue_inner + self.parameter('hue-step').get(), 1.0)
            self.luminance_offset += self.parameter('hue-step').get()

        self.hue_inner += dt * self.parameter('speed').get()
        self.wave1_offset += self.parameter('wave1-speed').get() * dt
        self.wave2_offset += self.parameter('wave2-speed').get() * dt
        self.luminance_offset += self.parameter('luminance-speed').get() * dt

        luminance_table = []
        luminance = 0.0
        for input in range(self._luminance_steps):
            if input > self.parameter('blackout').get() * self._luminance_steps:
                luminance -= 0.01
                luminance = clip(0, luminance, 1.0)
            elif input < self.parameter('whiteout').get() * self._luminance_steps:
                luminance += 0.1
                luminance = clip(0, luminance, 1.0)
            else:
                luminance -= 0.01
                luminance = clip(0.5, luminance, 1.0)
            luminance_table.append(luminance)
        luminance_table = np.asarray(luminance_table)

        wave1_period = self.parameter('wave1-period').get()
        wave1_amplitude = self.parameter('wave1-amplitude').get()
        wave2_period = self.parameter('wave2-period').get()
        wave2_amplitude = self.parameter('wave2-amplitude').get()
        luminance_scale = self.parameter('luminance-scale').get()

        wave1 = np.abs(np.cos(self.wave1_offset + self.pixel_angles * wave1_period) * wave1_amplitude)
        wave2 = np.abs(np.cos(self.wave2_offset + self.pixel_angles * wave2_period) * wave2_amplitude)
        hues = self.pixel_distances + wave1 + wave2
        luminance_indices = np.mod(np.abs(np.int_((self.luminance_offset + hues * luminance_scale) * self._luminance_steps)), self._luminance_steps)
        luminances = luminance_table[luminance_indices]
        hues = np.fmod(self.hue_inner + hues * self.parameter('hue-width').get(), 1.0)

        self.setAllHLS(hues, luminances, 1.0)
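
Example #6 is a NumPy-vectorised rewrite of the per-pixel loop in Example #5: the luminance table becomes an ndarray so it can be fancy-indexed with an entire array of integer indices at once, exactly as the luminance_indices line above does. The lookup pattern in isolation (the array contents here are made up for illustration):

import numpy as np

luminance_table = np.linspace(0.0, 1.0, 256)           # illustrative lookup table
values = np.random.rand(1000) * 3.0                     # one value per pixel
indices = np.mod(np.abs(np.int_(values * 256)), 256)    # wrap indices into the table
luminances = luminance_table[indices]                   # single vectorised lookup
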
Example #7
    def render(self, start, end, progress, out):

        progress = clip(0.0, progress, 1.0)
        idx = int(progress * (len(self.rand_index) - 1))

        if idx >= self.last_idx:
            for i in range(self.last_idx, idx):
                offset = self.rand_index[i]
                self.mask.flat[offset] = True
        else:
            for i in range(idx, self.last_idx):
                offset = self.rand_index[i]
                self.mask.flat[offset] = False
        self.last_idx = idx

        start[self.mask] = (0.0, 0.0, 0.0)
        end[np.invert(self.mask)] = (0.0, 0.0, 0.0)
        np.add(struct_flat(start), struct_flat(end), struct_flat(out))
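
render() above performs a random per-pixel wipe: rand_index is a pre-shuffled list of flat pixel offsets, and the boolean mask is grown or shrunk so that start pixels are blacked out where the mask is set and end pixels where it is not. struct_flat() is a project helper not shown here. A simplified, stateless sketch on plain equal-shaped float arrays, which returns the blend instead of writing into an out buffer:

import numpy as np

def random_wipe(start, end, progress, rand_index):
    """Blend two equal-shaped arrays by revealing `end` in random pixel order.

    Unlike render() above, this rebuilds the mask on every call rather than
    updating it incrementally, and skips the structured-array handling.
    """
    progress = max(0.0, min(progress, 1.0))
    idx = int(progress * (len(rand_index) - 1))
    mask = np.zeros(start.shape, dtype=bool)
    mask.flat[rand_index[:idx]] = True
    return np.where(mask, end, start)

# Typical setup: rand_index = np.random.permutation(start.size)
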
Example #8
    def get(self, start, end, progress):

        progress = clip(0.0, progress, 1.0)
        idx = int(progress * (len(self.rand_index) - 1))

        if idx >= self.last_idx:
            for i in range(self.last_idx, idx):
                offset = self.rand_index[i] * 3
                self.mask.flat[offset] = True
                self.mask.flat[offset + 1] = True
                self.mask.flat[offset + 2] = True
        else:
            for i in range(idx, self.last_idx):
                offset = self.rand_index[i] * 3
                self.mask.flat[offset] = False
                self.mask.flat[offset + 1] = False
                self.mask.flat[offset + 2] = False
        self.last_idx = idx

        start[self.mask] = 0.0
        end[np.invert(self.mask)] = 0.0

        return start + end
Example #9
    def write_strand(self, strand_data):
        """
        Performs a bulk strand write.
        Decodes the HLS-Float data according to client settings
        """
        strand_settings = self._app.scene.get_strand_settings()

        for client in [client for client in self._app.settings['networking']['clients'] if client["enabled"]]:
            # TODO: Split into smaller packets so that less-than-ideal networks will be OK
            packet = array.array('B', [])
            client_color_mode = client["color-mode"]

            for strand in strand_data.keys():
                if not strand_settings[strand]["enabled"]:
                    continue
                color_mode = strand_settings[strand]["color-mode"]

                data = []

                if client_color_mode == "HLSF32":
                    data = [channel for pixel in strand_data[strand][0:(3*160)] for channel in pixel]
                    data = array.array('B', struct.pack('%sf' % len(data), *data))
                elif client_color_mode == "RGB8":
                    data = [colorsys.hls_to_rgb(*pixel) for pixel in strand_data[strand][0:(3*160)]]
                    data = array.array('B', [clip(0, int(255.0 * item), 255) for sublist in data for item in sublist])
                elif client_color_mode == "HSVF32":
                    data = [colorsys.rgb_to_hsv(*colorsys.hls_to_rgb(*pixel)) for pixel in strand_data[strand][0:(3*160)]]
                    data = [channel for pixel in data for channel in pixel]
                    # Pack the HSV floats the same way the HLSF32 branch packs HLS floats
                    data = array.array('B', struct.pack('%sf' % len(data), *data))

                length = len(data)
                command = COMMAND_SET_RGB if color_mode == "RGB8" else COMMAND_SET_BGR
                packet.extend(array.array('B', [strand, command, (length & 0xFF), (length & 0xFF00) >> 8]))
                packet.extend(data)


            self._socket.sendto(packet, (client["host"], client["port"]))
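
Each strand's chunk in the packet starts with a four-byte header (strand id, command byte, and a 16-bit little-endian payload length) followed by the encoded pixel data, as built by the packet.extend() calls above. A standalone sketch of just that framing; the COMMAND_SET_BGR value below is a placeholder, the real constant lives in the project:

import array

COMMAND_SET_BGR = 0x10  # placeholder value, for illustration only

def frame_strand(strand, command, payload):
    """Prefix an array('B') payload with the 4-byte header used above."""
    length = len(payload)
    header = array.array('B', [strand, command, length & 0xFF, (length & 0xFF00) >> 8])
    return header + payload

# frame_strand(0, COMMAND_SET_BGR, array.array('B', [255, 0, 0] * 160))
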
Example #10
    def tick(self, dt):
        self._num_frames += 1

        dt *= self.global_speed

        if len(self.playlist) > 0:

            active_preset = self.playlist.get_active_preset()
            next_preset = self.playlist.get_next_preset()

            if active_preset is None:
                return

            try:
                active_preset.tick(dt)
            except:
                log.error("Exception raised in preset %s" % active_preset.name())
                self.playlist.disable_presets_by_class(active_preset.__class__.__name__)
                raise

            # Handle transition by rendering both the active and the next
            # preset, and blending them together
            if self._in_transition and next_preset and (next_preset != active_preset):
                if self._start_transition:
                    self._start_transition = False
                    if self._app.settings.get('mixer')['transition'] == "Random":
                        self.get_next_transition()
                    if self._transition:
                        self._transition.reset()
                    next_preset._reset()
                    self._buffer_b = BufferUtils.create_buffer()

                if self._transition_duration > 0.0 and self._transition is not None:
                    if not self._paused and not self._transition_scrubbing:
                        self.transition_progress = clip(0.0,
                                                        self._elapsed / self._transition_duration,
                                                        1.0)
                else:
                    if not self._transition_scrubbing:
                        self.transition_progress = 1.0

                next_preset.tick(dt)

            # If the scene tree is available, we can do efficient mixing of presets.
            # If not, a tree would need to be constructed on-the-fly.
            # TODO: Support mixing without a scene tree available

            if self._in_transition:
                mixed_buffer = self.render_presets(
                    active_preset, self._buffer_a,
                    next_preset, self._buffer_b,
                    self._in_transition, self._transition,
                    self.transition_progress,
                    check_for_nan=self._enable_profiling)
            else:
                mixed_buffer = self.render_presets(
                    active_preset, self._buffer_a,
                    check_for_nan=self._enable_profiling)

            # render_presets writes all the desired pixels to
            # self._main_buffer.

            #else:
                # Global gamma correction.
                # TODO(jon): This should be a setting
                #mixed_buffer.T[1] = np.power(mixed_buffer.T[1], 4)

            # Mod hue by 1 (to allow wrap-around) and clamp lightness and
            # saturation to [0, 1].
            mixed_buffer.T[0] = np.mod(mixed_buffer.T[0], 1.0)
            np.clip(mixed_buffer.T[1], 0.0, 1.0, mixed_buffer.T[1])
            np.clip(mixed_buffer.T[2], 0.0, 1.0, mixed_buffer.T[2])

            # Write this buffer to enabled clients.
            if self._net is not None:
                self._net.write_buffer(mixed_buffer)

            if (not self._paused and (self._elapsed >= self._duration)
                and active_preset.can_transition()
                and not self._in_transition):

                if (self._elapsed >= (self._duration + self._transition_slop)) or self._onset:
                    if len(self.playlist) > 1:
                        self.start_transition()
                    self._elapsed = 0.0

            elif self._in_transition:
                if not self._transition_scrubbing and (self.transition_progress >= 1.0):
                    self._in_transition = False
                    # Reset the elapsed time counter so the preset runs for the
                    # full duration after the transition
                    self._elapsed = 0.0
                    self.playlist.advance()

        if self._reset_onset:
            self._onset = False
            self._reset_onset = False

        if self._enable_profiling:
            tick_time = (time.time() - self._last_frame_time)
            self._last_frame_time = time.time()
            if tick_time > 0.0:
                index = int((1.0 / tick_time))
                self._tick_time_data[index] = self._tick_time_data.get(index, 0) + 1
Example #11
    def fade_luminance(self):
        """
        Returns a luminance value that exponentially fades to minimal after a beat
        """
        dt = time.time() - self.beat_time  # seconds since last beat
        return 0.01 + clip(0.0, math.pow(math.e, -dt * 4), 1.0)
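
With the e^(-4*dt) term, the post-beat boost decays to roughly 37% after a quarter of a second and is essentially back at the 0.01 floor after about a second:

import math

# Same decay curve as fade_luminance above, without the class state
for dt in (0.0, 0.25, 0.5, 1.0):
    print(dt, 0.01 + max(0.0, min(math.exp(-4 * dt), 1.0)))
# 0.0 -> 1.01   0.25 -> ~0.38   0.5 -> ~0.15   1.0 -> ~0.03
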
Example #12
    def tick(self, dt):
        self._num_frames += 1

        dt *= self.global_speed

        if len(self.playlist) > 0:

            active_preset = self.playlist.get_active_preset()
            next_preset = self.playlist.get_next_preset()

            if active_preset is None:
                return

            try:
                active_preset.tick(dt)
            except:
                log.error("Exception raised in preset %s" %
                          active_preset.name())
                self.playlist.disable_presets_by_class(
                    active_preset.__class__.__name__)
                raise

            # Handle transition by rendering both the active and the next
            # preset, and blending them together
            if self._in_transition and next_preset and (next_preset !=
                                                        active_preset):
                if self._start_transition:
                    self._start_transition = False
                    if self._app.settings.get(
                            'mixer')['transition'] == "Random":
                        self.get_next_transition()
                    if self._transition:
                        self._transition.reset()
                    next_preset._reset()

                if self._transition_duration > 0.0 and self._transition is not None:
                    if not self._paused and not self._transition_scrubbing:
                        self.transition_progress = clip(
                            0.0,
                            old_div(self._elapsed, self._transition_duration),
                            1.0)
                else:
                    if not self._transition_scrubbing:
                        self.transition_progress = 1.0

                next_preset.tick(dt)

            # If the scene tree is available, we can do efficient mixing of presets.
            # If not, a tree would need to be constructed on-the-fly.
            # TODO: Support mixing without a scene tree available

            if self._in_transition:
                self.render_presets(active_preset,
                                    next_preset,
                                    self._transition,
                                    self.transition_progress,
                                    check_for_nan=self._enable_profiling)
            else:
                self.render_presets(active_preset,
                                    check_for_nan=self._enable_profiling)

            # Mod hue by 1 (to allow wrap-around) and clamp lightness and
            # saturation to [0, 1].
            np.mod(self._output_buffer['hue'], 1.0, self._output_buffer['hue'])
            np.clip(self._output_buffer['light'], 0.0, 1.0,
                    self._output_buffer['light'])
            np.clip(self._output_buffer['sat'], 0.0, 1.0,
                    self._output_buffer['sat'])

            # Write this buffer to enabled clients.
            if self._net is not None:
                self._net.write_buffer(self._output_buffer)

            if (not self._paused and (self._elapsed >= self._duration)
                    and active_preset.can_transition()
                    and not self._in_transition):

                if (self._elapsed >=
                    (self._duration + self._transition_slop)) or self._onset:
                    if len(self.playlist) > 1:
                        self.start_transition()
                    self._elapsed = 0.0

            elif self._in_transition:
                if not self._transition_scrubbing and (self.transition_progress
                                                       >= 1.0):
                    self._in_transition = False
                    # Reset the elapsed time counter so the preset runs for the
                    # full duration after the transition
                    self._elapsed = 0.0
                    self.playlist.advance()

        if self._reset_onset:
            self._onset = False
            self._reset_onset = False

        if self._enable_profiling:
            tick_time = (time.time() - self._last_frame_time)
            self._last_frame_time = time.time()
            if tick_time > 0.0:
                index = int((old_div(1.0, tick_time)))
                self._tick_time_data[index] = self._tick_time_data.get(
                    index, 0) + 1