Example #1
def set_pixels(pixels, pixels_per_string, elapsed_time, palette, audio_level,
               audio_respond, colour_mash):
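    # Rising wave: a sinusoidal crest sweeps around the strings; each string
    # tracks the wave's peak in string_levels and eases its brightness in
    # string_brightness. Pixels at or past the moving wave front take the
    # full palette colour; pixels before it fade with an inverse-square
    # falloff from the midpoint. Assumes module-level imports
    # (math, palette_utils) and the string_levels / string_brightness
    # state lists from the surrounding codebase.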
    global string_levels
    global string_brightness

    n_pixels = len(pixels)
    n_strings = int(n_pixels / pixels_per_string)
    n_peaks = 3
    time_factor = -0.5
    amplitude_factor = 12.5 + 5 * math.sin(elapsed_time * -0.2)

    for string in range(n_strings):
        radial_offset = (n_peaks * 2 * math.pi / n_strings) * string
        lateral_offset = math.sin(radial_offset + elapsed_time *
                                  time_factor) * amplitude_factor

        if lateral_offset > string_levels[string]:
            string_levels[string] = lateral_offset
            string_brightness[string] += (1.0 -
                                          string_brightness[string]) * 0.15

        else:
            string_levels[string] *= 0.95
            string_brightness[string] -= (string_brightness[string] -
                                          0.25) * 0.075

        midpoint = (
            (8 * pixels_per_string / 12) -
            1) + (pixels_per_string / 2.5) * math.sin(elapsed_time * -0.5)

        brightness_val = string_brightness[string]

        for pixel in range(pixels_per_string):
            palette_val = palette_utils.get_value(elapsed_time, pixel,
                                                  pixels_per_string, palette,
                                                  colour_mash)

            pixel_level = 1.0

            if audio_respond:
                pixel_response_proportion = (
                    1 - (float(pixel) / pixels_per_string))**1.2
                pixel_level = (1 - pixel_response_proportion) + (
                    pixel_response_proportion * audio_level)

            if pixel >= midpoint - string_levels[string]:
                pixels[string * pixels_per_string + pixel] = tuple(
                    pixel_level * string_brightness[string] * channel
                    for channel in palette_val)
            else:
                distance_from_midpoint = midpoint - pixel
                # guard against a zero distance (pixel exactly at the
                # midpoint), which would otherwise divide by zero
                fade_factor = min(
                    1.0 / max(distance_from_midpoint**2, 0.0000001), 1)
                pixels[string * pixels_per_string +
                       pixel] = tuple(fade_factor * string_brightness[string] *
                                      pixel_level * channel
                                      for channel in palette_val)
Example #2
def set_pixels(pixel_buff, pixels_per_string, elapsed_time, palette,
               fade_level):
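    # Ripple: a fast sine wave runs along each string and modulates the
    # palette colour, and the result is cross-faded into the existing buffer
    # by fade_level. Note the ripple phase uses wall-clock time.time() * 5
    # rather than elapsed_time, so it keeps moving even if elapsed_time is
    # paused. Assumes module-level imports (math, time, palette_utils).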
    for ii in range(len(pixel_buff)):
        pixel_level = 1

        pixel_index = ii % pixels_per_string
        ripple_level = math.sin(pixel_index - time.time() * 5) / 2 + 0.5
        pixel_val = tuple(
            pixel_level * channel * ripple_level
            for channel in palette_utils.get_value(
                elapsed_time, ii, pixels_per_string, palette, False))
        pixel_buff[ii] = tuple(pixel_val[channel] * fade_level +
                               pixel_buff[ii][channel] * (1.0 - fade_level)
                               for channel in range(3))
Example #3
def set_pixels(pixel_buff, pixels_per_string, sparkle_chance,
               max_concurrent_sparkles, elapsed_time, palette, audio_level,
               audio_respond, colour_mash, add):
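    # Sparkles: each frame up to max_concurrent_sparkles random pixels are
    # (re)lit with a palette colour, and every pixel's intensity then decays
    # with the inverse square of the time since it last sparkled. Assumes
    # module-level imports (random, palette_utils) and state (initialised,
    # sparkle_colour, sparkle_time, last_sparkle).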
    global sparkle_colour
    global sparkle_time
    global last_sparkle

    if not initialised:
        return

    since_last_sparkle = elapsed_time - last_sparkle

    # if since_last_sparkle == 0:
    #     sparkle_chance = 0
    # else:
    #     sparkle_chance *= 1.0 / ((since_last_sparkle * 2) ** 2)

    last_sparkle = elapsed_time

    if audio_respond:
        max_concurrent_sparkles = max(
            int(max_concurrent_sparkles * (audio_level**2) * 5), 1)
        sparkle_chance = max(sparkle_chance * (audio_level**2) * 5,
                             sparkle_chance / 4)

    for ii in range(max_concurrent_sparkles + 1):
        if random.random() < sparkle_chance:
            sparkle_index = random.randint(0, len(sparkle_time) - 1)
            sparkle_time[sparkle_index] = elapsed_time
            sparkle_colour[sparkle_index] = palette_utils.get_value(
                elapsed_time, sparkle_index, pixels_per_string, palette,
                colour_mash)

    for ii in range(len(pixel_buff)):
        sparkle_intensity = min(
            1.0 / ((max(elapsed_time - sparkle_time[ii], 0.001)) * 3)**2, 1.5)
        # sparkle_intensity = min(pattern_utils.inverse_square(elapsed_time, sparkle_time[ii], 2.0), 1)
        pixel_value = tuple(channel * sparkle_intensity
                            for channel in sparkle_colour[ii])
        # pixel_value = tuple(255 * channel for channel in color_utils.gamma(pixel_value, 2.2))

        if add:
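            # "add" here means a lighten blend: each channel keeps the
            # brighter of the sparkle and whatever is already in the buffer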
            pixel_buff[ii] = tuple(
                max(pixel_value[channel], pixel_buff[ii][channel])
                for channel in range(3))
        else:
            pixel_buff[ii] = pixel_value
Example #4
def set_pixels(pixel_buff, pixels_per_string, sparkle_chance,
               max_concurrent_sparkles, sparkle_fade_val, elapsed_time,
               palette, audio_level, audio_respond, colour_mash, fade_level):
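    # Shimmer: random pixels get a normally distributed offset into the
    # palette, and each offset decays towards zero by sparkle_fade_val every
    # frame, so sparkles appear as colour shifts that settle back into the
    # base pattern. Assumes module-level imports (random, numpy,
    # palette_utils) and state (initialised, sparkle_offset, last_sparkle).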
    global sparkle_offset
    global sparkle_time
    global last_sparkle

    if not initialised:
        return

    since_last_sparkle = elapsed_time - last_sparkle

    if since_last_sparkle == 0:
        sparkle_chance = 0
    else:
        sparkle_chance *= 1.0 / ((since_last_sparkle * 2)**2)

    # account for number of total pixels
    # sparkle_chance *= ((len(pixel_buff) / pixels_per_string) / 32)

    last_sparkle = elapsed_time

    for ii in range(max_concurrent_sparkles + 1):
        if random.random() < sparkle_chance:
            sparkle_index = random.randint(0, len(sparkle_offset) - 1)
            sparkle_offset[sparkle_index] = (numpy.random.randn() *
                                             palette.len / 2)

    for ii in range(len(pixel_buff)):
        pixel_level = 1

        if audio_respond:
            pixel_index = ii % pixels_per_string
            pixel_response_proportion = (float(pixel_index) /
                                         pixels_per_string)**3.5
            pixel_level = (1 - pixel_response_proportion) + (
                pixel_response_proportion * audio_level)

        pixel_val = tuple(
            pixel_level * channel * fade_level
            for channel in palette_utils.get_value(
                elapsed_time / 2, ii + sparkle_offset[ii],
                pixels_per_string, palette, colour_mash))
        pixel_buff[ii] = pixel_val
        # pixel_buff[ii] = pattern_utils.fadeDownTo(pixel_buff[ii], pixel_val, 0.5)

        sparkle_offset[ii] *= sparkle_fade_val
Example #5
def set_pixels(pixels, pixels_per_string, elapsed_time, palette, beat_now, audio_level, audio_respond, colour_mash):
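    # Colour bands: three coloured bands (one per channel) orbit each string
    # with slightly different phase offsets, and the ordering of those
    # offsets rotates on the beat (or on a slow timer when not responding to
    # audio). Assumes module-level imports (math, color_utils,
    # palette_utils) and state (pixel_order, last_pixel_order_switch,
    # min_pixel_order_switch_interval).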
    global pixel_order
    global last_pixel_order_switch

    n_pixels = len(pixels)
    n_strings = int(n_pixels / pixels_per_string)

    time_cos_factor = 2
    wobble_amplitude = 5
    band_radius = pixels_per_string/2 + math.cos(elapsed_time/time_cos_factor)*18 - 13
    colour_offset = math.pi/6
    cos_factor = 6*math.pi/n_strings
    t = elapsed_time*2
    offset_ordering = [[0, 1, 2], [0, 2, 1], [1, 0, 2]]

    if audio_respond:
        if beat_now:
            pixel_order = (pixel_order + 1) % len(offset_ordering)
            last_pixel_order_switch = elapsed_time

    else:
        if math.cos(elapsed_time/time_cos_factor) < -0.99 and elapsed_time - last_pixel_order_switch > min_pixel_order_switch_interval:
            pixel_order = (pixel_order + 1) % len(offset_ordering)
            last_pixel_order_switch = elapsed_time

    for string in range(n_strings):
        bandLocation = tuple(band_radius + wobble_amplitude*math.cos(t + string*cos_factor + colour_offset*offset_ordering[pixel_order][colour]) for colour in range(3))
        for pixel in range(pixels_per_string):
            pixCol = [0, 0, 0]
            for colour in range(3):
                # guard against a zero distance (pixel exactly on the band),
                # which would otherwise divide by zero
                distance = max(abs(bandLocation[colour] - pixel), 0.0000001)

                pixCol[colour] = (2 + max(band_radius, 0.0000001)/10)/distance

                if audio_respond:
                    pixCol[colour] *= math.sqrt(audio_level)/3 + 0.84
            r, g, b = color_utils.gamma(pixCol, 2.2)
            # pixels[string*pixels_per_string + pixel] = pattern_utils.fadeDownTo(pixels[string*pixels_per_string + pixel], (g*255, r*255, b*255), 0.5)

            palette_val = palette_utils.get_value(elapsed_time, pixel, pixels_per_string, palette, colour_mash)
            r *= palette_val[0]/255.0
            g *= palette_val[1]/255.0
            b *= palette_val[2]/255.0

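            # note: channels are emitted in GRB order, matching the
            # commented-out fadeDownTo line above (presumably what the LED
            # hardware expects)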
            pixels[string*pixels_per_string + pixel] = (g*255, r*255, b*255)
Example #6
def set_pixels(pixel_buff, pixels_per_string, elapsed_time, palette,
               audio_level, audio_respond, colour_mash, fade_level):
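    # Base palette wash: every pixel takes its palette colour scaled by
    # fade_level; with audio_respond, the far end of each string follows the
    # audio level. Assumes palette_utils is imported at module level.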
    for ii in range(len(pixel_buff)):
        pixel_level = 1

        if audio_respond:
            pixel_index = ii % pixels_per_string
            pixel_response_proportion = (float(pixel_index) /
                                         pixels_per_string)**1.2
            pixel_level = (1 - pixel_response_proportion) + (
                pixel_response_proportion * audio_level)

        pixel_val = tuple(
            pixel_level * channel * fade_level
            for channel in palette_utils.get_value(
                elapsed_time, ii, pixels_per_string, palette, colour_mash))
        pixel_buff[ii] = pixel_val
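
For context, here is a minimal sketch of how a driver loop might call this pattern. The buffer layout, frame rate, and palette handling are assumptions; set_pixels and the palette object come from the surrounding codebase.

import time

PIXELS_PER_STRING = 64            # assumed string length
N_STRINGS = 8                     # assumed number of strings
pixel_buff = [(0.0, 0.0, 0.0)] * (PIXELS_PER_STRING * N_STRINGS)

start = time.time()
while True:
    elapsed_time = time.time() - start
    # palette is whatever palette object the codebase's palette_utils expects
    set_pixels(pixel_buff, PIXELS_PER_STRING, elapsed_time, palette,
               audio_level=0.0, audio_respond=False, colour_mash=False,
               fade_level=1.0)
    # ...push pixel_buff to the LEDs here (e.g. via an OPC client)...
    time.sleep(1 / 60.0)          # ~60 frames per second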
Example #7
def set_pixels(pixel_buff, pixels_per_string, num_strings, waves_per_string,
               add_spiral, elapsed_time, palette, audio_level, audio_respond,
               colour_mash):
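    # Spiral waves: a sine wave of brightness runs along each string
    # (waves_per_string cycles per string); with add_spiral the wave is
    # phased by string position so the crests form a spiral across strings.
    # Assumes module-level imports (math, color_utils, palette_utils).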
    for ii in range(len(pixel_buff)):
        pixel_index = float(ii) % pixels_per_string
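        # note: string_index is fractional, so the spiral offset below
        # varies smoothly within a string rather than stepping per string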
        string_index = float(ii) / pixels_per_string

        scaling_factor = math.pi * 2 / pixels_per_string * waves_per_string

        spiral_offset = 0

        if add_spiral:
            spiral_offset = (float(string_index) / num_strings) * math.pi * 2

        upshift = 0.55
        if audio_respond:
            upshift = math.sqrt(audio_level) / 3 + 0.33

        brightness_level = min(
            (math.sin(-elapsed_time + pixel_index * scaling_factor +
                      spiral_offset) / 1.82) + upshift, 1.55)

        if audio_respond:
            brightness_level *= min(math.sqrt(audio_level) + 0.35, 1)

        pixel_value = palette_utils.get_value(elapsed_time, ii,
                                              pixels_per_string, palette,
                                              colour_mash)
        pixel_value = tuple(brightness_level * channel / 255
                            for channel in pixel_value)

        pixel_value = tuple(channel * 255
                            for channel in color_utils.gamma(pixel_value, 2.2))

        # pixel_buff[ii] = pattern_utils.fadeDownTo(pixel_buff[ii], pixel_value, 0.5)
        pixel_buff[ii] = pixel_value
Example #8
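# Constructor for what appears to be a sparkle/particle-style object from
# the same codebase: it samples a palette colour at creation time and
# records when it was spawned. Note the `time` parameter shadows the stdlib
# time module within this method.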
def __init__(self, palette, pixels_per_string, time, colour_mash):
    self.colour = palette_utils.get_value(time, 0, 10, palette, colour_mash)
    self.time = time
    self.pixels_per_string = pixels_per_string
    self.active = True
Example #9
def set_pixels(pixel_buff, pixels_per_string, elapsed_time, speed_r, speed_g,
               speed_b, palette, audio_level, audio_respond, colour_mash):
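    # Diagonal stripes (appears adapted from the raver_plaid example in
    # openpixelcontrol): three out-of-phase cosine waves drive r/g/b,
    # crossed with slowly drifting black stripes, then tinted by the
    # palette. Assumes module-level imports (math, color_utils,
    # palette_utils).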
    # how many sine wave cycles are squeezed into our n_pixels
    # 24 happens to create nice diagonal stripes on the wall layout
    freq_r = 24
    freq_g = 24
    freq_b = 24

    t = elapsed_time * 5
    n_pixels = len(pixel_buff)

    # audio_factor is currently a no-op (both branches left it at 1.0); the
    # audio response is applied via the black stripes below instead
    audio_factor = 1.0

    for ii in range(n_pixels):
        pct = float(ii) / n_pixels
        # diagonal black stripes
        pct_jittered = (pct * 77) % 37

        blackstripes = color_utils.cos(pct_jittered,
                                       offset=t * 0.05,
                                       period=1,
                                       minn=-1.5,
                                       maxx=1.5)

        blackstripes_offset = color_utils.cos(t,
                                              offset=0.9,
                                              period=60,
                                              minn=-0.5,
                                              maxx=3)

        if audio_respond:
            root_lev = math.sqrt(audio_level)
            blackstripes = color_utils.clamp(
                blackstripes + blackstripes_offset, 0 + root_lev / 2,
                0.5 + root_lev / 2)
        else:
            blackstripes = color_utils.clamp(
                blackstripes + blackstripes_offset, 0, 1)

        # 3 sine waves for r, g, b which are out of sync with each other
        r = blackstripes * color_utils.remap(
            math.cos(
                (t / speed_r + pct * freq_r) * math.pi * 2), -1, 1, 0, 256)
        g = blackstripes * color_utils.remap(
            math.cos(
                (t / speed_g + pct * freq_g) * math.pi * 2), -1, 1, 0, 256)
        b = blackstripes * color_utils.remap(
            math.cos(
                (t / speed_b + pct * freq_b) * math.pi * 2), -1, 1, 0, 256)
        # pixel_buff[ii] = pattern_utils.fadeDownTo(pixel_buff[ii], (r, g, b), 0.5)

        palette_val = palette_utils.get_value(elapsed_time, ii,
                                              pixels_per_string, palette,
                                              colour_mash)
        r *= palette_val[0] / 255.0
        g *= palette_val[1] / 255.0
        b *= palette_val[2] / 255.0

        pixel_buff[ii] = tuple(
            min(channel * audio_factor, 255) for channel in (r, g, b))