Code Example #1
File: noisemaker.py Project: Kulinark/py-noisemaker
def mashup(ctx, input_dir, filename, control_filename, time, speed, seed):
    filenames = []

    for root, _, files in os.walk(input_dir):
        for f in files:
            if f.endswith(('.png', '.jpg')):
                filenames.append(os.path.join(root, f))

    collage_count = min(random.randint(4, 6), len(filenames))
    collage_images = []

    # One extra image is loaded; it becomes the control layer below when no control file is given.
    for i in range(collage_count + 1):
        index = random.randint(0, len(filenames) - 1)

        input_filename = filenames[index]  # paths from os.walk() already include input_dir

        collage_input = tf.image.convert_image_dtype(util.load(input_filename, channels=3), dtype=tf.float32)

        collage_images.append(collage_input)

    if control_filename:
        control_shape = util.shape_from_file(control_filename)
        control = tf.image.convert_image_dtype(util.load(control_filename, channels=control_shape[2]), dtype=tf.float32)

    else:
        control = collage_images.pop()

    shape = tf.shape(control)  # All images need to be the same size!

    control = value.value_map(control, shape, keepdims=True)

    base = generators.basic(freq=random.randint(2, 5), shape=shape, lattice_drift=random.randint(0, 1), hue_range=random.random(),
                            seed=seed, time=time, speed=speed)

    value_shape = value.value_shape(shape)

    control = value.convolve(kernel=effects.ValueMask.conv2d_blur, tensor=control, shape=value_shape)

    with tf.compat.v1.Session().as_default():
        tensor = effects.blend_layers(control, shape, random.random() * .5, *collage_images)

        tensor = value.blend(tensor, base, .125 + random.random() * .125)

        tensor = effects.bloom(tensor, shape, alpha=.25 + random.random() * .125)
        tensor = effects.shadow(tensor, shape, alpha=.25 + random.random() * .125, reference=control)

        tensor = tf.image.adjust_brightness(tensor, .05)
        tensor = tf.image.adjust_contrast(tensor, 1.25)

        util.save(tensor, filename)

    print('mashup')
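
Note: these excerpts are bodies of larger scripts and omit their module-level imports. A minimal sketch of what the newer excerpts appear to assume is shown below; the module layout is a guess and differs between py-noisemaker versions (older excerpts also pull load, save, and shape_from_file directly into the local namespace), so adjust it to match your checkout.

# Assumed imports for the newer excerpts; paths are a best guess, not authoritative.
import os
import random
import sys

import click
import tensorflow as tf

from noisemaker import effects, generators, presets, recipes, util, value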
Code Example #2
def blended():
    shape = [LARGE_Y, LARGE_X, 3]

    erode_kwargs = {
        "alpha": .025,
        "density": 250,
        "iterations": 50,
        "inverse": True,
    }

    control = tf.image.convert_image_dtype(load(CONTROL_FILENAME), tf.float32)

    water = tf.ones(shape) * tf.stack([.05, .2, .333])
    water = effects.blend(water, control * 4.0, .125)

    low = tf.image.convert_image_dtype(load(LOW_FILENAME), tf.float32)
    mid = tf.image.convert_image_dtype(load(MID_FILENAME), tf.float32)
    high = tf.image.convert_image_dtype(load(HIGH_FILENAME), tf.float32)

    blend_control = generators.multires(shape=shape,
                                        freq=FREQ * 4,
                                        ridges=True,
                                        octaves=4)
    blend_control = 1.0 - effects.value_map(
        blend_control, shape, keep_dims=True) * .5

    combined_land = effects.blend_layers(control, shape, blend_control,
                                         control * 2, low, mid, high)
    combined_land = effects.erode(combined_land,
                                  shape,
                                  xy_blend=.25,
                                  **erode_kwargs)
    combined_land = effects.erode(combined_land, shape, **erode_kwargs)

    combined_land_0 = effects.shadow(combined_land, shape, alpha=1.0)
    combined_land_1 = effects.shadow(combined_land,
                                     shape,
                                     alpha=1.0,
                                     reference=control)

    combined_land = effects.blend(combined_land_0, combined_land_1, .5)

    combined = effects.blend_layers(control, shape, .01, water, combined_land,
                                    combined_land, combined_land)
    combined = effects.blend(combined_land, combined, .625)

    combined = tf.image.adjust_brightness(combined, .1)
    combined = tf.image.adjust_contrast(combined, .75)
    combined = tf.image.adjust_saturation(combined, .625)

    with tf.Session().as_default():
        save(combined, BLENDED_FILENAME)
Code Example #3
def main(ctx, name, input_filename):
    tensor = tf.image.convert_image_dtype(load(input_filename), tf.float32)

    max_height = 1024
    max_width = 1024

    with tf.Session().as_default():
        height, width, channels = tf.shape(tensor).eval()

        need_resample = False

        if height != width:
            length = min(height, width)
            height = length
            width = length

            tensor = tf.image.resize_image_with_crop_or_pad(tensor, length, length)

        if height > max_height:
            need_resample = True
            width = int(width * (max_height / height))
            height = max_height

        if width > max_width:
            need_resample = True
            height = int(height * (max_width / width))
            width = max_width

        shape = [height, width, channels]

        if need_resample:
            tensor = effects.resample(tensor, shape)

        save(tensor, name)
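
Examples #3, #5, and #9 repeat the same downscale-to-fit step (examples #3 and #5 also crop to a square first): scale the image so neither dimension exceeds 1024 while preserving the aspect ratio. A standalone sketch of that arithmetic follows; the helper name fit_within is hypothetical, not part of the project.

def fit_within(height, width, max_height=1024, max_width=1024):
    """Return (height, width, need_resample), scaled to fit inside the given bounds."""
    need_resample = False

    if height > max_height:
        need_resample = True
        width = int(width * (max_height / height))
        height = max_height

    if width > max_width:
        need_resample = True
        height = int(height * (max_width / width))
        width = max_width

    return height, width, need_resample

# e.g. fit_within(2048, 1536) -> (1024, 768, True)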
Code Example #4
File: noisemaker.py Project: Kulinark/py-noisemaker
def effect(ctx, seed, filename, no_resize, time, speed, preset_name, input_filename):
    if not seed:
        seed = random.randint(1, MAX_SEED_VALUE)

    value.set_seed(seed)
    reload_presets(PRESETS)

    input_shape = util.shape_from_file(input_filename)

    input_shape[2] = min(input_shape[2], 3)

    tensor = tf.image.convert_image_dtype(util.load(input_filename, channels=input_shape[2]), dtype=tf.float32)

    if preset_name == "random":
        preset_name = list(EFFECT_PRESETS)[random.randint(0, len(EFFECT_PRESETS) - 1)]

    print(f"{preset_name} (seed: {seed})")

    preset = EFFECT_PRESETS[preset_name]

    if no_resize:
        shape = input_shape

    else:
        shape = [1024, 1024, input_shape[2]]

        tensor = effects.square_crop_and_resize(tensor, input_shape, shape[0])

    try:
        preset.render(tensor=tensor, shape=shape, time=time, speed=speed, filename=filename)

    except Exception as e:
        util.logger.error(f"preset.render() failed: {e}\nSeed: {seed}\nArgs: {preset.__dict__}")
        raise
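
The leading ctx parameter in these functions indicates they are click commands; the decorator stack is omitted from every excerpt on this page. A hypothetical illustration of how a command with example #4's signature might be declared (the option and argument names below are assumptions derived from the parameters, not the project's documented CLI flags):

@click.command()
@click.option('--seed', type=int, default=0)
@click.option('--filename', default='output.png')
@click.option('--no-resize', is_flag=True)
@click.option('--time', type=float, default=0.0)
@click.option('--speed', type=float, default=1.0)
@click.argument('preset_name')
@click.argument('input_filename')
@click.pass_context
def effect(ctx, seed, filename, no_resize, time, speed, preset_name, input_filename):
    ...  # body as shown in example #4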
Code Example #5
def main(ctx, seed, name, preset_name, input_filename):
    generators.set_seed(seed)

    tensor = tf.image.convert_image_dtype(load(input_filename), tf.float32)

    max_height = 1024
    max_width = 1024

    with tf.Session().as_default():
        height, width, channels = tf.shape(tensor).eval()

        need_resample = False

        # Some presets only like square images. Work around for now by cropping.
        if height != width:
            length = min(height, width)
            height = length
            width = length

            tensor = tf.image.resize_image_with_crop_or_pad(tensor, length, length)

        if height > max_height:
            need_resample = True
            width = int(width * (max_height / height))
            height = max_height

        if width > max_width:
            need_resample = True
            height = int(height * (max_width / width))
            width = max_width

        shape = [height, width, channels]

        if need_resample:
            tensor = effects.resample(tensor, shape)

        kwargs, preset_name = presets.load(preset_name, presets.EFFECTS_PRESETS())

        kwargs["shape"] = [height, width, channels]

        if "freq" not in kwargs:
            kwargs["freq"] = [3, 3]

        if "octaves" not in kwargs:
            kwargs["octaves"] = 1

        if "ridges" not in kwargs:
            kwargs["ridges"] = False

        tensor = effects.post_process(tensor, **kwargs)
        tensor = recipes.post_process(tensor, **kwargs)

        print(preset_name)
        save(tensor, name)
Code Example #6
File: crop.py Project: aayars/py-noisemaker
def main(ctx, name, retro_upscale, input_filename):
    shape = shape_from_file(input_filename)

    tensor = tf.image.convert_image_dtype(load(input_filename, channels=3),
                                          tf.float32)

    if retro_upscale:
        shape = [shape[0] * 2, shape[1] * 2, shape[2]]

        tensor = value.resample(tensor, shape, spline_order=0)

    tensor = effects.square_crop_and_resize(tensor, shape, 1024)

    with tf.compat.v1.Session().as_default():
        save(tensor, name)
Code Example #7
def clouds(input_filename):
    tensor = tf.image.convert_image_dtype(load(input_filename), tf.float32)

    pre_shape = [SMALL_Y, SMALL_X, 1]
    post_shape = [LARGE_Y, LARGE_X, 1]

    control_kwargs = {
        "freq": FREQ * 2,
        "lattice_drift": 1,
        "octaves": OCTAVES,
        "ridges": True,
        "shape": pre_shape,
        "warp_freq": 3,
        "warp_range": .25,
        "warp_octaves": 2,
    }

    control = generators.multires(**control_kwargs)

    layer_0 = tf.ones(pre_shape)
    layer_1 = tf.zeros(pre_shape)

    combined = effects.blend_layers(control, pre_shape, 1.0, layer_0, layer_1)

    shadow = effects.offset(combined, pre_shape, random.randint(-15, 15),
                            random.randint(-15, 15))
    shadow = tf.minimum(shadow * 2.5, 1.0)
    shadow = effects.convolve(effects.ConvKernel.blur, shadow, pre_shape)
    shadow = effects.convolve(effects.ConvKernel.blur, shadow, pre_shape)
    shadow = effects.convolve(effects.ConvKernel.blur, shadow, pre_shape)

    shadow = effects.resample(shadow, post_shape)
    combined = effects.resample(combined, post_shape)

    tensor = effects.blend(tensor, tf.zeros(post_shape), shadow * .5)
    tensor = effects.blend(tensor, tf.ones(post_shape), combined)

    post_shape = [LARGE_Y, LARGE_X, 3]

    tensor = effects.shadow(tensor, post_shape, alpha=.25)

    tensor = effects.bloom(tensor, post_shape, .333)
    tensor = recipes.dither(tensor, post_shape, .075)

    combined = tf.image.adjust_contrast(combined, 1.125)  # note: this result is unused; only `tensor` is saved below

    with tf.Session().as_default():
        save(tensor, FINAL_FILENAME)
Code Example #8
def frame(ctx, input_dir, frame, seed, filename, width, height):
    value.set_seed(seed)

    shape = [height, width, 3]

    dirnames = [d for d in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir, d))]

    if not dirnames:
        click.echo("Couldn't determine directory names inside of input dir " + input_dir)
        sys.exit(1)

    collage_count = min(random.randint(4, 6), len(dirnames))
    collage_images = []

    for i in range(collage_count + 1):
        index = random.randint(0, len(dirnames) - 1)

        dirname = dirnames[index]

        filenames = [f for f in sorted(os.listdir(os.path.join(input_dir, dirname))) if f.endswith('.png')]

        if not filenames:
            continue

        input_filename = os.path.join(input_dir, dirname, filenames[frame])

        collage_images.append(tf.image.convert_image_dtype(util.load(input_filename, channels=3), dtype=tf.float32))

    base = generators.basic(freq=random.randint(2, 4), shape=shape, hue_range=random.random(), seed=seed, time=frame/30.0, speed=0.125)

    control = value.value_map(collage_images.pop(), shape, keepdims=True)

    control = value.convolve(kernel=effects.ValueMask.conv2d_blur, tensor=control, shape=[shape[0], shape[1], 1])

    with tf.compat.v1.Session().as_default():
        tensor = effects.blend_layers(control, shape, random.random() * .5, *collage_images)

        tensor = value.blend(tensor, base, .125 + random.random() * .125)

        tensor = effects.bloom(tensor, shape, alpha=.25 + random.random() * .125)
        tensor = effects.shadow(tensor, shape, alpha=.25 + random.random() * .125, reference=control)

        tensor = tf.image.adjust_brightness(tensor, .05)
        tensor = tf.image.adjust_contrast(tensor, 1.25)

        util.save(tensor, filename)
Code Example #9
File: glitchmaker.py Project: aayars/py-noisemaker
def render(ctx, glitch, vhs, crt, scan_error, snow, dither, aberration, bloom,
           name, input_filename):
    tensor = tf.image.convert_image_dtype(load(input_filename), tf.float32)

    freq = [3, 3]

    max_height = 1024
    max_width = 1024

    with tf.compat.v1.Session().as_default():
        height, width, channels = tf.shape(tensor).eval()

        need_resample = False

        if height > max_height:
            need_resample = True
            width = int(width * (max_height / height))
            height = max_height

        if width > max_width:
            need_resample = True
            height = int(height * (max_width / width))
            width = max_width

        shape = [height, width, channels]

        if need_resample:
            tensor = value.resample(tensor, shape)

        tensor = effects.post_process(tensor,
                                      shape=shape,
                                      freq=freq,
                                      with_bloom=bloom,
                                      with_aberration=aberration,
                                      with_glitch=glitch,
                                      with_vhs=vhs,
                                      with_crt=crt,
                                      with_scan_error=scan_error,
                                      with_snow=snow,
                                      with_dither=dither)

        save(tensor, name)

    print(name)
Code Example #10
def main(ctx, seed, name, no_resize, overrides, time, preset_name, input_filename):
    presets.bake_presets(seed)

    input_shape = effects.shape_from_file(input_filename)

    input_shape[2] = min(input_shape[2], 3)

    tensor = tf.image.convert_image_dtype(load(input_filename, channels=input_shape[2]), dtype=tf.float32)

    if preset_name == 'random':
        preset_name = 'random-effect'

    kwargs = presets.preset(preset_name)

    print(kwargs['name'])

    kwargs['time'] = time

    if 'freq' not in kwargs:
        kwargs['freq'] = [3, 3]

    if 'octaves' not in kwargs:
        kwargs['octaves'] = 1

    if 'ridges' not in kwargs:
        kwargs['ridges'] = False

    if no_resize:
        kwargs['shape'] = input_shape

    else:
        kwargs['shape'] = [1024, 1024, input_shape[2]]

        tensor = effects.square_crop_and_resize(tensor, input_shape, kwargs['shape'][0])

    if overrides:
        kwargs.update(json.loads(overrides))

    tensor = effects.post_process(tensor, **kwargs)
    tensor = recipes.post_process(tensor, **kwargs)

    with tf.Session().as_default():
        save(tensor, name)
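
The overrides parameter in example #10 is parsed with json.loads() and merged into the preset kwargs, so any keyword accepted by effects.post_process() or recipes.post_process() can be overridden at invocation time. A hypothetical value (the keys shown are just the defaults already used in the same function, not a documented list):

overrides = '{"octaves": 2, "ridges": true}'
# json.loads(overrides) -> {'octaves': 2, 'ridges': True}, applied via kwargs.update()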
Code Example #11
def basic(ctx, width, height, input_dir, name):
    shape = [height, width, 3]

    filenames = [
        f for f in os.listdir(input_dir)
        if f.endswith(".png") or f.endswith(".jpg")
    ]

    collage_count = min(random.randint(3, 5), len(filenames))
    collage_images = []

    for i in range(collage_count + 1):
        index = random.randint(0, len(filenames) - 1)

        collage_input = tf.image.convert_image_dtype(util.load(
            os.path.join(input_dir, filenames[index])),
                                                     dtype=tf.float32)
        collage_images.append(effects.resample(collage_input, shape))

    base = generators.basic(freq=random.randint(2, 5),
                            shape=shape,
                            lattice_drift=random.randint(0, 1),
                            hue_range=random.random())

    control = effects.value_map(collage_images.pop(), shape, keep_dims=True)

    tensor = effects.blend_layers(control, shape,
                                  random.random() * .5, *collage_images)

    tensor = effects.blend(tensor, base, .125 + random.random() * .125)

    tensor = effects.bloom(tensor, shape, alpha=.25 + random.random() * .125)
    tensor = effects.shadow(tensor, shape, alpha=.25 + random.random() * .125)

    tensor = tf.image.adjust_brightness(tensor, .05)
    tensor = tf.image.adjust_contrast(tensor, 1.25)

    with tf.Session().as_default():
        save(tensor, name)

    print(name)
Code Example #12
File: worldmaker.py Project: raboof/py-noisemaker
def clouds(input_filename):
    tensor = tf.image.convert_image_dtype(load(input_filename), tf.float32)

    run_preset("clouds", [LARGE_Y, LARGE_X, 3], FINAL_FILENAME, tensor=tensor)
Code Example #13
File: collagemaker.py Project: raboof/py-noisemaker
def basic(ctx, width, height, input_dir, name, control_filename,
          retro_upscale):
    shape = [height, width,
             3]  # Any shape you want, as long as it's [1024, 1024, 3]

    filenames = []

    for root, _, files in os.walk(input_dir):
        for filename in files:
            if filename.endswith(('.png', '.jpg')):
                filenames.append(os.path.join(root, filename))

    collage_count = min(random.randint(4, 6), len(filenames))
    collage_images = []

    for i in range(collage_count + 1):
        index = random.randint(0, len(filenames) - 1)

        input_filename = filenames[index]  # paths from os.walk() already include input_dir

        collage_input = tf.image.convert_image_dtype(util.load(input_filename,
                                                               channels=3),
                                                     dtype=tf.float32)

        input_shape = effects.shape_from_file(input_filename)

        if retro_upscale:
            input_shape = [
                input_shape[0] * 2, input_shape[1] * 2, input_shape[2]
            ]

            collage_input = effects.resample(collage_input,
                                             input_shape,
                                             spline_order=0)

        collage_input = effects.square_crop_and_resize(collage_input,
                                                       input_shape, 1024)

        collage_images.append(collage_input)

    base = generators.basic(freq=random.randint(2, 5),
                            shape=shape,
                            lattice_drift=random.randint(0, 1),
                            hue_range=random.random())

    if control_filename:
        control = tf.image.convert_image_dtype(util.load(control_filename,
                                                         channels=1),
                                               dtype=tf.float32)

        control = effects.square_crop_and_resize(
            control, effects.shape_from_file(control_filename), 1024)

        control = effects.value_map(control, shape, keep_dims=True)

    else:
        control = effects.value_map(collage_images.pop(),
                                    shape,
                                    keep_dims=True)

    control = effects.convolve(effects.ValueMask.conv2d_blur, control,
                               [height, width, 1])

    with tf.Session().as_default():
        # sort collage images by brightness
        collage_images = [
            j[1] for j in sorted([(tf.reduce_sum(i).eval(), i)
                                  for i in collage_images])
        ]

        tensor = effects.blend_layers(control, shape,
                                      random.random() * .5, *collage_images)

        tensor = effects.blend(tensor, base, .125 + random.random() * .125)

        tensor = effects.bloom(tensor,
                               shape,
                               alpha=.25 + random.random() * .125)
        tensor = effects.shadow(tensor,
                                shape,
                                alpha=.25 + random.random() * .125,
                                reference=control)

        tensor = tf.image.adjust_brightness(tensor, .05)
        tensor = tf.image.adjust_contrast(tensor, 1.25)

        save(tensor, name)

    print('mashup')
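
The brightness sort in example #13 orders the collage layers by sorting (sum, tensor) tuples; if two sums ever tie, Python falls back to comparing the image tensors themselves, which TensorFlow rejects. A slightly more defensive sketch of that step uses an explicit key so only the scalar sums are compared (run inside the same default-session block):

        # Sort collage images from darkest to brightest without ever
        # comparing the tensors themselves.
        collage_images = sorted(collage_images,
                                key=lambda image: tf.reduce_sum(image).eval())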