Example #1
def old_target_for_fno(fno):
    layer = 'mixed5a_3x3_bottleneck_pre_relu'
    channel_1 = 10
    channel_2 = 3
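    # During the first 120 frames of each 240-frame cycle, cross-fade the
    # objective from channel_1 to channel_2; for the rest of the cycle,
    # use channel_2 alone.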
    if (fno % 240) < 120:
        rt = (fno % 240) / 120.0
        rit = 1.0 - rt
        target = tf.reduce_mean(rit * tfi.T(layer)[:, :, :, channel_1] +
                                rt * tfi.T(layer)[:, :, :, channel_2])
    else:
        target = tf.reduce_mean(tfi.T(layer)[:, :, :, channel_2])
    return target
Example #2
def add_texture_ring( current_img, pattern_step, offset, size, layer, channel ):
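    # Apply a deep-dream texture to one ring of the current frame, but only
    # while pattern_step falls inside [offset, offset + size); otherwise the
    # frame is returned unchanged.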
    if pattern_step >= offset and pattern_step < offset + size:
        ring_id = pattern_step - offset
        ring_img = ring_masks[ring_id]
        target = tfi.T(layer)[:,:,:,channel]
        textured = tfi.render_deepdream( target, current_img, iter_n=3, step=1.5, octave_n=4, octave_scale=1.5 )
        return tfi.masked_mix( current_img, textured, ring_img )
    else:
        return current_img
Example #3
def target_for_fno(fno):
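    # The frame number is ignored: every frame optimises the mean activation
    # of channel 3 of this layer.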
    return tf.reduce_mean(tfi.T('mixed5a_3x3_bottleneck_pre_relu')[:, :, :, 3])
Example #4
import tfi
import os
import numpy as np
import PIL.Image
import tensorflow as tf

in_name = 'images/nfrac_1400x840.jpg'
out_name = 'images/start_frame_1400x840.jpeg'

img0 = PIL.Image.open(in_name)
img0 = np.float32(img0)

tfi.reset_graph_and_session()

target = tf.square(tfi.T('mixed4c'))
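# Squaring the whole mixed4c tensor makes the objective sensitive to every
# channel of the layer at once, not just a single one.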

print('Rendering {}'.format(out_name))
test_img = tfi.render_deepdream(target,
                                img0,
                                iter_n=20,
                                step=0.75,
                                octave_n=4,
                                octave_scale=1.5)
tfi.savejpeg(test_img, out_name)
Example #5
end_frame = 5445

end_colours = np.float32(PIL.Image.open('images/stage03_end_colours.jpeg'))
credit_img = np.float32(PIL.Image.open('images/credits.jpeg'))
complete_fade_img = credit_img * 0
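# complete_fade_img is an all-black image with the same shape as the credits frame.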

# Stage 3b - rapid zoom in and fade to credits
for frame in range(start_frame, end_frame):
    fno = frame + 1
    print('Stage 03b - frame {}'.format(fno))

    section_id = ((fno - start_frame) // 30) + 2
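    # targets is assumed to be a flat [layer, channel, layer, channel, ...]
    # list defined earlier; each 30-frame block of this stage advances to the
    # next pair.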
    layer_1 = targets[section_id * 2]
    channel_1 = targets[section_id * 2 + 1]

    target = tf.reduce_mean(tfi.T(layer_1)[:, :, :, channel_1])

    delta_rot = 0.1
    delta_zoom = 1.05

    total_rot += delta_rot
    total_zoom *= delta_zoom

    current_img = tfi.affine_zoom(current_img, delta_zoom, delta_rot)

    r = (fno - start_frame) / (end_frame - start_frame)
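    # The blend weight eases linearly from 0.99 at the start of this stage to
    # 0.96 at its end.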
    mix_amount = 0.99 * (1 - r) + 0.96 * r
    current_img = tfi.mix_images(current_img, end_colours, mix_amount)
    current_img = tfi.render_deepdream(target,
                                       current_img,
                                       iter_n=2,
Example #6
for frame in range(nframes):
    fno = end_frame - 1 - frame
    section_id = (fno // channel_step)
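    # Each block of channel_step frames has its own layer/channel pair; the
    # previous pair is kept so the two can be cross-faded at the boundary.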

    prev_layer = targets[(section_id - 1) * 2]
    prev_channel = targets[(section_id - 1) * 2 + 1]

    layer = targets[section_id * 2]
    channel = targets[section_id * 2 + 1]

    print('Rendering frame {}, using layer {}, channel {}'.format(
        fno, layer, channel))
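    # A channel index above 1000 is a sentinel meaning "use the squared
    # activations of the whole layer" rather than a single channel.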

    if channel > 1000:
        target = tf.square(tfi.T(layer))
    else:
        target = tfi.T(layer)[:, :, :, channel]

    # Mixed target for first half of each channel_step
    if (fno % 240) < 120:
        r = (fno % 240) / 120.0
        ri = 1.0 - r
        if prev_layer == layer:
            # If the layers match, then shape matches, and we can get a simpler combination
            target = tf.reduce_mean(ri *
                                    tfi.T(prev_layer)[:, :, :, prev_channel] +
                                    r * tfi.T(layer)[:, :, :, channel])
        else:
            target = ri * tf.reduce_mean(
                tfi.T(prev_layer)[:, :, :, prev_channel]) + r * tf.reduce_mean(
                tfi.T(layer)[:, :, :, channel])
Example #7
    print('Stage 01 - overlap, frame {}'.format(fno))

    section_id = 7
    layer_1 = targets[ section_id  * 2 + 2]
    channel_1 = targets[ section_id * 2 + 3]

    # Lattice with gems, which is what we expect to merge with in stage 2
    layer_2 = 'mixed5a_3x3_bottleneck_pre_relu'
    channel_2 = 3

    if ( fno % 240 ) < 120:
        r = (fno - frames)/120.0
        ri = 1.0 - r
        if layer_1 == layer_2:
            # If the layers match, then shape matches, and we can get a simpler combination
            target = tf.reduce_mean( ri * tfi.T(layer_1)[:,:,:,channel_1] + r * tfi.T(layer_2)[:,:,:,channel_2] )
        else:
            target = ri *  tf.reduce_mean( tfi.T(layer_1)[:,:,:,channel_1] ) + r * tf.reduce_mean( tfi.T(layer_2)[:,:,:,channel_2] )
    else:
        target = tf.reduce_mean( tfi.T(layer_2)[:,:,:,channel_2] )

    delta_rot = transition_rot
    delta_zoom = transition_zoom
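    # Ease delta_rot down to zero over the last 10 frames of each 240-frame
    # cycle, and ramp it back up over the first 10 frames of a cycle once fno
    # passes 980.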

    if ( fno % 240 ) > 230:
        delta_rot *= ( 240 - ( fno % 240 ) ) / 10.0

    if fno > 980 and ( fno % 240 ) < 10:
        delta_rot *= ( fno % 240 ) / 10.0

    total_rot += delta_rot
Example #8
test_layers = [
    'mixed4a_3x3_bottleneck_pre_relu', 'mixed4b_3x3_bottleneck_pre_relu',
    'head1_bottleneck_pre_relu', 'mixed4c_3x3_bottleneck_pre_relu',
    'mixed5b_3x3_bottleneck_pre_relu', 'mixed4e_3x3_bottleneck_pre_relu'
]
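# Each layer in test_layers gets its own directory under explore_layers:
# one render with all channels squared, then a render for each individual channel.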
iterations = 30
source_img = 'images/example.jpg'

img0 = PIL.Image.open(source_img)
img0 = np.float32(img0)

if not os.path.exists('explore_layers'):
    os.makedirs('explore_layers')

for layer in test_layers:
    num_channels = tfi.T(layer).get_shape()[3]
    directory = 'explore_layers/{}'.format(layer)
    if not os.path.exists(directory):
        os.makedirs(directory)

    print('Rendering {}, all {} channels squared'.format(layer, num_channels))
    test_img = tfi.render_deepdream(tf.square(tfi.T(layer)),
                                    img0,
                                    iter_n=iterations,
                                    step=2.0,
                                    octave_n=4,
                                    octave_scale=1.5)
    tfi.savejpeg(test_img, '{}/all_channels_squared.jpeg'.format(directory))

    for channel in range(0, num_channels):
        print('Rendering {}, channel {}'.format(layer, channel))