Code Example #1
File: neural_style.py  Project: xiongAlen/cs231n
def create_content_features(content_image):
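    # Build a throwaway graph, run the content image once through VGG-19,
    # and cache the CONTENT_LAYER activations as a NumPy array.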
    content_features = {}
    with tf.Graph().as_default(), tf.Session() as sess:
        X_content = tf.placeholder(tf.float32,
                                   shape=batch_shape,
                                   name="X_content")
        X_pre = img_utils.preprocess(X_content)
        content_np = np.array([content_image])
        content_net, _ = load_graph(
            data_path='vgg_weights/imagenet-vgg-verydeep-19.mat',
            input_image=X_pre)
        content_features[CONTENT_LAYER] = content_net[CONTENT_LAYER].eval(
            feed_dict={X_content: content_np})
    return content_features
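Example #3 below calls create_content_loss, which is not shown on this page. The following is a hypothetical sketch of what it might look like, assuming the usual squared-error content loss over CONTENT_LAYER; the normalization by the feature count is an assumption, not taken from the project:

def create_content_loss(content_features, net):
    # Hypothetical sketch: penalize the squared difference between the
    # generated image's CONTENT_LAYER activations and the cached ones.
    target = content_features[CONTENT_LAYER]
    return tf.reduce_sum(tf.square(net[CONTENT_LAYER] - target)) / target.size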
Code Example #2
File: neural_style.py  Project: xiongAlen/cs231n
def create_style_features(style_image):
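    # Run the style image once through VGG-19 and cache, per style layer,
    # the Gram matrix of its activations as a NumPy array.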
    style_features = {}
    with tf.Graph().as_default(), tf.Session() as sess:
        X_style = tf.placeholder(tf.float32,
                                 shape=batch_shape,
                                 name='style_image')
        X_pre = img_utils.preprocess(X_style)
        net, _ = load_graph(
            data_path='vgg_weights/imagenet-vgg-verydeep-19.mat',
            input_image=X_pre)
        style_np = np.array([style_image])
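        # Flatten each layer's feature map to (H*W, C) and form the (C, C)
        # Gram matrix, normalized by the total number of feature entries.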
        for layer in STYLE_LAYERS:
            features = net[layer].eval(feed_dict={X_style: style_np})
            features = features.reshape((-1, features.shape[3]))
            gram = np.matmul(features.T, features) / features.size
            style_features[layer] = gram
    return style_features
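Example #3 also calls create_style_loss, likewise not shown on this page. A hypothetical sketch, mirroring the Gram-matrix computation above on the generated image's activations; the equal per-layer weighting and the normalization are assumptions:

def create_style_loss(style_features, net):
    losses = []
    for layer in STYLE_LAYERS:
        # Gram matrix of the generated image's activations, built the same
        # way as in create_style_features but with TensorFlow ops.
        activations = net[layer]
        _, h, w, c = activations.get_shape().as_list()
        flat = tf.reshape(activations, (-1, c))
        gram = tf.matmul(flat, flat, transpose_a=True) / (h * w * c)
        target = style_features[layer]
        losses.append(tf.reduce_sum(tf.square(gram - target)) / target.size)
    return tf.add_n(losses)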
Code Example #3
File: neural_style.py  Project: xiongAlen/cs231n
        # The image being synthesized is itself a trainable variable,
        # initialized to zeros and overwritten via assign_op below.
        input_image = tf.get_variable(name="image",
                                      trainable=True,
                                      dtype=tf.float32,
                                      shape=batch_shape,
                                      initializer=tf.constant_initializer(
                                          value=0., dtype=tf.float32))
    net, _ = load_graph(data_path='vgg_weights/imagenet-vgg-verydeep-19.mat',
                        input_image=input_image)
    l_content = create_content_loss(content_features, net)
    l_style = create_style_loss(style_features, net)

    # Combine the two objectives; 512 is the relative weight on the style term.
    total_loss = l_content + 512 * l_style

    import matplotlib.pyplot as plt

    content_image_file = img_utils.preprocess(content_image_file)

    initial_image = img_utils.generate_noise_image(content_image_file,
                                                   IMAGE_HEIGHT, IMAGE_WIDTH,
                                                   0.1)
    #initial_image = img_utils.postprocess(initial_image)#undo the preprocessing;

    assign_op = input_image.assign(initial_image)
    opt = tf.train.AdamOptimizer(learning_rate=LR).minimize(total_loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        an_image = sess.run(assign_op)
        plt.figure()
        plt.subplot(121)
        # Assumed completion of the display call: show the initial (noise)
        # image in the left panel; the use of img_utils.postprocess and the
        # uint8 conversion are guesses about img_utils' conventions.
        plt.imshow(np.clip(img_utils.postprocess(an_image)[0], 0, 255).astype("uint8"))
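
        # --- Hypothetical continuation, not part of the source snippet ---
        # A minimal sketch of the remaining optimization loop under TF1:
        # repeatedly run the Adam step on the image variable, then show the
        # final result next to the initial one. The iteration count, print
        # frequency, and reuse of img_utils.postprocess are assumptions.
        for step in range(1000):
            sess.run(opt)
            if step % 100 == 0:
                print("step %d, total loss %.3f" % (step, sess.run(total_loss)))

        result = sess.run(input_image)
        plt.subplot(122)
        plt.imshow(np.clip(img_utils.postprocess(result)[0], 0, 255).astype("uint8"))
        plt.show()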