from model import VGGNet
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np

# Load the pre-split dataset saved with np.savez (positional keys arr_0..arr_5).
data = np.load("train_val_test.npz")
train_X, train_Y, val_X, val_Y, test_X, test_Y = (
    data["arr_0"], data["arr_1"], data["arr_2"],
    data["arr_3"], data["arr_4"], data["arr_5"],
)

vgg = VGGNet()
model = vgg.build()
# FIX: `lr` is deprecated (and removed in recent tf.keras) — use `learning_rate`.
opt = Adam(learning_rate=1e-3)
# Stop once val_loss has not improved for 5 epochs, and roll back to the
# best-performing weights rather than keeping those of the final (worse) epoch.
early_stop = EarlyStopping(monitor='val_loss', patience=5,
                           restore_best_weights=True)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])
history = model.fit(train_X,
                    train_Y,
                    epochs=30,
                    validation_data=(val_X, val_Y),
                    callbacks=[early_stop])

model.save("model.h5")
# Example #2
# 0
import tensorflow as tf
import sys
from model import VGGNet
from data_loader import DataLoader

# Build the graph: network, loss, and training op.
net = VGGNet([224, 224], 128)
net.build()
loss = net.loss()
# print(tf.global_variables())
# Checkpoint to resume from; leave as '' / None to train from scratch.
ckpt_path = '../ckpt/model.ckpt-0'

loader = DataLoader()

sess = tf.Session()
# NOTE: the optimizer is created BEFORE the Saver so that its slot variables
# (Adam moment accumulators) are included in tf.global_variables().
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)

ls = tf.summary.scalar('loss', loss)

train_writer = tf.summary.FileWriter('../log_train', sess.graph)
valid_writer = tf.summary.FileWriter('../log_valid', sess.graph)

batch = 32
batch_num = loader.images_urls.shape[0] // batch
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.7
valid_batch_num = loader.valid_urls.shape[0] // batch

# FIX: `if ckpt_path:` tested a hard-coded non-empty string, so it was always
# true: the fresh-initialization branch was unreachable and a missing
# checkpoint file crashed saver.restore. Verify the checkpoint exists on disk
# before restoring; otherwise initialize all variables.
if ckpt_path and tf.train.checkpoint_exists(ckpt_path):
    saver.restore(sess, ckpt_path)
else:
    sess.run(tf.global_variables_initializer())
# Example #3
# 0
def main():
    """Run neural style transfer using VGG16 feature maps.

    Creates a trainable result image, builds three VGGNet instances (content,
    style, result) sharing one pretrained weight dict, and optimizes the
    result image to minimize a weighted content + style loss, periodically
    writing the current image to disk.

    Relies on module-level globals and helpers defined elsewhere in the file:
    content_img_path, style_img_path, vgg_16_npy_pyth, lambda_c, lambda_s,
    learning_rate, num_steps, output_dir, initial_result, read_img,
    gram_matrix.
    """
    # Generate the (trainable) result image: mean 127.5, stddev 20.
    result = initial_result((1, 466, 712, 3), 127.5, 20)

    # Read the content image and the style image.
    content_val = read_img(content_img_path)
    style_val = read_img(style_img_path)

    content = tf.placeholder(tf.float32, shape=[1, 466, 712, 3])
    style = tf.placeholder(tf.float32, shape=[1, 615, 500, 3])

    # Load the pretrained VGG16 weights.
    # NOTE: under Python 3 the file must be loaded with encoding='latin1'.
    data_dict = np.load(vgg_16_npy_pyth, encoding='latin1').item()

    # One VGG instance per image; all three share the same weight dict.
    vgg_for_content = VGGNet(data_dict)
    vgg_for_style = VGGNet(data_dict)
    vgg_for_result = VGGNet(data_dict)

    # Build each network on its respective input tensor.
    vgg_for_content.build(content)
    vgg_for_style.build(style)
    vgg_for_result.build(result)

    # Which layers to extract features from.
    # NOTE: the content-feature layers must match the result-feature layers,
    # and likewise the style layers must match the result's style layers.
    content_features = [
        # vgg_for_content.conv1_2,
        # vgg_for_content.conv2_2,
        # vgg_for_content.conv3_3,
        vgg_for_content.conv4_3,
        vgg_for_content.conv5_3,
    ]

    result_content_features = [
        # vgg_for_result.conv1_2,
        # vgg_for_result.conv2_2,
        # vgg_for_result.conv3_3,
        vgg_for_result.conv4_3,
        vgg_for_result.conv5_3,
    ]

    style_features = [
        vgg_for_style.conv2_2,
    ]

    result_style_features = [
        vgg_for_result.conv2_2,
    ]

    style_gram = [gram_matrix(feature) for feature in style_features]
    result_style_gram = [
        gram_matrix(feature) for feature in result_style_features
    ]

    # Content loss: mean squared difference of matched feature maps.
    content_loss = tf.zeros(1, tf.float32)
    for c, c_ in zip(content_features, result_content_features):
        content_loss += tf.reduce_mean((c - c_)**2, axis=[1, 2, 3])

    # Style loss over Gram matrices.
    style_loss = tf.zeros(1, tf.float32)
    for s, s_ in zip(style_gram, result_style_gram):
        # gram_matrix already reduced one dimension, so averaging over
        # axes [1, 2] suffices here.
        style_loss += tf.reduce_mean((s - s_)**2, axis=[1, 2])

    # Total loss: weighted sum of the content and style terms.
    loss = content_loss * lambda_c + style_loss * lambda_s

    train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print("1111111")
        for step in range(num_steps):
            loss_value, content_loss_value, style_loss_value, _ = sess.run(
                [loss, content_loss, style_loss, train_op],
                feed_dict={
                    content: content_val,
                    style: style_val
                })

            print(
                'step: %d, loss_value: %.4f, content_loss: %.4f, style_loss: %.4f'
                % (step + 1, loss_value[0], content_loss_value[0],
                   style_loss_value[0]))
            if step % 100 == 0:
                result_img_path = os.path.join(output_dir,
                                               'result_%05d.jpg' % (step + 1))
                # The result is 4-D (batch of 1); index 0 drops the batch dim.
                # NOTE(review): result.eval(sess) passes sess positionally.
                # That is valid only if initial_result returns a tf.Variable
                # (whose eval signature is eval(session)); for a plain Tensor
                # the first positional arg is feed_dict — confirm.
                result_val = result.eval(sess)[0]
                # np.clip(a, a_min, a_max): clamp pixel values into [0, 255]
                # before the uint8 conversion below.
                result_val = np.clip(result_val, 0, 255)

                img_arr = np.asarray(result_val, np.uint8)
                img = Image.fromarray(img_arr)
                img.save(result_img_path)