import numpy as np
import tensorflow as tf
from datetime import datetime

# model_init, get_feature, get_feature_representation, compute_grads and the image
# module (pre_process_img, deprocess_img, saveimg) are defined elsewhere in this project.


def run(content_path, style_path, iteration):
    content_weight = 1e3
    style_weight = 1

    # Build the feature-extraction model and freeze its weights.
    model = model_init()
    for layer in model.layers:
        layer.trainable = False

    artist_style_features, artist_content_features = get_feature(
        model, style_path, content_path)

    # Initialize the generated image with the content image and make it trainable.
    init_image = image.pre_process_img(content_path)
    init_image = tf.Variable(init_image, dtype=tf.float32)

    opt = tf.keras.optimizers.Adam(5, beta_1=0.99, epsilon=1e-1)

    loss_weights = (content_weight, style_weight)
    cfg = {
        'model': model,
        'loss_weights': loss_weights,
        'init_image': init_image,
        'artist_content_features': artist_content_features,
        'artist_style_features': artist_style_features
    }

    # These are the ImageNet per-channel (BGR) means subtracted by VGG-style
    # preprocessing; image.py uses the same values. Clipping to [-mean, 255 - mean]
    # keeps the optimized image inside the valid preprocessed pixel range.
    norm_means = np.array([103.939, 116.779, 123.68])
    min_vals = -norm_means
    max_vals = 255 - norm_means

    # Store the best loss and the corresponding image.
    best_loss, best_img = float('inf'), None
    imgs = []

    start = datetime.now()
    for i in range(iteration):
        print(i)
        grads, all_loss = compute_grads(cfg)
        loss, content_loss, style_loss = all_loss
        opt.apply_gradients([(grads, init_image)])
        clipped = tf.clip_by_value(init_image, min_vals, max_vals)
        init_image.assign(clipped)

        # The block below relies on helper functions from image.py
        # (adapted from GitHub code); to be refactored later.
        if loss < best_loss:
            # Update best loss and best image from total loss.
            best_loss = loss
            best_img = image.deprocess_img(init_image.numpy())

        if i % 20 == 0:
            end = datetime.now()
            print('[INFO]Iteration: {}'.format(i))
            print('Total loss: {:.4e}, '
                  'style loss: {:.4e}, '
                  'content loss: {:.4e}'.format(loss, style_loss, content_loss))
            print(f'20 iters takes {end - start}')
            start = datetime.now()

            img = init_image.numpy()
            img = image.deprocess_img(img)
            path = 'output_' + str(i) + '.jpg'
            image.saveimg(img, path)
            imgs.append(img)

    return best_img, best_loss
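# Where the norm_means values come from: they are the ImageNet per-channel means
# (in BGR order) that Keras' Caffe-style VGG preprocessing subtracts, which is why
# the clip bounds above are -norm_means and 255 - norm_means. The sketch below is
# only an illustration and assumes image.pre_process_img mirrors
# tf.keras.applications.vgg19.preprocess_input; the helper name is hypothetical.
def _show_preprocessed_range():
    black = np.zeros((1, 1, 1, 3), dtype=np.float32)          # darkest RGB pixel
    white = np.full((1, 1, 1, 3), 255.0, dtype=np.float32)    # brightest RGB pixel
    lo = tf.keras.applications.vgg19.preprocess_input(black)  # -> [-103.939, -116.779, -123.68]
    hi = tf.keras.applications.vgg19.preprocess_input(white)  # -> [151.061, 138.221, 131.32]
    print(np.asarray(lo).reshape(-1), np.asarray(hi).reshape(-1))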
def run_nst(content_path, style_path, iteration=1000, content_weight=1e3, style_weight=1):
    # Build the feature-extraction model and freeze its weights.
    model = model_init()
    for layer in model.layers:
        layer.trainable = False

    content_features = get_feature_representation(model, content_path, mode='content')
    style_features = get_feature_representation(model, style_path, mode='style')

    # Initialize the generated image with the content image and make it trainable.
    init_image = image.pre_process_img(content_path)
    init_image = tf.Variable(init_image, dtype=tf.float32)

    opt = tf.keras.optimizers.Adam(5, beta_1=0.99, epsilon=1e-1)

    loss_weights = (content_weight, style_weight)
    cfg = {
        'model': model,
        'loss_weights': loss_weights,
        'init_image': init_image,
        'content_features': content_features,
        'style_features': style_features
    }

    # ImageNet per-channel (BGR) means used by the preprocessing in image.py;
    # clip the optimized image to the valid preprocessed range [-mean, 255 - mean].
    norm_means = np.array([103.939, 116.779, 123.68])
    min_vals = -norm_means
    max_vals = 255 - norm_means

    # Store the best loss and the corresponding image.
    best_loss, best_img = float('inf'), None
    imgs = []

    start = datetime.now()
    for i in range(iteration):
        grads, all_loss = compute_grads(cfg)
        loss, content_loss, style_loss = all_loss
        opt.apply_gradients([(grads, init_image)])
        clipped = tf.clip_by_value(init_image, min_vals, max_vals)
        init_image.assign(clipped)

        if loss < best_loss:
            # Update best loss and best image from total loss.
            best_loss = loss
            best_img = image.deprocess_img(init_image.numpy())

        if i % 100 == 0:
            end = datetime.now()
            print('[INFO]Iteration: {}'.format(i))
            print('Total loss: {:.4e}, '
                  'style loss: {:.4e}, '
                  'content loss: {:.4e}'.format(loss, style_loss, content_loss))
            print(f'100 iters takes {end - start}')
            start = datetime.now()

        if i % 500 == 0:
            # Use the .numpy() method to get the concrete numpy array.
            plot_img = init_image.numpy()
            plot_img = image.deprocess_img(plot_img)
            path = 'output/output_' + str(i) + '.jpg'
            image.saveimg(plot_img, path)
            imgs.append(plot_img)

    return best_img, best_loss
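# Minimal usage sketch (hypothetical file names; assumes the project-level helpers
# used above are importable and that an 'output/' directory already exists):
if __name__ == '__main__':
    final_img, final_loss = run_nst('content.jpg', 'style.jpg',
                                    iteration=1000, content_weight=1e3, style_weight=1)
    image.saveimg(final_img, 'output/best.jpg')
    print('Best total loss: {:.4e}'.format(final_loss))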