import os

import cv2
import numpy as np

# Helper functions used below (all_files_under, n4itk, histogram_matching,
# get_mask, imshow) are defined elsewhere in the repository.


def main(data, temp_id, size=256, delay=0, is_save=False):
    save_folder = os.path.join(os.path.dirname(data), 'preprocessing')
    if is_save and not os.path.exists(save_folder):
        os.makedirs(save_folder)
    save_folder2 = os.path.join(os.path.dirname(data), 'post')
    if is_save and not os.path.exists(save_folder2):
        os.makedirs(save_folder2)

    # Read all file paths
    filenames = all_files_under(data, extension='png')

    # Read the template image and N4-bias-correct it
    temp_filename = filenames[temp_id]
    ref_img = cv2.imread(temp_filename, cv2.IMREAD_GRAYSCALE)
    ref_img = ref_img[:, -size:].copy()
    _, ref_img = n4itk(ref_img)  # N4 bias correction for the reference image

    for idx, filename in enumerate(filenames):
        print('idx: {}, filename: {}'.format(idx, filename))
        img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
        ct_img = img[:, :size]
        mr_img = img[:, -size:]

        # N4 bias correction
        ori_img, cor_img = n4itk(mr_img)
        # Dynamic histogram matching between the two images
        his_mr = histogram_matching(cor_img, ref_img)
        # Mask estimation based on Otsu auto-thresholding
        mask = get_mask(his_mr, task='m2c')
        # Mask out the background
        masked_ct = ct_img & mask
        masked_mr = his_mr & mask

        canvas = imshow(ori_img, cor_img, his_mr, masked_mr, mask, ct_img,
                        masked_ct, size=size, delay=delay)
        canvas2 = np.hstack((masked_mr, masked_ct, mask))

        if is_save:
            cv2.imwrite(os.path.join(save_folder, os.path.basename(filename)),
                        canvas)
            cv2.imwrite(os.path.join(save_folder2, os.path.basename(filename)),
                        canvas2)
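# A minimal, hypothetical entry point for the preprocessing function above.
# The flag names (--data, --temp_id, ...) are assumptions for illustration
# and are not taken from the original repository.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='MR preprocessing: N4 bias correction, histogram '
                    'matching, Otsu masking')
    parser.add_argument('--data', type=str, required=True,
                        help='folder with paired CT|MR png images')
    parser.add_argument('--temp_id', type=int, default=0,
                        help='index of the histogram-matching template image')
    parser.add_argument('--size', type=int, default=256)
    parser.add_argument('--delay', type=int, default=0)
    parser.add_argument('--is_save', action='store_true')
    args = parser.parse_args()

    main(args.data, args.temp_id, size=args.size, delay=args.delay,
         is_save=args.is_save)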
import os
import sys

import numpy as np
import skimage.io
import tensorflow as tf

import utils
import vgg19


def run_tensorflow(image, noiseImage, contentImage, output_directory, depth,
                   weightsLayers, weightsLayersContent, weightsPyramid,
                   weightsPyramidContent, iter, betaPar,
                   vgg_class=vgg19.Vgg19):
    print('Begin execution of run_tensorflow')
    print(np.shape(image))

    # Variable for storing the style image
    style = tf.get_variable(name="style_image", dtype=tf.float64,
                            initializer=image, trainable=False)
    style = tf.cast(style, tf.float32)
    noise = tf.get_variable(name="noise_image", dtype=tf.float32,
                            initializer=tf.constant(noiseImage),
                            trainable=True)
    content = tf.get_variable(name="content_image", dtype=tf.float64,
                              initializer=tf.constant(contentImage),
                              trainable=False)
    content = tf.cast(content, tf.float32)
    #noise = tf.cast(noise, tf.float32)

    styleList = [style]
    noiseList = [noise]
    contentList = [content]
    fpassListContent = []
    fpassListstyle = []  # lists of vgg objects
    fpassListNoise = []
    outListstyle = []  # lists of the output layer of the vgg objects
    outListNoise = []
    outListContent = []

    ## TODO ##
    # Move the pyramid code to a function: it receives the styleList and a
    # name-scope name and returns the updated list (see the build_pyramid
    # sketch further below).
    with tf.name_scope('build_pyramid_style'):
        gaussKerr = tf.get_variable(
            initializer=np.reshape(utils.gkern(5), (5, 5, 1, 1)),
            trainable=False, dtype='float64', name='gauss_kernel')
        gaussKerr = tf.cast(gaussKerr, tf.float32)
        downsamp_filt = tf.get_variable(
            initializer=np.reshape(np.array([[1., 0.], [0., 0.]]),
                                   (2, 2, 1, 1)),
            trainable=False, dtype='float64', name='downsample_filter')
        downsamp_filt = tf.cast(downsamp_filt, tf.float32)
        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(styleList[i], num=3, axis=3)
                tR = tf.expand_dims(tR, 3)
                tG = tf.expand_dims(tG, 3)
                tB = tf.expand_dims(tB, 3)
                # Convolve each channel with the Gaussian filter,
                # then downsample by a factor of 2
                tR_gauss = tf.nn.conv2d(tR, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tG_gauss = tf.nn.conv2d(tG, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tB_gauss = tf.nn.conv2d(tB, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tR_downs = tf.nn.conv2d(tR_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tG_downs = tf.nn.conv2d(tG_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tB_downs = tf.nn.conv2d(tB_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)
                styleList.append(tmp)

    ## TODO ##
    ## Find out what to do with the reuse
    with tf.name_scope('build_pyramid_noise'):
        #gaussKerr = tf.get_variable(initializer=np.reshape(utils.gkern(5), (5, 5, 1, 1)), trainable=False, dtype='float64', name='gauss_kernel')
        #gaussKerr = tf.cast(gaussKerr, tf.float32, reuse=True)
        #downsamp_filt = tf.get_variable(initializer=np.reshape(np.array([[1., 0.], [0., 0.]]), (2, 2, 1, 1)), trainable=False, dtype='float64', name='downsample_filter')
        #downsamp_filt = tf.cast(downsamp_filt, tf.float32, reuse=True)
        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(noiseList[i], num=3, axis=3)
                tR = tf.expand_dims(tR, 3)
                tG = tf.expand_dims(tG, 3)
                tB = tf.expand_dims(tB, 3)
                # Convolve each channel with the Gaussian filter,
                # then downsample by a factor of 2
                tR_gauss = tf.nn.conv2d(tR, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tG_gauss = tf.nn.conv2d(tG, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tB_gauss = tf.nn.conv2d(tB, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tR_downs = tf.nn.conv2d(tR_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tG_downs = tf.nn.conv2d(tG_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tB_downs = tf.nn.conv2d(tB_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)
                noiseList.append(tmp)

    with tf.name_scope('build_pyramid_content'):
        #gaussKerr = tf.get_variable(initializer=np.reshape(utils.gkern(5), (5, 5, 1, 1)), trainable=False, dtype='float64', name='gauss_kernel')
        #gaussKerr = tf.cast(gaussKerr, tf.float32, reuse=True)
        #downsamp_filt = tf.get_variable(initializer=np.reshape(np.array([[1., 0.], [0., 0.]]), (2, 2, 1, 1)), trainable=False, dtype='float64', name='downsample_filter')
        #downsamp_filt = tf.cast(downsamp_filt, tf.float32, reuse=True)
        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(contentList[i], num=3, axis=3)
                tR = tf.expand_dims(tR, 3)
                tG = tf.expand_dims(tG, 3)
                tB = tf.expand_dims(tB, 3)
                # Convolve each channel with the Gaussian filter,
                # then downsample by a factor of 2
                tR_gauss = tf.nn.conv2d(tR, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tG_gauss = tf.nn.conv2d(tG, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tB_gauss = tf.nn.conv2d(tB, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tR_downs = tf.nn.conv2d(tR_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tG_downs = tf.nn.conv2d(tG_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tB_downs = tf.nn.conv2d(tB_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)
                contentList.append(tmp)

    # fpassList* are lists of vgg instances; here we run the build method for
    # each instance and store the output (last layer) in the outList* lists
    with tf.name_scope('forward_pass_style'):
        for j in range(len(styleList)):
            with tf.name_scope('cycle%d' % (j)):
                fpassListstyle.append(vgg_class())
                out = fpassListstyle[j].build(styleList[j])
                outListstyle.append(out)
    with tf.name_scope('forward_pass_noise'):
        for j in range(len(styleList)):
            with tf.name_scope('cycle%d' % (j)):
                fpassListNoise.append(vgg_class())
                out = fpassListNoise[j].build(noiseList[j])
                outListNoise.append(out)
    with tf.name_scope('forward_pass_content'):
        for j in range(len(contentList)):
            with tf.name_scope('cycle%d' % (j)):
                fpassListContent.append(vgg_class())
                out = fpassListContent[j].build(contentList[j])
                outListContent.append(out)

    ###################################################
    ## Loss function
    with tf.name_scope('lossStyle'):
        # Check that there are enough layer and pyramid weights
        assert len(weightsLayers) >= fpassListstyle[0].getLayersCount()
        assert len(weightsPyramid) >= len(fpassListstyle)
        loss_style = 0.0
        #for i in range(0, 5):  # layers
        for j in range(len(fpassListstyle)):  # pyramid levels
            with tf.name_scope('cyclePyramid%d' % (j)):
                loss_pyra = 0.0
                for i in range(0, fpassListstyle[0].getLayersCount()):  # layers
                    with tf.name_scope('cycleLayer%d' % (i)):
                        origin = fpassListstyle[j].conv_list[i]
                        new = fpassListNoise[j].conv_list[i]
                        shape = origin.get_shape().as_list()
                        N = shape[3]  # number of channels (filters)
                        M = shape[1] * shape[2]  # width x height
                        F = tf.reshape(origin, (-1, N),
                                       name='CreateF_style')  # M x N
                        Gram_o = (tf.matmul(
                            tf.transpose(F, name='transpose_style'), F,
                            name='Gram_style') / (N * M))
                        F_t = tf.reshape(new, (-1, N), name='CreateF_noise')
                        Gram_n = tf.matmul(
                            tf.transpose(F_t, name='transpose_noise'), F_t,
                            name='Gram_noise') / (N * M)
                        loss = tf.nn.l2_loss(
                            (Gram_o - Gram_n),
                            name='lossGramsubstraction') / 4
                        loss = tf.scalar_mul(weightsLayers[i], loss)
                        loss_pyra = tf.add(loss_pyra, loss)
                loss_pyra = tf.scalar_mul(weightsPyramid[j], loss_pyra)
                loss_style = tf.add(loss_style, loss_pyra)
    tf.summary.scalar("loss_style", loss_style)

    with tf.name_scope('lossContent'):
        # Check that there are enough layer and pyramid weights
        assert len(weightsLayersContent) >= fpassListContent[0].getLayersCount()
        assert len(weightsPyramidContent) >= len(fpassListContent)
        loss_content = 0.0
        #for i in range(0, 5):  # layers
        for j in range(len(fpassListContent)):  # pyramid levels
            with tf.name_scope('cyclePyramid%d' % (j)):
                loss_pyra = 0.0
                for i in range(0, fpassListContent[0].getLayersCount()):  # layers
                    with tf.name_scope('cycleLayer%d' % (i)):
                        con = fpassListContent[j].conv_list[i]
                        new = fpassListNoise[j].conv_list[i]
                        shape = con.get_shape().as_list()
                        N = shape[3]  # number of channels (filters)
                        M = shape[1] * shape[2]  # width x height
                        P = tf.reshape(con, (-1, N),
                                       name='CreateF_content')  # M x N
                        #Gram_o = (tf.matmul(tf.transpose(F, name='transpose_style'), F, name='Gram_style') / (N * M))
                        F = tf.reshape(new, (-1, N), name='CreateF_noise')
                        #Gram_n = tf.matmul(tf.transpose(F_t, name='transpose_noise'), F_t, name='Gram_noise') / (N * M)
                        loss = tf.nn.l2_loss(
                            (F - P), name='lossGramsubstraction') / 2
                        loss = tf.scalar_mul(weightsLayersContent[i], loss)
                        loss_pyra = tf.add(loss_pyra, loss)
                loss_pyra = tf.scalar_mul(weightsPyramidContent[j], loss_pyra)
                loss_content = tf.add(loss_content, loss_pyra)
    tf.summary.scalar("loss_content", loss_content)

    #betaPar = 0.5
    alpha = tf.constant(1, dtype=tf.float32, name="alpha")
    beta = tf.constant(betaPar, dtype=tf.float32, name="beta")
    loss_sum = tf.scalar_mul(loss_content, alpha) + tf.scalar_mul(loss_style,
                                                                  beta)

    train_step = tf.train.AdamOptimizer(0.01).minimize(loss_sum,
                                                       var_list=[noise])
    #train_step = tf.train.AdagradOptimizer(0.01).minimize(loss_sum, var_list=[noise])
    restrict = tf.maximum(0., tf.minimum(1., noise), name="Restrict_noise")
    r_noise = noise.assign(restrict)

    tmpFile = os.path.join(output_directory, "tensor/")
    if not os.path.exists(tmpFile):
        os.makedirs(tmpFile)

    #https://www.tensorflow.org/api_docs/python/tf/contrib/opt/ScipyOptimizerInterface
    optimizer = tf.contrib.opt.ScipyOptimizerInterface(
        loss_sum, var_to_bounds={noise: (0, 1)}, method='L-BFGS-B',
        options={'maxiter': iter})
    #trainOP = optimizer.minimize

    summary_writer = tf.summary.FileWriter(tmpFile, tf.get_default_graph())
    merged_summary_op = tf.summary.merge_all()

    Iterations = iter
    counter = 0
    temp_loss = 0
    allLoss = []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        optimizer.minimize(sess)
        #tmp = fpassListContent[0].eval()
        #tf.summary.image('content', tmp, 3)

        answer = noise.eval()
        answer = answer.reshape(np.shape(answer)[1], np.shape(answer)[2], 3)
        skimage.io.imsave(
            os.path.join(output_directory, "final_texture_noHistMatch.png"),
            answer)
        answer = (utils.histogram_matching(answer, image) *
                  255.).astype('uint8')
        skimage.io.imsave(os.path.join(output_directory, "final_texture.png"),
                          answer)

        # Save the pyramid
        for w in range(1, len(noiseList)):
            outputPyramid = noiseList[w].eval()
            tmp = outputPyramid.reshape(np.shape(outputPyramid)[1],
                                        np.shape(outputPyramid)[2], 3)
            tmp = (utils.histogram_matching(tmp, image) *
                   255.).astype('uint8')
            skimage.io.imsave(
                os.path.join(output_directory,
                             "final_texture_pyra%s.png" % (str(w))), tmp)
# Leftover manual training loop from an earlier revision, kept for reference.
# It references tex, r_tex, proc_img and Iteration, none of which are defined
# at this point, so it is fenced off as a string literal (the same convention
# the code uses for the dead block in run_synthesis_pyramid).
'''
counter = 0
temp_loss = 0
for i in range(0, Iteration):
    sess.run(train_step)
    sess.run(r_tex)
    if i == 0:
        temp_loss = loss_sum.eval()
    if i % 10 == 0:
        loss = loss_sum.eval()
        if loss > temp_loss:
            counter += 1
        sys.stdout.write('\r')
        sys.stdout.write("[%-50s] %d/%d ,loss=%e" %
                         ('=' * int(i * 50 / Iteration), i, Iteration, loss))
        sys.stdout.flush()
        temp_loss = loss
    if i % 500 == 0 and i != 0:
        answer = tex.eval()
        answer = answer.reshape(256, 256, 3)
        answer = (answer * 255).astype('uint8')
        # print('Mean = ', np.mean(answer))
        filename = "./result/%safter.jpg" % (str(i))
        skimage.io.imsave(filename, answer)
    if counter > 3:
        print('\n', 'Early Stop!')
        break
answer = tex.eval()
answer = answer.reshape(256, 256, 3)
answer = (utils.histogram_matching(answer, proc_img) * 255.).astype('uint8')
skimage.io.imsave("./result/final_texture.jpg", answer)
'''
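# A sketch of the pyramid helper the TODOs above ask for: it would replace the
# three near-identical build_pyramid_* blocks. It reuses the same gaussKerr /
# downsamp_filt tensors defined in run_tensorflow; the function name and
# signature are assumptions, not part of the original code.
def build_pyramid(imageList, depth, gaussKerr, downsamp_filt, scope_name):
    """Blur and 2x-downsample the last image `depth` times, appending each
    new level to imageList; returns the updated list."""
    with tf.name_scope(scope_name):
        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(imageList[i], num=3, axis=3)
                channels = []
                for ch in (tR, tG, tB):
                    ch = tf.expand_dims(ch, 3)
                    # Gaussian blur, then stride-2 downsampling
                    ch = tf.nn.conv2d(ch, gaussKerr, strides=[1, 1, 1, 1],
                                      padding='SAME')
                    ch = tf.nn.conv2d(ch, downsamp_filt,
                                      strides=[1, 2, 2, 1], padding='SAME')
                    channels.append(ch)
                imageList.append(tf.concat(channels, axis=3))
    return imageList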
def run_tensorflow(image, noiseImage, output_directory, depth, weightsLayers,
                   weightsPyramid, iter, vgg_class=vgg19.Vgg19):
    print('Begin execution of run_tensorflow')
    print(np.shape(image))

    # Variable for storing the target image
    target = tf.get_variable(name="target_image", dtype=tf.float64,
                             initializer=image, trainable=False)
    target = tf.cast(target, tf.float32)
    noise = tf.get_variable(name="noise_image", dtype=tf.float32,
                            initializer=tf.constant(noiseImage),
                            trainable=True)
    #noise = tf.cast(noise, tf.float32)

    targetList = [target]
    noiseList = [noise]
    fpassListTarget = []  # lists of vgg objects
    fpassListNoise = []
    outListTarget = []  # lists of the output layer of the vgg objects
    outListNoise = []

    ## TODO ##
    # Move the pyramid code to a function: it receives the targetList and a
    # name-scope name and returns the updated list (see the build_pyramid
    # sketch above).
    with tf.name_scope('build_pyramid_target'):
        gaussKerr = tf.get_variable(
            initializer=np.reshape(utils.gkern(5), (5, 5, 1, 1)),
            trainable=False, dtype='float64', name='gauss_kernel')
        gaussKerr = tf.cast(gaussKerr, tf.float32)
        downsamp_filt = tf.get_variable(
            initializer=np.reshape(np.array([[1., 0.], [0., 0.]]),
                                   (2, 2, 1, 1)),
            trainable=False, dtype='float64', name='downsample_filter')
        downsamp_filt = tf.cast(downsamp_filt, tf.float32)
        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(targetList[i], num=3, axis=3)
                tR = tf.expand_dims(tR, 3)
                tG = tf.expand_dims(tG, 3)
                tB = tf.expand_dims(tB, 3)
                # Convolve each channel with the Gaussian filter,
                # then downsample by a factor of 2
                tR_gauss = tf.nn.conv2d(tR, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tG_gauss = tf.nn.conv2d(tG, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tB_gauss = tf.nn.conv2d(tB, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tR_downs = tf.nn.conv2d(tR_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tG_downs = tf.nn.conv2d(tG_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tB_downs = tf.nn.conv2d(tB_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)
                targetList.append(tmp)

    ## TODO ##
    ## Find out what to do with the reuse
    with tf.name_scope('build_pyramid_noise'):
        #gaussKerr = tf.get_variable(initializer=np.reshape(utils.gkern(5), (5, 5, 1, 1)), trainable=False, dtype='float64', name='gauss_kernel')
        #gaussKerr = tf.cast(gaussKerr, tf.float32, reuse=True)
        #downsamp_filt = tf.get_variable(initializer=np.reshape(np.array([[1., 0.], [0., 0.]]), (2, 2, 1, 1)), trainable=False, dtype='float64', name='downsample_filter')
        #downsamp_filt = tf.cast(downsamp_filt, tf.float32, reuse=True)
        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(noiseList[i], num=3, axis=3)
                tR = tf.expand_dims(tR, 3)
                tG = tf.expand_dims(tG, 3)
                tB = tf.expand_dims(tB, 3)
                # Convolve each channel with the Gaussian filter,
                # then downsample by a factor of 2
                tR_gauss = tf.nn.conv2d(tR, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tG_gauss = tf.nn.conv2d(tG, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tB_gauss = tf.nn.conv2d(tB, gaussKerr, strides=[1, 1, 1, 1],
                                        padding='SAME')
                tR_downs = tf.nn.conv2d(tR_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tG_downs = tf.nn.conv2d(tG_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tB_downs = tf.nn.conv2d(tB_gauss, downsamp_filt,
                                        strides=[1, 2, 2, 1], padding='SAME')
                tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)
                noiseList.append(tmp)

    # fpassList* are lists of vgg instances; here we run the build method for
    # each instance and store the output (last layer) in the outList* lists
    with tf.name_scope('forward_pass_target'):
        for j in range(len(targetList)):
            with tf.name_scope('cycle%d' % (j)):
                fpassListTarget.append(vgg_class())
                out = fpassListTarget[j].build(targetList[j])
                outListTarget.append(out)
    with tf.name_scope('forward_pass_noise'):
        for j in range(len(targetList)):
            with tf.name_scope('cycle%d' % (j)):
                fpassListNoise.append(vgg_class())
                out = fpassListNoise[j].build(noiseList[j])
                outListNoise.append(out)

    ###################################################
    ## Loss function
    # (a factored-out version of the Gram computation below is sketched
    # after this function)
    with tf.name_scope('lossFunction'):
        # Check that there are enough layer and pyramid weights
        assert len(weightsLayers) >= fpassListTarget[0].getLayersCount()
        assert len(weightsPyramid) >= len(fpassListTarget)
        loss_sum = 0.0
        #for i in range(0, 5):  # layers
        for j in range(len(fpassListTarget)):  # pyramid levels
            # fixed: the scope originally used the undefined/stale index i
            with tf.name_scope('cyclePyramid%d' % (j)):
                loss_pyra = 0.0
                for i in range(0, fpassListTarget[0].getLayersCount()):  # layers
                    with tf.name_scope('cycleLayer%d' % (i)):
                        origin = fpassListTarget[j].conv_list[i]
                        new = fpassListNoise[j].conv_list[i]
                        shape = origin.get_shape().as_list()
                        N = shape[3]  # number of channels (filters)
                        M = shape[1] * shape[2]  # width x height
                        F = tf.reshape(origin, (-1, N),
                                       name='CreateF_target')  # M x N
                        Gram_o = (tf.matmul(
                            tf.transpose(F, name='transpose_target'), F,
                            name='Gram_target') / (N * M))
                        F_t = tf.reshape(new, (-1, N), name='CreateF_noise')
                        Gram_n = tf.matmul(
                            tf.transpose(F_t, name='transpose_noise'), F_t,
                            name='Gram_noise') / (N * M)
                        loss = tf.nn.l2_loss(
                            (Gram_o - Gram_n),
                            name='lossGramsubstraction') / 2
                        loss = tf.scalar_mul(weightsLayers[i], loss)
                        loss_pyra = tf.add(loss_pyra, loss)
                loss_pyra = tf.scalar_mul(weightsPyramid[j], loss_pyra)
                loss_sum = tf.add(loss_sum, loss_pyra)
    tf.summary.scalar("loss_sum", loss_sum)

    # Debug variables and prints left in from development
    yolo = tf.Variable(np.zeros((20, 20)), name='yolo')
    yolo2 = tf.get_variable("big_matrix", shape=(784, 10),
                            initializer=tf.zeros_initializer())
    print(yolo)
    print(yolo2)
    print(noise)
    dummy = tf.get_variable(name="dummy", dtype=tf.float64,
                            initializer=np.zeros((5, 5)), trainable=True)

    #train_step = tf.train.AdamOptimizer(0.01).minimize(loss_sum, var_list=[noise])
    train_step = tf.train.AdagradOptimizer(0.01).minimize(loss_sum,
                                                          var_list=[noise])
    restrict = tf.maximum(0., tf.minimum(1., noise), name="Restrict_noise")
    r_noise = noise.assign(restrict)

    tmpFile = os.path.join(output_directory, "tensor/")
    if not os.path.exists(tmpFile):
        os.makedirs(tmpFile)
    summary_writer = tf.summary.FileWriter(tmpFile, tf.get_default_graph())
    merged_summary_op = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        #fpass2 = sess.run(out2)
        #print(np.shape(fpass2))
        #pyramid = sess.run(targetList)
        #print(np.shape(targetList))
        #sess.run([outListTarget, outListNoise])

        Iterations = iter
        counter = 0
        temp_loss = 0
        allLoss = []
        for i in range(0, Iterations):
            a = sess.run([train_step])
            print(type(a))  # debug
            sess.run([r_noise])
            print(np.shape(r_noise))  # debug
            if i == 0:
                temp_loss = loss_sum.eval()
            if i % 10 == 0:
                loss = loss_sum.eval()
                if loss > temp_loss:
                    counter += 1
                sys.stdout.write('\r')
                sys.stdout.write("[%-50s] %d/%d ,loss=%e" %
                                 ('=' * int(i * 50 / iter), i, iter, loss))
                sys.stdout.flush()
                temp_loss = loss
            if i % 10 == 0 and i != 0 and i <= 200:
                answer = noise.eval()
                answer = answer.reshape(np.shape(answer)[1],
                                        np.shape(answer)[2], 3)
                #answer = (answer*255).astype('uint8')
                answer = (utils.histogram_matching(answer, image) *
                          255.).astype('uint8')
                # print('Mean = ', np.mean(answer))
                filename = os.path.join(output_directory,
                                        "%safter.jpg" % (str(i)))
                skimage.io.imsave(filename, answer)
                # Save the pyramid
                for w in range(1, len(noiseList)):
                    outputPyramid = noiseList[w].eval()
                    tmp = outputPyramid.reshape(np.shape(outputPyramid)[1],
                                                np.shape(outputPyramid)[2], 3)
                    tmp = (utils.histogram_matching(tmp, image) *
                           255.).astype('uint8')
                    filename = os.path.join(
                        output_directory,
                        "%safter%spyra.jpg" % (str(i), str(w)))
                    skimage.io.imsave(filename, tmp)
            if i % 200 == 0 and i != 0 and i > 200:
                answer = noise.eval()
                answer = answer.reshape(np.shape(answer)[1],
                                        np.shape(answer)[2], 3)
                #answer = (answer*255).astype('uint8')
                answer = (utils.histogram_matching(answer, image) *
                          255.).astype('uint8')
                # print('Mean = ', np.mean(answer))
                filename = os.path.join(output_directory,
                                        "%safter.jpg" % (str(i)))
                skimage.io.imsave(filename, answer)
                # Save the pyramid
                for w in range(1, len(noiseList)):
                    outputPyramid = noiseList[w].eval()
                    tmp = outputPyramid.reshape(np.shape(outputPyramid)[1],
                                                np.shape(outputPyramid)[2], 3)
                    tmp = (utils.histogram_matching(tmp, image) *
                           255.).astype('uint8')
                    filename = os.path.join(
                        output_directory,
                        "%safter%spyra.jpg" % (str(i), str(w)))
                    skimage.io.imsave(filename, tmp)
            #allLoss.append(loss_sum.eval())
            allLoss.append(temp_loss)
            if counter > 3000:
                print('\n', 'Early Stop!')
                break

        summary_str = sess.run(merged_summary_op)
        summary_writer.add_summary(summary_str, 1)

        answer = noise.eval()
        answer = answer.reshape(np.shape(answer)[1], np.shape(answer)[2], 3)
        skimage.io.imsave(
            os.path.join(output_directory, "final_texture_noHistMatch.png"),
            answer)
        answer = (utils.histogram_matching(answer, image) *
                  255.).astype('uint8')
        skimage.io.imsave(os.path.join(output_directory, "final_texture.png"),
                          answer)

        # Save the pyramid
        for w in range(1, len(noiseList)):
            outputPyramid = noiseList[w].eval()
            tmp = outputPyramid.reshape(np.shape(outputPyramid)[1],
                                        np.shape(outputPyramid)[2], 3)
            tmp = (utils.histogram_matching(tmp, image) *
                   255.).astype('uint8')
            skimage.io.imsave(
                os.path.join(output_directory,
                             "final_texture_pyra%s.png" % (str(w))), tmp)

    # Some plotting
    plotting(allLoss, iter, output_directory)
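# The Gram-matrix style loss is duplicated in every loss block in this file.
# A possible factored-out version is sketched below; the divisor conventions
# mirror the surrounding code, but the helper itself (name gram_loss,
# signature, denom parameter) is an assumption, not part of the original
# repository.
def gram_loss(origin, new, denom=2.):
    """L2 distance between the normalized Gram matrices of two feature maps
    of shape (batch, height, width, channels)."""
    shape = origin.get_shape().as_list()
    N = shape[3]             # number of channels (filters)
    M = shape[1] * shape[2]  # width x height
    F = tf.reshape(origin, (-1, N))                      # M x N
    Gram_o = tf.matmul(tf.transpose(F), F) / (N * M)     # N x N
    F_t = tf.reshape(new, (-1, N))
    Gram_n = tf.matmul(tf.transpose(F_t), F_t) / (N * M)
    return tf.nn.l2_loss(Gram_o - Gram_n) / denom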
def run_synthesis_pyramid(tex, images, proc_img, iterations, output_directory,
                          weightsLayers, weightsPyramid,
                          vgg_class=vgg19.Vgg19):
    config = tf.ConfigProto()
    gaussKerr = tf.cast(
        tf.Variable(np.reshape(utils.gkern(5), (5, 5, 1, 1)),
                    trainable=False, dtype='float64'), tf.float32)
    #os.environ["CUDA_VISIBLE_DEVICES"]="0"
    #exit(0)

    with tf.Session(config=config) as sess:
        vggs = [vgg_class() for i in range(len(images))]
        vggs2 = [vgg_class() for i in range(len(images))]

        # Build the pyramid in TF; collect one assign op per level (the
        # original kept only the last assign in a single variable, so the
        # intermediate levels were never updated during training).
        # downSample is not defined in this file; a possible implementation
        # is sketched after this function.
        pyramid_updates = []
        for j in range(len(tex) - 1):
            [tR, tG, tB] = tf.unstack(tex[j], num=3, axis=3)
            tR = tf.expand_dims(tR, 3)
            tG = tf.expand_dims(tG, 3)
            tB = tf.expand_dims(tB, 3)
            # Convolve each channel with the Gaussian filter
            tR_gauss = tf.nn.conv2d(tR, gaussKerr, strides=[1, 1, 1, 1],
                                    padding='SAME')
            tG_gauss = tf.nn.conv2d(tG, gaussKerr, strides=[1, 1, 1, 1],
                                    padding='SAME')
            tB_gauss = tf.nn.conv2d(tB, gaussKerr, strides=[1, 1, 1, 1],
                                    padding='SAME')
            #tmpR = tf.py_func(downSample, tR_gauss, tf.float32)
            #tmpG = tf.py_func(downSample, tG_gauss, tf.float32)
            #tmpB = tf.py_func(downSample, tB_gauss, tf.float32)
            #tmp = tf.stack([tR_gauss, tG_gauss, tB_gauss], axis=3)  # redundant: overwritten by the concat below
            tmp = tf.concat([tR_gauss, tG_gauss, tB_gauss], axis=3)
            # debug output
            print("<<<<<<<<<<<<<<<HYPNOTOAD>>>>>>>>>>>>>>>>>>>>>>>>")
            print(tmp)
            print(tmp.get_shape())
            print(tmp.get_shape().as_list()[1:])
            print("<<<<<<<<<<<<<<<HYPNOTOAD>>>>>>>>>>>>>>>>>>>>>>>>")
            newTmp = tf.py_func(downSample, [tmp], tf.float32)
            #print(newTmp)
            #print(newTmp.get_shape())
            #print(newTmp.get_shape().as_list()[1:])
            pyramid_updates.append(tex[j + 1].assign(newTmp))

        with tf.name_scope("origin"):
            for i in range(len(images)):
                vggs[i].build(images[i])
        with tf.name_scope("new"):
            for i in range(len(images)):
                vggs2[i].build(tex[i])

        # Check that there are enough layer and pyramid weights
        assert len(weightsLayers) >= vggs[0].getLayersCount()
        assert len(weightsPyramid) >= len(images)

        loss_sum = 0.0
        #for i in range(0, 5):  # layers
        for j in range(len(images)):  # pyramid levels
            loss_pyra = 0.0
            for i in range(0, vggs[0].getLayersCount()):  # layers
                origin = vggs[j].conv_list[i]
                new = vggs2[j].conv_list[i]
                shape = origin.get_shape().as_list()
                N = shape[3]  # number of channels (filters)
                M = shape[1] * shape[2]  # width x height
                F = tf.reshape(origin, (-1, N))  # M x N
                Gram_o = (tf.matmul(tf.transpose(F), F) / (N * M))
                F_t = tf.reshape(new, (-1, N))
                Gram_n = tf.matmul(tf.transpose(F_t), F_t) / (N * M)
                loss = tf.nn.l2_loss((Gram_o - Gram_n)) / 2
                loss = tf.scalar_mul(weightsLayers[i], loss)
                loss_pyra = tf.add(loss_pyra, loss)
            loss_pyra = tf.scalar_mul(weightsPyramid[j], loss_pyra)
            loss_sum = tf.add(loss_sum, loss_pyra)
        tf.summary.scalar("loss_sum", loss_sum)

        train_step = tf.train.AdamOptimizer(0.01).minimize(loss_sum,
                                                           var_list=[tex])
        restrict = tf.maximum(0., tf.minimum(1., tex[0]))
        r_tex = tex[0].assign(restrict)
        merged_summary_op = tf.summary.merge_all()

        #sess.run(tf.initialize_all_variables())
        sess.run(tf.global_variables_initializer())

        # Create the summary writer once, before the training loop
        # (the original re-created it on every iteration)
        tmpFile = os.path.join(output_directory, "tensor/")
        if not os.path.exists(tmpFile):
            os.makedirs(tmpFile)
        #aa = "Users/falconr1/Documents/tmpDL/Code/result"
        print(tmpFile)
        summary_writer = tf.summary.FileWriter(tmpFile, sess.graph)

        Iteration = iterations
        counter = 0
        temp_loss = 0
        allLoss = []
        for i in range(0, Iteration):
            #print('ITERATION' + str(i))
            sess.run(train_step)
            sess.run(r_tex)
            sess.run(pyramid_updates)
            if i == 0:
                temp_loss = loss_sum.eval()
            if i % 10 == 0:
                loss = loss_sum.eval()
                if loss > temp_loss:
                    counter += 1
                sys.stdout.write('\r')
                sys.stdout.write(
                    "[%-50s] %d/%d ,loss=%e" %
                    ('=' * int(i * 50 / Iteration), i, Iteration, loss))
                sys.stdout.flush()
                temp_loss = loss
            if i % 100 == 0 and i != 0:
                answer = tex[0].eval()
                answer = answer.reshape(np.shape(answer)[1],
                                        np.shape(answer)[2], 3)
                #answer = (answer*255).astype('uint8')
                answer = (utils.histogram_matching(answer, proc_img) *
                          255.).astype('uint8')
                # print('Mean = ', np.mean(answer))
                filename = os.path.join(output_directory,
                                        "%safter.jpg" % (str(i)))
                skimage.io.imsave(filename, answer)
                # Save the pyramid
                for w in range(1, len(tex)):
                    outputPyramid = tex[w].eval()
                    tmp = outputPyramid.reshape(np.shape(outputPyramid)[1],
                                                np.shape(outputPyramid)[2], 3)
                    tmp = (utils.histogram_matching(tmp, proc_img) *
                           255.).astype('uint8')
                    filename = os.path.join(
                        output_directory,
                        "%safter%spyra.jpg" % (str(i), str(w)))
                    skimage.io.imsave(filename, tmp)
            #allLoss.append(loss_sum.eval())
            allLoss.append(temp_loss)
            if counter > 3000:
                print('\n', 'Early Stop!')
                break

        '''
        answer = tex[0].eval()
        #print(answer)
        pyramid = create_pyramids((answer.reshape(np.shape(answer)[1], np.shape(answer)[2], 3) * 255).astype('uint8'), levels)
        im2 = [i.reshape((1, np.shape(i)[0], np.shape(i)[1], 3)) for i in pyramid]
        im2 = [tf.cast(tf.convert_to_tensor(i, dtype="float64"), tf.float32) for i in im2]
        #t_pyramid = tuple(tf.convert_to_tensor(np.reshape(i, (1, np.shape[0], np.shape[1], 3))) for i in pyramid)
        #t_pyramid = tuple(tf.convert_to_tensor(i) for i in im2)
        #print(t_pyramid[0].get_shape())
        #print("**********************")
        for j in range(1, len(im2)):
            sess.run(tex[j].assign(im2[j]))
        #print(pyramid)
        '''

        summary_str = sess.run(merged_summary_op)
        summary_writer.add_summary(summary_str, 1)

        answer = tex[0].eval()
        answer = answer.reshape(np.shape(answer)[1], np.shape(answer)[2], 3)
        answer = (utils.histogram_matching(answer, proc_img) *
                  255.).astype('uint8')
        skimage.io.imsave(os.path.join(output_directory, "final_texture.png"),
                          answer)

        # Save the pyramid
        for w in range(1, len(tex)):
            outputPyramid = tex[w].eval()
            tmp = outputPyramid.reshape(np.shape(outputPyramid)[1],
                                        np.shape(outputPyramid)[2], 3)
            tmp = (utils.histogram_matching(tmp, proc_img) *
                   255.).astype('uint8')
            skimage.io.imsave(
                os.path.join(output_directory,
                             "final_texture_pyra%s.png" % (str(w))), tmp)

    # Some plotting
    plotting(allLoss, iterations, output_directory)
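# downSample is passed to tf.py_func in run_synthesis_pyramid but is not
# defined in this file. A plausible NumPy implementation is sketched below,
# assuming it simply keeps every other pixel after the Gaussian blur; this is
# an assumption, and the original helper may differ (e.g. it could use a
# scipy or skimage resize instead).
def downSample(img):
    # img has shape (batch, height, width, channels); keep every 2nd pixel
    # and return float32, matching the dtype declared in tf.py_func above
    return img[:, ::2, ::2, :].astype(np.float32)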
def run_synthesis(tex, images, proc_img, iterations, output_directory,
                  weightsLayers, vgg_class=vgg19.Vgg19):
    config = tf.ConfigProto()
    # fixed casing: the original wrote "CUDA_VISIBLE_DEVICEs", which CUDA ignores
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    with tf.Session(config=config) as sess:
        vgg = vgg_class()
        vgg2 = vgg_class()
        with tf.name_scope("origin"):
            vgg.build(images)
        with tf.name_scope("new"):
            vgg2.build(tex)

        # Check that there are as many layer weights as layers
        assert len(weightsLayers) == vgg.getLayersCount()

        ## Calculate the loss according to the paper
        loss_sum = 0.
        for i in range(0, vgg.getLayersCount()):
            origin = vgg.conv_list[i]
            new = vgg2.conv_list[i]
            shape = origin.get_shape().as_list()
            N = shape[3]  # number of channels (filters)
            M = shape[1] * shape[2]  # width x height
            F = tf.reshape(origin, (-1, N))
            Gram_o = (tf.matmul(tf.transpose(F), F) / (N * M))
            F_t = tf.reshape(new, (-1, N))
            Gram_n = tf.matmul(tf.transpose(F_t), F_t) / (N * M)
            loss = tf.nn.l2_loss((Gram_o - Gram_n)) / 2
            loss = tf.scalar_mul(weightsLayers[i], loss)
            loss_sum = tf.add(loss_sum, loss)

        train_step = tf.train.AdamOptimizer(0.01).minimize(loss_sum,
                                                           var_list=[tex])
        restrict = tf.maximum(0., tf.minimum(1., tex))
        r_tex = tex.assign(restrict)

        #sess.run(tf.initialize_all_variables())
        sess.run(tf.global_variables_initializer())

        Iteration = iterations
        counter = 0
        temp_loss = 0
        allLoss = []
        for i in range(0, Iteration):
            sess.run(train_step)
            sess.run(r_tex)
            if i == 0:
                temp_loss = loss_sum.eval()
            if i % 100 == 0:
                loss = loss_sum.eval()
                if loss > temp_loss:
                    counter += 1
                sys.stdout.write('\r')
                sys.stdout.write(
                    "[%-50s] %d/%d ,loss=%e" %
                    ('=' * int(i * 50 / Iteration), i, Iteration, loss))
                sys.stdout.flush()
                temp_loss = loss
            if i % 10 == 0 and i != 0 and i < 200:
                answer = tex.eval()
                answer = answer.reshape(np.shape(answer)[1],
                                        np.shape(answer)[2], 3)
                answer = (answer * 255).astype('uint8')
                # print('Mean = ', np.mean(answer))
                filename = os.path.join(output_directory,
                                        "%safter.jpg" % (str(i)))
                skimage.io.imsave(filename, answer)
            if i % 200 == 0 and i != 0 and i > 200:
                answer = tex.eval()
                answer = answer.reshape(np.shape(answer)[1],
                                        np.shape(answer)[2], 3)
                answer = (answer * 255).astype('uint8')
                # print('Mean = ', np.mean(answer))
                filename = os.path.join(output_directory,
                                        "%safter.jpg" % (str(i)))
                skimage.io.imsave(filename, answer)
            if counter > 3000:
                print('\n', 'Early Stop!')
                break
            #allLoss.append(loss_sum.eval())
            allLoss.append(temp_loss)

        answer = tex.eval()
        answer = answer.reshape(np.shape(answer)[1], np.shape(answer)[2], 3)
        answer = (utils.histogram_matching(answer, proc_img) *
                  255.).astype('uint8')
        skimage.io.imsave(os.path.join(output_directory, "final_texture.png"),
                          answer)

    # Some plotting
    plotting(allLoss, iterations, output_directory)
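# A minimal, hypothetical driver for run_synthesis, only to illustrate the
# expected inputs: tex must be a trainable variable in [0, 1] of shape
# (1, H, W, 3), images the style exemplar tensor with the same shape, and
# proc_img the exemplar as a (H, W, 3) array in [0, 1] for the final
# histogram matching. The file path, iteration count, and the assumption of
# 5 weighted layers are all illustrative, not values from the repository.
if __name__ == '__main__':
    proc_img = skimage.io.imread('style.png') / 255.  # assumed example path
    images = tf.constant(proc_img.reshape((1,) + proc_img.shape), tf.float32)
    tex = tf.get_variable(
        'tex', trainable=True,
        initializer=np.random.uniform(
            0, 1, size=(1,) + proc_img.shape).astype('float32'))
    # weightsLayers length must equal vgg.getLayersCount(); 5 is an assumption
    run_synthesis(tex, images, proc_img, 2000, './result',
                  weightsLayers=[1.0] * 5)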