def main():
    """Parse the command-line arguments and run the style transfer.

    The original `try/except: raise` wrapper was a no-op (a bare re-raise
    changes nothing) and has been removed.
    """
    parser = get_parser_args()
    args = parser.parse_args()
    st.style_transfer(args)
def main():
    """Plot the Gram-only PDF comparison (the "autre ratio" variant).

    Enables verbose output by default; the CLI can still override it.
    """
    parser = get_parser_args()
    parser.set_defaults(verbose=True)
    args = parser.parse_args()
    # Only the alternative-ratio variant is run; the plain
    # do_pdf_comparison_GramOnly(args) call was dead commented-out code.
    do_pdf_comparison_GramOnly_autreratio(args)
def main(name=None):
    """Estimate the distribution (std) of a style image's responses.

    Args:
        name: style image name; defaults to "BrickSmallBrown0293_1_S"
            when None.
    """
    parser = get_parser_args()
    # `is None` instead of `== None` (idiomatic identity test).
    style_img_name = "BrickSmallBrown0293_1_S" if name is None else name
    parser.set_defaults(style_img_name=style_img_name)
    args = parser.parse_args()
    estimate_std(args)
def generation_Texture_LossFct():
    """Synthesize each test texture with the 'texture' loss under three
    different VGG weight files (normalized / regular / random) so the
    results can be compared network-by-network.

    Results are written to the resultsCompNets folder; the output file
    name encodes the network tag and the iteration count.
    """
    path_origin = '/home/nicolas/Style-Transfer/dataImagesTest/'
    path_output = '/home/nicolas/Style-Transfer/LossFct/resultsCompNets/'
    do_mkdir(path_output)
    parser = get_parser_args()
    # Synthesis hyper-parameters (start from pure Gaussian noise).
    max_iter = 2000
    print_iter = 200
    start_from_noise = 1
    init_noise_ratio = 1.0
    optimizer = 'lbfgs'
    init = 'Gaussian'
    init_range = 0.0
    clipping_type = 'ImageStyleBGR'
    # Map each VGG weight file to the short tag used in the output name.
    # (Replaces an if/elif chain that left `extention` undefined for any
    # unknown network name.)
    net_extensions = {
        'normalizedvgg.mat': 'normNet',
        'imagenet-vgg-verydeep-19.mat': 'regularNet',
        'random_net.mat': 'RandNet',
    }
    loss = 'texture'
    list_img = get_list_of_images(path_origin)
    for net, extention in net_extensions.items():
        for name_img in list_img:
            tf.reset_default_graph()  # Necessity to use a new graph !!
            name_img_wt_ext, _ = name_img.split('.')
            output_img_name = name_img_wt_ext + '_' + extention + '_' + str(max_iter)
            parser.set_defaults(verbose=True,
                                max_iter=max_iter,
                                print_iter=print_iter,
                                img_folder=path_origin,
                                img_output_folder=path_output,
                                style_img_name=name_img_wt_ext,
                                content_img_name=name_img_wt_ext,
                                init_noise_ratio=init_noise_ratio,
                                start_from_noise=start_from_noise,
                                output_img_name=output_img_name,
                                optimizer=optimizer,
                                loss=loss,
                                init=init,
                                init_range=init_range,
                                clipping_type=clipping_type,
                                vgg_name=net)
            args = parser.parse_args()
            st.style_transfer(args)
def main_distrib(name=None):
    """Estimate a generalized-normal distribution fit for a style image.

    Args:
        name: style image name; defaults to "StarryNight" when None.
    """
    parser = get_parser_args()
    # `is None` instead of `== None` (idiomatic identity test).
    style_img_name = "StarryNight" if name is None else name
    parser.set_defaults(style_img_name=style_img_name)
    args = parser.parse_args()
    estimate_gennorm(args)
def main_plot(name=None):
    """Plot the responses to the filters/kernels and their histograms
    into separate PDF files.

    Args:
        name: style image name; defaults to "StarryNight" when None.
    """
    parser = get_parser_args()
    # `is None` instead of `== None` (idiomatic identity test).
    style_img_name = "StarryNight" if name is None else name
    parser.set_defaults(style_img_name=style_img_name)
    args = parser.parse_args()
    plot_Rep(args)
def main_plot_commun(name=None):
    """Plot, for each layer of interest in VGG, the kernels, the kernel
    responses and the fitted histograms in a common PDF.

    Args:
        name: style image name; defaults to "StarryNight" when None.
    """
    parser = get_parser_args()
    # `is None` instead of `== None` (idiomatic identity test).
    style_img_name = "StarryNight" if name is None else name
    parser.set_defaults(style_img_name=style_img_name)
    args = parser.parse_args()
    do_pdf_comparison(args)
def generation():
    """Run style transfer for a sweep of content-strength values.

    NOTE(review): the original body referenced `name_img`, `p` and `n`
    without ever defining them (guaranteed NameError at runtime). The
    inner loop over `list_img` has been restored and `n`/`p` are given
    the defaults used by the sibling generation functions (n = p = 4) —
    confirm both against the experiment's intent.
    """
    path_origin = '/home/nicolas/Style-Transfer/'
    path_output = '/home/nicolas/Style-Transfer/'
    parser = get_parser_args()
    max_iter = 2000
    print_iter = 500
    start_from_noise = 0          # start from the content image, not noise
    init_noise_ratio = 0.05
    optimizer = 'lbfgs'
    init = 'Gaussian'
    init_range = 0.0
    clipping_type = 'ImageStyleBGR'
    vgg_name = 'normalizedvgg.mat'
    loss = ['texture', 'content']
    n = 4  # moment order — default taken from generation_Texture_LossFct
    p = 4  # Lp-norm order — default taken from generation_Texture_LossFct
    content_strengh_tab = [
        10., 1., 0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001
    ]
    list_img = get_list_of_images(path_origin)
    for content_strengh in content_strengh_tab:
        for name_img in list_img:
            tf.reset_default_graph()  # Necessity to use a new graph !!
            name_img_wt_ext, _ = name_img.split('.')
            # Output name encodes the content strength and loss terms.
            output_img_name = name_img_wt_ext + '_' + str(content_strengh)
            for loss_item in loss:
                output_img_name += '_' + loss_item
            parser.set_defaults(verbose=True,
                                max_iter=max_iter,
                                print_iter=print_iter,
                                img_folder=path_origin,
                                img_output_folder=path_output,
                                style_img_name=name_img_wt_ext,
                                content_img_name=name_img_wt_ext,
                                init_noise_ratio=init_noise_ratio,
                                start_from_noise=start_from_noise,
                                output_img_name=output_img_name,
                                optimizer=optimizer,
                                loss=loss,
                                init=init,
                                init_range=init_range,
                                p=p,
                                n=n,
                                clipping_type=clipping_type,
                                content_strengh=content_strengh,
                                vgg_name=vgg_name)
            args = parser.parse_args()
            st.style_transfer(args)
def generation_Texture_LossFct():
    """Synthesize every image in '../originBigger' with the plain
    'texture' loss, writing results to 'LossFct/results/'.

    Unused locals `img_folder`/`img_output_folder` from the original
    were removed (the parser receives `path_origin`/`path_output`
    directly).
    """
    path_origin = '../originBigger'
    path_output = 'LossFct/results/'
    do_mkdir(path_output)
    parser = get_parser_args()
    # Synthesis hyper-parameters (start from pure Gaussian noise).
    max_iter = 2000
    print_iter = 200
    start_from_noise = 1
    init_noise_ratio = 1.0
    optimizer = 'lbfgs'
    init = 'Gaussian'
    init_range = 0.0
    clipping_type = 'ImageStyleBGR'
    vgg_name = 'normalizedvgg.mat'
    n = 4  # moment order forwarded to the loss
    p = 4  # Lp-norm order forwarded to the loss
    losses_to_test = [['texture']]
    list_img = get_list_of_images(path_origin)
    for loss in losses_to_test:
        for name_img in list_img:
            tf.reset_default_graph()  # Necessity to use a new graph !!
            name_img_wt_ext, _ = name_img.split('.')
            output_img_name = name_img_wt_ext + '_' + loss[0]
            parser.set_defaults(verbose=True,
                                max_iter=max_iter,
                                print_iter=print_iter,
                                img_folder=path_origin,
                                img_output_folder=path_output,
                                style_img_name=name_img_wt_ext,
                                content_img_name=name_img_wt_ext,
                                init_noise_ratio=init_noise_ratio,
                                start_from_noise=start_from_noise,
                                output_img_name=output_img_name,
                                optimizer=optimizer,
                                loss=loss,
                                init=init,
                                init_range=init_range,
                                p=p,
                                n=n,
                                clipping_type=clipping_type,
                                vgg_name=vgg_name)
            args = parser.parse_args()
            st.style_transfer(args)
def generation_Texture():
    """Generate textures for several loss configurations, one output
    sub-folder per loss combination.

    For the single-loss 'nmoments' configuration, the moment order n is
    swept from min_order_nmoments to max_order_nmoments and each order
    gets its own sub-folder.

    NOTE(review): `path_origin` and `path_output` are read from module
    scope (they are not defined in this function) — confirm they exist
    before calling.
    """
    parser = get_parser_args()
    max_iter = 2000
    print_iter = 500
    start_from_noise = 1
    init_noise_ratio = 1.0
    optimizer = 'lbfgs'
    style_img_name = "temp"
    # Range of moment orders swept for the 'nmoments' loss.
    max_order_nmoments = 5
    min_order_nmoments = 3
    losses_to_test = [['autocorr'], ['nmoments'], ['texture'], ['InterScale'],
                      ['Lp'], ['texture', 'nmoments'], ['texture', 'Lp']]
    for list_of_loss in losses_to_test:
        print("loss = ", list_of_loss)
        img_folder = path_origin
        # One output folder per loss combination, e.g. "texture_Lp".
        path_output_mod = path_output + "_".join(list_of_loss)
        if ('nmoments' in list_of_loss) and (len(list_of_loss) == 1):
            # Sweep the moment order; each order writes to its own folder.
            for n in range(min_order_nmoments, max_order_nmoments + 1, 1):
                path_output_mod2 = path_output_mod
                print("n", n)
                path_output_mod2 += "_" + str(n) + '/'
                if not (os.path.isdir(path_output_mod2)):
                    os.mkdir(path_output_mod2)
                parser.set_defaults(max_iter=max_iter,
                                    print_iter=print_iter,
                                    img_folder=img_folder,
                                    init_noise_ratio=init_noise_ratio,
                                    start_from_noise=start_from_noise,
                                    optimizer=optimizer,
                                    n=n,
                                    loss=list_of_loss,
                                    style_img_name=style_img_name)
                args = parser.parse_args()
                generate_all_texture(args, path_output_mod2)
        else:
            path_output_mod += '/'
            if not (os.path.isdir(path_output_mod)):
                os.mkdir(path_output_mod)
            parser.set_defaults(max_iter=max_iter,
                                print_iter=print_iter,
                                img_folder=img_folder,
                                init_noise_ratio=init_noise_ratio,
                                start_from_noise=start_from_noise,
                                optimizer=optimizer,
                                loss=list_of_loss)
            args = parser.parse_args()
            generate_all_texture(args, path_output_mod)
def main():
    """Run the gradient computation with a fixed experimental setup."""
    parser = get_parser_args()
    # Experiment configuration, installed as parser defaults so the
    # command line can still override any of it.
    experiment = dict(
        style_img_name="StarryNight",
        content_img_name="Louvre",
        max_iter=1000,
        print_iter=100,
        start_from_noise=1,  # True
        init_noise_ratio=0.1,
        content_strengh=0.001,
        optimizer='adam',
    )
    parser.set_defaults(**experiment)
    args = parser.parse_args()
    grad_computation(args)
def compute_moments_of_filter(TypeOfComputation='moments', n=9):
    """Compute the first n moments (or Lp norms) of each VGG filter
    response for every image, then pickle the result dictionary.

    Args:
        TypeOfComputation: 'moments' for raw moments via
            st.compute_n_moments, 'Lp' for Lp norms via
            st.compute_Lp_norm.
        n: number of moments / order of the Lp norm.

    Fixes over the original:
      * The `Notfirst` flag could never become True (it was only set
        inside a branch guarded by itself), so previous tf.Session
        objects were never closed — sessions leaked on every image-size
        change. Sessions are now tracked and closed explicitly.
      * The input assignment ran only when the image size changed, so
        same-size consecutive images silently reused the previous
        image's activations; the assign now runs for every image.
      * `sess.close()` no longer raises NameError when `dirs` is empty.

    NOTE(review): `path_origin` is read from module scope — confirm it
    is defined before calling.
    """
    img_folder = path_origin  # assumes a module-level path_origin — TODO confirm
    parser = get_parser_args()
    parser.set_defaults(img_folder=img_folder)
    args = parser.parse_args()
    dirs = get_list_of_images()
    Data = {}
    print("Computation")
    vgg_layers = st.get_vgg_layers()
    h_old = 0
    w_old = 0
    sess = None  # current tf.Session, rebuilt whenever the image size changes
    for name_img in dirs:
        name_img_wt_ext, _ = name_img.split('.')
        print(name_img_wt_ext)
        image_style = st.load_img(args, name_img_wt_ext)  # without reshape
        Data[name_img] = {}
        _, h, w, _ = image_style.shape
        if not ((h_old == h) and (w_old == w)):
            # Image size changed: the graph's input shape no longer
            # matches, so dispose of the old session and rebuild the net.
            if sess is not None:
                sess.close()
            tf.reset_default_graph()
            net = st.net_preloaded(vgg_layers, image_style)  # need a new net
            sess = tf.Session()
        # Feed the current image (must happen for every image, not only
        # on a size change).
        sess.run(net['input'].assign(image_style))
        for layer in VGG19_LAYERS_INTEREST:
            a = net[layer]
            if TypeOfComputation == 'moments':
                listOfMoments = sess.run(st.compute_n_moments(a, n))
            elif TypeOfComputation == 'Lp':
                listOfMoments = sess.run(st.compute_Lp_norm(a, n))
            Data[name_img][layer] = listOfMoments
        h_old, w_old = h, w
    if sess is not None:
        sess.close()
    data_path = args.data_folder + "moments_all_textures.pkl"
    with open(data_path, 'wb') as output_pkl:
        pickle.dump(Data, output_pkl)
    print("End")
def main_with_option():
    """Run style transfer on a chosen texture with hand-tuned options.

    The effective configuration synthesizes texture "D20_01" from pure
    noise with L-BFGS, average pooling and SAME padding. The original
    contained chains of shadowed assignments (image_style_name set three
    times, optimizer twice); only the effective values are kept, with
    the alternatives preserved as a commented menu.
    """
    parser = get_parser_args()
    # Available test images (assign to D / image_style_name to switch):
    #   "StarryNight_Big", "StarryNight", 'GrungeMarbled0021_S',
    #   "TilesOrnate0158_1_S", "TilesZellige0099_1_S", "pebbles",
    #   "BrickSmallBrown0293_1_S", "bleu", "orange"
    D = "D20_01"
    image_style_name = D
    content_img_name = D
    max_iter = 1000
    print_iter = 1000
    start_from_noise = 1  # True
    init_noise_ratio = 1.0  # TODO add a gaussian noise on the image instead a uniform one
    content_strengh = 0.001
    optimizer = 'lbfgs'  # 'adam' was the shadowed alternative
    learning_rate = 10  # 10 for adam and 10**(-10) for GD
    maxcor = 10
    sampling = 'up'
    # In order to set the parameter before run the script
    parser.set_defaults(style_img_name=image_style_name,
                        max_iter=max_iter,
                        print_iter=print_iter,
                        start_from_noise=start_from_noise,
                        content_img_name=content_img_name,
                        init_noise_ratio=init_noise_ratio,
                        content_strengh=content_strengh,
                        optimizer=optimizer,
                        maxcor=maxcor,
                        learning_rate=learning_rate,
                        sampling=sampling)
    args = parser.parse_args()
    pooling_type = 'avg'
    padding = 'SAME'
    style_transfer(args, pooling_type, padding)
def generation_Texture_init():
    """Synthesize every image with the 'texture' loss under several
    initialization schemes (Gaussian/constant/uniform, various ranges),
    writing the results to the InitializationTest folder."""
    path_origin = '/home/nicolas/Style-Transfer/dataImages/'
    path_output = '/home/nicolas/Style-Transfer/InitializationTest/resultsVGGnormalized//'
    do_mkdir(path_output)
    parser = get_parser_args()
    # Shared synthesis hyper-parameters.
    max_iter = 2000
    print_iter = 100
    start_from_noise = 1
    init_noise_ratio = 1.0
    optimizer = 'lbfgs'
    loss = 'texture'
    # (initialization scheme, range) pairs to compare.
    init_list = [('Gaussian', 0.), ('Cst', 0.), ('Uniform', 127.5),
                 ('Uniform', 20.)]
    list_img = get_list_of_images(path_origin)
    print(list_img)
    for init, init_range in init_list:
        for name_img in list_img:
            # A fresh graph is required for each synthesis run.
            tf.reset_default_graph()
            name_img_wt_ext, _ = name_img.split('.')
            print("New Synthesis", init, name_img_wt_ext)
            # Output name encodes the initialization scheme and range.
            output_img_name = '_'.join(
                [name_img_wt_ext, init, str(init_range)])
            parser.set_defaults(verbose=True,
                                max_iter=max_iter,
                                print_iter=print_iter,
                                img_folder=path_origin,
                                img_output_folder=path_output,
                                style_img_name=name_img_wt_ext,
                                content_img_name=name_img_wt_ext,
                                init_noise_ratio=init_noise_ratio,
                                start_from_noise=start_from_noise,
                                output_img_name=output_img_name,
                                optimizer=optimizer,
                                loss=loss,
                                init=init,
                                init_range=init_range)
            args = parser.parse_args()
            st.style_transfer(args)
def CompResult():
    """Compare the texture loss of Gatys-style syntheses against random
    phase noise syntheses, for every image in the origin folder.

    For each image: build the VGG net and the texture loss, evaluate the
    loss on the Gatys output, then on every random-phase-noise output,
    and append the Gatys value and the random mean/std to
    'ComparisonRandomGatys.txt'.

    NOTE(review): the tf.Session opened per image is never closed, and
    `list_loss`/`list_loss_name` from st.get_losses are unused.
    """
    path_origin_gatys_output = '/home/nicolas/Style-Transfer/LossFct/random_phase_noise_v1.3/'
    path_origin = '/home/nicolas/random_phase_noise_v1.3/im/'
    path_origin_rand = '/home/nicolas/random_phase_noise_v1.3/src/output/'
    list_img = get_list_of_images(path_origin)
    parser = get_parser_args()
    max_iter = 2000
    print_iter = 500
    start_from_noise = 1
    init_noise_ratio = 1.0
    optimizer = 'lbfgs'
    init = 'Gaussian'
    init_range = 0.0
    clipping_type = 'ImageStyleBGR'
    vgg_name = 'normalizedvgg.mat'
    loss = ['texture']
    # Layers whose Gram matrices define the texture loss, equally weighted.
    style_layers = ['conv1_1', 'pool1', 'pool2', 'pool3', 'pool4']
    style_layer_weights = [1] * len(style_layers)
    f = open('ComparisonRandomGatys.txt', 'w')
    for name_img in list_img:
        print(name_img)
        tf.reset_default_graph()
        name_img_wt_ext, _ = name_img.split('.')
        parser.set_defaults(verbose=False,
                            max_iter=max_iter,
                            print_iter=print_iter,
                            img_folder=path_origin,
                            style_img_name=name_img_wt_ext,
                            content_img_name=name_img_wt_ext,
                            optimizer=optimizer,
                            loss=loss,
                            style_layers=style_layers,
                            style_layer_weights=style_layer_weights,
                            vgg_name=vgg_name)
        args = parser.parse_args()
        f.write(name_img_wt_ext + '\n')
        # The same image serves as both content and style reference.
        image_content = st.load_img(args, name_img_wt_ext)
        image_style = st.load_img(args, name_img_wt_ext)
        _, image_h, image_w, number_of_channels = image_content.shape
        M_dict = st.get_M_dict(image_h, image_w)
        pooling_type = args.pooling_type
        padding = args.padding
        vgg_layers = st.get_vgg_layers(args.vgg_name)
        # Precomputation Phase :
        dict_gram = st.get_Gram_matrix_wrap(args, vgg_layers, image_style,
                                            pooling_type, padding)
        dict_features_repr = st.get_features_repr_wrap(args, vgg_layers,
                                                       image_content,
                                                       pooling_type, padding)
        net = st.net_preloaded(
            vgg_layers, image_content, pooling_type,
            padding)  # The output image as the same size as the content one
        # Placeholder lets us evaluate the same loss graph on several images.
        placeholder = tf.placeholder(tf.float32, shape=image_style.shape)
        assign_op = net['input'].assign(placeholder)
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        loss_total, list_loss, list_loss_name = st.get_losses(
            args, sess, net, dict_features_repr, M_dict, image_style,
            dict_gram, pooling_type, padding)
        # Evaluate the loss on the Gatys synthesis output.
        image_gatys = name_img_wt_ext + '_texture'
        args.img_folder = path_origin_gatys_output
        image_gaty_get = st.load_img(args, image_gatys)
        sess.run(assign_op, {placeholder: image_gaty_get})
        loss_ref_gatys = sess.run(loss_total)
        string = 'Gatys method = {:.2e} \n'.format(loss_ref_gatys)
        print(string)
        f.write(string)
        # Evaluate the loss on every random-phase-noise output for this image.
        list_img_rand = get_list_of_images(path_origin_rand + name_img_wt_ext)
        loss_tab = []
        for img_name in list_img_rand:
            img_name_wt, _ = img_name.split('.')
            img_name_path = name_img_wt_ext + '/' + img_name_wt
            args.img_folder = path_origin_rand
            image_loaded = st.load_img(args, img_name_path)
            sess.run(assign_op, {placeholder: image_loaded})
            loss_ref_rand = sess.run(loss_total)
            loss_tab += [loss_ref_rand]
        mean = np.mean(loss_tab)
        std = np.std(loss_tab)
        string2 = 'Random method : {:.2e} et std : {:.2e} \n'.format(mean, std)
        print(string2)
        f.write(string2)
        f.flush()
    f.close()
def generation_Texture_LossFct():
    """Synthesize each test image under the selected loss functions and
    layer configurations, writing results to the loss-function test
    folder.

    NOTE(review): `path_origin`, `losses_to_test` and
    `config_layers_tab` are each assigned several times in a row —
    experiment toggles where only the LAST assignment is effective
    (path: dataImagesTest, losses: [['texture']], layers:
    ['PoolConfig']).
    """
    path_origin = '/home/gonthier/Travail_Local/Texture_Style/Style_Transfer/dataImages/'
    path_origin = '/home/gonthier/Travail_Local/Texture_Style/Style_Transfer/dataImagesTest/'
    #path_origin = '/home/nicolas/random_phase_noise_v1.3/im/'
    path_output = '/home/gonthier/Travail_Local/Texture_Style/Style_Transfer/LossFct/resultsDiff_loss_functionTest/'
    #path_output = '/home/nicolas/Style-Transfer/LossFct/random_phase_noise_v1.3/'
    #path_output = '/home/nicolas/Style-Transfer/LossFct/tmp/'
    do_mkdir(path_output)
    parser = get_parser_args()
    # Synthesis hyper-parameters (start from pure Gaussian noise).
    max_iter = 2000
    print_iter = 500
    start_from_noise = 1
    init_noise_ratio = 1.0
    optimizer = 'lbfgs'
    init = 'Gaussian'
    init_range = 0.0
    clipping_type = 'ImageStyleBGR'
    vgg_name = 'normalizedvgg.mat'
    n_list = [1, 2, 4]  # moment orders swept for the 'nmoments' loss
    p = 4
    # Successive overwrites: only the last losses_to_test value is used.
    losses_to_test = [['autocorr'], ['Lp'], ['fft3D'], ['bizarre']]
    #losses_to_test = [['texture','HF'],['texture'],['texture','TV'],['texture','HFmany']]
    losses_to_test = [['nmoments'], ['Lp'], ['texture'], ['PhaseAlea']]
    losses_to_test = [['autocorr_rfft'], ['autocorr']]
    losses_to_test = [['texture']]
    config_layers_tab = ['PoolConfig', 'FirstConvs']
    config_layers_tab = ['PoolConfig']
    list_img = get_list_of_images(path_origin)
    for config_layers in config_layers_tab:
        for loss in losses_to_test:
            if loss[0] == 'nmoments':
                # Sweep the moment order for the nmoments loss.
                for n in n_list:
                    # High orders on the first convolutions use the
                    # reduced variant instead.
                    if (config_layers == 'FirstConvs') and (n > 2):
                        loss = ['nmoments_reduce']
                    for name_img in list_img:
                        tf.reset_default_graph(
                        )  # Necessity to use a new graph !!
                        name_img_wt_ext, _ = name_img.split('.')
                        img_folder = path_origin
                        img_output_folder = path_origin
                        # Output name encodes each loss term and the order n.
                        output_img_name = name_img_wt_ext
                        for loss_item in loss:
                            output_img_name += '_' + loss_item + '_' + str(n)
                        parser.set_defaults(verbose=True,
                                            max_iter=max_iter,
                                            print_iter=print_iter,
                                            img_folder=path_origin,
                                            img_output_folder=path_output,
                                            style_img_name=name_img_wt_ext,
                                            content_img_name=name_img_wt_ext,
                                            init_noise_ratio=init_noise_ratio,
                                            start_from_noise=start_from_noise,
                                            output_img_name=output_img_name,
                                            optimizer=optimizer,
                                            loss=loss,
                                            init=init,
                                            init_range=init_range,
                                            p=p,
                                            n=n,
                                            clipping_type=clipping_type,
                                            vgg_name=vgg_name)
                        args = parser.parse_args()
                        st.style_transfer(args)
            else:
                # Non-nmoments losses use a fixed order n = 4.
                n = 4
                for name_img in list_img:
                    tf.reset_default_graph()  # Necessity to use a new graph !!
                    name_img_wt_ext, _ = name_img.split('.')
                    img_folder = path_origin
                    img_output_folder = path_origin
                    # Output name encodes each loss term.
                    output_img_name = name_img_wt_ext
                    for loss_item in loss:
                        output_img_name += '_' + loss_item
                    parser.set_defaults(verbose=True,
                                        max_iter=max_iter,
                                        print_iter=print_iter,
                                        img_folder=path_origin,
                                        img_output_folder=path_output,
                                        style_img_name=name_img_wt_ext,
                                        content_img_name=name_img_wt_ext,
                                        init_noise_ratio=init_noise_ratio,
                                        start_from_noise=start_from_noise,
                                        output_img_name=output_img_name,
                                        optimizer=optimizer,
                                        loss=loss,
                                        init=init,
                                        init_range=init_range,
                                        p=p,
                                        n=n,
                                        clipping_type=clipping_type,
                                        vgg_name=vgg_name)
                    args = parser.parse_args()
                    st.style_transfer(args)
def generation_Texture_LossFct():
    """Run Main_Style_Transfer.py as a subprocess (via os.system) for
    each image and loss configuration, instead of calling
    st.style_transfer in-process.

    NOTE(review): the command is built by raw string concatenation and
    passed to os.system — file names containing spaces or shell
    metacharacters would break or inject into the shell; prefer
    subprocess.run with an argument list.
    NOTE(review): only the last `losses_to_test` /
    `config_layers_tab` assignment is effective (experiment toggles).
    """
    path_origin = '/home/nicolas/Style-Transfer/dataImages/'
    path_output = '/home/nicolas/Style-Transfer/LossFct/resultsDiff_loss_function/'
    do_mkdir(path_output)
    parser = get_parser_args()
    max_iter = 2000
    print_iter = 500
    start_from_noise = 1
    init_noise_ratio = 1.0
    optimizer = 'lbfgs'
    init = 'Gaussian'
    init_range = 0.0
    clipping_type = 'ImageStyleBGR'
    vgg_name = 'normalizedvgg.mat'
    n_list = [1, 2, 4]  # moment orders swept for the 'nmoments' loss
    p = 4
    # Successive overwrites: only the last value is effective.
    losses_to_test = [['autocorr_rfft'], ['autocorr'], ['Lp'], ['texture'],
                      ['phaseAlea'], ['phaseAleaSimple'],
                      ['autocorr_rfft', 'texture'], ['texture', 'spectrum'],
                      ['autocorr_rfft', 'spectrum'], ['autocorrLog'],
                      ['variance']]
    losses_to_test = [['phaseAlea']]
    config_layers_tab = ['FirstConvs', 'PoolConfig']
    config_layers_tab = ['PoolConfig']
    # Il manque les phaseAlea pour PoolConfig mais aussi variance
    # (translation: the phaseAlea runs for PoolConfig are still missing,
    # as well as variance)
    list_img = get_list_of_images(path_origin)
    Stop = False  # guard flag; see NOTE(review) below
    for config_layers in config_layers_tab:
        for loss in losses_to_test:
            if loss[0] == 'nmoments':
                for n in n_list:
                    if (config_layers == 'FirstConvs') and (n > 2):
                        loss = ['nmoments_reduce']
                    for name_img in list_img:
                        name_img_wt_ext, _ = name_img.split('.')
                        img_folder = path_origin
                        output_img_name = name_img_wt_ext + '_' + config_layers
                        loss_str = ''
                        for loss_item in loss:
                            output_img_name += '_' + loss_item + '_' + str(n)
                            loss_str += loss_item + ' '
                        # Assemble the full CLI call for the subprocess.
                        main_command = 'python Main_Style_Transfer.py --verbose --max_iter ' + str(
                            max_iter
                        ) + ' --print_iter ' + str(
                            print_iter
                        ) + ' --start_from_noise ' + str(
                            start_from_noise
                        ) + ' --init_noise_ratio ' + str(
                            init_noise_ratio
                        ) + ' --img_folder ' + path_origin + ' --output_img_name ' + output_img_name + ' --img_output_folder ' + path_output + ' --style_img_name ' + name_img_wt_ext + ' --content_img_name ' + name_img_wt_ext + ' --loss ' + loss_str + ' --init ' + str(
                            init
                        ) + ' --init_range ' + str(init_range) + ' --n ' + str(
                            n
                        ) + ' --p ' + str(
                            p
                        ) + ' --clipping_type ' + clipping_type + ' --vgg_name ' + vgg_name + ' --config_layers ' + config_layers
                        print(main_command)
                        try:
                            if not (Stop):
                                os.system(main_command)
                        except:
                            raise
                            # NOTE(review): unreachable after `raise` —
                            # in the mangled original the placement of
                            # these two statements is ambiguous; confirm
                            # the intended early-stop behavior.
                            Stop = True
                            sys.exit(0)
            else:
                n = 4
                for name_img in list_img:
                    name_img_wt_ext, _ = name_img.split('.')
                    img_folder = path_origin
                    output_img_name = name_img_wt_ext + '_' + config_layers
                    loss_str = ''
                    for loss_item in loss:
                        output_img_name += '_' + loss_item
                        loss_str += loss_item + ' '
                    # Assemble the full CLI call for the subprocess.
                    main_command = 'python Main_Style_Transfer.py --verbose --max_iter ' + str(
                        max_iter
                    ) + ' --print_iter ' + str(
                        print_iter
                    ) + ' --start_from_noise ' + str(
                        start_from_noise
                    ) + ' --init_noise_ratio ' + str(
                        init_noise_ratio
                    ) + ' --img_folder ' + path_origin + ' --output_img_name ' + output_img_name + ' --img_output_folder ' + path_output + ' --style_img_name ' + name_img_wt_ext + ' --content_img_name ' + name_img_wt_ext + ' --loss ' + loss_str + ' --init ' + str(
                        init
                    ) + ' --init_range ' + str(init_range) + ' --n ' + str(
                        n
                    ) + ' --p ' + str(
                        p
                    ) + ' --clipping_type ' + clipping_type + ' --vgg_name ' + vgg_name + ' --config_layers ' + config_layers
                    print(main_command)
                    try:
                        if not (Stop):
                            os.system(main_command)
                    except:
                        raise
                        # NOTE(review): unreachable after `raise` — see
                        # the note in the branch above.
                        Stop = True
                        sys.exit(0)
def Test_If_Net_See_Noise():
    """Test whether noise is invisible to the network.

    Builds a list of denoised variants of one noisy pastiche (NL-means,
    Gaussian, median, mean, TV, bilateral and wavelet filters), then
    evaluates the texture (style) loss of each variant against a
    reference texture and prints the values.

    Each filter result is cached on disk: if the filtered image already
    exists it is read back, otherwise it is computed and saved.

    Returns:
        0 on completion.
    """
    list_img = []       # images to score
    list_name_img = []  # parallel list of display names
    img_folder = 'NoiseOrigin/CompDenoising/'
    img_name = 'Pastiche_Uniform'
    img_ext = '.png'
    image_path = img_folder + img_name + img_ext
    noisy_img = scipy.misc.imread(image_path)
    list_img += [noisy_img]
    list_name_img += ['noisy_img']
    # NL-means: read an image denoised by the user.
    image_path = img_folder + 'denoised_Uniform' + img_ext
    nlmeans_denoised_img_user = scipy.misc.imread(image_path)
    list_img += [nlmeans_denoised_img_user]
    list_name_img += ['nlmeans_denoised_user']
    # NL-means by scikit-image (computed on first run, cached afterwards).
    output_image_path = img_folder + 'nlmeans_denoised' + img_ext
    try:
        nlmeans_denoised_img = scipy.misc.imread(output_image_path)
    except:
        nlmeans_denoised_img = denoise_nl_means(noisy_img,
                                                patch_size=7,
                                                patch_distance=40,
                                                h=40,
                                                multichannel=True,
                                                fast_mode=False)
        scipy.misc.toimage(nlmeans_denoised_img).save(output_image_path)
    list_img += [nlmeans_denoised_img]
    list_name_img += ['nlmeans_denoised']
    # Gaussian filter.
    output_image_path = img_folder + 'gaussian_filtered' + img_ext
    try:
        gaussian_filter_img = scipy.misc.imread(output_image_path)
    except:
        gaussian_filter_img = filters.gaussian(noisy_img,
                                               sigma=1,
                                               mode='reflect',
                                               multichannel=True)
        scipy.misc.toimage(gaussian_filter_img).save(output_image_path)
    list_img += [gaussian_filter_img]
    list_name_img += ['gaussian_filtered']
    # Median filter (applied per channel).
    output_image_path = img_folder + 'median_filtered' + img_ext
    try:
        median_filter_img = scipy.misc.imread(output_image_path)
    except:
        median_filter_img = np.zeros(shape=noisy_img.shape)
        for i in range(3):
            median_filter_img[:, :, i] = filters.rank.median(noisy_img[:, :, i])
        scipy.misc.toimage(median_filter_img).save(output_image_path)
    list_img += [median_filter_img]
    list_name_img += ['median_filtered']
    # Total-variation filter.
    output_image_path = img_folder + 'tv_filtered' + img_ext
    try:
        tv_img = scipy.misc.imread(output_image_path)
    except:
        tv_img = denoise_tv_chambolle(noisy_img, weight=0.1, multichannel=True)
        scipy.misc.toimage(tv_img).save(output_image_path)
    list_img += [tv_img]
    list_name_img += ['tv_filtered']
    # Mean (box) filter: 3x3 averaging kernel, applied per channel.
    output_image_path = img_folder + 'moyen_img' + img_ext
    try:
        moyen_img = scipy.misc.imread(output_image_path)
    except:
        weights = (1. / 9.) * np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
        moyen_img = np.zeros(shape=noisy_img.shape)
        for i in range(3):
            moyen_img[:, :, i] = scipy.ndimage.filters.convolve(
                noisy_img[:, :, i],
                weights,
                output=None,
                mode='reflect',
                cval=0.0,
                origin=0)
        scipy.misc.toimage(moyen_img).save(output_image_path)
    list_img += [moyen_img]
    list_name_img += ['moyen_img']
    # Bilateral filter.
    output_image_path = img_folder + 'bilateral_filtered' + img_ext
    try:
        bilateral_img = scipy.misc.imread(output_image_path)
    except:
        bilateral_img = denoise_bilateral(noisy_img,
                                          sigma_color=0.05,
                                          sigma_spatial=15,
                                          multichannel=True)
        scipy.misc.toimage(bilateral_img).save(output_image_path)
    list_img += [bilateral_img]
    list_name_img += ['bilateral_filtered']
    # Wavelet filter.
    output_image_path = img_folder + 'wavelet_filtered' + img_ext
    try:
        wavelet_img = scipy.misc.imread(output_image_path)
    except:
        wavelet_img = denoise_wavelet(noisy_img, multichannel=True)
        scipy.misc.toimage(wavelet_img).save(output_image_path)
    list_img += [wavelet_img]
    list_name_img += ['wavelet_filtered']
    # Reference image: the clean texture all variants are scored against.
    ref_img_name = 'BrickSmallBrown0293_1_S'
    image_path = img_folder + ref_img_name + img_ext
    ref_img = st.preprocess(scipy.misc.imread(image_path).astype('float32'))
    parser = get_parser_args()
    parser.set_defaults(loss='texture')
    args = parser.parse_args()
    # The reference serves as both content and style image.
    image_content = ref_img
    image_style = ref_img
    _, image_h, image_w, number_of_channels = image_content.shape
    M_dict = st.get_M_dict(image_h, image_w)
    sess = tf.Session()
    vgg_layers = st.get_vgg_layers()
    dict_gram = st.get_Gram_matrix_wrap(args, vgg_layers, image_style)
    dict_features_repr = st.get_features_repr_wrap(args, vgg_layers,
                                                   image_content)
    net = st.net_preloaded(vgg_layers, image_content)
    style_loss = st.sum_style_losses(sess, net, dict_gram, M_dict)
    # Placeholder lets us evaluate the same loss graph on each variant.
    placeholder = tf.placeholder(tf.float32, shape=image_style.shape)
    assign_op = net['input'].assign(placeholder)
    sess.run(tf.global_variables_initializer())
    print("Loss function for differents denoised image")
    print(ref_img_name)
    for img, img_name in zip(list_img, list_name_img):
        img = st.preprocess(img.astype('float32'))
        sess.run(assign_op, {placeholder: img})
        loss = style_loss.eval(session=sess)
        print(img_name, loss)
    return (0)
def main():
    """Entry point: parse CLI arguments and launch the style transfer."""
    args = get_parser_args().parse_args()
    style_transfer(args)
'Comparaison Histogram BGR origine Generation modif par Hist Matching') plt.savefig(pp, format='pdf') #result_img_postproc_hist2= Misc.histeq(result_img_postproc_hist) #f, ax = plt.subplots(2,3) #reshaped_hist2 =np.reshape(result_img_postproc_hist2,(image_h_art*image_w_art,channels)) #for i in range(channels): #ax[0,i].hist(reshaped_original[:,i],bins=np.arange(0, 255 + binwidth, binwidth)) #ax[1,i].hist(reshaped_hist2[:,i],bins=np.arange(0, 255 + binwidth, binwidth)) #plt.suptitle('Comparaison Histogram BGR origine Generation modif par Hist Matching') #plt.savefig(pp, format='pdf') #output_image_path = args.img_output_folder + args.output_img_name +'_hist2' +args.img_ext #scipy.misc.toimage(result_img_postproc_hist2).save(output_image_path) plt.close() pp.close() if __name__ == '__main__': parser = get_parser_args() style_img_name = "pebbles" style_img_name = "TilesOrnate0158_1_S" output_img_name = "Gen" parser.set_defaults(verbose=True, style_img_name=style_img_name, output_img_name=output_img_name) args = parser.parse_args() #generationFromOriginal(args) generationFromMarginal(args)