def main():
    tf.enable_eager_execution()
    model = get_model()
    style_features_group, content_features = get_feature_representations(model)
    if use_sliding_window:
        # Optimize the image tile by tile: slide a window over the content
        # array and run gradient descent on each patch independently.
        combined_array = content_array
        for i in range(iteration_size):
            if i % 20 == 0:
                print(i)
                image.save_img(x=image.array_to_img(deprocess_image(np.array(combined_array))),
                               path='./combined/combine%d.jpg' % i)
            for row in range(0, content_array.shape[0], window_stride):
                for col in range(0, content_array.shape[1], window_stride):
                    part_array = np.expand_dims(
                        combined_array[row: row + window_size, col: col + window_size], axis=0)
                    part_array = tf.Variable(part_array)
                    gradient_decent(model, part_array, style_features_group, content_features)
                    # Write the optimized patch back into the full image.
                    combined_array[row: row + window_size, col: col + window_size] = part_array.numpy()[0]
    else:
        # Optimize the whole image as a single trainable variable.
        combined_array = tf.Variable(np.expand_dims(content_array, axis=0), dtype=tf.float32)
        for i in range(iteration_size):
            if i % 50 == 0:
                print(i)
                image.save_img(x=image.array_to_img(deprocess_image(combined_array.numpy()[0])),
                               path='./combined/combine%d.jpg' % i)
            gradient_decent(model, combined_array, style_features_group, content_features)
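# main() above and sub_main() later in this collection call a gradient_decent()
# helper (name kept exactly as it appears at the call sites) that is never
# defined here. A minimal sketch of one optimization step, assuming a
# compute_loss() that combines the style and content terms and a module-level
# `optimizer` (both names are hypothetical, not confirmed by these snippets),
# mirroring the get_gradient/apply_gradients pattern the other drivers use:
def gradient_decent(model, combined_array, style_features_group, content_features):
    with tf.GradientTape() as tape:
        loss = compute_loss(model, combined_array, style_features_group, content_features)
    # Differentiate the loss w.r.t. the generated image and take one step.
    gradients = tape.gradient(loss, combined_array)
    optimizer.apply_gradients([(gradients, combined_array)])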
def save_img(self):
    progress_bar = Progbar(len(self.images_np_arr))
    for i in range(len(self.images_np_arr)):
        image = self.images_np_arr[i].astype('float32') / 255.
        file = os.path.join(self.train_dir, 'images', str(i))
        save_img(file + '.png', image)
        if i % 100 == 0:
            progress_bar.add(100)
def main():
    model = get_model()
    style_features_group, content_features = get_feature_representations(model)
    combined_array = tf.contrib.eager.Variable(np.expand_dims(content_array, axis=0),
                                               dtype=tf.float32)
    for i in range(iteration_size):
        gradients = get_gradient(model, combined_array, style_features_group, content_features)
        optimizer.apply_gradients([(gradients, combined_array)])
        if i % 50 == 0:
            image.save_img(x=image.array_to_img(deprocess_image(combined_array.numpy()[0])),
                           path='./combined/combine%d.jpg' % i)
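# deprocess_image() is called throughout before saving but never defined in
# these snippets (and its signature varies: the train() snippet below also
# passes nrows/ncols). A minimal sketch, assuming the images were prepared
# with vgg19.preprocess_input, i.e. ImageNet mean subtraction and RGB->BGR:
def deprocess_image(x):
    x = x.copy()
    # Add back the per-channel ImageNet means removed by preprocess_input.
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # Convert BGR back to RGB and clip to the valid pixel range.
    x = x[:, :, ::-1]
    return np.clip(x, 0, 255).astype('uint8')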
def move_image(self, group_dir, fp, imgs):
    base_path = '../../data/crl_image/'
    group_path = base_path + group_dir
    # The destination folder is named after the last component of the
    # (Windows-style) source path.
    move_path = group_path + '/' + fp.split('\\')[-1]
    print(move_path)
    if not os.path.exists(group_path):
        os.mkdir(group_path)
    if not os.path.exists(move_path):
        os.mkdir(move_path)
    for img in imgs:
        image_obj = load_img(fp + '\\' + img)
        save_img(move_path + '\\' + img, image_obj)
def sub_main():
    tf.enable_eager_execution()
    model = get_model()
    style_features_group, content_features = get_feature_representations(model)
    combined_array = tf.Variable(np.expand_dims(content_array, axis=0), dtype=tf.float32)
    for i in range(iteration_size):
        if i % 50 == 0:
            print(i)
            image.save_img(x=image.array_to_img(deprocess_image(combined_array.numpy()[0])),
                           path='./combined/combine%d.jpg' % i)
        gradient_decent(model, combined_array, style_features_group, content_features)
def main():
    tf.enable_eager_execution()
    model = get_model()
    style_features_group, content_features = get_feature_representations(model)
    combined_array = content_array
    for i in range(iteration_size):
        if i % 50 == 0:
            print(i)
            image.save_img(x=image.array_to_img(deprocess_image(combined_array)),
                           path='./combined/combine%d.jpg' % i)
        # Optimize the image patch by patch, writing each result back in place.
        for row in range(0, combined_array.shape[0], part_stride):
            for col in range(0, combined_array.shape[1], part_stride):
                part_array = tf.Variable(
                    np.expand_dims(combined_array[row: row + part_kernel_size,
                                                  col: col + part_kernel_size], axis=0),
                    dtype=tf.float32)
                gradients = get_gradient(model, part_array, style_features_group, content_features)
                optimizer.apply_gradients([(gradients, part_array)])
                combined_array[row: row + part_kernel_size,
                               col: col + part_kernel_size] = part_array.numpy()[0]
def train(self, iterate_num):
    # Generate the image by iterative optimization, starting from random noise.
    if K.image_data_format() == 'channels_first':
        x = np.random.uniform(0, 255, (1, 3, self.img_nrows, self.img_ncols)) - 128.
    else:
        x = np.random.uniform(0, 255, (1, self.img_nrows, self.img_ncols, 3)) - 128.
    fetch = {'loss_grad': self.outputs, 'summary': self.merged}
    evaluator = Evaluator(self.img_nrows, self.img_ncols, fetch, self.target_image, self.session)
    evaluator.write_summary = self.write_summary
    for i in range(iterate_num):
        print('Start of iteration', i)
        start_time = time.time()
        x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                         fprime=evaluator.grads, maxfun=20)
        print('Current loss value:', min_val)
        # Save the current generated image.
        img = deprocess_image(x.copy(), self.img_nrows, self.img_ncols)
        fname = P.doodle_target_img_prefix + '_at_iteration_%d.png' % i
        save_img(fname, img)
        evaluator.count += 1
        end_time = time.time()
        print('Image saved as', fname)
        print('Iteration %d completed in %ds' % (i, end_time - start_time))
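# fmin_l_bfgs_b takes separate loss and gradient callbacks, so the Evaluator
# used above caches both results of a single forward/backward pass. The class
# in train() takes extra constructor arguments (fetches, session, summaries)
# that are not shown; this is a minimal sketch of just the caching pattern,
# following the simpler no-argument form used in the next snippet and assuming
# module-level fetch_loss_and_grads, img_height and img_width:
class Evaluator(object):
    def __init__(self):
        self.loss_value = None
        self.grads_values = None

    def loss(self, x):
        # One pass computes both loss and gradients; cache the gradients
        # so grads() can return them without recomputing.
        x = x.reshape((1, img_height, img_width, 3))
        outs = fetch_loss_and_grads([x])
        self.loss_value = outs[0]
        self.grads_values = outs[1].flatten().astype('float64')
        return self.loss_value

    def grads(self, x):
        grads_values = np.copy(self.grads_values)
        self.loss_value = None
        self.grads_values = None
        return grads_values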
from tensorflow.python.keras.preprocessing.image import save_img
from surface_match.dataset import BatchGenerator, get_experimental_dataset

batch_generator = BatchGenerator()
# batch_generator.load_dataset()
batch_generator.default_weight = 0.06 ** 2
batch_generator.init_weights()
batch_generator.load_example_weights()
batch_generator.init_weight_normalize()

(t_images_1, t_images_2, t_results, indexes) = get_experimental_dataset(True)

for index in range(len(t_results)):
    val = str(t_results[index])
    root_name = str(index) + '-root_' + val + '.jpg'
    target_name = str(index) + '-target_' + val + '.jpg'
    save_img('exp_dataset/' + root_name, t_images_1[index])
    save_img('exp_dataset/' + target_name, t_images_2[index])
# Accumulate the per-layer style loss, then add total-variation regularization.
sl = style_loss(style_reference_features, combination_features)
loss += (style_weight / len(style_layers)) * sl
loss += total_variation_weight * total_variation_loss(combination_image)

grads = K.gradients(loss, combination_image)[0]
fetch_loss_and_grads = K.function([combination_image], [loss, grads])
evaluator = Evaluator()

result_prefix = 'result'
iterations = 20
x = preprocess_image(target_image_path)
x = x.flatten()
for i in range(iterations):
    print('Iteration:', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x, fprime=evaluator.grads, maxfun=20)
    print('Current loss value:', min_val)
    img = x.copy().reshape((img_height, img_width, 3))
    img = deprocess_image(img)
    fname = result_prefix + '_at_iteration_%d.png' % i
    save_img(fname, img)
    end_time = time.time()
    print('Saved image:', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
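# style_loss() above is, in the standard neural style transfer formulation,
# a scaled squared distance between Gram matrices of the feature maps. A
# minimal sketch in Keras backend ops, assuming 'channels_last' data and the
# img_height/img_width globals used above; gram_matrix is a hypothetical
# helper name, not shown in these snippets:
def gram_matrix(x):
    # Flatten the spatial dimensions and compute channel-wise feature
    # correlations.
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features))

def style_loss(style, combination):
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_height * img_width
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))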
if delta > error_threshold:
    # print('Save b:' + str(batch) + '/' + str(test_batches) + ' i:' + str(index) + '/' + str(batch_size))
    hard_indexes.append([
        int(indexes[index][0]),
        int(indexes[index][1]),
        delta
    ])
    if delta > save_images_error_threshold and save_images:
        # Save the image pair whose prediction missed badly, with the
        # predicted vs. real score encoded in the file name.
        r_val = 'r%.2f' % real_result
        p_val = 'p%.2f' % predicted_result
        root_name = str(batch) + '-' + str(index) + '-root_' + p_val + 'vs' + r_val + '.jpg'
        target_name = str(batch) + '-' + str(index) + '-target_' + p_val + 'vs' + r_val + '.jpg'
        save_img('bad_predictions/' + root_name, t_images_1[index])
        save_img('bad_predictions/' + target_name, t_images_2[index])

if batch % 5 == 0 and batch > 0:
    progress_bar.add(5)
if batch % 20 == 0 and batch > 0:
    save_file(hard_indexes)

# Update the per-example weights based on prediction difficulty.
batch_generator.load_example_weights()
for sample in samples:
    batch_generator.update_weights(sample[0], sample[1], sample[2])
batch_generator.save_example_weights()
def save_img(img, file_to_save):
    """Save the passed image to the requested file."""
    nparray_rep = img
    if not isinstance(img, np.ndarray):
        # Convert PIL images (or anything array-like) to an ndarray first.
        nparray_rep = k_image.img_to_array(img)
    k_image.save_img(file_to_save, nparray_rep)
def save_image(data, name):
    save_img(name, data)
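# save_image() above adapts a (data, name) argument order to the Keras
# save_img(path, x) convention. Hypothetical usage, assuming an ndarray
# `arr` already exists (it is not defined in these snippets):
save_image(arr, 'out/example.png')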