def __init__(self, patch_size=512, saved_model_path='./model_logs/release_places2_256'):
    """Build the DeepFill inference graph once and load pretrained weights.

    Saved model weights url:
    https://drive.google.com/drive/folders/1y7Irxm3HSHGvp546hZdAZwuNmhLUVcjO

    Args:
        patch_size: side length (pixels) of the square patch fed to the network.
        saved_model_path: directory containing the pretrained TF checkpoint.
    """
    self.FLAGS = ng.Config('inpaint.yml')
    self.model = InpaintCAModel()
    self.checkpoint_dir = saved_model_path
    self.patch_size = patch_size
    self.sess_config = tf.ConfigProto()
    # Grow GPU memory on demand rather than grabbing it all up front.
    self.sess_config.gpu_options.allow_growth = True
    # Width is patch_size * 2: image and mask are concatenated along axis 2.
    self._inpaint_input_placeholder = tf.placeholder(
        shape=(1, self.patch_size, self.patch_size * 2, 3), dtype=tf.float32)
    self.sess = tf.Session(config=self.sess_config)
    output = self.model.build_server_graph(
        self.FLAGS, self._inpaint_input_placeholder, reuse=tf.AUTO_REUSE)
    # Network output is in [-1, 1]; rescale to [0, 255].
    output = (output + 1.) * 127.5
    # Reverse the channel axis (RGB <-> BGR for OpenCV use).
    output = tf.reverse(output, [-1])
    output = tf.saturate_cast(output, tf.uint8)
    # Cache the output tensor so inference only needs a feed into the placeholder.
    self._cached_inpaint_output = output
    self.load_model()
def test_save_and_run_model(tmpdir):
    """Save an untrained InpaintCAModel checkpoint, then run the fill app on it.

    Checks that (1) saving freshly initialized weights raises no error and
    (2) the fill pipeline can restore that checkpoint and produce an image of
    the expected shape/dtype.

    Fixes over the original: `batch_size` and `net_name` were undefined
    globals; `cv2.compare_ssim` does not exist (SSIM lives in scikit-image);
    and the SSIM call compared a numpy array against a file *path* in a fresh
    tmpdir, which can never hold a reference image. An untrained (randomly
    initialized) model has no stable golden output, so structural assertions
    replace the image comparison.
    """
    checkpoint_dir = tmpdir.join('ckpt')  # a 'local' object.
    batch_size = 1               # was an undefined global
    net_name = 'inpaint_net'     # was an undefined global
    with tf.Graph().as_default():
        # Setup: create the model and build the inpainting graph on dummy tensors.
        model = deep_fill_model.InpaintCAModel()
        dummy_img = tf.zeros([batch_size, 256, 256, 3], tf.float32)
        dummy_mask = tf.ones([batch_size, 256, 256, 1], tf.float32)
        stage_1, stage_2, offset_flow = model.build_inpaint_net(
            x=dummy_img, mask=dummy_mask, training=False, name=net_name)
        with tf.Session() as sess:
            # Initialize variables.
            sess.run(tf.global_variables_initializer())
            # Test.
            # 1. Save random model. There should be no errors.
            global_vars = tf.global_variables()
            saver = tf.train.Saver(global_vars)
            saver.save(sess, str(checkpoint_dir))
    # 2. Use the randomly initialized model to fill.
    input_image_path = './waseda_fill/statue1.jpg'
    mask_path = './waseda_fill/statue1_mask2.png'
    out = deep_fill_app.fill(
        image_path=input_image_path,
        mask_path=mask_path,
        checkpoint_dir=str(checkpoint_dir),
        FLAGS=ng.Config('./test/resources/inpaint_test.yml'))
    # Persist the result inside tmpdir for manual inspection.
    cv2.imwrite(str(tmpdir.join('case1_filled_using_untrained_model.png')),
                out[0][:, :, ::-1])
    # Structural checks: one batch element, 3 channels, uint8 pixels.
    assert out.dtype == np.uint8
    assert out.ndim == 4
    assert out.shape[0] == 1 and out.shape[3] == 3
def deepfill_inpaint(basedata, imagepath, maskinfo, status, checkpointdir, inputimgpath, outputpath):
    """Run DeepFill inpainting on a single image.

    Args:
        basedata: object exposing DEEPFILL_BASE_DIR, where inpaint.yml lives.
        imagepath: path of the image to inpaint.
        maskinfo: if status == 0, a list of (x, y, w, h) rectangles to mask;
            otherwise a path to a mask image (255 marks pixels to fill).
        status: selects how maskinfo is interpreted (0 = rectangles, else file).
        checkpointdir: directory of the pretrained TF checkpoint.
        inputimgpath: where the masked (white-holed) input image is written.
        outputpath: where the inpainted result is written.
    """
    # basedata = BaseData()
    FLAGS = ng.Config(join(basedata.DEEPFILL_BASE_DIR, 'inpaint.yml'))
    # FLAGS = ng.Config('./inpaint.yml')
    # FLAGS = ng.Config('/home/zzy/work/dnnii_web/dnnii_web/App/deepfill/inpaint.yml')
    # ng.get_gpus(1)
    # args, unknown = parser.parse_known_args()
    model = InpaintCAModel()
    image = cv2.imread(imagepath)
    h, w, _ = image.shape
    if status == 0:
        # Build a binary mask from the given rectangles (255 = to be filled).
        mask = np.zeros((h, w, 3)).astype(np.uint8)
        for rect in maskinfo:
            mask[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2], :] = 255
    else:
        mask = cv2.imread(maskinfo)
        # Resize the mask to match the image; dsize takes precedence over fx/fy.
        mask = cv2.resize(mask, (w, h), fx=0.5, fy=0.5)
    assert image.shape == mask.shape
    # Crop image and mask to multiples of `grid` cells; '//' is floor division.
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))
    # Save the masked version of the input for reference.
    inputimage = image * ((255 - mask) // 255) + mask
    cv2.imwrite(inputimgpath, inputimage.astype(np.uint8))
    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    # The model expects image and mask concatenated along the width axis.
    input_image = np.concatenate([image, mask], axis=2)
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    # Build in a dedicated graph so this does not pollute the default graph.
    deepfill_graph = tf.Graph()
    with tf.Session(config=sess_config, graph=deepfill_graph) as deepfill_sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(FLAGS, input_image)
        # Rescale [-1, 1] -> [0, 255], reverse channels, clamp to uint8.
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                checkpointdir, from_name)
            assign_ops.append(tf.assign(var, var_value))
        deepfill_sess.run(assign_ops)
        print('deepfill Model loaded.')
        result = deepfill_sess.run(output)
        cv2.imwrite(outputpath, result[0][:, :, ::-1])
        # NOTE(review): close() is redundant inside the `with` block.
        deepfill_sess.close()
def Output(img_in, mask_in, img_out): print("import from test:", img_in, mask_in, img_out) #if __name__ == "__main__": FLAGS = ng.Config('inpaint.yml') # ng.get_gpus(1) #args, unknown = parser.parse_known_args() model = InpaintCAModel() #image = cv2.imread(args.image) image = cv2.imread("examples/places356/" + img_in) #mask = cv2.imread(args.mask) mask = cv2.imread("places356_mask/" + mask_in) # mask = cv2.resize(mask, (0,0), fx=0.5, fy=0.5) assert image.shape == mask.shape h, w, _ = image.shape grid = 8 image = image[:h // grid * grid, :w // grid * grid, :] mask = mask[:h // grid * grid, :w // grid * grid, :] print('Shape of image: {}'.format(image.shape)) image = np.expand_dims(image, 0) mask = np.expand_dims(mask, 0) input_image = np.concatenate([image, mask], axis=2) sess_config = tf.ConfigProto() sess_config.gpu_options.allow_growth = True with tf.Session(config=sess_config) as sess: input_image = tf.constant(input_image, dtype=tf.float32) output = model.build_server_graph(FLAGS, input_image) output = (output + 1.) * 127.5 output = tf.reverse(output, [-1]) output = tf.saturate_cast(output, tf.uint8) # load pretrained model vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) assign_ops = [] for var in vars_list: vname = var.name from_name = vname #var_value = tf.contrib.framework.load_variable(args.checkpoint_dir, from_name) var_value = tf.contrib.framework.load_variable( "model_logs/release_places2_256", from_name) assign_ops.append(tf.assign(var, var_value)) sess.run(assign_ops) print('Model loaded.') result = sess.run(output) #cv2.imwrite(args.output, result[0][:, :, ::-1]) #cv2.imshow("result", result[0][:, :, ::-1]) cv2.imwrite("examples/places356/" + img_out, result[0][:, :, ::-1]) show1 = cv2.imread("examples/places356/" + img_in) show2 = cv2.imread("examples/places356/" + img_out) show = np.hstack([show1, show2]) cv2.imshow("result", show)
def inpaint(arg_image_dir, arg_mask_dir, arg_checkpoint_dir, arg_output_dir):
    """Batch-inpaint every image in arg_image_dir whose mask (same filename)
    exists in arg_mask_dir, writing results to arg_output_dir.

    Fix: path handling now uses os.path.join consistently. The original mixed
    raw string concatenation (e.g. arg_output_dir + arg_image) with
    os.path.join, which silently breaks when a directory argument lacks a
    trailing separator.

    Args:
        arg_image_dir: directory of input images.
        arg_mask_dir: directory of masks named like their images.
        arg_checkpoint_dir: directory of the pretrained TF checkpoint.
        arg_output_dir: directory where inpainted results are written.
    """
    tf.reset_default_graph()
    FLAGS = ng.Config('inpaint.yml')
    # ng.get_gpus(1)
    model = InpaintCAModel()
    for arg_image in os.listdir(arg_image_dir):
        arg_mask = arg_image  # assume the mask has the same name as the image
        image_path = os.path.join(arg_image_dir, arg_image)
        mask_path = os.path.join(arg_mask_dir, arg_mask)
        output_path = os.path.join(arg_output_dir, arg_image)
        # Skip images already processed on a previous run.
        if os.path.exists(output_path):
            print("note |", arg_image, "already inpainted.")
            continue
        # Skip silently when either the image or its mask is missing.
        if not (os.path.exists(image_path) and os.path.exists(mask_path)):
            continue
        image = cv2.imread(image_path)
        mask = cv2.imread(mask_path)
        name = arg_image
        assert image.shape == mask.shape
        h, w, _ = image.shape
        # Crop both to multiples of 8 as required by the network.
        grid = 8
        image = image[:h // grid * grid, :w // grid * grid, :]
        mask = mask[:h // grid * grid, :w // grid * grid, :]
        print('Shape of image: {}'.format(image.shape))
        image = np.expand_dims(image, 0)
        mask = np.expand_dims(mask, 0)
        input_image = np.concatenate([image, mask], axis=2)
        # Rebuild the graph per image so image sizes may differ between files.
        tf.reset_default_graph()
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        with tf.Session(config=sess_config) as sess:
            input_image = tf.constant(input_image, dtype=tf.float32)
            output = model.build_server_graph(FLAGS, input_image)
            # Rescale [-1, 1] -> [0, 255], reverse channels, clamp to uint8.
            output = (output + 1.) * 127.5
            output = tf.reverse(output, [-1])
            output = tf.saturate_cast(output, tf.uint8)
            # load pretrained model
            vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            assign_ops = []
            for var in vars_list:
                vname = var.name
                from_name = vname
                var_value = tf.contrib.framework.load_variable(
                    arg_checkpoint_dir, from_name)
                assign_ops.append(tf.assign(var, var_value))
            sess.run(assign_ops)
            print('Model loaded.')
            result = sess.run(output)
            cv2.imwrite(output_path, result[0][:, :, ::-1])
def __init__(self, checkpoint_dir=None, get_checkpoint_dir=download_radish, config=abspath('inpaint.yml'), max_size=(1024, 1024)):
    """Store inpainting configuration and resolve the checkpoint location.

    The checkpoint directory is fetched via `get_checkpoint_dir` only when the
    caller does not supply one; the model itself is loaded lazily later.
    """
    self.FLAGS = ng.Config(config)
    self.max_size = max_size
    # Fall back to the provided fetcher when no explicit directory was given.
    self.checkpoint_dir = get_checkpoint_dir() if checkpoint_dir is None else checkpoint_dir
    # Weights are not loaded here; a later call flips this flag.
    self.model_loaded = False
def inpaintP(name):
    """Inpaint workspace/<name>/<name>.png using workspace/<name>/tmask/mask.png,
    writing the result as 4#_<name>.png in the same folder. Returns 0 on success.
    """
    CUR_DIR = os.path.join(WORK_DIR, name)
    TMASK_DIR = os.path.join(WORK_DIR, name + "//tmask")
    INPAINT_MODEL_PATH = os.path.join(ROOT_DIR, "model_logs/release_places2_256")
    FLAGS = ng.Config('inpaint.yml')
    model = InpaintCAModel()
    image = cv.imread(os.path.join(CUR_DIR, f"{name}.png"))
    mask = cv.imread(os.path.join(TMASK_DIR, "mask.png"))
    filename = f'4#_{name}.png'
    assert image.shape == mask.shape
    h, w, _ = image.shape
    # Crop both to multiples of 8 as required by the network.
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))
    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    # Image and mask are concatenated along the width axis for the model.
    input_image = np.concatenate([image, mask], axis=2)
    sess_config = tf.ConfigProto()
    # Cap this process at half of the GPU memory.
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.5
    # Build in a fresh graph so this does not clash with other models in-process.
    tf2 = tf.Graph()
    with tf2.as_default():
        with tf.Session(config=sess_config) as sess:
            input_image = tf.constant(input_image, dtype=tf.float32)
            output = model.build_server_graph(FLAGS, input_image)
            # Rescale [-1, 1] -> [0, 255], reverse channels, clamp to uint8.
            output = (output + 1.) * 127.5
            output = tf.reverse(output, [-1])
            output = tf.saturate_cast(output, tf.uint8)
            # load pretrained model
            vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            assign_ops = []
            for var in vars_list:
                vname = var.name
                from_name = vname
                var_value = tf.contrib.framework.load_variable(
                    INPAINT_MODEL_PATH, from_name)
                assign_ops.append(tf.assign(var, var_value))
            sess.run(assign_ops)
            print('Model loaded.')
            result = sess.run(output)
            cv.imwrite(os.path.join(CUR_DIR, filename), result[0][:, :, ::-1])
            print('Image has been made')
    return 0
def generate_counterfactual(image_fp, mask_fp, output_fp, checkpoint_dir, model_id=None):
    """Inpaint the masked region of an image to produce a counterfactual image,
    writing the result to output_fp.

    Args:
        image_fp: path to the input image.
        mask_fp: path to the mask image (255 marks pixels to fill).
        output_fp: path the inpainted image is written to.
        checkpoint_dir: directory of the pretrained TF checkpoint.
        model_id: unused in this body — TODO confirm intended use.
    """
    try:
        FLAGS = ng.Config('config/inpaint.yml')
    except AssertionError:
        # ng.Config presumably asserts when the yml is missing — surface a hint.
        raise ValueError('check directory above')
    # ng.get_gpus(1)
    # args, unknown = parser.parse_known_args()
    model = InpaintCAModel()
    image = cv2.imread(image_fp)
    mask = cv2.imread(mask_fp)
    # mask = cv2.resize(mask, (0,0), fx=0.5, fy=0.5)
    assert image.shape == mask.shape
    h, w, _ = image.shape
    # Crop both to multiples of 8 as required by the network.
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    # Image and mask are concatenated along the width axis for the model.
    input_image = np.concatenate([image, mask], axis=2)
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(FLAGS, input_image)
        # Rescale [-1, 1] -> [0, 255], reverse channels, clamp to uint8.
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                checkpoint_dir, from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.')
        result = sess.run(output)
        cv2.imwrite(output_fp, result[0][:, :, ::-1])
        print(f'IMAGE WROTE TO {output_fp}\n\n\n')
    # Clear the default graph so repeated calls do not accumulate nodes.
    tf.reset_default_graph()
def pixel_fill(image, mask):
    """Inpaint `image` where `mask` is set and return the result in reversed
    channel order.

    NOTE(review): `checkpoint_dir` is a free variable — it must exist at the
    module scope for this function to work; confirm where it is defined.
    """
    # Local imports so TF and the model load only when the function is called.
    import tensorflow as tf2
    import neuralgym as ng
    from inpaint_model import InpaintCAModel
    # Broadcast a single-channel mask to the image's channel count.
    if image.ndim > mask.ndim:
        mask = np.dstack([mask] * image.shape[2])
    assert image.shape == mask.shape
    model = InpaintCAModel()
    FLAGS = ng.Config('inpaint.yml')
    h, w, _ = image.shape
    # Crop both to multiples of 8 as required by the network.
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))
    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    # Image and mask are concatenated along the width axis for the model.
    input_image = np.concatenate([image, mask], axis=2)
    sess_config = tf2.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf2.Session(config=sess_config) as sess:
        input_image = tf2.constant(input_image, dtype=tf2.float32)
        output = model.build_server_graph(FLAGS, input_image)
        # Rescale [-1, 1] -> [0, 255], reverse channels, clamp to uint8.
        output = (output + 1.) * 127.5
        output = tf2.reverse(output, [-1])
        output = tf2.saturate_cast(output, tf2.uint8)
        # load pretrained model
        vars_list = tf2.get_collection(tf2.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        print("checkpoint_dir = ", checkpoint_dir)
        for var in vars_list:
            vname = var.name
            from_name = vname
            # Restrict restore to the inpainting net's variables,
            if "inpaint_net" in var.name:  # or else is going to mix with mrcnn
                var_value = tf2.contrib.framework.load_variable(
                    checkpoint_dir, from_name)
                assign_ops.append(tf2.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.' * 10)
        result = sess.run(output)
        sess.close()
    # Drop the graph so the next call (or another model) starts clean.
    tf2.reset_default_graph()
    return result[0][:, :, ::-1]
def build_server_graph(self, batch_data, reuse=False, is_training=False, config=None):
    """Build the inference graph: split image/mask, inpaint, recomposite.

    Generalization: the inpainting config, previously hard-loaded from
    'inpaint.yml' inside this method, is now an optional parameter; omitting
    it preserves the original behavior.

    Args:
        batch_data: tensor with image and mask concatenated along axis 2.
        reuse: whether to reuse variables of the inpainting net.
        is_training: training-mode flag forwarded to build_inpaint_net.
        config: optional pre-loaded ng.Config; defaults to ng.Config('inpaint.yml').

    Returns:
        The completed batch: network prediction inside the mask, original
        pixels outside it.
    """
    # generate mask, 1 represents masked point
    batch_raw, masks_raw = tf.split(batch_data, 2, axis=2)
    masks = tf.cast(masks_raw[0:1, :, :, 0:1] > 127.5, tf.float32)
    # Rescale pixels from [0, 255] to [-1, 1].
    batch_pos = batch_raw / 127.5 - 1.
    # Zero out the masked region before inpainting.
    batch_incomplete = batch_pos * (1. - masks)
    # inpaint
    if config is None:
        config = ng.Config('inpaint.yml')
    x1, x2, flow = self.build_inpaint_net(
        batch_incomplete, masks, reuse=reuse, training=is_training,
        config=config)
    # Use the refined (stage-2) prediction.
    batch_predict = x2
    # apply mask and reconstruct
    batch_complete = batch_predict * masks + batch_incomplete * (1 - masks)
    return batch_complete
def inpainting_api(image, mask, checkpoint_dir='./model_logs/inpaint'):
    """Inpaint `image` where `mask` marks pixels to fill and return the result.

    Generalization: the checkpoint location, previously hard-coded inside the
    function, is now a parameter defaulting to the original path, so callers
    can target other trained models without editing this function.

    Args:
        image: HxWx3 input image array.
        mask: mask array of the same shape; masked pixels are filled.
        checkpoint_dir: directory of the pretrained TF checkpoint.

    Returns:
        The inpainted HxWx3 uint8 image with the channel axis reversed.
    """
    FLAGS = ng.Config('inpaint.yml')
    # Start from a clean graph so repeated API calls do not accumulate nodes.
    tf.reset_default_graph()
    model = InpaintCAModel()
    assert image.shape == mask.shape
    # crop() is a project helper — presumably trims to network-aligned size;
    # TODO confirm against its definition.
    image = crop(image)
    mask = crop(mask)
    print('Shape of image: {}'.format(image.shape))
    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    # Image and mask are concatenated along the width axis for the model.
    input_image = np.concatenate([image, mask], axis=2)
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(FLAGS, input_image)
        # Rescale [-1, 1] -> [0, 255], reverse channels, clamp to uint8.
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                checkpoint_dir, from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.')
        result = sess.run(output)
        return result[0][:, :, ::-1]
def __init__(self, checkpoint_dir, use_gpu=False, preview_socket_name=None):
    """Set up video recording plus a persistent TF session with the DeepFill
    graph pre-built for fixed-size frames, then warm it up with a blank frame.

    Args:
        checkpoint_dir: directory of the pretrained inpainting checkpoint.
        use_gpu: when True, reserve one GPU via neuralgym.
        preview_socket_name: optional UNIX socket path for streaming previews.
    """
    self.video_creator = VideoCreator(fps=5)
    if preview_socket_name is not None:
        # Connect a preview stream over a local UNIX domain socket.
        self.preview_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.preview_socket.connect(preview_socket_name)
    self.recording = False
    FLAGS = ng.Config('generative_inpainting/inpaint.yml')
    if use_gpu:
        ng.get_gpus(1)
    sess_config = tf.compat.v1.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    # One long-lived session; the graph is built once and reused per frame.
    self.sess = tf.compat.v1.Session(config=sess_config)
    model = InpaintCAModel()
    # Width is 480 * 2: image and mask are concatenated along axis 2.
    self.input_image_ph = tf.compat.v1.placeholder(
        tf.float32, shape=(1, 640, 480 * 2, 3))
    output = model.build_server_graph(FLAGS, self.input_image_ph)
    # Rescale [-1, 1] -> [0, 255], reverse channels, clamp to uint8.
    output = (output + 1.) * 127.5
    output = tf.reverse(output, [-1])
    self.output = tf.saturate_cast(output, tf.uint8)
    # Restore pretrained weights by explicit per-variable assignment.
    vars_list = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []
    for var in vars_list:
        vname = var.name
        from_name = vname
        var_value = tf.contrib.framework.load_variable(
            checkpoint_dir, from_name)
        assign_ops.append(tf.compat.v1.assign(var, var_value))
    self.sess.run(assign_ops)
    # Warm up the pipeline with one black frame and an all-False mask.
    self.on_image_received(
        ThermalImage(0, Image.new('RGB', (480, 640), color='black'),
                     np.full((480 * 640), False)))
def inpaintP(name):
    """Self-contained inpainting worker: performs its own imports and path
    setup, then inpaints workspace/<name>/<name>.png with
    workspace/<name>/tmask/mask.png, writing 4#_<name>.png next to the input.
    """
    # Local imports keep TF initialization (and its logging) inside the call.
    import os
    import time
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    import cv2 as cv
    import numpy as np
    import tensorflow as tf
    tf.compat.v1.logging.set_verbosity("ERROR")
    import neuralgym as ng
    from inpaint_model import InpaintCAModel
    ROOT_DIR = os.path.abspath("./")
    WAIT_DIR = os.path.abspath("./waiting")
    WORK_DIR = os.path.abspath("./workspace")
    CUR_DIR = os.path.join(WORK_DIR, name)
    TMASK_DIR = os.path.join(WORK_DIR, name + "//tmask")
    INPAINT_MODEL_PATH = os.path.join(ROOT_DIR, "model_logs/release_places2_256")
    FLAGS = ng.Config('inpaint.yml')
    model = InpaintCAModel()
    image = cv.imread(os.path.join(CUR_DIR, f"{name}.png"))
    mask = cv.imread(os.path.join(TMASK_DIR, "mask.png"))
    filename = f'4#_{name}.png'
    assert image.shape == mask.shape
    h, w, _ = image.shape
    # Crop both to multiples of 8 as required by the network.
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))
    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    # Image and mask are concatenated along the width axis for the model.
    input_image = np.concatenate([image, mask], axis=2)
    sess_config = tf.ConfigProto()
    # Cap this process at half of the GPU memory.
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.5
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(FLAGS, input_image)
        # Rescale [-1, 1] -> [0, 255], reverse channels, clamp to uint8.
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                INPAINT_MODEL_PATH, from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.')
        result = sess.run(output)
        cv.imwrite(os.path.join(CUR_DIR, filename), result[0][:, :, ::-1])
summary=True, reuse=True)
else:
    # Secondary path: reuse variables, no summaries.
    _, _, losses = model.build_graph_with_losses(images, config, reuse=True)
# Return the requested loss tensor for this tower.
if loss_type == 'g':
    return losses['g_loss']
elif loss_type == 'd':
    return losses['d_loss']
else:
    raise ValueError('loss type is not supported.')


if __name__ == "__main__":
    config = ng.Config('inpaint.yml')
    # Pin a specific GPU if configured, otherwise grab NUM_GPUS free ones.
    if config.GPU_ID != -1:
        ng.set_gpus(config.GPU_ID)
    else:
        ng.get_gpus(config.NUM_GPUS)
    # training data: file list -> neuralgym input pipeline.
    with open(config.DATA_FLIST[config.DATASET][0]) as f:
        fnames = f.read().splitlines()
    data = ng.data.DataFromFNames(
        fnames, config.IMG_SHAPES, random_crop=config.RANDOM_CROP)
    images = data.data_pipeline(config.BATCH_SIZE)
    # main model
    model = InpaintCAModel()
def __init__(self, checkpoint_dir: os.PathLike, ng_config_path: os.PathLike) -> None:
    """Record the checkpoint location and load the neuralgym config.

    Args:
        checkpoint_dir: directory holding the pretrained TF checkpoint.
        ng_config_path: path of the neuralgym yml configuration file.
    """
    self.FLAGS = ng.Config(ng_config_path)
    self.checkpoint_dir = checkpoint_dir
'--image_height', default=256, type=int,
help='The height of images should be defined, otherwise batch mode is not'
' supported.')
parser.add_argument(
    '--image_width', default=256, type=int,
    help='The width of images should be defined, otherwise batch mode is not'
    ' supported.')
parser.add_argument(
    '--checkpoint_dir', default='', type=str,
    help='The directory of tensorflow checkpoint.')
parser.add_argument(
    '--outlist', default='', type=str,
    help='The directory of putting out image.')


if __name__ == "__main__":
    FLAGS = ng.Config('./inpaint_dem.yml')
    ng.get_gpus(1)
    # os.environ['CUDA_VISIBLE_DEVICES'] =''
    args = parser.parse_args()
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)
    model = InpaintCAModel()
    # Width is image_width * 2 (image and mask side by side); the last dim is
    # 1 — this variant works on single-channel (DEM) data.
    input_image_ph = tf.placeholder(
        tf.float32, shape=(1, args.image_height, args.image_width * 2, 1))
    output = model.build_server_graph(FLAGS, input_image_ph)
    # NOTE(review): unlike the RGB variants, no (output+1)*127.5 rescale here
    # — presumably DEM values use a different range; confirm.
    output = tf.reverse(output, [-1])
    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []
_, _, losses = model.build_graph_with_losses(images, config, reuse=True)
# Return the requested loss tensor for this tower.
if loss_type == 'g':
    return losses['g_loss']
elif loss_type == 'd':
    return losses['d_loss']
else:
    raise ValueError('loss type is not supported.')


if __name__ == "__main__":
    # Config file path comes from the command line (default: inpaint.yml).
    parser = ArgumentParser()
    parser.add_argument('config', default='inpaint.yml')
    args = parser.parse_args()
    config = ng.Config(args.config)
    # Pin a specific GPU if configured, otherwise grab NUM_GPUS free ones.
    if config.GPU_ID != -1:
        ng.set_gpus(config.GPU_ID)
    else:
        # dedicated=False: allow sharing GPUs with other jobs.
        ng.get_gpus(config.NUM_GPUS, dedicated=False)
    # training data
    with open(config.DATA_FLIST[config.DATASET][0]) as f:
        fnames = f.read().splitlines()
    # This variant also wires augmentation params (flip/gamma/exposure).
    data = ng.data.DataFromFNames(
        fnames, config.IMG_SHAPES, random_crop=config.RANDOM_CROP,
        random_flip=config.RANDOM_FLIP, gamma=config.GAMMA,
        exposure=config.EXPOSURE)
    images = data.data_pipeline(config.BATCH_SIZE)
    # main model
else:
    # Secondary path: losses with explicit masks and guides, reusing variables.
    _, _, losses = model.build_graph_with_losses(
        images, masks, guides, config, reuse=True)
# Return the requested loss tensor for this tower.
if loss_type == 'g':
    return losses['g_loss']
elif loss_type == 'd':
    return losses['d_loss']
else:
    raise ValueError('loss type is not supported.')


if __name__ == "__main__":
    # Config file path is the first CLI argument.
    config = ng.Config(sys.argv[1])
    # Pin a specific GPU if configured, otherwise grab NUM_GPUS free ones.
    if config.GPU_ID != -1:
        ng.set_gpus(config.GPU_ID)
    else:
        ng.get_gpus(config.NUM_GPUS)
    # training data
    # Image Data
    with open(config.DATA_FLIST[config.DATASET][0]) as f:
        fnames = f.read().splitlines()
    # # Mask Data
    # When masks come from files, pair each image with its mask by position.
    if config.MASKFROMFILE:
        with open(config.DATA_FLIST[config.MASKDATASET][0]) as f:
            mask_fnames = f.read().splitlines()
        data_mask_data = DataMaskFromFNames(
            list(zip(fnames, mask_fnames)),
            [config.IMG_SHAPES, config.MASK_SHAPES],
# Remaining keyword arguments for the generator trainer.
'loss_type': 'g'
},
spe=config.TRAIN_SPE,
max_iters=config.MAX_ITERS,
log_dir=log_prefix,
)
# Attach monitoring / checkpointing callbacks to the training loop.
trainer.add_callbacks([
    ng.callbacks.WeightsViewer(),
    # ng.callbacks.ModelRestorer(trainer.context['saver'], dump_prefix=config.RESTORE_PREFIX, optimistic=True),
    discriminator_training_callback,
    ng.callbacks.ModelSaver(config.TRAIN_SPE, trainer.context['saver'],
                            log_prefix + '/snap'),
    ng.callbacks.SummaryWriter((config.VAL_PSTEPS // 1),
                               trainer.context['summary_writer'],
                               tf.summary.merge_all()),
])
trainer.train()


if __name__ == "__main__":
    config = ng.Config('progressive_gan.yml')
    # Pin a specific GPU if configured, otherwise grab NUM_GPUS free ones.
    if config.GPU_ID != -1:
        ng.set_gpus(config.GPU_ID)
    else:
        ng.get_gpus(config.NUM_GPUS)
    np.random.seed(config.RANDOM_SEED)
    # NOTE(review): eval() dispatches to the function named in config.TRAIN.func
    # — safe only as long as the config file is trusted input.
    eval(config.TRAIN.func + '(config)')
import cv2
import numpy as np
import tensorflow as tf
import neuralgym as ng

from .inpaint_model import InpaintCAModel

# Module-level checkpoint location and config shared by run_fill.
checkpoint_dir = 'generative_inpainting/models'
FLAGS = ng.Config('generative_inpainting/inpaint.yml')


def run_fill(file_test, file_mask):
    """Inpaint file_test using file_mask with the module-level checkpoint/config.

    (The definition continues beyond this chunk.)
    """
    model = InpaintCAModel()
    image = cv2.imread(file_test)
    mask = cv2.imread(file_mask)
    h, w, _ = image.shape
    # Crop both to multiples of 8 as required by the network.
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    # Image and mask are concatenated along the width axis for the model.
    input_image = np.concatenate([image, mask], axis=2)
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(FLAGS, input_image)
        # Rescale network output from [-1, 1] to [0, 255].
        output = (output + 1.) * 127.5
vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) assign_ops = [] idx_list = [54, 56, 58, 60, 62] # random for idx, var in enumerate(vars_list): vname = var.name from_name = vname var_value = tf.contrib.framework.load_variable(checkpoint_dir, from_name) assign_ops.append(tf.assign(var, var_value)) sess.run(assign_ops) print('Model loaded.') result = sess.run(output) return result if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--image', default='', type=str, help='The filename of image to be completed.') parser.add_argument('--mask', default='', type=str, help='The filename of mask, value 255 indicates mask.') parser.add_argument('--output', default='output.png', type=str, help='Where to write output.') parser.add_argument('--checkpoint_dir', default='', type=str, help='The directory of tensorflow checkpoint.') FLAGS = ng.Config('./resources/inpaint.yml') args, _ = parser.parse_known_args() result = fill(args.image, args.mask, args.checkpoint_dir, FLAGS) cv2.imwrite(args.output, result[0][:, :, ::-1])
reuse=True)
# Return the requested loss tensor for this tower.
if loss_type == 'g':
    return losses['g_loss']
elif loss_type == 'd':
    return losses['d_loss']
else:
    raise ValueError('loss type is not supported.')


if __name__ == "__main__":
    # Optional CLI argument selects the config file; default is inpaint.yml.
    if len(sys.argv) > 1:
        yml_path = sys.argv[1]
    else:
        yml_path = 'inpaint.yml'
    config = ng.Config(yml_path)
    # Pin a specific GPU if configured, otherwise grab NUM_GPUS free ones.
    if config.GPU_ID != -1:
        ng.set_gpus(config.GPU_ID)
    else:
        ng.get_gpus(config.NUM_GPUS)
    # training data
    with open(config.DATA_FLIST[config.DATASET][0]) as f:
        fnames = f.read().splitlines()
    data = ng.data.DataFromFNames(
        fnames, config.IMG_SHAPES, random_crop=config.RANDOM_CROP)
    images = data.data_pipeline(config.BATCH_SIZE)
    # main model
    model = InpaintCAModel()
    g_vars, d_vars, losses = model.build_graph_with_losses(images, config=config)
def cagc_inp_batch():
    """Batch-inpaint a fixed Places2 validation set (512x680 crops) with one
    shared wooden-texture mask, writing both masked inputs and results.
    """
    baserecutpath = '/home/zzy/TrainData/MITPlace2Dataset/val_recut_512x680'
    imagesnums = 132
    checkpointdir = "./checkpoints/places2_512x680"
    maskpath = "./examples/places2_680x512/wooden_mask.png"
    recutoutput = './examples/recutoutput/'
    recutmaskedoutput = './examples/recutmasked/'
    FLAGS = ng.Config('inpaint.yml')
    # ng.get_gpus(1)
    # args, unknown = parser.parse_known_args()
    model = InpaintCAModel()
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        # NOTE(review): build_server_graph is called once per image inside this
        # loop, so each iteration adds nodes to the same graph — confirm the
        # memory impact over 131 images.
        for imageindex in range(1, imagesnums):
            imagename = 'Places365_' + str(imageindex).zfill(8) + '.png'
            maskedimagename = 'Places365_' + str(imageindex).zfill(
                8) + '_masked.png'
            outputimagename = 'Places365_' + str(imageindex).zfill(
                8) + '_output.png'
            imagepath = os.path.join(baserecutpath, imagename)
            outputpath = os.path.join(recutoutput, outputimagename)
            maskedimagepath = os.path.join(recutmaskedoutput, maskedimagename)
            image = cv2.imread(imagepath)
            mask = cv2.imread(maskpath)
            h, w, _ = image.shape
            # Resize the mask to the image; dsize takes precedence over fx/fy.
            mask = cv2.resize(mask, (w, h), fx=0.5, fy=0.5)
            # White out the masked region and save the masked input for reference.
            inputimage = image * ((255 - mask) // 255) + mask
            cv2.imwrite(maskedimagepath, inputimage.astype(np.uint8))
            assert image.shape == mask.shape
            # Crop image into grid*grid cells; '//' is floor division.
            grid = 8
            image = image[:h // grid * grid, :w // grid * grid, :]
            mask = mask[:h // grid * grid, :w // grid * grid, :]
            print('Shape of image: {}'.format(image.shape))
            image = np.expand_dims(image, 0)
            mask = np.expand_dims(mask, 0)
            # Image and mask are concatenated along the width axis for the model.
            input_image = np.concatenate([image, mask], axis=2)
            input_image = tf.constant(input_image, dtype=tf.float32)
            output = model.build_server_graph(FLAGS, input_image)
            # Rescale [-1, 1] -> [0, 255], reverse channels, clamp to uint8.
            output = (output + 1.) * 127.5
            output = tf.reverse(output, [-1])
            output = tf.saturate_cast(output, tf.uint8)
            # load pretrained model
            vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            assign_ops = []
            for var in vars_list:
                vname = var.name
                from_name = vname
                var_value = tf.contrib.framework.load_variable(
                    checkpointdir, from_name)
                assign_ops.append(tf.assign(var, var_value))
            sess.run(assign_ops)
            print('Model loaded.')
            result = sess.run(output)
            cv2.imwrite(outputpath, result[0][:, :, ::-1])
else: _, _, losses = model.build_graph_with_losses(FLAGS, images, FLAGS, reuse=True) if loss_type == 'g': return losses['g_loss'] elif loss_type == 'd': return losses['d_loss'] else: raise ValueError('loss type is not supported.') if __name__ == "__main__": # training data FLAGS = ng.Config('inpaint.yml') img_shapes = FLAGS.img_shapes with open(FLAGS.data_flist[FLAGS.dataset][0]) as f: fnames = f.read().splitlines() if FLAGS.guided: fnames = [(fname, fname[:-4] + '_edge.jpg') for fname in fnames] img_shapes = [img_shapes, img_shapes] data = ng.data.DataFromFNames(fnames, img_shapes, random_crop=FLAGS.random_crop, nthreads=FLAGS.num_cpus_per_job) images = data.data_pipeline(FLAGS.batch_size) # main model model = InpaintCAModel() g_vars, d_vars, losses = model.build_graph_with_losses(FLAGS, images) # validation images
def deepfillbatch(image_height, image_width, checkpoint_dir, img_mask_txt, outputdir):
    """Batch inpainting: build the graph once on a fixed-size placeholder,
    then stream (image, mask) path pairs from a text file through it.

    Args:
        image_height: height all images and masks are resized to.
        image_width: width all images and masks are resized to.
        checkpoint_dir: directory of the pretrained TF checkpoint.
        img_mask_txt: text file with one 'imagepath,maskpath' pair per line.
        outputdir: directory where results are written (created if missing).
    """
    FLAGS = ng.Config('./deepfill/inpaint.yml')
    ng.get_gpus(1)
    # os.environ['CUDA_VISIBLE_DEVICES'] =''
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)
    model = InpaintCAModel()
    # Width is image_width * 2: image and mask are concatenated along axis 2.
    input_image_ph = tf.placeholder(
        tf.float32, shape=(1, image_height, image_width * 2, 3))
    output = model.build_server_graph(FLAGS, input_image_ph)
    # Rescale [-1, 1] -> [0, 255], reverse channels, clamp to uint8.
    output = (output + 1.) * 127.5
    output = tf.reverse(output, [-1])
    output = tf.saturate_cast(output, tf.uint8)
    # Restore the pretrained weights once, before the streaming loop.
    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []
    for var in vars_list:
        vname = var.name
        from_name = vname
        var_value = tf.contrib.framework.load_variable(
            checkpoint_dir, from_name)
        assign_ops.append(tf.assign(var, var_value))
    sess.run(assign_ops)
    print('Model loaded.')
    t = time.time()
    with open(img_mask_txt, 'r') as f:
        while True:
            line = f.readline()  # read one full line of data
            if not line:
                break
            imagepath, maskpath = line.replace('\n', '').replace('\r', '').split(',')
            print(imagepath)
            print(maskpath)
            if not os.path.exists(outputdir):
                os.mkdir(outputdir)
            outputpath = os.path.join(outputdir, os.path.basename(imagepath))
            image = cv2.imread(imagepath)
            mask = cv2.imread(maskpath)
            print(image.shape)
            print(mask.shape)
            # Force both to the placeholder's fixed size.
            image = cv2.resize(image, (image_width, image_height))
            mask = cv2.resize(mask, (image_width, image_height))
            # cv2.imwrite(maskedimg, image*(1-mask/255.) + mask)
            # # continue
            # image = np.zeros((128, 256, 3))
            # mask = np.zeros((128, 256, 3))
            assert image.shape == mask.shape
            h, w, _ = image.shape
            # NOTE(review): grid is 4 here rather than 8 as elsewhere — confirm.
            grid = 4
            image = image[:h // grid * grid, :w // grid * grid, :]
            mask = mask[:h // grid * grid, :w // grid * grid, :]
            print('Shape of image: {}'.format(image.shape))
            image = np.expand_dims(image, 0)
            mask = np.expand_dims(mask, 0)
            input_image = np.concatenate([image, mask], axis=2)
            # Feed through the pre-built graph (no per-image graph rebuild).
            result = sess.run(output, feed_dict={input_image_ph: input_image})
            print('Processed: {}'.format(outputpath))
            cv2.imwrite(outputpath, result[0][:, :, ::-1])
    print('Time total: {}'.format(time.time() - t))