Example #1
def freeze(ckpt_path):
    image = tf.placeholder(tf.float32, shape=(1, None, None, 3), name='image')
    mask = tf.placeholder(tf.float32, shape=(1, None, None, 3), name='mask')
    model = InpaintCAModel()
    # stack image and mask along the width axis to form the model input
    input_image = tf.concat([image, mask], axis=2)

    output = model.build_server_graph(input_image)
    output = (output + 1.) * 127.5
    output = tf.reverse(output, [-1])
    output = tf.saturate_cast(output, tf.uint8)
    output = tf.add(output, 0, name='output')  # no-op add that gives the output tensor a stable node name for freezing
 
    init_op = tf.global_variables_initializer()
 
    restore_saver = tf.train.Saver()
    
    with tf.Session() as sess:
        sess.run(init_op)
        restore_saver.restore(sess, ckpt_path)
        frozen_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
                                                                        output_node_names=['output'])
        
        path = os.path.dirname(ckpt_path)
        pb_path = os.path.join(path, 'deepfill.pb')
        with open(pb_path, 'wb') as f:
            f.write(frozen_graph_def.SerializeToString())
    print("frozen model path: {}".format(pb_path))
Example #2
    def __init__(self, checkpoint_dir):
        model = InpaintCAModel()
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        input_image_ph = tf.placeholder(tf.float32, shape=(1, 256, 512, 3))
        output = model.build_server_graph(input_image_ph, reuse=tf.AUTO_REUSE)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # === END OF BUILD GRAPH ===
        sess = tf.Session(config=sess_config)

        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                checkpoint_dir, from_name)
            assign_ops.append(tf.assign(var, var_value))
        # sess.run(assign_ops)
        self.sess = sess
        self.graph = output
        self.placeholder = input_image_ph
        self.assign_ops = assign_ops
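
A hedged usage sketch for the class above (only its __init__ is shown; the class name Inpainter and the file names are assumptions): the weights are loaded lazily, so the stored assign_ops must be run once before inference.

import cv2
import numpy as np

painter = Inpainter('model_logs/release_places2_256')  # hypothetical class name and path
painter.sess.run(painter.assign_ops)                   # load checkpoint weights once
image = cv2.imread('input.png')                        # expected 256 x 256 x 3
mask = cv2.imread('mask.png')                          # expected 256 x 256 x 3
feed = np.concatenate([np.expand_dims(image, 0),
                       np.expand_dims(mask, 0)], axis=2).astype(np.float32)  # 1 x 256 x 512 x 3
result = painter.sess.run(painter.graph,
                          feed_dict={painter.placeholder: feed})
cv2.imwrite('output.png', result[0][:, :, ::-1])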
Example #3
def setup(opts):
    global g
    global sess
    global input_image
    model = InpaintCAModel()
    g = tf.get_default_graph()
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(graph=g, config=sess_config)
    input_image = tf.placeholder(tf.float32, shape=(1, 256, 256 * 2, 3))
    output_image = model.build_server_graph(input_image)
    output_image = (output_image + 1.) * 127.5
    output_image = tf.reverse(output_image, [-1])
    output_image = tf.saturate_cast(output_image, tf.uint8)
    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []
    for var in vars_list:
        vname = var.name
        from_name = vname
        var_value = tf.contrib.framework.load_variable(opts['checkpoint_dir'],
                                                       from_name)
        assign_ops.append(tf.assign(var, var_value))
    sess.run(assign_ops)
    print('Model loaded.')
    return output_image
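
A hedged usage sketch for setup() above (not part of the original; the checkpoint path and file names are placeholders): the returned output tensor is evaluated in the module-level session with a 256x256 image and mask.

import cv2
import numpy as np

output_image = setup({'checkpoint_dir': 'model_logs/release_places2_256'})
image = cv2.imread('input.png')   # expected 256 x 256 x 3
mask = cv2.imread('mask.png')     # expected 256 x 256 x 3
feed = np.concatenate([np.expand_dims(image, 0),
                       np.expand_dims(mask, 0)], axis=2).astype(np.float32)
result = sess.run(output_image, feed_dict={input_image: feed})
cv2.imwrite('output.png', result[0][:, :, ::-1])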
Example #4
    def __init__(self,
                 patch_size=512,
                 saved_model_path='./model_logs/release_places2_256'):
        '''
        Saved model weights url:
            https://drive.google.com/drive/folders/1y7Irxm3HSHGvp546hZdAZwuNmhLUVcjO
        '''
        self.FLAGS = ng.Config('inpaint.yml')

        self.model = InpaintCAModel()
        self.checkpoint_dir = saved_model_path

        self.patch_size = patch_size

        self.sess_config = tf.ConfigProto()
        self.sess_config.gpu_options.allow_growth = True
        self._inpaint_input_placeholder = tf.placeholder(
            shape=(1, self.patch_size, self.patch_size * 2, 3),
            dtype=tf.float32)
        self.sess = tf.Session(config=self.sess_config)

        output = self.model.build_server_graph(self.FLAGS,
                                               self._inpaint_input_placeholder,
                                               reuse=tf.AUTO_REUSE)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        self._cached_inpaint_output = output

        self.load_model()
Example #5
def generative_inpainting(image, mask):
    h, w, _ = image.shape
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))
    model = InpaintCAModel()
    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    input_image = np.concatenate([image, mask], axis=2)

    checkpoint_dir = 'generative_inpainting/model_logs/release_imagenet_256/'

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(input_image)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                checkpoint_dir, from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)
        result = sess.run(output)
    tf.reset_default_graph()
    return result
Example #6
def Output(img_in, mask_in, img_out):
    print("import from test:", img_in, mask_in, img_out)

    #if __name__ == "__main__":
    FLAGS = ng.Config('inpaint.yml')
    # ng.get_gpus(1)

    #args, unknown = parser.parse_known_args()

    model = InpaintCAModel()
    #image = cv2.imread(args.image)
    image = cv2.imread("examples/places356/" + img_in)
    #mask = cv2.imread(args.mask)
    mask = cv2.imread("places356_mask/" + mask_in)
    # mask = cv2.resize(mask, (0,0), fx=0.5, fy=0.5)

    assert image.shape == mask.shape

    h, w, _ = image.shape
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))

    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    input_image = np.concatenate([image, mask], axis=2)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(FLAGS, input_image)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname

            #var_value = tf.contrib.framework.load_variable(args.checkpoint_dir, from_name)
            var_value = tf.contrib.framework.load_variable(
                "model_logs/release_places2_256", from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.')
        result = sess.run(output)
        #cv2.imwrite(args.output, result[0][:, :, ::-1])
        #cv2.imshow("result", result[0][:, :, ::-1])
        cv2.imwrite("examples/places356/" + img_out, result[0][:, :, ::-1])
        show1 = cv2.imread("examples/places356/" + img_in)
        show2 = cv2.imread("examples/places356/" + img_out)
        show = np.hstack([show1, show2])
        cv2.imshow("result", show)
Example #7
def inpaint(arg_image_dir, arg_mask_dir, arg_checkpoint_dir, arg_output_dir):
    tf.reset_default_graph()
    FLAGS = ng.Config('inpaint.yml')
    # ng.get_gpus(1)

    model = InpaintCAModel()
    for arg_image in os.listdir(arg_image_dir):
        arg_mask = arg_image  # assume the mask has the same name as the image
        if os.path.exists(arg_output_dir + arg_image):
            print("note |", arg_image, "already inpainted.")
            continue
        if os.path.exists(arg_image_dir +
                          arg_image) and os.path.exists(arg_mask_dir +
                                                        arg_mask):
            pass
        else:
            continue

        image = cv2.imread(os.path.join(arg_image_dir, arg_image))
        mask = cv2.imread(os.path.join(arg_mask_dir, arg_mask))
        name = arg_image

        assert image.shape == mask.shape

        h, w, _ = image.shape
        grid = 8
        image = image[:h // grid * grid, :w // grid * grid, :]
        mask = mask[:h // grid * grid, :w // grid * grid, :]
        print('Shape of image: {}'.format(image.shape))

        image = np.expand_dims(image, 0)
        mask = np.expand_dims(mask, 0)
        input_image = np.concatenate([image, mask], axis=2)

        tf.reset_default_graph()
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        with tf.Session(config=sess_config) as sess:
            input_image = tf.constant(input_image, dtype=tf.float32)
            output = model.build_server_graph(FLAGS, input_image)
            output = (output + 1.) * 127.5
            output = tf.reverse(output, [-1])
            output = tf.saturate_cast(output, tf.uint8)
            # load pretrained model
            vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            assign_ops = []
            for var in vars_list:
                vname = var.name
                from_name = vname
                var_value = tf.contrib.framework.load_variable(
                    arg_checkpoint_dir, from_name)
                assign_ops.append(tf.assign(var, var_value))
            sess.run(assign_ops)
            print('Model loaded.')
            result = sess.run(output)
            cv2.imwrite(arg_output_dir + name, result[0][:, :, ::-1])
Example #8
def inpaintP(name):
    CUR_DIR = os.path.join(WORK_DIR, name)
    TMASK_DIR = os.path.join(WORK_DIR, name, "tmask")
    INPAINT_MODEL_PATH = os.path.join(ROOT_DIR,
                                      "model_logs/release_places2_256")

    FLAGS = ng.Config('inpaint.yml')
    model = InpaintCAModel()

    image = cv.imread(os.path.join(CUR_DIR, f"{name}.png"))
    mask = cv.imread(os.path.join(TMASK_DIR, "mask.png"))
    filename = f'4#_{name}.png'

    assert image.shape == mask.shape

    h, w, _ = image.shape
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))

    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    input_image = np.concatenate([image, mask], axis=2)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.5
    tf2 = tf.Graph()
    with tf2.as_default():
        with tf.Session(config=sess_config) as sess:
            input_image = tf.constant(input_image, dtype=tf.float32)

            output = model.build_server_graph(FLAGS, input_image)
            output = (output + 1.) * 127.5
            output = tf.reverse(output, [-1])
            output = tf.saturate_cast(output, tf.uint8)

            # load pretrained model
            vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            assign_ops = []
            for var in vars_list:
                vname = var.name
                from_name = vname
                var_value = tf.contrib.framework.load_variable(
                    INPAINT_MODEL_PATH, from_name)
                assign_ops.append(tf.assign(var, var_value))

            sess.run(assign_ops)
            print('Model loaded.')
            result = sess.run(output)
            cv.imwrite(os.path.join(CUR_DIR, filename), result[0][:, :, ::-1])
            print('Image has been made')

    return 0
Example #9
def main():
    ng.get_gpus(1)
    args = parser.parse_args()

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)

    model = InpaintCAModel()
    input_image_ph = tf.placeholder(tf.float32,
                                    shape=(1, IMG_HEIGHT, IMG_WIDTH * 2, 3))
    output = model.build_server_graph(input_image_ph)
    output = (output + 1.) * 127.5
    output = tf.reverse(output, [-1])
    output = tf.saturate_cast(output, tf.uint8)
    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []
    for var in vars_list:
        vname = var.name
        from_name = vname
        var_value = tf.contrib.framework.load_variable(args.checkpoint_dir,
                                                       from_name)
        assign_ops.append(tf.assign(var, var_value))
    sess.run(assign_ops)
    print('Model loaded.')
    while True:
        try:
            new_jobs = [
                f for f in listdir(SYNC_FLODER_NAME) if f.startswith("go_")
            ]
            if len(new_jobs) == 0:
                sleep(0.1)
                continue

            job_id = new_jobs[0][3:]
            mask_name = open(SYNC_FLODER_NAME + new_jobs[0]).readline().strip()

            os.remove(SYNC_FLODER_NAME + new_jobs[0])  #delete go_ file
            print("Processing image: ", job_id)
            pic_folder = FILES_FLODER_NAME + job_id + "/"

            process_img_new(img_name=pic_folder + "init",
                            mask_name=mask_name,
                            result_name=pic_folder + "out.png",
                            sess=sess,
                            output=output,
                            input_image_ph=input_image_ph)

            open(SYNC_FLODER_NAME + "done_" + job_id,
                 'a').close()  #create done_ file
            print("Job {} done.".format(job_id))

        except KeyboardInterrupt:
            sys.exit()
Example #10
def test_single_image(image_path, mask_path, output_path, image_height,
                      image_width, ckpt_dir):
    # generate input image
    input_image = get_input_image(image_path, mask_path, image_height,
                                  image_width)

    # start sess configuration
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)

    # inpaint model
    model = InpaintCAModel()

    # input node placeholder
    input_image_ph = tf.placeholder(tf.float32,
                                    name="input",
                                    shape=(1, image_height, image_width * 2,
                                           3))

    output = model.build_server_graph(input_image_ph)
    output = (output + 1.0) * 127.5
    output = tf.reverse(output, [-1])
    output = tf.saturate_cast(output, tf.uint8)

    # load variables from checkpoint
    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []
    for var in vars_list:
        vname = var.name
        from_name = vname
        var_value = tf.contrib.framework.load_variable(ckpt_dir, from_name)
        assign_ops.append(tf.assign(var, var_value))

    sess.run(assign_ops)

    # input, output
    print("input = {}".format(input_image_ph))
    print("output = {}".format(output))

    # run model
    print("Running model ...")
    result = sess.run(output, feed_dict={input_image_ph: input_image})
    print("Running model done.")

    # save output image to file
    print("Saving output to {}".format(output_path))
    cv2.imwrite(output_path, result[0][:, :, ::-1])
    print("Saving output done.")

    save_tensorboard_log(sess, "tensorboard")

    return sess
Example #11
def generate_counterfactual(image_fp,
                            mask_fp,
                            output_fp,
                            checkpoint_dir,
                            model_id=None):
    try:
        FLAGS = ng.Config('config/inpaint.yml')
    except AssertionError:
        raise ValueError('check directory above')
    # ng.get_gpus(1)
    # args, unknown = parser.parse_known_args()
    model = InpaintCAModel()
    image = cv2.imread(image_fp)
    mask = cv2.imread(mask_fp)
    # mask = cv2.resize(mask, (0,0), fx=0.5, fy=0.5)

    assert image.shape == mask.shape

    h, w, _ = image.shape
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]

    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    input_image = np.concatenate([image, mask], axis=2)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(FLAGS, input_image)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                checkpoint_dir, from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.')
        result = sess.run(output)
        cv2.imwrite(output_fp, result[0][:, :, ::-1])
        print(f'IMAGE WROTE TO {output_fp}\n\n\n')
    tf.reset_default_graph()
Example #12
def downloadFile():
    start = time.time()
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    args = parser.parse_args()

    model = InpaintCAModel()
    image = cv2.imread(args.image)
    mask = cv2.imread(args.mask)

    assert image.shape == mask.shape

    h, w, _ = image.shape
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))

    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    input_image = np.concatenate([image, mask], axis=2)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True

    input_image = tf.constant(input_image, dtype=tf.float32)
    output = model.build_server_graph(input_image)
    output = (output + 1.) * 127.5
    output = tf.reverse(output, [-1])
    output = tf.saturate_cast(output, tf.uint8)
    # load pretrained model
    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []

    for var in vars_list:
        vname = var.name
        from_name = vname
        var_value = tf.contrib.framework.load_variable(args.checkpoint_dir,
                                                       from_name)
        assign_ops.append(tf.assign(var, var_value))

    with tf.Session(config=sess_config) as sess:
        sess.run(assign_ops)
        print('Model loaded.')
        result = sess.run(output)
        cv2.imwrite(args.output, result[0][:, :, ::-1])
    print(time.time() - start)
    return send_file('output.png')
Example #13
    def __init__(self, model_path, max_size=768):
        self.max_size = max_size
        # build the graph
        self.model = InpaintCAModel()
        self.input_image = tf.placeholder(name='input_image',
                                          shape=[1, max_size, max_size * 2, 3],
                                          dtype=tf.float32)
        net_output = self.model.build_server_graph(self.input_image)
        net_output = (net_output + 1.) * 127.5
        self.output_image = tf.saturate_cast(net_output, tf.uint8)
        # restore pretrained model
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=sess_config)
        saver = tf.train.Saver([v for v in tf.global_variables()])
        saver.restore(self.sess, model_path)
Example #14
def fillVideo(img, msk, i):
    checkpoint_dir = 'model_logs/snap-0'
    output_dir = 'output/'

    ng.get_gpus(0)
    model = InpaintCAModel()
    image = cv2.imread(img)
    # image = cv2.imread('./examples/places2/canyon_input.png')
    print(image.shape)
    # cv2.imshow(image)
    # cv2.waitKey(0)
    mask = cv2.imread(msk)

    assert image.shape == mask.shape

    h, w, _ = image.shape
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))

    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    input_image = np.concatenate([image, mask], axis=2)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(input_image)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                checkpoint_dir, from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.')
        result = sess.run(output)
        output_name = output_dir + 'output_' + str(i) + '.png'
        cv2.imwrite(output_name, result[0][:, :, ::-1])
Example #15
def pixel_fill(image, mask):
    import tensorflow as tf2
    import neuralgym as ng
    from inpaint_model import InpaintCAModel

    if image.ndim > mask.ndim:
        mask = np.dstack([mask] * image.shape[2])
    assert image.shape == mask.shape
    model = InpaintCAModel()
    FLAGS = ng.Config('inpaint.yml')

    h, w, _ = image.shape
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))

    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    input_image = np.concatenate([image, mask], axis=2)

    sess_config = tf2.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf2.Session(config=sess_config) as sess:
        input_image = tf2.constant(input_image, dtype=tf2.float32)
        output = model.build_server_graph(FLAGS, input_image)
        output = (output + 1.) * 127.5
        output = tf2.reverse(output, [-1])
        output = tf2.saturate_cast(output, tf2.uint8)
        # load pretrained model
        vars_list = tf2.get_collection(tf2.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        print("checkpoint_dir = ", checkpoint_dir)
        for var in vars_list:
            vname = var.name
            from_name = vname
            if "inpaint_net" in var.name:  # or else is going to mix with mrcnn
                var_value = tf2.contrib.framework.load_variable(
                    checkpoint_dir, from_name)
                assign_ops.append(tf2.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.' * 10)
        result = sess.run(output)
    sess.close()
    tf2.reset_default_graph()
    return result[0][:, :, ::-1]
Example #16
    def generate_img_mask(self, image, mask):

        mask, image = try_to_adjust_to_3c_mask(image, mask)

        assert (image.shape == mask.shape
                ), "Mask shape {0} != img shape {1}".format(
                    mask.shape, image.shape)

        # ng.get_gpus(1)

        model = InpaintCAModel()

        h, w, _ = image.shape
        grid = 8
        image = image[:h // grid * grid, :w // grid * grid, :]
        mask = mask[:h // grid * grid, :w // grid * grid, :]

        image = np.expand_dims(image, 0)
        mask = np.expand_dims(mask, 0)
        input_image = np.concatenate([image, mask], axis=2)

        temp_graph = tf.Graph()
        with temp_graph.as_default():
            sess_config = tf.ConfigProto()
            sess_config.gpu_options.allow_growth = True

            with tf.Session(config=sess_config) as sess:
                input_image = tf.constant(input_image, dtype=tf.float32)
                output = model.build_server_graph(input_image)
                output = (output + 1.) * 127.5
                output = tf.reverse(output, [-1])
                output = tf.saturate_cast(output, tf.uint8)
                # load pretrained model
                vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
                assign_ops = []
                for var in vars_list:
                    vname = var.name
                    from_name = vname
                    var_value = tf.contrib.framework.load_variable(
                        self.generative_model_path, from_name)
                    assign_ops.append(tf.assign(var, var_value))
                sess.run(assign_ops)
                result = sess.run(output)
                out_img = result[0][:, :, ::-1]
            return [out_img]
Example #17
def complete(image_file):
    ng.get_gpus(1,verbose=False)
    tf.reset_default_graph()
    model = InpaintCAModel()
    image = cv2.imread(os.path.join(args.image_dir, image_file))
    mask = cv2.imread(os.path.join(args.mask_dir, image_file))
    assert image.shape == mask.shape

    h, w, _ = image.shape
    grid = 8
    image_rs = image[:h // grid * grid, :w // grid * grid, :]
    mask_rs = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image_rs.shape))

    image_rs = np.expand_dims(image_rs, 0)
    mask_rs = np.expand_dims(mask_rs, 0)
    input_image = np.concatenate([image_rs, mask_rs], axis=2)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(input_image)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(args.checkpoint_dir, from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)

        result = sess.run(output)

        image[:h // grid * grid, :w // grid * grid, :] = result[0][:, :, ::-1]
        save_value = cv2.imwrite(os.path.join(args.output_dir, image_file), image)
        print("Image saved:", save_value)
        sess.close()
Example #18
def test_model(image,
               mask,
               output_dir='output_images/output.png',
               checkpoint_dir='model_logs/release_places2_256'):
    ng.get_gpus(1)
    model = InpaintCAModel()

    assert image.shape == mask.shape

    h, w, _ = image.shape
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))

    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    input_image = np.concatenate([image, mask], axis=2)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(input_image)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                checkpoint_dir, from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.')
        result = sess.run(output)
        # cv2.imwrite(output_dir, result[0][:, :, ::-1])
        # plt.imsave('out.jpg', result[0][:, :, ::-1])
        return result[0]
Example #19
    def __init__(self, batch_size, checkpoint_dir):
        self.model = InpaintCAModel()
        self.images_ph = tf.placeholder(tf.float32,
                                        shape=[batch_size, 256, 512, 3])

        # with tf.device('/gpu:0'):
        # with tf.device('/cpu:0'):
        # for i, d in enumerate(['/gpu:0', '/gpu:1', '/gpu:2', '/gpu:3']):
        #     with tf.device(d):
        output = self.model.build_server_graph(self.images_ph)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        self.output = output

        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                checkpoint_dir, from_name)
            self.assign_ops.append(tf.assign(var, var_value))
        # print('Model loaded.')

        self.pth_mean = np.ones((1, 3, 1, 1), dtype='float32')
        self.pth_mean[0, :, 0, 0] = np.array([0.485, 0.456, 0.406])
        self.pth_std = np.ones((1, 3, 1, 1), dtype='float32')
        self.pth_std[0, :, 0, 0] = np.array([0.229, 0.224, 0.225])
        self.upsample = torch.nn.Upsample(size=(256, 256), mode='bilinear')
        self.downsample = torch.nn.Upsample(size=(224, 224), mode='bilinear')

        # Create a session
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        # sess_config.allow_soft_placement = True
        # sess_config.log_device_placement = True
        self.sess = tf.Session(config=sess_config)

        self.sess.run(self.assign_ops)
Example #20
def inpainting_api(image, mask):
    FLAGS = ng.Config('inpaint.yml')
    tf.reset_default_graph()

    model = InpaintCAModel()
    # image = cv2.imread(img_path)
    # mask = cv2.imread(mask_path)
    # cv2.imwrite('new.png', image - mask)
    # mask = cv2.resize(mask, (0,0), fx=0.5, fy=0.5)

    assert image.shape == mask.shape

    image = crop(image)
    mask = crop(mask)
    print('Shape of image: {}'.format(image.shape))

    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    input_image = np.concatenate([image, mask], axis=2)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = model.build_server_graph(FLAGS, input_image)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                './model_logs/inpaint', from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.')
        result = sess.run(output)
        return result[0][:, :, ::-1]
Example #21
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, default='gated_convolution.yml',
                        help='path to config file')
    parser.add_argument('--model', type=str, default='v1',
                        help='model type: v1 or v2')
    parser.add_argument('--snapshot', type=str, default='model.npz',
                        help='path to snapshot')
    parser.add_argument('--name', type=str, default='result.png',
                        help='file name to save results')

    args = parser.parse_args()

    config = Config(args.config_path)

    if args.model == "v1":
        config.FREE_FORM = False
        inpaint_model = InpaintCAModel(config)
    elif args.model == "v2":
        inpaint_model = InpaintGCModel(config)
    else:
        assert False, "Model name '{args.model}' is invalid."

    if config.GPU_ID >= 0:
        chainer.cuda.get_device(config.GPU_ID).use()
        inpaint_model.to_gpu()

    if os.path.exists(args.snapshot):
        serializers.load_npz(args.snapshot, inpaint_model)
    else:
        assert False, "Flie '{args.snapshot}' does not exist."

    xp = inpaint_model.xp

    # training data
    test_dataset = Dataset(config, test=True, return_mask=True)
    test_iter = chainer.iterators.SerialIterator(test_dataset, 8)

    batch_and_mask = test_iter.next()
    batch_data, mask_data = zip(*batch_and_mask)
    batch_data = xp.array(batch_data)
    mask = xp.array(mask_data)

    batch_pos = batch_data / 127.5 - 1.
    # edges = None
    batch_incomplete = batch_pos * (1. - mask[:, :1])
    # inpaint
    with chainer.using_config("train", False), chainer.using_config("enable_backprop", False):
        x1, x2, offset_flow = inpaint_model.inpaintnet(batch_incomplete, mask, config)

    batch_complete = x2 * mask[:, :1] + batch_incomplete * (1. - mask[:, :1])
    # visualization
    viz_img = [batch_pos, batch_incomplete - mask[:, 1:] + mask[:, :1], batch_complete.data]

    batch_w = len(viz_img)
    batch_h = viz_img[0].shape[0]
    viz_img = xp.concatenate(viz_img, axis=0)
    viz_img = batch_postprocess_images(viz_img, batch_w, batch_h)
    viz_img = cuda.to_cpu(viz_img)
    Image.fromarray(viz_img).save(args.name)
Example #22
def deepfill_model(checkpoint_dir):
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)
    model = InpaintCAModel()
    input_image_ph = tf.placeholder(tf.float32, shape=(1, None, None, 3))
    output = model.build_server_graph(input_image_ph, dynamic=True)
    output = (output + 1.) * 127.5
    output = tf.reverse(output, [-1])
    output = tf.saturate_cast(output, tf.uint8)
    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []
    for var in vars_list:
        vname = var.name
        from_name = vname
        var_value = tf.contrib.framework.load_variable(checkpoint_dir,
                                                       from_name)
        assign_ops.append(tf.assign(var, var_value))
    sess.run(assign_ops)
    print('Model loaded.')

    return input_image_ph, output, sess
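
A hedged usage sketch for deepfill_model() above (not part of the original; paths are placeholders): the image and mask are cropped to a multiple of 8 and concatenated along the width axis, as in the other examples.

import cv2
import numpy as np

input_image_ph, output, sess = deepfill_model('model_logs/release_places2_256')
image = cv2.imread('input.png')
mask = cv2.imread('mask.png')
h, w, _ = image.shape
grid = 8
image = image[:h // grid * grid, :w // grid * grid, :]
mask = mask[:h // grid * grid, :w // grid * grid, :]
feed = np.concatenate([np.expand_dims(image, 0),
                       np.expand_dims(mask, 0)], axis=2).astype(np.float32)
result = sess.run(output, feed_dict={input_image_ph: feed})
cv2.imwrite('output.png', result[0][:, :, ::-1])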
Example #23
class InpaintModel:
    def __init__(self, model_path, max_size=768):
        self.max_size = max_size
        # build the graph
        self.model = InpaintCAModel()
        self.input_image = tf.placeholder(name='input_image',
                                          shape=[1, max_size, max_size * 2, 3],
                                          dtype=tf.float32)
        net_output = self.model.build_server_graph(self.input_image)
        net_output = (net_output + 1.) * 127.5
        self.output_image = tf.saturate_cast(net_output, tf.uint8)
        # restore pretrained model
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=sess_config)
        saver = tf.train.Saver([v for v in tf.global_variables()])
        saver.restore(self.sess, model_path)

    def inpaint(self, image, mask):
        assert (image.shape == mask.shape)
        h, w, _ = image.shape
        assert (h <= self.max_size and w <= self.max_size)
        grid = 8
        ih, iw = h // grid * grid, w // grid * grid

        input_im = np.zeros((self.max_size, self.max_size, 3), dtype=np.uint8)
        input_mask = np.zeros((self.max_size, self.max_size, 3),
                              dtype=np.uint8)
        input_im[:ih, :iw, :] = image[:ih, :iw, :]
        input_mask[:ih, :iw, :] = mask[:ih, :iw, :]

        input_im = np.expand_dims(input_im, 0)
        input_mask = np.expand_dims(input_mask, 0)
        input_image = np.concatenate([input_im, input_mask], axis=2)

        output, = self.sess.run([self.output_image],
                                feed_dict={self.input_image: input_image})
        image[:ih, :iw, :] = output[0][:ih, :iw, :]
        return image
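
A hedged usage sketch for the InpaintModel class above (not part of the original; the checkpoint prefix is a placeholder and must be a prefix accepted by tf.train.Saver.restore):

import cv2

model = InpaintModel('model_logs/release_places2_256/snap-0', max_size=768)
image = cv2.imread('input.png')   # any size up to max_size x max_size
mask = cv2.imread('mask.png')     # same shape as image, 255 marks holes
result = model.inpaint(image, mask)
# note: unlike most other examples, this class does not reverse the channel
# order, so flip the last axis before writing if colors look swapped
cv2.imwrite('output.png', result)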
Example #24
if __name__ == "__main__":
    # training data
    FLAGS = ng.Config('inpaint.yml')
    img_shapes = FLAGS.img_shapes
    with open(FLAGS.data_flist[FLAGS.dataset][0]) as f:
        fnames = f.read().splitlines()
    if FLAGS.guided:
        fnames = [(fname, fname[:-4] + '_edge.jpg') for fname in fnames]
        img_shapes = [img_shapes, img_shapes]
    data = ng.data.DataFromFNames(fnames,
                                  img_shapes,
                                  random_crop=FLAGS.random_crop,
                                  nthreads=FLAGS.num_cpus_per_job)
    images = data.data_pipeline(FLAGS.batch_size)
    # main model
    model = InpaintCAModel()
    g_vars, d_vars, losses = model.build_graph_with_losses(FLAGS, images)
    # validation images
    if FLAGS.val:
        with open(FLAGS.data_flist[FLAGS.dataset][1]) as f:
            val_fnames = f.read().splitlines()
        if FLAGS.guided:
            val_fnames = [(fname, fname[:-4] + '_edge.jpg')
                          for fname in val_fnames]
        # progress monitor by visualizing static images
        for i in range(FLAGS.static_view_size):
            static_fnames = val_fnames[i:i + 1]
            static_images = ng.data.DataFromFNames(
                static_fnames,
                img_shapes,
                nthreads=1,
Example #25
    image = image.astype(dtype)
    image = image[np.newaxis, ...]
    fo = rasterio.open(filename, 'w', **kwargs)
    fo.write(image)
    fo.close()
    
if __name__ == "__main__":
    FLAGS = ng.Config('inpaint.yml')
    # ng.get_gpus(1)
    args, unknown = parser.parse_known_args()

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)

    model = InpaintCAModel()    
    image, meta = read_tif_file(args.image)
    print('Read mask form {}'.format(args.mask))
    mask = cv2.imread(args.mask, -1)
    if len(mask.shape) == 3:
        mask = mask[:, :, 0]
    im_min = image.min()
    im_max = image.max()
    image = cv2.normalize(image, None, 255, 0, cv2.NORM_MINMAX, cv2.CV_32F)
    
    image_tiles = tiling_image(image, args.num_tiles_x, args.num_tiles_y)
    mask_tiles = tiling_image(mask, args.num_tiles_x, args.num_tiles_y)
    result = np.zeros(image.shape)
    ntiles = len(image_tiles)
    for i in range(ntiles):
        startY, endY = image_tiles[i][1]
Example #26
    config = ng.Config('inpaint.yml')
    if config.GPU_ID != -1:
        ng.set_gpus(config.GPU_ID)
    else:
        ng.get_gpus(config.NUM_GPUS)

    # training data
    with open(config.DATA_FLIST[config.DATASET][0]) as f:
        fnames = f.read().splitlines()
    data = ng.data.DataFromFNames(fnames,
                                  config.IMG_SHAPES,
                                  random_crop=config.RANDOM_CROP)
    images = data.data_pipeline(config.BATCH_SIZE)

    # main model
    model = InpaintCAModel()
    g_vars, d_vars, losses = model.build_graph_with_losses(images,
                                                           config=config)

    # validation images
    if config.VAL:
        with open(config.DATA_FLIST[config.DATASET][1]) as f:
            val_fnames = f.read().splitlines()
        # progress monitor by visualizing static images
        for i in range(config.STATIC_VIEW_SIZE):
            static_fnames = val_fnames[i:i + 1]
            static_images = ng.data.DataFromFNames(
                static_fnames,
                config.IMG_SHAPES,
                nthreads=1,
                random_crop=config.RANDOM_CROP).data_pipeline(1)
Example #27
parser = argparse.ArgumentParser()
parser.add_argument('--image', default='', type=str,
                    help='The filename of image to be completed.')
parser.add_argument('--mask', default='', type=str,
                    help='The filename of mask, value 255 indicates mask.')
parser.add_argument('--output', default='output.png', type=str,
                    help='Where to write output.')
parser.add_argument('--checkpoint_dir', default='', type=str,
                    help='The directory of tensorflow checkpoint.')


if __name__ == "__main__":
    ng.get_gpus(num_gpus=1, dedicated=False)
    args = parser.parse_args()

    model = InpaintCAModel()
    image = cv2.imread(args.image)
    mask = cv2.imread(args.mask)

    assert image.shape == mask.shape

    h, w, _ = image.shape
    grid = 8
    image = image[:h//grid*grid, :w//grid*grid, :]
    mask = mask[:h//grid*grid, :w//grid*grid, :]
    print('Shape of image: {}'.format(image.shape))

    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    input_image = np.concatenate([image, mask], axis=2)
Example #28
parser.add_argument('--checkpoint_dir',
                    default='',
                    type=str,
                    help='The directory of tensorflow checkpoint.')

if __name__ == "__main__":
    FLAGS = ng.Config('inpaint.yml')
    ng.get_gpus(1)
    # os.environ['CUDA_VISIBLE_DEVICES'] =''
    args = parser.parse_args()

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)

    model = InpaintCAModel()
    input_image_ph = tf.placeholder(tf.float32,
                                    shape=(1, args.image_height,
                                           args.image_width * 2, 3))
    output = model.build_server_graph(FLAGS, input_image_ph)
    output = (output + 1.) * 127.5
    output = tf.reverse(output, [-1])
    output = tf.saturate_cast(output, tf.uint8)
    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []
    for var in vars_list:
        vname = var.name
        from_name = vname
        var_value = tf.contrib.framework.load_variable(args.checkpoint_dir,
                                                       from_name)
        assign_ops.append(tf.assign(var, var_value))
Example #29
class Synthesizer:
    def __init__(self,
                 patch_size=512,
                 saved_model_path='./model_logs/release_places2_256'):
        '''
        Saved model weights url:
            https://drive.google.com/drive/folders/1y7Irxm3HSHGvp546hZdAZwuNmhLUVcjO
        '''
        self.FLAGS = ng.Config('inpaint.yml')

        self.model = InpaintCAModel()
        self.checkpoint_dir = saved_model_path

        self.patch_size = patch_size

        self.sess_config = tf.ConfigProto()
        self.sess_config.gpu_options.allow_growth = True
        self._inpaint_input_placeholder = tf.placeholder(
            shape=(1, self.patch_size, self.patch_size * 2, 3),
            dtype=tf.float32)
        self.sess = tf.Session(config=self.sess_config)

        output = self.model.build_server_graph(self.FLAGS,
                                               self._inpaint_input_placeholder,
                                               reuse=tf.AUTO_REUSE)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        self._cached_inpaint_output = output

        self.load_model()

    def load_model(self):
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(
                self.checkpoint_dir, from_name)
            assign_ops.append(tf.assign(var, var_value))

        _ = self.sess.run(assign_ops)

    def resize_image(self, image):
        image = resize(image,
                       output_shape=(self.patch_size, self.patch_size, 3))
        return image

    def get_background(self, image, mask, reference_mask):
        # image is ground_truth_image
        merged_mask = mask + reference_mask

        mask_3d = np.zeros((merged_mask.shape[0], merged_mask.shape[1], 3))
        mask_3d[:, :, 0] = merged_mask
        mask_3d[:, :, 1] = merged_mask
        mask_3d[:, :, 2] = merged_mask
        mask_3d = mask_3d * 255
        mask_3d = mask_3d.astype(np.uint8)
        image = img_as_ubyte(image)

        h, w, _ = image.shape
        grid = 8
        image = image[:h // grid * grid, :w // grid * grid, :]
        mask_3d = mask_3d[:h // grid * grid, :w // grid * grid, :]

        image = np.expand_dims(image, 0)
        mask_3d = np.expand_dims(mask_3d, 0)
        input_image = np.concatenate([image, mask_3d], axis=2)
        input_image = input_image.astype(np.float32)

        res = self.inpaint(input_image)
        return res

    def inpaint(self, input_image):
        result = self.sess.run(
            self._cached_inpaint_output,
            feed_dict={self._inpaint_input_placeholder: input_image})
        return result[0][:, :, ::-1]

    def tweak_foreground(self, image):
        """
            tweak foreground by applying random factor
        """
        tweaked = image * np.random.uniform(0.1, 2)
        tweaked = np.clip(tweaked, 0, 1)
        # new_image = image + tweaked
        # new_image *= (1.0/new_image.max())
        return tweaked

    def synthesize(self, image, mask, reference_mask):
        inpainted_background_image = self.get_background(
            image, mask, reference_mask)
        inpainted_background_image = self.resize_image(
            inpainted_background_image)

        background_image = img_as_ubyte(inpainted_background_image.copy())
        background_image[mask == True] = (0, 0, 0)

        foreground_object = image.copy()
        foreground_object[mask == False] = (0, 0, 0)
        foreground_object = self.tweak_foreground(foreground_object)

        background_image = background_image / 255.0
        synthesized_image = background_image + foreground_object

        return synthesized_image
Example #30
         count += 1
     if count == 1:
         fnames = f.read().splitlines()
     elif count == 2:
         fnames = [(l.split(' ')[0], l.split(' ')[1]) for l in f.read().splitlines()]
     elif count == 3:
         fnames = [(l.split(' ')[0], l.split(' ')[1], l.split(' ')[2]) for l in f.read().splitlines()]
     else:
         print('invalid data count')
         exit()
         
 data = ng.data.DataFromFNames(
     fnames, config.IMG_SHAPES, random_crop=config.RANDOM_CROP, gamma=config.GAMMA, exposure=config.EXPOSURE, random_flip = config.RANDOM_FLIP)
 images = data.data_pipeline(config.BATCH_SIZE)
 # main model
 model = InpaintCAModel()
 g_vars, d_vars, losses = model.build_graph_with_losses(
     images[0], config=config, exclusionmask=images[exclusionmask_index] if config.EXC_MASKS else None, mask=None if config.GEN_MASKS else images[mask_index])
 # validation images
 if config.VAL:
     with open(config.DATA_FLIST[config.DATASET][1]) as f:
         val_fnames = [(l.split(' ')[0], l.split(' ')[1]) for l in f.read().splitlines()]
     # progress monitor by visualizing static images
     for i in range(config.STATIC_VIEW_SIZE):
         static_fnames = val_fnames[i:i+1]
         static_images = ng.data.DataFromFNames(
             static_fnames, config.IMG_SHAPES, nthreads=1,
             random_crop=config.RANDOM_CROP, random_flip=config.RANDOM_FLIP).data_pipeline(1)
         static_inpainted_images = model.build_static_infer_graph(
             static_images[0], config, name='static_view/%d' % i, exclusionmask=images[exclusionmask_index] if config.EXC_MASKS else None)
 # training settings