Example #1
def main(checkpoint_dir, style_name):
    ckpt_dir = '../checkpoint/' + 'generator_' + style_name + '_weight'
    check_folder(ckpt_dir)

    placeholder = tf.placeholder(tf.float32, [1, None, None, 3],
                                 name='generator_input')
    with tf.variable_scope("generator", reuse=False):
        _ = generator.G_net(placeholder).fake

    generator_var = [
        var for var in tf.trainable_variables()
        if var.name.startswith('generator')
    ]
    saver = tf.train.Saver(generator_var)

    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        # load model
        ckpt = tf.train.get_checkpoint_state(
            checkpoint_dir)  # checkpoint file information
        if ckpt and ckpt.model_checkpoint_path:
            print(ckpt.model_checkpoint_path)
            ckpt_name = os.path.basename(
                ckpt.model_checkpoint_path)  # first line
            saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
            counter = ckpt_name.split('-')[-1]
            print(" [*] Success to read {}".format(ckpt_name))
        else:
            print(" [*] Failed to find a checkpoint")
            return
        info = save(saver, sess, ckpt_dir, style_name + '-' + counter)

        print(f'save over : {info} ')
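
A minimal sketch of how this export helper might be invoked; the checkpoint path and style name below are hypothetical placeholders, and check_folder, generator, tf, and save are assumed to be imported as in the snippet above.

# Hypothetical invocation; the checkpoint path and style name are placeholders.
if __name__ == '__main__':
    main(checkpoint_dir='checkpoint/AnimeGANv2_Hayao_lsgan_300_300_1_2_10_1',
         style_name='Hayao')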
Example #2
def test(checkpoint_dir,
         style_name,
         test_dir,
         if_adjust_brightness,
         img_size=[256, 256]):
    # tf.reset_default_graph()
    test_files = glob('{}/*.*'.format(test_dir))

    result_dir = 'results/' + style_name
    check_folder(result_dir)

    # test_real = tf.placeholder(tf.float32, [1, 256, 256, 3], name='test')
    test_real = tf.placeholder(tf.float32, [1, None, None, 3], name='test')

    with tf.variable_scope("generator", reuse=False):
        test_generated = generator.G_net(test_real).fake

    generator_var = [
        var for var in tf.trainable_variables()
        if var.name.startswith('generator')
    ]
    saver = tf.train.Saver(generator_var)

    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          gpu_options=gpu_options)) as sess:
        # tf.global_variables_initializer().run()
        # load model
        ckpt = tf.train.get_checkpoint_state(
            checkpoint_dir)  # checkpoint file information
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(
                ckpt.model_checkpoint_path)  # first line
            saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
            print(" [*] Success to read {}".format(ckpt_name))
        else:
            print(" [*] Failed to find a checkpoint")
            return

        # FLOPs
        # stats_graph(tf.get_default_graph())

        begin = time.time()
        for sample_file in tqdm(test_files):
            # print('Processing image: ' + sample_file)
            sample_image = np.asarray(load_test_data(sample_file, img_size))
            image_path = os.path.join(
                result_dir, '{0}'.format(os.path.basename(sample_file)))
            fake_img = sess.run(test_generated,
                                feed_dict={test_real: sample_image})
            if if_adjust_brightness:
                save_images(fake_img, image_path, sample_file)
            else:
                save_images(fake_img, image_path, None)
        end = time.time()
        print(f'test-time: {end-begin} s')
        print(f'one image test time : {(end-begin)/len(test_files)} s')
        print(f'result path: {result_dir}')
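
If test() is driven from the command line, a wrapper along these lines would work; the argparse flag names and default paths are illustrative assumptions, not taken from the original repository.

# Illustrative CLI wrapper around test(); flag names and defaults are hypothetical.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_dir', type=str, required=True)
    parser.add_argument('--style_name', type=str, default='Hayao')
    parser.add_argument('--test_dir', type=str, default='dataset/test/HR_photo')
    parser.add_argument('--adjust_brightness', action='store_true')
    args = parser.parse_args()
    test(args.checkpoint_dir, args.style_name, args.test_dir,
         args.adjust_brightness)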
Example #3
    def generator(self, x_init, reuse=False, scope="generator"):
        if self.light:
            with tf.variable_scope(scope, reuse=reuse):
                G = generator_lite.G_net(x_init)
                return G.fake

        else:
            with tf.variable_scope(scope, reuse=reuse):
                G = generator.G_net(x_init)
                return G.fake
Example #4
def cvt2anime_video(video, output, checkpoint_dir, output_format='MP4V'):
    '''
    output_format: four-letter FourCC code that specifies the codec to use for the output video, e.g. for mp4 use "H264", "MP4V", or "X264"
    '''
    gpu_stat = bool(len(tf.config.experimental.list_physical_devices('GPU')))
    if gpu_stat:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    gpu_options = tf.GPUOptions(allow_growth=gpu_stat)

    test_real = tf.placeholder(tf.float32, [1, None, None, 3], name='test')
    with tf.variable_scope("generator", reuse=False):
        test_generated = generator.G_net(test_real).fake
         
    saver = tf.train.Saver()

    # load video
    vid = cv2.VideoCapture(video)
    vid_name = os.path.basename(video)
    total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(vid.get(cv2.CAP_PROP_FPS))
    width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    codec = cv2.VideoWriter_fourcc(*output_format)

    tfconfig = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
    with tf.Session(config=tfconfig) as sess:
        # tf.global_variables_initializer().run()
        # load model
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)  # checkpoint file information
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)  # first line
            saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
            print(" [*] Success to read {}".format(os.path.join(checkpoint_dir, ckpt_name)))
        else:
            print(" [*] Failed to find a checkpoint")
            return
         
        video_out = cv2.VideoWriter(os.path.join(output, vid_name.rsplit('.', 1)[0] + "_AnimeGANv2.mp4"), codec, fps, (width, height))

        pbar = tqdm(total=total, ncols=80)
        pbar.set_description(f"Making: {os.path.basename(video).rsplit('.', 1)[0] + '_AnimeGANv2.mp4'}")
        while True:
            ret, frame = vid.read()
            if not ret:
                break
            frame = np.asarray(np.expand_dims(process_image(frame),0))
            fake_img = sess.run(test_generated, feed_dict={test_real: frame})
            fake_img = post_precess(fake_img, (width, height))
            video_out.write(cv2.cvtColor(fake_img, cv2.COLOR_BGR2RGB))
            pbar.update(1)

        pbar.close()
        vid.release()
        video_out.release()
        return os.path.join(output, vid_name.rsplit('.', 1)[0] + "_AnimeGANv2.mp4")
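
A minimal usage sketch for this converter; the input clip, output directory, and checkpoint path are placeholders.

# Hypothetical call; the video path, output directory and checkpoint are placeholders.
result = cvt2anime_video(video='inputs/clip.mp4',
                         output='outputs',
                         checkpoint_dir='checkpoint/generator_Hayao_weight',
                         output_format='MP4V')
print('converted video written to', result)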
Example #5
def cvt2anime_dir(inputdir,
                  outputdir,
                  checkpoint_dir,
                  show_stats=False,
                  img_size=(256, 256)):

    gpu_stat = bool(len(tf.config.experimental.list_physical_devices('GPU')))
    if gpu_stat:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    gpu_options = tf.compat.v1.GPUOptions(allow_growth=gpu_stat)

    test_real = tf.compat.v1.placeholder(tf.float32, [1, None, None, 3],
                                         name='test')

    with tf.compat.v1.variable_scope("generator", reuse=False):
        test_generated = generator.G_net(test_real).fake

    # get image list
    img_files = glob('{}/*.*'.format(inputdir))

    tfconfig = tf.compat.v1.ConfigProto(allow_soft_placement=True,
                                        gpu_options=gpu_options)
    with tf.compat.v1.Session(config=tfconfig) as sess:
        # tf.global_variables_initializer().run()
        # load model
        ckpt = tf.train.get_checkpoint_state(
            checkpoint_dir)  # checkpoint file information
        saver = tf.compat.v1.train.Saver()
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(
                ckpt.model_checkpoint_path)  # first line
            saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
            print(" [*] Success to read {}".format(ckpt_name))
        else:
            print(" [*] Failed to find a checkpoint")
            return

        # show FLOPs
        if show_stats:
            stats_graph(tf.compat.v1.get_default_graph())

        # convert
        for imgfile in tqdm(img_files):
            img = cv2.imread(imgfile).astype(np.float32)
            img_cvt = convert_image(img, img_size)
            fake_img = sess.run(test_generated, feed_dict={test_real: img_cvt})
            fake_img_inv = inverse_image(fake_img)
            output_path = os.path.join(outputdir,
                                       '{0}'.format(os.path.basename(imgfile)))
            cv2.imwrite(output_path, fake_img_inv)
Example #6
def test(img_size=[256, 256]):
    checkpoint_dir = 'checkpoint/generator_Hayao_weight'
    # tf.reset_default_graph()
    result_dir = 'static/client/img'
    test_dir = 'static/client/img'
    check_folder(result_dir)
    test_files = glob('{}/*.*'.format(test_dir))

    test_real = tf.placeholder(tf.float32, [1, None, None, 3], name='test')

    with tf.variable_scope("generator", reuse=False):
        test_generated = generator.G_net(test_real).fake

    saver = tf.train.Saver()

    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          gpu_options=gpu_options)) as sess:
        # tf.global_variables_initializer().run()
        # load model
        ckpt = tf.train.get_checkpoint_state(
            checkpoint_dir)  # checkpoint file information
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(
                ckpt.model_checkpoint_path)  # first line
            saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
        else:
            print(" [*] Failed to find a checkpoint")
            return

        begin = time.time()
        for sample_file in tqdm(test_files):
            sample_image = np.asarray(load_test_data(sample_file, img_size))
            image_path = os.path.join(
                result_dir, '{0}'.format(os.path.basename(sample_file)))
            fake_img = sess.run(test_generated,
                                feed_dict={test_real: sample_image})
            #adjustBrightness
            save_images(fake_img, image_path, sample_file)
            #save_images(fake_img, image_path, None)
        end = time.time()
        print(f'test-time: {end-begin} s')
        print(f'one image test time : {(end-begin)/len(test_files)} s')
Example #7
def cvt2anime_video(video, output, checkpoint_dir, output_format='MP4V', if_adjust_brightness=False, img_size=(256,256)):
    '''
    output_format: four-letter FourCC code that specifies the codec to use for the output video, e.g. for mp4 use "H264", "MP4V", or "X264"
    '''
    # tf.reset_default_graph()
    # check_folder(result_dir)
    gpu_stat = bool(len(tf.config.experimental.list_physical_devices('GPU')))
    if gpu_stat:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    gpu_options = tf.GPUOptions(allow_growth=gpu_stat)

    test_real = tf.placeholder(tf.float32, [1, None, None, 3], name='test')

    with tf.variable_scope("generator", reuse=False):
        test_generated = generator.G_net(test_real).fake

    # load video
    vid = cv2.VideoCapture(video)
    vid_name = os.path.basename(video)
    total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = vid.get(cv2.CAP_PROP_FPS)
    # codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    codec = cv2.VideoWriter_fourcc(*output_format)

    tfconfig = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
    with tf.Session(config=tfconfig) as sess:
        # tf.global_variables_initializer().run()
        # load model
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)  # checkpoint file information
        saver = tf.train.Saver()
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)  # first line
            saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
            print(" [*] Success to read {}".format(ckpt_name))
        else:
            print(" [*] Failed to find a checkpoint")
            return

        # determine output width and height
        ret, img = vid.read()
        if img is None:
            print('Error! Failed to determine frame size: frame empty.')
            return
        img = preprocessing(img, img_size)
        height, width = img.shape[:2]
        out = cv2.VideoWriter(os.path.join(output, vid_name), codec, fps, (width, height))

        pbar = tqdm(total=total)
        vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
        while ret:
            ret, frame = vid.read()
            if frame is None:
                print('Warning: got empty frame.')
                continue

            img = convert_image(frame, img_size)
            fake_img = sess.run(test_generated, feed_dict={test_real: img})
            fake_img = inverse_image(fake_img)
            if if_adjust_brightness:
                fake_img = cv2.cvtColor(adjust_brightness_from_src_to_dst(fake_img, frame), cv2.COLOR_BGR2RGB)
            else:
                fake_img = cv2.cvtColor(fake_img, cv2.COLOR_BGR2RGB)
            fake_img = cv2.resize(fake_img, (width, height))
            out.write(fake_img)
            pbar.update(1)

        pbar.close()
        vid.release()
        out.release()
        # cv2.destroyAllWindows()
        return os.path.join(output, vid_name)
Example #8
    def generator(self, x_init, reuse=False, scope="generator"):

        with tf.variable_scope(scope, reuse=reuse):
            G = generator.G_net(x_init)
            return G.fake
Example #9
def cvt2anime_video(video,
                    output,
                    checkpoint_dir,
                    output_format='MP4V',
                    show_stats=False,
                    img_size=(256, 256)):
    '''
    output_format: four-letter FourCC code that specifies the codec to use for the output video, e.g. for mp4 use "H264", "MP4V", or "XVID"
    '''
    # tf.reset_default_graph()
    # check_folder(result_dir)
    gpu_stat = bool(len(tf.config.experimental.list_physical_devices('GPU')))
    if gpu_stat:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    gpu_options = tf.compat.v1.GPUOptions(allow_growth=gpu_stat)

    test_real = tf.compat.v1.placeholder(tf.float32, [1, None, None, 3],
                                         name='test')

    with tf.compat.v1.variable_scope("generator", reuse=False):
        test_generated = generator.G_net(test_real).fake

    # load video
    try:
        vid = cv2.VideoCapture(int(video))  # numeric argument: treat as a camera index
    except ValueError:
        vid = cv2.VideoCapture(video)  # otherwise: treat as a file path

    #width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    #height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(vid.get(cv2.CAP_PROP_FPS))
    # codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    codec = cv2.VideoWriter_fourcc(*output_format)

    tfconfig = tf.compat.v1.ConfigProto(allow_soft_placement=True,
                                        gpu_options=gpu_options)
    with tf.compat.v1.Session(config=tfconfig) as sess:
        # tf.global_variables_initializer().run()
        # load model
        ckpt = tf.train.get_checkpoint_state(
            checkpoint_dir)  # checkpoint file information
        saver = tf.compat.v1.train.Saver()
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(
                ckpt.model_checkpoint_path)  # first line
            saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
            print(" [*] Success to read {}".format(ckpt_name))
        else:
            print(" [*] Failed to find a checkpoint")
            return

        # FLOPs
        if show_stats:
            stats_graph(tf.compat.v1.get_default_graph())

        ret, img = vid.read()
        if img is None:
            print('Error! Failed to determine frame size: frame empty.')
            return
        img = convert_image(img, img_size)
        fake_img = sess.run(test_generated, feed_dict={test_real: img})
        fake_img = inverse_image(fake_img)
        height, width = fake_img.shape[:2]
        out = cv2.VideoWriter(output, codec, fps, (width, height))

        bar_total = 100
        bar_step = np.round(bar_total / total, 2)
        pbar = tqdm(total=bar_total)
        vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
        while ret:
            ret, img = vid.read()
            if img is None:
                print('Warning: got empty frame.')
                continue

            img = convert_image(img, img_size)
            fake_img = sess.run(test_generated, feed_dict={test_real: img})
            fake_img = inverse_image(fake_img)
            # cv2.imwrite(f'results/cut_{i:03}.jpg', fake_img)
            out.write(fake_img)
            pbar.update(bar_step)
            # cv2.imshow('output', fake_img)
            if cv2.waitKey(1) == ord('q'):
                break
        pbar.close()
        vid.release()
        out.release()
        cv2.destroyAllWindows()
Example #10
# Flask utils
from flask import Flask, send_from_directory, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer

# Define a flask app
app = Flask(__name__)
"""=============================读取模型==================================="""
# 存放模型的文件夹
checkpoint_dir = 'checkpoint/' + 'AnimeGAN_Hayao_lsgan_300_300_1_3_10'
# 输入的真实图片留存区,执行的时候再赋具体size的大小
test_real = tf.compat.v1.placeholder(tf.float32, [1, None, None, 3],
                                     name='test')
# 先初始化generator网络
with tf.compat.v1.variable_scope("generator", reuse=False):
    test_generated = generator.G_net(test_real).fake

saver = tf.compat.v1.train.Saver()
sess = tf.compat.v1.Session(config=tf.ConfigProto(allow_soft_placement=True))
# 加载模型
ckpt = tf.train.get_checkpoint_state(
    checkpoint_dir)  # checkpoint file information
if ckpt and ckpt.model_checkpoint_path:
    ckpt_name = os.path.basename(ckpt.model_checkpoint_path)  # first line
    saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
    print(" [*] Success to read {}".format(ckpt_name))
else:
    print(" [*] Failed to find a checkpoint")
"""=============================================================================="""

Example #11
def encoder(images, style_size=1, keep_prob=1.0, phase_train=True, weight_decay=0.0, reuse=None, scope='Encoders'):
    print("ENCODER INPUT SHAPE: ", images.shape)
    with tf.compat.v1.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.conv2d, slim.conv2d_transpose, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        # weights_initializer=tf.contrib.layers.xavier_initializer(),
                        weights_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0),
                        weights_regularizer=tf.keras.regularizers.l2(0.5 * (weight_decay))):
            with slim.arg_scope([slim.dropout, slim.batch_norm], is_training=phase_train):
                with slim.arg_scope([slim.fully_connected],
                    normalizer_fn=layer_norm, normalizer_params=None):
                    print('{} input shape:'.format(scope), [dim.value for dim in images.shape])

                    batch_size = tf.shape(input=images)[0]
                    k = 64


                    # (Disabled) StyleEncoder branch: a conv0-conv2 stack followed by
                    # global average pooling and a fully connected layer that would
                    # produce `style_vec`.

                    with tf.compat.v1.variable_scope('AnimeGANGenerator'):
                        print( "AnimeGan input Image shape: :", [images.shape])
                        # A disabled variant restored the "pretrained_hayao" checkpoint in a
                        # separate tf.Session and ran its generator there; here the generator
                        # is applied directly on the current graph instead.

                        images_rendered = generator.G_net(images).fake
                        #K.clear_session()
                        #images_rendered = testAnimeGan.test(checkpoint_dir="pretrained_hayao", test_dir = "testData",if_adjust_brightness = True,style_name = "H")
                        print( "rendered Image shape: :", [dim.value for dim in images_rendered.shape])
                        #imshow the image here



                    #  Transform textures
                    with tf.compat.v1.variable_scope('ContentEncoder'):
                        with slim.arg_scope([slim.conv2d, slim.conv2d_transpose, slim.fully_connected],
                                normalizer_fn=instance_norm, normalizer_params=None):
                            print('-- ContentEncoder')
                            net = images

                            net = conv(net, k, 7, stride=1, pad=3, scope='conv0')
                            print('module conv0 shape:', [dim.value for dim in net.shape])

                            net = conv(net, 2*k, 4, stride=2, scope='conv1')
                            print('module conv1 shape:', [dim.value for dim in net.shape])

                            net = conv(net, 4*k, 4, stride=2, scope='conv2')
                            print('module conv2 shape:', [dim.value for dim in net.shape])
                            
                            for i in range(3):
                                net_ = conv(net, 4*k, 3, scope='res{}_0'.format(i))
                                net += conv(net_, 4*k, 3, activation_fn=None, biases_initializer=None, scope='res{}_1'.format(i))
                                print('module res{} shape:'.format(i), [dim.value for dim in net.shape])

                            encoded = net
                        
                    return encoded, images_rendered#, style_vec