def main():
    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    input = tf.placeholder(tf.string, shape=[1])
    key = tf.placeholder(tf.string, shape=[1])

    in_data = tf.decode_base64(input[0])
    img = tf.image.decode_png(in_data)
    img = tf.image.rgb_to_grayscale(img)
    out_data = tf.image.encode_png(img)
    output = tf.convert_to_tensor([tf.encode_base64(out_data)])

    # tf.train.Saver requires at least one variable in the graph
    variable_to_allow_model_saving = tf.Variable(1, dtype=tf.float32)

    inputs = {
        "key": key.name,
        "input": input.name
    }
    tf.add_to_collection("inputs", json.dumps(inputs))
    outputs = {
        "key":  tf.identity(key).name,
        "output": output.name,
    }
    tf.add_to_collection("outputs", json.dumps(outputs))

    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)
        saver = tf.train.Saver()
        saver.export_meta_graph(filename=os.path.join(a.output_dir, "export.meta"))
        saver.save(sess, os.path.join(a.output_dir, "export"), write_meta_graph=False)
    
    print("exported example model to %s" % a.output_dir)
Example #2
import tempfile
import tensorflow as tf 

'''Tensors'''

tf.enable_eager_execution()

print(tf.add(1, 2))
print(tf.add([1, 2], [3, 4]))
print(tf.square(5))
print(tf.reduce_sum([1, 2, 3]))
print(tf.encode_base64("hello world"))

# Operator overloading is also supported
print(tf.square(2) + tf.square(3))

x = tf.matmul([[1]], [[2, 3]])
print(x)
print(x.shape)
print(x.dtype)

'''NumPy Compatibility'''

import numpy as np

ndarray = np.ones([3, 3])

print("TensorFlow operations convert numpy arrays to Tensors automatically")
tensor = tf.multiply(ndarray, 42)
print(tensor)
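
# The conversion also runs the other way (an illustrative addition that
# mirrors Example #12 below): NumPy operations accept Tensors, and
# .numpy() converts explicitly.
print(np.add(tensor, 1))  # NumPy op on a Tensor returns an ndarray
print(tensor.numpy())     # explicit Tensor -> ndarray conversion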
Example #3
def run(target, is_chief, job_name, a):
    output_dir = "./export"

    if tf.__version__.split('.')[0] != "1":
        raise Exception("TensorFlow version 1 required")

    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    for k, v in a._get_kwargs():
        print(k, "=", v)

    if a.checkpoint is None:
        raise Exception("checkpoint required for test mode")

    # in test mode, fix options that would normally be loaded from the checkpoint
    a.scale_size = CROP_SIZE
    a.flip = False

    input = tf.placeholder(tf.string, shape=[1])
    input_data = tf.decode_base64(input[0])
    input_image = tf.image.decode_png(input_data)

    # remove alpha channel if present
    input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 4),
                          lambda: input_image[:, :, :3], lambda: input_image)
    # convert grayscale to RGB
    input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1),
                          lambda: tf.image.grayscale_to_rgb(input_image),
                          lambda: input_image)

    input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)
    input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
    batch_input = tf.expand_dims(input_image, axis=0)

    with tf.variable_scope("generator"):
        batch_output = model.deprocess(
            model.create_generator(a.num_generator_filters,
                                   model.preprocess(batch_input), 3))

    output_image = tf.image.convert_image_dtype(batch_output,
                                                dtype=tf.uint8)[0]
    if a.output_filetype == "jpeg":
        output_data = tf.image.encode_jpeg(output_image, quality=80)
    else:
        output_data = tf.image.encode_png(output_image)

    output = tf.convert_to_tensor([tf.encode_base64(output_data)])

    key = tf.placeholder(tf.string, shape=[1])
    inputs = {"key": key.name, "input": input.name}
    tf.add_to_collection("inputs", json.dumps(inputs))
    outputs = {
        "key": tf.identity(key).name,
        "output": output.name,
    }
    tf.add_to_collection("outputs", json.dumps(outputs))

    init_op = tf.global_variables_initializer()
    restore_saver = tf.train.Saver()
    export_saver = tf.train.Saver()

    with tf.Session() as sess:
        print("monitored session created.")
        sess.run(init_op)
        print("loading model from checkpoint")
        checkpoint = tf.train.latest_checkpoint(a.checkpoint)
        restore_saver.restore(sess, checkpoint)
        # ready to process image
        print("exporting model")
        export_saver.export_meta_graph(
            filename=os.path.join(output_dir, "export.meta"))
        export_saver.save(sess,
                          os.path.join(output_dir, "export"),
                          write_meta_graph=False)
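
# The alpha-strip / grayscale-to-RGB pair of tf.cond calls above recurs in
# several of these examples; a small refactoring sketch (not part of the
# original code) that normalizes any decoded HxWxC image to three channels:
def to_rgb(image):
    # drop the alpha channel if present
    image = tf.cond(tf.equal(tf.shape(image)[2], 4),
                    lambda: image[:, :, :3], lambda: image)
    # promote grayscale to RGB
    return tf.cond(tf.equal(tf.shape(image)[2], 1),
                   lambda: tf.image.grayscale_to_rgb(image), lambda: image)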
Example #4
import tensorflow as tf
tf.enable_eager_execution()

## Computing with tensors
# tf.Tensor(3, shape=(), dtype=int32)
print("tf.add(1, 2) =", tf.add(1, 2))
# tf.Tensor([4 6], shape=(2,), dtype=int32)
print("tf.add([1, 2], [3, 4]) =", tf.add([1, 2], [3, 4]))
# tf.Tensor(25, shape=(), dtype=int32)
print("tf.square(5) =", tf.square(5))
# tf.Tensor(6, shape=(), dtype=int32)
print("tf.reduce_sum([1, 2, 3]) =", tf.reduce_sum([1, 2, 3]))
# tf.Tensor(b'aGVsbG8gd29ybGQ', shape=(), dtype=string)
print("tf.encode_base64('hello world'):", tf.encode_base64('hello world'))

# Operator overloading
# tf.Tensor(13, shape=(), dtype=int32)
print("tf.square(2) + tf.square(3) =", tf.square(2) + tf.square(3))

# Show the shape and data type of a tensor
print("x = tf.matmul([[1]], [[2, 3]])")
x = tf.matmul([[1]], [[2, 3]])
print("Shape of x:", x.shape)
print("Data type of x:", x.dtype)

## NumPy compatibility
import numpy as np

ndarray = np.ones([3, 3])
# array([[1., 1., 1.],
#        [1., 1., 1.],
#        [1., 1., 1.]])
Example #5
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
import tensorflow as tf
import xarray as xr
from cbrain.model_diagnostics import ModelDiagnostics
# import paths for limit_mem and the conservation layers are assumed
from cbrain.utils import limit_mem
from cbrain.layers import SurRadLayer, MassConsLayer, EntConsLayer

# Otherwise TensorFlow will use ALL your GPU RAM for no reason
limit_mem()
TRAINDIR = '/local/Tom.Beucler/SPCAM_PHYS/'
DATADIR = '/project/meteo/w2w/A6/S.Rasp/SP-CAM/fluxbypass_aqua/'
PREFIX = '8col009_01_'
NNs = 'NNA0.01'  # NNA0.01

import os

os.chdir('/filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM')

print(tf.encode_base64("Loading coordinates..."))
print('Loading coordinates...')
coor = xr.open_dataset(
    "/project/meteo/w2w/A6/S.Rasp/SP-CAM/fluxbypass_aqua/AndKua_aqua_SPCAM3.0_sp_fbp_f4.cam2.h1.0000-01-01-00000.nc",
    decode_times=False)
lat = coor.lat
lon = coor.lon
coor.close()

config_fn = '/filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM/pp_config/8col_rad_tbeucler_local_PostProc.yml'
data_fn = '/local/Tom.Beucler/SPCAM_PHYS/8col009_01_valid.nc'
dict_lay = {
    'SurRadLayer': SurRadLayer,
    'MassConsLayer': MassConsLayer,
    'EntConsLayer': EntConsLayer
}
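
# dict_lay presumably supplies Keras custom_objects so a saved model built
# from these conservation layers can be deserialized; a hedged sketch (the
# model path below is a placeholder, not from the original):
import keras
model = keras.models.load_model('/path/to/' + NNs + '.h5',  # hypothetical path
                                custom_objects=dict_lay)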
Example #6
    def build_graph(self, data_dir, batch_size, mode):
        """Builds the VAE-GAN network.

        Args:
          data_dir: Location of the input data.
          batch_size: Batch size of the input data.
          mode: Mode of the graph (TRAIN, EVAL, PREDICT_EMBED_IN, or
            PREDICT_IMAGE_IN).

        Returns:
          The tensors used in training the model.
        """
        tensors = GraphReferences()
        assert batch_size > 0
        self.batch_size = batch_size
        if mode is PREDICT_EMBED_IN:
            # Input embeddings to send through decoder/generator network.
            tensors.embeddings = tf.placeholder(tf.float32,
                                                shape=(None,
                                                       EMBEDDING_DIMENSION),
                                                name='input')
        elif mode is PREDICT_IMAGE_IN:
            tensors.prediction_image = tf.placeholder(tf.string,
                                                      shape=(None, ),
                                                      name='input')
            tensors.image = tf.map_fn(self.process_image,
                                      tensors.prediction_image,
                                      dtype=tf.float32)

        if mode in (TRAIN, EVAL):
            mode_string = 'train'
            if mode is EVAL:
                mode_string = 'validation'

            tensors.image = util.read_and_decode(data_dir, batch_size,
                                                 mode_string,
                                                 self.resized_image_size,
                                                 self.crop_image_dimension,
                                                 self.center_crop)

            tensors.image = tf.reshape(
                tensors.image,
                [-1, self.resized_image_size, self.resized_image_size, 3])

            tf.summary.image('original_images', tensors.image, 1)

            tensors.embeddings, y_mean, y_stddev = self.encode(tensors.image)

        if mode is PREDICT_IMAGE_IN:
            tensors.image = tf.reshape(
                tensors.image,
                [-1, self.resized_image_size, self.resized_image_size, 3])
            tensors.embeddings, y_mean, _ = self.encode(tensors.image, False)
            tensors.predictions = tensors.embeddings
            return tensors

        decoded_images = self.decode(tensors.embeddings)

        if mode is TRAIN:
            tf.summary.image('decoded_images', decoded_images, 1)

        if mode is PREDICT_EMBED_IN:
            decoded_images = self.decode(tensors.embeddings, False, True)
            output_images = (decoded_images + 1.0) / 2.0
            output_img = tf.image.convert_image_dtype(output_images,
                                                      dtype=tf.uint8,
                                                      saturate=True)[0]
            output_data = tf.image.encode_png(output_img)
            output = tf.encode_base64(output_data)

            tensors.predictions = output

            return tensors

        tensors.dis_fake = self.discriminate(decoded_images, self.dropout)
        tensors.dis_real = self.discriminate(tensors.image,
                                             self.dropout,
                                             reuse=True)

        tensors.cost_encoder = self.loss_encoder(tensors.image, decoded_images,
                                                 y_mean, y_stddev)
        tensors.cost_generator = self.loss_generator(tensors.dis_fake)
        tensors.cost_discriminator = self.loss_discriminator(
            tensors.dis_real, tensors.dis_fake)

        if mode in (TRAIN, EVAL):
            tf.summary.scalar('cost_encoder', tensors.cost_encoder)
            tf.summary.scalar('cost_generator', tensors.cost_generator)
            tf.summary.scalar('cost_discriminator', tensors.cost_discriminator)
            tf.summary.tensor_summary('disc_fake', tensors.dis_fake)
            tf.summary.tensor_summary('disc_real', tensors.dis_real)
            tf.summary.scalar('mean_disc_fake',
                              tf.reduce_mean(tensors.dis_fake))
            tf.summary.scalar('mean_disc_real',
                              tf.reduce_mean(tensors.dis_real))

        # Cost of Decoder/Generator is VAE network cost and cost of generator
        # being detected by the discriminator.
        enc_weight = 1
        gen_weight = 1
        tensors.cost_balance = (enc_weight * tensors.cost_encoder +
                                gen_weight * tensors.cost_generator)

        tensors.global_step = tf.Variable(0,
                                          name='global_step',
                                          trainable=False)
        t_vars = tf.trainable_variables()

        with tf.variable_scope(tf.get_variable_scope(), reuse=None):
            encoder_vars = [
                var for var in t_vars if var.name.startswith('enc_')
            ]
            generator_vars = [
                var for var in t_vars if var.name.startswith('gen_')
            ]
            discriminator_vars = [
                var for var in t_vars if var.name.startswith('disc_')
            ]
            vae_vars = encoder_vars + generator_vars

            # Create optimizers for each network.
            tensors.encoder_optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate,
                beta1=self.beta1).minimize(tensors.cost_encoder,
                                           var_list=vae_vars,
                                           global_step=tensors.global_step)
            tensors.generator_optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate,
                beta1=self.beta1).minimize(tensors.cost_balance,
                                           var_list=vae_vars,
                                           global_step=tensors.global_step)
            tensors.discriminator_optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate,
                beta1=self.beta1).minimize(tensors.cost_discriminator,
                                           var_list=discriminator_vars,
                                           global_step=tensors.global_step)

        return tensors
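
# A hedged sketch (not part of the original class) of stepping the three
# optimizers that build_graph returns in TRAIN mode; the alternation order
# is an assumption:
def train_step(sess, tensors):
    sess.run(tensors.encoder_optimizer)        # VAE/encoder update
    sess.run(tensors.generator_optimizer)      # generator update
    sess.run(tensors.discriminator_optimizer)  # discriminator update
    # note: all three minimize() calls share tensors.global_step, so the
    # counter advances three times per train_step
    return sess.run(tensors.global_step)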
Example #7
def export(checkpoint, img_shape):

    if img_shape is None:
        img_shape = [256, 256, 3]

    # placeholder for a base64 string that decodes to a PNG image
    input = tf.placeholder(tf.string, shape=[1])
    input_data = tf.decode_base64(input[0])
    input_image = tf.image.decode_png(input_data)

    # remove alpha channel if present
    input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 4),
                          lambda: input_image[:, :, :3], lambda: input_image)
    # convert grayscale to RGB
    input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1),
                          lambda: tf.image.grayscale_to_rgb(input_image),
                          lambda: input_image)

    input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)
    input_image.set_shape(img_shape)
    # expected shape is (1,) + img_shape because of batching
    batch_input = tf.expand_dims(input_image, axis=0)

    # create network
    batch_output = transform.net(batch_input)

    # clip RGB values to the allowed range, then cast to int8 and bitcast to
    # uint8: values above 127 wrap to negative int8, and the bitcast
    # reinterprets the same bits back as the original 0-255 range
    batch_output = tf.clip_by_value(batch_output, 0, 255)
    batch_output = tf.bitcast(tf.cast(batch_output, tf.int8), tf.uint8)
    output_data = tf.image.encode_png(batch_output[0])
    output = tf.convert_to_tensor([tf.encode_base64(output_data)])

    # save inputs and outputs to collection
    key = tf.placeholder(tf.string, shape=[1])
    inputs = {"key": key.name, "input": input.name}
    tf.add_to_collection("inputs", json.dumps(inputs))
    outputs = {
        "key": tf.identity(key).name,
        "output": output.name,
    }
    tf.add_to_collection("outputs", json.dumps(outputs))

    init_op = tf.global_variables_initializer()
    restore_saver = tf.train.Saver()
    export_saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(init_op)

        if os.path.isdir(checkpoint):
            ckpt = tf.train.get_checkpoint_state(checkpoint)
            if ckpt and ckpt.model_checkpoint_path:
                restore_saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            restore_saver.restore(sess, checkpoint)
        print("exporting model")
        export_saver.export_meta_graph(
            filename=os.path.join(a.export, "export.meta"))
        export_saver.save(sess,
                          os.path.join(a.export, "export"),
                          write_meta_graph=False)

    return
Example #8
# -*- coding: utf-8 -*-

import tensorflow as tf
import numpy as np

tf.enable_eager_execution()

print(tf.add(17, 2))
print(tf.add([11, 21, 3, 4],
             [23, 41, 8, 6]))  # [11+23, 21+41, 3+8, 4+6] = [34, 62, 11, 10]
print(tf.square(8))  # 8*8 = 64
print(tf.reduce_sum([1, 2, 3, 4, 5]))  # sum of 1+2+3+4+5 = 15
print(tf.encode_base64("The first program is to print hello world"))

# Operator overloading is also supported
print(tf.square(4) - tf.square(3) + tf.abs(-5))

x = tf.matmul([[1, 2, 5]],
              [[2, 5, 3], [2, 4, 7],
               [3, 6, 5]])  # a 1x3 matrix times a 3x3 matrix is 1x3
y = tf.matmul([[2, 4, 5], [1, 2, 5]],
              [[12, 15, 13], [20, 4, 17],
               [13, 3, 8]])  # a 2x3 matrix times a 3x3 matrix is 2x3
print(x.shape)
print(x.dtype)
print(y.shape)
print(y.dtype)

ndarray = np.ones([4, 3])

print("A TensorFlow operation will convert numpy array to a tensor.")
Example #9
def main():

    # start the overall timer (time.clock was removed in Python 3.8)
    mainStart = time.time()

    # added by Tony
    step1Start = time.time()

    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    if a.mode == "test" or a.mode == "export":
        if a.checkpoint is None:
            raise Exception("checkpoint required for test mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(a.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(a, key, val)
        # disable these features in test mode
        a.scale_size = CROP_SIZE
        a.flip = False

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    # added by Tony
    step1Stop = time.time()

    if a.mode == "export":
        # export the generator to a meta graph that can be imported later for standalone generation
        if a.lab_colorization:
            raise Exception("export not supported for lab_colorization")

        input = tf.placeholder(tf.string, shape=[1])
        input_data = tf.decode_base64(input[0])
        input_image = tf.image.decode_png(input_data)

        # remove alpha channel if present
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 4), lambda: input_image[:,:,:3], lambda: input_image)
        # convert grayscale to RGB
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1), lambda: tf.image.grayscale_to_rgb(input_image), lambda: input_image)

        input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)
        input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
        batch_input = tf.expand_dims(input_image, axis=0)

        with tf.variable_scope("generator"):
            batch_output = deprocess(create_generator(preprocess(batch_input), 3))

        output_image = tf.image.convert_image_dtype(batch_output, dtype=tf.uint8)[0]
        if a.output_filetype == "png":
            output_data = tf.image.encode_png(output_image)
        elif a.output_filetype == "jpeg":
            output_data = tf.image.encode_jpeg(output_image, quality=80)
        else:
            raise Exception("invalid filetype")
        output = tf.convert_to_tensor([tf.encode_base64(output_data)])

        key = tf.placeholder(tf.string, shape=[1])
        inputs = {
            "key": key.name,
            "input": input.name
        }
        tf.add_to_collection("inputs", json.dumps(inputs))
        outputs = {
            "key":  tf.identity(key).name,
            "output": output.name,
        }
        tf.add_to_collection("outputs", json.dumps(outputs))

        init_op = tf.global_variables_initializer()
        # Saver for restoring the trained weights from the checkpoint
        restore_saver = tf.train.Saver()
        # Saver for writing the export files
        export_saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            restore_saver.restore(sess, checkpoint)
            print("exporting model")
            export_saver.export_meta_graph(filename=os.path.join(a.output_dir, "export.meta"))
            export_saver.save(sess, os.path.join(a.output_dir, "export"), write_meta_graph=False)

        return

       
    # added by Tony
    loadExamplesCreateModelStart = time.time()

    # delete all files in a folder
    # before testing, clear everything under facades/val and facades_test
    def del_file(path):
        ls = os.listdir(path)
        for i in ls:
            c_path = os.path.join(path, i)
            if os.path.isdir(c_path):
                del_file(c_path)
            else:
                os.remove(c_path)
    
    del_file("facades/val")
    del_file("facades_test")

    examples = load_examples()
    print("examples count = %d" % examples.count)


    # inputs and targets are [batch_size, height, width, channels]
    model = create_model(examples.inputs, examples.targets)

    # undo colorization splitting on images that we use for display/output
    if a.lab_colorization:
        if a.which_direction == "AtoB":
            # inputs is brightness, this will be handled fine as a grayscale image
            # need to augment targets and outputs with brightness
            targets = augment(examples.targets, examples.inputs)
            outputs = augment(model.outputs, examples.inputs)
            # inputs can be deprocessed normally and handled as if they are single channel
            # grayscale images
            inputs = deprocess(examples.inputs)
        elif a.which_direction == "BtoA":
            # inputs will be color channels only, get brightness from targets
            inputs = augment(examples.inputs, examples.targets)
            targets = deprocess(examples.targets)
            outputs = deprocess(model.outputs)
        else:
            raise Exception("invalid direction")
    else:
        inputs = deprocess(examples.inputs)
        targets = deprocess(examples.targets)
        outputs = deprocess(model.outputs)

    def convert(image):
        if a.aspect_ratio != 1.0:
            # upscale to correct aspect ratio
            size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]
            image = tf.image.resize_images(image, size=size, method=tf.image.ResizeMethod.BICUBIC)
        # convert the image data type
        return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_inputs"):
        converted_inputs = convert(inputs)

    with tf.name_scope("convert_targets"):
        converted_targets = convert(targets)

    with tf.name_scope("convert_outputs"):
        converted_outputs = convert(outputs)

    with tf.name_scope("encode_images"):
        display_fetches = {
            "paths": examples.paths,
            "inputs": tf.map_fn(tf.image.encode_png, converted_inputs, dtype=tf.string, name="input_pngs"),
            "targets": tf.map_fn(tf.image.encode_png, converted_targets, dtype=tf.string, name="target_pngs"),
            "outputs": tf.map_fn(tf.image.encode_png, converted_outputs, dtype=tf.string, name="output_pngs"),
        }

    # summaries
    with tf.name_scope("inputs_summary"):
        tf.summary.image("inputs", converted_inputs)

    with tf.name_scope("targets_summary"):
        tf.summary.image("targets", converted_targets)

    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    with tf.name_scope("predict_real_summary"):
        tf.summary.image("predict_real", tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8))

    with tf.name_scope("predict_fake_summary"):
        tf.summary.image("predict_fake", tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8))

    tf.summary.scalar("discriminator_loss", model.discrim_loss)
    tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
    tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    # keep only the most recent checkpoint
    saver = tf.train.Saver(max_to_keep=1)

    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)

    loadExamplesCreateModelStop = time.time()

    # added by Tony
    loadingModelStart = time.time()
    with sv.managed_session() as sess:
        print("parameter_count =", sess.run(parameter_count))

        if a.checkpoint is not None:
            print("loading model from checkpoint")
            # tf.train.latest_checkpoint() finds the most recently saved model
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            # restore(sess, save_path) loads the weights saved at save_path
            saver.restore(sess, checkpoint)

        max_steps = 2**32
        # a.max_epochs = number of training epochs
        if a.max_epochs is not None:
            max_steps = examples.steps_per_epoch * a.max_epochs
        # a.max_steps = number of training steps
        if a.max_steps is not None:
            max_steps = a.max_steps

        loadingModelStop = time.time()

        if a.mode == "test":
            # testing
            # at most, process the test data once

            # added by Tony
            testStart = time.time()

            start = time.time()
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                # move the generated images into the corresponding directory
                filesets = save_images(results)
                for i, f in enumerate(filesets):
                    print("evaluated image", f["name"])
                index_path = append_index(filesets)
            print("wrote index at", index_path)
            print("rate", (time.time() - start) / max_steps)

            testStop = time.time()

            # added by Tony: merge all the small test images into one
            mergeStart = time.time()
            import mergeAllImages
            mergeAllImages.main()

            # copy the merged image to the requested output_file
            shutil.copy("facades_test/merged3.png", a.output_file)
            mergeStop = time.time()


            # step1 time
            #print("★★★step1 Time used                      :", str(step1Stop - step1Start) + "s")

            # loadExamplesCreateModel time
            print("★★★loadExamplesCreateModel Time used     :", str(loadExamplesCreateModelStop - loadExamplesCreateModelStart) + "s")

            # split time (splitStart/splitStop are never set in this snippet,
            # so this print is disabled to avoid a NameError)
            #print("★★★Split Time used                       :", str(splitStop - splitStart) + "s")

            # loading Model time
            print("★★★loading Model time used               :", str(loadingModelStop - loadingModelStart) + "s")

            # test time
            print("★★★test Time used                        :", str(testStop - testStart) + "s")

            # merge time
            print("★★★Merge Time used                       :", str(mergeStop - mergeStart) + "s")

            # total time
            elapsed = time.time() - mainStart
            print("★★★Time used(Total)                      : ", str(elapsed) + "s")

        else:
            # training
            start = time.time()

            for step in range(max_steps):
                def should(freq):
                    return freq > 0 and ((step + 1) % freq == 0 or step == max_steps - 1)

                options = None
                run_metadata = None
                if should(a.trace_freq):
                    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                fetches = {
                    "train": model.train,
                    "global_step": sv.global_step,
                }

                # display progress every progress_freq steps
                if should(a.progress_freq):
                    fetches["discrim_loss"] = model.discrim_loss
                    fetches["gen_loss_GAN"] = model.gen_loss_GAN
                    fetches["gen_loss_L1"] = model.gen_loss_L1

                # update summaries every summary_freq steps
                if should(a.summary_freq):
                    fetches["summary"] = sv.summary_op

                # write current training images every display_freq steps
                if should(a.display_freq):
                    fetches["display"] = display_fetches

                results = sess.run(fetches, options=options, run_metadata=run_metadata)

                if should(a.summary_freq):
                    print("recording summary")
                    sv.summary_writer.add_summary(results["summary"], results["global_step"])

                if should(a.display_freq):
                    print("saving display images")
                    filesets = save_images(results["display"], step=results["global_step"])
                    append_index(filesets, step=True)

                if should(a.trace_freq):
                    print("recording trace")
                    sv.summary_writer.add_run_metadata(run_metadata, "step_%d" % results["global_step"])

                if should(a.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] / examples.steps_per_epoch)
                    train_step = (results["global_step"] - 1) % examples.steps_per_epoch + 1
                    rate = (step + 1) * a.batch_size / (time.time() - start)
                    remaining = (max_steps - step) * a.batch_size / rate
                    print("progress  epoch %d  step %d  image/sec %0.1f  remaining %dm" % (train_epoch, train_step, rate, remaining / 60))
                    print("discrim_loss", results["discrim_loss"])
                    print("gen_loss_GAN", results["gen_loss_GAN"])
                    print("gen_loss_L1", results["gen_loss_L1"])

                # save the model every save_freq steps (default 5000)
                if should(a.save_freq):
                    print("saving model")
                    # save the trained model; the second argument sets the save
                    # path and name, and global_step appends the step count as a
                    # suffix, e.g. saver.save(sess, 'my-model', global_step=1000)
                    # -> 'my-model-1000'
                    saver.save(sess, os.path.join(a.output_dir, "model"), global_step=sv.global_step)

                if sv.should_stop():
                    break
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir", required=True, help="where to put output files")
    parser.add_argument("--checkpoint", required=True, help="directory with checkpoint to resume training from or use for testing")
    parser.add_argument("--separable_conv", action="store_true", help="use separable convolutions in the generator")
    parser.add_argument("--ngf", type=int, default=64, help="number of generator filters in first conv layer")
    parser.add_argument('--disable_psf', action='store_true', help="disable the concatenation of the PSF as a channel")

    a = parser.parse_args()

    def load_data(dirty_path, psf_path):
        # type: (str, str) -> tf.data.Dataset
        def dataset_generator():
            psf = fits_open(psf_path)[:, :, np.newaxis]
            dirty = fits_open(dirty_path)[:, :, np.newaxis]
            min_flux = dirty.min()
            max_flux = dirty.max()
            yield min_flux, max_flux, psf, dirty

        # elements are (min_flux, max_flux, psf, dirty) with shapes
        # ((), (), (256, 256, 1), (256, 256, 1))
        ds = tf.data.Dataset.from_generator(
            dataset_generator,
            output_shapes=((), ()) + ((256, 256, 1),) * 2,
            output_types=(tf.float32, tf.float32) + (tf.float32,) * 2)
        ds = ds.batch(1)
        return ds

    dirty_path = tf.placeholder(tf.string, shape=[1])
    psf_path = tf.placeholder(tf.string, shape=[1])
    batch = load_data(dirty_path, psf_path)

    iterator = batch.make_one_shot_iterator()
    min_flux, max_flux, psf, dirty = iterator.get_next()

    scaled_dirty = preprocess(dirty, min_flux, max_flux)
    scaled_psf = preprocess(psf, min_flux, max_flux)

    if a.disable_psf:
        input_ = scaled_dirty
    else:
        input_ = tf.concat([scaled_dirty, scaled_psf], axis=3)

    with tf.variable_scope("generator"):
        generator = create_generator(input_, 1, ngf=a.ngf, separable_conv=a.separable_conv)
        batch_output = deprocess(generator, min_flux, max_flux)

    output_image = tf.image.convert_image_dtype(batch_output, dtype=tf.uint8)[0]

    # assume PNG output for now
    output_data = tf.image.encode_png(output_image)
    output = tf.convert_to_tensor([tf.encode_base64(output_data)])

    key = tf.placeholder(tf.string, shape=[1])
    inputs = {
        "key": key.name,
        "input": dirty.name
    }
    tf.add_to_collection("inputs", json.dumps(inputs))
    outputs = {
        "key": tf.identity(key).name,
        "output": output.name,
    }
    tf.add_to_collection("outputs", json.dumps(outputs))

    init_op = tf.global_variables_initializer()
    restore_saver = tf.train.Saver()
    export_saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(init_op)
        print("loading model from checkpoint")
        checkpoint = tf.train.latest_checkpoint(a.checkpoint)
        restore_saver.restore(sess, checkpoint)
        print("exporting model")
        #export_saver.export_meta_graph(filename=os.path.join(a.output_dir, "export.meta"))
        export_saver.save(sess, os.path.join(a.output_dir, "export"), write_meta_graph=True) #, save_relative_paths=True)
Example #11
def main():
    if arguments.seed is None:
        arguments.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(arguments.seed)
    np.random.seed(arguments.seed)
    random.seed(arguments.seed)

    if not os.path.exists(arguments.output_dir):
        os.makedirs(arguments.output_dir)

    if arguments.mode == "test" or arguments.mode == "export":
        if arguments.checkpoint is None:
            raise Exception("checkpoint required for test mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(arguments.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(arguments, key, val)
        # disable these features in test mode
        arguments.scale_size = mu.CROP_SIZE
        arguments.flip = False

    for k, v in arguments._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(arguments.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(arguments), sort_keys=True, indent=4))

    if arguments.mode == "export":
        # export the generator to a meta graph that can be imported later for standalone generation
        if arguments.lab_colorization:
            raise Exception("export not supported for lab_colorization")

        input = tf.placeholder(tf.string, shape=[1])
        input_data = tf.decode_base64(input[0])
        input_image = tf.image.decode_png(input_data)

        # remove alpha channel if present
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2],
                                       4), lambda: input_image[:, :, :3],
                              lambda: input_image)
        # convert grayscale to RGB
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1),
                              lambda: tf.image.grayscale_to_rgb(input_image),
                              lambda: input_image)

        input_image = tf.image.convert_image_dtype(input_image,
                                                   dtype=tf.float32)
        input_image.set_shape([mu.CROP_SIZE, mu.CROP_SIZE, 3])
        batch_input = tf.expand_dims(input_image, axis=0)

        with tf.variable_scope("generator"):
            batch_output = de_process(
                mu.create_generator(arguments, pre_process(batch_input), 3))

        output_image = tf.image.convert_image_dtype(batch_output,
                                                    dtype=tf.uint8)[0]
        if arguments.output_filetype == "png":
            output_data = tf.image.encode_png(output_image)
        elif arguments.output_filetype == "jpeg":
            output_data = tf.image.encode_jpeg(output_image, quality=80)
        else:
            raise Exception("invalid filetype")

        output = tf.convert_to_tensor([tf.encode_base64(output_data)])

        key = tf.placeholder(tf.string, shape=[1])
        inputs = {"key": key.name, "input": input.name}
        tf.add_to_collection("inputs", json.dumps(inputs))

        outputs = {
            "key": tf.identity(key).name,
            "output": output.name,
        }
        tf.add_to_collection("outputs", json.dumps(outputs))

        init_op = tf.global_variables_initializer()
        restore_saver = tf.train.Saver()
        export_saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(arguments.checkpoint)
            restore_saver.restore(sess, checkpoint)
            print("exporting model")
            export_saver.export_meta_graph(
                filename=os.path.join(arguments.output_dir, "export.meta"))
            export_saver.save(sess,
                              os.path.join(arguments.output_dir, "export"),
                              write_meta_graph=False)

        return

    # load training data
    # inputs and targets are [batch_size, height, width, channels]
    source_placeholder = tf.placeholder(tf.float32, (None, 256, 256, 3),
                                        "x_source")
    target_placeholder = tf.placeholder(tf.float32, (None, 256, 256, 3),
                                        "y_target")

    model = mu.create_model(arguments, source_placeholder, target_placeholder)

    outputs = de_process(model.outputs)

    def convert(image):
        if arguments.aspect_ratio != 1.0:
            # upscale to correct aspect ratio
            size = [
                mu.CROP_SIZE,
                int(round(mu.CROP_SIZE * arguments.aspect_ratio))
            ]
            image = tf.image.resize_images(
                image, size=size, method=tf.image.ResizeMethod.BICUBIC)

        return tf.image.convert_image_dtype(image,
                                            dtype=tf.uint8,
                                            saturate=True)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_outputs"):
        converted_outputs = convert(outputs)

    with tf.name_scope("encode_images"):
        display_fetches = {
            "outputs":
            tf.map_fn(tf.image.encode_png,
                      converted_outputs,
                      dtype=tf.string,
                      name="output_pngs"),
        }

    # summaries
    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    with tf.name_scope("predict_real_summary"):
        tf.summary.image(
            "predict_real",
            tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8))

    with tf.name_scope("predict_fake_summary"):
        tf.summary.image(
            "predict_fake",
            tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8))

    tf.summary.scalar("discriminator_loss", model.discrim_loss)
    tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
    tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    saver = tf.train.Saver(max_to_keep=1)

    log_dir = arguments.output_dir if (arguments.trace_freq > 0
                                       or arguments.summary_freq > 0) else None
    # todo: Supervisor is deprecated, use MonitoredTrainingSession
    sv = tf.train.Supervisor(logdir=log_dir, save_summaries_secs=0, saver=None)

    # training loop
    with sv.managed_session() as sess:

        images_filename_list = du.get_data_files_list(arguments.input_dir,
                                                      "jpg")
        steps_per_epoch = int(
            math.ceil(len(images_filename_list) / arguments.batch_size))

        print("parameter_count =", sess.run(parameter_count))
        print(f"Data count = {len(images_filename_list)}")

        if arguments.checkpoint is not None:
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(arguments.checkpoint)
            saver.restore(sess, checkpoint)

        max_steps = 2**32
        if arguments.max_epochs is not None:
            max_steps = steps_per_epoch * arguments.max_epochs
        if arguments.max_steps is not None:
            max_steps = arguments.max_steps

        if arguments.mode == "test":
            # testing
            start = time.time()
            for test_file in images_filename_list:
                x_source, y_target = du.read_single_data_file(
                    test_file, arguments)

                results = sess.run(display_fetches,
                                   feed_dict={
                                       source_placeholder:
                                       np.expand_dims(x_source, 0),
                                       target_placeholder:
                                       np.expand_dims(y_target, 0)
                                   })

                fileset = save_image(test_file, results)
                print("evaluated image", fileset["name"])
                index_path = append_index(fileset)

                print("wrote index at", index_path)

            print(f"time: {(time.time() - start)}")
        else:
            # training
            start = time.time()

            for step in range(max_steps):

                def should(freq):
                    return freq > 0 and ((step + 1) % freq == 0
                                         or step == max_steps - 1)

                options = None
                run_metadata = None
                if should(arguments.trace_freq):
                    options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                fetches = {
                    "train": model.train,
                    "global_step": sv.global_step,
                }

                if should(arguments.progress_freq):
                    fetches["discrim_loss"] = model.discrim_loss
                    fetches["gen_loss_GAN"] = model.gen_loss_GAN
                    fetches["gen_loss_L1"] = model.gen_loss_L1

                if should(arguments.summary_freq):
                    fetches["summary"] = sv.summary_op

                if should(arguments.display_freq):
                    fetches["display"] = display_fetches

                x_source, y_target = du.generate_batch(images_filename_list,
                                                       arguments)

                results = sess.run(fetches,
                                   options=options,
                                   run_metadata=run_metadata,
                                   feed_dict={
                                       source_placeholder: x_source,
                                       target_placeholder: y_target
                                   })

                if should(arguments.summary_freq):
                    print("recording summary")
                    sv.summary_writer.add_summary(results["summary"],
                                                  results["global_step"])

                if should(arguments.display_freq):
                    # todo: broken, need to fix
                    print("saving display images")
                    filesets = save_images(results["display"],
                                           step=results["global_step"])
                    append_index(filesets, step=True)

                if should(arguments.trace_freq):
                    print("recording trace")
                    sv.summary_writer.add_run_metadata(
                        run_metadata, "step_%d" % results["global_step"])

                if should(arguments.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] /
                                            steps_per_epoch)
                    train_step = (results["global_step"] -
                                  1) % steps_per_epoch + 1
                    rate = (step + 1) * arguments.batch_size / (time.time() -
                                                                start)
                    remaining = (max_steps -
                                 step) * arguments.batch_size / rate
                    print(
                        "progress  epoch %d  step %d  image/sec %0.1f  remaining %dm"
                        % (train_epoch, train_step, rate, remaining / 60))
                    print("discrim_loss", results["discrim_loss"])
                    print("gen_loss_GAN", results["gen_loss_GAN"])
                    print("gen_loss_L1", results["gen_loss_L1"])

                if should(arguments.save_freq):
                    print("saving model")
                    saver.save(sess,
                               os.path.join(arguments.output_dir, "model"),
                               global_step=sv.global_step)

                if sv.should_stop():
                    break
Example #12
# https://www.tensorflow.org/tutorials/eager/eager_basics

import tensorflow as tf
import numpy as np

tf.enable_eager_execution()

# Tensors
# have a dtype and a shape,
# can live in accelerator (GPU) memory,
# and are immutable
print(tf.add(1, 2))  # tf.Tensor(3, shape=(), dtype=int32)
print(tf.add([1, 2], [3, 4]))  # tf.Tensor([4 6], shape=(2,), dtype=int32)
print(tf.square(5))  # tf.Tensor(25, shape=(), dtype=int32)
print(tf.reduce_sum([1, 2, 3]))  # tf.Tensor(6, shape=(), dtype=int32)
print(tf.encode_base64(
    "hello world"))  # tf.Tensor(b'aGVsbG8gd29ybGQ', shape=(), dtype=string)
print(tf.square(2) + tf.square(3))  # tf.Tensor(13, shape=(), dtype=int32)

ndarray = np.ones((3, 3))
tensor = tf.multiply(ndarray,
                     42)  # automatically convert numpy array to tensor
print(tensor)
print(np.add(tensor, 1))  # automatically convert tensor to numpy array
print(tensor.numpy())

# GPU acceleration
x = tf.random_uniform(shape=(3, 3))
print(tf.test.is_gpu_available())
print(x.device.endswith("GPU:0"))  # tensor.device names the device; here, the first GPU

# TensorFlow automatically chooses a device to run each operation on
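
# Explicit placement sketch (pinning to the CPU here is illustrative):
with tf.device("CPU:0"):
    y = tf.random_uniform(shape=(3, 3))
    print(y.device.endswith("CPU:0"))  # True: the op was pinned to the CPU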
Example #13
def train():
    if args.seed is None:
        args.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    if args.mode == "test" or args.mode == "export":
        if args.checkpoint_dir is None:
            raise Exception("checkpoint required for test mode")

        # load some options from the checkpoint
        # options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        # with open(os.path.join(args.checkpoint_dir, "options.json"), 'r') as f:
        #     for key, val in json.loads(f.read()).items():
        #         if key in options:
        #             print("loaded", key, "=", val)
        #             setattr(args, key, val)
        # disable these features in test mode
        args.scale_size = CROP_SIZE
        args.flip = False

    for k, v in args._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(args.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(args), sort_keys=True, indent=4))

    if args.mode == "export":
        # export the generator to a meta graph that can be imported later for standalone generation
        if args.lab_colorization:
            raise Exception("export not supported for lab_colorization")

        inputs = tf.placeholder(tf.string, shape=[1])
        input_data = tf.decode_base64(inputs[0])
        input_image = tf.image.decode_png(input_data)

        # remove alpha channel if present
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2],
                                       4), lambda: input_image[:, :, :3],
                              lambda: input_image)
        # convert grayscale to RGB
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1),
                              lambda: tf.image.grayscale_to_rgb(input_image),
                              lambda: input_image)

        input_image = tf.image.convert_image_dtype(input_image,
                                                   dtype=tf.float32)
        input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
        batch_input = tf.expand_dims(input_image, axis=0)

        model_ = Pix2Pix()
        batch_output = deprocess(
            model_.get_generator(preprocess(batch_input),
                                 3,
                                 ngf=args.ngf,
                                 conv_type=args.conv_type,
                                 channel_multiplier=args.channel_multiplier,
                                 padding='SAME'))
        # with tf.variable_scope("generator"):
        #     batch_output = deprocess(model.get_generator(preprocess(batch_input), 3))

        output_image = tf.image.convert_image_dtype(batch_output,
                                                    dtype=tf.uint8)[0]
        if args.output_filetype == "png":
            output_data = tf.image.encode_png(output_image)
        elif args.output_filetype == "jpeg":
            output_data = tf.image.encode_jpeg(output_image, quality=80)
        else:
            raise Exception("invalid filetype")
        output = tf.convert_to_tensor([tf.encode_base64(output_data)])

        key = tf.placeholder(tf.string, shape=[1])
        inputs = {"key": key.name, "input": inputs.name}
        tf.add_to_collection("inputs", json.dumps(inputs))
        outputs = {
            "key": tf.identity(key).name,
            "output": output.name,
        }
        tf.add_to_collection("outputs", json.dumps(outputs))

        init_op = tf.global_variables_initializer()
        restore_saver = tf.train.Saver()
        export_saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(args.checkpoint_dir)
            restore_saver.restore(sess, checkpoint)
            print("exporting model")
            export_saver.export_meta_graph(
                filename=os.path.join(args.output_dir, "export.meta"))
            export_saver.save(sess,
                              os.path.join(args.output_dir, "export"),
                              write_meta_graph=False)

        return

    examples = load_examples()
    print("examples count = %d" % examples.count)

    max_steps = 2**32
    if args.max_epochs is not None:
        max_steps = examples.steps_per_epoch * args.max_epochs
    if args.max_steps is not None:
        max_steps = args.max_steps
    # inputs and targets are [batch_size, height, width, channels]
    modelNamedtuple = create_model(examples.inputs, examples.targets,
                                   max_steps)

    # undo colorization splitting on images that we use for display/output
    if args.lab_colorization:
        if args.which_direction == "AtoB":
            # inputs is brightness, this will be handled fine as a grayscale image
            # need to augment targets and outputs with brightness
            targets = augment(examples.targets, examples.inputs)
            outputs = augment(modelNamedtuple.outputs, examples.inputs)
            # inputs can be deprocessed normally and handled as if they are single channel
            # grayscale images
            inputs = deprocess(examples.inputs)
        elif args.which_direction == "BtoA":
            # inputs will be color channels only, get brightness from targets
            inputs = augment(examples.inputs, examples.targets)
            targets = deprocess(examples.targets)
            outputs = deprocess(modelNamedtuple.outputs)
        else:
            raise Exception("invalid direction")
    else:
        inputs = deprocess(examples.inputs)
        targets = deprocess(examples.targets)
        outputs = deprocess(modelNamedtuple.outputs)

    def convert(image):
        if args.aspect_ratio != 1.0:
            # upscale to correct aspect ratio
            size = [CROP_SIZE, int(round(CROP_SIZE * args.aspect_ratio))]
            image = tf.image.resize_images(
                image, size=size, method=tf.image.ResizeMethod.BICUBIC)

        return tf.image.convert_image_dtype(image,
                                            dtype=tf.uint8,
                                            saturate=True)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_inputs"):
        converted_inputs = convert(inputs)

    with tf.name_scope("convert_targets"):
        converted_targets = convert(targets)

    with tf.name_scope("convert_outputs"):
        converted_outputs = convert(outputs)

    with tf.name_scope("encode_images"):
        if args.multiple_A:
            # channels = converted_inputs.shape.as_list()[3]
            converted_inputs = tf.split(converted_inputs, 2, 3)[1]
            print('\nconverted_inputs shape: {}\n'.format(
                converted_inputs.shape.as_list()))

        display_fetches = {
            "paths":
            examples.paths,
            "inputs":
            tf.map_fn(tf.image.encode_png,
                      converted_inputs,
                      dtype=tf.string,
                      name="input_pngs"),
            "targets":
            tf.map_fn(tf.image.encode_png,
                      converted_targets,
                      dtype=tf.string,
                      name="target_pngs"),
            "outputs":
            tf.map_fn(tf.image.encode_png,
                      converted_outputs,
                      dtype=tf.string,
                      name="output_pngs"),
        }

    # summaries
    # with tf.name_scope("inputs_summary"):
    #     tf.summary.image("inputs", converted_inputs)

    with tf.name_scope("targets_summary"):
        tf.summary.image("targets", converted_targets)

    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    tf.summary.scalar("discriminator_loss", modelNamedtuple.discrim_loss)
    tf.summary.scalar("generator_loss_GAN", modelNamedtuple.gen_loss_GAN)
    tf.summary.scalar("generator_loss_L1", modelNamedtuple.gen_loss_L1)

    # for var in tf.trainable_variables():
    #     tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in modelNamedtuple.discrim_grads_and_vars + modelNamedtuple.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=5)

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        summary_writer = tf.summary.FileWriter(args.output_dir, sess.graph)
        sess.run(tf.global_variables_initializer())
        print("parameter_count =", sess.run(parameter_count))

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        if args.checkpoint_dir is not None:
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(args.checkpoint_dir)
            saver.restore(sess, checkpoint)

        max_steps = 2 ** 32
        if args.max_epochs is not None:
            max_steps = examples.steps_per_epoch * args.max_epochs
        if args.max_steps is not None:
            max_steps = args.max_steps

        if args.mode == "test":
            # testing
            # at most, process the test data once
            start = time.time()
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                filesets = save_images(results)
                for i, f in enumerate(filesets):
                    print("evaluated image", f["name"])
                index_path = append_index(filesets)
            print("wrote index at", index_path)
            print("rate", (time.time() - start) / max_steps)
        else:
            # training
            start = time.time()

            for step in range(max_steps):

                def should(freq):
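                    # fire every `freq` steps, and always on the final step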
                    return freq > 0 and ((step + 1) % freq == 0
                                         or step == max_steps - 1)

                for _ in range(args.n_dis):
                    sess.run(modelNamedtuple.d_train)

                fetches = {
                    "g_train": modelNamedtuple.g_train,
                    "losses": modelNamedtuple.losses,
                    "global_step": modelNamedtuple.global_step,
                }

                if should(args.progress_freq):
                    fetches["discrim_loss"] = modelNamedtuple.discrim_loss
                    fetches["gen_loss_GAN"] = modelNamedtuple.gen_loss_GAN
                    fetches["gen_loss_L1"] = modelNamedtuple.gen_loss_L1

                if should(args.summary_freq):
                    fetches["summary"] = summary_op

                if should(args.display_freq):
                    fetches["display"] = display_fetches

                # results = sess.run(fetches, options=options, run_metadata=run_metadata)
                results = sess.run(fetches)

                if should(args.summary_freq):
                    # print("recording summary")
                    summary_writer.add_summary(results["summary"],
                                               results["global_step"])

                if should(args.display_freq):
                    # print("saving display images")
                    filesets = save_images(results["display"],
                                           step=results["global_step"])
                    append_index(filesets, step=True)

                if should(args.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] /
                                            examples.steps_per_epoch)
                    train_step = (results["global_step"] -
                                  1) % examples.steps_per_epoch + 1
                    rate = (step + 1) * args.batch_size / (time.time() - start)
                    remaining = (max_steps - step) * args.batch_size / rate

                    print(
                        "progress  epoch %d  step %d  image/sec %0.1f  remaining %dm"
                        % (train_epoch, train_step, rate, remaining / 60))
                    print("discrim_loss", results["discrim_loss"])
                    print("gen_loss_GAN", results["gen_loss_GAN"])
                    print("gen_loss_L1", results["gen_loss_L1"])

                if should(args.save_freq):
                    print("saving model...")
                    saver.save(sess,
                               os.path.join(args.output_dir, "model"),
                               global_step=modelNamedtuple.global_step)

        coord.request_stop()
        coord.join(threads)
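
# A small, framework-free sketch of the progress bookkeeping used in the loop
# above: epoch and in-epoch step are derived from the restored global_step, so
# the numbers stay correct when training resumes from a checkpoint. The
# steps_per_epoch value of 100 below is just an illustrative assumption.
import math

def epoch_and_step(global_step, steps_per_epoch):
    train_epoch = math.ceil(global_step / steps_per_epoch)
    train_step = (global_step - 1) % steps_per_epoch + 1
    return train_epoch, train_step

assert epoch_and_step(100, 100) == (1, 100)  # last step of epoch 1
assert epoch_and_step(101, 100) == (2, 1)    # first step of epoch 2
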
Example #14
def main():
	if tf.__version__.split('.')[0] != "1":
		raise Exception("Tensorflow version 1 required")

	if a.seed is None:
		a.seed = random.randint(0, 2**31 - 1)

	tf.set_random_seed(a.seed)
	np.random.seed(a.seed)
	random.seed(a.seed)

	if not os.path.exists(a.output_dir):
		os.makedirs(a.output_dir)

	if a.mode == "test" or a.mode == "export":
		if a.checkpoint is None:
			raise Exception("checkpoint required for test or export mode")

		# load some options from the checkpoint
		options = {"which_direction", "ngf", "ndf", "lab_colorization"}
		with open(os.path.join(a.checkpoint, "options.json")) as f:
			for key, val in json.loads(f.read()).items():
				if key in options:
					print("loaded", key, "=", val)
					setattr(a, key, val)
		# disable these features in test mode
		a.scale_size = CROP_SIZE
		a.flip = False

	for k, v in a._get_kwargs():
		print(k, "=", v)

	with open(os.path.join(a.output_dir, "options.json"), "w") as f:
		f.write(json.dumps(vars(a), sort_keys=True, indent=4))

	if a.mode == "export":
		# export the generator to a meta graph that can be imported later for standalone generation
		input = tf.placeholder(tf.string, shape=[1])
		input_data = tf.decode_base64(input[0])
		input_image = tf.image.decode_png(input_data)

		# remove alpha channel if present
		input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 4), lambda: input_image[:,:,:3], lambda: input_image)
		# convert grayscale to RGB
		input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1), lambda: tf.image.grayscale_to_rgb(input_image), lambda: input_image)

		input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)
		input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
		batch_input = tf.expand_dims(input_image, axis=0)

		with tf.variable_scope("generator"):
			batch_output = deprocess(create_generator(preprocess(batch_input)[0], 3))

		output_image = tf.image.convert_image_dtype(batch_output, dtype=tf.uint8)[0]
		if a.output_filetype == "png":
			output_data = tf.image.encode_png(output_image)
		elif a.output_filetype == "jpeg":
			output_data = tf.image.encode_jpeg(output_image, quality=80)
		else:
			raise Exception("invalid filetype")
		output = tf.convert_to_tensor([tf.encode_base64(output_data)])

		key = tf.placeholder(tf.string, shape=[1])
		inputs = {
			"key": key.name,
			"input": input.name
		}
		tf.add_to_collection("inputs", json.dumps(inputs))
		outputs = {
			"key":  tf.identity(key).name,
			"output": output.name,
		}
		tf.add_to_collection("outputs", json.dumps(outputs))

		init_op = tf.global_variables_initializer()
		restore_saver = tf.train.Saver()
		export_saver = tf.train.Saver()

		with tf.Session() as sess:
			sess.run(init_op)
			print("loading model from checkpoint")
			checkpoint = tf.train.latest_checkpoint(a.checkpoint)
			restore_saver.restore(sess, checkpoint)
			print("exporting model")
			export_saver.export_meta_graph(filename=os.path.join(a.output_dir, "export.meta"))
			export_saver.save(sess, os.path.join(a.output_dir, "export"), write_meta_graph=False)

		return

	gen_examples = load_examples_generator()
	dis_examples = load_examples_discriminator()

	print("examples count = %d" % gen_examples.count)

	# inputs and targets are [batch_size, height, width, channels]
	model_dis_train = create_model_dis_train(dis_examples.inputs, dis_examples.targets)
	model_gen_train = create_model_gen_train(gen_examples.inputs, gen_examples.targets)
	# model for D0
	model_dis_old = create_model_dis_old(gen_examples.inputs, gen_examples.targets)

	# undo colorization splitting on images that we use for display/output
	inputs = deprocess(gen_examples.inputs)
	targets = deprocess(gen_examples.targets)
	outputs = deprocess(model_gen_train.outputs)

	def convert(image):
		if a.aspect_ratio != 1.0:
			# upscale to correct aspect ratio
			size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]
			image = tf.image.resize_images(image, size=size, method=tf.image.ResizeMethod.BICUBIC)

		return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)

	# reverse any processing on images so they can be written to disk or displayed to user
	with tf.name_scope("convert_inputs"):
		converted_inputs = convert(inputs)

	with tf.name_scope("convert_targets"):
		converted_targets = convert(targets)

	with tf.name_scope("convert_outputs"):
		converted_outputs = convert(outputs)

	with tf.name_scope("encode_images"):
		display_fetches = {
			"paths": gen_examples.paths,
			"inputs": tf.map_fn(tf.image.encode_png, converted_inputs, dtype=tf.string, name="input_pngs"),
			"targets": tf.map_fn(tf.image.encode_png, converted_targets, dtype=tf.string, name="target_pngs"),
			"outputs": tf.map_fn(tf.image.encode_png, converted_outputs, dtype=tf.string, name="output_pngs"),
		}

	# summaries
	with tf.name_scope("inputs_summary"):
		tf.summary.image("inputs", converted_inputs)

	with tf.name_scope("targets_summary"):
		tf.summary.image("targets", converted_targets)

	with tf.name_scope("outputs_summary"):
		tf.summary.image("outputs", converted_outputs)

	with tf.name_scope("predict_real_summary"):
		tf.summary.image("predict_real", tf.image.convert_image_dtype(model_dis_train.predict_real, dtype=tf.uint8))

	with tf.name_scope("predict_fake_summary"):
		tf.summary.image("predict_fake", tf.image.convert_image_dtype(model_dis_train.predict_fake, dtype=tf.uint8))

	tf.summary.scalar("discriminator_loss", model_dis_train.discrim_loss)
	tf.summary.scalar("generator_loss_GAN", model_gen_train.gen_loss_GAN)
	tf.summary.scalar("generator_loss_L1", model_gen_train.gen_loss_L1)

	for var in tf.trainable_variables():
		tf.summary.histogram(var.op.name + "/values", var)

	#for grad, var in model_dis_train.discrim_grads_and_vars + model_gen_train.gen_grads_and_vars:
	#	tf.summary.histogram(var.op.name + "/gradients", grad)

	with tf.name_scope("parameter_count"):
		parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

	# create savers
	d_vars = [var for var in tf.trainable_variables() if var.name.startswith("discriminator")]
	d_saver = tf.train.Saver(d_vars, max_to_keep=2)

	g_vars = [var for var in tf.trainable_variables() if var.name.startswith("generator")]
	g_saver = tf.train.Saver(g_vars, max_to_keep=3)
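	# separate D/G savers let the two-phase schedule below snapshot and restore
	# the discriminator and generator independently of the full-model saver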

	saver = tf.train.Saver(max_to_keep=100)

	logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None

	sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0)

	with sv.managed_session() as sess:
		print("parameter_count =", sess.run(parameter_count))

		if a.checkpoint is not None:
			print("loading model from checkpoint")
			checkpoint = tf.train.latest_checkpoint(a.checkpoint)
			saver.restore(sess, checkpoint)

		max_steps = 2**32
		if a.max_epochs is not None:
			max_steps = gen_examples.steps_per_epoch * a.max_epochs
		if a.max_steps is not None:
			max_steps = a.max_steps

		if a.mode == "test":
			# testing
			# at most, process the test data once
			max_steps = min(gen_examples.steps_per_epoch, max_steps)
			for step in range(max_steps):
				results = sess.run(display_fetches)
				filesets = save_images(results)
				#print(results['inputs'])

				for i, f in enumerate(filesets):
					print("evaluated image", f["name"])
				index_path = append_index(filesets)

			print("wrote index at", index_path)
		else:
			# training
			start = time.time()

			for step in range(max_steps):

				def should(freq):
					return freq > 0 and ((step + 1) % freq == 0 or step == max_steps - 1)

				def save_d(model_name):
					# model_name	[0,1]
					# 0: old model
					# 1: new model
					d_saver.save(sess, os.path.join(a.output_dir, "discriminator", "d"), global_step=model_name)

				def load_d(model_name):
					# model_name	[0,1]
					# 0: old model
					# 1: new model
					checkpoint = os.path.join(a.output_dir, "discriminator", "d-" + str(model_name))
					d_saver.restore(sess, checkpoint)

				def save_g(model_name):
					# model_name	[0,1,2]
					# 0: old model
					# 1: new model
					# 2: temp model
					g_saver.save(sess, os.path.join(a.output_dir, "generator", "g"), global_step=model_name)

				def load_g(model_name):
					# model_name	[0,1,2]
					# 0: old model
					# 1: new model
					# 2: temp model
					checkpoint = os.path.join(a.output_dir, "generator", "g-" + str(model_name))
					g_saver.restore(sess, checkpoint)

				def update_g():
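					# rotate snapshots using slot 2 as scratch: the previous
					# "new" G (slot 1) becomes "old" (slot 0), and the session's
					# current weights are re-saved as the new slot 1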
					save_g(2)
					load_g(1)
					save_g(0)
					load_g(2)
					save_g(1)

				options = None
				run_metadata = None
				if should(a.trace_freq):
					options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
					run_metadata = tf.RunMetadata()

				# two-phase training, starting with the D phase
				if step == 0:
					cur_train_phase = 'd'
					fetches = {
						"train": model_dis_train.train,
						"global_step": sv.global_step,
					}
					predict_fake = np.mean(sess.run(model_dis_train.predict_fake))

					d_old = False
					g_old = False
					first_G = True

				else:
					if cur_train_phase == 'g':
						# G training
						fetches = {
							"train": model_gen_train.train,
							"global_step": sv.global_step,
						}
						predict_fake = np.mean(sess.run(model_gen_train.predict_fake))
						gan_predict_fake = np.mean(sess.run(model_gen_train.gan_predict_fake))

						if should(a.progress_freq):
							fetches["discrim_loss"] = model_gen_train.discrim_loss
							fetches["gen_loss_GAN"] = model_gen_train.gen_loss_GAN
							fetches["gen_loss_L1"] = model_gen_train.gen_loss_L1
							fetches["predict_fake"] = model_gen_train.predict_fake
							fetches["predict_real"] = model_gen_train.predict_real
							fetches["gan_predict_fake"] = model_gen_train.gan_predict_fake
							fetches["gan_predict_real"] = model_gen_train.gan_predict_real


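						# G counts as trained for this phase once the discriminator
						# scores its fakes as real with mean confidence above 0.95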
						if predict_fake > 0.95 and gan_predict_fake > 0.95:
							if first_G:
								print("*** G is trained well @ steps %d***" %step)
								os.system("echo G is trained well @ steps ^"+str(step)+">>"+a.output_dir+"/two_phase_training.txt")
								cur_train_phase = 'd'
								# save Cur_d as Old_d
								save_d(0)

								# save Cur_g as Old_g
								save_g(0)
								save_g(1)

								first_G = False
								d_old = True
								g_old = True


							else:
								if d_old:
									# test G on D_old
									load_d(0)
									predict_fake = np.mean(sess.run(model_dis_old.predict_fake))
									gan_predict_fake = np.mean(sess.run(model_gen_train.gan_predict_fake))

									# if D_old is fooled,
									if predict_fake > 0.9 and gan_predict_fake > 0.9:
										# copy Current_d to D_old
										load_d(1)	# restore Cur_d
										save_d(0)	# save Cur_d to Old_d

										# switch to next training phase
										print("*** G is trained well @ steps %d***" %step)
										update_g()
										os.system("echo G is trained well @ steps ^"+str(step)+">>"+a.output_dir+"/two_phase_training.txt")

										cur_train_phase = 'd'

									# if D_old is not fooled
									else:
										# copy D_old to Current_d
										print("*** G cannot fool D0 @ steps %d, Copy D0 to HERE***" %step)
										save_d(1)
										os.system("echo G cannot fool D0 @ steps ^"+str(step)+" Copy D0 to HERE>>"+a.output_dir+"/two_phase_training.txt")
										# set d_old = False
										d_old = False

								else:
									print("*** G is trained well @ steps %d***" %step)
									update_g()
									os.system("echo G is trained well @ steps ^"+str(step)+">>"+a.output_dir+"/two_phase_training.txt")
									cur_train_phase = 'd'
									d_old = True
					else:
						# D training
						if g_old:
							# randomly train with the old G (about 30% of the time)
							ran = np.random.randint(10)
							if ran < 3:
								# train with old G
								load_g(0)
								fetches = {
									"train": model_dis_train.train,
									"global_step": sv.global_step,
								}
								load_g(1)
							else:
								# train with new G
								fetches = {
									"train": model_dis_train.train,
									"global_step": sv.global_step,
								}
								predict_fake = np.mean(sess.run(model_dis_train.predict_fake))
								predict_real = np.mean(sess.run(model_dis_train.predict_real))

							if should(a.progress_freq):
								fetches["discrim_loss"] = model_dis_train.discrim_loss
								fetches["gen_loss_GAN"] = model_dis_train.gen_loss_GAN
								fetches["gen_loss_L1"] = model_dis_train.gen_loss_L1
								fetches["predict_fake"] = model_dis_train.predict_fake
								fetches["predict_real"] = model_dis_train.predict_real
								fetches["gan_predict_fake"] = model_dis_train.gan_predict_fake
								fetches["gan_predict_real"] = model_dis_train.gan_predict_real


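							# D counts as trained once it scores fakes below 0.05
							# and reals above 0.95 on average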
							if predict_fake < 0.05 and predict_real > 0.95:
								print("*** D is trained well @ steps %d***" %step)
								os.system("echo D is trained well @ steps ^"+str(step)+">>"+a.output_dir+"/two_phase_training.txt")
								cur_train_phase = 'g'
								save_d(1)

						else:
							fetches = {
								"train": model_dis_train.train,
								"global_step": sv.global_step,
							}
							predict_fake = np.mean(sess.run(model_dis_train.predict_fake))
							predict_real = np.mean(sess.run(model_dis_train.predict_real))
							gan_predict_fake = np.mean(sess.run(model_gen_train.gan_predict_fake))
							gan_predict_real = np.mean(sess.run(model_gen_train.gan_predict_real))

							if should(a.progress_freq):
								fetches["discrim_loss"] = model_dis_train.discrim_loss
								fetches["gen_loss_GAN"] = model_dis_train.gen_loss_GAN
								fetches["gen_loss_L1"] = model_dis_train.gen_loss_L1
								fetches["predict_fake"] = model_dis_train.predict_fake
								fetches["predict_real"] = model_dis_train.predict_real
								fetches["gan_predict_fake"] = model_dis_train.gan_predict_fake
								fetches["gan_predict_real"] = model_dis_train.gan_predict_real

							if predict_fake < 0.05 and predict_real > 0.95 and gan_predict_fake < 0.05 and gan_predict_real > 0.95:
								print("*** D is trained well @ steps %d***" %step)
								os.system("echo D is trained well @ steps ^"+str(step)+">>"+a.output_dir+"/two_phase_training.txt")
								cur_train_phase = 'g'
								save_d(1)


				if should(a.summary_freq):
					fetches["summary"] = sv.summary_op

				if should(a.display_freq):
					fetches["display"] = display_fetches

				results = sess.run(fetches, options=options, run_metadata=run_metadata)

				if should(a.summary_freq):
					print("recording summary")
					sv.summary_writer.add_summary(results["summary"], results["global_step"])

				if should(a.display_freq):
					print("saving display images")
					filesets = save_images(results["display"], step=results["global_step"])
					append_index(filesets, step=True)

				if should(a.trace_freq):
					print("recording trace")
					sv.summary_writer.add_run_metadata(run_metadata, "step_%d" % results["global_step"])

				if should(a.progress_freq):
					# global_step will have the correct step count if we resume from a checkpoint
					train_epoch = math.ceil(results["global_step"] / gen_examples.steps_per_epoch)
					train_step = (results["global_step"] - 1) % gen_examples.steps_per_epoch + 1
					rate = (step + 1) * a.batch_size / (time.time() - start)
					remaining = (max_steps - step) * a.batch_size / rate
					print("progress  epoch %d  step %d  image/sec %0.1f  remaining %dm" % (train_epoch, train_step, rate, remaining / 60))
					print("discrim_loss", results["discrim_loss"])
					print("gen_loss_GAN", results["gen_loss_GAN"])
					print("gen_loss_L1", results["gen_loss_L1"])
					print("predict_fake", np.mean(results["predict_fake"]))
					print("predict_real", np.mean(results["predict_real"]))
					print("gan_predict_fake", np.mean(results["gan_predict_fake"]))
					print("gan_predict_real", np.mean(results["gan_predict_real"]))

				if should(a.save_freq):
					print("saving model")
					saver.save(sess, os.path.join(a.output_dir, "model"), global_step=sv.global_step)

				if sv.should_stop():
					break
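
# A stripped-down sketch of the two-phase schedule implemented above: train D
# until it confidently separates real from fake, then train G until it fools
# D, and alternate. The 0.95/0.05 thresholds mirror the code above;
# train_d_step, train_g_step and scores are hypothetical stand-ins for the
# corresponding sess.run calls.
def two_phase_schedule(train_d_step, train_g_step, scores, max_steps):
    phase = 'd'
    for _ in range(max_steps):
        if phase == 'd':
            train_d_step()
            predict_fake, predict_real = scores()
            if predict_fake < 0.05 and predict_real > 0.95:
                phase = 'g'  # D is trained well; switch to training G
        else:
            train_g_step()
            predict_fake, _ = scores()
            if predict_fake > 0.95:
                phase = 'd'  # G fools D; switch back to training D
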
Example #15
def main():
    if tf.__version__.split('.')[0] != "1":
        raise Exception("Tensorflow version 1 required")

    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    if a.mode == "test" or a.mode == "export":
        if a.checkpoint is None:
            raise Exception("checkpoint required for test or export mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(a.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(a, key, val)
        # disable these features in test mode
        a.scale_size = CROP_SIZE
        a.flip = False

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    if a.mode == "export":
        # export the generator to a meta graph that can be imported later for standalone generation
        if a.lab_colorization:
            raise Exception("export not supported for lab_colorization")

        input = tf.placeholder(tf.string, shape=[1])
        input_data = tf.decode_base64(input[0])
        input_image = tf.image.decode_png(input_data)

        # remove alpha channel if present
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 4), lambda: input_image[:,:,:3], lambda: input_image)
        # convert grayscale to RGB
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1), lambda: tf.image.grayscale_to_rgb(input_image), lambda: input_image)

        input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)
        input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
        batch_input = tf.expand_dims(input_image, axis=0)

        with tf.variable_scope("generator"):
            batch_output = deprocess(create_generator(preprocess(batch_input), 3))

        output_image = tf.image.convert_image_dtype(batch_output, dtype=tf.uint8)[0]
        if a.output_filetype == "png":
            output_data = tf.image.encode_png(output_image)
        elif a.output_filetype == "jpeg":
            output_data = tf.image.encode_jpeg(output_image, quality=80)
        else:
            raise Exception("invalid filetype")
        output = tf.convert_to_tensor([tf.encode_base64(output_data)])

        key = tf.placeholder(tf.string, shape=[1])
        inputs = {
            "key": key.name,
            "input": input.name
        }
        tf.add_to_collection("inputs", json.dumps(inputs))
        outputs = {
            "key":  tf.identity(key).name,
            "output": output.name,
        }
        tf.add_to_collection("outputs", json.dumps(outputs))

        init_op = tf.global_variables_initializer()
        restore_saver = tf.train.Saver()
        export_saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            restore_saver.restore(sess, checkpoint)
            print("exporting model")
            export_saver.export_meta_graph(filename=os.path.join(a.output_dir, "export.meta"))
            export_saver.save(sess, os.path.join(a.output_dir, "export"), write_meta_graph=False)

        return

    examples = load_examples()
    print("examples count = %d" % examples.count)

    # inputs and targets are [batch_size, height, width, channels]
    model = create_model(examples.inputs, examples.targets)

    # undo colorization splitting on images that we use for display/output
    if a.lab_colorization:
        if a.which_direction == "AtoB":
            # inputs is brightness, this will be handled fine as a grayscale image
            # need to augment targets and outputs with brightness
            targets = augment(examples.targets, examples.inputs)
            outputs = augment(model.outputs, examples.inputs)
            # inputs can be deprocessed normally and handled as if they are single channel
            # grayscale images
            inputs = deprocess(examples.inputs)
        elif a.which_direction == "BtoA":
            # inputs will be color channels only, get brightness from targets
            inputs = augment(examples.inputs, examples.targets)
            targets = deprocess(examples.targets)
            outputs = deprocess(model.outputs)
        else:
            raise Exception("invalid direction")
    else:
        inputs = deprocess(examples.inputs)
        targets = deprocess(examples.targets)
        outputs = deprocess(model.outputs)

    def convert(image):
        if a.aspect_ratio != 1.0:
            # upscale to correct aspect ratio
            size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]
            image = tf.image.resize_images(image, size=size, method=tf.image.ResizeMethod.BICUBIC)

        return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_inputs"):
        converted_inputs = convert(inputs)

    with tf.name_scope("convert_targets"):
        converted_targets = convert(targets)

    with tf.name_scope("convert_outputs"):
        converted_outputs = convert(outputs)

    with tf.name_scope("encode_images"):
        display_fetches = {
            "paths": examples.paths,
            "inputs": tf.map_fn(tf.image.encode_png, converted_inputs, dtype=tf.string, name="input_pngs"),
            "targets": tf.map_fn(tf.image.encode_png, converted_targets, dtype=tf.string, name="target_pngs"),
            "outputs": tf.map_fn(tf.image.encode_png, converted_outputs, dtype=tf.string, name="output_pngs"),
        }

    # summaries
    with tf.name_scope("inputs_summary"):
        tf.summary.image("inputs", converted_inputs)

    with tf.name_scope("targets_summary"):
        tf.summary.image("targets", converted_targets)

    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    with tf.name_scope("predict_real_summary"):
        tf.summary.image("predict_real", tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8))

    with tf.name_scope("predict_fake_summary"):
        tf.summary.image("predict_fake", tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8))

    tf.summary.scalar("discriminator_loss", model.discrim_loss)
    tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
    tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    saver = tf.train.Saver(max_to_keep=1)

    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)
    with sv.managed_session() as sess:
        print("parameter_count =", sess.run(parameter_count))

        if a.checkpoint is not None:
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            saver.restore(sess, checkpoint)

        max_steps = 2**32
        if a.max_epochs is not None:
            max_steps = examples.steps_per_epoch * a.max_epochs
        if a.max_steps is not None:
            max_steps = a.max_steps

        if a.mode == "test":
            # testing
            # at most, process the test data once
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                filesets = save_images(results)
                for i, f in enumerate(filesets):
                    print("evaluated image", f["name"])
                index_path = append_index(filesets)

            print("wrote index at", index_path)
        else:
            # training
            start = time.time()

            for step in range(max_steps):
                def should(freq):
                    return freq > 0 and ((step + 1) % freq == 0 or step == max_steps - 1)

                options = None
                run_metadata = None
                if should(a.trace_freq):
                    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                fetches = {
                    "train": model.train,
                    "global_step": sv.global_step,
                }

                if should(a.progress_freq):
                    fetches["discrim_loss"] = model.discrim_loss
                    fetches["gen_loss_GAN"] = model.gen_loss_GAN
                    fetches["gen_loss_L1"] = model.gen_loss_L1

                if should(a.summary_freq):
                    fetches["summary"] = sv.summary_op

                if should(a.display_freq):
                    fetches["display"] = display_fetches

                results = sess.run(fetches, options=options, run_metadata=run_metadata)

                if should(a.summary_freq):
                    print("recording summary")
                    sv.summary_writer.add_summary(results["summary"], results["global_step"])

                if should(a.display_freq):
                    print("saving display images")
                    filesets = save_images(results["display"], step=results["global_step"])
                    append_index(filesets, step=True)

                if should(a.trace_freq):
                    print("recording trace")
                    sv.summary_writer.add_run_metadata(run_metadata, "step_%d" % results["global_step"])

                if should(a.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] / examples.steps_per_epoch)
                    train_step = (results["global_step"] - 1) % examples.steps_per_epoch + 1
                    rate = (step + 1) * a.batch_size / (time.time() - start)
                    remaining = (max_steps - step) * a.batch_size / rate
                    print("progress  epoch %d  step %d  image/sec %0.1f  remaining %dm" % (train_epoch, train_step, rate, remaining / 60))
                    print("discrim_loss", results["discrim_loss"])
                    print("gen_loss_GAN", results["gen_loss_GAN"])
                    print("gen_loss_L1", results["gen_loss_L1"])

                if should(a.save_freq):
                    print("saving model")
                    saver.save(sess, os.path.join(a.output_dir, "model"), global_step=sv.global_step)

                if sv.should_stop():
                    break
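
# A minimal client-side sketch of using the meta graph produced by the
# "export" mode above. The paths ("export/export.meta", "export/export") and
# file names ("input.png", "output.png") are assumptions; the
# "inputs"/"outputs" collections and the web-safe base64 framing match the
# graph built in the export branch.
import base64
import json
import tensorflow as tf

with tf.Session() as sess:
    saver = tf.train.import_meta_graph("export/export.meta")
    saver.restore(sess, "export/export")
    inputs = json.loads(tf.get_collection("inputs")[0])
    outputs = json.loads(tf.get_collection("outputs")[0])

    with open("input.png", "rb") as f:
        # tf.decode_base64 expects web-safe (URL-safe) base64
        input_b64 = base64.urlsafe_b64encode(f.read())

    result = sess.run(outputs["output"], feed_dict={inputs["input"]: [input_b64]})

    # tf.encode_base64 omits padding, so restore it before decoding
    encoded = result[0] + b"=" * (-len(result[0]) % 4)
    with open("output.png", "wb") as f:
        f.write(base64.urlsafe_b64decode(encoded))
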
Example #17
# -*- coding: utf-8 -*-

import tensorflow as tf
import timeit

tf.enable_eager_execution()

print(tf.add(1, 2))
print(tf.add([1, 2], [3, 4]))
print(tf.square(5))
print(tf.reduce_sum([1, 2, 3]))
print(tf.encode_base64("Hello World!"))

# Operator overloading is also supported
print(tf.square(2) + tf.square(3))

a = tf.constant([[1]])
print(a)
b = tf.constant([[2, 3]])
print(b)
x = tf.matmul(a, b)
print(x)
print(x.shape)
print(x.dtype)

import numpy as np

nd_array = np.ones([3, 3])
print(nd_array)
print("TensorFlow operations convert NumPy arrays to Tensors automatically")
ts_array = tf.multiply(nd_array, 42)
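
# Conversions also work in the other direction: NumPy operations accept
# Tensors directly, and .numpy() explicitly converts a Tensor to an ndarray
# (both are eager-execution behaviors in TF 1.x).
print(np.add(ts_array, 1))
print(ts_array.numpy())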