Example #1
def main():
    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    input = tf.placeholder(tf.string, shape=[1])
    key = tf.placeholder(tf.string, shape=[1])

    in_data = tf.decode_base64(input[0])
    img = tf.image.decode_png(in_data)
    img = tf.image.rgb_to_grayscale(img)
    out_data = tf.image.encode_png(img)
    output = tf.convert_to_tensor([tf.encode_base64(out_data)])

    variable_to_allow_model_saving = tf.Variable(1, dtype=tf.float32)

    inputs = {
        "key": key.name,
        "input": input.name
    }
    tf.add_to_collection("inputs", json.dumps(inputs))
    outputs = {
        "key":  tf.identity(key).name,
        "output": output.name,
    }
    tf.add_to_collection("outputs", json.dumps(outputs))

    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)
        saver = tf.train.Saver()
        saver.export_meta_graph(filename=os.path.join(a.output_dir, "export.meta"))
        saver.save(sess, os.path.join(a.output_dir, "export"), write_meta_graph=False)
    
    print("exported example model to %s" % a.output_dir)
Example #2
 def _decode_jpg(image):
   img_buf = BytesIO()
   Image.new('RGB', (16, 16)).save(img_buf, 'jpeg')
   default_image_string = base64.urlsafe_b64encode(img_buf.getvalue())
   image = tf.where(tf.equal(image, ''), default_image_string, image)
   image = tf.decode_base64(image)
   image = tf.image.decode_jpeg(image, channels=3)
   image = tf.reshape(image, [-1])
   image = tf.reduce_max(image)
   return image
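
Note that tf.decode_base64 only accepts the *web-safe* alphabet ('-' and '_' rather than '+' and '/'), which is why the default string above is built with base64.urlsafe_b64encode. For payloads that arrive in the standard alphabet, a small helper can re-encode them (an illustrative sketch):

import base64

def to_websafe_b64(standard_b64):
    """Re-encode a standard base64 string into the web-safe alphabet."""
    raw = base64.b64decode(standard_b64)
    return base64.urlsafe_b64encode(raw).decode("ascii")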
Example #3
    def _decode_and_resize(image_str_tensor):
      """Decodes jpeg string, resizes it and returns a uint8 tensor."""

      # These constants are set by Inception v3's expectations.
      height = 299
      width = 299
      channels = 3

      image = tf.where(tf.equal(image_str_tensor, ''), IMAGE_DEFAULT_STRING, image_str_tensor)
      image = tf.decode_base64(image)
      image = tf.image.decode_jpeg(image, channels=channels)
      image = tf.expand_dims(image, 0)
      image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
      image = tf.squeeze(image, axis=[0])
      image = tf.cast(image, dtype=tf.uint8)
      return image
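
_decode_and_resize operates on one scalar string at a time, so over a batch it is typically applied with tf.map_fn; a sketch (the placeholder is an assumption, not taken from the original code):

images_b64 = tf.placeholder(tf.string, shape=[None])
images = tf.map_fn(_decode_and_resize, images_b64,
                   back_prop=False, dtype=tf.uint8)  # [batch, 299, 299, 3]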
Example #4
def export_model():
    # export the generator to a meta graph that can be imported later for standalone generation
    if args.lab_colorization:
        raise Exception("export not supported for lab_colorization")
    input = tf.placeholder(tf.string, shape=[1])
    input_data = tf.decode_base64(input[0])
    input_image = tf.image.decode_png(input_data)
    # remove alpha channel if present
    input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 4), lambda: input_image[:, :, :3], lambda: input_image)
    # convert grayscale to RGB
    input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1), lambda: tf.image.grayscale_to_rgb(input_image),
                          lambda: input_image)
    input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)
    input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
    batch_input = tf.expand_dims(input_image, axis=0)
    with tf.variable_scope("generator"):
        batch_output = deprocess(create_generator(preprocess(batch_input), 3))
    output_image = tf.image.convert_image_dtype(batch_output, dtype=tf.uint8)[0]
    if args.output_filetype == "png":
        output_data = tf.image.encode_png(output_image)
    elif args.output_filetype == "jpeg":
        output_data = tf.image.encode_jpeg(output_image, quality=80)
    else:
        raise Exception("invalid filetype")
    output = tf.convert_to_tensor([tf.encode_base64(output_data)])
    key = tf.placeholder(tf.string, shape=[1])
    inputs = {
        "key": key.name,
        "input": input.name
    }
    tf.add_to_collection("inputs", json.dumps(inputs))
    outputs = {
        "key": tf.identity(key).name,
        "output": output.name,
    }
    tf.add_to_collection("outputs", json.dumps(outputs))
    init_op = tf.global_variables_initializer()
    restore_saver = tf.train.Saver()
    export_saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init_op)
        print("loading model from checkpoint")
        checkpoint = tf.train.latest_checkpoint(args.checkpoint)
        restore_saver.restore(sess, checkpoint)
        print("exporting model")
        export_saver.export_meta_graph(filename=os.path.join(args.output_dir, "export.meta"))
        export_saver.save(sess, os.path.join(args.output_dir, "export"), write_meta_graph=False)
Example #5
def handle():
    image_data = tf.decode_base64(request.data)
    array = tf.image.decode_jpeg(image_data, channels=3)

    #     array = tf.Print(array, [array], message="Print: ")

    #     array.eval(session=sess)
    #     b = tf.add(array, array).eval(session=sess)

    resized = tf.image.resize_images([array], [64, 64])

    #     resized = tf.Print(resized, [resized], message="Print: ")

    #     resized.eval(session=sess)
    #     c = tf.add(resized, resized).eval(session=sess)

    return match(resized)
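
Note that `resized` is still a symbolic tensor at this point; if `match` does not run it, evaluating the pixels would look roughly like this sketch:

with tf.Session() as sess:
    pixels = sess.run(resized)  # float32, shape (1, 64, 64, 3)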
Example #6
def main():
    mnist = input_data.read_data_sets("./input_data")

    x = tf.placeholder(tf.float32, [None, 784])
    logits = inference(x)
    y_ = tf.placeholder(tf.int64, [None])
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_,
                                                           logits=logits)
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    init_op = tf.global_variables_initializer()

    # Define op for model signature
    tf.get_variable_scope().reuse_variables()

    model_base64_placeholder = tf.placeholder(shape=[None],
                                              dtype=tf.string,
                                              name="model_input_b64_images")
    model_base64_string = tf.decode_base64(model_base64_placeholder)
    model_base64_input = tf.map_fn(lambda x: tf.image.resize_images(
        tf.image.decode_jpeg(x, channels=1), [28, 28]),
                                   model_base64_string,
                                   dtype=tf.float32)
    model_base64_reshape_input = tf.reshape(model_base64_input, [-1, 28 * 28])
    model_logits = inference(model_base64_reshape_input)
    model_predict_softmax = tf.nn.softmax(model_logits)
    model_predict = tf.argmax(model_predict_softmax, 1)

    with tf.Session() as sess:

        sess.run(init_op)

        for i in range(938):
            batch_xs, batch_ys = mnist.train.next_batch(64)
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

        # Export image model
        export_dir = "./model/1"
        print("Try to export the model in {}".format(export_dir))
        tf.saved_model.simple_save(sess,
                                   export_dir,
                                   inputs={"images": model_base64_placeholder},
                                   outputs={
                                       "predict": model_predict,
                                       "probability": model_predict_softmax
                                   })
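
A sketch of loading the SavedModel written above and classifying one image. The JPEG path is an assumption; the signature key is the default written by tf.saved_model.simple_save:

import base64

with tf.Session(graph=tf.Graph()) as sess:
    meta = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], "./model/1")
    sig = meta.signature_def["serving_default"]
    with open("digit.jpg", "rb") as f:  # hypothetical grayscale digit image
        b64 = base64.urlsafe_b64encode(f.read()).decode("ascii")
    predict = sess.run(sig.outputs["predict"].name,
                       feed_dict={sig.inputs["images"].name: [b64]})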
Example #7
        def _decode_and_resize(image_str_tensor):
            """Decodes jpeg string, resizes it and returns a uint8 tensor."""

            # These constants are set by Inception v3's expectations.
            height = 299
            width = 299
            channels = 3

            image = tf.where(tf.equal(image_str_tensor, ''),
                             IMAGE_DEFAULT_STRING, image_str_tensor)
            image = tf.decode_base64(image)
            image = tf.image.decode_jpeg(image, channels=channels)
            image = tf.expand_dims(image, 0)
            image = tf.image.resize_bilinear(image, [height, width],
                                             align_corners=False)
            image = tf.squeeze(image, axis=[0])
            image = tf.cast(image, dtype=tf.uint8)
            return image
Example #8
def read_tensor_from_imageb64(img_b64,
                              input_height=224,
                              input_width=224,
                              input_mean=128,
                              input_std=128):

    image_reader = tf.image.decode_jpeg(tf.decode_base64(img_b64),
                                        channels=3,
                                        name="jpeg_reader")

    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander,
                                       [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    sess = tf.Session()
    result = sess.run(normalized)
    return result
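
An illustrative call (the image path is an assumption). Note that the helper builds a fresh graph and session on every call, which is convenient for one-off use but wasteful inside a loop:

import base64

with open("photo.jpg", "rb") as f:
    img_b64 = base64.urlsafe_b64encode(f.read()).decode("ascii")
batch = read_tensor_from_imageb64(img_b64)  # numpy array, shape (1, 224, 224, 3)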
Example #9
 def parsing_fn(self, example_proto):
     """tf.data.Dataset parsing function."""
     # Parse
     parsed_features = self.raw_parsing_fn(example_proto)
     # Reshape
     parsed_features['image'] = decode_raw_image(parsed_features['image'],
                                                 (32, 32, 3),
                                                 image_size=self.image_size)
     parsed_features['image'] = tf.identity(parsed_features['image'],
                                            name='image')
     parsed_features['class'] = tf.to_int32(parsed_features['class'])
     parsed_features['coarse_class'] = tf.to_int32(
         parsed_features['coarse_class'])
     parsed_features['coarse_class_str'] = tf.decode_base64(
         parsed_features['coarse_class_str'])
     # Return
     if self.verbose: print_records(parsed_features)
     return parsed_features
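
Parsing methods like this are typically attached to a TFRecord dataset via map; a sketch, where `parser` stands for an instance of the class this method belongs to and the file name and batch size are assumptions:

dataset = (tf.data.TFRecordDataset("train.tfrecords")
           .map(parser.parsing_fn, num_parallel_calls=4)
           .batch(32))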
Example #10
 def parsing_fn(self, example_proto):
     """tf.data.Dataset parsing function."""
     # Parse
     parsed_features = self.raw_parsing_fn(example_proto)
     # Reshape
     if self.save_image_in_records:
         image = decode_raw_image(parsed_features['image'], (227, 227, 3), image_size=self.image_size)
     else:
         filename = tf.decode_base64(parsed_features['image'])
         parsed_features['image_path'] = tf.identity(filename, name='image_path')
         image = decode_relative_image(filename, self.image_dir, image_size=self.image_size)
     parsed_features['image'] = tf.identity(image, name='image')
     # Class
     parsed_features['class_content'] = tf.to_int32(parsed_features['class_content'])
     parsed_features['class_style'] = tf.to_int32(parsed_features['class_style'])
     # Return
     if self.verbose: print_records(parsed_features)
     return parsed_features
Example #11
def base64_decode_img(b64code):
    """
    :param b64code:
    :return:
    """
    # img_base64_data = base64.b64decode(b64code)
    # img_nparr = np.fromstring(img_base64_data, np.uint8)
    # img = cv2.imdecode(img_nparr, cv2.COLOR_BGR2RGB)
    # print('shape of img:{}'.format(img.shape))

    base64_tensor = tf.convert_to_tensor(b64code, dtype=tf.string)
    img_str = tf.decode_base64(base64_tensor)
    img = tf.image.decode_image(img_str, channels=3)

    with tf.Session() as sess:
        # img_str_result = sess.run([img_str])[0]
        # print('img_str_result:{}'.format(img_str_result))
        img_value = sess.run([img])[0]
        print(img_value.shape)
Example #12
def main():
  mnist = input_data.read_data_sets("./input_data")

  x = tf.placeholder(tf.float32, [None, 784])
  logits = inference(x)
  y_ = tf.placeholder(tf.int64, [None])
  cross_entropy = tf.losses.sparse_softmax_cross_entropy(
      labels=y_, logits=logits)
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  init_op = tf.global_variables_initializer()

  # Define op for model signature
  tf.get_variable_scope().reuse_variables()

  model_base64_placeholder = tf.placeholder(
      shape=[None], dtype=tf.string, name="model_input_b64_images")
  model_base64_string = tf.decode_base64(model_base64_placeholder)
  model_base64_input = tf.map_fn(
      lambda x: tf.image.resize_images(
          tf.image.decode_jpeg(x, channels=1), [28, 28]),
      model_base64_string, dtype=tf.float32)
  model_base64_reshape_input = tf.reshape(model_base64_input, [-1, 28 * 28])
  model_logits = inference(model_base64_reshape_input)
  model_predict_softmax = tf.nn.softmax(model_logits)
  model_predict = tf.argmax(model_predict_softmax, 1)

  with tf.Session() as sess:

    sess.run(init_op)

    for i in range(938):
      batch_xs, batch_ys = mnist.train.next_batch(64)
      sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Export image model
    export_dir = "./model/1"
    print("Try to export the model in {}".format(export_dir))
    tf.saved_model.simple_save(
        sess,
        export_dir,
        inputs={"images": model_base64_placeholder},
        outputs={
            "predict": model_predict,
            "probability": model_predict_softmax
        })
Example #13
def export_generator():
    input_src = tf.placeholder(tf.string, shape=[1])
    input_data = tf.decode_base64(input_src[0])
    input_image = tf.image.decode_png(input_data)
    input_image = input_image[:, :, :3]
    input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)
    input_image.set_shape([args['cropping'], args['cropping'], 3])
    batch_input = tf.expand_dims(input_image, axis=0)

    with tf.variable_scope('generator') as scope:
        generator = gp.Generator(ds.preprocess(batch_input), 3, args['ngf'])
        batch_output = ds.deprocess(generator.build())

    output_image = tf.image.convert_image_dtype(batch_output,
                                                dtype=tf.uint8)[0]

    output_data = tf.image.encode_jpeg(output_image, quality=100)

    output = tf.convert_to_tensor([tf.encode_base64(output_data)])
    key = tf.placeholder(tf.string, shape=[1])
    inputs = {"key": key.name, "input": input_src.name}
    tf.add_to_collection("inputs", json.dumps(inputs))
    outputs = {"key": tf.identity(key).name, "output": output.name}
    tf.add_to_collection("outputs", json.dumps(outputs))

    init_op = tf.global_variables_initializer()
    restore_saver = tf.train.Saver()
    export_saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(init_op)
        print("loading model")  # Loading previous model parameters
        checkpoint = tf.train.latest_checkpoint(args['checkpoint'])
        restore_saver.restore(sess, checkpoint)
        print("exporting model")
        export_saver.export_meta_graph(
            filename=os.path.join(args['results_dir'], "export.meta"))
        export_saver.save(sess,
                          os.path.join(args['results_dir'], "export"),
                          write_meta_graph=False)
Example #14
 def parsing_fn(self, example_proto):
     """tf.data.Dataset parsing function."""
     # Parse
     parsed_features = self.raw_parsing_fn(example_proto)
     # Reshape
     if self.save_image_in_records:
         image = decode_raw_image(parsed_features['image'], (500, 500, 3),
                                  image_size=None)
     else:
         filename = tf.decode_base64(parsed_features['image'])
         parsed_features['image_path'] = tf.identity(filename,
                                                     name='image_path')
         image = decode_relative_image(filename,
                                       self.image_dir,
                                       image_size=None)
     # Crop image to bounding box
     if self.crop_images:
         bounding_box = parsed_features['bounding_box']
         if self.keep_crop_aspect_ratio:
             bounding_box = make_square_bounding_box(bounding_box,
                                                     mode='max')
         image = tf.image.crop_and_resize(
             tf.expand_dims(image, axis=0),
             tf.expand_dims(bounding_box, axis=0), [0], (500, 500))[0]
         del parsed_features['bounding_box']
     # Resize image after cropping
     if self.image_size is not None:
         image = tf.image.resize_images(image,
                                        (self.image_size, self.image_size))
     parsed_features['image'] = tf.identity(image, name='image')
     # One-hot encode each attribute
     if self.one_hot_attributes:
         for key, num_values in self.attributes_values_list:
             parsed_features[key] = tf.one_hot(parsed_features[key],
                                               num_values,
                                               axis=-1,
                                               name='one_hot_%s' % key)
     # Return
     if self.verbose: print_records(parsed_features)
     return parsed_features
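
tf.image.crop_and_resize, used above for the bounding-box crop, expects boxes as [y1, x1, y2, x2] in coordinates normalized to [0, 1]; for instance, this crops the central half of an image (a sketch reusing the names above):

box = tf.constant([[0.25, 0.25, 0.75, 0.75]])
crop = tf.image.crop_and_resize(tf.expand_dims(image, axis=0),
                                box, [0], (500, 500))[0]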
Example #15
def ilsvrc_parsing_fn(example_proto,
                      image_size=None,
                      crop_size=None,
                      image_dir=''):
    """ILSVRC parsing function. Load image from image_dir and resize them to `image_size`"""
    features = {
        'image': tf.FixedLenFeature((), tf.string),
        'class': tf.FixedLenFeature((), tf.int64),
        'class_name': tf.FixedLenFeature((), tf.string)
    }
    parsed_features = tf.parse_single_example(example_proto, features)
    if crop_size is None or image_size is None:
        image = decode_relative_image(parsed_features['image'],
                                      image_dir,
                                      image_size=image_size)
    else:
        image = decode_relative_image(parsed_features['image'],
                                      image_dir,
                                      image_size=None)
        image = central_crop(image, image_size, crop_size)
    class_id = tf.to_int32(parsed_features['class'])
    class_name = tf.decode_base64(parsed_features['class_name'])
    return {'image': image, 'class': class_id, 'class_name': class_name}
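
The tf.decode_base64 on 'class_name' implies that string features were stored web-safe base64-encoded when the records were written; the encode side would look roughly like this sketch (the label value is an assumption):

import base64
name_feature = tf.train.Feature(bytes_list=tf.train.BytesList(
    value=[base64.urlsafe_b64encode(b"n01440764")]))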
Example #16
def age_predict(request):
  import tensorflow as tf
  import os
  import numpy as np
  import base64
  import json
  model = tf.keras.models.load_model(os.path.abspath('./my_model.h5'))
  model.compile(optimizer=tf.train.AdamOptimizer(), 
              loss="mean_absolute_error",
              metrics=["mean_absolute_error"])

  # * Process the base64 string sent by the front end: strip the data-URL
  # * header and convert it to the urlsafe form that TensorFlow requires; see
  # * https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/decode_base64?hl=en
  img_base64_header, img_base64 = json.loads(request.body)['base64_img'].split(",", 1)
  raw = base64.decodebytes(img_base64.encode('utf-8'))
  img_base64_websafe = base64.urlsafe_b64encode(raw)

  # * Convert the base64 string to a tensor, then feed it to the model
  img_raw = tf.decode_base64(img_base64_websafe)
  image_tensor = tf.cast(
      tf.image.resize_images(tf.io.decode_jpeg(img_raw, channels=3), [192, 192]),
      tf.float32)
  image = tf.expand_dims(image_tensor, axis=0)
  
  age = model.predict(image, steps=1)
  return HttpResponse(age) 
Example #17
def export_inception_with_base64_decode(model, hparams):
    from keras.models import Model
    from keras.layers import Dense

    print("check gpu")
    print(K.tensorflow_backend._get_available_gpus())

    num_classes = 9

    # Intermediate layer
    print('Defining the model')
    intermediate_layer_model = Model(inputs=model.input, outputs=model.layers[311].output)
    x = intermediate_layer_model.output
    x = Dense(1024, activation='relu', name='dense_relu')(x)
    predictions = Dense(num_classes, activation='softmax', name='dense_softmax')(x)
    transfer_model = Model(inputs=intermediate_layer_model.input, outputs=predictions)
    for layer in transfer_model.layers:
        layer.trainable = False

    # Unfreeze the last layers, so that only these layers are trainable.
    transfer_model.layers[312].trainable = True
    transfer_model.layers[313].trainable = True

    print('Training ...')
    transfer_model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])

    #  flow
    from keras.preprocessing.image import ImageDataGenerator
    datagen = ImageDataGenerator(validation_split=0.25)

    train_generator = keras_gs.flow_from_google_storage(datagen,
        hparams.data_project, hparams.data_bucket, hparams.data_path,
        subset="training",
        target_size=(299, 299),
        batch_size=hparams.batch_size)

    validation_generator = keras_gs.flow_from_google_storage(datagen,
        hparams.data_project, hparams.data_bucket, hparams.data_path,
        subset="validation",
        target_size=(299, 299),
        batch_size=hparams.batch_size)

    history = transfer_model.fit_generator(
        train_generator,
        steps_per_epoch=hparams.steps_per_epoch,
        epochs=hparams.num_epochs,
        validation_data=validation_generator,
        validation_steps=hparams.validation_steps)

    acc = history.history['acc']
    loss = history.history['loss']
    print('Loss {}, Accuracy {}'.format(loss, acc))

    print('Exporting ...')
    sess = K.get_session()
    g_trans = sess.graph
    g_trans_def = graph_util.convert_variables_to_constants(sess,
                                                            g_trans.as_graph_def(),
                                                            [transfer_model.output.name.replace(':0', '')])

    # Step 1 : Build a graph that converts image
    with tf.Graph().as_default() as g_input:
        input_b64 = tf.placeholder(
            shape=(1,),
            dtype=tf.string,
            name='input')
        input_bytes = tf.decode_base64(input_b64[0])
        img = tf.image.decode_image(input_bytes)
        image_f = tf.image.convert_image_dtype(img, dtype=tf.float32)
        input_image = tf.expand_dims(image_f, axis=0)
        tf.identity(input_image, name='input_image')

    # Convert to GraphDef
    g_input_def = g_input.as_graph_def()

    with tf.Graph().as_default() as g_combined:
        x = tf.placeholder(tf.string, name="input_b64")

        im, = tf.import_graph_def(g_input_def,
                                  input_map={'input:0': x},
                                  return_elements=["input_image:0"])

        pred, = tf.import_graph_def(g_trans_def,
                                    input_map={
                                        transfer_model.input.name: im,
                                        'batch_normalization_1/keras_learning_phase:0': False
                                    },
                                    return_elements=[transfer_model.output.name])

        with tf.Session() as sess2:
            inputs = {"inputs": tf.saved_model.utils.build_tensor_info(x)}
            outputs = {"outputs": tf.saved_model.utils.build_tensor_info(pred)}
            signature = tf.saved_model.signature_def_utils.build_signature_def(
                inputs=inputs,
                outputs=outputs,
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
            )

            # save as SavedModel
            sess2.run(tf.global_variables_initializer())
            b = tf.saved_model.builder.SavedModelBuilder(os.path.join(hparams.job_dir, "model"))
            b.add_meta_graph_and_variables(sess2,
                                           [tf.saved_model.tag_constants.SERVING],
                                           signature_def_map={'serving_default': signature})
            b.save()
Example #18
    def decode_and_process(base64):
        _bytes = tf.decode_base64(base64)
        _image = __tf_jpeg_process(_bytes)

        return _image
Example #19
def run(target, is_chief, job_name, a):
    output_dir = "./export"

    if tf.__version__.split('.')[0] != "1":
        raise Exception("Tensorflow version 1 required")

    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    for k, v in a._get_kwargs():
        print(k, "=", v)

    if a.checkpoint is None:
        raise Exception("checkpoint required for test mode")

    # load some options from the checkpoint
    # disable these features in test mode
    a.scale_size = CROP_SIZE
    a.flip = False

    input = tf.placeholder(tf.string, shape=[1])
    input_data = tf.decode_base64(input[0])
    input_image = tf.image.decode_png(input_data)

    # remove alpha channel if present
    input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 4),
                          lambda: input_image[:, :, :3], lambda: input_image)
    # convert grayscale to RGB
    input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1),
                          lambda: tf.image.grayscale_to_rgb(input_image),
                          lambda: input_image)

    input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)
    input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
    batch_input = tf.expand_dims(input_image, axis=0)

    with tf.variable_scope("generator"):
        batch_output = model.deprocess(
            model.create_generator(a.num_generator_filters,
                                   model.preprocess(batch_input), 3))

    output_image = tf.image.convert_image_dtype(batch_output,
                                                dtype=tf.uint8)[0]
    if a.output_filetype == "jpeg":
        output_data = tf.image.encode_jpeg(output_image, quality=80)
    else:
        output_data = tf.image.encode_png(output_image)

    output = tf.convert_to_tensor([tf.encode_base64(output_data)])

    key = tf.placeholder(tf.string, shape=[1])
    inputs = {"key": key.name, "input": input.name}
    tf.add_to_collection("inputs", json.dumps(inputs))
    outputs = {
        "key": tf.identity(key).name,
        "output": output.name,
    }
    tf.add_to_collection("outputs", json.dumps(outputs))

    init_op = tf.global_variables_initializer()
    restore_saver = tf.train.Saver()
    export_saver = tf.train.Saver()

    with tf.Session() as sess:
        print("monitored session created.")
        sess.run(init_op)
        print("loading model from checkpoint")
        checkpoint = tf.train.latest_checkpoint(a.checkpoint)
        restore_saver.restore(sess, checkpoint)
        # ready to process image
        print("exporting model")
        export_saver.export_meta_graph(
            filename=os.path.join(output_dir, "export.meta"))
        export_saver.save(sess,
                          os.path.join(output_dir, "export"),
                          write_meta_graph=False)
Example #20
def main():
    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    if a.mode == "test" or a.mode == "export":
        if a.checkpoint is None:
            raise Exception("checkpoint required for test mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(a.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(a, key, val)

        a.flip = False

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    if a.mode == "export":
        # export the generator to a meta graph that can be imported later for standalone generation
        if a.lab_colorization:
            raise Exception("export not supported for lab_colorization")

        input = tf.placeholder(tf.string, shape=[1])
        input_data = tf.decode_base64(input[0])
        input_image = tf.image.decode_png(input_data)

        # remove alpha channel if present
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 4), lambda: input_image[:, :, :3], lambda: input_image)
        # convert grayscale to RGB
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1), lambda: tf.image.grayscale_to_rgb(input_image), lambda: input_image)

        input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)
        # commented by (kjh)
        # input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
        batch_input = tf.expand_dims(input_image, axis=0)

        with tf.variable_scope("generator"):
            batch_output = deprocess(create_generator(preprocess(batch_input), 3))

        output_image = tf.image.convert_image_dtype(batch_output, dtype=tf.uint8)[0]
        if a.output_filetype == "png":
            output_data = tf.image.encode_png(output_image)
        elif a.output_filetype == "jpeg":
            output_data = tf.image.encode_jpeg(output_image, quality=80)
        else:
            raise Exception("invalid filetype")
        output = tf.convert_to_tensor([tf.encode_base64(output_data)])

        key = tf.placeholder(tf.string, shape=[1])
        inputs = {
            "key": key.name,
            "input": input.name
        }
        tf.add_to_collection("inputs", json.dumps(inputs))
        outputs = {
            "key":  tf.identity(key).name,
            "output": output.name,
        }
        tf.add_to_collection("outputs", json.dumps(outputs))

        init_op = tf.global_variables_initializer()
        restore_saver = tf.train.Saver()
        export_saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            restore_saver.restore(sess, checkpoint)
            print("exporting model")
            export_saver.export_meta_graph(filename=os.path.join(a.output_dir, "export.meta"))
            export_saver.save(sess, os.path.join(a.output_dir, "export"), write_meta_graph=False)

        return

    img = tf.placeholder(tf.float32)
    input_img, target_img, segment_img = load_examples(img)

    lbs = tf.placeholder(tf.float32)
    lbsw = tf.placeholder(tf.float32)
    bboxt = tf.placeholder(tf.float32)
    bboxlw = tf.placeholder(tf.float32)

    steps_per_epoch = int(N / a.batch_size)
    print("examples count = %d" % N)

    # inputs and targets are [batch_size, height, width, channels]
    model = create_model(input_img, target_img, segment_img, lbs, lbsw, bboxt, bboxlw)

    inputs = deprocess(input_img)
    targets = deprocess(target_img)
    outputs = deprocess(model.outputs)

    def convert(image):
        # commented by (kjh)
        # if a.aspect_ratio != 1.0:
        #     # upscale to correct aspect ratio
        #     size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]
        image = tf.image.resize_images(image, size=[512, 640], method=tf.image.ResizeMethod.BICUBIC)
        return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_inputs"):
        converted_inputs = convert(inputs)

    with tf.name_scope("convert_targets"):
        converted_targets = convert(targets)

    with tf.name_scope("convert_outputs"):
        converted_outputs = convert(outputs)

    with tf.name_scope("convert_segment"):
        converted_segment = convert(model.seg_result)

    with tf.name_scope("encode_images"):
        display_fetches = {
            # "paths": examples.paths,
            "inputs": tf.map_fn(tf.image.encode_png, converted_inputs, dtype=tf.string, name="input_pngs"),
            "targets": tf.map_fn(tf.image.encode_png, converted_targets, dtype=tf.string, name="target_pngs"),
            "outputs": tf.map_fn(tf.image.encode_png, converted_outputs, dtype=tf.string, name="output_pngs"),
            "segment": tf.map_fn(tf.image.encode_png, converted_segment, dtype=tf.string, name="segment_pngs"),
        }

    # summaries
    with tf.name_scope("inputs_summary"):
        tf.summary.image("inputs", converted_inputs)

    with tf.name_scope("targets_summary"):
        tf.summary.image("targets", converted_targets)

    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    with tf.name_scope("segment_summary"):
        tf.summary.image("segment", converted_segment)

    with tf.name_scope("predict_real_summary"):
        tf.summary.image("predict_real", tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8))

    with tf.name_scope("predict_fake_summary"):
        tf.summary.image("predict_fake", tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8))

    tf.summary.scalar("discriminator_loss_GAN", model.discrim_loss_GAN)
    tf.summary.scalar("discriminator_loss_segment", model.discrim_loss_segment)
    tf.summary.scalar("discriminator_loss_cls", model.discrim_loss_cls)
    tf.summary.scalar("discriminator_loss_bbox", model.discrim_loss_bbox)
    tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
    tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    # max_to_keep: how many latest models will you save, to save all of them, set it to 'None' (kjh)
    saver = tf.train.Saver(max_to_keep=None)

    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)
    with sv.managed_session() as sess:
        print("parameter_count =", sess.run(parameter_count))

        # if a.checkpoint is not None:
            # print("loading model from checkpoint")
            # checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            # saver.restore(sess, checkpoint)
            # saver.restore(sess, 'D:/pix2pix/ped_train/model-300000')

        max_steps = 2**32
        if a.max_epochs is not None:
            max_steps = steps_per_epoch * a.max_epochs
        if a.max_steps is not None:
            max_steps = a.max_steps

        if a.mode == "test":
            # testing
            # at most, process the test data once
            for m in range(400000 // a.save_freq):
                if m == 400000 // a.save_freq - 1:
                    checkpoint = tf.train.latest_checkpoint(a.checkpoint)
                    saver.restore(sess, checkpoint)
                    model_name = 'last'
                else:
                    model_name = str(a.save_freq * (m + 1))
                    saver.restore(sess, 'D:/pix2pix/ped_train/model-' + model_name)

                start = time.time()
                max_steps = min(steps_per_epoch, max_steps)
                for step in range(max_steps):
                    image = cv2.imread(input_paths[step])
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    name, _ = os.path.splitext(os.path.basename(input_paths[step]))
                    image = add_noise(image,name,a.mode)
                    # thermal image is not used for testing
                    # [display_fetches, model.cls_result, model.bbox_result]
                    display_result, cls_output, bbox_output = sess.run([display_fetches, model.cls_result, model.bbox_result], feed_dict={img: image})
                    filesets = save_images(display_result, name, model_name=model_name)
                    for i, f in enumerate(filesets):
                        print("evaluated image", f["name"])
                    index_path = append_index(filesets)

                    bbox_dir = os.path.join(a.output_dir, "detection_result" + '-' + model_name)
                    if not os.path.exists(bbox_dir):
                        os.makedirs(bbox_dir)
                    bbox_result = {"cls": cls_output, "bbox": bbox_output}
                    bbox_path = os.path.join(bbox_dir, name)
                    scipy.io.savemat(bbox_path, bbox_result)

                print("wrote index at", index_path)
                print("rate", (time.time() - start) / max_steps)
        else:
            # training
            start = time.time()

            L = list(range(N))
            random.seed(1)

            random.shuffle(L)
            n = 0

            for step in range(max_steps):
                def should(freq):
                    return freq > 0 and ((step + 1) % freq == 0 or step == max_steps - 1)

                if n == N:
                    random.shuffle(L)
                    n = 0

                options = None
                run_metadata = None
                if should(a.trace_freq):
                    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                fetches = {
                    "train": model.train,
                    "global_step": sv.global_step,
                }

                if should(a.progress_freq):
                    fetches["discrim_loss_GAN"] = model.discrim_loss_GAN
                    fetches["discrim_loss_segment"] = model.discrim_loss_segment
                    fetches["discrim_loss_cls"] = model.discrim_loss_cls
                    fetches["discrim_loss_bbox"] = model.discrim_loss_bbox
                    fetches["gen_loss_GAN"] = model.gen_loss_GAN
                    fetches["gen_loss_L1"] = model.gen_loss_L1

                if should(a.summary_freq):
                    fetches["summary"] = sv.summary_op

                if should(a.display_freq):
                    fetches["display"] = display_fetches

                # load image and gt
                image = cv2.imread(input_paths[L[n]])
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                name, _ = os.path.splitext(os.path.basename(input_paths[L[n]]))
                image = add_noise(image, name, a.mode)
                mat = scipy.io.loadmat(os.path.join(bbox_paths, name))
                labels, label_weights, bbox_targets, bbox_loss_weights = sample_minibatch(mat['bbox_targets'])
                results = sess.run(fetches, options=options, run_metadata=run_metadata,
                                   feed_dict={img: image, lbs: labels, lbsw: label_weights,
                                              bboxt: bbox_targets, bboxlw: bbox_loss_weights})
                # results = sess.run(fetches, options=options, run_metadata=run_metadata)

                n = n + 1

                if should(a.summary_freq):
                    print("recording summary")
                    sv.summary_writer.add_summary(results["summary"], results["global_step"])

                if should(a.display_freq):
                    print("saving display images")
                    filesets = save_images(results["display"], name, step=results["global_step"])
                    append_index(filesets, step=True)

                if should(a.trace_freq):
                    print("recording trace")
                    sv.summary_writer.add_run_metadata(run_metadata, "step_%d" % results["global_step"])

                if should(a.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] / steps_per_epoch)
                    train_step = (results["global_step"] - 1) % steps_per_epoch + 1
                    rate = (step + 1) * a.batch_size / (time.time() - start)
                    remaining = (max_steps - step) * a.batch_size / rate
                    print("progress  epoch %d  step %d  image/sec %0.1f  remaining %dm" % (train_epoch, train_step, rate, remaining / 60))
                    print("discrim_loss_GAN", results["discrim_loss_GAN"])
                    print("gen_loss_GAN", results["gen_loss_GAN"])
                    print("discrim_loss_segment", results["discrim_loss_segment"])
                    print("discrim_loss_cls", results["discrim_loss_cls"])
                    print("discrim_loss_bbox", results["discrim_loss_bbox"])
                    print("gen_loss_L1", results["gen_loss_L1"])

                if should(a.save_freq):
                    print("saving model")
                    saver.save(sess, os.path.join(a.output_dir, "model"), global_step=sv.global_step)


                if sv.should_stop():
                    break
Example #21
def main():
    if tf.__version__.split('.')[0] != "1":
        raise Exception("Tensorflow version 1 required")

    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    if a.mode == "test" or a.mode == "export":
        if a.checkpoint is None:
            raise Exception("checkpoint required for test mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(a.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(a, key, val)
        # disable these features in test mode
        a.scale_size = CROP_SIZE
        a.flip = False

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    if a.mode == "export":
        # export the generator to a meta graph that can be imported later for standalone generation
        if a.lab_colorization:
            raise Exception("export not supported for lab_colorization")

        input = tf.placeholder(tf.string, shape=[1], name="input_base64")
        input_data = tf.decode_base64(input[0])
        input_image = tf.image.decode_png(input_data)

        # remove alpha channel if present
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2],
                                       4), lambda: input_image[:, :, :3],
                              lambda: input_image)
        # convert grayscale to RGB
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1),
                              lambda: tf.image.grayscale_to_rgb(input_image),
                              lambda: input_image)

        input_image = tf.image.convert_image_dtype(input_image,
                                                   dtype=tf.float32)
        input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
        batch_input = tf.expand_dims(input_image, axis=0)

        with tf.variable_scope("generator"):
            batch_output = deprocess(
                create_generator(preprocess(batch_input), 3))

        output_image = tf.image.convert_image_dtype(batch_output,
                                                    dtype=tf.uint8)[0]
        if a.output_filetype == "png":
            output_data = tf.image.encode_png(output_image)
        elif a.output_filetype == "jpeg":
            output_data = tf.image.encode_jpeg(output_image, quality=80)
        else:
            raise Exception("invalid filetype")
        output = tf.convert_to_tensor([tf.encode_base64(output_data)],
                                      name="output_base64")

        init_op = tf.global_variables_initializer()
        restore_saver = tf.train.Saver()
        export_saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            restore_saver.restore(sess, checkpoint)
            print("exporting model")

            exports_dir = os.path.abspath(os.path.join(a.output_dir, 'export'))
            if not os.path.exists(exports_dir):
                os.makedirs(exports_dir)
            model_exporter = exporter.Exporter(export_saver)
            model_exporter.init(sess.graph.as_graph_def(),
                                named_graph_signatures={
                                    'inputs':
                                    exporter.generic_signature({'x': input}),
                                    'outputs':
                                    exporter.generic_signature(
                                        {'predictions': output})
                                })

            model_exporter.export(export_dir_base=exports_dir,
                                  global_step_tensor=tf.constant(2),
                                  sess=sess)
            # export_saver.export_meta_graph(filename=os.path.join(a.output_dir, "export.meta"))
            # export_saver.save(sess, os.path.join(a.output_dir, "export"), write_meta_graph=False)
        return

    examples = load_examples()
    print("examples count = %d" % examples.count)

    final_input = tf.placeholder_with_default(examples.inputs,
                                              shape=(None, CROP_SIZE,
                                                     CROP_SIZE, 3),
                                              name="final_input")
    final_dropout = tf.placeholder(tf.float32, name='final_dropout')
    # inputs and targets are [batch_size, height, width, channels]
    model = create_model(final_input, examples.targets, final_dropout)

    # undo colorization splitting on images that we use for display/output
    if a.lab_colorization:
        if a.which_direction == "AtoB":
            # inputs is brightness, this will be handled fine as a grayscale image
            # need to augment targets and outputs with brightness
            targets = augment(examples.targets, examples.inputs)
            outputs = augment(model.outputs, examples.inputs)
            # inputs can be deprocessed normally and handled as if they are single channel
            # grayscale images
            inputs = deprocess(examples.inputs)
        elif a.which_direction == "BtoA":
            # inputs will be color channels only, get brightness from targets
            inputs = augment(examples.inputs, examples.targets)
            targets = deprocess(examples.targets)
            outputs = deprocess(model.outputs)
        else:
            raise Exception("invalid direction")
    else:
        inputs = examples.inputs
        targets = tf.argmax(examples.targets, axis=-1)
        # targets = examples.targets
        targets = tf.expand_dims(targets, axis=-1)
        outputs = tf.argmax(model.outputs, axis=-1)
        outputs = tf.expand_dims(outputs, axis=-1)


    # Image summary.
    def depreprocess(image_batch, num_images, img_mean, img_std):
        b, h, w, c = image_batch.shape
        outputs = []
        if num_images > b:
            num_images = b

        for i in range(num_images):
            input_plus_mean = img_std * image_batch[i] + img_mean
            input_casted = tf.cast(input_plus_mean, tf.uint8)
            outputs.append(input_casted)
        return tf.stack(outputs)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_inputs"):
        converted_inputs = depreprocess(inputs,
                                        num_images=3,
                                        img_mean=IMG_RGB_MEAN,
                                        img_std=IMG_RGB_STD)

    with tf.name_scope("convert_targets"):
        converted_targets = (targets * (np.round(256 /
                                                 (NUM_OF_CLASSESS - 1)) - 1))
        # converted_targets = 100 * targets
        # converted_targets = convert(converted_targets)
        converted_targets = tf.cast(converted_targets, tf.uint8)

    with tf.name_scope("convert_outputs"):
        # converted_outputs = 100 * outputs
        converted_outputs = (outputs * (np.round(256 /
                                                 (NUM_OF_CLASSESS - 1)) - 1))
        # converted_outputs = convert(converted_outputs)
        converted_outputs = tf.cast(converted_outputs, tf.uint8)
        # converted_outputs = color_image(converted_outputs, NUM_OF_CLASSESS)

    with tf.name_scope("encode_images"):
        display_fetches = {
            "paths":
            examples.paths,
            "inputs":
            tf.map_fn(tf.image.encode_png,
                      converted_inputs,
                      dtype=tf.string,
                      name="input_pngs"),
            "targets":
            tf.map_fn(tf.image.encode_png,
                      converted_targets,
                      dtype=tf.string,
                      name="target_pngs"),
            "outputs":
            tf.map_fn(tf.image.encode_png,
                      converted_outputs,
                      dtype=tf.string,
                      name="output_pngs"),
        }

    # summaries
    with tf.name_scope("inputs_summary"):
        tf.summary.image("inputs", converted_inputs)

    with tf.name_scope("targets_summary"):
        tf.summary.image("targets", converted_targets)

    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    tf.summary.scalar("gen_loss", model.gen_loss)
    tf.summary.scalar("learning_rate", model.lr_rate)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    saver = tf.train.Saver(max_to_keep=1)

    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)
    with sv.managed_session() as sess:
        print("parameter_count =", sess.run(parameter_count))

        if a.checkpoint is not None:
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            saver.restore(sess, checkpoint)

        max_steps = 2**32
        if a.max_epochs is not None:
            max_steps = examples.steps_per_epoch * a.max_epochs
        if a.max_steps is not None:
            max_steps = a.max_steps

        if a.mode == "test":
            # testing
            # at most, process the test data once
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                filesets = save_images(results)
                for i, f in enumerate(filesets):
                    print("evaluated image", f["name"])
                index_path = append_index(filesets)

            print("wrote index at", index_path)
        else:
            # training
            start = time.time()

            for step in range(max_steps):

                def should(freq):
                    return freq > 0 and ((step + 1) % freq == 0
                                         or step == max_steps - 1)

                options = None
                run_metadata = None
                if should(a.trace_freq):
                    options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                fetches = {
                    "train": model.train,
                    "global_step": sv.global_step,
                }

                if should(a.progress_freq):
                    fetches["gen_loss"] = model.gen_loss

                if should(a.summary_freq):
                    fetches["summary"] = sv.summary_op

                if should(a.display_freq):
                    fetches["display"] = display_fetches

                results = sess.run(fetches,
                                   feed_dict={final_dropout: 0.5},
                                   options=options,
                                   run_metadata=run_metadata)

                if should(a.summary_freq):
                    print("recording summary")
                    sv.summary_writer.add_summary(results["summary"],
                                                  results["global_step"])

                if should(a.display_freq):
                    print("saving display images")
                    filesets = save_images(results["display"],
                                           step=results["global_step"])
                    append_index(filesets, step=True)

                if should(a.trace_freq):
                    print("recording trace")
                    sv.summary_writer.add_run_metadata(
                        run_metadata, "step_%d" % results["global_step"])

                if should(a.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] /
                                            examples.steps_per_epoch)
                    train_step = (results["global_step"] -
                                  1) % examples.steps_per_epoch + 1
                    rate = (step + 1) * a.batch_size / (time.time() - start)
                    remaining = (max_steps - step) * a.batch_size / rate
                    print(
                        "progress  epoch %d  step %d  image/sec %0.1f  remaining %dm"
                        % (train_epoch, train_step, rate, remaining / 60))
                    print("gen_loss", results["gen_loss"])

                if should(a.save_freq):
                    print("saving model")
                    saver.save(sess,
                               os.path.join(a.output_dir, "model"),
                               global_step=sv.global_step)

                if sv.should_stop():
                    break
Example #22
 def _decode_base64():
     return tf.decode_base64(image_tensor)
Example #23
 def _decode_base64():
   return tf.decode_base64(image_tensor)
Example #24
import tensorflow as tf
import base64

with open("/Users/xingoo/PycharmProjects/ai/test/3.jpg", 'rb') as f:
    file_content = f.read()
base64str = str(base64.urlsafe_b64encode(file_content), encoding='utf-8')

base64_tensor = tf.convert_to_tensor(base64str, dtype=tf.string)
print(base64_tensor)
img_str = tf.decode_base64(base64_tensor)
# yields an image tensor of shape (height, width, channels)
img = tf.image.decode_image(img_str, channels=3)
with tf.Session() as sess:
    img_value = sess.run([img])[0]  # fetch the result as a numpy array
    print(img_value.shape)
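
Since tf.decode_base64 inverts base64.urlsafe_b64encode, the decoded bytes round-trip to the original file contents, which makes a handy sanity check:

with tf.Session() as sess:
    assert sess.run(img_str) == file_content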
Example #25
def main(argv):

    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    # for reproducing
    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    if a.mode == "test" or a.mode == "export":
        if a.checkpoint is None:
            raise Exception("checkpoint required for test mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(a.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(a, key, val)
        # disable these features in test mode
        a.scale_size = CROP_SIZE
        a.flip = False

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    if a.mode == "export":
        # export the generator to a meta graph that can be imported later for standalone generation
        if a.lab_colorization:
            raise Exception("export not supported for lab_colorization")

        input = tf.placeholder(tf.string, shape=[1])
        input_data = tf.decode_base64(input[0])
        input_image = tf.image.decode_png(input_data)

        # remove alpha channel if present
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2],
                                       4), lambda: input_image[:, :, :3],
                              lambda: input_image)
        # convert grayscale to RGB
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1),
                              lambda: tf.image.grayscale_to_rgb(input_image),
                              lambda: input_image)

        input_image = tf.image.convert_image_dtype(input_image,
                                                   dtype=tf.float32)
        input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
        batch_input = tf.expand_dims(input_image, axis=0)

        with tf.variable_scope("generator"):
            batch_output = deprocess(
                create_generator(preprocess(batch_input), 3, a.ngf))

        output_image = tf.image.convert_image_dtype(batch_output,
                                                    dtype=tf.uint8)[0]
        if a.output_filetype == "png":
            output_data = tf.image.encode_png(output_image)
        elif a.output_filetype == "jpeg":
            output_data = tf.image.encode_jpeg(output_image, quality=80)
        else:
            raise Exception("invalid filetype")
        output = tf.convert_to_tensor([tf.encode_base64(output_data)])

        key = tf.placeholder(tf.string, shape=[1])
        inputs = {"key": key.name, "input": input.name}
        tf.add_to_collection("inputs", json.dumps(inputs))
        outputs = {
            "key": tf.identity(key).name,
            "output": output.name,
        }
        tf.add_to_collection("outputs", json.dumps(outputs))

        init_op = tf.global_variables_initializer()
        restore_saver = tf.train.Saver()
        export_saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            restore_saver.restore(sess, checkpoint)
            print("exporting model")
            export_saver.export_meta_graph(
                filename=os.path.join(a.output_dir, "export.meta"))
            export_saver.save(sess,
                              os.path.join(a.output_dir, "export"),
                              write_meta_graph=False)

        return

    # prepare dataset
    examples = load_batch_examples(a)
    print("examples count = %d" % examples.count)

    # pix2pix model
    out_channels = int(examples.targets.get_shape()[-1])
    pix_model = pix2pix(a, out_channels)

    # inputs and targets are [batch_size, height, width, channels]
    model = pix_model.create_model(examples.inputs, examples.targets)

    # undo colorization splitting on images that we use for display/output
    if a.lab_colorization:
        if a.which_direction == "AtoB":
            # inputs is brightness, this will be handled fine as a grayscale image
            # need to augment targets and outputs with brightness
            targets = augment(examples.targets, examples.inputs)
            outputs = augment(model.outputs, examples.inputs)
            # inputs can be deprocessed normally and handled as if they are single channel
            # grayscale images
            inputs = deprocess(examples.inputs)
        elif a.which_direction == "BtoA":
            # inputs will be color channels only, get brightness from targets
            inputs = augment(examples.inputs, examples.targets)
            targets = deprocess(examples.targets)
            outputs = deprocess(model.outputs)
        else:
            raise Exception("invalid direction")
    else:
        inputs = deprocess(examples.inputs)
        targets = deprocess(examples.targets)
        outputs = deprocess(model.outputs)

    def convert(image):
        if a.aspect_ratio != 1.0:
            # upscale to correct aspect ratio
            size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]
            image = tf.image.resize_images(
                image, size=size, method=tf.image.ResizeMethod.BICUBIC)

        return tf.image.convert_image_dtype(image,
                                            dtype=tf.uint8,
                                            saturate=True)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_inputs"):
        converted_inputs = convert(inputs)

    with tf.name_scope("convert_targets"):
        converted_targets = convert(targets)

    with tf.name_scope("convert_outputs"):
        converted_outputs = convert(outputs)

    with tf.name_scope("encode_images"):
        display_fetches = {
            "paths":
            examples.paths,
            "inputs":
            tf.map_fn(tf.image.encode_png,
                      converted_inputs,
                      dtype=tf.string,
                      name="input_pngs"),
            "targets":
            tf.map_fn(tf.image.encode_png,
                      converted_targets,
                      dtype=tf.string,
                      name="target_pngs"),
            "outputs":
            tf.map_fn(tf.image.encode_png,
                      converted_outputs,
                      dtype=tf.string,
                      name="output_pngs"),
        }

    # summaries
    with tf.name_scope("inputs_summary"):
        tf.summary.image("inputs", converted_inputs)

    with tf.name_scope("targets_summary"):
        tf.summary.image("targets", converted_targets)

    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    with tf.name_scope("predict_real_summary"):
        tf.summary.image(
            "predict_real",
            tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8))

    with tf.name_scope("predict_fake_summary"):
        tf.summary.image(
            "predict_fake",
            tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8))

    tf.summary.scalar("discriminator_loss", model.discrim_loss)
    tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
    tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    # compute max_steps
    max_steps = 2**32
    if a.max_epochs is not None:
        max_steps = examples.steps_per_epoch * a.max_epochs
    if a.max_steps is not None:
        max_steps = a.max_steps

    # to save checkpoint
    saver = tf.train.Saver(max_to_keep=1)

    # log directory
    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None

    # use different hooks for training and testing
    if a.mode == "test":
        hooks = None
    else:
        # hooks for tf.train.MonitoredTrainingSession
        from networks.custom_hooks import TraceHook, DisplayHook, LoggingTensorHook

        # base training hooks: always stop at max_steps
        train_hooks = [
            tf.train.StopAtStepHook(last_step=max_steps),
        ]
        if a.checkpoint:
            train_hooks.append(
                tf.train.CheckpointSaverHook(checkpoint_dir=a.checkpoint,
                                             save_steps=a.save_freq,
                                             saver=saver))

        if a.summary_freq:
            train_hooks.append(
                tf.train.SummarySaverHook(
                    save_steps=a.summary_freq,
                    output_dir=logdir,
                    scaffold=tf.train.Scaffold(
                        summary_op=tf.summary.merge_all())))

        if a.progress_freq:
            train_hooks.append(
                LoggingTensorHook(
                    tensors={
                        "discrim_loss": model.discrim_loss,
                        "gen_loss_GAN": model.gen_loss_GAN,
                        "gen_loss_L1": model.gen_loss_L1
                    },
                    batch_size=a.batch_size,
                    max_steps=max_steps,
                    steps_per_epoch=examples.steps_per_epoch,
                    every_n_iter=a.progress_freq,
                ))

        if a.trace_freq:
            train_hooks.append(
                TraceHook(ckptdir=logdir, every_n_step=a.trace_freq))

        if a.display_freq:
            train_hooks.append(
                DisplayHook(display_fetches,
                            a.output_dir,
                            every_n_step=a.display_freq))

        # hand the assembled hooks to the session
        hooks = train_hooks

    # don't grab all GPU memory up front
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  #pylint: disable=E1101

    # an equivalent one-line configuration:
    #config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))

    with tf.train.MonitoredTrainingSession(hooks=hooks, config=config) as sess:

        print("parameter_count =", sess.run(parameter_count))

        # load previous checkpoint
        if a.checkpoint is not None:
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            saver.restore(sess, checkpoint)

        if a.mode == "test":
            # testing
            # at most, process the test data once
            start = time.time()
            max_steps = min(examples.steps_per_epoch, max_steps)
            for _ in range(max_steps):
                results = sess.run(display_fetches)
                filesets = save_images(results, a.output_dir)
                for i, f in enumerate(filesets):
                    print("{}: evaluated image: {}".format(i, f["name"]))
                index_path = append_index(filesets, a.output_dir)
            print("wrote index at", index_path)
            print("rate", (time.time() - start) / max_steps)

        else:

            # training only
            while not sess.should_stop():
                # assemble the fetches for one training step
                fetches = {
                    "train": model.train,
                    #                    "global_step": tf.training_util._get_or_create_global_step_read(),    #pylint: disable=protected-access
                }

                # run one training step
                results = sess.run(fetches)
Example #26
0
import numpy as np
import tensorflow as tf

# tf.enable_eager_execution must be called at program startup.
tf.enable_eager_execution()

print(tf.add(1, 2))
print(tf.add([1, 2], [3, 4]))
print(tf.square(5))
print(tf.reduce_sum([1, 2, 3]))
print(tf.encode_base64("hello world"))
print(tf.decode_base64('aGVsbG8gd29ybGQ'))

# Operator overloading is also supported
print(tf.square(2) + tf.square(3))

# The most obvious differences between NumPy arrays and TensorFlow Tensors are:
# 
#   Tensors can be backed by accelerator memory (like GPU, TPU).
#   Tensors are immutable.


ndarray = np.ones([3, 3])

print("TensorFlow operations convert numpy arrays to Tensors automatically")
tensor = tf.multiply(ndarray, 42)
print(tensor)


print("And NumPy operations convert Tensors to numpy arrays automatically")
print(np.add(tensor, 1))
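
The opening lines of the next snippet are missing from the source; a plausible reconstruction under stated assumptions: a and b are scalar string tensors, and the three hashing results come from tf.string_to_hash_bucket_fast, tf.string_to_hash_bucket_strong, and tf.string_to_hash_bucket (the bucket count and key below are illustrative):

import tensorflow as tf

sess = tf.Session()
a = tf.constant('hello')
b = tf.constant('world')

# hash strings into a fixed number of buckets
c = tf.string_to_hash_bucket_fast(a, 10)
d = tf.string_to_hash_bucket_strong(a, 10, key=[1, 2])
e = tf.string_to_hash_bucket(a, 10)

result = sess.run([c, d, e])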
print('Hashing:\nfast:%s\nstrong:%s\nnormal:%s\n\n' %
      (result[0], result[1], result[2]))

# join an array of strings into a single string
c = tf.reduce_join([a, b], axis=0)
d = tf.string_join([a, b], '__')

result = sess.run([c, d])

print('Joining:\nc=%s\nd=%s\n\n' % (result[0], result[1]))

# split strings
c = tf.string_split([a], ',')
d = tf.substr(a, 0, 5)

result = sess.run([c, d])

print('Splitting:\nc=%s\nd=%s\n\n' % (result[0][1], result[1]))

# convert strings
c = tf.as_string(tf.constant(10))
d = tf.string_to_number(c)
e = tf.encode_base64(a)
f = tf.decode_base64(e)

result = sess.run([c, d, e, f])

print('Conversion:\nc=%s\nd=%s\ne=%s\nf=%s\n\n' %
      (result[0], result[1], result[2], result[3]))

sess.close()
Example #28
0
def main():
    # Get hyper-parameters
    print("Start Pokemon classifier")
    if not os.path.exists(FLAGS.checkpoint_path):
        os.makedirs(FLAGS.checkpoint_path)
    CHECKPOINT_FILE = FLAGS.checkpoint_path + "/checkpoint.ckpt"
    LATEST_CHECKPOINT = tf.train.latest_checkpoint(FLAGS.checkpoint_path)

    # Initialize train and test data
    TRAIN_IMAGE_NUMBER = 646
    TEST_IMAGE_NUMBER = 68
    IMAGE_SIZE = 32
    RGB_CHANNEL_SIZE = 3
    LABEL_SIZE = 17

    train_dataset = np.ndarray(shape=(TRAIN_IMAGE_NUMBER, IMAGE_SIZE,
                                      IMAGE_SIZE, RGB_CHANNEL_SIZE),
                               dtype=np.float32)
    test_dataset = np.ndarray(shape=(TEST_IMAGE_NUMBER, IMAGE_SIZE, IMAGE_SIZE,
                                     RGB_CHANNEL_SIZE),
                              dtype=np.float32)

    train_labels = np.ndarray(shape=(TRAIN_IMAGE_NUMBER, ), dtype=np.int32)
    test_labels = np.ndarray(shape=(TEST_IMAGE_NUMBER, ), dtype=np.int32)

    TRAIN_DATA_DIR = "./data/train/"
    TEST_DATA_DIR = "./data/test/"
    VALIDATE_DATA_DIR = "./data/validate/"
    IMAGE_FORMAT = ".png"
    index = 0
    pokemon_type_id_map = {
        "Bug": 0,
        "Dark": 1,
        "Dragon": 2,
        "Electric": 3,
        "Fairy": 4,
        "Fighting": 5,
        "Fire": 6,
        "Ghost": 7,
        "Grass": 8,
        "Ground": 9,
        "Ice": 10,
        "Normal": 11,
        "Poison": 12,
        "Psychic": 13,
        "Rock": 14,
        "Steel": 15,
        "Water": 16
    }
    pokemon_types = [
        "Bug", "Dark", "Dragon", "Electric", "Fairy", "Fighting", "Fire",
        "Ghost", "Grass", "Ground", "Ice", "Normal", "Poison", "Psychic",
        "Rock", "Steel", "Water"
    ]
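    # note: the dict and the list above encode the same ordering, so the map
    # could be derived from the list to keep the two in sync (equivalent line):
    # pokemon_type_id_map = {t: i for i, t in enumerate(pokemon_types)}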

    # Load train images
    for pokemon_type in os.listdir(TRAIN_DATA_DIR):
        for image_filename in os.listdir(
                os.path.join(TRAIN_DATA_DIR, pokemon_type)):
            if image_filename.endswith(IMAGE_FORMAT):

                image_filepath = os.path.join(TRAIN_DATA_DIR, pokemon_type,
                                              image_filename)
                image_ndarray = ndimage.imread(image_filepath, mode="RGB")
                train_dataset[index] = image_ndarray

                train_labels[index] = pokemon_type_id_map.get(pokemon_type)
                index += 1

    index = 0
    # Load test image
    for pokemon_type in os.listdir(TEST_DATA_DIR):
        for image_filename in os.listdir(
                os.path.join(TEST_DATA_DIR, pokemon_type)):
            if image_filename.endswith(IMAGE_FORMAT):

                image_filepath = os.path.join(TEST_DATA_DIR, pokemon_type,
                                              image_filename)
                image_ndarray = ndimage.imread(image_filepath, mode="RGB")
                test_dataset[index] = image_ndarray

                test_labels[index] = pokemon_type_id_map.get(pokemon_type)
                index += 1

    # Define the model
    keys_placeholder = tf.placeholder(tf.int32, shape=[None, 1])
    keys = tf.identity(keys_placeholder)

    model_base64_placeholder = tf.placeholder(shape=[None],
                                              dtype=tf.string,
                                              name="model_input_b64_images")
    model_base64_string = tf.decode_base64(model_base64_placeholder)
    model_base64_input = tf.map_fn(lambda x: tf.image.resize_images(
        tf.image.decode_jpeg(x, channels=RGB_CHANNEL_SIZE),
        [IMAGE_SIZE, IMAGE_SIZE]),
                                   model_base64_string,
                                   dtype=tf.float32)

    x = tf.placeholder(tf.float32,
                       shape=(None, IMAGE_SIZE, IMAGE_SIZE, RGB_CHANNEL_SIZE))
    y = tf.placeholder(tf.int32, shape=(None, ))

    batch_size = FLAGS.batch_size
    epoch_number = FLAGS.epoch_number
    checkpoint_dir = FLAGS.checkpoint_dir
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    tensorboard_dir = FLAGS.tensorboard_dir
    mode = FLAGS.mode
    checkpoint_file = checkpoint_dir + "/checkpoint.ckpt"
    steps_to_validate = FLAGS.steps_to_validate

    def cnn_inference(x):
        # Convolution layer result: [BATCH_SIZE, 16, 16, 32]
        with tf.variable_scope("conv1"):
            weights = tf.get_variable(
                "weights", [3, 3, 3, 32],
                initializer=tf.random_normal_initializer())
            bias = tf.get_variable("bias", [32],
                                   initializer=tf.random_normal_initializer())

            layer = tf.nn.conv2d(x,
                                 weights,
                                 strides=[1, 1, 1, 1],
                                 padding="SAME")
            layer = tf.nn.bias_add(layer, bias)
            layer = tf.nn.relu(layer)
            layer = tf.nn.max_pool(layer,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding="SAME")

        # Convolution layer result: [BATCH_SIZE, 8, 8, 64]
        with tf.variable_scope("conv2"):
            weights = tf.get_variable(
                "weights", [3, 3, 32, 64],
                initializer=tf.random_normal_initializer())
            bias = tf.get_variable("bias", [64],
                                   initializer=tf.random_normal_initializer())

            layer = tf.nn.conv2d(layer,
                                 weights,
                                 strides=[1, 1, 1, 1],
                                 padding="SAME")
            layer = tf.nn.bias_add(layer, bias)
            layer = tf.nn.relu(layer)
            layer = tf.nn.max_pool(layer,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding="SAME")

        # Reshape for full-connect network
        layer = tf.reshape(layer, [-1, 8 * 8 * 64])

        # Full connected layer result: [BATCH_SIZE, 17]
        with tf.variable_scope("fc1"):
            # weights.get_shape().as_list()[0] = 8 * 8 * 64
            weights = tf.get_variable(
                "weights", [8 * 8 * 64, LABEL_SIZE],
                initializer=tf.random_normal_initializer())
            bias = tf.get_variable("bias", [LABEL_SIZE],
                                   initializer=tf.random_normal_initializer())
            layer = tf.add(tf.matmul(layer, weights), bias)

        return layer

    def lstm_inference(x):
        RNN_HIDDEN_UNITS = 128

        # x was [BATCH_SIZE, 32, 32, 3]
        # x changes to [32, BATCH_SIZE, 32, 3]
        x = tf.transpose(x, [1, 0, 2, 3])
        # x changes to [32 * BATCH_SIZE, 32 * 3]
        x = tf.reshape(x, [-1, IMAGE_SIZE * RGB_CHANNEL_SIZE])
        # x changes to array of 32 * [BATCH_SIZE, 32 * 3]
        x = tf.split(axis=0, num_or_size_splits=IMAGE_SIZE, value=x)

        weights = tf.Variable(tf.random_normal([RNN_HIDDEN_UNITS, LABEL_SIZE]))
        biases = tf.Variable(tf.random_normal([LABEL_SIZE]))

        # output size is 128, state size is (c=128, h=128)
        lstm_cell = rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)
        # outputs is array of 32 * [BATCH_SIZE, 128]
        #outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
        outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

        # outputs[-1] is [BATCH_SIZE, 128]
        return tf.matmul(outputs[-1], weights) + biases

    def bidirectional_lstm_inference(x):
        RNN_HIDDEN_UNITS = 128

        # x was [BATCH_SIZE, 32, 32, 3]
        # x changes to [32, BATCH_SIZE, 32, 3]
        x = tf.transpose(x, [1, 0, 2, 3])
        # x changes to [32 * BATCH_SIZE, 32 * 3]
        x = tf.reshape(x, [-1, IMAGE_SIZE * RGB_CHANNEL_SIZE])
        # x changes to array of 32 * [BATCH_SIZE, 32 * 3]
        x = tf.split(axis=0, num_or_size_splits=IMAGE_SIZE, value=x)

        weights = tf.Variable(
            tf.random_normal([2 * RNN_HIDDEN_UNITS, LABEL_SIZE]))
        biases = tf.Variable(tf.random_normal([LABEL_SIZE]))

        # output size is 128, state size is (c=128, h=128)
        fw_lstm_cell = rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)
        bw_lstm_cell = rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)

        # outputs is array of 32 * [BATCH_SIZE, 128]
        #outputs, _, _ = rnn.bidirectional_rnn(
        #    fw_lstm_cell, bw_lstm_cell, x, dtype=tf.float32)
        outputs, _, _ = tf.nn.static_bidirectional_rnn(fw_lstm_cell,
                                                       bw_lstm_cell,
                                                       x,
                                                       dtype=tf.float32)

        # outputs[-1] is [BATCH_SIZE, 128]
        return tf.matmul(outputs[-1], weights) + biases

    def stacked_lstm_inference(x):
        RNN_HIDDEN_UNITS = 128

        # x was [BATCH_SIZE, 32, 32, 3]
        # x changes to [32, BATCH_SIZE, 32, 3]
        x = tf.transpose(x, [1, 0, 2, 3])
        # x changes to [32 * BATCH_SIZE, 32 * 3]
        x = tf.reshape(x, [-1, IMAGE_SIZE * RGB_CHANNEL_SIZE])
        # x changes to array of 32 * [BATCH_SIZE, 32 * 3]
        x = tf.split(axis=0, num_or_size_splits=IMAGE_SIZE, value=x)

        weights = tf.Variable(tf.random_normal([RNN_HIDDEN_UNITS, LABEL_SIZE]))
        biases = tf.Variable(tf.random_normal([LABEL_SIZE]))

        # output size is 128, state size is (c=128, h=128)
        # create one cell instance per layer; reusing a single cell object
        # across layers breaks variable sharing in later TF 1.x releases
        lstm_cells = rnn.MultiRNNCell(
            [rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0) for _ in range(2)])

        # outputs is array of 32 * [BATCH_SIZE, 128]
        #outputs, states = rnn.rnn(lstm_cells, x, dtype=tf.float32)
        outputs, states = rnn.static_rnn(lstm_cells, x, dtype=tf.float32)

        # outputs[-1] is [BATCH_SIZE, 128]
        return tf.matmul(outputs[-1], weights) + biases

    def inference(inputs):
        print("Use the model: {}".format(FLAGS.model))
        if FLAGS.model == "cnn":
            return cnn_inference(inputs)
        elif FLAGS.model == "lstm":
            return lstm_inference(inputs)
        elif FLAGS.model == "bidirectional_lstm":
            return bidirectional_lstm_inference(inputs)
        elif FLAGS.model == "stacked_lstm":
            return stacked_lstm_inference(inputs)
        else:
            print("Unknow model, exit now")
            exit(1)

    # Define train op
    logit = inference(x)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=y))

    learning_rate = FLAGS.learning_rate
    print("Use the optimizer: {}".format(FLAGS.optimizer))
    if FLAGS.optimizer == "sgd":
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    elif FLAGS.optimizer == "adadelta":
        optimizer = tf.train.AdadeltaOptimizer(learning_rate)
    elif FLAGS.optimizer == "adagrad":
        optimizer = tf.train.AdagradOptimizer(learning_rate)
    elif FLAGS.optimizer == "adam":
        optimizer = tf.train.AdamOptimizer(learning_rate)
    elif FLAGS.optimizer == "ftrl":
        optimizer = tf.train.FtrlOptimizer(learning_rate)
    elif FLAGS.optimizer == "rmsprop":
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
    else:
        print("Unknow optimizer: {}, exit now".format(FLAGS.optimizer))
        exit(1)

    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = optimizer.minimize(loss, global_step=global_step)

    # Define accuracy and inference op
    tf.get_variable_scope().reuse_variables()

    # prediction op for raw image tensors fed through the x placeholder
    predict_op = tf.argmax(tf.nn.softmax(logit), 1)
    # prediction ops for the base64 serving input
    inference_logits = inference(model_base64_input)

    inference_predict_softmax = tf.nn.softmax(inference_logits)
    inference_predict_op = tf.argmax(inference_predict_softmax, 1)
    inference_correct_prediction = tf.equal(inference_predict_op,
                                            tf.to_int64(y))
    inference_accuracy_op = tf.reduce_mean(
        tf.cast(inference_correct_prediction, tf.float32))

    model_signature = signature_def_utils.build_signature_def(
        inputs={"images": utils.build_tensor_info(model_base64_placeholder)},
        outputs={
            "softmax": utils.build_tensor_info(inference_predict_softmax),
            "prediction": utils.build_tensor_info(inference_predict_op)
        },
        method_name=signature_constants.PREDICT_METHOD_NAME)

    saver = tf.train.Saver()
    tf.summary.scalar('loss', loss)
    init_op = tf.global_variables_initializer()

    # Create session to run graph
    with tf.Session() as sess:
        summary_op = tf.summary.merge_all()
        writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)
        sess.run(init_op)

        if mode == "train":
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                logging.info("Continue training from the model {}".format(
                    ckpt.model_checkpoint_path))
                saver.restore(sess, ckpt.model_checkpoint_path)

            #start_time = datetime.datetime.now()
            for epoch in range(epoch_number):

                _, loss_value, step = sess.run([train_op, loss, global_step],
                                               feed_dict={
                                                   x: train_dataset,
                                                   y: train_labels
                                               })

                if epoch % steps_to_validate == 0:
                    end_time = datetime.datetime.now()
                    """
          train_accuracy_value, summary_value = sess.run(
              [accuracy_op, summary_op],
              feed_dict={x: train_dataset,
                         y: train_labels})
          test_accuracy_value = sess.run(
              accuracy_op, feed_dict={x: test_dataset,
                                      y: test_labels})

          logging.info(
              "[{}] Epoch: {}, loss: {}, train_accuracy: {}, test_accuracy: {}".
              format(end_time - start_time, epoch, loss_value,
                     train_accuracy_value, test_accuracy_value))
          """
                    logging.info("Epoch: {}, loss: {}".format(
                        epoch, loss_value))

                    saver.save(sess, checkpoint_file, global_step=step)
                    #writer.add_summary(summary_value, step)
                    #start_time = end_time

            # Export the model after training completes
            export_path = os.path.join(
                compat.as_bytes(FLAGS.model_path),
                compat.as_bytes(str(FLAGS.model_version)))
            logging.info("Export the model to {}".format(export_path))

            try:
                legacy_init_op = tf.group(tf.tables_initializer(),
                                          name='legacy_init_op')
                builder = saved_model_builder.SavedModelBuilder(export_path)
                builder.add_meta_graph_and_variables(
                    sess, [tag_constants.SERVING],
                    clear_devices=True,
                    signature_def_map={
                        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                        model_signature,
                    },
                    legacy_init_op=legacy_init_op)

                builder.save()
            except Exception as e:
                logging.error(
                    "Fail to export saved model, exception: {}".format(e))
            """
      logging.info("Exporting trained model to {}".format(FLAGS.model_path))
      model_exporter = exporter.Exporter(saver)
      model_exporter.init(
          sess.graph.as_graph_def(),
          named_graph_signatures={
              'inputs':
              exporter.generic_signature({
                  "keys": keys_placeholder,
                  "features": x
              }),
              'outputs':
              exporter.generic_signature({
                  "keys": keys,
                  "prediction": predict_op
              })
          })
      model_exporter.export(FLAGS.model_path,
                            tf.constant(FLAGS.export_version), sess)
      logging.info("Done export model: {}".format(FLAGS.model_path))
      """

        elif mode == "inference":
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                logging.info("Load the model {}".format(
                    ckpt.model_checkpoint_path))
                saver.restore(sess, ckpt.model_checkpoint_path)

            start_time = datetime.datetime.now()

            image_ndarray = ndimage.imread(FLAGS.image, mode="RGB")
            # TODO: Update for server without gui
            #print_image(image_ndarray)

            image_ndarray = image_ndarray.reshape(1, IMAGE_SIZE, IMAGE_SIZE,
                                                  RGB_CHANNEL_SIZE)
            prediction = sess.run(predict_op, feed_dict={x: image_ndarray})

            end_time = datetime.datetime.now()
            pokemon_type = pokemon_types[prediction[0]]
            logging.info("[{}] Predict type: {}".format(
                end_time - start_time, pokemon_type))

        elif FLAGS.mode == "savedmodel":
            if not restore_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
                logging.error("No checkpoint for exporting model, exit now")
                exit(1)

            export_path = os.path.join(
                compat.as_bytes(FLAGS.model_path),
                compat.as_bytes(str(FLAGS.model_version)))
            logging.info("Export the model to {}".format(export_path))

            try:
                legacy_init_op = tf.group(tf.tables_initializer(),
                                          name='legacy_init_op')
                builder = saved_model_builder.SavedModelBuilder(export_path)
                builder.add_meta_graph_and_variables(
                    sess, [tag_constants.SERVING],
                    clear_devices=True,
                    signature_def_map={
                        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                        model_signature,
                    },
                    legacy_init_op=legacy_init_op)

                builder.save()
            except Exception as e:
                logging.error(
                    "Fail to export saved model, exception: {}".format(e))
def base64_to_tensor_image(websafe_base64):
    return tf.image.decode_image(tf.decode_base64(websafe_base64))
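
A short usage sketch for the helper above, assuming a single web-safe base64 payload; the file path is illustrative:

import base64
import tensorflow as tf

with open("photo.jpg", "rb") as f:  # hypothetical image file
    websafe = base64.urlsafe_b64encode(f.read()).decode("utf-8")

img = base64_to_tensor_image(tf.convert_to_tensor(websafe))
with tf.Session() as sess:
    print(sess.run(img).shape)  # (height, width, channels)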
Example #30
0
import tensorflow as tf
import base64

with open("/Users/xingoo/PycharmProjects/ai/test/3.jpg", 'rb') as f:
    file_content = f.read()
base64str = str(base64.urlsafe_b64encode(file_content), encoding='utf-8')

input_tensor = tf.convert_to_tensor(base64str, dtype=tf.string)
image_str = tf.decode_base64(input_tensor)
img = tf.image.decode_image(image_str, channels=3)

with tf.Session() as sess:
    print(sess.run(image_str))
    print(sess.run(img))
print('accuracy: {0}'.format(score))

K.set_learning_phase(0)  # test
sess = K.get_session()

from tensorflow.python.framework import graph_util

# Make GraphDef of Transfer Model
g_trans = sess.graph
g_trans_def = graph_util.convert_variables_to_constants(
    sess, g_trans.as_graph_def(), [model.output.name.replace(':0', '')])

# Image Converter Model
with tf.Graph().as_default() as g_input:
    input_b64 = tf.placeholder(shape=(1, ), dtype=tf.string, name='input')
    input_bytes = tf.decode_base64(input_b64[0])
    image = tf.image.decode_image(input_bytes)
    image_f = tf.image.convert_image_dtype(image, dtype=tf.float32)
    input_image = tf.expand_dims(image_f, 0)
    output = tf.identity(input_image, name='input_image')

g_input_def = g_input.as_graph_def()

with tf.Graph().as_default() as g_combined:
    x = tf.placeholder(tf.string, name="input_b64")

    im, = tf.import_graph_def(g_input_def,
                              input_map={'input:0': x},
                              return_elements=["input_image:0"])

    pred, = tf.import_graph_def(
Example #32
0
        def decode_and_process(base64_string):
            # parameter renamed so it no longer shadows the base64 module
            _bytes = tf.decode_base64(base64_string)
            _image = self.__tf_jpeg_process(_bytes)

            return _image
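
The method above calls a private helper that is not shown. A plausible free-function sketch of what such a JPEG-processing step might do, assuming it decodes the bytes and normalizes to float32 (this is an assumption, not the original implementation of __tf_jpeg_process):

import tensorflow as tf

def tf_jpeg_process(image_bytes):
    # assumed behavior: decode JPEG bytes and scale to float32 in [0, 1]
    image = tf.image.decode_jpeg(image_bytes, channels=3)
    return tf.image.convert_image_dtype(image, dtype=tf.float32)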
def main():
    if tf.__version__.split('.')[0] != "1":
        raise Exception("Tensorflow version 1 required")

    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    if a.mode == "test" or a.mode == "export":
        if a.checkpoint is None:
            raise Exception("checkpoint required for test mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(a.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(a, key, val)
        # disable these features in test mode
        a.scale_size = CROP_SIZE
        a.flip = False

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    if a.mode == "export":
        # export the generator to a meta graph that can be imported later for standalone generation
        if a.lab_colorization:
            raise Exception("export not supported for lab_colorization")

        input = tf.placeholder(tf.string, shape=[1])
        input_data = tf.decode_base64(input[0])
        input_image = tf.image.decode_png(input_data)

        # remove alpha channel if present
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 4), lambda: input_image[:,:,:3], lambda: input_image)
        # convert grayscale to RGB
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1), lambda: tf.image.grayscale_to_rgb(input_image), lambda: input_image)

        input_image = tf.image.convert_image_dtype(input_image, dtype=tf.float32)
        input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
        batch_input = tf.expand_dims(input_image, axis=0)

        with tf.variable_scope("generator"):
            batch_output = deprocess(create_generator(preprocess(batch_input), 3))

        output_image = tf.image.convert_image_dtype(batch_output, dtype=tf.uint8)[0]
        if a.output_filetype == "png":
            output_data = tf.image.encode_png(output_image)
        elif a.output_filetype == "jpeg":
            output_data = tf.image.encode_jpeg(output_image, quality=80)
        else:
            raise Exception("invalid filetype")
        output = tf.convert_to_tensor([tf.encode_base64(output_data)])

        key = tf.placeholder(tf.string, shape=[1])
        inputs = {
            "key": key.name,
            "input": input.name
        }
        tf.add_to_collection("inputs", json.dumps(inputs))
        outputs = {
            "key":  tf.identity(key).name,
            "output": output.name,
        }
        tf.add_to_collection("outputs", json.dumps(outputs))

        init_op = tf.global_variables_initializer()
        restore_saver = tf.train.Saver()
        export_saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            restore_saver.restore(sess, checkpoint)
            print("exporting model")
            export_saver.export_meta_graph(filename=os.path.join(a.output_dir, "export.meta"))
            export_saver.save(sess, os.path.join(a.output_dir, "export"), write_meta_graph=False)

        return

    examples = load_examples()
    print("examples count = %d" % examples.count)

    # inputs and targets are [batch_size, height, width, channels]
    model = create_model(examples.inputs, examples.targets)

    # undo colorization splitting on images that we use for display/output
    if a.lab_colorization:
        if a.which_direction == "AtoB":
            # inputs is brightness, this will be handled fine as a grayscale image
            # need to augment targets and outputs with brightness
            targets = augment(examples.targets, examples.inputs)
            outputs = augment(model.outputs, examples.inputs)
            # inputs can be deprocessed normally and handled as if they are single channel
            # grayscale images
            inputs = deprocess(examples.inputs)
        elif a.which_direction == "BtoA":
            # inputs will be color channels only, get brightness from targets
            inputs = augment(examples.inputs, examples.targets)
            targets = deprocess(examples.targets)
            outputs = deprocess(model.outputs)
        else:
            raise Exception("invalid direction")
    else:
        inputs = deprocess(examples.inputs)
        targets = deprocess(examples.targets)
        outputs = deprocess(model.outputs)

    def convert(image):
        if a.aspect_ratio != 1.0:
            # upscale to correct aspect ratio
            size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]
            image = tf.image.resize_images(image, size=size, method=tf.image.ResizeMethod.BICUBIC)

        return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_inputs"):
        converted_inputs = convert(inputs)

    with tf.name_scope("convert_targets"):
        converted_targets = convert(targets)

    with tf.name_scope("convert_outputs"):
        converted_outputs = convert(outputs)

    with tf.name_scope("encode_images"):
        display_fetches = {
            "paths": examples.paths,
            "inputs": tf.map_fn(tf.image.encode_png, converted_inputs, dtype=tf.string, name="input_pngs"),
            "targets": tf.map_fn(tf.image.encode_png, converted_targets, dtype=tf.string, name="target_pngs"),
            "outputs": tf.map_fn(tf.image.encode_png, converted_outputs, dtype=tf.string, name="output_pngs"),
        }

    # summaries
    with tf.name_scope("inputs_summary"):
        tf.summary.image("inputs", converted_inputs)

    with tf.name_scope("targets_summary"):
        tf.summary.image("targets", converted_targets)

    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    with tf.name_scope("predict_real_summary"):
        tf.summary.image("predict_real", tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8))

    with tf.name_scope("predict_fake_summary"):
        tf.summary.image("predict_fake", tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8))

    tf.summary.scalar("discriminator_loss", model.discrim_loss)
    tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
    tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    saver = tf.train.Saver(max_to_keep=1)

    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)
    with sv.managed_session() as sess:
        print("parameter_count =", sess.run(parameter_count))

        if a.checkpoint is not None:
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            saver.restore(sess, checkpoint)

        max_steps = 2**32
        if a.max_epochs is not None:
            max_steps = examples.steps_per_epoch * a.max_epochs
        if a.max_steps is not None:
            max_steps = a.max_steps

        if a.mode == "test":
            # testing
            # at most, process the test data once
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                filesets = save_images(results)
                for i, f in enumerate(filesets):
                    print("evaluated image", f["name"])
                index_path = append_index(filesets)

            print("wrote index at", index_path)
        else:
            # training
            start = time.time()

            for step in range(max_steps):
                def should(freq):
                    return freq > 0 and ((step + 1) % freq == 0 or step == max_steps - 1)

                options = None
                run_metadata = None
                if should(a.trace_freq):
                    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                fetches = {
                    "train": model.train,
                    "global_step": sv.global_step,
                }

                if should(a.progress_freq):
                    fetches["discrim_loss"] = model.discrim_loss
                    fetches["gen_loss_GAN"] = model.gen_loss_GAN
                    fetches["gen_loss_L1"] = model.gen_loss_L1

                if should(a.summary_freq):
                    fetches["summary"] = sv.summary_op

                if should(a.display_freq):
                    fetches["display"] = display_fetches

                results = sess.run(fetches, options=options, run_metadata=run_metadata)

                if should(a.summary_freq):
                    print("recording summary")
                    sv.summary_writer.add_summary(results["summary"], results["global_step"])

                if should(a.display_freq):
                    print("saving display images")
                    filesets = save_images(results["display"], step=results["global_step"])
                    append_index(filesets, step=True)

                if should(a.trace_freq):
                    print("recording trace")
                    sv.summary_writer.add_run_metadata(run_metadata, "step_%d" % results["global_step"])

                if should(a.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] / examples.steps_per_epoch)
                    train_step = (results["global_step"] - 1) % examples.steps_per_epoch + 1
                    rate = (step + 1) * a.batch_size / (time.time() - start)
                    remaining = (max_steps - step) * a.batch_size / rate
                    print("progress  epoch %d  step %d  image/sec %0.1f  remaining %dm" % (train_epoch, train_step, rate, remaining / 60))
                    print("discrim_loss", results["discrim_loss"])
                    print("gen_loss_GAN", results["gen_loss_GAN"])
                    print("gen_loss_L1", results["gen_loss_L1"])

                if should(a.save_freq):
                    print("saving model")
                    saver.save(sess, os.path.join(a.output_dir, "model"), global_step=sv.global_step)

                if sv.should_stop():
                    break
Example #34
0
def handle():
    image_data = tf.decode_base64(request.data)
    array = tf.image.decode_jpeg(image_data)

    pp.pprint(array)
    return match(array)
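
As written, handle() only builds graph ops, so array is a symbolic tensor rather than pixel data when it reaches match(). A sketch of a session-evaluated variant, assuming request, pp, and match come from the surrounding web app (not shown here):

import tensorflow as tf

def handle():
    # decode the request payload and evaluate it via a one-off session
    # so match() receives a numpy array instead of a tensor
    image_data = tf.decode_base64(request.data)
    array_op = tf.image.decode_jpeg(image_data)
    with tf.Session() as sess:
        array = sess.run(array_op)
    pp.pprint(array)
    return match(array)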
def batch_base64_to_tensor(input_text):
    img = tf.map_fn(tf.image.decode_image,
                    tf.decode_base64(input_text),
                    dtype=tf.uint8)
    return img
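
A usage sketch for the batched helper above, assuming input_text is a 1-D tensor of web-safe base64 payloads and that all images decode to the same shape (tf.map_fn requires uniform element shapes; the file names are illustrative):

import base64
import tensorflow as tf

def load_websafe(path):
    with open(path, "rb") as f:
        return base64.urlsafe_b64encode(f.read()).decode("utf-8")

batch = tf.convert_to_tensor([load_websafe("a.png"), load_websafe("b.png")])
images = batch_base64_to_tensor(batch)  # [2, height, width, channels] uint8
with tf.Session() as sess:
    print(sess.run(images).shape)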
Example #36
0
def main():
    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    if a.mode == "test" or a.mode == "export":
        if a.checkpoint is None:
            raise Exception("checkpoint required for test mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(a.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(a, key, val)
        # disable these features in test mode
        a.scale_size = CROP_SIZE
        a.flip = False

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    if a.mode == "export":
        # export the generator to a meta graph that can be imported later for standalone generation
        if a.lab_colorization:
            raise Exception("export not supported for lab_colorization")

        input = tf.placeholder(tf.string, shape=[1])
        input_data = tf.decode_base64(input[0])
        input_image = tf.image.decode_png(input_data)

        # remove alpha channel if present
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2],
                                       4), lambda: input_image[:, :, :3],
                              lambda: input_image)
        # convert grayscale to RGB
        input_image = tf.cond(tf.equal(tf.shape(input_image)[2], 1),
                              lambda: tf.image.grayscale_to_rgb(input_image),
                              lambda: input_image)

        input_image = tf.image.convert_image_dtype(input_image,
                                                   dtype=tf.float32)
        input_image.set_shape([CROP_SIZE, CROP_SIZE, 3])
        batch_input = tf.expand_dims(input_image, axis=0)

        with tf.variable_scope("generator"):
            batch_output = deprocess(
                create_generator(preprocess(batch_input), 3))

        output_image = tf.image.convert_image_dtype(batch_output,
                                                    dtype=tf.uint8)[0]
        if a.output_filetype == "png":
            output_data = tf.image.encode_png(output_image)
        elif a.output_filetype == "jpeg":
            output_data = tf.image.encode_jpeg(output_image, quality=80)
        else:
            raise Exception("invalid filetype")
        output = tf.convert_to_tensor([tf.encode_base64(output_data)])

        key = tf.placeholder(tf.string, shape=[1])
        inputs = {"key": key.name, "input": input.name}
        tf.add_to_collection("inputs", json.dumps(inputs))
        outputs = {
            "key": tf.identity(key).name,
            "output": output.name,
        }
        tf.add_to_collection("outputs", json.dumps(outputs))

        init_op = tf.global_variables_initializer()
        restore_saver = tf.train.Saver()
        export_saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            restore_saver.restore(sess, checkpoint)
            print("exporting model")
            export_saver.export_meta_graph(
                filename=os.path.join(a.output_dir, "export.meta"))
            export_saver.save(sess,
                              os.path.join(a.output_dir, "export"),
                              write_meta_graph=False)

        return

    examples = load_examples()
    print("examples count = %d" % examples.count)

    # inputs and targets are [batch_size, height, width, channels]
    model = create_model(examples.inputs, examples.targets)

    # undo colorization splitting on images that we use for display/output
    if a.lab_colorization:
        if a.which_direction == "AtoB":
            # inputs is brightness, this will be handled fine as a grayscale image
            # need to augment targets and outputs with brightness
            targets = augment(examples.targets, examples.inputs)
            outputs = augment(model.outputs, examples.inputs)
            # inputs can be deprocessed normally and handled as if they are single channel
            # grayscale images
            inputs = deprocess(examples.inputs)
        elif a.which_direction == "BtoA":
            # inputs will be color channels only, get brightness from targets
            inputs = augment(examples.inputs, examples.targets)
            targets = deprocess(examples.targets)
            outputs = deprocess(model.outputs)
        else:
            raise Exception("invalid direction")
    else:
        inputs = deprocess(examples.inputs)
        targets = deprocess(examples.targets)
        outputs = deprocess(model.outputs)

    def convert(image):
        if a.aspect_ratio != 1.0:
            # upscale to correct aspect ratio
            size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]
            image = tf.image.resize_images(
                image, size=size, method=tf.image.ResizeMethod.BICUBIC)

        return tf.image.convert_image_dtype(image,
                                            dtype=tf.uint8,
                                            saturate=True)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_inputs"):
        converted_inputs = convert(inputs)

    with tf.name_scope("convert_targets"):
        converted_targets = convert(targets)

    with tf.name_scope("convert_outputs"):
        converted_outputs = convert(outputs)

    with tf.name_scope("encode_images"):
        display_fetches = {
            "paths":
            examples.paths,
            "inputs":
            tf.map_fn(tf.image.encode_png,
                      converted_inputs,
                      dtype=tf.string,
                      name="input_pngs"),
            "targets":
            tf.map_fn(tf.image.encode_png,
                      converted_targets,
                      dtype=tf.string,
                      name="target_pngs"),
            "outputs":
            tf.map_fn(tf.image.encode_png,
                      converted_outputs,
                      dtype=tf.string,
                      name="output_pngs"),
        }

    # summaries
    with tf.name_scope("inputs_summary"):
        tf.summary.image("inputs", converted_inputs)

    with tf.name_scope("targets_summary"):
        tf.summary.image("targets", converted_targets)

    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    with tf.name_scope("predict_real_summary"):
        tf.summary.image(
            "predict_real",
            tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8))

    with tf.name_scope("predict_fake_summary"):
        tf.summary.image(
            "predict_fake",
            tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8))

    tf.summary.scalar("discriminator_loss", model.discrim_loss)
    tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
    tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    saver = tf.train.Saver(max_to_keep=1)

    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)
    with sv.managed_session() as sess:
        print("parameter_count =", sess.run(parameter_count))

        if a.checkpoint is not None:
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            saver.restore(sess, checkpoint)

        max_steps = 2**32
        if a.max_epochs is not None:
            max_steps = examples.steps_per_epoch * a.max_epochs
        if a.max_steps is not None:
            max_steps = a.max_steps

        if a.mode == "test":
            # testing
            # at most, process the test data once
            start = time.time()
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                filesets = save_images(results)
                for i, f in enumerate(filesets):
                    print("evaluated image", f["name"])
                index_path = append_index(filesets)
            print("wrote index at", index_path)
            print("rate", (time.time() - start) / max_steps)
        else:
            # training
            start = time.time()

            for step in range(max_steps):

                def should(freq):
                    return freq > 0 and ((step + 1) % freq == 0
                                         or step == max_steps - 1)

                options = None
                run_metadata = None
                if should(a.trace_freq):
                    options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()

                fetches = {
                    "train": model.train,
                    "global_step": sv.global_step,
                }

                if should(a.progress_freq):
                    fetches["discrim_loss"] = model.discrim_loss
                    fetches["gen_loss_GAN"] = model.gen_loss_GAN
                    fetches["gen_loss_L1"] = model.gen_loss_L1

                if should(a.summary_freq):
                    fetches["summary"] = sv.summary_op

                if should(a.display_freq):
                    fetches["display"] = display_fetches

                results = sess.run(fetches,
                                   options=options,
                                   run_metadata=run_metadata)

                if should(a.summary_freq):
                    print("recording summary")
                    sv.summary_writer.add_summary(results["summary"],
                                                  results["global_step"])

                if should(a.display_freq):
                    print("saving display images")
                    filesets = save_images(results["display"],
                                           step=results["global_step"])
                    append_index(filesets, step=True)

                if should(a.trace_freq):
                    print("recording trace")
                    sv.summary_writer.add_run_metadata(
                        run_metadata, "step_%d" % results["global_step"])

                if should(a.progress_freq):
                    # global_step will have the correct step count if we resume from a checkpoint
                    train_epoch = math.ceil(results["global_step"] /
                                            examples.steps_per_epoch)
                    train_step = (results["global_step"] -
                                  1) % examples.steps_per_epoch + 1
                    rate = (step + 1) * a.batch_size / (time.time() - start)
                    remaining = (max_steps - step) * a.batch_size / rate
                    print(
                        "progress  epoch %d  step %d  image/sec %0.1f  remaining %dm"
                        % (train_epoch, train_step, rate, remaining / 60))
                    print("discrim_loss", results["discrim_loss"])
                    print("gen_loss_GAN", results["gen_loss_GAN"])
                    print("gen_loss_L1", results["gen_loss_L1"])

                if should(a.save_freq):
                    print("saving model")
                    saver.save(sess,
                               os.path.join(a.output_dir, "model"),
                               global_step=sv.global_step)

                if sv.should_stop():
                    break
Example #37
0
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

export_dir = "modelbase64"
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)

sigs = {}

with tf.Graph().as_default() as g1:
    base64_str = tf.placeholder(tf.string, shape=[None], name='input_string')
    base64_scalar = tf.reshape(base64_str, [])
    input_str = tf.decode_base64(base64_scalar)
    decoded_image = tf.image.decode_png(input_str, channels=3)
    decoded_image_as_float = tf.image.convert_image_dtype(
        decoded_image, tf.float32)
    decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
    resize_shape = tf.stack([224, 224])
    resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
    resize_image = tf.image.resize_bilinear(decoded_image_4d,
                                            resize_shape_as_int)
    print(resize_image.shape)
    tf.identity(resize_image, name="DecodeJPGOutput")

g1def = g1.as_graph_def()

with tf.Graph().as_default() as g2:
    with tf.Session(graph=g2) as sess:
        tf.saved_model.loader.load(sess, ["serve"], "./saved_model")
        graph = tf.get_default_graph()