Example #1
def classification_signature_fn(examples, unused_features, predictions):
    """Creates classification signature from given examples and predictions.

  Args:
    examples: `Output`.
    unused_features: `dict` of `Output`s.
    predictions: `Output` or dict of tensors that contains the classes tensor
      as in {'classes': `Output`}.

  Returns:
    Tuple of default classification signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
    if examples is None:
        raise ValueError(
            'examples cannot be None when using this signature fn.')

    if isinstance(predictions, dict):
        default_signature = exporter.classification_signature(
            examples, classes_tensor=predictions['classes'])
    else:
        default_signature = exporter.classification_signature(
            examples, classes_tensor=predictions)
    return default_signature, {}
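A hedged usage sketch (not part of the example above): in the old tf.contrib.learn API, a signature_fn like this was handed to Estimator.export(), which called it with (examples, features, predictions) to build the Servo signature. The estimator instance and export directory below are assumed placeholders.

# Sketch under the assumptions above; `my_estimator` is a fitted
# tf.contrib.learn Estimator and '/tmp/my_model' is an arbitrary export dir.
my_estimator.export(
    export_dir='/tmp/my_model',
    signature_fn=classification_signature_fn)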
Example #2
def classification_signature_fn_with_prob(
    examples, unused_features, predictions):
  """Classification signature from given examples and predicted probabilities.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of predicted probabilities or dict that contains the
      probabilities tensor as in {'probabilities': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  if isinstance(predictions, dict):
    default_signature = exporter.classification_signature(
        examples, scores_tensor=predictions['probabilities'])
  else:
    default_signature = exporter.classification_signature(
        examples, scores_tensor=predictions)
  return default_signature, {}
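For reference, a hedged variant combining the two patterns above: exporter.classification_signature accepts classes_tensor and scores_tensor together, so a prediction dict carrying both keys can export one signature with class labels and probabilities. The dict keys below are assumptions mirroring Examples #1 and #2.

# Sketch, assuming `predictions` holds both keys used in Examples #1 and #2.
default_signature = exporter.classification_signature(
    examples,
    classes_tensor=predictions['classes'],
    scores_tensor=predictions['probabilities'])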
Example #3
def classification_signature_fn_with_prob(examples, unused_features,
                                          predictions):
    """Classification signature from given examples and predicted probabilities.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of predicted probabilities or dict that contains the
      probabilities tensor as in {'probabilities': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
    if examples is None:
        raise ValueError(
            'examples cannot be None when using this signature fn.')

    if isinstance(predictions, dict):
        default_signature = exporter.classification_signature(
            examples, scores_tensor=predictions['probabilities'])
    else:
        default_signature = exporter.classification_signature(
            examples, scores_tensor=predictions)
    return default_signature, {}
Example #4
        def _classification_signature_fn(examples, unused_features,
                                         predictions):
            """Servo signature function."""
            if isinstance(predictions, dict):
                default_signature = exporter.classification_signature(
                    input_tensor=examples,
                    classes_tensor=predictions[PredictionKey.CLASSES],
                    scores_tensor=predictions[PredictionKey.PROBABILITIES])
            else:
                default_signature = exporter.classification_signature(
                    input_tensor=examples, scores_tensor=predictions)

            # TODO(zakaria): add validation
            return default_signature, {}
Example #5
    def _classification_signature_fn(examples, unused_features, predictions):
      """Servo signature function."""
      if isinstance(predictions, dict):
        default_signature = exporter.classification_signature(
            input_tensor=examples,
            classes_tensor=predictions[PredictionKey.CLASSES],
            scores_tensor=predictions[PredictionKey.PROBABILITIES])
      else:
        default_signature = exporter.classification_signature(
            input_tensor=examples,
            scores_tensor=predictions)

      # TODO(zakaria): add validation
      return default_signature, {}
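For context, a hedged sketch of the PredictionKey constants referenced above: in tf.contrib.learn they were plain string constants, so the dict lookups resolve to ordinary keys.

# Minimal stand-in for tf.contrib.learn's PredictionKey (the string values
# are assumed to match the library's 'classes'/'probabilities' keys).
class PredictionKey(object):
  CLASSES = 'classes'
  PROBABILITIES = 'probabilities'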
Example #6
def export():
    with tf.Graph().as_default():
        # Build inference model.
        # Please refer to Tensorflow inception model for details.

        # Input transformation.
        jpegs = tf.placeholder(tf.string)
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
        print(images)
        # Run inference.
        feature = vgg.inference(images)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, 'model/inshop.sgd.adam')
            # Export inference model.
            init_op = tf.group(tf.initialize_all_tables(), name='init_op')

            model_exporter = exporter.Exporter(saver)
            signature = exporter.classification_signature(
                input_tensor=jpegs, classes_tensor=None, scores_tensor=feature)
            model_exporter.init(default_graph_signature=signature,
                                init_op=init_op)
            model_exporter.export('model', tf.constant(150000), sess)
            print('Successfully exported model to model/.')
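Example #6 maps an undefined preprocess_image helper over the JPEG strings; a plausible minimal implementation is sketched below. The 224x224 input size is an assumption for a VGG-style network.

# Hedged sketch of the missing helper: decode, scale to [0, 1), resize.
def preprocess_image(jpeg_buffer):
    image = tf.image.decode_jpeg(jpeg_buffer, channels=3)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = tf.expand_dims(image, 0)                      # resize expects 4-D
    image = tf.image.resize_bilinear(image, [224, 224])   # size is an assumption
    return tf.squeeze(image, [0])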
Example #7
def exporter(saver, sess):
    model_exporter = exp.Exporter(saver)
    # classification_signature has no `pred_tensor` argument; scores_tensor
    # (or classes_tensor) is assumed to be the intended keyword here.
    signature = exp.classification_signature(input_tensor=img,
                                             scores_tensor=pred_val)
    model_exporter.init(default_graph_signature=signature,
                        init_op=tf.initialize_all_tables())
    model_exporter.export(FLAGS.log_dir + "/export", tf.constant(time.time()),
                          sess)
Example #8
def export_model_to_tensorflow(path_to_trained_keras_model: str):
    print("Loading model for exporting to Protocol Buffer format...")
    model = keras.models.load_model(path_to_trained_keras_model)

    sess = K.get_session()

    # serialize the model and get its weights, for quick re-building
    config = model.get_config()
    weights = model.get_weights()

    # re-build a model where the learning phase is now hard-coded to 0
    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = os.path.abspath(os.path.join("export", "simple"))  # where to save the exported graph
    os.makedirs(export_path)
    checkpoint_state_name = "checkpoint_state"
    export_version = 1  # version number (integer)
    saver = tensorflow.train.Saver(sharded=True, name=checkpoint_state_name)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=model.input, scores_tensor=model.output)

    # # Version 1 of exporter
    # model_exporter.init(sess.graph.as_graph_def(), default_graph_signature=signature)
    # model_exporter.export(export_path, tensorflow.constant(export_version), sess)
    #
    # # Version 2 of exporter
    # tensorflow.train.write_graph(sess.graph.as_graph_def(), logdir=".", name="simple.pbtxt", as_text=True)

    # Version 3 with Freezer from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph_test.py
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"
    saver_write_version = saver_pb2.SaverDef.V2

    # We'll create an input graph that has a single variable containing 1.0,
    # and that then multiplies it by 2.
    saver = tensorflow.train.Saver(write_version=saver_write_version)
    checkpoint_path = saver.save(sess, export_path, global_step=0, latest_filename=checkpoint_state_name)
    graph_io.write_graph(sess.graph, export_path, input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(export_path, input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "output_node/Softmax"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(export_path, output_graph_name)
    clear_devices = False
    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_path, clear_devices, "")

    shutil.copy(os.path.join("export", "simple", "output_graph.pb"), output_graph_name)
    shutil.rmtree("export")
    print("Exported model: {0}".format(os.path.abspath(output_graph_name)))
Example #9
def classification_signature_fn(examples, unused_features, predictions):
    """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` or dict of tensors that contains the classes tensor
      as in {'classes': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
    if isinstance(predictions, dict):
        default_signature = exporter.classification_signature(
            examples, classes_tensor=predictions['classes'])
    else:
        default_signature = exporter.classification_signature(
            examples, classes_tensor=predictions)
    return default_signature, {}
Example #10
def classification_signature_fn(examples, unused_features, predictions):
  """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` or dict of tensors that contains the classes tensor
      as in {'classes': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
  if isinstance(predictions, dict):
    default_signature = exporter.classification_signature(
        examples, classes_tensor=predictions['classes'])
  else:
    default_signature = exporter.classification_signature(
        examples, classes_tensor=predictions)
  return default_signature, {}
Example #11
def classification_signature_fn_with_prob(examples, unused_features,
                                          predictions):
    """Classification signature from given examples and predicted probabilities.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of predicted probabilities or dict that contains the
      probabilities tensor as in {'probabilities': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
    if isinstance(predictions, dict):
        default_signature = exporter.classification_signature(
            examples, scores_tensor=predictions['probabilities'])
    else:
        default_signature = exporter.classification_signature(
            examples, scores_tensor=predictions)
    return default_signature, {}
Example #12
def classification_signature_fn_with_prob(
    examples, unused_features, predictions):
  """Classification signature from given examples and predicted probabilities.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of predicted probabilities or dict that contains the
      probabilities tensor as in {'probabilities': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
  if isinstance(predictions, dict):
    default_signature = exporter.classification_signature(
        examples, scores_tensor=predictions['probabilities'])
  else:
    default_signature = exporter.classification_signature(
        examples, scores_tensor=predictions)
  return default_signature, {}
Example #13
def main(_):
    if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
        print(
            'Usage: mnist_export.py [--training_iteration=x] '
            '[--export_version=y] export_dir')
        sys.exit(-1)
    if FLAGS.training_iteration <= 0:
        print('Please specify a positive value for training iteration.')
        sys.exit(-1)
    if FLAGS.export_version <= 0:
        print('Please specify a positive value for version number.')
        sys.exit(-1)

    # Train model
    print('Training model...')
    mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
    sess = tf.InteractiveSession()
    x = tf.placeholder('float', shape=[None, 784])
    y_ = tf.placeholder('float', shape=[None, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    sess.run(tf.initialize_all_variables())
    y = tf.nn.softmax(tf.matmul(x, w) + b)
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(
        cross_entropy)
    for _ in range(FLAGS.training_iteration):
        batch = mnist.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    print('training accuracy %g' % sess.run(accuracy,
                                            feed_dict={
                                                x: mnist.test.images,
                                                y_: mnist.test.labels
                                            }))
    print('Done training!')

    # Export model
    # WARNING(break-tutorial-inline-code): The following code snippet is
    # in-lined in tutorials, please update tutorial documents accordingly
    # whenever code changes.
    export_path = sys.argv[-1]
    print('Exporting trained model to %s' % export_path)
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=x,
                                                  scores_tensor=y)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
    print('Done exporting!')
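The script above references FLAGS defined elsewhere; a typical definition block for these TF 0.x tutorials is sketched below (names match the usage above, defaults are assumptions).

tf.app.flags.DEFINE_integer('training_iteration', 1000,
                            'number of training iterations.')
tf.app.flags.DEFINE_integer('export_version', 1, 'version number of the model.')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory.')
FLAGS = tf.app.flags.FLAGS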
Example #14
def classification_signature_fn(examples, unused_features, predictions):
    """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor`.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
    default_signature = exporter.classification_signature(
        examples, classes_tensor=predictions)
    return default_signature, {}
Example #15
def classification_signature_fn(examples, unused_features, predictions):
    """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` or dict of tensors that contains the classes tensor
      as in {'classes': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
    if examples is None:
        raise ValueError("examples cannot be None when using this signature fn.")

    if isinstance(predictions, dict):
        default_signature = exporter.classification_signature(examples, classes_tensor=predictions["classes"])
    else:
        default_signature = exporter.classification_signature(examples, classes_tensor=predictions)
    return default_signature, {}
Example #16
def classification_signature_fn(examples, unused_features, predictions):
  """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor`.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
  default_signature = exporter.classification_signature(
      examples, classes_tensor=predictions)
  return default_signature, {}
Example #17
def classification_signature_fn_with_prob(
    examples, unused_features, predictions):
  """Classification signature from given examples and predicted probabilities.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of predicted probabilities.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
  default_signature = exporter.classification_signature(
      examples, scores_tensor=predictions)
  return default_signature, {}
Example #18
def classification_signature_fn_with_prob(examples, unused_features,
                                          predictions):
    """Classification signature from given examples and predicted probabilities.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of predicted probabilities.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
    default_signature = exporter.classification_signature(
        examples, scores_tensor=predictions)
    return default_signature, {}
Example #19
def classification_signature_fn(examples, unused_features, predictions):
  """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `dict` of `Tensor`s.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
  signature = exporter.classification_signature(
      examples,
      classes_tensor=predictions[Classifier.CLASS_OUTPUT],
      scores_tensor=predictions[Classifier.PROBABILITY_OUTPUT])
  return signature, {}
Example #20
def classification_signature_fn(examples, unused_features, predictions):
    """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `dict` of `Tensor`s.

  Returns:
    Tuple of default classification signature and empty named signatures.
  """
    signature = exporter.classification_signature(
        examples,
        classes_tensor=predictions[Classifier.CLASS_OUTPUT],
        scores_tensor=predictions[Classifier.PROBABILITY_OUTPUT])
    return signature, {}
Example #21
def main(_):
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_export.py [--training_iteration=x] '
          '[--export_version=y] export_dir')
    sys.exit(-1)
  if FLAGS.training_iteration <= 0:
    print('Please specify a positive value for training iteration.')
    sys.exit(-1)
  if FLAGS.export_version <= 0:
    print('Please specify a positive value for version number.')
    sys.exit(-1)

  # Train model
  print('Training model...')
  mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
  sess = tf.InteractiveSession()
  x = tf.placeholder('float', shape=[None, 784])
  y_ = tf.placeholder('float', shape=[None, 10])
  w = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  sess.run(tf.initialize_all_variables())
  y = tf.nn.softmax(tf.matmul(x, w) + b)
  cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
  train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
  for _ in range(FLAGS.training_iteration):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
  print('training accuracy %g' % sess.run(accuracy,
                                          feed_dict={x: mnist.test.images,
                                                     y_: mnist.test.labels}))
  print('Done training!')

  # Export model
  # WARNING(break-tutorial-inline-code): The following code snippet is
  # in-lined in tutorials, please update tutorial documents accordingly
  # whenever code changes.
  export_path = sys.argv[-1]
  print('Exporting trained model to %s' % export_path)
  saver = tf.train.Saver(sharded=True)
  model_exporter = exporter.Exporter(saver)
  signature = exporter.classification_signature(input_tensor=x, scores_tensor=y)
  model_exporter.init(sess.graph.as_graph_def(),
                      default_graph_signature=signature)
  model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
  print('Done exporting!')
Example #22
def saveWithSavedModel():
    # K.set_learning_phase(0)  # all new operations will be in test mode from now on

    # wordIndex = loadWordIndex()
    model = createModel()
    model.load_weights(KERAS_WEIGHTS_FILE)

    export_path = os.path.join(PUNCTUATOR_DIR,
                               'graph')  # where to save the exported graph

    shutil.rmtree(export_path, True)
    export_version = 1  # version number (integer)

    import tensorflow as tf
    sess = tf.Session()

    saver = tf.train.Saver(sharded=True)
    from tensorflow.contrib.session_bundle import exporter
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=model.input,
                                                  scores_tensor=model.output)
    # model_exporter.init(sess.graph.as_graph_def(),default_graph_signature=signature)
    tf.initialize_all_variables().run(session=sess)
    # model_exporter.export(export_path, tf.constant(export_version), sess)
    from tensorflow.python.saved_model import builder as saved_model_builder
    builder = saved_model_builder.SavedModelBuilder(export_path)
    from tensorflow.python.saved_model import signature_constants
    from tensorflow.python.saved_model import tag_constants
    legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
    from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
    signature_def = predict_signature_def(
        {signature_constants.PREDICT_INPUTS: model.input},
        {signature_constants.PREDICT_OUTPUTS: model.output})
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            signature_def
        },
        legacy_init_op=legacy_init_op)
    builder.save()
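A hedged sketch of loading the SavedModel written by saveWithSavedModel(); the tag and export_path mirror the builder call above.

import tensorflow as tf
from tensorflow.python.saved_model import loader, tag_constants

with tf.Session(graph=tf.Graph()) as sess:
    loader.load(sess, [tag_constants.SERVING], export_path)
    # The graph now contains the predict signature registered above.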
Example #23
def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()
  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
    feature_configs = {
        'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
    }
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    jpegs = tf_example['image/encoded']
    images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    # Note that the 0th index is an unused background class
    # (see inception model definition code).
    class_descriptions = ['unused background']
    for s in synsets:
      class_descriptions.append(texts[s])
    class_tensor = tf.constant(class_descriptions)

    classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      init_op = tf.group(tf.initialize_all_tables(), name='init_op')
      classification_signature = exporter.classification_signature(
          input_tensor=serialized_tf_example,
          classes_tensor=classes,
          scores_tensor=values)
      named_graph_signature = {
          'inputs': exporter.generic_signature({'images': jpegs}),
          'outputs': exporter.generic_signature({
              'classes': classes,
              'scores': values
          })}
      model_exporter = exporter.Exporter(saver)
      model_exporter.init(
          init_op=init_op,
          default_graph_signature=classification_signature,
          named_graph_signatures=named_graph_signature)
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)
Example #24
def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()
  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    # TODO(b/27776734): Add batching support.
    jpegs = tf.placeholder(tf.string, shape=(1))
    image_buffer = tf.squeeze(jpegs, [0])
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    # After this point, all image pixels reside in [0,1)
    # until the very end, when they're rescaled to (-1, 1).  The various
    # adjust_* ops all require this range for dtype float.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=0.875)
    # Resize the image to the original height and width.
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image,
                                     [FLAGS.image_size, FLAGS.image_size],
                                     align_corners=False)
    image = tf.squeeze(image, [0])
    # Finally, rescale to [-1,1] instead of [0, 1)
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    images = tf.expand_dims(image, 0)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    # Note that the 0th index is an unused background class
    # (see inception model definition code).
    class_descriptions = ['unused background']
    for s in synsets:
      class_descriptions.append(texts[s])
    class_tensor = tf.constant(class_descriptions)

    classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      init_op = tf.group(tf.initialize_all_tables(), name='init_op')
      model_exporter = exporter.Exporter(saver)
      signature = exporter.classification_signature(
          input_tensor=jpegs, classes_tensor=classes, scores_tensor=values)
      model_exporter.init(default_graph_signature=signature, init_op=init_op)
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)
Example #25
def fam(train_i, train_o, test_i, test_o):
    sess = tf.Session()
    K.set_session(sess)
    K.set_learning_phase(1)

    batch_size = 60
    nb_classes = len(MOD)
    nb_epoch = 20

    img_rows, img_cols = 2 * P * L, 2 * Np
    nb_filters = 96
    nb_pool = 2

    X_train, Y_train = shuffle_in_unison_inplace(np.array(train_i),
                                                 np.array(train_o))

    model = Sequential()
    model.add(
        Convolution2D(64,
                      11,
                      11,
                      subsample=(2, 2),
                      input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(128, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(nb_classes, init='normal'))
    model.add(Activation('softmax', name="out"))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    """
    datagen = ImageDataGenerator(
        #featurewise_center=True,
        #featurewise_std_normalization=True,
        rotation_range=20,
        #width_shift_range=0.3,
        #height_shift_range=0.3,
        #zoom_range=[0,1.3],
        horizontal_flip=True,
        vertical_flip=True)

    datagen.fit(X_train)

    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size,shuffle=True),
                    samples_per_epoch=len(X_train), nb_epoch=5,verbose=1,validation_data=(test_i[0], test_o[0]))

    """

    model.fit(X_train,
              Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=1,
              shuffle=True,
              validation_data=(test_i[0], test_o[0]))

    for s in range(len(test_i)):
        if len(test_i[s]) == 0:
            continue
        X_test = test_i[s]
        Y_test = test_o[s]
        score = model.evaluate(X_test, Y_test, verbose=0)
        print("SNR", SNR[s], "Test accuracy:", score[1])

    K.set_learning_phase(0)
    config = model.get_config()
    weights = model.get_weights()

    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = "/tmp/fam"
    export_version = 1

    labels_tensor = tf.constant(MOD)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=new_model.input,
        classes_tensor=labels_tensor,
        scores_tensor=new_model.output)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
Example #26
def main(_):
    if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
        print(
            'Usage: mnist_export.py [--training_iteration=x] '
            '[--export_version=y] export_dir')
        sys.exit(-1)
    if FLAGS.training_iteration <= 0:
        print('Please specify a positive value for training iteration.')
        sys.exit(-1)
    if FLAGS.export_version <= 0:
        print('Please specify a positive value for version number.')
        sys.exit(-1)

    # Train model
    print('Training model...')
    mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
    sess = tf.InteractiveSession()
    serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
    feature_configs = {
        'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),
    }
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    x = tf_example['x']
    y_ = tf.placeholder('float', shape=[None, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    sess.run(tf.initialize_all_variables())
    y = tf.nn.softmax(tf.matmul(x, w) + b)
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(
        cross_entropy)
    values, indices = tf.nn.top_k(y, 10)
    prediction_classes = tf.contrib.lookup.index_to_string(
        tf.to_int64(indices),
        mapping=tf.constant([str(i) for i in range(10)]))
    for _ in range(FLAGS.training_iteration):
        batch = mnist.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    print('training accuracy %g' % sess.run(accuracy,
                                            feed_dict={
                                                x: mnist.test.images,
                                                y_: mnist.test.labels
                                            }))
    print('Done training!')

    # Export model
    # WARNING(break-tutorial-inline-code): The following code snippet is
    # in-lined in tutorials, please update tutorial documents accordingly
    # whenever code changes.
    export_path = sys.argv[-1]
    print('Exporting trained model to %s' % export_path)
    init_op = tf.group(tf.initialize_all_tables(), name='init_op')
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(
        sess.graph.as_graph_def(),
        init_op=init_op,
        default_graph_signature=exporter.classification_signature(
            input_tensor=serialized_tf_example,
            classes_tensor=prediction_classes,
            scores_tensor=values),
        named_graph_signatures={
            'inputs': exporter.generic_signature({'images': x}),
            'outputs': exporter.generic_signature({'scores': y})
        })
    model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
    print('Done exporting!')
Example #27
def main(_):
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_export.py [--training_iteration=x] '
          '[--export_version=y] export_dir')
    sys.exit(-1)
  if FLAGS.training_iteration <= 0:
    print('Please specify a positive value for training iteration.')
    sys.exit(-1)
  if FLAGS.export_version <= 0:
    print('Please specify a positive value for version number.')
    sys.exit(-1)

  # Train model
  print('Training model...')
  mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
  sess = tf.InteractiveSession()
  serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
  feature_configs = {
      'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),
  }
  tf_example = tf.parse_example(serialized_tf_example, feature_configs)
  x = tf.identity(tf_example['x'], name='x')  # use tf.identity() to assign name
  y_ = tf.placeholder('float', shape=[None, 10])
  w = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  sess.run(tf.initialize_all_variables())
  y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')
  cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
  train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
  values, indices = tf.nn.top_k(y, 10)
  prediction_classes = tf.contrib.lookup.index_to_string(
      tf.to_int64(indices),
      mapping=tf.constant([str(i) for i in range(10)]))
  for _ in range(FLAGS.training_iteration):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
  print('training accuracy %g' % sess.run(accuracy,
                                          feed_dict={x: mnist.test.images,
                                                     y_: mnist.test.labels}))
  print('Done training!')

  # Export model
  # WARNING(break-tutorial-inline-code): The following code snippet is
  # in-lined in tutorials, please update tutorial documents accordingly
  # whenever code changes.
  export_path = sys.argv[-1]
  print('Exporting trained model to %s' % export_path)
  init_op = tf.group(tf.initialize_all_tables(), name='init_op')
  saver = tf.train.Saver(sharded=True)
  model_exporter = exporter.Exporter(saver)
  model_exporter.init(
      sess.graph.as_graph_def(),
      init_op=init_op,
      default_graph_signature=exporter.classification_signature(
          input_tensor=serialized_tf_example,
          classes_tensor=prediction_classes,
          scores_tensor=values),
      named_graph_signatures={
          'inputs': exporter.generic_signature({'images': x}),
          'outputs': exporter.generic_signature({'scores': y})})
  model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
  print('Done exporting!')
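A client-side sketch (an assumption, not from the example): the exported default signature feeds serialized_tf_example, so a request serializes a tf.train.Example with a float feature named 'x' holding the 784 pixel values.

import tensorflow as tf

def make_request_string(pixels):
    # pixels: a flat list of 784 floats, matching the FixedLenFeature above.
    example = tf.train.Example(features=tf.train.Features(feature={
        'x': tf.train.Feature(float_list=tf.train.FloatList(value=pixels)),
    }))
    return example.SerializeToString()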
Example #28
def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()
  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
    feature_configs = {
        'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
    }
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    jpegs = tf_example['image/encoded']
    images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    # Note that the 0th index is an unused background class
    # (see inception model definition code).
    class_descriptions = ['unused background']
    for s in synsets:
      class_descriptions.append(texts[s])
    class_tensor = tf.constant(class_descriptions)

    classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      init_op = tf.group(tf.initialize_all_tables(), name='init_op')
      classification_signature = exporter.classification_signature(
          input_tensor=serialized_tf_example,
          classes_tensor=classes,
          scores_tensor=values)
      named_graph_signature = {
          'inputs': exporter.generic_signature({'images': jpegs}),
          'outputs': exporter.generic_signature({
              'classes': classes,
              'scores': values
          })}
      model_exporter = exporter.Exporter(saver)
      model_exporter.init(
          init_op=init_op,
          default_graph_signature=classification_signature,
          named_graph_signatures=named_graph_signature)
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)
Example #29
def export(model_path, export_path, export_version, export_for_serving, cfg):
  
  graph = tf.get_default_graph()

  sess_config = tf.ConfigProto(
    log_device_placement=False,
    allow_soft_placement = True,
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=cfg.SESSION_CONFIG.PER_PROCESS_GPU_MEMORY_FRACTION
    )
  )
  
  # GVH: This is a little tricky. 
  #   tf.image.decode_jpeg does not have a batch implementation, creating a bottleneck
  #   for batching. We can request the user to send in a raveled image, but this will 
  #   increase our transport size over the network. Also, should we assume that the images
  #   have been completely preprocessed by the user? (mean subtracted, scaled by std, etc?)
  
  # GVH: We could just make this a switch and let the user decide what to do.
  
  # JPEG bytes:
#   jpegs = tf.placeholder(tf.string, shape=(1))
#   image_buffer = tf.squeeze(jpegs, [0])
#   image = tf.image.decode_jpeg(image_buffer, channels=3)
#   image = tf.cast(image, tf.float32)
#   images = tf.expand_dims(image, 0)
#   images = tf.image.resize_images(images, cfg.INPUT_SIZE, cfg.INPUT_SIZE)
#   images -= cfg.IMAGE_MEAN
#   images /= cfg.IMAGE_STD
  
  # For now we'll assume that the user is sending us a raveled array, totally preprocessed. 
  image_data = tf.placeholder(tf.float32, [None, cfg.INPUT_SIZE * cfg.INPUT_SIZE * 3], name="images")
  images = tf.reshape(image_data, [-1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
  
  
  features = model.build(graph, images, cfg)
  logits = add_logits(graph, features, cfg.NUM_CLASSES)
  class_scores, predicted_classes = tf.nn.top_k(logits, k=cfg.NUM_CLASSES)
  
  ema = tf.train.ExponentialMovingAverage(decay=cfg.MOVING_AVERAGE_DECAY)
  shadow_vars = {
    ema.average_name(var) : var
    for var in graph.get_collection('conv_params')
  }
  shadow_vars.update({
    ema.average_name(var) : var
    for var in graph.get_collection('batchnorm_params')
  })
  shadow_vars.update({
    ema.average_name(var) : var
    for var in graph.get_collection('softmax_params')
  })
  shadow_vars.update({
    ema.average_name(var) : var
    for var in graph.get_collection('batchnorm_mean_var')
  })

  # Restore the variables
  saver = tf.train.Saver(shadow_vars, reshape=True)
  
  with tf.Session(graph=graph, config=sess_config) as sess:
    
    sess.run(tf.global_variables_initializer())  # run the init op rather than only creating it
    
    saver.restore(sess, model_path)

    # TODO: Change to options flag
    if export_for_serving:
      export_saver = tf.train.Saver(sharded=True)
      model_exporter = exporter.Exporter(export_saver)
      signature = exporter.classification_signature(input_tensor=image_data, scores_tensor=class_scores, classes_tensor=predicted_classes)
      model_exporter.init(sess.graph.as_graph_def(),
                          default_graph_signature=signature)
      model_exporter.export(export_path, tf.constant(export_version), sess)
    
    else:
      v2c = graph_util.convert_variables_to_constants
      deploy_graph_def = v2c(sess, graph.as_graph_def(), [logits.name[:-2]])
    
      if not os.path.exists(export_path):
          os.makedirs(export_path)
      save_path = os.path.join(export_path, 'constant_model-%d.pb' % (export_version,))
      with open(save_path, 'wb') as f:
          f.write(deploy_graph_def.SerializeToString())
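A hedged sketch of consuming the constant graph written in the non-serving branch above; save_path matches the file written by Example #29, and the output tensor is fetched by the logits tensor's name.

import tensorflow as tf

graph_def = tf.GraphDef()
with open(save_path, 'rb') as f:
    graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as g:
    tf.import_graph_def(graph_def, name='')
    # e.g. fetch the logits by name: g.get_tensor_by_name(logits.name)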
Example #30
    def doBasicsOneExportPath(self,
                              export_path,
                              clear_devices=False,
                              global_step=GLOBAL_STEP,
                              sharded=True):
        # Build a graph with 2 parameter nodes on different devices.
        tf.reset_default_graph()
        with tf.Session(target="",
                        config=config_pb2.ConfigProto(
                            device_count={"CPU": 2})) as sess:
            # v2 is an unsaved variable derived from v0 and v1.  It is used to
            # exercise the ability to run an init op when restoring a graph.
            with sess.graph.device("/cpu:0"):
                v0 = tf.Variable(10, name="v0")
            with sess.graph.device("/cpu:1"):
                v1 = tf.Variable(20, name="v1")
            v2 = tf.Variable(1, name="v2", trainable=False, collections=[])
            assign_v2 = tf.assign(v2, tf.add(v0, v1))
            init_op = tf.group(assign_v2, name="init_op")

            tf.add_to_collection("v", v0)
            tf.add_to_collection("v", v1)
            tf.add_to_collection("v", v2)

            global_step_tensor = tf.Variable(global_step, name="global_step")
            named_tensor_bindings = {
                "logical_input_A": v0,
                "logical_input_B": v1
            }
            signatures = {
                "foo":
                exporter.regression_signature(input_tensor=v0,
                                              output_tensor=v1),
                "generic":
                exporter.generic_signature(named_tensor_bindings)
            }

            asset_filepath_orig = os.path.join(tf.test.get_temp_dir(),
                                               "hello42.txt")
            asset_file = tf.constant(asset_filepath_orig, name="filename42")
            tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, asset_file)

            with gfile.FastGFile(asset_filepath_orig, "w") as f:
                f.write("your data here")
            assets_collection = tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS)

            ignored_asset = os.path.join(tf.test.get_temp_dir(), "ignored.txt")
            with gfile.FastGFile(ignored_asset, "w") as f:
                f.write("additional data here")

            tf.initialize_all_variables().run()

            # Run an export.
            save = tf.train.Saver({
                "v0": v0,
                "v1": v1
            },
                                  restore_sequentially=True,
                                  sharded=sharded)
            export = exporter.Exporter(save)
            export.init(
                sess.graph.as_graph_def(),
                init_op=init_op,
                clear_devices=clear_devices,
                default_graph_signature=exporter.classification_signature(
                    input_tensor=v0),
                named_graph_signatures=signatures,
                assets_collection=assets_collection)
            export.export(export_path,
                          global_step_tensor,
                          sess,
                          exports_to_keep=gc.largest_export_versions(2))

        # Restore graph.
        compare_def = tf.get_default_graph().as_graph_def()
        tf.reset_default_graph()
        with tf.Session(target="",
                        config=config_pb2.ConfigProto(
                            device_count={"CPU": 2})) as sess:
            save = tf.train.import_meta_graph(
                os.path.join(export_path,
                             constants.VERSION_FORMAT_SPECIFIER % global_step,
                             constants.META_GRAPH_DEF_FILENAME))
            self.assertIsNotNone(save)
            meta_graph_def = save.export_meta_graph()
            collection_def = meta_graph_def.collection_def

            # Validate custom graph_def.
            graph_def_any = collection_def[constants.GRAPH_KEY].any_list.value
            self.assertEquals(len(graph_def_any), 1)
            graph_def = tf.GraphDef()
            graph_def_any[0].Unpack(graph_def)
            if clear_devices:
                for node in compare_def.node:
                    node.device = ""
            self.assertProtoEquals(compare_def, graph_def)

            # Validate init_op.
            init_ops = collection_def[constants.INIT_OP_KEY].node_list.value
            self.assertEquals(len(init_ops), 1)
            self.assertEquals(init_ops[0], "init_op")

            # Validate signatures.
            signatures_any = collection_def[
                constants.SIGNATURES_KEY].any_list.value
            self.assertEquals(len(signatures_any), 1)
            signatures = manifest_pb2.Signatures()
            signatures_any[0].Unpack(signatures)
            default_signature = signatures.default_signature
            self.assertEqual(
                default_signature.classification_signature.input.tensor_name,
                "v0:0")
            bindings = signatures.named_signatures[
                "generic"].generic_signature.map
            self.assertEquals(bindings["logical_input_A"].tensor_name, "v0:0")
            self.assertEquals(bindings["logical_input_B"].tensor_name, "v1:0")
            read_foo_signature = (
                signatures.named_signatures["foo"].regression_signature)
            self.assertEquals(read_foo_signature.input.tensor_name, "v0:0")
            self.assertEquals(read_foo_signature.output.tensor_name, "v1:0")

            # Validate the assets.
            assets_any = collection_def[constants.ASSETS_KEY].any_list.value
            self.assertEquals(len(assets_any), 1)
            asset = manifest_pb2.AssetFile()
            assets_any[0].Unpack(asset)
            assets_path = os.path.join(
                export_path, constants.VERSION_FORMAT_SPECIFIER % global_step,
                constants.ASSETS_DIRECTORY, "hello42.txt")
            asset_contents = gfile.GFile(assets_path).read()
            self.assertEqual(asset_contents, "your data here")
            self.assertEquals("hello42.txt", asset.filename)
            self.assertEquals("filename42:0", asset.tensor_binding.tensor_name)
            ignored_asset_path = os.path.join(
                export_path, constants.VERSION_FORMAT_SPECIFIER % global_step,
                constants.ASSETS_DIRECTORY, "ignored.txt")
            self.assertFalse(gfile.Exists(ignored_asset_path))

            # Validate graph restoration.
            if sharded:
                save.restore(
                    sess,
                    os.path.join(
                        export_path,
                        constants.VERSION_FORMAT_SPECIFIER % global_step,
                        constants.VARIABLES_FILENAME_PATTERN))
            else:
                save.restore(
                    sess,
                    os.path.join(
                        export_path,
                        constants.VERSION_FORMAT_SPECIFIER % global_step,
                        constants.VARIABLES_FILENAME))
            self.assertEqual(10, tf.get_collection("v")[0].eval())
            self.assertEqual(20, tf.get_collection("v")[1].eval())
            tf.get_collection(constants.INIT_OP_KEY)[0].run()
            self.assertEqual(30, tf.get_collection("v")[2].eval())
Example #31
external_x = tf.placeholder(tf.string)
x = convert_external_inputs(external_x)
y = inference(x)

saver = tf.train.Saver()

with tf.Session() as sess:
    # Restore variables from training checkpoints.
    ckpt = tf.train.get_checkpoint_state(sys.argv[1])
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        print("Checkpoint file not found")
        raise SystemExit

    scores, class_ids = tf.nn.top_k(y, NUM_CLASSES_TO_RETURN)

    # for simplification we will just return the class ids, we should return the names instead
    classes = tf.contrib.lookup.index_to_string(
        tf.to_int64(class_ids),
        mapping=tf.constant([str(i) for i in range(1001)]))

    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=external_x,
                                                  classes_tensor=classes,
                                                  scores_tensor=scores)
    model_exporter.init(default_graph_signature=signature,
                        init_op=tf.initialize_all_tables())
    model_exporter.export(sys.argv[1] + "/export", tf.constant(time.time()),
                          sess)
Example #32
  def doBasicsOneExportPath(self,
                            export_path,
                            clear_devices=False,
                            global_step=GLOBAL_STEP,
                            sharded=True):
    # Build a graph with 2 parameter nodes on different devices.
    tf.reset_default_graph()
    with tf.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      # v2 is an unsaved variable derived from v0 and v1.  It is used to
      # exercise the ability to run an init op when restoring a graph.
      with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(10, name="v0")
      with sess.graph.device("/cpu:1"):
        v1 = tf.Variable(20, name="v1")
      v2 = tf.Variable(1, name="v2", trainable=False, collections=[])
      assign_v2 = tf.assign(v2, tf.add(v0, v1))
      init_op = tf.group(assign_v2, name="init_op")

      tf.add_to_collection("v", v0)
      tf.add_to_collection("v", v1)
      tf.add_to_collection("v", v2)

      global_step_tensor = tf.Variable(global_step, name="global_step")
      named_tensor_bindings = {"logical_input_A": v0, "logical_input_B": v1}
      signatures = {
          "foo": exporter.regression_signature(input_tensor=v0,
                                               output_tensor=v1),
          "generic": exporter.generic_signature(named_tensor_bindings)
      }

      asset_filepath_orig = os.path.join(tf.test.get_temp_dir(), "hello42.txt")
      asset_file = tf.constant(asset_filepath_orig, name="filename42")
      tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, asset_file)

      with gfile.FastGFile(asset_filepath_orig, "w") as f:
        f.write("your data here")
      assets_collection = tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS)

      ignored_asset = os.path.join(tf.test.get_temp_dir(), "ignored.txt")
      with gfile.FastGFile(ignored_asset, "w") as f:
        f.write("additional data here")

      tf.initialize_all_variables().run()

      # Run an export.
      save = tf.train.Saver({"v0": v0,
                             "v1": v1},
                            restore_sequentially=True,
                            sharded=sharded)
      export = exporter.Exporter(save)
      export.init(sess.graph.as_graph_def(),
                  init_op=init_op,
                  clear_devices=clear_devices,
                  default_graph_signature=exporter.classification_signature(
                      input_tensor=v0),
                  named_graph_signatures=signatures,
                  assets_collection=assets_collection)
      export.export(export_path,
                    global_step_tensor,
                    sess,
                    exports_to_keep=gc.largest_export_versions(2))

    # Restore graph.
    compare_def = tf.get_default_graph().as_graph_def()
    tf.reset_default_graph()
    with tf.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      save = tf.train.import_meta_graph(
          os.path.join(export_path, constants.VERSION_FORMAT_SPECIFIER %
                       global_step, constants.META_GRAPH_DEF_FILENAME))
      self.assertIsNotNone(save)
      meta_graph_def = save.export_meta_graph()
      collection_def = meta_graph_def.collection_def

      # Validate custom graph_def.
      graph_def_any = collection_def[constants.GRAPH_KEY].any_list.value
      self.assertEquals(len(graph_def_any), 1)
      graph_def = tf.GraphDef()
      graph_def_any[0].Unpack(graph_def)
      if clear_devices:
        for node in compare_def.node:
          node.device = ""
      self.assertProtoEquals(compare_def, graph_def)

      # Validate init_op.
      init_ops = collection_def[constants.INIT_OP_KEY].node_list.value
      self.assertEquals(len(init_ops), 1)
      self.assertEquals(init_ops[0], "init_op")

      # Validate signatures.
      signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
      self.assertEquals(len(signatures_any), 1)
      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      default_signature = signatures.default_signature
      self.assertEqual(
          default_signature.classification_signature.input.tensor_name, "v0:0")
      bindings = signatures.named_signatures["generic"].generic_signature.map
      self.assertEquals(bindings["logical_input_A"].tensor_name, "v0:0")
      self.assertEquals(bindings["logical_input_B"].tensor_name, "v1:0")
      read_foo_signature = (
          signatures.named_signatures["foo"].regression_signature)
      self.assertEquals(read_foo_signature.input.tensor_name, "v0:0")
      self.assertEquals(read_foo_signature.output.tensor_name, "v1:0")

      # Validate the assets.
      assets_any = collection_def[constants.ASSETS_KEY].any_list.value
      self.assertEquals(len(assets_any), 1)
      asset = manifest_pb2.AssetFile()
      assets_any[0].Unpack(asset)
      assets_path = os.path.join(export_path,
                                 constants.VERSION_FORMAT_SPECIFIER %
                                 global_step, constants.ASSETS_DIRECTORY,
                                 "hello42.txt")
      asset_contents = gfile.GFile(assets_path).read()
      self.assertEqual(asset_contents, "your data here")
      self.assertEquals("hello42.txt", asset.filename)
      self.assertEquals("filename42:0", asset.tensor_binding.tensor_name)
      ignored_asset_path = os.path.join(export_path,
                                        constants.VERSION_FORMAT_SPECIFIER %
                                        global_step, constants.ASSETS_DIRECTORY,
                                        "ignored.txt")
      self.assertFalse(gfile.Exists(ignored_asset_path))

      # Validate graph restoration.
      if sharded:
        save.restore(sess,
                     os.path.join(
                        export_path, constants.VERSION_FORMAT_SPECIFIER %
                        global_step, constants.VARIABLES_FILENAME_PATTERN))
      else:
        save.restore(sess,
                     os.path.join(
                        export_path, constants.VERSION_FORMAT_SPECIFIER %
                        global_step, constants.VARIABLES_FILENAME))
      self.assertEqual(10, tf.get_collection("v")[0].eval())
      self.assertEqual(20, tf.get_collection("v")[1].eval())
      tf.get_collection(constants.INIT_OP_KEY)[0].run()
      self.assertEqual(30, tf.get_collection("v")[2].eval())
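The exports_to_keep=gc.largest_export_versions(2) argument above prunes older export directories. A small illustrative sketch of the gc helper on its own (the paths are hypothetical; this assumes the (path, export_version) layout of gc.Path from tensorflow.contrib.session_bundle):

from tensorflow.contrib.session_bundle import gc

paths = [gc.Path("/tmp/export/%08d" % v, v) for v in (1, 2, 3)]
keep = gc.largest_export_versions(2)
# only the two highest versions survive the filter: [2, 3]
print([p.export_version for p in keep(paths)])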
Example #33
def fam(train_i, train_o, test_i, test_o):
    sess = tf.Session()
    K.set_session(sess)
    K.set_learning_phase(1)

    batch_size = 60
    nb_classes = len(MOD)
    nb_epoch = 20

    img_rows, img_cols = 2 * P * L, 2 * Np
    nb_filters = 96
    nb_pool = 2

    X_train, Y_train = shuffle_in_unison_inplace(np.array(train_i), np.array(train_o))

    model = Sequential()
    model.add(Convolution2D(64, 11, 11, subsample=(2, 2),
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(128, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(nb_classes, init='normal'))
    model.add(Activation('softmax', name="out"))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    """
    datagen = ImageDataGenerator(
        #featurewise_center=True,
        #featurewise_std_normalization=True,
        rotation_range=20,
        #width_shift_range=0.3,
        #height_shift_range=0.3,
        #zoom_range=[0,1.3],
        horizontal_flip=True,
        vertical_flip=True)

    datagen.fit(X_train)

    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size,shuffle=True),
                    samples_per_epoch=len(X_train), nb_epoch=5,verbose=1,validation_data=(test_i[0], test_o[0]))

    """

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, shuffle=True, validation_data=(test_i[0], test_o[0]))


    for s in range(len(test_i)):
        if len(test_i[s]) == 0:
            continue
        X_test = test_i[s]
        Y_test = test_o[s]
        score = model.evaluate(X_test, Y_test, verbose=0)
        print("SNR", SNR[s], "Test accuracy:", score[1])

    K.set_learning_phase(0)
    config = model.get_config()
    weights = model.get_weights()

    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = "/tmp/fam"
    export_version = 1

    labels_tensor = tf.constant(MOD)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=new_model.input,
        classes_tensor=labels_tensor,
        scores_tensor=new_model.output)
    model_exporter.init(
        sess.graph.as_graph_def(),
        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
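A quick way to sanity-check such an export is to re-load it, mirroring the restore pattern from Example #33 (a sketch only; the version directory for export_version = 1 is assumed to follow constants.VERSION_FORMAT_SPECIFIER):

import os
import tensorflow as tf
from tensorflow.contrib.session_bundle import constants

export_dir = os.path.join("/tmp/fam", constants.VERSION_FORMAT_SPECIFIER % 1)
with tf.Session() as sess:
    restorer = tf.train.import_meta_graph(
        os.path.join(export_dir, constants.META_GRAPH_DEF_FILENAME))
    # the Saver above was sharded, so use the sharded filename pattern
    restorer.restore(sess,
                     os.path.join(export_dir, constants.VARIABLES_FILENAME_PATTERN))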
Example #34
def cnn(train_i, train_o, test_i, test_o, mods, snrs, train_idx, test_idx,
        lbl):

    # CNN1
    c1 = 64
    c2 = 16
    dl = 128
    """ 
    # CNN2
    c1 = 256
    c2 = 80
    dl = 256
    """

    nb_epoch = 400

    sess = tf.Session()

    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    K.set_session(sess)
    K.set_learning_phase(1)

    classes = mods
    #X_train,Y_train = shuffle_in_unison_inplace( np.array(train_i) , np.array(train_o) )

    X_train = train_i
    Y_train = train_o
    X_test = test_i
    Y_test = test_o

    in_shp = list(X_train.shape[1:])

    # Build VT-CNN2 Neural Net model using Keras primitives --
    #  - Reshape [N,2,128] to [N,1,2,128] on input
    #  - Pass through 2 2DConv/ReLu layers
    #  - Pass through 2 Dense layers (ReLu and Softmax)
    #  - Perform categorical cross entropy optimization

    dr = 0.5  # dropout rate (%)
    model = models.Sequential()
    model.add(Reshape([1] + in_shp, input_shape=in_shp))
    model.add(ZeroPadding2D((0, 2)))
    model.add(
        Convolution2D(256,
                      1,
                      3,
                      border_mode='valid',
                      activation="relu",
                      init='glorot_uniform'))
    model.add(Dropout(dr))
    model.add(ZeroPadding2D((0, 2)))
    model.add(
        Convolution2D(80,
                      2,
                      3,
                      border_mode="valid",
                      activation="relu",
                      init='glorot_uniform'))
    model.add(Dropout(dr))
    model.add(Flatten())
    model.add(Dense(256, activation='relu', init='he_normal'))
    model.add(Dropout(dr))
    model.add(Dense(len(classes), init='he_normal'))
    model.add(Activation('softmax', name="out"))
    model.add(Reshape([len(classes)]))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.summary()

    #datagen = ImageDataGenerator()
    #featurewise_center=False,
    #featurewise_std_normalization=False,
    #rotation_range=0,
    #width_shift_range=0.3,
    #height_shift_range=0.3,
    #zoom_range=[0,1.3],
    #shear_range=0.2,
    # horizontal_flip=True,
    #vertical_flip=True)
    """
    datagen.fit(X_train)

    model.fit_generator(
        datagen.flow(
            X_train,
            Y_train,
            batch_size=1024,
            shuffle=True),
        samples_per_epoch=len(X_train),
        nb_epoch=nb_epoch,
        verbose=1,
        validation_data=(
            test_i[0],
            test_o[0]))
    """
    # Set up some params
    nb_epoch = 25  # number of epochs to train on (reduced from 100)
    batch_size = 1024  # training batch size

    tb = TensorBoard(log_dir='./logs')

    # perform training ...
    #   - call the main training loop in keras for our network+dataset
    filepath = 'convmodrecnets_CNN2_0.5.wts.h5'
    history = model.fit(X_train,
                        Y_train,
                        batch_size=batch_size,
                        nb_epoch=nb_epoch,
                        show_accuracy=False,
                        verbose=2,
                        validation_data=(X_test, Y_test),
                        callbacks=[
                            keras.callbacks.ModelCheckpoint(
                                filepath,
                                monitor='val_loss',
                                verbose=0,
                                save_best_only=True,
                                mode='auto'),
                            keras.callbacks.EarlyStopping(monitor='val_loss',
                                                          patience=5,
                                                          verbose=0,
                                                          mode='auto')
                        ])
    # we re-load the best weights once training is finished
    model.load_weights(filepath)

    K.set_learning_phase(0)

    acc = {}
    for snr in snrs:

        # extract classes @ SNR
        # list() so the map result can be indexed into a numpy array under Python 3
        test_SNRs = list(map(lambda x: lbl[x][1], test_idx))
        test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
        test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]

        # estimate classes
        test_Y_i_hat = model.predict(test_X_i)
        #print("PREDICT ",test_Y_i_hat)

        conf = np.zeros([len(classes), len(classes)])
        confnorm = np.zeros([len(classes), len(classes)])
        for i in range(0, test_X_i.shape[0]):
            j = list(test_Y_i[i, :]).index(1)
            k = int(np.argmax(test_Y_i_hat[i, :]))
            conf[j, k] = conf[j, k] + 1
        for i in range(0, len(classes)):
            confnorm[i, :] = conf[i, :] / np.sum(conf[i, :])
        plt.figure()
        plot_confusion_matrix(confnorm,
                              labels=classes,
                              title="ConvNet Confusion Matrix (SNR=%d)" %
                              (snr))

        cor = np.sum(np.diag(conf))
        ncor = np.sum(conf) - cor
        print("Overall Accuracy: ", cor / (cor + ncor))
        acc[snr] = 1.0 * cor / (cor + ncor)

    config = model.get_config()
    weights = model.get_weights()

    new_model = models.Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = "/tmp/cnn"
    export_version = 1

    labels_tensor = tf.constant(mods)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=new_model.input,
        classes_tensor=labels_tensor,
        scores_tensor=new_model.output)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
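The confusion-matrix loop above can also be written with vectorized numpy; a minimal equivalent sketch (same convention: row = true class, column = predicted class):

import numpy as np

def confusion(y_true_onehot, y_pred_probs, n_classes):
    j = np.argmax(y_true_onehot, axis=1)   # true class per sample
    k = np.argmax(y_pred_probs, axis=1)    # predicted class per sample
    conf = np.zeros((n_classes, n_classes))
    np.add.at(conf, (j, k), 1)             # count each (true, predicted) pair
    return conf / conf.sum(axis=1, keepdims=True)  # row-normalized, like confnorm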
Example #35
def cnn(train_i, train_o, test_i, test_o, mods, snrs, train_idx, test_idx,
        lbl):
 
    # CNN1
    c1 = 64
    c2 = 16
    dl = 128
    
    """ 
    # CNN2
    c1 = 256
    c2 = 80
    dl = 256
    """
    
    nb_epoch = 400

    sess = tf.Session()

    init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())

    K.set_session(sess)
    K.set_learning_phase(1)
    
    classes = mods
    #X_train,Y_train = shuffle_in_unison_inplace( np.array(train_i) , np.array(train_o) )

    X_train = train_i
    Y_train = train_o
    X_test = test_i
    Y_test = test_o

    in_shp = list(X_train.shape[1:])

    
    # Build VT-CNN2 Neural Net model using Keras primitives -- 
    #  - Reshape [N,2,128] to [N,1,2,128] on input
    #  - Pass through 2 2DConv/ReLu layers
    #  - Pass through 2 Dense layers (ReLu and Softmax)
    #  - Perform categorical cross entropy optimization

    dr = 0.5 # dropout rate (%)
    model = models.Sequential()
    model.add(Reshape([1] + in_shp, input_shape=in_shp))
    model.add(ZeroPadding2D((0, 2)))
    model.add(Convolution2D(256, 1, 3, border_mode='valid', activation="relu", init='glorot_uniform'))
    model.add(Dropout(dr))
    model.add(ZeroPadding2D((0, 2)))
    model.add(Convolution2D(80, 2, 3, border_mode="valid", activation="relu", init='glorot_uniform'))
    model.add(Dropout(dr))
    model.add(Flatten())
    model.add(Dense(256, activation='relu', init='he_normal'))
    model.add(Dropout(dr))
    model.add(Dense(len(classes), init='he_normal'))
    model.add(Activation('softmax', name="out"))
    model.add(Reshape([len(classes)]))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.summary()

    #datagen = ImageDataGenerator()
        #featurewise_center=False,
        #featurewise_std_normalization=False,
        #rotation_range=0,
        #width_shift_range=0.3,
        #height_shift_range=0.3,
        #zoom_range=[0,1.3],
        #shear_range=0.2,
        # horizontal_flip=True,
        #vertical_flip=True)

    """
    datagen.fit(X_train)

    model.fit_generator(
        datagen.flow(
            X_train,
            Y_train,
            batch_size=1024,
            shuffle=True),
        samples_per_epoch=len(X_train),
        nb_epoch=nb_epoch,
        verbose=1,
        validation_data=(
            test_i[0],
            test_o[0]))
    """
    # Set up some params 
    nb_epoch = 25  # number of epochs to train on (reduced from 100)
    batch_size = 1024  # training batch size

    tb = TensorBoard(log_dir='./logs')

    # perform training ...
    #   - call the main training loop in keras for our network+dataset
    filepath = 'convmodrecnets_CNN2_0.5.wts.h5'
    history = model.fit(X_train,
        Y_train,
        batch_size=batch_size,
        nb_epoch=nb_epoch,
        show_accuracy=False,
        verbose=2,
        validation_data=(X_test, Y_test),
        callbacks=[
            keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True, mode='auto'),
            keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
        ])
    # we re-load the best weights once training is finished
    model.load_weights(filepath)

    K.set_learning_phase(0)



    acc = {}
    for snr in snrs:

        # extract classes @ SNR
        # list() so the map result can be indexed into a numpy array under Python 3
        test_SNRs = list(map(lambda x: lbl[x][1], test_idx))
        test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
        test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]

        # estimate classes
        test_Y_i_hat = model.predict(test_X_i)
        #print("PREDICT ",test_Y_i_hat)

        conf = np.zeros([len(classes),len(classes)])
        confnorm = np.zeros([len(classes),len(classes)])
        for i in range(0,test_X_i.shape[0]):
            j = list(test_Y_i[i,:]).index(1)
            k = int(np.argmax(test_Y_i_hat[i,:]))
            conf[j,k] = conf[j,k] + 1
        for i in range(0,len(classes)):
            confnorm[i,:] = conf[i,:] / np.sum(conf[i,:])
        plt.figure()
        plot_confusion_matrix(confnorm, labels=classes, title="ConvNet Confusion Matrix (SNR=%d)"%(snr))
    
        cor = np.sum(np.diag(conf))
        ncor = np.sum(conf) - cor
        print ("Overall Accuracy: ", cor / (cor+ncor))
        acc[snr] = 1.0*cor/(cor+ncor)


    config = model.get_config()
    weights = model.get_weights()

    new_model = models.Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = "/tmp/cnn"
    export_version = 1

    labels_tensor = tf.constant(mods)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=new_model.input,
        classes_tensor=labels_tensor,
        scores_tensor=new_model.output)
    model_exporter.init(
        sess.graph.as_graph_def(),
        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
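Both versions of cnn() fill the per-SNR accuracy dict acc but never display it; a short follow-up sketch using the same matplotlib already imported for the confusion matrices (this assumes acc and snrs are still in scope, since cnn() does not return them):

import matplotlib.pyplot as plt

plt.figure()
plt.plot(snrs, [acc[s] for s in snrs], marker="o")
plt.xlabel("SNR (dB)")
plt.ylabel("Classification accuracy")
plt.title("CNN accuracy vs. SNR")
plt.show()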
Example #36
import keras
import tensorflow
from keras import backend as K
from tensorflow.contrib.session_bundle import exporter
from keras.models import model_from_config, Sequential

print("Loading model for exporting to Protocol Buffer format...")
model_path = 'temp.h5'
model = keras.models.load_model(model_path)

K.set_learning_phase(0)  # all new operations will be in test mode from now on
sess = K.get_session()

# serialize the model and get its weights, for quick re-building
config = model.get_config()
weights = model.get_weights()

# re-build a model where the learning phase is now hard-coded to 0
new_model = Sequential.from_config(config)
new_model.set_weights(weights)

export_path = "TensorGraphs//simple.pb"  # where to save the exported graph
export_version = 1  # version number (integer)

saver = tensorflow.train.Saver(sharded=True)
model_exporter = exporter.Exporter(saver)
# export the re-built model, so the exported graph has the learning phase
# hard-coded to 0 (using the original model's tensors would defeat the rebuild)
signature = exporter.classification_signature(input_tensor=new_model.input,
                                              scores_tensor=new_model.output)
model_exporter.init(sess.graph.as_graph_def(),
                    default_graph_signature=signature)
model_exporter.export(export_path, tensorflow.constant(export_version), sess)
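To verify the export, the bundle can be loaded back with the session_bundle loader (a sketch; the 00000001 version subdirectory is an assumption based on export_version = 1 and constants.VERSION_FORMAT_SPECIFIER):

from tensorflow.contrib.session_bundle import session_bundle

sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
    "TensorGraphs//simple.pb/00000001")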
Example #37
        ##### Start: exporting model #####
        model_exporter = exporter.Exporter(saver)

        # maybe this needs to be done before saver is created
        init_op = tf.group(tf.tables_initializer(), name='init_op')
        serving_input_x = cnn.input_x
        values, indices = tf.nn.top_k(cnn.input_y, 2)
        table = tf.contrib.lookup.index_to_string_table_from_tensor(
            tf.constant([str(i) for i in range(2)]))
        prediction_classes = table.lookup(tf.to_int64(indices))

        model_exporter.init(
            sess.graph.as_graph_def(),
            init_op=init_op,
            default_graph_signature=exporter.classification_signature(
                input_tensor=serving_input_x,
                classes_tensor=prediction_classes,
                scores_tensor=values),
            named_graph_signatures={
                'inputs': exporter.generic_signature({'images': cnn.input_x}),
                'outputs': exporter.generic_signature({'scores': cnn.input_y})})
        export_path = "<keep the path here>"
        model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)

        ## END ##

        # Write vocabulary
        vocab_processor.save(os.path.join(out_dir, "vocab"))

        # Initialize all variables
        sess.run(tf.global_variables_initializer())
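FLAGS.export_version is referenced above but never defined in the snippet; a hedged sketch of the usual flag definition (the flag name matches the usage above, the default and description are assumptions):

import tensorflow as tf

tf.flags.DEFINE_integer("export_version", 1, "version number of the exported model")
FLAGS = tf.flags.FLAGS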