Example #1
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.logging.info('Prepare to export model to: %s', FLAGS.export_path)

  with tf.Graph().as_default():
    image, image_size, resized_image_size = _create_input_tensors()

    model_options = common.ModelOptions(
        outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
        crop_size=FLAGS.crop_size,
        atrous_rates=FLAGS.atrous_rates,
        output_stride=FLAGS.output_stride)

    if tuple(FLAGS.inference_scales) == (1.0,):
      tf.logging.info('Exported model performs single-scale inference.')
      predictions = model.predict_labels(
          image,
          model_options=model_options,
          image_pyramid=FLAGS.image_pyramid)
    else:
      tf.logging.info('Exported model performs multi-scale inference.')
      predictions = model.predict_labels_multi_scale(
          image,
          model_options=model_options,
          eval_scales=FLAGS.inference_scales,
          add_flipped_images=FLAGS.add_flipped_images)

    predictions = tf.cast(predictions[common.OUTPUT_TYPE], tf.float32)
    # Crop the valid regions from the predictions.
    semantic_predictions = tf.slice(
        predictions,
        [0, 0, 0],
        [1, resized_image_size[0], resized_image_size[1]])
    # Resize the prediction back to the original image size.
    def _resize_label(label, label_size):
      # Expand the label to [1, height, width, 1] for the resize operation.
      label = tf.expand_dims(label, 3)
      resized_label = tf.image.resize_images(
          label,
          label_size,
          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
          align_corners=True)
      return tf.cast(tf.squeeze(resized_label, 3), tf.int32)
    semantic_predictions = _resize_label(semantic_predictions, image_size)
    semantic_predictions = tf.identity(semantic_predictions, name=_OUTPUT_NAME)

    saver = tf.train.Saver(tf.model_variables())

    tf.gfile.MakeDirs(os.path.dirname(FLAGS.export_path))
    freeze_graph.freeze_graph_with_def_protos(
        tf.get_default_graph().as_graph_def(add_shapes=True),
        saver.as_saver_def(),
        FLAGS.checkpoint_path,
        _OUTPUT_NAME,
        restore_op_name=None,
        filename_tensor_name=None,
        output_graph=FLAGS.export_path,
        clear_devices=True,
        initializer_nodes=None)
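
A minimal sketch of consuming the frozen graph this example writes: parse the serialized GraphDef and import it into a fresh graph. The tensor names 'ImageTensor:0' and 'SemanticPredictions:0' are assumptions based on DeepLab's export_model.py defaults, and the path and dummy input are placeholders.

import numpy as np
import tensorflow as tf

def load_frozen_graph(pb_path):
  # Parse the binary GraphDef written by freeze_graph_with_def_protos.
  graph_def = tf.GraphDef()
  with tf.gfile.GFile(pb_path, 'rb') as f:
    graph_def.ParseFromString(f.read())
  graph = tf.Graph()
  with graph.as_default():
    tf.import_graph_def(graph_def, name='')
  return graph

graph = load_frozen_graph('/tmp/frozen_graph.pb')  # hypothetical path
with tf.Session(graph=graph) as sess:
  image = np.zeros((1, 513, 513, 3), dtype=np.uint8)  # dummy uint8 batch
  labels = sess.run('SemanticPredictions:0',
                    feed_dict={'ImageTensor:0': image})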
Example #2
def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklist=None):
    # `session`, `saver`, and `checkpoint_path` are closed over from the
    # enclosing scope; the call returns the frozen GraphDef.
    return freeze_graph.freeze_graph_with_def_protos(
        input_graph_def=session.graph_def,
        input_saver_def=saver.as_saver_def(),
        input_checkpoint=checkpoint_path,
        output_node_names=output_node_names,
        restore_op_name=None,
        filename_tensor_name=None,
        output_graph=output_file,
        clear_devices=False,
        variable_names_blacklist=variables_blacklist,
        initializer_nodes='')
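
Since do_graph_freeze closes over session, saver, and checkpoint_path from its enclosing scope, a call site might look like the following sketch; the node names are placeholders, not values fixed by the snippet above.

frozen_graph_def = do_graph_freeze(
    output_file='output_graph.pb',
    output_node_names='logits',  # hypothetical output node
    variables_blacklist='')      # comma-separated variables to keep unfrozen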
Example #3
  def testSinglePartitionedVariable(self):
    """Ensures partitioned variables fail cleanly with freeze graph."""
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # Create a graph with partitioned variables. When the weights are
    # partitioned into a single partition, the weights variable is followed
    # by identity -> identity (an additional identity node).
    partitioner = partitioned_variables.fixed_size_partitioner(1)
    with ops.Graph().as_default():
      with variable_scope.variable_scope("part", partitioner=partitioner):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros(
            (batch_size, height, width, depth), name="input1")
        input2 = array_ops.zeros(
            (batch_size, height, width, depth), name="input2")

        num_nodes = depth
        filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
        filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
        conv = nn.conv2d(
            input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
        node = math_ops.add(conv, input2, name="test/add")
        node = nn.relu6(node, name="test/relu6")

      # Save graph and checkpoints.
      sess = session.Session()
      sess.run(variables.global_variables_initializer())

      saver = saver_lib.Saver()
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

      # Ensure this graph has partition variables.
      self.assertTrue([
          tensor.name.split(":")[0]
          for op in sess.graph.get_operations()
          for tensor in op.values()
          if re.search(r"/part_\d+/", tensor.name)
      ])

    # Test freezing graph doesn't make it crash.
    output_node_names = "save/restore_all"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)

    return_value = freeze_graph.freeze_graph_with_def_protos(
        input_graph_def=sess.graph_def,
        input_saver_def=None,
        input_checkpoint=checkpoint_path,
        output_node_names=output_node_names,
        restore_op_name="save/restore_all",  # default value
        filename_tensor_name="save/Const:0",  # default value
        output_graph=output_graph_path,
        clear_devices=False,
        initializer_nodes="")
    # freeze_graph_with_def_protos returns -1 when it encounters
    # partitioned variables, so check for that value explicitly.
    self.assertEqual(return_value, -1)
Example #4
def _export_inference_graph(input_type,
                            detection_model,
                            use_moving_averages,
                            trained_checkpoint_prefix,
                            output_directory,
                            additional_output_tensor_names=None,
                            input_shape=None,
                            output_collection_name='inference_op',
                            graph_hook_fn=None,
                            write_inference_graph=False,
                            temp_checkpoint_prefix='',
                            use_side_inputs=False,
                            side_input_shapes=None,
                            side_input_names=None,
                            side_input_types=None):
    """Export helper."""
    tf.gfile.MakeDirs(output_directory)
    frozen_graph_path = os.path.join(output_directory,
                                     'frozen_inference_graph.pb')
    saved_model_path = os.path.join(output_directory, 'saved_model')
    model_path = os.path.join(output_directory, 'model.ckpt')

    outputs, placeholder_tensor_dict = build_detection_graph(
        input_type=input_type,
        detection_model=detection_model,
        input_shape=input_shape,
        output_collection_name=output_collection_name,
        graph_hook_fn=graph_hook_fn,
        use_side_inputs=use_side_inputs,
        side_input_shapes=side_input_shapes,
        side_input_names=side_input_names,
        side_input_types=side_input_types)

    profile_inference_graph(tf.get_default_graph())
    saver_kwargs = {}
    if use_moving_averages:
        if not temp_checkpoint_prefix:
            # This check keeps compatibility with both versions of SaverDef.
            if os.path.isfile(trained_checkpoint_prefix):
                saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
                temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name
            else:
                temp_checkpoint_prefix = tempfile.mkdtemp()
        replace_variable_values_with_moving_averages(
            tf.get_default_graph(), trained_checkpoint_prefix,
            temp_checkpoint_prefix)
        checkpoint_to_use = temp_checkpoint_prefix
    else:
        checkpoint_to_use = trained_checkpoint_prefix

    saver = tf.train.Saver(**saver_kwargs)
    input_saver_def = saver.as_saver_def()

    write_graph_and_checkpoint(
        inference_graph_def=tf.get_default_graph().as_graph_def(),
        model_path=model_path,
        input_saver_def=input_saver_def,
        trained_checkpoint_prefix=checkpoint_to_use)
    if write_inference_graph:
        inference_graph_def = tf.get_default_graph().as_graph_def()
        inference_graph_path = os.path.join(output_directory,
                                            'inference_graph.pbtxt')
        for node in inference_graph_def.node:
            node.device = ''
        with tf.gfile.GFile(inference_graph_path, 'wb') as f:
            f.write(str(inference_graph_def))

    if additional_output_tensor_names is not None:
        output_node_names = ','.join(
            list(outputs.keys()) + additional_output_tensor_names)
    else:
        output_node_names = ','.join(outputs.keys())

    frozen_graph_def = freeze_graph.freeze_graph_with_def_protos(
        input_graph_def=tf.get_default_graph().as_graph_def(),
        input_saver_def=input_saver_def,
        input_checkpoint=checkpoint_to_use,
        output_node_names=output_node_names,
        restore_op_name='save/restore_all',
        filename_tensor_name='save/Const:0',
        output_graph=frozen_graph_path,
        clear_devices=True,
        initializer_nodes='')

    write_saved_model(saved_model_path, frozen_graph_def,
                      placeholder_tensor_dict, outputs)
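
A hedged sketch of driving this helper, mirroring how the Object Detection API's export scripts call it; the paths are placeholders and the detection_model construction via model_builder is an assumption about the surrounding code.

# Assumes a parsed pipeline_pb2.TrainEvalPipelineConfig in `pipeline_config`.
detection_model = model_builder.build(pipeline_config.model, is_training=False)
_export_inference_graph(
    input_type='image_tensor',
    detection_model=detection_model,
    use_moving_averages=False,
    trained_checkpoint_prefix='/tmp/model.ckpt-100000',  # placeholder
    output_directory='/tmp/exported_model',              # placeholder
    input_shape=[1, None, None, 3])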
Example #7
def main(_):
    # Horovod: initialize Horovod.
    hvd.init()

    # Delete previously saved checkpoints and the exported model
    # if os.path.exists('./checkpoints') and os.path.isdir('./checkpoints'):
    #     shutil.rmtree('./checkpoints')
    if os.path.exists(os.path.join(home, 'data', 'model')) and os.path.isdir(os.path.join(home, 'data', 'model')):
        shutil.rmtree(os.path.join(home, 'data', 'model'))

    # Data set sources : http://archive.ics.uci.edu/ml/datasets/ \
    # Smartphone-Based+Recognition+of+Human+Activities+and+Postural+Transitions
    # sensorData_timestamp.txt is pre-processed data derived from the UCI dataset.
    # Load the dataset from the database.
    mysql_to_csv(sql='Select * From sensorData', file_path='./sensorData_timestamp1.csv', host='163.180.117.202',
                 port=3847, user='******', password='******', dbName='hardb')
    columns = ['user', 'activity', 'timestamp', 'acc_x-axis', 'acc_y-axis', 'acc_z-axis', 'gyro_x-axis', 'gyro_y-axis',
               'gyro_z-axis']
    df = pd.read_csv('./sensorData_timestamp1.csv',
                     header=None, names=columns, lineterminator='\n')
    df = df.dropna()

    step = 20
    segments = []
    labels = []
    for i in range(0, len(df) - n_time_steps, step):
        acc_xs = df['acc_x-axis'].values[i: i + n_time_steps]
        acc_ys = df['acc_y-axis'].values[i: i + n_time_steps]
        acc_zs = df['acc_z-axis'].values[i: i + n_time_steps]
        gyro_xs = df['gyro_x-axis'].values[i: i + n_time_steps]
        gyro_ys = df['gyro_y-axis'].values[i: i + n_time_steps]
        gyro_zs = df['gyro_z-axis'].values[i: i + n_time_steps]
        label = stats.mode(df['activity'][i: i + n_time_steps])[0][0]
        segments.append([acc_xs, acc_ys, acc_zs, gyro_xs, gyro_ys, gyro_zs])
        labels.append(label)

    reshaped_segments = np.asarray(segments, dtype=np.float32).reshape(-1, n_time_steps, n_features)
    tmp_df = pd.get_dummies(labels)
    labels = np.asarray(tmp_df, dtype=np.float32)
    reverse_one_hot_encode = tmp_df.idxmax().reset_index().rename(columns={'index': 'activity', 0: 'idx'})
    pickle.dump(reverse_one_hot_encode, open(os.path.join(home, 'data', 'reverse_one_hot_encode'), "wb"))

    # Data split: train : test = 80 : 20.
    # This split method can cause overfitting; K-fold cross-validation would be more robust.
    x_train, x_test, y_train, y_test = train_test_split(
        reshaped_segments, labels, test_size=0.2, random_state=random_seed)
    pickle.dump(x_test, open(os.path.join(home, 'data', 'x_test'), "wb"))
    pickle.dump(y_test, open(os.path.join(home, 'data', 'y_test'), "wb"))

    # Build model...
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, n_time_steps, n_features], name="inputs")
        y = tf.placeholder(tf.float32, [None, n_classes], name="label")
    predict, loss = create_lstm_model(x, y)
    tf.summary.scalar("loss", loss)
    # correct_pred = tf.equal(tf.argmax(predict, 1), tf.argmax(y, 1))
    # accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))

    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    # Horovod: add Horovod Distributed Optimizer.
    optimizer = hvd.DistributedOptimizer(optimizer)

    global_step = tf.train.get_or_create_global_step()
    train_op = optimizer.minimize(loss, global_step=global_step)

    hooks = [
        # Horovod: BroadcastGlobalVariablesHook broadcasts initial variable states
        # from rank 0 to all other processes. This is necessary to ensure consistent
        # initialization of all workers when training is started with random weights
        # or restored from a checkpoint.
        hvd.BroadcastGlobalVariablesHook(0),

        # Horovod: adjust number of steps based on number of GPUs.
        tf.train.StopAtStepHook(last_step=8000 // hvd.size()),

        tf.train.LoggingTensorHook(tensors={'step': global_step, 'loss': loss},
                                   every_n_iter=10),
        tf.train.SummarySaverHook(save_secs=10,
                                  output_dir='/tmp/tf',
                                  summary_op=tf.summary.merge_all())
    ]

    # Horovod: pin GPU to be used to process local rank (one GPU per process)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = str(hvd.local_rank())

    # Horovod: save checkpoints only on worker 0 to prevent other workers from
    # corrupting them.
    checkpoint_dir = './checkpoints' if hvd.rank() == 0 else None
    training_batch_generator = train_input_generator(x_train, y_train, batch_size=batch_size)
    # The MonitoredTrainingSession takes care of session initialization,
    # restoring from a checkpoint, saving to a checkpoint, and closing when done
    # or an error occurs.
    with tf.train.MonitoredTrainingSession(checkpoint_dir=checkpoint_dir,
                                           hooks=hooks,
                                           config=config) as mon_sess:
        while not mon_sess.should_stop():
            # Run a training step synchronously.
            input_batch, target = next(training_batch_generator)
            mon_sess.run(train_op, feed_dict={x: input_batch, y: target})

    # save model
    if hvd.rank() != 0:
        return
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
    optGraph = optimize_for_inference_lib.optimize_for_inference(tf.get_default_graph().as_graph_def(),
                                                                 ["input/inputs"], ["y_"],
                                                                 dtypes.float32.as_datatype_enum)
    frozenGraph = freeze_graph.freeze_graph_with_def_protos(optGraph, None,
                                                            checkpoint_file, "y_", None, None,
                                                            "frozen.pb", True, None)
    with tf.Graph().as_default():
        importer.import_graph_def(frozenGraph, name="")
        with tf.Session() as sess:
            inputs = tf.get_default_graph().get_tensor_by_name("input/inputs:0")
            model = tf.get_default_graph().get_tensor_by_name("y_:0")
            predictor = tf.argmax(model, 1, name="predictor")
            inputs_classes = tf.saved_model.utils.build_tensor_info(inputs)  # input
            outputs_classes = tf.saved_model.utils.build_tensor_info(predictor)  # output
            signature = (tf.saved_model.signature_def_utils.build_signature_def(
                inputs={tf.saved_model.signature_constants.CLASSIFY_INPUTS: inputs_classes},
                outputs={tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES: outputs_classes},
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))
            builder = tf.saved_model.builder.SavedModelBuilder(os.path.join(home, 'data', 'model'))
            legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
            builder.add_meta_graph_and_variables(sess,
                                                 [tf.saved_model.tag_constants.SERVING],
                                                 signature_def_map={'predict_activity': signature},
                                                 legacy_init_op=legacy_init_op)
            builder.save()
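
A minimal sketch of reloading the SavedModel this example exports and running its 'predict_activity' signature; the tensor names follow from the name scopes built above ('input/inputs:0' and 'predictor:0').

with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING],
        os.path.join(home, 'data', 'model'))
    inputs = sess.graph.get_tensor_by_name('input/inputs:0')
    predictor = sess.graph.get_tensor_by_name('predictor:0')
    x_test = pickle.load(open(os.path.join(home, 'data', 'x_test'), 'rb'))
    activity_ids = sess.run(predictor, feed_dict={inputs: x_test[:1]})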
Example #8
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.logging.info('Prepare to export model to: %s', FLAGS.export_path)

  with tf.Graph().as_default():
    image, image_size, resized_image_size = _create_input_tensors()

    model_options = common.ModelOptions(
        outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
        crop_size=FLAGS.crop_size,
        atrous_rates=FLAGS.atrous_rates,
        output_stride=FLAGS.output_stride)

    if tuple(FLAGS.inference_scales) == (1.0,):
      tf.logging.info('Exported model performs single-scale inference.')
      predictions = model.predict_labels(
          image,
          model_options=model_options,
          image_pyramid=FLAGS.image_pyramid)
    else:
      tf.logging.info('Exported model performs multi-scale inference.')
      if FLAGS.quantize_delay_step >= 0:
        raise ValueError(
            'Quantize mode is not supported with multi-scale test.')
      predictions = model.predict_labels_multi_scale(
          image,
          model_options=model_options,
          eval_scales=FLAGS.inference_scales,
          add_flipped_images=FLAGS.add_flipped_images)
    raw_predictions = tf.identity(
        tf.cast(predictions[common.OUTPUT_TYPE], tf.float32),
        _RAW_OUTPUT_NAME)
    raw_probabilities = tf.identity(
        predictions[common.OUTPUT_TYPE + model.PROB_SUFFIX],
        _RAW_OUTPUT_PROB_NAME)

    # Crop the valid regions from the predictions.
    semantic_predictions = raw_predictions[
        :, :resized_image_size[0], :resized_image_size[1]]
    semantic_probabilities = raw_probabilities[
        :, :resized_image_size[0], :resized_image_size[1]]

    # Resize the prediction back to the original image size.
    def _resize_label(label, label_size):
      # Expand the label to [1, height, width, 1] for the resize operation.
      label = tf.expand_dims(label, 3)
      resized_label = tf.image.resize_images(
          label,
          label_size,
          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
          align_corners=True)
      return tf.cast(tf.squeeze(resized_label, 3), tf.int32)
    semantic_predictions = _resize_label(semantic_predictions, image_size)
    semantic_predictions = tf.identity(semantic_predictions, name=_OUTPUT_NAME)

    semantic_probabilities = tf.image.resize_bilinear(
        semantic_probabilities, image_size, align_corners=True,
        name=_OUTPUT_PROB_NAME)

    if FLAGS.quantize_delay_step >= 0:
      contrib_quantize.create_eval_graph()

    saver = tf.train.Saver(tf.all_variables())

    dirname = os.path.dirname(FLAGS.export_path)
    tf.gfile.MakeDirs(dirname)
    graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)
    freeze_graph.freeze_graph_with_def_protos(
        graph_def,
        saver.as_saver_def(),
        FLAGS.checkpoint_path,
        _OUTPUT_NAME + ',' + _OUTPUT_PROB_NAME,
        restore_op_name=None,
        filename_tensor_name=None,
        output_graph=FLAGS.export_path,
        clear_devices=True,
        initializer_nodes=None)

    if FLAGS.save_inference_graph:
      tf.train.write_graph(graph_def, dirname, 'inference_graph.pbtxt')
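
Because this variant exports two output nodes, a quick sanity check is to parse the frozen graph back and confirm both names are present. The literal names below are assumptions about the _OUTPUT_NAME and _OUTPUT_PROB_NAME constants.

graph_def = tf.GraphDef()
with tf.gfile.GFile(FLAGS.export_path, 'rb') as f:
  graph_def.ParseFromString(f.read())
node_names = {node.name for node in graph_def.node}
assert 'SemanticPredictions' in node_names    # assumed _OUTPUT_NAME
assert 'SemanticProbabilities' in node_names  # assumed _OUTPUT_PROB_NAME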
Example #9
def _export_inference_graph(input_type,
                            detection_model,
                            use_moving_averages,
                            trained_checkpoint_prefix,
                            output_directory,
                            additional_output_tensor_names=None,
                            input_shape=None,
                            output_collection_name='inference_op',
                            graph_hook_fn=None,
                            write_inference_graph=False):
  """Export helper."""
  tf.gfile.MakeDirs(output_directory)
  frozen_graph_path = os.path.join(output_directory,
                                   'frozen_inference_graph.pb')
  saved_model_path = os.path.join(output_directory, 'saved_model')
  model_path = os.path.join(output_directory, 'model.ckpt')

  outputs, placeholder_tensor = _build_detection_graph(
      input_type=input_type,
      detection_model=detection_model,
      input_shape=input_shape,
      output_collection_name=output_collection_name,
      graph_hook_fn=graph_hook_fn)

  profile_inference_graph(tf.get_default_graph())
  saver_kwargs = {}
  if use_moving_averages:
    # This check keeps compatibility with both versions of SaverDef.
    if os.path.isfile(trained_checkpoint_prefix):
      saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
      temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name
    else:
      temp_checkpoint_prefix = tempfile.mkdtemp()
    replace_variable_values_with_moving_averages(
        tf.get_default_graph(), trained_checkpoint_prefix,
        temp_checkpoint_prefix)
    checkpoint_to_use = temp_checkpoint_prefix
  else:
    checkpoint_to_use = trained_checkpoint_prefix

  saver = tf.train.Saver(**saver_kwargs)
  input_saver_def = saver.as_saver_def()

  write_graph_and_checkpoint(
      inference_graph_def=tf.get_default_graph().as_graph_def(),
      model_path=model_path,
      input_saver_def=input_saver_def,
      trained_checkpoint_prefix=checkpoint_to_use)
  if write_inference_graph:
    inference_graph_def = tf.get_default_graph().as_graph_def()
    inference_graph_path = os.path.join(output_directory,
                                        'inference_graph.pbtxt')
    for node in inference_graph_def.node:
      node.device = ''
    with gfile.GFile(inference_graph_path, 'wb') as f:
      f.write(str(inference_graph_def))

  if additional_output_tensor_names is not None:
    output_node_names = ','.join(
        list(outputs.keys()) + additional_output_tensor_names)
  else:
    output_node_names = ','.join(outputs.keys())

  frozen_graph_def = freeze_graph.freeze_graph_with_def_protos(
      input_graph_def=tf.get_default_graph().as_graph_def(),
      input_saver_def=input_saver_def,
      input_checkpoint=checkpoint_to_use,
      output_node_names=output_node_names,
      restore_op_name='save/restore_all',
      filename_tensor_name='save/Const:0',
      output_graph=frozen_graph_path,
      clear_devices=True,
      initializer_nodes='')

  write_saved_model(saved_model_path, frozen_graph_def,
                    placeholder_tensor, outputs)
Example #10
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.logging.info('Prepare to export model to: %s', FLAGS.export_path)

  with tf.Graph().as_default():
    image, image_size, resized_image_size = _create_input_tensors()

    model_options = common.ModelOptions(
        outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
        crop_size=FLAGS.crop_size,
        atrous_rates=FLAGS.atrous_rates,
        output_stride=FLAGS.output_stride)

    if tuple(FLAGS.inference_scales) == (1.0,):
      tf.logging.info('Exported model performs single-scale inference.')
      predictions = model.predict_labels(
          image,
          model_options=model_options,
          image_pyramid=FLAGS.image_pyramid)
    else:
      tf.logging.info('Exported model performs multi-scale inference.')
      predictions = model.predict_labels_multi_scale(
          image,
          model_options=model_options,
          eval_scales=FLAGS.inference_scales,
          add_flipped_images=FLAGS.add_flipped_images)

    # Crop the valid regions from the predictions.
    semantic_predictions = tf.slice(
        predictions[common.OUTPUT_TYPE],
        [0, 0, 0],
        [1, resized_image_size[0], resized_image_size[1]])
    # Resize the prediction back to the original image size.
    def _resize_label(label, label_size):
      # Expand the label to [1, height, width, 1] for the resize operation.
      label = tf.expand_dims(label, 3)
      resized_label = tf.image.resize_images(
          label,
          label_size,
          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
          align_corners=True)
      return tf.squeeze(resized_label, 3)
    semantic_predictions = _resize_label(semantic_predictions, image_size)
    semantic_predictions = tf.identity(semantic_predictions, name=_OUTPUT_NAME)

    export_path = "lol123"
    print('Exporting trained model to', export_path)
    builder = tf.saved_model.builder.SavedModelBuilder(export_path)
    # Create the TensorInfo protobuf objects that encapsulate the input/output tensors.
    tensor_info_input = tf.saved_model.utils.build_tensor_info(image)
    # output tensor info
    tensor_info_output = tf.saved_model.utils.build_tensor_info(semantic_predictions)

    prediction_signature = (
        tf.saved_model.signature_def_utils.build_signature_def(
            inputs={'images': tensor_info_input},
            outputs={'segmentation_map': tensor_info_output},
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

    saver = tf.train.Saver(tf.model_variables())
    with tf.Session() as sess:
        # Restore the trained weights first; otherwise the SavedModel would
        # capture uninitialized variables and the export would fail.
        saver.restore(sess, FLAGS.checkpoint_path)
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict_images':
                    prediction_signature,
            })
    builder.save(as_text=True)

    tf.gfile.MakeDirs(os.path.dirname(FLAGS.export_path))
    freeze_graph.freeze_graph_with_def_protos(
        tf.get_default_graph().as_graph_def(add_shapes=True),
        saver.as_saver_def(),
        FLAGS.checkpoint_path,
        _OUTPUT_NAME,
        restore_op_name=None,
        filename_tensor_name=None,
        output_graph=FLAGS.export_path,
        clear_devices=True,
        initializer_nodes=None)
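
One way to exercise the 'predict_images' signature exported above is tf.contrib.predictor (TF 1.x), which hides the SavedModel loading boilerplate; a sketch with a dummy input whose shape is an assumption about the crop size.

import numpy as np

predict_fn = tf.contrib.predictor.from_saved_model(
    "lol123", signature_def_key='predict_images')
result = predict_fn({'images': np.zeros((1, 513, 513, 3), dtype=np.uint8)})
segmentation_map = result['segmentation_map']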
Example #11
    def run(self):
        # Normalize feeds and fetch
        fetch = self.fetch.split(",") if isinstance(self.fetch,
                                                    str) else self.fetch
        feeds = self.feeds.split(",") if isinstance(self.feeds,
                                                    str) else self.feeds

        # Find latest SavedModel export in path_saved_model
        subdirs = [
            str(path) for path in Path(self.path_saved_model).iterdir()
            if path.is_dir() and "temp" not in str(path)
        ]
        latest = str(sorted(subdirs)[-1])
        LOGGER.info(f"Using SavedModel {latest}")

        # Reload SavedModel Graph, optimize and export
        with tf.Session(graph=tf.Graph()) as sess:
            meta_graph_def = tf.saved_model.loader.load(
                sess, ["serve"], latest)
            graph_def = meta_graph_def.graph_def

            # Add table initializer if present, or create it
            if INIT_ALL_TABLES in {node.name for node in graph_def.node}:
                fetch.append(INIT_ALL_TABLES)
            else:
                table_initializers = tf.get_collection(
                    tf.GraphKeys.TABLE_INITIALIZERS)
                if table_initializers:
                    LOGGER.info(f"Adding {INIT_ALL_TABLES} Node to the graph")
                    table_init_op = tf.group(*table_initializers,
                                             name=INIT_ALL_TABLES)
                    node_def = table_init_op.node_def
                    graph_def.node.append(node_def)
                    fetch.append(INIT_ALL_TABLES)

            # Rename nodes
            graph_def = rename_nodes(graph_def, self.new_names)

            # Setup (create / remove) placeholders
            graph_def = make_placeholders(graph_def, feeds)

            # Keep only the part of the graph that produces the 'fetch' tensors
            graph_def = extract_sub_graph(graph_def, fetch)

            # Replace variables with constants
            graph_def = freeze_graph_with_def_protos(
                input_graph_def=graph_def,
                input_saver_def=None,
                input_checkpoint=None,
                output_node_names=",".join(fetch),
                restore_op_name=None,
                filename_tensor_name=None,
                output_graph=None,
                clear_devices=True,
                initializer_nodes=None,
                variable_names_blacklist=",".join(self.blacklisted_variables),
                input_saved_model_dir=latest,
                saved_model_tags=["serve"],
            )
            tf.io.write_graph(graph_def,
                              logdir=self.path_optimized_model,
                              name=self.graph_name,
                              as_text=False)
            LOGGER.info(
                f"Optimized Model successfully exported to {self.path_optimized_model}/{self.graph_name}"
            )
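
To verify the export, the written GraphDef can be parsed back and re-imported; a minimal sketch assuming the same path_optimized_model and graph_name attributes.

out_path = os.path.join(self.path_optimized_model, self.graph_name)
out_graph_def = tf.GraphDef()
with tf.io.gfile.GFile(out_path, 'rb') as f:
    out_graph_def.ParseFromString(f.read())
with tf.Graph().as_default():
    # Raises if the frozen graph is malformed or references missing ops.
    tf.import_graph_def(out_graph_def, name='')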