Example #1
def export(self):
    # Parse the training pipeline config
    pipeline_config = TrainEvalPipelineConfig()
    Merge(self.config_path.read_text(), pipeline_config)
    # Find the latest checkpoint and strip the ".meta" extension
    last_ckpt = max(self.training_path.glob("model.ckpt-*.meta"),
                    key=_get_ckpt_number_from_file).with_suffix("")
    # "model.ckpt-12345" -> "12345"
    n_steps = last_ckpt.suffix.split("-")[-1]
    export_inference_graph(
        input_type="image_tensor",
        pipeline_config=pipeline_config,
        trained_checkpoint_prefix=str(last_ckpt),
        output_directory=str(PIPELINES_DIR / self.task /
                             f"{self.name}__{n_steps}_steps"),
    )
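The `_get_ckpt_number_from_file` helper is not shown in this excerpt; a minimal sketch of what it presumably does, inferred from its usage above rather than taken from the original source:

def _get_ckpt_number_from_file(path):
    # hypothetical helper: "model.ckpt-12345.meta" -> 12345,
    # so max() selects the most recent checkpoint
    return int(path.stem.split("-")[-1])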
Example #2
def _load_config(config_path):
    from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig
    from google.protobuf.text_format import Merge
    config = TrainEvalPipelineConfig()

    with open(config_path, 'r') as f:
        config_str = f.read()

    # Drop lines containing the deprecated 'batch_norm_trainable' field so
    # that Merge() does not fail on configs written for older versions of
    # the Object Detection API
    lines = config_str.split('\n')
    lines = [line for line in lines if 'batch_norm_trainable' not in line]
    config_str = '\n'.join(lines)

    Merge(config_str, config)

    return config
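Typical usage, assuming the TensorFlow Object Detection API is installed (the config path below is a placeholder):

config = _load_config('pipeline.config')  # placeholder path
print(config.model.WhichOneof('model'))   # e.g. 'ssd' or 'faster_rcnn'
print(config.train_config.batch_size)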
Example #3
def build_detection_graph(config, checkpoint):
    """Build an object detection model from the TensorFlow model zoo.

    This function creates an object detection model, sourced from the
    TensorFlow object detection API.

    It is necessary to use this function to generate a frozen graph that is
    compatible with TensorFlow/TensorRT integration.  In addition to generating
    a graph that is compatible with TensorFlow's TensorRT package, this
    function performs other graph modifications, such as forced device
    placement, that improve performance on Jetson.  These graph modifications
    are tested with a subset of the object detection API and may or may not
    work well with models not listed.

    The workflow when using this method is:

    1. Train model using TensorFlow object detection API
    2. Build graph configured for Jetson using this function
    3. Optimize the graph output by this method with the TensorRT package in
       TensorFlow
    4. Execute in regular TensorFlow, or using the high level TFModel class

    :param config: path to the object detection pipeline config file
    :type config: string
    :param checkpoint: path prefix of the checkpoint files containing the trained model parameters
    :type checkpoint: string
    :returns: the configured frozen graph representing object detection model
    :rtype: a tensorflow GraphDef
    """
    global input_name, output_map

    if isinstance(config, str):
        with open(config, 'r') as f:
            config_str = f.read()
            config = TrainEvalPipelineConfig()
            text_format.Merge(config_str, config)

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with tf.Graph().as_default() as tf_graph:
        with tf.Session(config=tf_config) as tf_sess:

            model = model_builder.build(model_config=config.model,
                                        is_training=False)

            tf_input = tf.placeholder(tf.float32, [1, None, None, 3],
                                      name=input_name)
            tf_preprocessed, tf_true_image_shapes = model.preprocess(tf_input)
            tf_predictions = model.predict(
                preprocessed_inputs=tf_preprocessed,
                true_image_shapes=tf_true_image_shapes)
            tf_postprocessed = model.postprocess(
                prediction_dict=tf_predictions,
                true_image_shapes=tf_true_image_shapes)

            tf_saver = tf.train.Saver()
            tf_saver.restore(save_path=checkpoint, sess=tf_sess)

            outputs = {}
            for key, op in tf_postprocessed.items():
                if key in output_map:
                    outputs[output_map[key]] = \
                        tf.identity(op, name=output_map[key])

            frozen_graph = tf.graph_util.convert_variables_to_constants(
                tf_sess,
                tf_sess.graph_def,
                output_node_names=list(outputs.keys()))

            frozen_graph = convert_relu6(frozen_graph)

            remove_op(frozen_graph, 'Assert')
            """
            If non-split, this is good. but split model, device decide by before/after NMS.
            """
            # force CPU device placement for NMS ops
            #for node in frozen_graph.node:
            #    if 'NonMaxSuppression' in node.name:
            #        node.device = '/device:CPU:0'

    return frozen_graph, [input_name], list(outputs.keys())
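Step 3 of the workflow above (TensorRT optimization) is not part of this function; a minimal sketch using the TF 1.x contrib TensorRT integration, with placeholder paths and parameters:

import tensorflow.contrib.tensorrt as trt

frozen_graph, input_names, output_names = build_detection_graph(
    config='pipeline.config',       # placeholder path
    checkpoint='model.ckpt-50000')  # placeholder checkpoint prefix

# Replace TensorRT-compatible subgraphs with accelerated TRTEngineOp nodes
trt_graph = trt.create_inference_graph(
    input_graph_def=frozen_graph,
    outputs=output_names,
    max_batch_size=1,
    max_workspace_size_bytes=1 << 25,
    precision_mode='FP16',
    minimum_segment_size=50)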
Example #4
def read_config(self):
    config = TrainEvalPipelineConfig()
    Merge((CONFIGS_DIR / self.config_name).read_text(), config)
    return config
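This method depends on module-level imports and a CONFIGS_DIR constant that the excerpt omits; presumably something along these lines (the directory value is an assumption):

from pathlib import Path

from google.protobuf.text_format import Merge
from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig

CONFIGS_DIR = Path('configs')  # assumed; not part of the original excerpt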
Example #5
import os
import sys

from google.protobuf import text_format
from object_detection.protos.pipeline_pb2 import TrainEvalPipelineConfig
from object_detection.protos.string_int_label_map_pb2 import StringIntLabelMap, StringIntLabelMapItem

(LABELMAP_PATH, CLASS_NAME, PIPELINE_PATH, SOURCE_MODEL_PATH,
 SOURCE_PIPELINE_PATH, TRAIN_TFRECORD_PATH, TEST_TFRECORD_PATH) = sys.argv[1:8]

# Save the label map
labelmap = StringIntLabelMap()
labelmap.item.append(StringIntLabelMapItem(id=1, name=CLASS_NAME))
with open(LABELMAP_PATH, 'w') as f:
    f.write(text_format.MessageToString(labelmap))

# Edit the source pipeline and save it as the new pipeline
pipeline = TrainEvalPipelineConfig()
with open(SOURCE_PIPELINE_PATH, 'r') as f:
    text_format.Merge(f.read(), pipeline)
pipeline.model.ssd.num_classes = 1
pipeline.train_input_reader.label_map_path = LABELMAP_PATH
pipeline.train_input_reader.tf_record_input_reader.input_path[0] = (
    TRAIN_TFRECORD_PATH)
pipeline.train_config.fine_tune_checkpoint = os.path.join(
    SOURCE_MODEL_PATH, 'checkpoint/ckpt-0')
pipeline.train_config.fine_tune_checkpoint_type = 'detection'
pipeline.train_config.batch_size = 4
pipeline.train_config.use_bfloat16 = False
pipeline.eval_input_reader[0].label_map_path = LABELMAP_PATH
pipeline.eval_input_reader[0].tf_record_input_reader.input_path[0] = (
    TEST_TFRECORD_PATH)
pipeline.eval_config.metrics_set[0] = 'coco_detection_metrics'
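The snippet ends before writing the result: PIPELINE_PATH is read from sys.argv but never used, so presumably the edited pipeline is saved back out like this:

# Save the edited pipeline in text format (assumed final step)
with open(PIPELINE_PATH, 'w') as f:
    f.write(text_format.MessageToString(pipeline))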
Example #6
def build_detection_graph(config_path, checkpoint):
    """Build an object detection model from the TensorFlow model zoo.

    This function creates an object detection model, sourced from the
    TensorFlow object detection API.

    It is necessary to use this function to generate a frozen graph that is
    compatible with TensorFlow/TensorRT integration.  In addition to generating
    a graph that is compatible with TensorFlow's TensorRT package, this
    function performs other graph modifications, such as forced device
    placement, that improve performance on Jetson.  These graph modifications
    are tested with a subset of the object detection API and may or may not
    work well with models not listed.

    The workflow when using this method is:

    1. Train model using TensorFlow object detection API
    2. Build graph configured for Jetson using this function
    3. Optimize the graph output by this method with the TensorRT package in
       TensorFlow
    4. Execute in regular TensorFlow, or using the high level TFModel class

    :param config_path: path to the object detection pipeline config file
    :type config_path: string
    :param checkpoint: path prefix of the checkpoint files containing the trained model parameters
    :type checkpoint: string
    :returns: the configured frozen graph representing object detection model
    :rtype: a tensorflow GraphDef
    """
    global input_name, output_names

    # Parse the pipeline config from its text-format file (config_path is
    # documented as a string, so no isinstance guard is needed here; the
    # original guard could leave pipeline_config undefined)
    pipeline_config = TrainEvalPipelineConfig()
    with open(config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with tf.Graph().as_default() as tf_graph:
        with tf.Session(config=tf_config) as tf_sess:

            model = model_builder.build(model_config=pipeline_config.model,
                                        is_training=False,
                                        add_summaries=False)

            tf_input = tf.placeholder(tf.float32, [1, None, None, 3],
                                      name=input_name)
            tf_preprocessed, tf_true_image_shapes = model.preprocess(tf_input)
            tf_predictions = model.predict(
                preprocessed_inputs=tf_preprocessed,
                true_image_shapes=tf_true_image_shapes)
            tf_postprocessed = model.postprocess(
                prediction_dict=tf_predictions,
                true_image_shapes=tf_true_image_shapes)

            tf_saver = tf.train.Saver()
            tf_saver.restore(save_path=checkpoint, sess=tf_sess)

            for key, op in tf_postprocessed.items():
                if key in output_names:
                    _ = tf.identity(op, name=key)

            frozen_graph_def = tf.graph_util.convert_variables_to_constants(
                tf_sess, tf_sess.graph_def, output_node_names=output_names)

            frozen_graph_def = convert_relu6(frozen_graph_def)

            # The following line is commented out because it causes
            # trouble for faster_rcnn models...
            #remove_op(frozen_graph_def, 'Assert')

            # force CPU device placement for NMS ops
            for node in frozen_graph_def.node:
                if 'NonMaxSuppression' in node.name:
                    node.device = '/device:CPU:0'
                if 'faster_rcnn_' in config_path and 'SecondStage' in node.name:
                    node.device = '/device:CPU:0'
                if 'rfcn_' in config_path and 'SecondStage' in node.name:
                    node.device = '/device:CPU:0'

    return frozen_graph_def, [input_name], output_names
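Step 4 of the workflow (executing the frozen graph in regular TensorFlow) might look like the following sketch; the input image is a placeholder:

import numpy as np
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(frozen_graph_def, name='')

with tf.Session(graph=graph) as sess:
    image = np.zeros((1, 300, 300, 3), dtype=np.float32)  # placeholder input
    fetches = [graph.get_tensor_by_name(name + ':0') for name in output_names]
    results = sess.run(fetches, feed_dict={input_name + ':0': image})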