Example #1
 def test_export_yields_correct_directory_structure(
     self, input_type='image_tensor'):
   tmp_dir = self.get_temp_dir()
   self._save_checkpoint_from_mock_model(tmp_dir)
   with mock.patch.object(
       model_builder, 'build', autospec=True) as mock_builder:
     mock_builder.return_value = FakeModel()
     output_directory = os.path.join(tmp_dir, 'output')
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     exporter_lib_v2.export_inference_graph(
         input_type=input_type,
         pipeline_config=pipeline_config,
         trained_checkpoint_dir=tmp_dir,
         output_directory=output_directory)
     self.assertTrue(os.path.exists(os.path.join(
         output_directory, 'saved_model', 'saved_model.pb')))
     self.assertTrue(os.path.exists(os.path.join(
         output_directory, 'saved_model', 'variables', 'variables.index')))
     self.assertTrue(os.path.exists(os.path.join(
         output_directory, 'saved_model', 'variables',
         'variables.data-00000-of-00001')))
     self.assertTrue(os.path.exists(os.path.join(
         output_directory, 'checkpoint', 'ckpt-0.index')))
     self.assertTrue(os.path.exists(os.path.join(
         output_directory, 'checkpoint', 'ckpt-0.data-00000-of-00001')))
     self.assertTrue(os.path.exists(os.path.join(
         output_directory, 'pipeline.config')))
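As the assertions spell out, export_inference_graph writes three artifacts under output_directory: a saved_model/ directory (saved_model.pb plus variables/), a checkpoint/ directory (ckpt-0.index and ckpt-0.data-00000-of-00001), and a copy of pipeline.config.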
Example #2
def main(_):
    # set the GPU (device:GPU:0)
    print("Num GPUs Available: ",
          len(tf.config.experimental.list_physical_devices('GPU')))
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        # Restrict TensorFlow to only use the first GPU
        try:
            tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
            tf.config.experimental.set_memory_growth(gpus[0], True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus),
                  "Logical GPU")
        except RuntimeError as e:
            # Visible devices must be set before GPUs have been initialized
            print(e)

    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
        text_format.Merge(FLAGS.config_override, pipeline_config)
        exporter_lib_v2.export_inference_graph(
            FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_dir,
            FLAGS.output_directory, FLAGS.use_side_inputs,
            FLAGS.side_input_shapes, FLAGS.side_input_types,
            FLAGS.side_input_names)
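For reference, the GPU pinning at the top of this example can also be written with the non-experimental tf.config aliases. A minimal sketch, assuming TF 2.1+ (set_memory_growth still lives under tf.config.experimental):

import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')
if gpus:
    try:
        # Restrict TensorFlow to the first GPU and let its memory grow on demand.
        tf.config.set_visible_devices(gpus[0], 'GPU')
        tf.config.experimental.set_memory_growth(gpus[0], True)
        logical_gpus = tf.config.list_logical_devices('GPU')
        print(len(gpus), 'Physical GPUs,', len(logical_gpus), 'Logical GPUs')
    except RuntimeError as e:
        # Visible devices must be set before the GPUs have been initialized.
        print(e)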
Example #3
    def test_export_checkpoint_and_run_inference_with_image(self):
        tmp_dir = self.get_temp_dir()
        self._save_checkpoint_from_mock_model(tmp_dir, conv_weight_scalar=2.0)
        with mock.patch.object(model_builder, 'build',
                               autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel()
            exporter_lib_v2.INPUT_BUILDER_UTIL_MAP[
                'model_build'] = mock_builder
            output_directory = os.path.join(tmp_dir, 'output')
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            exporter_lib_v2.export_inference_graph(
                input_type='image_tensor',
                pipeline_config=pipeline_config,
                trained_checkpoint_dir=tmp_dir,
                output_directory=output_directory)

            mock_model = FakeModel()
            ckpt = tf.compat.v2.train.Checkpoint(model=mock_model)
            checkpoint_dir = os.path.join(tmp_dir, 'output', 'checkpoint')
            manager = tf.compat.v2.train.CheckpointManager(ckpt,
                                                           checkpoint_dir,
                                                           max_to_keep=7)
            ckpt.restore(manager.latest_checkpoint).expect_partial()

            fake_image = tf.ones(shape=[1, 5, 5, 3], dtype=tf.float32)
            preprocessed_inputs, true_image_shapes = mock_model.preprocess(
                fake_image)
            predictions = mock_model.predict(preprocessed_inputs,
                                             true_image_shapes)
            detections = mock_model.postprocess(predictions, true_image_shapes)

            # 150 = conv_weight_scalar * height * width * channels = 2 * 5 * 5 * 3.
            self.assertAllClose(
                detections['detection_scores'],
                [[150 + 0.7, 150 + 0.6], [150 + 0.9, 150 + 0.0]])
Example #4
    def test_export_saved_model_and_run_inference_for_segmentation(
            self, input_type='image_and_boxes_tensor'):
        tmp_dir = self.get_temp_dir()
        self._save_checkpoint_from_mock_model(tmp_dir)

        with mock.patch.object(model_builder, 'build',
                               autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel()
            exporter_lib_v2.INPUT_BUILDER_UTIL_MAP[
                'model_build'] = mock_builder
            output_directory = os.path.join(tmp_dir, 'output')
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            exporter_lib_v2.export_inference_graph(
                input_type=input_type,
                pipeline_config=pipeline_config,
                trained_checkpoint_dir=tmp_dir,
                output_directory=output_directory)

            saved_model_path = os.path.join(output_directory, 'saved_model')
            detect_fn = tf.saved_model.load(saved_model_path)
            image = self.get_dummy_input(input_type)
            boxes = tf.constant([
                [
                    [0.0, 0.0, 0.5, 0.5],
                    [0.5, 0.5, 0.8, 0.8],
                ],
            ])
            detections = detect_fn(tf.constant(image), boxes)

            detection_fields = fields.DetectionResultFields
            self.assertIn(detection_fields.detection_masks, detections)
            self.assertListEqual(
                list(detections[detection_fields.detection_masks].shape),
                [1, 2, 16])
Example #5
    def test_export_saved_model_and_run_inference(self,
                                                  input_type='image_tensor'):
        tmp_dir = self.get_temp_dir()
        self._save_checkpoint_from_mock_model(tmp_dir)
        with mock.patch.object(model_builder, 'build',
                               autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel()
            exporter_lib_v2.INPUT_BUILDER_UTIL_MAP[
                'model_build'] = mock_builder
            output_directory = os.path.join(tmp_dir, 'output')
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
            exporter_lib_v2.export_inference_graph(
                input_type=input_type,
                pipeline_config=pipeline_config,
                trained_checkpoint_dir=tmp_dir,
                output_directory=output_directory)

            saved_model_path = os.path.join(output_directory, 'saved_model')
            detect_fn = tf.saved_model.load(saved_model_path)
            image = self.get_dummy_input(input_type)
            detections = detect_fn(tf.constant(image))

            detection_fields = fields.DetectionResultFields
            self.assertAllClose(detections[detection_fields.detection_boxes],
                                [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
                                 [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]])
            self.assertAllClose(detections[detection_fields.detection_scores],
                                [[0.7, 0.6], [0.9, 0.0]])
            self.assertAllClose(detections[detection_fields.detection_classes],
                                [[1, 2], [2, 1]])
            self.assertAllClose(detections[detection_fields.num_detections],
                                [2, 1])
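Beyond the dummy input used by the test, a minimal inference sketch against an exported 'image_tensor' SavedModel; the file paths are placeholders and the uint8 [1, H, W, 3] layout is assumed from the tests above:

import tensorflow as tf

detect_fn = tf.saved_model.load('/path/to/output/saved_model')
image = tf.io.decode_jpeg(tf.io.read_file('/path/to/image.jpg'), channels=3)
detections = detect_fn(image[tf.newaxis, ...])  # add the batch dimension
print(detections['num_detections'].numpy())
print(detections['detection_scores'][0][:5].numpy())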
Example #6
def main(_):
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
    text_format.Merge(FLAGS.config_override, pipeline_config)
    exporter_lib_v2.export_inference_graph(FLAGS.input_type, pipeline_config,
                                           FLAGS.trained_checkpoint_dir,
                                           FLAGS.output_directory)
Example #7
def export_model(trained_checkpoint_dir, pipeline_config_path, output_directory):
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.io.gfile.GFile(pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
    text_format.Merge(FLAGS.config_override, pipeline_config)
    exporter_lib_v2.export_inference_graph(
        'image_tensor', pipeline_config, trained_checkpoint_dir,
        output_directory)
Example #8
def main(_):
  if FLAGS.gpu_device:
    os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu_device)
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
    text_format.Merge(f.read(), pipeline_config)
  text_format.Merge(FLAGS.config_override, pipeline_config)
  exporter_lib_v2.export_inference_graph(
      FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_dir,
      FLAGS.output_directory, FLAGS.use_side_inputs, FLAGS.side_input_shapes,
      FLAGS.side_input_types, FLAGS.side_input_names)
Example #9
    def export_model(self):
        configs = config_util.get_configs_from_pipeline_file(
            self._pipeline_config_path)
        pipeline_config = config_util.create_pipeline_proto_from_configs(
            configs)

        exporter_lib_v2.export_inference_graph(
            input_type="image_tensor",
            pipeline_config=pipeline_config,
            trained_checkpoint_dir=self._training_loop_path,
            output_directory=self._exported_path)
Example #10
def main(_):
    with open('system_dict.json') as json_file:
        args = json.load(json_file) 
    
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.io.gfile.GFile(args["pipeline_config_path"], 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
    text_format.Merge(args["config_override"], pipeline_config)
    exporter_lib_v2.export_inference_graph(
                args["input_type"], pipeline_config, args["trained_checkpoint_dir"],
                args["output_directory"])
Example #11
    def test_export_saved_model_and_run_inference_with_side_inputs(
            self, input_type='image_tensor', use_default_serving=True):
        tmp_dir = self.get_temp_dir()
        self._save_checkpoint_from_mock_model(tmp_dir)
        with mock.patch.object(model_builder, 'build',
                               autospec=True) as mock_builder:
            mock_builder.return_value = FakeModel()
            exporter_lib_v2.INPUT_BUILDER_UTIL_MAP[
                'model_build'] = mock_builder
            output_directory = os.path.join(tmp_dir, 'output')
            pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
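            # Side-input strings below: names and types are comma-separated per
            # input, while shapes use '/' between inputs, so '1/2,2' means
            # side_inp_1 has shape [1] and side_inp_2 has shape [2, 2] (matching
            # the np.ones side inputs constructed further down).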
            exporter_lib_v2.export_inference_graph(
                input_type=input_type,
                pipeline_config=pipeline_config,
                trained_checkpoint_dir=tmp_dir,
                output_directory=output_directory,
                use_side_inputs=True,
                side_input_shapes='1/2,2',
                side_input_names='side_inp_1,side_inp_2',
                side_input_types='tf.float32,tf.uint8')

            saved_model_path = os.path.join(output_directory, 'saved_model')
            detect_fn = tf.saved_model.load(saved_model_path)
            detect_fn_sig = detect_fn.signatures['serving_default']
            image = tf.constant(self.get_dummy_input(input_type))
            side_input_1 = np.ones((1, ), dtype=np.float32)
            side_input_2 = np.ones((2, 2), dtype=np.uint8)
            if use_default_serving:
                detections = detect_fn_sig(
                    input_tensor=image,
                    side_inp_1=tf.constant(side_input_1),
                    side_inp_2=tf.constant(side_input_2))
            else:
                detections = detect_fn(image, tf.constant(side_input_1),
                                       tf.constant(side_input_2))

            detection_fields = fields.DetectionResultFields
            self.assertAllClose(detections[detection_fields.detection_boxes],
                                [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]],
                                 [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]])
            self.assertAllClose(detections[detection_fields.detection_scores],
                                [[400.7, 400.6], [400.9, 400.0]])
            self.assertAllClose(detections[detection_fields.detection_classes],
                                [[1, 2], [2, 1]])
            self.assertAllClose(detections[detection_fields.num_detections],
                                [2, 1])
Example #12
 def _export_saved_model(self):
     tmp_dir = self.get_temp_dir()
     self._save_checkpoint_from_mock_model(tmp_dir)
     output_directory = os.path.join(tmp_dir, 'output')
     saved_model_path = os.path.join(output_directory, 'saved_model')
     tf.io.gfile.makedirs(output_directory)
     with mock.patch.object(model_builder, 'build',
                            autospec=True) as mock_builder:
         mock_builder.return_value = FakeModel()
         output_directory = os.path.join(tmp_dir, 'output')
         pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
         exporter_lib_v2.export_inference_graph(
             input_type='tf_example',
             pipeline_config=pipeline_config,
             trained_checkpoint_dir=tmp_dir,
             output_directory=output_directory)
         saved_model_path = os.path.join(output_directory, 'saved_model')
     return saved_model_path
Example #13
    def export_model(self) -> None:
        if not os.path.isdir(self.path.export_dir):
            os.makedirs(self.path.export_dir)

        pipeline_config_path: str = os.path.join(self.path.model_dir,
                                                 'pipeline.config')
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        try:
            with tf.io.gfile.GFile(pipeline_config_path, 'r') as f:
                text_format.Merge(f.read(), pipeline_config)
            text_format.Merge('', pipeline_config)
            exporter_lib_v2.export_inference_graph(
                input_type='image_tensor',
                pipeline_config=pipeline_config,
                trained_checkpoint_dir=self.path.model_dir,
                output_directory=self.path.export_dir)
        except Exception as e:
            raise TensorflowInternalError(additional_message=e.__str__())
Example #14
def main(_):
    trained_checkpoint_dir = './input'
    output_directory = './output'
    files = os.listdir(trained_checkpoint_dir)
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory)

    for file in files:
        if file.endswith('.config'):
            pipeline_config_path = os.path.join(trained_checkpoint_dir, file)

    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.io.gfile.GFile(pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
    text_format.Merge(FLAGS.config_override, pipeline_config)
    exporter_lib_v2.export_inference_graph(
        FLAGS.input_type, pipeline_config, trained_checkpoint_dir,
        output_directory, FLAGS.use_side_inputs, FLAGS.side_input_shapes,
        FLAGS.side_input_types, FLAGS.side_input_names)
Example #15
 def export(self, add_weight_as_input=False):
     """
         Exports trained model with or without additional weight input
     Args:
         add_weight_as_input: If true, add weight 'gesamtgewicht' as additional input to input signature
             of exported model
     """
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     with tf.io.gfile.GFile(self.config_path, 'r') as f:
         text_format.Merge(f.read(), pipeline_config)
     exporter_lib_v2.export_inference_graph(
         input_type='image_tensor',
         pipeline_config=pipeline_config,
         trained_checkpoint_dir=self.checkpoint_path,
         output_directory=self.export_path,
         use_side_inputs=add_weight_as_input,
         side_input_shapes='1',
         side_input_types='tf.float32',
         side_input_names='weightScaled')
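A hypothetical call site for a model exported above with add_weight_as_input=True, following the serving-signature pattern from Example #11; the SavedModel path, image size, and weight value are placeholders:

import numpy as np
import tensorflow as tf

detect_fn = tf.saved_model.load('exported/saved_model')
serving_fn = detect_fn.signatures['serving_default']
image = np.zeros((1, 640, 640, 3), dtype=np.uint8)  # placeholder image batch
detections = serving_fn(
    input_tensor=tf.constant(image),
    weightScaled=tf.constant([1.0], dtype=tf.float32))  # side input, shape '1'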
Example #16
def main(train_images_dir,
         pipeline_config_path,
         output_directory,
         checkpoint_path,
         num_epochs=1,
         image_dict=None,
         labels_path=None,
         samples=None):
    detection_model, pipeline_proto, ckpt_manager = create_model(
        pipeline_config_path, output_directory, checkpoint_path)

    train_files = os.listdir(train_images_dir)
    random.shuffle(train_files)
    BATCH_SIZE = 32
    num_batches = (len(train_files) // BATCH_SIZE) - 1
    for epoch in range(num_epochs):
        for idx in range(num_batches):
            batch_files = train_files[BATCH_SIZE * idx:BATCH_SIZE * (idx + 1)]
            train_images_np, train_gt_box = load_images(
                train_images_dir, batch_files)
            train_image_tensors, gt_classes_one_hot_tensors, gt_box_tensors = \
                prepare_data(train_images_np, train_gt_box)
            detection_model, losses_dict = train_model(
                detection_model, train_images_np, train_image_tensors,
                gt_classes_one_hot_tensors, gt_box_tensors, ckpt_manager)
            logger.info(
                utils.log_results(epoch, num_epochs, idx, num_batches,
                                  losses_dict))
            if idx % 10 == 0:
                ckpt_manager.save()
                print('Checkpoint saved!')
    exporter_lib_v2.export_inference_graph(input_type='image_tensor',
                                           pipeline_config=pipeline_proto,
                                           trained_checkpoint_dir=os.path.join(
                                               output_directory,
                                               r'checkpoint'),
                                           output_directory=output_directory)
Example #17
import tensorflow.compat.v2 as tf
from google.protobuf import text_format
from object_detection import exporter_lib_v2
from object_detection.protos import pipeline_pb2

tf.enable_v2_behavior()

class FLAGS:
    modelname = 'fasterrcnn_resnet50_fpn'  # not used here
    #modelbasefolder = '../models/ModelZoo/faster_rcnn_resnet50_v1_640x640_coco17_tpu-8/saved_model/'
    #modelfilename='faster_rcnn_resnet50_v1_640x640_coco17_tpu-8' #not used
    #showfig='True'
    #labelmappath = '../models/research/object_detection/data/mscoco_label_map.pbtxt'
    #threshold = 0.3
    pipeline_config_path = '/Developer/MyRepo/WaymoObjectDetection/2DObject/tfobjectdetection/tf_ssdresnet50_1024_pipeline_P100.config'
    input_type = 'image_tensor'
    trained_checkpoint_dir = '/Developer/MyRepo/mymodels/tf_ssdresnet50_output'
    output_directory='/Developer/MyRepo/mymodels/tf_ssdresnet50_output/exported130'
    use_side_inputs=False
    side_input_shapes=''
    side_input_types=''
    side_input_names=''


if __name__ == '__main__':
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
    #text_format.Merge(FLAGS.config_override, pipeline_config)
    exporter_lib_v2.export_inference_graph(
        FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_dir,
        FLAGS.output_directory, FLAGS.use_side_inputs, FLAGS.side_input_shapes,
        FLAGS.side_input_types, FLAGS.side_input_names)