Example #1
  def test_get_configs_from_pipeline_file(self):
    """Test that proto configs can be read from pipeline config file."""
    pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    pipeline_config.model.ssd.num_classes = 10
    pipeline_config.train_config.batch_size = 32
    pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
    pipeline_config.eval_config.num_examples = 20
    pipeline_config.eval_input_reader.add().queue_capacity = 100

    pipeline_config.Extensions[
        internal_pipeline_pb2.lstm_model].train_unroll_length = 5
    pipeline_config.Extensions[
        internal_pipeline_pb2.lstm_model].eval_unroll_length = 10

    _write_config(pipeline_config, pipeline_config_path)

    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    self.assertProtoEquals(pipeline_config.model, configs["model"])
    self.assertProtoEquals(pipeline_config.train_config,
                           configs["train_config"])
    self.assertProtoEquals(pipeline_config.train_input_reader,
                           configs["train_input_config"])
    self.assertProtoEquals(pipeline_config.eval_config, configs["eval_config"])
    self.assertProtoEquals(pipeline_config.eval_input_reader,
                           configs["eval_input_configs"])
    self.assertProtoEquals(
        pipeline_config.Extensions[internal_pipeline_pb2.lstm_model],
        configs["lstm_model"])
Example #2
def main(_):
  flags.mark_flag_as_required('export_path')
  flags.mark_flag_as_required('frozen_graph_path')
  flags.mark_flag_as_required('pipeline_config_path')

  configs = config_util.get_configs_from_pipeline_file(
      FLAGS.pipeline_config_path)
  lstm_config = configs['lstm_model']

  input_arrays = ['input_video_tensor']
  output_arrays = [
      'TFLite_Detection_PostProcess',
      'TFLite_Detection_PostProcess:1',
      'TFLite_Detection_PostProcess:2',
      'TFLite_Detection_PostProcess:3',
  ]
  input_shapes = {
      'input_video_tensor': [lstm_config.eval_unroll_length, 320, 320, 3],
  }

  converter = tf.lite.TFLiteConverter.from_frozen_graph(
      FLAGS.frozen_graph_path,
      input_arrays,
      output_arrays,
      input_shapes=input_shapes)
  converter.allow_custom_ops = True
  tflite_model = converter.convert()
  ofilename = os.path.join(FLAGS.export_path)
  with open(ofilename, 'wb') as f:
    f.write(tflite_model)
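
A quick, illustrative way to sanity-check the exported file is to load it with the TFLite interpreter and inspect its tensors; the model path here is hypothetical, and the detection post-processing custom op must be available in the runtime:

import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='/tmp/lstd_detect.tflite')  # hypothetical path
interpreter.allocate_tensors()
# Expect one [unroll_length, 320, 320, 3] input and the four post-process outputs.
print(interpreter.get_input_details())
print(interpreter.get_output_details())
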
Example #3
def main(unused_argv):
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        eval_config_path=FLAGS.eval_config_path,
        eval_input_config_path=FLAGS.input_config_path)

  pipeline_proto = config_util.create_pipeline_proto_from_configs(configs)
  config_text = text_format.MessageToString(pipeline_proto)
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  with tf.gfile.Open(os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                     'wb') as f:
    f.write(config_text)

  model_config = configs['model']
  lstm_config = configs['lstm_model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']

  if FLAGS.eval_training_data:
    input_config.external_input_reader.CopyFrom(
        configs['train_input_config'].external_input_reader)
    lstm_config.eval_unroll_length = lstm_config.train_unroll_length

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      lstm_config=lstm_config,
      is_training=False)

  def get_next(config, model_config, lstm_config, unroll_length):
    return seq_dataset_builder.build(config, model_config, lstm_config,
                                     unroll_length)

  create_input_dict_fn = functools.partial(get_next, input_config, model_config,
                                           lstm_config,
                                           lstm_config.eval_unroll_length)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  if FLAGS.run_once:
    eval_config.max_evals = 1

  evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                     FLAGS.checkpoint_dir, FLAGS.eval_dir)
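
For reference, `label_map_util.convert_label_map_to_categories` returns a list of category dictionaries with `id` and `name` keys, which is the format `evaluator.evaluate` expects; an illustration with made-up class names:

# Hypothetical result for a two-class label map.
categories = [
    {'id': 1, 'name': 'person'},
    {'id': 2, 'name': 'car'},
]
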
Example #5
def main(argv):
    del argv  # Unused.
    flags.mark_flag_as_required('output_directory')
    flags.mark_flag_as_required('pipeline_config_path')
    flags.mark_flag_as_required('trained_checkpoint_prefix')

    pipeline_config = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)

    export_tflite_lstd_graph_lib.export_tflite_graph(
        pipeline_config,
        FLAGS.trained_checkpoint_prefix,
        FLAGS.output_directory,
        FLAGS.add_postprocessing_op,
        FLAGS.max_detections,
        FLAGS.max_classes_per_detection,
        use_regular_nms=FLAGS.use_regular_nms)
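
The flags referenced in `main` are defined elsewhere in the script; a plausible sketch of those definitions, with the flag names taken from the code above and the defaults and help strings being assumptions:

from absl import flags

flags.DEFINE_string('output_directory', None, 'Directory for the exported TFLite graph.')
flags.DEFINE_string('pipeline_config_path', None, 'Path to the pipeline config file.')
flags.DEFINE_string('trained_checkpoint_prefix', None, 'Prefix of the trained checkpoint.')
flags.DEFINE_boolean('add_postprocessing_op', True, 'Whether to add the detection post-processing op.')
flags.DEFINE_integer('max_detections', 10, 'Maximum number of detections per image.')
flags.DEFINE_integer('max_classes_per_detection', 1, 'Number of classes per detection.')
flags.DEFINE_boolean('use_regular_nms', False, 'Use regular NMS instead of fast NMS.')

FLAGS = flags.FLAGS
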
Example #6
  def test_create_pipeline_proto_from_configs(self):
    """Tests that proto can be reconstructed from configs dictionary."""
    pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    pipeline_config.model.ssd.num_classes = 10
    pipeline_config.train_config.batch_size = 32
    pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
    pipeline_config.eval_config.num_examples = 20
    pipeline_config.eval_input_reader.add().queue_capacity = 100

    pipeline_config.Extensions[
        internal_pipeline_pb2.lstm_model].train_unroll_length = 5
    pipeline_config.Extensions[
        internal_pipeline_pb2.lstm_model].eval_unroll_length = 10
    _write_config(pipeline_config, pipeline_config_path)

    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    pipeline_config_reconstructed = (
        config_util.create_pipeline_proto_from_configs(configs))
    self.assertEqual(pipeline_config, pipeline_config_reconstructed)
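
The round trip in this test works because `create_pipeline_proto_from_configs` reassembles the same top-level pieces that `get_configs_from_pipeline_file` splits out. Based on the assertions in Example #1, the dictionary produced here contains roughly these keys (the `lstm_model` entry comes from the proto extension):

# Keys observed in the tests above; other versions of config_util may add more,
# e.g. the singular 'eval_input_config' shortcut used in the eval script examples.
expected_keys = {
    'model', 'train_config', 'train_input_config',
    'eval_config', 'eval_input_configs', 'lstm_model',
}
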
Example #8
def main(_):
  assert FLAGS.train_dir, '`train_dir` is missing.'
  if FLAGS.task == 0:
    tf.gfile.MakeDirs(FLAGS.train_dir)
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    if FLAGS.task == 0:
      tf.gfile.Copy(
          FLAGS.pipeline_config_path,
          os.path.join(FLAGS.train_dir, 'pipeline.config'),
          overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        train_config_path=FLAGS.train_config_path,
        train_input_config_path=FLAGS.input_config_path)
    if FLAGS.task == 0:
      for name, config in [('model.config', FLAGS.model_config_path),
                           ('train.config', FLAGS.train_config_path),
                           ('input.config', FLAGS.input_config_path)]:
        tf.gfile.Copy(
            config, os.path.join(FLAGS.train_dir, name), overwrite=True)

  model_config = configs['model']
  lstm_config = configs['lstm_model']
  train_config = configs['train_config']
  input_config = configs['train_input_config']

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      lstm_config=lstm_config,
      is_training=True)

  def get_next(config, model_config, lstm_config, unroll_length):
    data_augmentation_options = [
        preprocessor_builder.build(step)
        for step in train_config.data_augmentation_options
    ]
    return seq_dataset_builder.build(
        config,
        model_config,
        lstm_config,
        unroll_length,
        data_augmentation_options,
        batch_size=train_config.batch_size)

  create_input_dict_fn = functools.partial(get_next, input_config, model_config,
                                           lstm_config,
                                           lstm_config.train_unroll_length)

  env = json.loads(os.environ.get('TF_CONFIG', '{}'))
  cluster_data = env.get('cluster', None)
  cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
  task_data = env.get('task', None) or {'type': 'master', 'index': 0}
  task_info = type('TaskSpec', (object,), task_data)

  # Parameters for a single worker.
  ps_tasks = 0
  worker_replicas = 1
  worker_job_name = 'lonely_worker'
  task = 0
  is_chief = True
  master = ''

  if cluster_data and 'worker' in cluster_data:
    # Number of total worker replicas include "worker"s and the "master".
    worker_replicas = len(cluster_data['worker']) + 1
  if cluster_data and 'ps' in cluster_data:
    ps_tasks = len(cluster_data['ps'])

  if worker_replicas > 1 and ps_tasks < 1:
    raise ValueError('At least 1 ps task is needed for distributed training.')

  if worker_replicas >= 1 and ps_tasks > 0:
    # Set up distributed training.
    server = tf.train.Server(
        tf.train.ClusterSpec(cluster),
        protocol='grpc',
        job_name=task_info.type,
        task_index=task_info.index)
    if task_info.type == 'ps':
      server.join()
      return

    worker_job_name = '%s/task:%d' % (task_info.type, task_info.index)
    task = task_info.index
    is_chief = (task_info.type == 'master')
    master = server.target

  trainer.train(create_input_dict_fn, model_fn, train_config, master, task,
                FLAGS.num_clones, worker_replicas, FLAGS.clone_on_cpu, ps_tasks,
                worker_job_name, is_chief, FLAGS.train_dir)
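
The distributed branch above is driven entirely by the `TF_CONFIG` environment variable; an illustrative value (host addresses are made up) that would take the multi-worker path looks like this:

import json
import os

# One master, two workers and one parameter server; this process is worker 0.
os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {
        'master': ['host0:2222'],
        'worker': ['host1:2222', 'host2:2222'],
        'ps': ['host3:2222'],
    },
    'task': {'type': 'worker', 'index': 0},
})
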