def export(cfg, tfmo):
  # Restore the latest checkpoint and convert the trained MobileNetSSD model to IE format.
  checkpoint_path = tf.train.latest_checkpoint(cfg.model_dir)

  detector_params = cfg.detector_params.copy()
  with tf.Session() as sess:
    input_tensor = tf.placeholder(dtype=tf.float32, shape=(None,) + tuple(cfg.input_shape))

    # Drop training-only parameters that the inference-time constructor does not accept.
    for unnecessary_param in ['initial_weights_path',
                              'learning_rate',
                              'optimizer',
                              'weights_decay_factor',
                              'collect_priors_summary']:
      if unnecessary_param in detector_params:
        del detector_params[unnecessary_param]

    ssd = MobileNetSSD(input_tensor=input_tensor, is_training=False, **detector_params)
    ssd.detection_output()

    train_param, _ = ssd.create_transform_parameters(width=cfg.input_shape[0], height=cfg.input_shape[1])

    saver = tf.train.Saver()
    saver.restore(sess, checkpoint_path)

    mean_values = [train_param.mean_value for _ in range(3)]
    convert_to_ie(ssd, sess, os.path.join(cfg.model_dir, 'ie_model/'), tfmo, batch_size=1,
                  scale=1./train_param.scale, mean_values=mean_values)
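A minimal, hypothetical sketch of how export() might be invoked; the config fields below are inferred from the attributes read above (model_dir, input_shape, detector_params), and all concrete values and paths are placeholders.

import types

# Hypothetical configuration object mirroring what export() reads.
cfg = types.SimpleNamespace(
    model_dir='/path/to/model_dir',          # directory with TF checkpoints; IE model is written to <model_dir>/ie_model/
    input_shape=(300, 300, 3),               # shape appended to the (None,) batch dimension of the placeholder
    detector_params={'num_classes': 2})      # kwargs forwarded to MobileNetSSD (assumed minimal set)

export(cfg, tfmo='/path/to/model_optimizer')  # tfmo is passed through to convert_to_ie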
Example #2
    def __init__(self,
                 batch_size,
                 input_shape,
                 json_path,
                 classes,
                 num_parallel_calls=2,
                 prefetch_size=2):
        self.batch_size = batch_size
        self.input_shape = input_shape
        self.json_path = json_path
        self.num_parallel_calls = num_parallel_calls
        self.prefetch_size = prefetch_size

        ObjectDetectorJson.init_cache(self.json_path,
                                      cache_type='NONE',
                                      classes=classes)

        dataset, self.dataset_size = ObjectDetectorJson.create_dataset(
            self.json_path, classes=classes)
        _, self.transform_param = MobileNetSSD.create_transform_parameters(
            *input_shape[:2])
        self.transformer = AnnotatedDataTransformer(self.transform_param,
                                                    is_training=False)

        print('Total evaluation steps: {}'.format(
            math.ceil(self.dataset_size / self.batch_size)))

        transform_fn = lambda value: ObjectDetectorJson.transform_fn(
            value, self.transformer)
        map_fn = lambda value: tf.py_func(transform_fn, [value],
                                          (tf.float32, tf.string))
        self.dataset = dataset.map(map_fn,
                                   num_parallel_calls=num_parallel_calls)
        self.dataset = self.dataset.batch(
            self.batch_size).prefetch(prefetch_size)
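A rough consumption sketch, assuming this __init__ belongs to an evaluation-input class (called EvalDataset here purely for illustration); the constructor arguments are placeholders, and the iterator usage is standard TF 1.x tf.data API.

# Hypothetical consumption of the batched dataset built above.
eval_data = EvalDataset(batch_size=32,
                        input_shape=(300, 300, 3),
                        json_path='annotations.json',
                        classes=['bg', 'person'])
iterator = eval_data.dataset.make_one_shot_iterator()
images, annotations = iterator.get_next()   # dtypes (tf.float32, tf.string), as declared in the py_func above

with tf.Session() as sess:
    batch_images, batch_annotations = sess.run([images, annotations])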
Example #3
    def __init__(self,
                 batch_size,
                 input_shape,
                 json_path,
                 cache_type='NONE',
                 classes=['bg'],
                 fill_with_current_image_mean=True,
                 num_parallel_calls=4,
                 prefetch_size=16):
        self.batch_size = batch_size
        self.input_shape = input_shape
        self.json_path = json_path
        self.cache_type = cache_type
        self.num_parallel_calls = num_parallel_calls
        self.prefetch_size = prefetch_size
        self.classes = classes

        ObjectDetectorJson.init_cache(self.json_path,
                                      cache_type,
                                      classes=classes)

        self.train_dataset, self.dataset_size = ObjectDetectorJson.create_dataset(
            self.json_path, classes)
        self.train_transform_param, _ = MobileNetSSD.create_transform_parameters(
            input_shape[0], input_shape[1], fill_with_current_image_mean)
        self.train_transformer = AnnotatedDataTransformer(
            self.train_transform_param, is_training=True)
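This constructor stops after building the transformer; a hedged sketch of how the training pipeline could be assembled from these fields, mirroring the map/batch/prefetch chain of Example #2 (the helper name and the shuffle buffer size are assumptions, not part of the original class).

# Hypothetical helper: turn the raw training dataset into a batched, shuffled pipeline.
def make_train_pipeline(dataset, transformer, batch_size, num_parallel_calls, prefetch_size):
    transform = lambda value: ObjectDetectorJson.transform_fn(value, transformer)
    map_fn = lambda value: tf.py_func(transform, [value], (tf.float32, tf.string))
    return (dataset.shuffle(buffer_size=1000)               # assumed shuffle for training
                   .map(map_fn, num_parallel_calls=num_parallel_calls)
                   .batch(batch_size)
                   .prefetch(prefetch_size))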
Example #4
  def input_fn(self):
    dataset = self.create_dataset()

    _, transform_param = MobileNetSSD.create_transform_parameters(*self.input_shape[:2])
    transformer = AnnotatedDataTransformer(transform_param, is_training=False)

    transform_fn = lambda value: InputInferData.transform_fn(value, transformer)
    map_fn = lambda value: tf.py_func(transform_fn, [value], tf.float32)
    dataset = dataset.map(map_fn, num_parallel_calls=self.num_parallel_calls).batch(self.batch_size).prefetch(
      self.prefetch_size)

    image = dataset.make_one_shot_iterator().get_next()
    image.set_shape([None] + list(self.input_shape))
    return image
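A hedged smoke-test sketch for this inference input_fn; `infer_data` is assumed to be an already-constructed instance of the class shown above, since its constructor is not part of this snippet.

# Hypothetical check: pull one batch and inspect its shape.
image_batch = infer_data.input_fn()
with tf.Session() as sess:
  print(sess.run(image_batch).shape)   # -> (batch_size,) + tuple(input_shape)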
Example #5
def freezing_graph(config, checkpoint, output_dir):
    # Restore a checkpoint, freeze the inference graph and dump the TFMO config next to it.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    detector_params = config.detector_params.copy()
    with tf.Session() as sess:
        input_tensor = tf.placeholder(dtype=tf.float32,
                                      shape=(None, ) +
                                      tuple(config.input_shape))

        for unnecessary_param in [
                'initial_weights_path', 'learning_rate', 'optimizer',
                'weights_decay_factor', 'collect_priors_summary'
        ]:
            if unnecessary_param in detector_params:
                del detector_params[unnecessary_param]

        ssd = MobileNetSSD(input_tensor=input_tensor,
                           is_training=False,
                           **detector_params)
        ssd.detection_output()
        # For eval.py
        tf.get_variable('eval_iteration',
                        initializer=0,
                        dtype=tf.int32,
                        trainable=False)
        tf.get_variable('global_step',
                        initializer=tf.constant_initializer(0, dtype=tf.int64),
                        shape=(),
                        dtype=tf.int64,
                        trainable=False)

        train_param, _ = ssd.create_transform_parameters(
            width=config.input_shape[0], height=config.input_shape[1])

        saver = tf.train.Saver()
        saver.restore(sess, checkpoint)

        mean_values = [train_param.mean_value for _ in range(3)]
        print(mean_values)
        print(train_param.scale)
        print(1. / train_param.scale)

        ssd_config = ssd.get_config_for_tfmo()
        graph_file = os.path.join(output_dir, 'graph.pb')
        frozen = dump_frozen_graph(sess, graph_file,
                                   ssd_config['output_nodes'])

        # Generate custom_operations_config for mo
        ssd_config_path = frozen.replace('.pb.frozen', '.tfmo.json')
        with open(ssd_config_path, mode='w') as file:
            file.write(ssd_config['json'])

        return frozen, ssd_config_path, train_param, ssd_config
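A hedged usage sketch of freezing_graph(); the config object and paths are placeholders mirroring the fields read above, and the '.pb.frozen' suffix follows from the .replace() call on the returned path.

# Hypothetical invocation; `config` must expose detector_params and input_shape.
frozen_pb, tfmo_json, train_param, ssd_config = freezing_graph(
    config,
    checkpoint=tf.train.latest_checkpoint('/path/to/model_dir'),
    output_dir='/path/to/export')
print(frozen_pb)    # '<output_dir>/graph.pb.frozen', produced by dump_frozen_graph
print(tfmo_json)    # '<output_dir>/graph.tfmo.json', the custom operations config for the Model Optimizer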
Example #6
  def sample_data(json_path, num_samples, input_shape, classes, seed=666):
    # Deterministically pick `num_samples` annotated items and apply the evaluation transform.
    if num_samples == 0:
      return None

    data, _ = ObjectDetectorJson.json_iterator(json_path, classes)
    data = [x for x in data()]
    # data = ObjectDetectorJson.convert_coco_to_toolbox_format(COCO(json_path), classes)

    ObjectDetectorJson.init_cache(json_path, cache_type='NONE', classes=classes)

    rng = random.Random(seed)
    selected_items = rng.sample(range(len(data)), num_samples)

    _, transform_param = MobileNetSSD.create_transform_parameters(*input_shape[:2])
    transformer = AnnotatedDataTransformer(transform_param, is_training=False)

    transform_fn = lambda value: ObjectDetectorJson.transform_fn(value, transformer, add_original_image=True)
    return [transform_fn(data[i]) for i in selected_items]
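The fixed seed makes the sampled subset reproducible between runs; a minimal standard-library illustration of that behaviour:

import random

# Two generators with the same seed select the same indices, so repeated
# evaluation runs operate on an identical sample.
indices_a = random.Random(666).sample(range(1000), 5)
indices_b = random.Random(666).sample(range(1000), 5)
assert indices_a == indices_b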
Example #7
def detection_model(features, labels, mode, params):
  # Estimator model_fn: builds a MobileNetSSD and returns an EstimatorSpec for the given mode.
  num_classes = params['num_classes']
  # Original lookup replaced here with a hard-coded initial weights path:
  # initial_weights_path = params.get('initial_weights_path', '')
  initial_weights_path = './model.ckpt.data-00000-of-00001'
  log_dir = params['log_dir']
  collect_priors_summary = params['collect_priors_summary']

  data_format = params.get('data_format', 'NHWC')
  depth_multiplier = params.get('depth_multiplier', 1.0)
  priors_rule = params.get('priors_rule', 'caffe')
  custom_priors = params.get('priors', [])
  learning_rate = params.get('learning_rate', 0.01)
  steps_per_epoch = params.get('steps_per_epoch', 1)
  mobilenet_version = params.get('mobilenet_version', 'v2')
  weight_regularization = params.get('weight_regularization', 4e-5)
  optimizer_func = params.get('optimizer', lambda learning_rate: tf.train.AdagradOptimizer(learning_rate=learning_rate))

  # Override default FileWriter. Don't store the graph definition.
  # pylint: disable=protected-access
  tf.summary.FileWriterCache._cache[log_dir] = tf.summary.FileWriter(log_dir, graph=None)

  if callable(learning_rate):
    learning_rate = learning_rate()

  is_training = mode == tf.estimator.ModeKeys.TRAIN

  ssd = MobileNetSSD(input_tensor=features, num_classes=num_classes, depth_multiplier=depth_multiplier,
                     is_training=is_training, data_format=data_format, priors_rule=priors_rule,
                     priors=custom_priors, mobilenet_version=mobilenet_version,
                     weight_regularization=weight_regularization)  # 1. Build model

  if mode == tf.estimator.ModeKeys.PREDICT:
    decoded_predictions = ssd.detection_output(use_plain_caffe_format=False)
    return tf.estimator.EstimatorSpec(mode, predictions=decoded_predictions)

  assert mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL)
  targets = ssd.create_targets(labels)  # 2. Build GT from annotation

  if collect_priors_summary:
    with tf.name_scope('summary/'):
      assigned_priors = create_tensors_and_streaming_ops_for_assigned_priors(targets, ssd.priors_info, num_classes)
      detailed_assigned_priors = get_detailed_assigned_priors_summary_tf(assigned_priors, ssd.priors_info)

  loss_func = MultiboxLoss(neg_pos_ratio=3.0)  # 3. Build loss-object

  eval_iteration = tf.get_variable('eval_iteration', initializer=0, dtype=tf.int32, trainable=False)
  if mode == tf.estimator.ModeKeys.EVAL:
    eval_print_steps = steps_per_epoch // 50
    eval_print_steps = 1 if eval_print_steps == 0 else eval_print_steps

    every_eval_print_steps = tf.equal(tf.mod(eval_iteration + 1, eval_print_steps), 0)
    eval_iteration = tf.assign(eval_iteration, eval_iteration + 1)
    targets = with_dependencies([eval_iteration], targets)

    loss = loss_func.eval_summary(targets, ssd.predictions)
    loss = tf.cond(every_eval_print_steps,
                   lambda: tf.Print(loss, [tf.round(100 * eval_iteration / steps_per_epoch), loss], '[%][loss]: '),
                   lambda: loss)

    eval_metric_ops = {}
    for key, val in loss_func.eval_tensors.items():
      eval_metric_ops['loss_function/' + key] = tf.metrics.mean(val)

    if collect_priors_summary:
      for key, metric_ops in assigned_priors.items():  # We need only update ops
        eval_metric_ops[key] = metric_ops

      for key, assigned_priors_tensor in detailed_assigned_priors.items():
        eval_metric_ops['prior_histogram/' + key] = (assigned_priors_tensor, tf.no_op())

    decoded_predictions = ssd.detection_output(use_plain_caffe_format=False)
    eval_metric_ops['predictions'] = tf.contrib.metrics.streaming_concat(decoded_predictions)

    return tf.estimator.EstimatorSpec(
      mode,
      loss=loss,
      eval_metric_ops=eval_metric_ops
    )

  assert mode == tf.estimator.ModeKeys.TRAIN
  if initial_weights_path:
    tf.logging.info('Initialize from: ' + initial_weights_path)
    ssd.load_weights(initial_weights_path)

  bboxes = ssd._decode_boxes(ssd.predictions['locs'], priors=ssd.priors[0, 0], variance=ssd.priors[0, 1])
  loss = loss_func.loss(targets, ssd.predictions, bboxes)  # 4. Compute loss with NMS

  if collect_priors_summary:
    with tf.name_scope('summary/'):
      loss = with_dependencies([operation for key, (_, operation) in assigned_priors.items()], loss)

    for name, assigned_priors_tensor in detailed_assigned_priors.items():
      tf.summary.scalar(name, tf.reduce_sum(assigned_priors_tensor))

    py_func_ops = []
    priors_dir = os.path.join(log_dir, 'priors')

    with tf.name_scope('write_histogram'):
      every_epoch = tf.equal(tf.mod(tf.train.get_global_step() + 1, steps_per_epoch), 0)
      for name, (group, _) in assigned_priors.items():
        def write_hist2d():
          # pylint: disable=cell-var-from-loop
          return tf.py_func(write_histogram_2d_tf,
                            [group, pickle.dumps(ssd.priors_info), name, tf.train.get_global_step(), priors_dir],
                            tf.bool)

        write_hist2d_once_per_epoch = tf.cond(every_epoch, write_hist2d, tf.no_op)
        py_func_ops.append(write_hist2d_once_per_epoch)

      loss = with_dependencies(py_func_ops, loss)

  optimizer = optimizer_func(learning_rate)
  tf.summary.scalar('learning_rate', learning_rate)

  regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
  regularization_loss = tf.add_n(regularization_losses, name='loss_function/regularization_losses_sum')
  total_loss = tf.add(loss, regularization_loss, name='loss_function/total_loss')

  tf.summary.scalar('loss_function/regularization_loss', regularization_loss)

  with tf.variable_scope('train_loop'):
    train_op = optimizer.minimize(total_loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op)
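A hedged sketch of plugging this model_fn into a tf.estimator.Estimator; the params keys mirror those read at the top of detection_model, while the concrete values, paths and train_input_fn are placeholders.

# Hypothetical Estimator wiring for detection_model (TF 1.x Estimator API).
params = {
  'num_classes': 2,                   # read directly via params[...]
  'log_dir': '/path/to/logs',
  'collect_priors_summary': False,
  'learning_rate': 0.01,              # optional keys, read via params.get(...)
  'steps_per_epoch': 1000,
  'mobilenet_version': 'v2',
}
estimator = tf.estimator.Estimator(model_fn=detection_model,
                                   model_dir='/path/to/model_dir',
                                   params=params)
estimator.train(input_fn=train_input_fn, steps=10000)  # train_input_fn: assumed tf.data input fn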