Пример #1
0
    def test_model_variables(self):
        """Checks Keras (eager and graph) variable names match the legacy model."""
        input_shape = (1, 512, 512, 3)
        # Eager-mode Keras model: build() creates all variables.
        model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
        model.build(input_shape)
        eager_train_vars = sorted(
            [var.name for var in model.trainable_variables])
        eager_model_vars = sorted([var.name for var in model.variables])
        # Same Keras model, constructed inside a tf.Graph.
        with tf.Graph().as_default():
            feats = tf.ones([1, 512, 512, 3])  # NOTE(review): unused here — build() is used instead.
            model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
            model.build(input_shape)
            keras_train_vars = sorted(
                [var.name for var in model.trainable_variables])
            keras_model_vars = sorted([var.name for var in model.variables])
        # Legacy graph-mode implementation; variables come from TF1 collections.
        with tf.Graph().as_default():
            feats = tf.ones([1, 512, 512, 3])
            legacy_arch.efficientdet(feats, 'efficientdet-d0')
            legacy_train_vars = sorted(
                [var.name for var in tf.trainable_variables()])
            legacy_model_vars = sorted(
                [var.name for var in tf.global_variables()])

        # All three construction paths must expose identical variable names.
        self.assertEqual(keras_train_vars, legacy_train_vars)
        self.assertEqual(keras_model_vars, legacy_model_vars)
        self.assertEqual(eager_train_vars, legacy_train_vars)
        self.assertEqual(eager_model_vars, legacy_model_vars)
Пример #2
0
 def test_model_variables(self):
     """Checks variable names and UPDATE_OPS match between Keras and legacy."""
     input_shape = (1, 512, 512, 3)
     # Eager-mode Keras model.
     model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
     model.build(input_shape)
     eager_train_vars = sorted(
         [var.name for var in model.trainable_variables])
     eager_model_vars = sorted([var.name for var in model.variables])
     # Keras model called inside a graph with training=True, so update ops
     # (presumably batch-norm moving-average updates — confirm) are recorded
     # in the UPDATE_OPS collection.
     with tf.Graph().as_default():
         feats = tf.ones([1, 512, 512, 3])
         model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
         model(feats, True)
         keras_train_vars = sorted(
             [var.name for var in tf.trainable_variables()])
         keras_model_vars = sorted(
             [var.name for var in tf.global_variables()])
         keras_update_ops = [
             op.name for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
         ]
     # Legacy graph-mode implementation for comparison.
     with tf.Graph().as_default():
         feats = tf.ones([1, 512, 512, 3])
         legacy_arch.efficientdet(feats, 'efficientdet-d0')
         legacy_train_vars = sorted(
             [var.name for var in tf.trainable_variables()])
         legacy_model_vars = sorted(
             [var.name for var in tf.global_variables()])
         legacy_update_ops = [
             op.name for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
         ]
     # Names of variables and update ops must agree across all paths.
     self.assertEqual(keras_train_vars, legacy_train_vars)
     self.assertEqual(keras_model_vars, legacy_model_vars)
     self.assertEqual(eager_train_vars, legacy_train_vars)
     self.assertEqual(eager_model_vars, legacy_model_vars)
     self.assertAllEqual(keras_update_ops, legacy_update_ops)
Пример #3
0
  def test_model_output(self):
    """Keras model outputs (graph and eager) must match the legacy model."""
    inputs_shape = [1, 512, 512, 3]
    config = hparams_config.get_efficientdet_config('efficientdet-d0')
    tmp_ckpt = os.path.join(tempfile.mkdtemp(), 'ckpt')
    # Run the Keras model in graph mode and snapshot its (seeded) weights.
    with tf.Session(graph=tf.Graph()) as sess:
      feats = tf.ones(inputs_shape)
      tf.random.set_random_seed(SEED)  # same seed used for both models below
      model = efficientdet_keras.EfficientDetNet(config=config)
      outputs = model(feats)
      sess.run(tf.global_variables_initializer())
      keras_class_out, keras_box_out = sess.run(outputs)
      model.save_weights(tmp_ckpt)
    # Run the legacy graph model with the same seed so init values match.
    with tf.Session(graph=tf.Graph()) as sess:
      feats = tf.ones(inputs_shape)
      tf.random.set_random_seed(SEED)
      feats = legacy_arch.efficientdet(feats, config=config)
      sess.run(tf.global_variables_initializer())
      legacy_class_out, legacy_box_out = sess.run(feats)
    # Keras outputs are lists indexed from 0; legacy outputs are indexed by
    # feature level (3..7 here, judging from the i-3 offset).
    for i in range(3, 8):
      self.assertAllClose(
          keras_class_out[i - 3], legacy_class_out[i], rtol=1e-4, atol=1e-4)
      self.assertAllClose(
          keras_box_out[i - 3], legacy_box_out[i], rtol=1e-4, atol=1e-4)

    # Eager path: restore the saved weights and compare against legacy again.
    feats = tf.ones(inputs_shape)
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.load_weights(tmp_ckpt)
    eager_class_out, eager_box_out = model(feats)
    for i in range(3, 8):
      self.assertAllClose(
          eager_class_out[i - 3], legacy_class_out[i], rtol=1e-4, atol=1e-4)
      self.assertAllClose(
          eager_box_out[i - 3], legacy_box_out[i], rtol=1e-4, atol=1e-4)
    def test_eager_output(self):
        """Eager outputs after restoring weights match graph-mode outputs."""
        inputs_shape = [1, 512, 512, 3]
        config = hparams_config.get_efficientdet_config('efficientdet-d0')
        # Both detection and segmentation heads are exercised here.
        config.heads = ['object_detection', 'segmentation']
        tmp_ckpt = os.path.join(tempfile.mkdtemp(), 'ckpt2')

        # Graph mode: run the model (training=True) and save its weights.
        with tf.Session(graph=tf.Graph()) as sess:
            feats = tf.ones(inputs_shape)
            tf.random.set_random_seed(SEED)
            model = efficientdet_keras.EfficientDetNet(config=config)
            outputs = model(feats, True)
            sess.run(tf.global_variables_initializer())
            keras_class_out, keras_box_out, keras_seg_out = sess.run(outputs)
            model.save_weights(tmp_ckpt)

        # Eager mode: restore the same weights and compare every head.
        feats = tf.ones(inputs_shape)
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.load_weights(tmp_ckpt)
        eager_class_out, eager_box_out, eager_seg_out = model(feats, True)
        # Five detection output levels (presumably levels 3..7 — confirm).
        for i in range(5):
            self.assertAllClose(eager_class_out[i],
                                keras_class_out[i],
                                rtol=1e-4,
                                atol=1e-4)
            self.assertAllClose(eager_box_out[i],
                                keras_box_out[i],
                                rtol=1e-4,
                                atol=1e-4)
        self.assertAllClose(eager_seg_out, keras_seg_out, rtol=1e-4, atol=1e-4)
Пример #5
0
    def test_model_output(self):
        """Compares outputs and input-gradients of Keras vs legacy models."""
        inputs_shape = [1, 512, 512, 3]
        config = hparams_config.get_efficientdet_config('efficientdet-d0')
        config.heads = ['object_detection', 'segmentation']
        tmp_ckpt = os.path.join(tempfile.mkdtemp(), 'ckpt')
        # Keras model in graph mode: outputs, gradients w.r.t. input, weights.
        with tf.Session(graph=tf.Graph()) as sess:
            feats = tf.ones(inputs_shape)
            tf.random.set_random_seed(SEED)  # same seed as the legacy run
            model = efficientdet_keras.EfficientDetNet(config=config)
            outputs = model(feats, True)
            sess.run(tf.global_variables_initializer())
            keras_class_out, keras_box_out, keras_seg_out = sess.run(outputs)
            grads = tf.nest.map_structure(
                lambda output: tf.gradients(output, feats), outputs)
            # NOTE(review): keras_seg_grads is computed but never compared
            # below — the legacy model produces no segmentation output.
            keras_class_grads, keras_box_grads, keras_seg_grads = sess.run(
                grads)
            model.save_weights(tmp_ckpt)
        # Legacy model in a second graph; it has class/box outputs only.
        with tf.Session(graph=tf.Graph()) as sess:
            feats = tf.ones(inputs_shape)
            tf.random.set_random_seed(SEED)
            outputs = legacy_arch.efficientdet(feats, config=config)
            sess.run(tf.global_variables_initializer())
            legacy_class_out, legacy_box_out = sess.run(outputs)
            grads = tf.nest.map_structure(
                lambda output: tf.gradients(output, feats), outputs)
            legacy_class_grads, legacy_box_grads = sess.run(grads)

        # Keras lists are 0-indexed; legacy outputs indexed by level (i-3).
        for i in range(3, 8):
            self.assertAllClose(keras_class_out[i - 3],
                                legacy_class_out[i],
                                rtol=1e-4,
                                atol=1e-4)
            self.assertAllClose(keras_box_out[i - 3],
                                legacy_box_out[i],
                                rtol=1e-4,
                                atol=1e-4)
            self.assertAllClose(keras_class_grads[i - 3],
                                legacy_class_grads[i],
                                rtol=1e-4,
                                atol=1e-4)
            self.assertAllClose(keras_box_grads[i - 3],
                                legacy_box_grads[i],
                                rtol=1e-4,
                                atol=1e-4)

        # Eager path: restore weights, compare detection outputs to legacy and
        # the segmentation output to the Keras graph-mode result.
        feats = tf.ones(inputs_shape)
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.load_weights(tmp_ckpt)
        eager_class_out, eager_box_out, eager_seg_out = model(feats, True)
        for i in range(3, 8):
            self.assertAllClose(eager_class_out[i - 3],
                                legacy_class_out[i],
                                rtol=1e-4,
                                atol=1e-4)
            self.assertAllClose(eager_box_out[i - 3],
                                legacy_box_out[i],
                                rtol=1e-4,
                                atol=1e-4)
        self.assertAllClose(eager_seg_out, keras_seg_out, rtol=1e-4, atol=1e-4)
Пример #6
0
    def model_arch(feats, model_name=None, **kwargs):
      """Construct a model arch for keras models.

      Args:
        feats: input image tensor fed to the network.
        model_name: EfficientDet model name, e.g. 'efficientdet-d0'.
        **kwargs: hparam overrides applied on top of the model's default config.

      Returns:
        A pair (cls_outputs, box_outputs) of dicts keyed by feature level,
        from config.min_level to config.max_level inclusive.
      """
      config = hparams_config.get_efficientdet_config(model_name)
      config.override(kwargs)
      model = efficientdet_keras.EfficientDetNet(config=config)
      cls_out_list, box_out_list = model(feats, training=False)
      # Convert the list of model outputs to a dictionary with key=level.
      assert len(cls_out_list) == config.max_level - config.min_level + 1
      assert len(box_out_list) == config.max_level - config.min_level + 1
      cls_outputs, box_outputs = {}, {}
      for i in range(config.min_level, config.max_level + 1):
        cls_outputs[i] = cls_out_list[i - config.min_level]
        box_outputs[i] = box_out_list[i - config.min_level]
      return cls_outputs, box_outputs
Пример #7
0
def main(_):
    """Trains EfficientDet-D0 as a segmentation model and prints a test mask.

    NOTE(review): relies on module-level names not visible in this chunk
    (`info`, `dataset`, `load_image_train`, `load_image_test`, `create_mask`)
    — presumably a tfds dataset pipeline; confirm in the enclosing module.
    """
    train_examples = info.splits['train'].num_examples
    batch_size = 8
    steps_per_epoch = train_examples // batch_size

    # Build train/test input pipelines.
    train = dataset['train'].map(
        load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    test = dataset['test'].map(load_image_test)

    train_dataset = train.cache().shuffle(1000).batch(batch_size).repeat()
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)
    test_dataset = test.batch(batch_size)

    model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
    model.build((1, 512, 512, 3))
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])

    # Validation covers a fraction of the test split per epoch.
    val_subsplits = 5
    val_steps = info.splits['test'].num_examples // batch_size // val_subsplits
    model.fit(train_dataset,
              epochs=20,
              steps_per_epoch=steps_per_epoch,
              validation_steps=val_steps,
              validation_data=test_dataset,
              callbacks=[])

    model.save_weights('./test/segmentation')

    # Sanity check: predict a mask for an all-ones input.
    print(create_mask(model(tf.ones((1, 512, 512, 3)), False)))
Пример #8
0
def main(_):
    """Evaluates an EfficientDet checkpoint on COCO, with optional flip TTA.

    Restores the latest checkpoint from FLAGS.model_dir, runs detection over
    the validation dataset, and prints COCO metrics. With --enable_tta, each
    image is also run horizontally flipped and the two detection sets are
    ensembled via weighted boxes fusion (wbf).
    """
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.batch_size = FLAGS.batch_size
    config.val_json_file = FLAGS.val_json_file

    # dataset
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        use_fake_data=False,
        max_instances_per_image=config.max_instances_per_image)(config)

    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((config.batch_size, None, None, 3))
    model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)

    # compute stats for all batches.
    for images, labels in ds:
        config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS

        cls_outputs, box_outputs = model(images, training=False)
        detections = postprocess.generate_detections(config, cls_outputs,
                                                     box_outputs,
                                                     labels['image_scales'],
                                                     labels['source_ids'],
                                                     False)

        if FLAGS.enable_tta:
            # Run the flipped image too and ensemble both detection sets.
            images_flipped = tf.image.flip_left_right(images)
            cls_outputs_flipped, box_outputs_flipped = model(images_flipped,
                                                             training=False)
            detections_flipped = postprocess.generate_detections(
                config, cls_outputs_flipped, box_outputs_flipped,
                labels['image_scales'], labels['source_ids'], True)

            # Ensemble per-image pairs of (original, flipped) detections.
            for d, df in zip(detections, detections_flipped):
                combined_detections = wbf.ensemble_detections(
                    config, tf.concat([d, df], 0))
                combined_detections = tf.stack([combined_detections])
                evaluator.update_state(
                    labels['groundtruth_data'].numpy(),
                    postprocess.transform_detections(
                        combined_detections).numpy())
        else:
            evaluator.update_state(
                labels['groundtruth_data'].numpy(),
                postprocess.transform_detections(detections).numpy())

    # compute the final eval results.
    metric_values = evaluator.result()
    metric_dict = {}
    for i, metric_value in enumerate(metric_values):
        metric_dict[evaluator.metric_names[i]] = metric_value
    print(metric_dict)
Пример #9
0
 def model_fn(inputs):
   """Runs EfficientDetNet and returns per-level class/box outputs as dicts."""
   net = efficientdet_keras.EfficientDetNet(
       config=hparams_config.Config(params))
   class_list, box_list = net(inputs, params['is_training_bn'])
   min_level = params['min_level']
   levels = range(min_level, params['max_level'] + 1)
   # Re-key the per-level output lists as {level: output} dictionaries.
   cls_outputs = {level: class_list[level - min_level] for level in levels}
   box_outputs = {level: box_list[level - min_level] for level in levels}
   return cls_outputs, box_outputs
Пример #10
0
 def build(self, params_override=None):
   """Build model and restore checkpoints."""
   merged = copy.deepcopy(self.params)
   if params_override:
     merged.update(params_override)
   config = hparams_config.get_efficientdet_config(self.model_name)
   config.override(merged)
   # Pick the bare network or the full model wrapper depending on the flag.
   model_cls = (
       efficientdet_keras.EfficientDetNet
       if self.only_network else efficientdet_keras.EfficientDetModel)
   self.model = model_cls(config=config)
   height, width = utils.parse_image_size(merged['image_size'])
   self.model.build((self.batch_size, height, width, 3))
   util_keras.restore_ckpt(self.model, self.ckpt_path)
Пример #11
0
 def model_arch(feats, model_name=None, **kwargs):
     """Construct a model arch for keras models."""
     config = hparams_config.get_efficientdet_config(model_name)
     config.override(kwargs)
     net = efficientdet_keras.EfficientDetNet(config=config)
     class_list, box_list = net(feats, training=False)
     num_levels = config.max_level - config.min_level + 1
     assert len(class_list) == num_levels
     assert len(box_list) == num_levels
     # Re-key the per-level output lists as {level: output} dictionaries.
     levels = range(config.min_level, config.max_level + 1)
     cls_outputs = {lvl: class_list[lvl - config.min_level] for lvl in levels}
     box_outputs = {lvl: box_list[lvl - config.min_level] for lvl in levels}
     return cls_outputs, box_outputs
Пример #12
0
 def build(self, params_override=None):
   """Build model and restore checkpoints."""
   merged = copy.deepcopy(self.params)
   if params_override:
     merged.update(params_override)
   config = hparams_config.get_efficientdet_config(self.model_name)
   config.override(merged)
   # Pick the bare network or the full model wrapper depending on the flag.
   model_cls = (
       efficientdet_keras.EfficientDetNet
       if self.only_network else efficientdet_keras.EfficientDetModel)
   self.model = model_cls(config=config)
   height, width = utils.parse_image_size(merged['image_size'])
   self.model.build((self.batch_size, height, width, 3))
   util_keras.restore_ckpt(self.model, self.ckpt_path,
                           self.params['moving_average_decay'],
                           skip_mismatch=False)
   if self.debug:
     tf.config.run_functions_eagerly(self.debug)
Пример #13
0
def main(_):
  """Evaluates an EfficientDet checkpoint on the COCO validation set.

  Restores the latest checkpoint from FLAGS.model_dir, runs detection over
  the validation dataset at a fixed 512x512 input size, and prints the COCO
  metrics as a dict.
  """
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.batch_size = FLAGS.batch_size
  config.val_json_file = FLAGS.val_json_file

  # dataset
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      use_fake_data=False,
      max_instances_per_image=config.max_instances_per_image)(
          config)

  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((config.batch_size, 512, 512, 3))
  model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file)

  # compute stats for all batches.
  for images, labels in ds:
    cls_outputs, box_outputs = model(images, training=False)
    # Keep all anchor outputs as candidate NMS inputs.
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())

  # compute the final eval results.
  metric_values = evaluator.result()
  metric_dict = {}
  for i, metric_value in enumerate(metric_values):
    metric_dict[evaluator.metric_names[i]] = metric_value
  print(metric_dict)
Пример #14
0
def main(_):
    """Evaluates EfficientDet-D0 on one COCO val shard with hard-coded paths.

    NOTE(review): checkpoint, annotation, and tfrecord paths are hard-coded
    under 'tmp/' — this looks like a local demo/debug script.
    """
    config = hparams_config.get_efficientdet_config('efficientdet-d0')
    config.batch_size = 8
    config.val_json_file = 'tmp/coco/annotations/instances_val2017.json'

    # dataset
    input_files = 'tmp/coco/val-00000-of-00032.tfrecord'
    is_training = False
    ds = dataloader.InputReader(
        input_files,
        is_training=is_training,
        use_fake_data=False,
        max_instances_per_image=config.max_instances_per_image)(config)

    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((config.batch_size, 512, 512, 3))
    model.load_weights('tmp/efficientdet-d0/model')

    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)
    # compute stats for all batches.
    for images, labels in ds:
        cls_outputs, box_outputs = model(images, training=False)
        # Keep all anchor outputs as candidate NMS inputs.
        config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
        detections = postprocess.generate_detections(config, cls_outputs,
                                                     box_outputs,
                                                     labels['image_scales'],
                                                     labels['source_ids'])
        evaluator.update_state(labels['groundtruth_data'].numpy(),
                               detections.numpy())

    # compute the final eval results.
    metric_values = evaluator.result()
    metric_dict = {}
    for i, metric_value in enumerate(metric_values):
        metric_dict[evaluator.metric_names[i]] = metric_value
    print(metric_dict)
Пример #15
0
def main(_):
  """Distributed COCO evaluation of an EfficientDet checkpoint.

  Selects a tf.distribute strategy (TPU / multi-GPU / single device) from
  config.strategy, restores the checkpoint (with optional moving-average
  weights), runs detection over the distributed validation dataset, and
  prints overall plus per-class AP metrics.
  """
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])

  # Choose the distribution strategy.
  if config.strategy == 'tpu':
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
    tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
    ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
    logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
  elif config.strategy == 'gpus':
    ds_strategy = tf.distribute.MirroredStrategy()
    logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
  else:
    if tf.config.list_physical_devices('GPU'):
      ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
    else:
      ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

  with ds_strategy.scope():
    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((None, *config.image_size, 3))
    util_keras.restore_ckpt(model,
                            tf.train.latest_checkpoint(FLAGS.model_dir),
                            config.moving_average_decay,
                            skip_mismatch=False)
    # Per-replica step: detect, postprocess, and feed the evaluator (the
    # evaluator update runs as a host call via tf.numpy_function).
    @tf.function
    def model_fn(images, labels):
      cls_outputs, box_outputs = model(images, training=False)
      detections = postprocess.generate_detections(config,
                                                   cls_outputs,
                                                   box_outputs,
                                                   labels['image_scales'],
                                                   labels['source_ids'])
      tf.numpy_function(evaluator.update_state,
                        [labels['groundtruth_data'],
                         postprocess.transform_detections(detections)], [])

    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(
        filename=config.val_json_file, label_map=label_map)

    # dataset
    batch_size = FLAGS.batch_size   # global batch size.
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    if FLAGS.eval_samples:
      ds = ds.take((FLAGS.eval_samples + batch_size - 1) // batch_size)
    ds = ds_strategy.experimental_distribute_dataset(ds)

    # evaluate all images.
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar((eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
      ds_strategy.run(model_fn, (images, labels))
      pbar.update(i)

  # compute the final eval results.
  metrics = evaluator.result()
  metric_dict = {}
  for i, name in enumerate(evaluator.metric_names):
    metric_dict[name] = metrics[i]

  # Per-class AP entries follow the standard metrics in `metrics`.
  if label_map:
    for i, cid in enumerate(sorted(label_map.keys())):
      name = 'AP_/%s' % label_map[cid]
      metric_dict[name] = metrics[i + len(evaluator.metric_names)]
  print(FLAGS.model_name, metric_dict)
Пример #16
0
 def test_irregular_shape(self):
     """A model configured at 896x1600 should also accept other input sizes."""
     config = hparams_config.get_efficientdet_config('efficientdet-d0')
     config.image_size = '896x1600'
     net = efficientdet_keras.EfficientDetNet(config=config)
     # The configured size first, then a size that matches neither dimension.
     for shape in ([1, 896, 1600, 3], [1, 499, 333, 3]):
         net(tf.ones(shape), False)
Пример #17
0
def main(_):
  """Evaluates an EfficientDet checkpoint on COCO with optional TTA.

  With --enable_tta, each image is evaluated at several scales and with
  horizontal flips; per-image detections are accumulated by source id and
  ensembled via weighted boxes fusion before scoring.

  Bug fix: the original version scored every image against
  labels['groundtruth_data'] left over from the LAST batch of the loop.
  Groundtruth is now accumulated per source id alongside the detections, so
  each image is scored against its own groundtruth.
  """
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.batch_size = FLAGS.batch_size
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  base_height, base_width = utils.parse_image_size(config['image_size'])

  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((config.batch_size, base_height, base_width, 3))
  model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

  # in format (height, width, flip)
  augmentations = []
  if FLAGS.enable_tta:
    for size_offset in (0, 128, 256):
      for flip in (False, True):
        augmentations.append(
            (base_height + size_offset, base_width + size_offset, flip))
  else:
    augmentations.append((base_height, base_width, False))

  detections_per_source = dict()
  groundtruth_per_source = dict()  # source id -> that image's groundtruth
  for height, width, flip in augmentations:
    config.image_size = (height, width)
    # dataset
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        use_fake_data=False,
        max_instances_per_image=config.max_instances_per_image)(
            config)

    # compute stats for all batches.
    for images, labels in ds:
      if flip:
        images = tf.image.flip_left_right(images)
      cls_outputs, box_outputs = model(images, training=False)
      detections = postprocess.generate_detections(config, cls_outputs,
                                                   box_outputs,
                                                   labels['image_scales'],
                                                   labels['source_ids'], flip)

      # Accumulate detections (and each image's own groundtruth) keyed by
      # source id, so all augmentations of one image can be ensembled.
      for img_id, d, g in zip(labels['source_ids'], detections,
                              labels['groundtruth_data']):
        if img_id.numpy() in detections_per_source:
          detections_per_source[img_id.numpy()] = tf.concat(
              [d, detections_per_source[img_id.numpy()]], 0)
        else:
          detections_per_source[img_id.numpy()] = d
        groundtruth_per_source[img_id.numpy()] = g

  evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)
  for img_id, d in detections_per_source.items():
    if FLAGS.enable_tta:
      d = wbf.ensemble_detections(config, d, len(augmentations))
    # Score each image against its own groundtruth (see docstring).
    evaluator.update_state(
        tf.stack([groundtruth_per_source[img_id]]).numpy(),
        postprocess.transform_detections(tf.stack([d])).numpy())

  # compute the final eval results.
  metric_values = evaluator.result()
  metric_dict = {}
  for i, metric_value in enumerate(metric_values):
    metric_dict[evaluator.metric_names[i]] = metric_value
  print(metric_dict)
Пример #18
0
def main(_):
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.batch_size = FLAGS.batch_size
  config.val_json_file = FLAGS.val_json_file

  # dataset
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      use_fake_data=False,
      max_instances_per_image=config.max_instances_per_image)(
          config)

  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((config.batch_size, 512, 512, 3))
  model.load_weights(FLAGS.checkpoint)

  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file)

  # compute stats for all batches.
  for images, labels in ds:
    cls_outputs, box_outputs = model(images, training=False)
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    evaluator.update_state(labels['groundtruth_data'].numpy(),
Пример #19
0
def main(_):
    """Distributed COCO evaluation of EfficientDet with optional TTA.

    Selects a distribution strategy from FLAGS.strategy, runs inference over
    the validation set for each (height, width, flip) augmentation, collects
    detections and groundtruth per image id, ensembles multi-augmentation
    detections via weighted boxes fusion, and prints overall and per-class
    AP metrics.

    Bug fix: per-class AP values were read with `metrics[i - len(...)]`,
    which indexes backwards into the overall metrics; they follow the
    standard metrics, so the offset must be `i + len(...)` (consistent with
    the non-TTA eval script in this codebase).
    """
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.batch_size = FLAGS.batch_size
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    base_height, base_width = utils.parse_image_size(config['image_size'])

    # Choose the distribution strategy.
    if FLAGS.strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
        logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    elif FLAGS.strategy == 'gpus':
        ds_strategy = tf.distribute.MirroredStrategy()
        logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
        if tf.config.list_physical_devices('GPU'):
            ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
        else:
            ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    # in format (height, width, flip)
    augmentations = []
    if FLAGS.enable_tta:
        for size_offset in (0, 128, 256):
            for flip in (False, True):
                augmentations.append((base_height + size_offset,
                                      base_width + size_offset, flip))
    else:
        augmentations.append((base_height, base_width, False))

    all_detections = []
    all_labels = []
    with ds_strategy.scope():
        # Network
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.build((config.batch_size, base_height, base_width, 3))
        model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

        first_loop = True
        for height, width, flip in augmentations:
            config.image_size = (height, width)
            # dataset
            ds = dataloader.InputReader(
                FLAGS.val_file_pattern,
                is_training=False,
                use_fake_data=False,
                max_instances_per_image=config.max_instances_per_image)(config)

            # create the function once per augmentation, since it closes over
            # the value of config, which gets updated with the new image size
            @tf.function
            def f(images, labels):
                cls_outputs, box_outputs = model(images, training=False)
                return postprocess.generate_detections(config, cls_outputs,
                                                       box_outputs,
                                                       labels['image_scales'],
                                                       labels['source_ids'],
                                                       flip)

            # inference
            for images, labels in ds:
                if flip:
                    images = tf.image.flip_left_right(images)
                detections = f(images, labels)

                all_detections.append(detections)
                # Labels are identical across augmentations; keep one copy.
                if first_loop:
                    all_labels.append(labels)

            first_loop = False

    # collect the giant list of detections into a map from image id to
    # detections
    detections_per_source = dict()
    for batch in all_detections:
        for d in batch:
            img_id = d[0][0]
            if img_id.numpy() in detections_per_source:
                detections_per_source[img_id.numpy()] = tf.concat(
                    [d, detections_per_source[img_id.numpy()]], 0)
            else:
                detections_per_source[img_id.numpy()] = d

    # collect the groundtruth per image id
    groundtruth_per_source = dict()
    for batch in all_labels:
        for img_id, groundtruth in zip(batch['source_ids'],
                                       batch['groundtruth_data']):
            groundtruth_per_source[img_id.numpy()] = groundtruth

    # calculate the AP scores for all the images
    evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)
    for img_id, d in detections_per_source.items():
        if FLAGS.enable_tta:
            d = wbf.ensemble_detections(config, d, len(augmentations))
        evaluator.update_state(
            tf.stack([groundtruth_per_source[img_id]]).numpy(),
            postprocess.transform_detections(tf.stack([d])).numpy())

    # compute the final eval results.
    if evaluator:
        metrics = evaluator.result()
        metric_dict = {}
        for i, name in enumerate(evaluator.metric_names):
            metric_dict[name] = metrics[i]

        label_map = label_util.get_label_map(config.label_map)
        if label_map:
            for i, cid in enumerate(sorted(label_map.keys())):
                name = 'AP_/%s' % label_map[cid]
                # Per-class AP entries follow the standard metrics (fixed
                # from `i - len(...)`, which indexed the wrong entries).
                metric_dict[name] = metrics[i + len(evaluator.metric_names)]
        print(metric_dict)
Пример #20
0
def main(config):
    """Build, train and evaluate an EfficientDet model with Horovod.

    Restores the latest checkpoint from ``config.log_dir`` when one exists,
    otherwise optionally warm-starts from ``config.pretrain_path``.  Training
    is skipped when ``config.evaluate`` is set; evaluation always runs at the
    end.  External interface (single ``config`` argument) is unchanged.
    """
    assert isinstance(
        config.image_size, int
    ), "WARNING: Please make sure that the config.image_size is an integer"
    train_sampler = SurfSampler(config, mode='train')
    valid_sampler = SurfSampler(config, mode='validation')
    valid_data = valid_sampler[0]
    # NOTE(review): test_sampler is never used below; kept in case the
    # constructor has side effects -- confirm before removing.
    test_sampler = SurfSampler(config, mode='test')

    optimizer, learning_rate = train_lib.get_optimizer(config)
    compression = (hvd.Compression.fp16
                   if config.fp16_allreduce else hvd.Compression.none)

    # Horovod: wrap the optimizer so gradients are averaged across workers.
    # (A stray trailing comma previously wrapped this in a 1-tuple that was
    # then unpacked with `opt = opt[0]`.)
    opt = hvd.DistributedOptimizer(optimizer, compression=compression)

    model = efficientdet_keras.EfficientDetNet(config=config)
    # Build and compile once: both the fresh and the checkpoint-restore paths
    # used byte-identical compile arguments, so the duplication is folded.
    model.build((config.batch_size, config.image_size, config.image_size, 3))
    model.compile(
        optimizer=opt,
        # https://www.tensorflow.org/addons/api_docs/python/tfa/losses/SigmoidFocalCrossEntropy
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True,
                                                     label_smoothing=0.2),
        # Also include background in metrics
        metrics=['categorical_accuracy'],
        experimental_run_tf_function=False,
        run_eagerly=True)

    if os.path.isfile(os.path.join(config.log_dir, 'checkpoint')):
        print(
            f"Loading checkpoint from {os.path.join(config.log_dir,'checkpoint')} ..."
        )
        ckpt_path = tf.train.latest_checkpoint(config.log_dir)
        util_keras.restore_ckpt(model, ckpt_path, config.moving_average_decay)
    elif config.pretrain_path:
        # Warm start from pretrained weights, skipping mismatched layers.
        model.load_weights(config.pretrain_path,
                           by_name=True,
                           skip_mismatch=True)
    model.summary()

    # Removed a leftover `pdb.set_trace()` debug breakpoint that halted every
    # run here.

    callbacks = [
        # Horovod: broadcast initial variable states from rank 0 to all other
        # processes so all workers start consistently, whether from random
        # weights or a restored checkpoint.
        hvd.callbacks.BroadcastGlobalVariablesCallback(0),

        # Horovod: average metrics among workers at the end of every epoch.
        # Must be listed before any metrics-based callbacks (ReduceLROnPlateau,
        # TensorBoard, ...).
        hvd.callbacks.MetricAverageCallback(),

        hvd.callbacks.LearningRateScheduleCallback(
            1,  # LR: 1 * learning_rate(epoch)
            learning_rate,
            start_epoch=0,
            end_epoch=config.num_epochs,
            staircase=True,
            momentum_correction=True,
            steps_per_epoch=config.steps_per_epoch)
    ]
    # Horovod: write logs on worker 0 only.
    verbose = 1 if hvd.rank() == 0 else 0
    if not config.evaluate:
        cb_options = train_lib.get_callbacks(config,
                                             train_sampler,
                                             valid_sampler,
                                             profile=False)
        callbacks.extend(cb_options)
        model.fit(train_sampler,
                  epochs=config.num_epochs,
                  steps_per_epoch=config.steps_per_epoch,
                  validation_data=valid_data,
                  callbacks=callbacks,
                  use_multiprocessing=False,
                  validation_freq=1,
                  verbose=verbose)

        print("Finished training\n")

        print("Starting Evaluation...")

    evaluate(model, config, valid_sampler)
Пример #21
0
def main(_):
    """Run Grad-CAM on a single image and save annotated detections.

    Loads ``FLAGS.image_path``, computes the gradient of either the class or
    box head (selected by ``FLAGS.gradient_type``) w.r.t. the last backbone
    layer, renders the Grad-CAM heatmap via ``grad_cam``, then writes an
    annotated detection image per input to ``FLAGS.output_dir``.
    """
    source_img = Image.open(FLAGS.image_path)
    imgs = [np.array(source_img)]
    # Create model config.
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.is_training_bn = False
    config.nms_configs.score_thresh = 0.4
    config.nms_configs.max_output_size = 100
    config.override(FLAGS.hparams)

    # Use 'mixed_float16' if running on GPUs.
    policy = tf.keras.mixed_precision.experimental.Policy('float32')
    tf.keras.mixed_precision.experimental.set_policy(policy)
    tf.config.experimental_run_functions_eagerly(FLAGS.debug)

    # Create model and trace it once so layer outputs are available.
    model = efficientdet_keras.EfficientDetNet(config=config)
    target_size = utils.parse_image_size(config.image_size) + (3, )
    model_inputs = tf.keras.Input(shape=target_size)
    model(model_inputs, False)
    model.summary()

    model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

    # New model exposing the intermediate tensors needed for Grad-CAM.
    effdet_model = tf.keras.Model(
        inputs=model.input,
        outputs=[
            model.get_layer(name='class_net').output,
            model.get_layer(name='box_net').output,
            model.backbone.layers[-3].output  # last layer
        ])

    # Only used for its pre- and post-processing helpers.
    effdet_methods = efficientdet_keras.EfficientDetModel(config=config)

    # Input image preprocessing.
    imgs = tf.convert_to_tensor(imgs)
    inputs, scales = effdet_methods._preprocessing(imgs, config.image_size,
                                                   'infer')

    with tf.GradientTape() as tape:
        # Compute activations of the last conv layer and make the tape watch it
        cls_outputs, box_outputs, efficientnet_last_layer = effdet_model(
            inputs, False)

    # Gradient of the selected head w.r.t. the backbone activations.
    grads = None
    if FLAGS.gradient_type == 'cls':
        grads = tape.gradient(cls_outputs, efficientnet_last_layer)
    elif FLAGS.gradient_type == 'box':
        grads = tape.gradient(box_outputs, efficientnet_last_layer)

    # Identity check (`is not None`), not `!= None`; fail loudly on an
    # unsupported gradient type.
    assert grads is not None, (
        'unsupported gradient_type: %s' % FLAGS.gradient_type)
    grad_cam(grads, efficientnet_last_layer[0], source_img, imgs[0],
             FLAGS.gradient_type)

    ### bounding box visualization ###
    boxes, scores, classes, valid_len = effdet_methods._postprocess(
        cls_outputs, box_outputs, scales)

    # Visualize results.
    for i, raw_img in enumerate(imgs):
        length = valid_len[i]
        # `np.int` was removed in NumPy 1.24; the builtin `int` is equivalent.
        annotated = inference.visualize_image(
            raw_img,
            boxes[i].numpy()[:length],
            classes[i].numpy().astype(int)[:length],
            scores[i].numpy()[:length],
            min_score_thresh=config.nms_configs.score_thresh,
            max_boxes_to_draw=config.nms_configs.max_output_size)
        output_image_path = os.path.join(FLAGS.output_dir, str(i) + '.jpg')
        Image.fromarray(annotated).save(output_image_path)
        print('writing annotated image to ', output_image_path)
Пример #22
0
def main(_):
    """Evaluate an EfficientDet checkpoint on COCO, with optional TTA.

    Runs the model over the validation set once per augmentation (scale and
    horizontal-flip combinations when ``FLAGS.enable_tta`` is set, otherwise a
    single pass), accumulates detections per source image id, then computes
    COCO metrics once over the merged detections and prints them.

    Bug fixed: the evaluator was previously re-created and re-fed inside the
    per-batch loop, pairing every accumulated detection with only the current
    batch's groundtruth.  Groundtruth is now accumulated per source id and the
    evaluator runs exactly once, matching the established pattern elsewhere in
    this codebase.
    """
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.batch_size = FLAGS.batch_size
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    base_height, base_width = utils.parse_image_size(config['image_size'])

    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((config.batch_size, base_height, base_width, 3))
    model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

    @tf.function
    def f(imgs, labels, flip):
        cls_outputs, box_outputs = model(imgs, training=False)
        return postprocess.generate_detections(config, cls_outputs,
                                               box_outputs,
                                               labels['image_scales'],
                                               labels['source_ids'], flip)

    # Augmentations in format (height, width, flip).
    augmentations = []
    if FLAGS.enable_tta:
        for size_offset in (0, 128, 256):
            for flip in (False, True):
                augmentations.append((base_height + size_offset,
                                      base_width + size_offset, flip))
    else:
        augmentations.append((base_height, base_width, False))

    detections_per_source = dict()
    groundtruth_per_source = dict()
    for height, width, flip in augmentations:
        config.image_size = (height, width)
        # dataset
        ds = dataloader.InputReader(
            FLAGS.val_file_pattern,
            is_training=False,
            use_fake_data=False,
            max_instances_per_image=config.max_instances_per_image)(config)

        # compute stats for all batches.
        total_steps = FLAGS.eval_samples // FLAGS.batch_size
        progress = tf.keras.utils.Progbar(total_steps)
        for i, (images, labels) in enumerate(ds):
            progress.update(i, values=None)
            if i > total_steps:
                break

            if flip:
                images = tf.image.flip_left_right(images)
            detections = f(images, labels, flip)

            # Merge detections across augmentations keyed by source image id;
            # record groundtruth once per id (identical across augmentations).
            for img_id, d, gt in zip(labels['source_ids'], detections,
                                     labels['groundtruth_data']):
                key = img_id.numpy()
                if key in detections_per_source:
                    detections_per_source[key] = tf.concat(
                        [d, detections_per_source[key]], 0)
                else:
                    detections_per_source[key] = d
                groundtruth_per_source[key] = gt

    # compute the final eval results.
    if detections_per_source:
        evaluator = coco_metric.EvaluationMetric(
            filename=config.val_json_file)
        for img_id, d in detections_per_source.items():
            if FLAGS.enable_tta:
                d = wbf.ensemble_detections(config, d, len(augmentations))
            evaluator.update_state(
                tf.stack([groundtruth_per_source[img_id]]).numpy(),
                postprocess.transform_detections(tf.stack([d])).numpy())

        metrics = evaluator.result()
        metric_dict = {}
        for i, name in enumerate(evaluator.metric_names):
            metric_dict[name] = metrics[i]

        label_map = label_util.get_label_map(config.label_map)
        if label_map:
            for i, cid in enumerate(sorted(label_map.keys())):
                name = 'AP_/%s' % label_map[cid]
                metric_dict[name] = metrics[i - len(evaluator.metric_names)]
        print(metric_dict)