Ejemplo n.º 1
0
    def test_eager_output(self):
        """Eager-mode outputs match TF1 graph-mode outputs for the same weights.

        Builds EfficientDet-D0 (detection + segmentation heads) inside a
        TF1 session, saves its weights, reloads them into a fresh eager
        model, and compares the per-level class/box outputs and the
        segmentation output within a 1e-4 tolerance.
        """
        inputs_shape = [1, 512, 512, 3]
        config = hparams_config.get_efficientdet_config('efficientdet-d0')
        # Enable both heads so all three output groups are produced.
        config.heads = ['object_detection', 'segmentation']
        tmp_ckpt = os.path.join(tempfile.mkdtemp(), 'ckpt2')

        # Graph-mode reference: seed before variable creation, build, init,
        # run once, and checkpoint the weights for the eager run below.
        with tf.Session(graph=tf.Graph()) as sess:
            feats = tf.ones(inputs_shape)
            tf.random.set_random_seed(SEED)
            model = efficientdet_keras.EfficientDetNet(config=config)
            outputs = model(feats, True)  # second arg presumably training flag — confirm against EfficientDetNet.call
            sess.run(tf.global_variables_initializer())
            keras_class_out, keras_box_out, keras_seg_out = sess.run(outputs)
            model.save_weights(tmp_ckpt)

        # Eager run with the same weights restored from the checkpoint.
        feats = tf.ones(inputs_shape)
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.load_weights(tmp_ckpt)
        eager_class_out, eager_box_out, eager_seg_out = model(feats, True)
        # 5 feature levels — presumably min_level..max_level of the d0 config.
        for i in range(5):
            self.assertAllClose(eager_class_out[i],
                                keras_class_out[i],
                                rtol=1e-4,
                                atol=1e-4)
            self.assertAllClose(eager_box_out[i],
                                keras_box_out[i],
                                rtol=1e-4,
                                atol=1e-4)
        self.assertAllClose(eager_seg_out, keras_seg_out, rtol=1e-4, atol=1e-4)
Ejemplo n.º 2
0
 def test_model_variables(self):
     """Keras (eager and graph) and legacy models create identical variables.

     Builds efficientdet-d0 three ways — eager keras, graph-mode keras,
     and the legacy arch — and compares the sorted trainable/global
     variable names, plus the UPDATE_OPS collections of the two graph
     builds (presumably batch-norm moving-average updates — confirm).
     """
     input_shape = (1, 512, 512, 3)
     # Eager build: variable names come straight from the model object.
     model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
     model.build(input_shape)
     eager_train_vars = sorted(
         [var.name for var in model.trainable_variables])
     eager_model_vars = sorted([var.name for var in model.variables])
     # Graph build of the same keras model: names read from TF1 collections.
     with tf.Graph().as_default():
         feats = tf.ones([1, 512, 512, 3])
         model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
         model(feats, True)
         keras_train_vars = sorted(
             [var.name for var in tf.trainable_variables()])
         keras_model_vars = sorted(
             [var.name for var in tf.global_variables()])
         keras_update_ops = [
             op.name for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
         ]
     # Graph build of the legacy (non-keras) architecture.
     with tf.Graph().as_default():
         feats = tf.ones([1, 512, 512, 3])
         legacy_arch.efficientdet(feats, 'efficientdet-d0')
         legacy_train_vars = sorted(
             [var.name for var in tf.trainable_variables()])
         legacy_model_vars = sorted(
             [var.name for var in tf.global_variables()])
         legacy_update_ops = [
             op.name for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
         ]
     # All three builds must agree on variable names; the two graph
     # builds must also agree on update ops.
     self.assertEqual(keras_train_vars, legacy_train_vars)
     self.assertEqual(keras_model_vars, legacy_model_vars)
     self.assertEqual(eager_train_vars, legacy_train_vars)
     self.assertEqual(eager_model_vars, legacy_model_vars)
     self.assertAllEqual(keras_update_ops, legacy_update_ops)
Ejemplo n.º 3
0
    def test_model_output(self):
        """Keras model exactly matches legacy arch outputs and input grads.

        Runs both implementations in separate TF1 graphs with the same
        random seed, then compares per-level class/box outputs and the
        d(output)/d(input) gradients element-for-element.
        """
        inputs_shape = [1, 512, 512, 3]
        config = hparams_config.get_efficientdet_config('efficientdet-d0')
        config.heads = ['object_detection', 'segmentation']
        # Keras model in its own graph/session.
        with tf.Session(graph=tf.Graph()) as sess:
            feats = tf.ones(inputs_shape)
            tf.random.set_random_seed(SEED)  # same seed as the legacy run below
            model = efficientdet_keras.EfficientDetNet(config=config)
            outputs = model(feats, True)
            sess.run(tf.global_variables_initializer())
            keras_class_out, keras_box_out, _ = sess.run(outputs)
            # Gradient of every output w.r.t. the input tensor.
            grads = tf.nest.map_structure(
                lambda output: tf.gradients(output, feats), outputs)
            keras_class_grads, keras_box_grads, _ = sess.run(grads)
        # Legacy arch in a second, independent graph/session.
        with tf.Session(graph=tf.Graph()) as sess:
            feats = tf.ones(inputs_shape)
            tf.random.set_random_seed(SEED)
            outputs = legacy_arch.efficientdet(feats, config=config)
            sess.run(tf.global_variables_initializer())
            legacy_class_out, legacy_box_out = sess.run(outputs)
            grads = tf.nest.map_structure(
                lambda output: tf.gradients(output, feats), outputs)
            legacy_class_grads, legacy_box_grads = sess.run(grads)

        # Keras outputs are 0-based lists while legacy outputs are indexed
        # 3..7 — presumably keyed by feature level; confirm in legacy_arch.
        for i in range(3, 8):
            self.assertAllEqual(keras_class_out[i - 3], legacy_class_out[i])
            self.assertAllEqual(keras_box_out[i - 3], legacy_box_out[i])
            self.assertAllEqual(keras_class_grads[i - 3],
                                legacy_class_grads[i])
            self.assertAllEqual(keras_box_grads[i - 3], legacy_box_grads[i])
Ejemplo n.º 4
0
def main(_):
    """Train an EfficientDet-D0 segmentation head on Oxford-IIIT Pet.

    Loads the dataset from TFDS, builds the train/test input pipelines,
    fits the model for 20 epochs, saves the weights, and prints a mask
    predicted for an all-ones dummy image.
    """
    dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)
    batch_size = 8
    steps_per_epoch = info.splits['train'].num_examples // batch_size

    # Input pipelines: cached/shuffled/repeated train set, plain test set.
    train_split = dataset['train'].map(
        load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    test_split = dataset['test'].map(load_image_test)
    train_ds = (train_split.cache()
                .shuffle(1000)
                .batch(batch_size)
                .repeat()
                .prefetch(buffer_size=tf.data.experimental.AUTOTUNE))
    test_ds = test_split.batch(batch_size)

    # Segmentation-only EfficientDet-D0.
    config = hparams_config.get_efficientdet_config('efficientdet-d0')
    config.heads = ['segmentation']
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((1, 512, 512, 3))
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])

    # Validation covers 1/5 of the test split per epoch.
    val_subsplits = 5
    val_steps = info.splits['test'].num_examples // batch_size // val_subsplits
    model.fit(train_ds,
              epochs=20,
              steps_per_epoch=steps_per_epoch,
              validation_steps=val_steps,
              validation_data=test_ds,
              callbacks=[])

    model.save_weights('./testdata/segmentation')

    # Sanity-check inference on a dummy input.
    print(create_mask(model(tf.ones((1, 512, 512, 3)), False)))
Ejemplo n.º 5
0
 def model_fn(inputs):
   """Build an EfficientDet network and key its outputs by feature level.

   `params` is taken from the enclosing scope. The network returns one
   class/box output per level as 0-based lists; this re-keys them into
   {level: output} dicts spanning min_level..max_level.
   """
   model = efficientdet_keras.EfficientDetNet(
       config=hparams_config.Config(params))
   cls_out_list, box_out_list = model(inputs, params['is_training_bn'])
   min_level = params['min_level']
   levels = range(min_level, params['max_level'] + 1)
   cls_outputs = {level: cls_out_list[level - min_level] for level in levels}
   box_outputs = {level: box_out_list[level - min_level] for level in levels}
   return cls_outputs, box_outputs
Ejemplo n.º 6
0
    def test_eager_output(self):
        """tf.function outputs and gradients match TF1 graph-mode exactly.

        Builds EfficientDet-D0 (detection head only) inside a TF1
        session, records outputs and d(output)/d(input) gradients, saves
        the weights, then reruns in a tf.function with GradientTape and
        asserts bitwise-equal results.
        """
        inputs_shape = [1, 512, 512, 3]
        config = hparams_config.get_efficientdet_config('efficientdet-d0')
        config.heads = ['object_detection']
        tmp_ckpt = os.path.join(tempfile.mkdtemp(), 'ckpt2')

        # Graph-mode reference run.
        with tf.Session(graph=tf.Graph()) as sess:
            feats = tf.ones(inputs_shape)
            tf.random.set_random_seed(SEED)
            model = efficientdet_keras.EfficientDetNet(config=config)
            outputs = model(feats, True)
            # Gradient of every output w.r.t. the input tensor.
            grads = tf.nest.map_structure(
                lambda output: tf.gradients(output, feats), outputs)
            sess.run(tf.global_variables_initializer())
            keras_class_out, keras_box_out = sess.run(outputs)
            # NOTE(review): these hold graph-mode *keras* gradients, not
            # legacy-arch ones — the 'legacy_' prefix is misleading.
            legacy_class_grads, legacy_box_grads = sess.run(grads)
            model.save_weights(tmp_ckpt)

        # Eager/tf.function run with the same weights restored.
        feats = tf.ones(inputs_shape)
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.build(inputs_shape)
        model.load_weights(tmp_ckpt)

        @tf.function
        def _run(feats):
            # persistent=True: the tape is used once per output structure.
            with tf.GradientTape(persistent=True) as tape:
                tape.watch(feats)  # feats is a constant, not a variable
                eager_class_out, eager_box_out = model(feats, True)
                class_grads, box_grads = tf.nest.map_structure(
                    lambda output: tape.gradient(output, feats),
                    [eager_class_out, eager_box_out])
            return eager_class_out, eager_box_out, class_grads, box_grads

        eager_class_out, eager_box_out, class_grads, box_grads = _run(feats)
        # 5 feature levels; tf.gradients returned a 1-element list per
        # output, hence the [0] on the graph-mode side.
        for i in range(5):
            self.assertAllEqual(eager_class_out[i], keras_class_out[i])
            self.assertAllEqual(eager_box_out[i], keras_box_out[i])
            self.assertAllEqual(class_grads[i], legacy_class_grads[i][0])
            self.assertAllEqual(box_grads[i], legacy_box_grads[i][0])
Ejemplo n.º 7
0
 def model_arch(feats, model_name=None, **kwargs):
   """Build a keras EfficientDet and return {level: output} dictionaries.

   Any keyword arguments override the named model's default hparams.
   """
   config = hparams_config.get_efficientdet_config(model_name)
   config.override(kwargs)
   model = efficientdet_keras.EfficientDetNet(config=config)
   cls_out_list, box_out_list = model(feats, training=False)
   # The network must emit exactly one output per feature level.
   num_levels = config.max_level - config.min_level + 1
   assert len(cls_out_list) == num_levels
   assert len(box_out_list) == num_levels
   # Re-key the 0-based per-level lists by their actual level number.
   levels = range(config.min_level, config.max_level + 1)
   cls_outputs = {lvl: cls_out_list[lvl - config.min_level] for lvl in levels}
   box_outputs = {lvl: box_out_list[lvl - config.min_level] for lvl in levels}
   return cls_outputs, box_outputs
Ejemplo n.º 8
0
 def test_hub_model(self):
     """Hub-backed trainer and a local model agree once weights are copied.

     Runs the TF-Hub lite0 feature-vector model, copies every variable
     by name into a locally built EfficientDet-lite0, and asserts both
     produce identical class/box outputs for the same random image.
     """
     inputs = tf.random.uniform((1, 320, 320, 3))
     keras_model = efficientdet_keras.EfficientDetNet('efficientdet-lite0')
     keras_model.config.model_dir = os.path.join(tempfile.mkdtemp(), 'ckpt')
     base_model = train_lib.EfficientDetNetTrainHub(
         keras_model.config,
         "https://tfhub.dev/tensorflow/efficientdet/lite0/feature-vector/1")
     cls_outputs, box_outputs = tf.function(base_model)(inputs,
                                                        training=False)
     # Copy the hub model's weights into the local model, matched by name.
     keras_model.build(inputs.shape)
     hub_vars = {var.name: var for var in base_model.variables}
     for var in keras_model.variables:
         var.assign(hub_vars[var.name].numpy())
     cls_outputs2, box_outputs2 = tf.function(keras_model)(inputs, False)
     pairs = zip(cls_outputs, box_outputs, cls_outputs2, box_outputs2)
     for hub_cls, hub_box, local_cls, local_box in pairs:
         self.assertAllEqual(hub_cls, local_cls)
         self.assertAllEqual(hub_box, local_box)
Ejemplo n.º 9
0
 def build(self, params_override=None):
     """Instantiate the model and restore weights from self.ckpt_path.

     Args:
       params_override: optional dict merged over a deep copy of
         self.params before the config is built.
     """
     params = copy.deepcopy(self.params)
     if params_override:
         params.update(params_override)
     config = hparams_config.get_efficientdet_config(self.model_name)
     config.override(params)
     # only_network selects the bare network; otherwise the full model
     # (presumably including pre/post-processing — confirm in keras lib).
     model_cls = (efficientdet_keras.EfficientDetNet if self.only_network
                  else efficientdet_keras.EfficientDetModel)
     self.model = model_cls(config=config)
     image_size = utils.parse_image_size(params['image_size'])
     self.model.build((self.batch_size, *image_size, 3))
     util_keras.restore_ckpt(self.model,
                             self.ckpt_path,
                             self.params['moving_average_decay'],
                             skip_mismatch=False)
     if self.debug:
         # Run tf.functions eagerly so the model is step-debuggable.
         tf.config.run_functions_eagerly(self.debug)
Ejemplo n.º 10
0
 def test_irregular_shape(self):
     """A non-square 896x1600 config runs on both the configured shape
     and an unrelated odd-sized input without error."""
     config = hparams_config.get_efficientdet_config('efficientdet-d0')
     config.image_size = '896x1600'
     net = efficientdet_keras.EfficientDetNet(config=config)
     for shape in ([1, 896, 1600, 3], [1, 499, 333, 3]):
         net(tf.ones(shape), False)
Ejemplo n.º 11
0
def main(_):
    """Evaluate an EfficientDet checkpoint on COCO-style validation data.

    Restores the latest checkpoint from FLAGS.model_dir, runs detection
    over the validation dataset under a TPU/GPU/CPU distribution
    strategy, accumulates COCO metrics, and prints the results.
    """
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Pick the distribution strategy; TPU requires cluster connection and
    # system initialization before the strategy is created.
    if config.strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
        logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    elif config.strategy == 'gpus':
        ds_strategy = tf.distribute.MirroredStrategy()
        logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
        # Single-device fallback: first GPU if present, else CPU.
        if tf.config.list_physical_devices('GPU'):
            ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
        else:
            ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    with ds_strategy.scope():
        # Network
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.build((None, *config.image_size, 3))
        util_keras.restore_ckpt(model,
                                tf.train.latest_checkpoint(FLAGS.model_dir),
                                config.moving_average_decay,
                                skip_mismatch=False)

        @tf.function
        def model_fn(images, labels):
            # Runs inference, post-processes to detections, and feeds the
            # evaluator via a side-effecting numpy_function. `evaluator`
            # is defined below; the closure binds it late, before the
            # first call.
            cls_outputs, box_outputs = model(images, training=False)
            detections = postprocess.generate_detections(
                config, cls_outputs, box_outputs, labels['image_scales'],
                labels['source_ids'])
            tf.numpy_function(evaluator.update_state, [
                labels['groundtruth_data'],
                postprocess.transform_detections(detections)
            ], [])

        # Evaluator for AP calculation.
        label_map = label_util.get_label_map(config.label_map)
        evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                                 label_map=label_map)

        # dataset
        batch_size = FLAGS.batch_size  # global batch size.
        ds = dataloader.InputReader(
            FLAGS.val_file_pattern,
            is_training=False,
            max_instances_per_image=config.max_instances_per_image)(
                config, batch_size=batch_size)
        if FLAGS.eval_samples:
            # ceil(eval_samples / batch_size) batches.
            ds = ds.take((FLAGS.eval_samples + batch_size - 1) // batch_size)
        ds = ds_strategy.experimental_distribute_dataset(ds)

        # evaluate all images.
        eval_samples = FLAGS.eval_samples or 5000  # default presumably COCO val size
        pbar = tf.keras.utils.Progbar(
            (eval_samples + batch_size - 1) // batch_size)
        for i, (images, labels) in enumerate(ds):
            ds_strategy.run(model_fn, (images, labels))
            pbar.update(i)  # NOTE(review): 0-based, so the bar ends one step short

    # compute the final eval results.
    # Metrics come back as a flat list aligned with evaluator.metric_names,
    # followed by per-class APs when a label map is configured.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)