def test_model_variables(self):
        input_shape = (1, 512, 512, 3)
        model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
        model.build(input_shape)
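        # Variable names from the model built in eager mode; compared below
        # against the graph-mode Keras model and the legacy architecture.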
        eager_train_vars = sorted(
            [var.name for var in model.trainable_variables])
        eager_model_vars = sorted([var.name for var in model.variables])
        with tf.Graph().as_default():
            # Rebuild the same model in graph mode to collect its variable names.
            model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
            model.build(input_shape)
            keras_train_vars = sorted(
                [var.name for var in model.trainable_variables])
            keras_model_vars = sorted([var.name for var in model.variables])
        with tf.Graph().as_default():
            feats = tf.ones([1, 512, 512, 3])
            legacy_arch.efficientdet(feats, 'efficientdet-d0')
            legacy_train_vars = sorted(
                [var.name for var in tf.trainable_variables()])
            legacy_model_vars = sorted(
                [var.name for var in tf.global_variables()])

        self.assertEqual(keras_train_vars, legacy_train_vars)
        self.assertEqual(keras_model_vars, legacy_model_vars)
        self.assertEqual(eager_train_vars, legacy_train_vars)
        self.assertEqual(eager_model_vars, legacy_model_vars)
Example 2
def main(_):
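    # NOTE: `dataset`, `info`, `load_image_train`, `load_image_test` and
    # `create_mask` are assumed to be defined elsewhere in the surrounding
    # module (a TFDS-based segmentation setup).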
    train_examples = info.splits['train'].num_examples
    batch_size = 8
    steps_per_epoch = train_examples // batch_size

    train = dataset['train'].map(
        load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    test = dataset['test'].map(load_image_test)

    train_dataset = train.cache().shuffle(1000).batch(batch_size).repeat()
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)
    test_dataset = test.batch(batch_size)

    model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
    model.build((1, 512, 512, 3))
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])

    val_subsplits = 5
    val_steps = info.splits['test'].num_examples // batch_size // val_subsplits
    model.fit(train_dataset,
              epochs=20,
              steps_per_epoch=steps_per_epoch,
              validation_steps=val_steps,
              validation_data=test_dataset,
              callbacks=[])

    model.save_weights('./test/segmentation')

    print(create_mask(model(tf.ones((1, 512, 512, 3)), False)))
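
A minimal follow-up sketch (not part of the scraped example): restore the weights saved above into a fresh model before running inference again.

model = efficientdet_keras.EfficientDetNet('efficientdet-d0')
model.build((1, 512, 512, 3))
model.load_weights('./test/segmentation')
print(create_mask(model(tf.ones((1, 512, 512, 3)), False)))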
Example 3
def model_fn(inputs):
    # `params` is captured from the enclosing scope (a dict of model hparams).
    model = efficientdet_keras.EfficientDetNet(
        config=hparams_config.Config(params))
    cls_out_list, box_out_list = model(inputs, params['is_training_bn'])
    # Convert the per-level output lists into dicts keyed by feature level.
    cls_outputs, box_outputs = {}, {}
    for i in range(params['min_level'], params['max_level'] + 1):
        cls_outputs[i] = cls_out_list[i - params['min_level']]
        box_outputs[i] = box_out_list[i - params['min_level']]
    return cls_outputs, box_outputs
Example 4

def test_model_output(self):
        inputs_shape = [1, 512, 512, 3]
        config = hparams_config.get_efficientdet_config('efficientdet-d0')
        config.heads = ['object_detection', 'segmentation']
        tmp_ckpt = os.path.join(tempfile.mkdtemp(), 'ckpt')
        with tf.Session(graph=tf.Graph()) as sess:
            feats = tf.ones(inputs_shape)
            tf.random.set_random_seed(SEED)
            model = efficientdet_keras.EfficientDetNet(config=config)
            outputs = model(feats, True)
            sess.run(tf.global_variables_initializer())
            keras_class_out, keras_box_out, keras_seg_out = sess.run(outputs)
            model.save_weights(tmp_ckpt)
        with tf.Session(graph=tf.Graph()) as sess:
            feats = tf.ones(inputs_shape)
            tf.random.set_random_seed(SEED)
            feats = legacy_arch.efficientdet(feats, config=config)
            sess.run(tf.global_variables_initializer())
            legacy_class_out, legacy_box_out = sess.run(feats)
        for i in range(3, 8):
            self.assertAllClose(keras_class_out[i - 3],
                                legacy_class_out[i],
                                rtol=1e-4,
                                atol=1e-4)
            self.assertAllClose(keras_box_out[i - 3],
                                legacy_box_out[i],
                                rtol=1e-4,
                                atol=1e-4)

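        # Reload the checkpoint saved above in eager mode and check that the
        # outputs still match the legacy graph-mode results.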
        feats = tf.ones(inputs_shape)
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.load_weights(tmp_ckpt)
        eager_class_out, eager_box_out, eager_seg_out = model(feats, True)
        for i in range(3, 8):
            self.assertAllClose(eager_class_out[i - 3],
                                legacy_class_out[i],
                                rtol=1e-4,
                                atol=1e-4)
            self.assertAllClose(eager_box_out[i - 3],
                                legacy_box_out[i],
                                rtol=1e-4,
                                atol=1e-4)
        self.assertAllClose(eager_seg_out, keras_seg_out, rtol=1e-4, atol=1e-4)
Example 5
def model_arch(feats, model_name=None, **kwargs):
    """Construct a model arch for keras models."""
    config = hparams_config.get_efficientdet_config(model_name)
    config.override(kwargs)
    model = efficientdet_keras.EfficientDetNet(config=config)
    cls_out_list, box_out_list = model(feats, training=False)
    # Convert the list of model outputs to a dictionary with key=level.
    assert len(cls_out_list) == config.max_level - config.min_level + 1
    assert len(box_out_list) == config.max_level - config.min_level + 1
    cls_outputs, box_outputs = {}, {}
    for i in range(config.min_level, config.max_level + 1):
        cls_outputs[i] = cls_out_list[i - config.min_level]
        box_outputs[i] = box_out_list[i - config.min_level]
    return cls_outputs, box_outputs
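
A quick call sketch (hypothetical input, not part of the scraped example): run a dummy batch through model_arch and inspect the per-level keys.

feats = tf.ones([1, 512, 512, 3])
cls_outputs, box_outputs = model_arch(feats, 'efficientdet-d0')
print(sorted(cls_outputs.keys()))  # Feature levels, e.g. [3, 4, 5, 6, 7] for d0.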
Example 6

def test_irregular_shape(self):
    config = hparams_config.get_efficientdet_config('efficientdet-d0')
    config.image_size = '896x1600'
    model = efficientdet_keras.EfficientDetNet(config=config)
    model(tf.ones([1, 896, 1600, 3]), False)
    model(tf.ones([1, 499, 333, 3]), False)
Example 7
def main(_):
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.batch_size = FLAGS.batch_size
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    base_height, base_width = utils.parse_image_size(config['image_size'])

    # Network
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((config.batch_size, base_height, base_width, 3))
    model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

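    # Wrap inference plus post-processing in a tf.function so it runs as a graph.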
    @tf.function
    def f(imgs, labels, flip):
        cls_outputs, box_outputs = model(imgs, training=False)
        return postprocess.generate_detections(config, cls_outputs,
                                               box_outputs,
                                               labels['image_scales'],
                                               labels['source_ids'], flip)

    # in format (height, width, flip)
    augmentations = []
    if FLAGS.enable_tta:
        for size_offset in (0, 128, 256):
            for flip in (False, True):
                augmentations.append((base_height + size_offset,
                                      base_width + size_offset, flip))
    else:
        augmentations.append((base_height, base_width, False))

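    # Detections are accumulated per source image across all augmentation passes
    # so that they can later be ensembled (e.g. with weighted boxes fusion).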
    evaluator = None
    detections_per_source = dict()
    for height, width, flip in augmentations:
        config.image_size = (height, width)
        # dataset
        ds = dataloader.InputReader(
            FLAGS.val_file_pattern,
            is_training=False,
            use_fake_data=False,
            max_instances_per_image=config.max_instances_per_image)(config)

        # compute stats for all batches.
        total_steps = FLAGS.eval_samples // FLAGS.batch_size
        progress = tf.keras.utils.Progbar(total_steps)
        for i, (images, labels) in enumerate(ds):
            progress.update(i, values=None)
            if i > total_steps:
                break

            if flip:
                images = tf.image.flip_left_right(images)
            detections = f(images, labels, flip)

            for img_id, d in zip(labels['source_ids'], detections):
                if img_id.numpy() in detections_per_source:
                    detections_per_source[img_id.numpy()] = tf.concat(
                        [d, detections_per_source[img_id.numpy()]], 0)
                else:
                    detections_per_source[img_id.numpy()] = d

            evaluator = coco_metric.EvaluationMetric(
                filename=config.val_json_file)
            for d in detections_per_source.values():
                if FLAGS.enable_tta:
                    d = wbf.ensemble_detections(config, d, len(augmentations))
                evaluator.update_state(
                    labels['groundtruth_data'].numpy(),
                    postprocess.transform_detections(tf.stack([d])).numpy())

    # compute the final eval results.
    if evaluator:
        metrics = evaluator.result()
        metric_dict = {}
        for i, name in enumerate(evaluator.metric_names):
            metric_dict[name] = metrics[i]

        label_map = label_util.get_label_map(config.label_map)
        if label_map:
            for i, cid in enumerate(sorted(label_map.keys())):
                name = 'AP_/%s' % label_map[cid]
                metric_dict[name] = metrics[i - len(evaluator.metric_names)]
        print(metric_dict)