def main(_):
    """Train an EfficientDet-D0 segmentation head and print a sample mask."""
    batch_size = 8
    num_train = info.splits['train'].num_examples
    steps_per_epoch = num_train // batch_size

    # Input pipelines: parallel-decoded training set, plain-mapped test set.
    train_mapped = dataset['train'].map(
        load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    test_mapped = dataset['test'].map(load_image_test)

    train_pipeline = (train_mapped.cache()
                      .shuffle(1000)
                      .batch(batch_size)
                      .repeat()
                      .prefetch(buffer_size=tf.data.experimental.AUTOTUNE))
    test_pipeline = test_mapped.batch(batch_size)

    # Build the network with only a segmentation head attached.
    config = hparams_config.get_efficientdet_config('efficientdet-d0')
    config.heads = ['segmentation']
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((1, 512, 512, 3))
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])

    # Validation covers 1/5 of the test split per epoch.
    val_subsplits = 5
    val_steps = info.splits['test'].num_examples // batch_size // val_subsplits
    model.fit(train_pipeline,
              epochs=20,
              steps_per_epoch=steps_per_epoch,
              validation_steps=val_steps,
              validation_data=test_pipeline,
              callbacks=[])

    model.save_weights('./test/segmentation')

    # Sanity check: run one all-ones image through the trained net.
    print(create_mask(model(tf.ones((1, 512, 512, 3)), False)))
Example #2
0
 def model_fn(inputs):
   """Run EfficientDetNet and key its per-level outputs by feature level."""
   model = efficientdet_keras.EfficientDetNet(
       config=hparams_config.Config(params))
   cls_list, box_list = model(inputs, params['is_training_bn'])
   min_level = params['min_level']
   levels = range(min_level, params['max_level'] + 1)
   # The model returns one tensor per level, lowest level first; re-key by level.
   cls_outputs = {level: cls_list[level - min_level] for level in levels}
   box_outputs = {level: box_list[level - min_level] for level in levels}
   return cls_outputs, box_outputs
Example #3
0
 def model_arch(feats, model_name=None, **kwargs):
     """Construct a model arch for keras models."""
     config = hparams_config.get_efficientdet_config(model_name)
     config.override(kwargs)
     net = efficientdet_keras.EfficientDetNet(config=config)
     cls_list, box_list = net(feats, training=False)
     # Convert the per-level output lists to dictionaries keyed by level.
     num_levels = config.max_level - config.min_level + 1
     assert len(cls_list) == num_levels
     assert len(box_list) == num_levels
     levels = range(config.min_level, config.max_level + 1)
     cls_outputs = {lvl: cls_list[lvl - config.min_level] for lvl in levels}
     box_outputs = {lvl: box_list[lvl - config.min_level] for lvl in levels}
     return cls_outputs, box_outputs
Example #4
0
 def build(self, params_override=None):
     """Build model and restore checkpoints."""
     # Start from a copy so callers' overrides never mutate self.params.
     params = copy.deepcopy(self.params)
     if params_override:
         params.update(params_override)
     config = hparams_config.get_efficientdet_config(self.model_name)
     config.override(params)
     # Network-only vs. full model (with pre/post-processing) selection.
     model_cls = (efficientdet_keras.EfficientDetNet if self.only_network
                  else efficientdet_keras.EfficientDetModel)
     self.model = model_cls(config=config)
     image_size = utils.parse_image_size(params['image_size'])
     self.model.build((self.batch_size, *image_size, 3))
     util_keras.restore_ckpt(self.model,
                             self.ckpt_path,
                             self.params['moving_average_decay'],
                             skip_mismatch=False)
Example #5
0
def main(_):
    """Evaluate an EfficientDet checkpoint on a COCO-style dataset and print AP metrics."""
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    config.val_json_file = FLAGS.val_json_file
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    config.drop_remainder = False  # eval all examples w/o drop.
    config.image_size = utils.parse_image_size(config['image_size'])

    # Pick a distribution strategy: TPU cluster, all local GPUs, or one device.
    if config.strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
        logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    elif config.strategy == 'gpus':
        ds_strategy = tf.distribute.MirroredStrategy()
        logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
    else:
        # Fall back to a single GPU if one exists, else CPU.
        if tf.config.list_physical_devices('GPU'):
            ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
        else:
            ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    with ds_strategy.scope():
        # Network
        model = efficientdet_keras.EfficientDetNet(config=config)
        model.build((None, *config.image_size, 3))
        # Restore the most recent checkpoint (optionally EMA weights via
        # moving_average_decay); mismatches are fatal (skip_mismatch=False).
        util_keras.restore_ckpt(model,
                                tf.train.latest_checkpoint(FLAGS.model_dir),
                                config.moving_average_decay,
                                skip_mismatch=False)

        @tf.function
        def model_fn(images, labels):
            # One eval step: forward pass, NMS post-processing, then feed the
            # detections to the COCO evaluator. `evaluator` is captured from the
            # enclosing scope; it is assigned below, before this fn is traced/run.
            cls_outputs, box_outputs = model(images, training=False)
            detections = postprocess.generate_detections(
                config, cls_outputs, box_outputs, labels['image_scales'],
                labels['source_ids'])
            # update_state runs in Python (not the TF graph) via numpy_function.
            tf.numpy_function(evaluator.update_state, [
                labels['groundtruth_data'],
                postprocess.transform_detections(detections)
            ], [])

        # Evaluator for AP calculation.
        label_map = label_util.get_label_map(config.label_map)
        evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file,
                                                 label_map=label_map)

        # dataset
        batch_size = FLAGS.batch_size  # global batch size.
        ds = dataloader.InputReader(
            FLAGS.val_file_pattern,
            is_training=False,
            max_instances_per_image=config.max_instances_per_image)(
                config, batch_size=batch_size)
        if FLAGS.eval_samples:
            # Ceiling division: keep just enough batches to cover eval_samples.
            ds = ds.take((FLAGS.eval_samples + batch_size - 1) // batch_size)
        ds = ds_strategy.experimental_distribute_dataset(ds)

        # evaluate all images.
        eval_samples = FLAGS.eval_samples or 5000
        pbar = tf.keras.utils.Progbar(
            (eval_samples + batch_size - 1) // batch_size)
        for i, (images, labels) in enumerate(ds):
            ds_strategy.run(model_fn, (images, labels))
            pbar.update(i)

    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
        metric_dict[name] = metrics[i]

    # Per-class AP entries follow the aggregate metrics in `metrics`,
    # ordered by ascending class id.
    if label_map:
        for i, cid in enumerate(sorted(label_map.keys())):
            name = 'AP_/%s' % label_map[cid]
            metric_dict[name] = metrics[i + len(evaluator.metric_names)]
    print(FLAGS.model_name, metric_dict)