def model_arch(feats, model_name=None, **kwargs):
  """Construct a model arch for keras models.

  Args:
    feats: input image tensor(s) fed to the network.
    model_name: name of an EfficientDet config, e.g. 'efficientdet-d0'.
    **kwargs: hparam overrides applied on top of the named config.

  Returns:
    A pair (cls_outputs, box_outputs), each a dict keyed by feature level.
  """
  config = hparams_config.get_efficientdet_config(model_name)
  config.override(kwargs)

  model = efficientdet_keras.EfficientDetNet(config=config)
  # NOTE: the block previously carried a slab of commented-out debug code
  # (layer inspection / predict / exit); removed as dead code.
  cls_out_list, box_out_list = model(feats, training=False)

  # Convert the list of model outputs to a dictionary with key=level.
  assert len(cls_out_list) == config.max_level - config.min_level + 1
  assert len(box_out_list) == config.max_level - config.min_level + 1
  cls_outputs, box_outputs = {}, {}
  for i in range(config.min_level, config.max_level + 1):
    cls_outputs[i] = cls_out_list[i - config.min_level]
    box_outputs[i] = box_out_list[i - config.min_level]
  return cls_outputs, box_outputs
def main(_):
  """Train EfficientDet-d0 with a segmentation head on Oxford-IIIT Pet."""
  dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)
  train_examples = info.splits['train'].num_examples
  batch_size = 8
  steps_per_epoch = train_examples // batch_size
  # Build the input pipelines; train gets augmentation via load_image_train.
  train = dataset['train'].map(
      load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  test = dataset['test'].map(load_image_test)
  train_dataset = train.cache().shuffle(1000).batch(batch_size).repeat()
  train_dataset = train_dataset.prefetch(
      buffer_size=tf.data.experimental.AUTOTUNE)
  test_dataset = test.batch(batch_size)
  # Segmentation-only head on the d0 backbone.
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  config.heads = ['segmentation']
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((1, 512, 512, 3))
  model.compile(
      optimizer='adam',
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=['accuracy'])
  val_subsplits = 5
  val_steps = info.splits['test'].num_examples // batch_size // val_subsplits
  model.fit(
      train_dataset,
      epochs=20,
      steps_per_epoch=steps_per_epoch,
      validation_steps=val_steps,
      validation_data=test_dataset,
      callbacks=[])
  model.save_weights('./testdata/segmentation')
  # Sanity-check one prediction on a dummy image.
  print(create_mask(model(tf.ones((1, 512, 512, 3)), False)))
def test_variables(self):
  """Keras and legacy BiFPN must create identically named variables."""
  config = hparams_config.get_efficientdet_config()
  feat_sizes = utils.get_feat_sizes(config.image_size, config.max_level)

  def collect_variable_names(build_fn):
    # Build the BiFPN in a fresh graph and return all global variable names.
    with tf.Graph().as_default():
      feats = [
          tf.random.uniform([1, 64, 64, 40]),
          tf.random.uniform([1, 32, 32, 112]),
          tf.random.uniform([1, 16, 16, 320]),
          tf.random.uniform([1, 8, 8, 64]),
          tf.random.uniform([1, 4, 4, 64]),
      ]
      build_fn(feats, feat_sizes, config)
      return [var.name for var in tf.global_variables()]

  vars1 = collect_variable_names(efficientdet_arch_keras.build_bifpn_layer)
  vars2 = collect_variable_names(legacy_arch.build_bifpn_layer)
  self.assertEqual(vars1, vars2)
def test_build_feature_network(self):
  """Keras FPNCells must match the legacy feature network numerically."""
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  config.max_level = 5  # restrict the pyramid to levels min_level..5
  with tf.Session(graph=tf.Graph()) as sess:
    # Keras path takes a list of per-level features starting at min_level.
    inputs = [
        tf.ones([1, 64, 64, 40]),  # level 3
        tf.ones([1, 32, 32, 112]),  # level 4
        tf.ones([1, 16, 16, 320]),  # level 5
    ]
    # Seed is set after input creation but before variable creation so both
    # graphs initialize their variables identically.
    tf.random.set_random_seed(SEED)
    new_feats1 = efficientdet_keras.FPNCells(config)(inputs, True)
    sess.run(tf.global_variables_initializer())
    keras_feats = sess.run(new_feats1)
  with tf.Session(graph=tf.Graph()) as sess:
    # Legacy path takes a dict of backbone endpoints keyed by level 0..5.
    inputs = {
        0: tf.ones([1, 512, 512, 3]),
        1: tf.ones([1, 256, 256, 16]),
        2: tf.ones([1, 128, 128, 24]),
        3: tf.ones([1, 64, 64, 40]),
        4: tf.ones([1, 32, 32, 112]),
        5: tf.ones([1, 16, 16, 320])
    }
    tf.random.set_random_seed(SEED)
    new_feats2 = legacy_arch.build_feature_network(inputs, config)
    sess.run(tf.global_variables_initializer())
    legacy_feats = sess.run(new_feats2)
  # Keras returns a list indexed from min_level; legacy a dict keyed by level.
  for i in range(config.min_level, config.max_level + 1):
    self.assertAllClose(keras_feats[i - config.min_level], legacy_feats[i])
def test_model_output(self):
  """Keras EfficientDetModel must match the legacy graph implementation."""
  inputs_shape = [1, 512, 512, 3]
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    # (Commented-out intermediate build_backbone/build_feature_network calls
    # were removed as dead code; the end-to-end model covers them.)
    v = efficientdet_arch_keras.EfficientDetModel(config=config)(feats)
    sess.run(tf.global_variables_initializer())
    keras_class_out, keras_box_out = sess.run(v)
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    feats = legacy_arch.efficientdet(feats, config=config)
    sess.run(tf.global_variables_initializer())
    legacy_class_out, legacy_box_out = sess.run(feats)
  # Keras outputs are lists indexed from level 3; legacy dicts use the level.
  for i in range(3, 8):
    self.assertAllClose(
        keras_class_out[i - 3], legacy_class_out[i], rtol=1e-4, atol=1e-4)
    self.assertAllClose(
        keras_box_out[i - 3], legacy_box_out[i], rtol=1e-4, atol=1e-4)
  # Smoke-test the eager path; outputs are intentionally unused.
  feats = tf.ones(inputs_shape)
  tf.random.set_random_seed(SEED)
  model = efficientdet_arch_keras.EfficientDetModel(config=config)
  eager_class_out, eager_box_out = model(feats)  # pylint: disable=unused-variable
def test_output(self):
  """Keras class/box heads must match the legacy heads numerically."""
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  inputs_shape = [1, 512, 512, 3]
  # Shrink the pyramid to two levels to keep the test fast.
  config.max_level = config.min_level + 1
  with tf.Session(graph=tf.Graph()) as sess:
    tf.random.set_random_seed(SEED)
    # Keras variant consumes a list of per-level features.
    keras_inputs = []
    for i in range(config.min_level, config.max_level + 1):
      keras_inputs.append(
          tf.ones(shape=inputs_shape, name='input', dtype=tf.float32))
    output1 = efficientdet_keras.build_class_and_box_outputs(
        keras_inputs, config)
    sess.run(tf.global_variables_initializer())
    keras_class, keras_box = sess.run(output1)
  with tf.Session(graph=tf.Graph()) as sess:
    tf.random.set_random_seed(SEED)
    # Legacy variant consumes a dict keyed by feature level.
    legacy_inputs = dict()
    for i in range(config.min_level, config.max_level + 1):
      legacy_inputs[i] = tf.ones(
          shape=inputs_shape, name='input', dtype=tf.float32)
    output2 = legacy_arch.build_class_and_box_outputs(legacy_inputs, config)
    sess.run(tf.global_variables_initializer())
    legacy_class, legacy_box = sess.run(output2)
  for i in range(config.min_level, config.max_level + 1):
    self.assertAllClose(keras_class[i - config.min_level], legacy_class[i])
    self.assertAllClose(keras_box[i - config.min_level], legacy_box[i])
def test_output(self):
  """Keras class/box heads must exactly match the legacy heads."""
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  inputs_shape = [1, 512, 512, 3]
  # Shrink the pyramid to two levels to keep the test fast.
  config.max_level = config.min_level + 1
  with tf.Session(graph=tf.Graph()) as sess:
    inputs = dict()
    for i in range(config.min_level, config.max_level + 1):
      inputs[i] = tf.ones(shape=inputs_shape, name='input', dtype=tf.float32)
    tf.random.set_random_seed(SEED)
    output1 = efficientdet_arch_keras.build_class_and_box_outputs(
        inputs, config)
    sess.run(tf.global_variables_initializer())
    class_output1, box_output1 = sess.run(output1)
  with tf.Session(graph=tf.Graph()) as sess:
    inputs = dict()
    for i in range(config.min_level, config.max_level + 1):
      inputs[i] = tf.ones(shape=inputs_shape, name='input', dtype=tf.float32)
    tf.random.set_random_seed(SEED)
    output2 = legacy_arch.build_class_and_box_outputs(inputs, config)
    sess.run(tf.global_variables_initializer())
    class_output2, box_output2 = sess.run(output2)
  # Both variants return dicts keyed by level here; compare exactly.
  for i in range(config.min_level, config.max_level + 1):
    self.assertAllEqual(class_output1[i], class_output2[i])
    self.assertAllEqual(box_output1[i], box_output2[i])
def run_eval(args):
  """Evaluate an EfficientDet checkpoint on a dataset.

  Args:
    args: a dict (converted to a namedtuple below) with model_name, hparams,
      weights, eval_steps, and the fields utils.get_dataset reads.
  """
  logging.set_verbosity(logging.WARNING)
  args = utils.dict_to_namedtuple(args)
  config = hparams_config.get_efficientdet_config(args.model_name)
  config.override(args.hparams, allow_new_keys=True)
  config.image_size = utils.parse_image_size(config.image_size)
  params = dict(config.as_dict(), seed=None)
  logging.info(params)
  utils.setup_gpus()
  dataset = utils.get_dataset(args, 1, False, params, None)
  model = efficientdet_net.EfficientDetNet(params=params)
  model.compile()
  if args.weights:
    image_size = params["image_size"]
    # Run one dummy prediction to create the variables before restoring.
    model.predict(np.zeros((1, image_size[0], image_size[1], 3)))
    model.load_weights(args.weights)
  model.evaluate(dataset, steps=args.eval_steps)
def test_resample_feature_adder_compile(self):
  """ResampleFeatureAdder must compile and emit correctly sized P3-P7."""
  config = hparams_config.get_efficientdet_config("efficientdet-d0")
  feat_sizes = utils.get_feat_sizes(config.image_size, config.max_level)
  tf2.random.set_seed(SEED)
  # Symbolic inputs covering backbone endpoints at levels 0..5.
  inputs = [
      tf2.keras.Input(shape=[512, 512, 3]),
      tf2.keras.Input(shape=[256, 256, 16]),
      tf2.keras.Input(shape=[128, 128, 24]),
      tf2.keras.Input(shape=[64, 64, 40]),
      tf2.keras.Input(shape=[32, 32, 112]),
      tf2.keras.Input(shape=[16, 16, 320])
  ]
  outputs = efficientdet_arch_keras.ResampleFeatureAdder(config)(inputs)
  model = tf2.keras.Model(inputs=inputs, outputs=outputs)
  examples = [[
      tf2.ones([1, 512, 512, 3]),
      tf2.ones([1, 256, 256, 16]),
      tf2.ones([1, 128, 128, 24]),
      tf2.ones([1, 64, 64, 40]),
      tf2.ones([1, 32, 32, 112]),
      tf2.ones([1, 16, 16, 320])
  ]]
  preds = model(examples)
  try:
    utils.verify_feats_size(preds,
                            feat_sizes=feat_sizes,
                            min_level=config.min_level,
                            max_level=config.max_level,
                            data_format=config.data_format)
  except ValueError as err:
    # Fix: report the failure directly with self.fail() instead of the
    # opaque assertFalse(True, msg=...) idiom the test previously used.
    self.fail(repr(err))
  self.assertEqual(len(preds), 5, "P3-P7")
def test_build_feature_network(self):
  """Keras and legacy feature networks must produce identical outputs."""
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  with tf.Session(graph=tf.Graph()) as sess:
    # Backbone endpoints keyed by level 0..5.
    inputs = {
        0: tf.ones([1, 512, 512, 3]),
        1: tf.ones([1, 256, 256, 16]),
        2: tf.ones([1, 128, 128, 24]),
        3: tf.ones([1, 64, 64, 40]),
        4: tf.ones([1, 32, 32, 112]),
        5: tf.ones([1, 16, 16, 320])
    }
    tf.random.set_random_seed(SEED)
    new_feats1 = efficientdet_arch_keras.build_feature_network(inputs, config)
    sess.run(tf.global_variables_initializer())
    new_feats1 = sess.run(new_feats1)
  with tf.Session(graph=tf.Graph()) as sess:
    # Identical inputs in a fresh graph with the same seed for the legacy net.
    inputs = {
        0: tf.ones([1, 512, 512, 3]),
        1: tf.ones([1, 256, 256, 16]),
        2: tf.ones([1, 128, 128, 24]),
        3: tf.ones([1, 64, 64, 40]),
        4: tf.ones([1, 32, 32, 112]),
        5: tf.ones([1, 16, 16, 320])
    }
    tf.random.set_random_seed(SEED)
    new_feats2 = legacy_arch.build_feature_network(inputs, config)
    sess.run(tf.global_variables_initializer())
    new_feats2 = sess.run(new_feats2)
  for i in range(config.min_level, config.max_level + 1):
    self.assertAllEqual(new_feats1[i], new_feats2[i])
def test_model_output(self):
  """Composed keras arch functions must match the legacy full model."""
  inputs_shape = [1, 512, 512, 3]
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    # Build the keras pipeline piecewise: backbone -> FPN -> heads.
    feats = efficientdet_arch_keras.build_backbone(feats, config)
    feats = efficientdet_arch_keras.build_feature_network(feats, config)
    feats = efficientdet_arch_keras.build_class_and_box_outputs(feats, config)
    # TODO(tanmingxing): Fix the failure for keras Model.
    # feats = efficientdet_arch_keras.EfficientDetModel(config=config)(feats)
    sess.run(tf.global_variables_initializer())
    keras_class_out, keras_box_out = sess.run(feats)
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    feats = legacy_arch.efficientdet(feats, config=config)
    sess.run(tf.global_variables_initializer())
    legacy_class_out, legacy_box_out = sess.run(feats)
  # Keras outputs are indexed from level 3; legacy dicts use the level.
  for i in range(3, 8):
    self.assertAllEqual(keras_class_out[i - 3], legacy_class_out[i])
    self.assertAllEqual(keras_box_out[i - 3], legacy_box_out[i])
  # Eager-mode comparison against the legacy graph outputs.
  feats = tf.ones(inputs_shape)
  tf.random.set_random_seed(SEED)
  model = efficientdet_arch_keras.EfficientDetModel(config=config)
  eager_class_out, eager_box_out = model(feats)
  for i in range(3, 8):
    # TODO(tanmingxing): fix the failing case.
    self.assertAllEqual(eager_class_out[i - 3], legacy_class_out[i])
    self.assertAllEqual(eager_box_out[i - 3], legacy_box_out[i])
def test_model_output(self):
  """Graph and eager EfficientDetNet must both match the legacy model."""
  inputs_shape = [1, 512, 512, 3]
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  # Temporary checkpoint used to carry the graph-mode weights into eager mode.
  tmp_ckpt = os.path.join(tempfile.mkdtemp(), 'ckpt')
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    model = efficientdet_keras.EfficientDetNet(config=config)
    outputs = model(feats)
    sess.run(tf.global_variables_initializer())
    keras_class_out, keras_box_out = sess.run(outputs)
    model.save_weights(tmp_ckpt)
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    feats = legacy_arch.efficientdet(feats, config=config)
    sess.run(tf.global_variables_initializer())
    legacy_class_out, legacy_box_out = sess.run(feats)
  # Keras outputs are indexed from level 3; legacy dicts use the level.
  for i in range(3, 8):
    self.assertAllClose(
        keras_class_out[i - 3], legacy_class_out[i], rtol=1e-4, atol=1e-4)
    self.assertAllClose(
        keras_box_out[i - 3], legacy_box_out[i], rtol=1e-4, atol=1e-4)
  # Rebuild in eager mode from the saved weights and compare again.
  feats = tf.ones(inputs_shape)
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.load_weights(tmp_ckpt)
  eager_class_out, eager_box_out = model(feats)
  for i in range(3, 8):
    self.assertAllClose(
        eager_class_out[i - 3], legacy_class_out[i], rtol=1e-4, atol=1e-4)
    self.assertAllClose(
        eager_box_out[i - 3], legacy_box_out[i], rtol=1e-4, atol=1e-4)
def __init__(self, ckpt_path, debug, *args, **kwargs):
  """ Initialize the inference driver.

  Args:
    ckpt_path: checkpoint path, such as /tmp/efficientdet-d0/.
    debug: bool, if true, run in debug mode (functions run eagerly).
    *args: positional arguments forwarded to the parent class.
    **kwargs: keyword arguments forwarded to the parent class.
  """
  super().__init__(*args, **kwargs)
  params = copy.deepcopy(self.params)
  config = hparams_config.get_efficientdet_config(self.model_name)
  config.override(params)
  # Set the global keras mixed-precision policy before creating the model so
  # layers pick up the right compute dtype.
  precision = utils.get_precision(config.strategy, config.mixed_precision)
  policy = tf.keras.mixed_precision.Policy(precision)
  tf.keras.mixed_precision.set_global_policy(policy)
  self.model = efficientdet_keras.EfficientDetModel(config=config)
  image_size = utils.parse_image_size(config.image_size)
  # Build variables with a fixed batch size so the checkpoint can restore.
  self.model.build((self.batch_size, *image_size, 3))
  util_keras.restore_ckpt(self.model, ckpt_path, config.moving_average_decay,
                          skip_mismatch=False)
  self.debug = debug
  if debug:
    tf.config.run_functions_eagerly(debug)
def test_model_output(self):
  """Keras outputs AND input gradients must match the legacy model."""
  inputs_shape = [1, 512, 512, 3]
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  config.heads = ['object_detection', 'segmentation']
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    model = efficientdet_keras.EfficientDetNet(config=config)
    outputs = model(feats, True)
    sess.run(tf.global_variables_initializer())
    # Third element is the segmentation output, unused in this comparison.
    keras_class_out, keras_box_out, _ = sess.run(outputs)
    # Gradients of every output w.r.t. the input image.
    grads = tf.nest.map_structure(
        lambda output: tf.gradients(output, feats), outputs)
    keras_class_grads, keras_box_grads, _ = sess.run(grads)
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    outputs = legacy_arch.efficientdet(feats, config=config)
    sess.run(tf.global_variables_initializer())
    legacy_class_out, legacy_box_out = sess.run(outputs)
    grads = tf.nest.map_structure(
        lambda output: tf.gradients(output, feats), outputs)
    legacy_class_grads, legacy_box_grads = sess.run(grads)
  # Keras outputs are indexed from level 3; legacy dicts use the level.
  for i in range(3, 8):
    self.assertAllEqual(keras_class_out[i - 3], legacy_class_out[i])
    self.assertAllEqual(keras_box_out[i - 3], legacy_box_out[i])
    self.assertAllEqual(keras_class_grads[i - 3], legacy_class_grads[i])
    self.assertAllEqual(keras_box_grads[i - 3], legacy_box_grads[i])
def efficientdet(features, model_name=None, config=None, **kwargs):
  """Build EfficientDet model.

  Args:
    features: input image tensor.
    model_name: name of a predefined config; required if config is not given.
    config: an hparams_config.Config or plain dict of hparams.
    **kwargs: extra hparam overrides applied on top of config.

  Returns:
    A (class_outputs, box_outputs) pair from the prediction heads.

  Raises:
    ValueError: if neither model_name nor config is provided.
  """
  if not config and not model_name:
    raise ValueError('please specify either model name or config')

  if not config:
    config = hparams_config.get_efficientdet_config(model_name)
  elif isinstance(config, dict):
    config = hparams_config.Config(config)  # wrap dict in Config object

  if kwargs:
    config.override(kwargs)

  logging.info(config)

  # Backbone -> feature network -> heads; log cumulative cost after each stage.
  features = build_backbone(features, config)
  logging.info('backbone params/flops = {:.6f}M, {:.9f}B'.format(
      *utils.num_params_flops()))

  fpn_feats = build_feature_network(features, config)
  logging.info('backbone+fpn params/flops = {:.6f}M, {:.9f}B'.format(
      *utils.num_params_flops()))

  class_outputs, box_outputs = build_class_and_box_outputs(fpn_feats, config)
  logging.info('backbone+fpn+box params/flops = {:.6f}M, {:.9f}B'.format(
      *utils.num_params_flops()))

  return class_outputs, box_outputs
def test_eager_output(self):
  """Eager execution must reproduce graph-mode outputs via saved weights."""
  inputs_shape = [1, 512, 512, 3]
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  config.heads = ['object_detection', 'segmentation']
  tmp_ckpt = os.path.join(tempfile.mkdtemp(), 'ckpt2')
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    model = efficientdet_keras.EfficientDetNet(config=config)
    outputs = model(feats, True)
    sess.run(tf.global_variables_initializer())
    keras_class_out, keras_box_out, keras_seg_out = sess.run(outputs)
    model.save_weights(tmp_ckpt)
  # Rebuild in eager mode from the checkpoint and compare all three heads.
  feats = tf.ones(inputs_shape)
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.load_weights(tmp_ckpt)
  eager_class_out, eager_box_out, eager_seg_out = model(feats, True)
  # Class/box outputs are lists over the 5 pyramid levels.
  for i in range(5):
    self.assertAllClose(
        eager_class_out[i], keras_class_out[i], rtol=1e-4, atol=1e-4)
    self.assertAllClose(
        eager_box_out[i], keras_box_out[i], rtol=1e-4, atol=1e-4)
  self.assertAllClose(eager_seg_out, keras_seg_out, rtol=1e-4, atol=1e-4)
def test_backbone_feats(self):
  """Backbone must expose endpoints for levels 0 through 5."""
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  images = tf.ones([4, 224, 224, 3])
  endpoints = efficientdet_arch.build_backbone(images, config)
  self.assertEqual(list(endpoints.keys()), [0, 1, 2, 3, 4, 5])
  # Level 0 is the raw input; level 5 is 224 / 2**5 = 7 spatially.
  self.assertEqual(endpoints[0].shape, [4, 224, 224, 3])
  self.assertEqual(endpoints[5].shape, [4, 7, 7, 320])
def test_model_output(self):
  """Keras outputs/gradients match legacy; eager matches via checkpoint."""
  inputs_shape = [1, 512, 512, 3]
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  config.heads = ['object_detection', 'segmentation']
  tmp_ckpt = os.path.join(tempfile.mkdtemp(), 'ckpt')
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    model = efficientdet_keras.EfficientDetNet(config=config)
    outputs = model(feats, True)
    sess.run(tf.global_variables_initializer())
    keras_class_out, keras_box_out, keras_seg_out = sess.run(outputs)
    # Gradients of each output w.r.t. the input image.
    grads = tf.nest.map_structure(
        lambda output: tf.gradients(output, feats), outputs)
    keras_class_grads, keras_box_grads, keras_seg_grads = sess.run(grads)
    model.save_weights(tmp_ckpt)
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    # Legacy model has no segmentation head, so only class/box to compare.
    outputs = legacy_arch.efficientdet(feats, config=config)
    sess.run(tf.global_variables_initializer())
    legacy_class_out, legacy_box_out = sess.run(outputs)
    grads = tf.nest.map_structure(
        lambda output: tf.gradients(output, feats), outputs)
    legacy_class_grads, legacy_box_grads = sess.run(grads)
  # Keras outputs are indexed from level 3; legacy dicts use the level.
  for i in range(3, 8):
    self.assertAllClose(
        keras_class_out[i - 3], legacy_class_out[i], rtol=1e-4, atol=1e-4)
    self.assertAllClose(
        keras_box_out[i - 3], legacy_box_out[i], rtol=1e-4, atol=1e-4)
    self.assertAllClose(
        keras_class_grads[i - 3], legacy_class_grads[i], rtol=1e-4, atol=1e-4)
    self.assertAllClose(
        keras_box_grads[i - 3], legacy_box_grads[i], rtol=1e-4, atol=1e-4)
  # Rebuild in eager mode from the saved weights and compare again.
  feats = tf.ones(inputs_shape)
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.load_weights(tmp_ckpt)
  eager_class_out, eager_box_out, eager_seg_out = model(feats, True)
  for i in range(3, 8):
    self.assertAllClose(
        eager_class_out[i - 3], legacy_class_out[i], rtol=1e-4, atol=1e-4)
    self.assertAllClose(
        eager_box_out[i - 3], legacy_box_out[i], rtol=1e-4, atol=1e-4)
  self.assertAllClose(eager_seg_out, keras_seg_out, rtol=1e-4, atol=1e-4)
def main(_):
  """Run COCO evaluation, optionally ensembling flipped-image detections."""
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.batch_size = FLAGS.batch_size
  config.val_json_file = FLAGS.val_json_file
  # dataset
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      use_fake_data=False,
      max_instances_per_image=config.max_instances_per_image)(config)
  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((config.batch_size, None, None, 3))
  model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))
  evaluator = coco_metric.EvaluationMetric(filename=config.val_json_file)
  # compute stats for all batches.
  for images, labels in ds:
    config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
    cls_outputs, box_outputs = model(images, training=False)
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'], False)
    if FLAGS.enable_tta:
      # Test-time augmentation: run the horizontally flipped image and merge
      # both detection sets with weighted box fusion.
      images_flipped = tf.image.flip_left_right(images)
      cls_outputs_flipped, box_outputs_flipped = model(
          images_flipped, training=False)
      detections_flipped = postprocess.generate_detections(
          config, cls_outputs_flipped, box_outputs_flipped,
          labels['image_scales'], labels['source_ids'], True)
      for d, df in zip(detections, detections_flipped):
        combined_detections = wbf.ensemble_detections(
            config, tf.concat([d, df], 0))
        combined_detections = tf.stack([combined_detections])
        evaluator.update_state(
            labels['groundtruth_data'].numpy(),
            postprocess.transform_detections(combined_detections).numpy())
    else:
      evaluator.update_state(
          labels['groundtruth_data'].numpy(),
          postprocess.transform_detections(detections).numpy())
  # compute the final eval results.
  metric_values = evaluator.result()
  metric_dict = {}
  for i, metric_value in enumerate(metric_values):
    metric_dict[evaluator.metric_names[i]] = metric_value
  print(metric_dict)
def test_fnode_compile(self):
  """First BiFPN FNode must compile and append one fused node (P6')."""
  config = hparams_config.get_efficientdet_config("efficientdet-d0")
  fpn_config = legacy_arch.get_fpn_config(config.fpn_name, config.min_level,
                                          config.max_level,
                                          config.fpn_weight_method)
  feat_sizes = utils.get_feat_sizes(config.image_size, config.max_level)
  # Exercise only the first node of the FPN graph.
  i = 0
  fnode_cfg = fpn_config.nodes[i]
  examples = [[
      tf2.ones([1, 512, 512, 3]),
      tf2.ones([1, 256, 256, 16]),
      tf2.ones([1, 128, 128, 24]),
      tf2.ones([1, 64, 64, 40]),
      tf2.ones([1, 32, 32, 112]),
      tf2.ones([1, 16, 16, 320])
  ]]
  inputs = [
      tf2.keras.Input(shape=[512, 512, 3]),
      tf2.keras.Input(shape=[256, 256, 16]),
      tf2.keras.Input(shape=[128, 128, 24]),
      tf2.keras.Input(shape=[64, 64, 40]),
      tf2.keras.Input(shape=[32, 32, 112]),
      tf2.keras.Input(shape=[16, 16, 320])
  ]
  # Resample backbone endpoints into the initial P3-P7 pyramid first.
  x = efficientdet_arch_keras.ResampleFeatureAdder(config)(inputs)
  outputs = efficientdet_arch_keras.FNode(
      feat_sizes[fnode_cfg['feat_level']]['height'],
      feat_sizes[fnode_cfg['feat_level']]['width'],
      fnode_cfg['inputs_offsets'],
      config.fpn_num_filters,
      config.apply_bn_for_resampling,
      config.is_training_bn,
      config.conv_after_downsample,
      config.conv_bn_act_pattern,
      config.separable_conv,
      config.act_type,
      strategy=config.strategy,
      weight_method=fpn_config.weight_method,
      data_format=config.data_format,
      name='fnode{}'.format(i))(x)
  model = tf2.keras.Model(inputs=inputs, outputs=outputs)
  preds = model(examples)
  self.assertEqual(
      len(preds),
      6,
      msg=
      "Expected that FNode will add one more node (P6') to initial 5 (P3 - P7)"
  )
  # The appended node must have the spatial size of its configured level.
  self.assertEqual(feat_sizes[fnode_cfg['feat_level']]['height'],
                   preds[5].shape[1])
  self.assertEqual(feat_sizes[fnode_cfg['feat_level']]['width'],
                   preds[5].shape[2])
def build(self, params_override=None):
  """Build model and restore checkpoints.

  Args:
    params_override: optional dict of hparams merged over self.params.
  """
  effective_params = copy.deepcopy(self.params)
  if params_override:
    effective_params.update(params_override)
  config = hparams_config.get_efficientdet_config(self.model_name)
  config.override(effective_params)

  self.model = efficientdet_keras.EfficientDetModel(config=config)
  # Build variables for a fixed input shape before restoring the checkpoint.
  height, width = utils.parse_image_size(effective_params['image_size'])
  self.model.build((self.batch_size, height, width, 3))
  util_keras.restore_ckpt(self.model, self.ckpt_path,
                          effective_params['moving_average_decay'])
def build_model(self, keras=False):
  """Build class/box heads in a fresh graph and return variable names.

  Args:
    keras: if True build the keras heads, otherwise the legacy heads.

  Returns:
    Names of all global variables created by the heads.
  """
  with tf.Graph().as_default():
    config = hparams_config.get_efficientdet_config()
    inputs_shape = [1, 512, 512, 3]
    level_feats = {
        level: tf.ones(shape=inputs_shape, name='input', dtype=tf.float32)
        for level in range(config.min_level, config.max_level + 1)
    }
    builder = (efficientdet_arch_keras.build_class_and_box_outputs
               if keras else legacy_arch.build_class_and_box_outputs)
    builder(level_feats, config)
    return [n.name for n in tf.global_variables()]
def model_arch(feats, model_name=None, **kwargs):
  """Construct a model arch for keras models."""
  config = hparams_config.get_efficientdet_config(model_name)
  config.override(kwargs)

  model = efficientdet_keras.EfficientDetNet(config=config)
  cls_out_list, box_out_list = model(feats, training=False)

  # Re-key the per-level output lists by their absolute feature level.
  num_levels = config.max_level - config.min_level + 1
  assert len(cls_out_list) == num_levels
  assert len(box_out_list) == num_levels
  levels = range(config.min_level, config.max_level + 1)
  cls_outputs = {lvl: cls_out_list[lvl - config.min_level] for lvl in levels}
  box_outputs = {lvl: box_out_list[lvl - config.min_level] for lvl in levels}
  return cls_outputs, box_outputs
def main(_):
  """Evaluate a TFLite EfficientDet model with the COCO metric."""
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])

  # Evaluator for AP calculation.
  label_map = label_util.get_label_map(config.label_map)
  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file, label_map=label_map)

  # dataset
  batch_size = 1
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      max_instances_per_image=config.max_instances_per_image)(
          config, batch_size=batch_size)
  eval_samples = FLAGS.eval_samples
  if eval_samples:
    # Ceiling division so a partial final batch is still evaluated.
    ds = ds.take((eval_samples + batch_size - 1) // batch_size)

  # Network
  lite_runner = LiteRunner(FLAGS.tflite_path)
  eval_samples = FLAGS.eval_samples or 5000
  pbar = tf.keras.utils.Progbar((eval_samples + batch_size - 1) // batch_size)
  for i, (images, labels) in enumerate(ds):
    cls_outputs, box_outputs = lite_runner.run(images)
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    detections = postprocess.transform_detections(detections)
    evaluator.update_state(labels['groundtruth_data'].numpy(),
                           detections.numpy())
    pbar.update(i)

  # compute the final eval results.
  metrics = evaluator.result()
  metric_dict = {}
  for i, name in enumerate(evaluator.metric_names):
    metric_dict[name] = metrics[i]
  if label_map:
    # Per-class AP values follow the aggregate metrics in `metrics`.
    for i, cid in enumerate(sorted(label_map.keys())):
      name = 'AP_/%s' % label_map[cid]
      metric_dict[name] = metrics[i + len(evaluator.metric_names)]
  print(FLAGS.model_name, metric_dict)
def _create_representative_dataset(self, file_pattern, num_calibration_steps):
  """Return a generator of calibration batches for post-training quantization.

  Args:
    file_pattern: glob for the input tfrecord files.
    num_calibration_steps: number of batches to yield for calibration.

  Returns:
    A zero-arg generator function yielding [image] lists.
  """
  config = hparams_config.get_efficientdet_config(self.model_name)
  config.override(self.params)
  reader = dataloader.InputReader(
      file_pattern,
      is_training=False,
      max_instances_per_image=config.max_instances_per_image)
  ds = reader(config, batch_size=self.batch_size)

  def representative_dataset_gen():
    # Labels are ignored; only images are needed for calibration.
    for image, _ in ds.take(num_calibration_steps):
      yield [image]

  return representative_dataset_gen
def build_model(self, keras=False):
  """Build class/box heads in a fresh graph and return variable names.

  The keras heads take a list of per-level features; the legacy heads take a
  dict keyed by level. Both views share the same underlying tensors.

  Args:
    keras: if True build the keras heads, otherwise the legacy heads.

  Returns:
    Names of all global variables created by the heads.
  """
  with tf.Graph().as_default():
    config = hparams_config.get_efficientdet_config('efficientdet-d0')
    inputs_shape = [1, 512, 512, 3]
    keras_inputs = []
    legacy_inputs = dict()
    for level in range(config.min_level, config.max_level + 1):
      feat = tf.ones(shape=inputs_shape, name='input', dtype=tf.float32)
      keras_inputs.append(feat)
      legacy_inputs[level] = feat
    if keras:
      efficientdet_keras.build_class_and_box_outputs(keras_inputs, config)
    else:
      legacy_arch.build_class_and_box_outputs(legacy_inputs, config)
    return [n.name for n in tf.global_variables()]
def main(_):
  """Run EfficientDet-d0 keras inference on one image and save the overlay."""
  # pylint: disable=line-too-long
  # Prepare images and checkpoints: please run these commands in shell.
  # !mkdir tmp
  # !wget https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png -O tmp/img.png
  # !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/efficientdet-d0.tar.gz -O tmp/efficientdet-d0.tar.gz
  # !tar zxf tmp/efficientdet-d0.tar.gz -C tmp
  imgs = [np.array(Image.open(FLAGS.image_path))]
  nms_score_thresh, nms_max_output_size = 0.4, 100

  # Create model config.
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  config.is_training_bn = False
  config.image_size = '1920x1280'
  config.nms_configs.score_thresh = nms_score_thresh
  config.nms_configs.max_output_size = nms_max_output_size

  # Use 'mixed_float16' if running on GPUs.
  policy = tf.keras.mixed_precision.experimental.Policy('float32')
  tf.keras.mixed_precision.experimental.set_policy(policy)
  tf.config.experimental_run_functions_eagerly(FLAGS.debug)

  # Create and run the model.
  model = efficientdet_keras.EfficientDetModel(config=config)
  height, width = utils.parse_image_size(config['image_size'])
  model.build((1, height, width, 3))
  model.load_weights(FLAGS.checkpoint)
  model.summary()

  @tf.function
  def f(imgs):
    return model(imgs, training=False, post_mode='global')

  boxes, scores, classes, valid_len = f(imgs)

  # Visualize results.
  for i, img in enumerate(imgs):
    length = valid_len[i]
    img = inference.visualize_image(
        img,
        boxes[i].numpy()[:length],
        # Fix: np.int was a deprecated alias for the builtin int and was
        # removed in NumPy 1.24; astype(int) is the documented replacement.
        classes[i].numpy().astype(int)[:length],
        scores[i].numpy()[:length],
        min_score_thresh=nms_score_thresh,
        max_boxes_to_draw=nms_max_output_size)
    output_image_path = os.path.join(FLAGS.output_dir, str(i) + '.jpg')
    Image.fromarray(img).save(output_image_path)
    print('writing annotated image to ', output_image_path)
def build(self, params_override=None):
  """Build model and restore checkpoints.

  Args:
    params_override: optional dict of hparams merged over self.params.
  """
  merged_params = copy.deepcopy(self.params)
  if params_override:
    merged_params.update(params_override)
  config = hparams_config.get_efficientdet_config(self.model_name)
  config.override(merged_params)

  # The bare network vs. the full model (with pre/post-processing).
  model_cls = (efficientdet_keras.EfficientDetNet
               if self.only_network else efficientdet_keras.EfficientDetModel)
  self.model = model_cls(config=config)
  # Build variables for a fixed input shape before restoring the checkpoint.
  height, width = utils.parse_image_size(merged_params['image_size'])
  self.model.build((self.batch_size, height, width, 3))
  util_keras.restore_ckpt(self.model, self.ckpt_path, skip_mismatch=False)
def test_backbone(self):
  """Keras and legacy backbones must produce identical endpoint tensors."""
  inputs_shape = [1, 512, 512, 3]
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    # Keras build_backbone returns a tuple here; only the first item is used.
    feats, _ = efficientdet_arch_keras.build_backbone(feats, config)
    sess.run(tf.global_variables_initializer())
    feats1 = sess.run(feats)
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    feats = legacy_arch.build_backbone(feats, config)
    sess.run(tf.global_variables_initializer())
    feats2 = sess.run(feats)
  # NOTE(review): this iterates the legacy tensor dict `feats`, not the
  # fetched `feats2`; presumably both share the same level keys — confirm.
  for key in list(feats.keys()):
    self.assertAllEqual(feats1[key], feats2[key])
def test_backbone(self):
  """Keras backbone (list output) must match the legacy backbone (dict)."""
  inputs_shape = [1, 512, 512, 3]
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    feats = efficientdet_keras.build_backbone(feats, config)
    sess.run(tf.global_variables_initializer())
    keras_feats = sess.run(feats)
  with tf.Session(graph=tf.Graph()) as sess:
    feats = tf.ones(inputs_shape)
    tf.random.set_random_seed(SEED)
    feats = legacy_arch.build_backbone(feats, config)
    sess.run(tf.global_variables_initializer())
    legacy_feats = sess.run(feats)
  # Keras returns a list indexed from min_level; legacy a dict keyed by level.
  for i, feat in enumerate(keras_feats):
    level = i + config.min_level
    self.assertAllClose(feat, legacy_feats[level])