Example 1
import os

import tensorflow as tf

# Import paths assume the TF Model Garden layout; adjust to your checkout.
from official.vision.image_classification.resnet import imagenet_preprocessing
from official.vision.image_classification.resnet import resnet_model


def export_tfhub(model_path, hub_destination):
  """Restores a tf.keras.Model and saves it for TF-Hub export."""
  model = resnet_model.resnet50(
      num_classes=imagenet_preprocessing.NUM_CLASSES, rescale_inputs=True)
  model.load_weights(model_path)
  model.save(
      os.path.join(hub_destination, "classification"), include_optimizer=False)

  # Extract a sub-model that exposes the pooled feature vector as its output.
  image_input = model.get_layer(index=0).get_output_at(0)
  feature_vector_output = model.get_layer(name="reduce_mean").get_output_at(0)
  hub_model = tf.keras.Model(image_input, feature_vector_output)

  # Exports a SavedModel.
  hub_model.save(
      os.path.join(hub_destination, "feature-vector"), include_optimizer=False)
Example 2
    # Constructor of a runnable class that subclasses both
    # orbit.StandardTrainer and orbit.StandardEvaluator (see the __init__
    # calls at the bottom).
    def __init__(self, flags_obj, time_callback, epoch_steps):
        self.strategy = tf.distribute.get_strategy()
        self.flags_obj = flags_obj
        self.dtype = flags_core.get_tf_dtype(flags_obj)
        self.time_callback = time_callback

        # Input pipeline related
        batch_size = flags_obj.batch_size
        if batch_size % self.strategy.num_replicas_in_sync != 0:
            raise ValueError(
                'Batch size must be divisible by the number of replicas: '
                '{}'.format(self.strategy.num_replicas_in_sync))

        # Automatic rebatching is not supported by the
        # `distribute_datasets_from_function()` API, which is required when
        # cloning the dataset to multiple workers in eager mode, so we use
        # the per-replica batch size.
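        # Example: a global batch size of 1024 across 8 replicas yields a
        # per-replica batch size of 128.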
        self.batch_size = int(batch_size / self.strategy.num_replicas_in_sync)

        if self.flags_obj.use_synthetic_data:
            self.input_fn = common.get_synth_input_fn(
                height=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,
                width=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,
                num_channels=imagenet_preprocessing.NUM_CHANNELS,
                num_classes=imagenet_preprocessing.NUM_CLASSES,
                dtype=self.dtype,
                drop_remainder=True)
        else:
            self.input_fn = imagenet_preprocessing.input_fn

        self.model = resnet_model.resnet50(
            num_classes=imagenet_preprocessing.NUM_CLASSES,
            use_l2_regularizer=not flags_obj.single_l2_loss_op)

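        # common.LR_SCHEDULE is assumed to be a list of (multiplier, epoch)
        # pairs: the first entry sets the warmup length, and the remaining
        # entries define the piecewise-constant boundaries and multipliers.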
        lr_schedule = common.PiecewiseConstantDecayWithWarmup(
            batch_size=flags_obj.batch_size,
            epoch_size=imagenet_preprocessing.NUM_IMAGES['train'],
            warmup_epochs=common.LR_SCHEDULE[0][1],
            boundaries=list(p[1] for p in common.LR_SCHEDULE[1:]),
            multipliers=list(p[0] for p in common.LR_SCHEDULE),
            compute_lr_on_cpu=True)
        self.optimizer = common.get_optimizer(lr_schedule)
        # Make sure the iterations variable is created inside the scope.
        self.global_step = self.optimizer.iterations
        self.optimizer = performance.configure_optimizer(
            self.optimizer,
            use_float16=self.dtype == tf.float16,
            loss_scale=flags_core.get_loss_scale(flags_obj,
                                                 default_for_fp16=128))

        self.train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
        self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
            'train_accuracy', dtype=tf.float32)
        self.test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)
        self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
            'test_accuracy', dtype=tf.float32)

        self.checkpoint = tf.train.Checkpoint(model=self.model,
                                              optimizer=self.optimizer)

        # Handling epochs.
        self.epoch_steps = epoch_steps
        self.epoch_helper = orbit.utils.EpochHelper(epoch_steps,
                                                    self.global_step)
        train_dataset = orbit.utils.make_distributed_dataset(
            self.strategy,
            self.input_fn,
            is_training=True,
            data_dir=self.flags_obj.data_dir,
            batch_size=self.batch_size,
            parse_record_fn=imagenet_preprocessing.parse_record,
            datasets_num_private_threads=(
                self.flags_obj.datasets_num_private_threads),
            dtype=self.dtype,
            drop_remainder=True,
            training_dataset_cache=self.flags_obj.training_dataset_cache)
        orbit.StandardTrainer.__init__(
            self,
            train_dataset,
            options=orbit.StandardTrainerOptions(
                use_tf_while_loop=flags_obj.use_tf_while_loop,
                use_tf_function=flags_obj.use_tf_function))
        if not flags_obj.skip_eval:
            eval_dataset = orbit.utils.make_distributed_dataset(
                self.strategy,
                self.input_fn,
                is_training=False,
                data_dir=self.flags_obj.data_dir,
                batch_size=self.batch_size,
                parse_record_fn=imagenet_preprocessing.parse_record,
                dtype=self.dtype)
            orbit.StandardEvaluator.__init__(
                self,
                eval_dataset,
                options=orbit.StandardEvaluatorOptions(
                    use_tf_function=flags_obj.use_tf_function))
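
A minimal driver sketch for the runnable above, assuming the class is named
ResnetRunnable and that flags_obj carries the usual Model Garden flags; an
orbit.Controller alternates the train and eval loops that
orbit.StandardTrainer and orbit.StandardEvaluator define:

runnable = ResnetRunnable(flags_obj, time_callback, epoch_steps)
controller = orbit.Controller(
    strategy=runnable.strategy,
    trainer=runnable,
    evaluator=runnable,
    global_step=runnable.global_step,
    steps_per_loop=flags_obj.steps_per_loop)
controller.train_and_evaluate(
    train_steps=epoch_steps * flags_obj.train_epochs,
    eval_steps=eval_steps,  # hypothetical: number of eval batches per run
    eval_interval=epoch_steps)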
Example 3
# Assumed import path from the TF Model Garden; adjust to your checkout.
from official.vision.image_classification.resnet import resnet_model


def load_resnet(batch_size, num_classes):
    """Builds a ResNet-50 model from the TF Model Garden."""
    return resnet_model.resnet50(batch_size=batch_size,
                                 num_classes=num_classes)
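
A hypothetical call, building a ResNet-50 with a static batch dimension for
the 1000 ImageNet classes:

model = load_resnet(batch_size=32, num_classes=1000)
model.summary()  # ResNet-50 with batch size fixed at 32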