Example #1
def clone_and_build_model(model,
                          input_tensors=None,
                          target_tensors=None,
                          custom_objects=None,
                          compile_clone=True,
                          in_place_reset=False,
                          optimizer_iterations=None):
    """Clone a `Model` and build/compile it with the same settings used before.

  This function can be run in the same graph or in a separate graph from the
  model. When using a separate graph, `in_place_reset` must be `False`.

  Note that, currently, the clone produced from this function may not work with
  TPU DistributionStrategy. Try at your own risk.

  Args:
    model: `tf.keras.Model` object. Can be Functional, Sequential, or
      sub-classed.
    input_tensors: Optional list of input tensors to build the model upon. If
      not provided, placeholders will be created.
    target_tensors: Optional list of target tensors for compiling the model. If
      not provided, placeholders will be created.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions.
    compile_clone: Boolean, whether to compile model clone (default `True`).
    in_place_reset: Boolean, whether to reset the model in place. Only used if
      the model is not a graph network. If the model is a subclassed model, then
      this argument must be set to `True` (default `False`). To restore the
      original model, use the function
      `in_place_subclassed_model_state_restoration(model)`.
    optimizer_iterations: An iterations variable that will be incremented by the
      optimizer if the clone is compiled. This argument is used when a Keras
      model is cloned into an Estimator model function, because Estimators
      create their own global step variable.

  Returns:
    Clone of the model.

  Raises:
    ValueError: Cloning fails in the following cases
      - cloning a subclassed model with `in_place_reset` set to False.
      - compiling the clone when the original model has not been compiled.
  """
    # Grab optimizer now, as we reset-in-place for subclassed models, but
    # want to maintain access to the original optimizer.
    orig_optimizer = model.optimizer
    if compile_clone and not orig_optimizer:
        raise ValueError(
            'Error when cloning model: compile_clone was set to True, but the '
            'original model has not been compiled.')

    if model._is_graph_network or isinstance(model, Sequential):
        if custom_objects:
            with CustomObjectScope(custom_objects):
                clone = clone_model(model, input_tensors=input_tensors)
        else:
            clone = clone_model(model, input_tensors=input_tensors)

        if all([
                isinstance(clone, Sequential), not clone._is_graph_network,
                getattr(model, '_build_input_shape', None) is not None
        ]):
            # Set model inputs to build the model and add input/output properties.
            # TODO(kathywu): Add multiple placeholders to handle edge case where
            # sequential model has multiple inputs.
            clone._set_inputs(
                K.placeholder(model._build_input_shape,
                              dtype=model.inputs[0].dtype))
    else:
        if not in_place_reset:
            raise ValueError(
                'Model is not a graph network (usually means that it is a subclassed '
                'model). The model cannot be cloned, but there is a workaround where '
                'the model is reset in-place. To use this, please set the argument '
                '`in_place_reset` to `True`. This will reset the attributes in the '
                'original model. To restore the attributes, call '
                '`in_place_subclassed_model_state_restoration(model)`.')
        clone = model
        _in_place_subclassed_model_reset(clone)
        if input_tensors is not None:
            if isinstance(input_tensors,
                          (list, tuple)) and len(input_tensors) == 1:
                input_tensors = input_tensors[0]
            clone._set_inputs(input_tensors)

    if compile_clone:
        if isinstance(orig_optimizer, optimizers.TFOptimizer):
            optimizer = optimizers.TFOptimizer(orig_optimizer.optimizer,
                                               optimizer_iterations)
            K.track_tf_optimizer(optimizer)
        else:
            optimizer_config = orig_optimizer.get_config()
            optimizer = orig_optimizer.__class__.from_config(optimizer_config)
            if optimizer_iterations is not None:
                optimizer.iterations = optimizer_iterations

        clone.compile(optimizer,
                      model.loss,
                      metrics=metrics_module.clone_metrics(
                          model._compile_metrics),
                      loss_weights=model.loss_weights,
                      sample_weight_mode=model.sample_weight_mode,
                      weighted_metrics=metrics_module.clone_metrics(
                          model._compile_weighted_metrics),
                      target_tensors=target_tensors)

    return clone
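For orientation, a minimal usage sketch of the helper above (a hedged sketch, assuming the TF 1.x private module path `tensorflow.python.keras.models`, where this function lives; the layer sizes are illustrative):

import tensorflow as tf
from tensorflow.python.keras import models as keras_models

# The original model must be compiled when compile_clone=True.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')

# The clone is rebuilt and compiled with the same loss/metrics settings.
clone = keras_models.clone_and_build_model(model, compile_clone=True)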
Example #2
def _replicated_optimizer(opt, num_replicas):
    """Wrap the optimizer `opt` with CrossShardOptimizer if applicable."""
    if num_replicas == 1:
        return opt
    return keras_optimizers.TFOptimizer(
        optimizer=tpu_optimizer.CrossShardOptimizer(opt.optimizer))
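A hedged usage sketch of the wrapper above (the learning rate and replica count are illustrative; `TFOptimizer` wraps a `tf.compat.v1.train` optimizer, which is what `opt.optimizer` refers to, and the module path is the TF 1.x private one):

import tensorflow.compat.v1 as tf
from tensorflow.python.keras import optimizers as keras_optimizers

base_opt = keras_optimizers.TFOptimizer(tf.train.GradientDescentOptimizer(0.1))
# A single replica passes through unchanged; multiple replicas get the
# CrossShardOptimizer wrap so gradients are aggregated across shards.
single = _replicated_optimizer(base_opt, num_replicas=1)
sharded = _replicated_optimizer(base_opt, num_replicas=8)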
Example #3
        def _model_fn():
            """Compute fit/eval/predict for the TPU."""
            is_training = self.execution_mode == model_fn_lib.ModeKeys.TRAIN
            is_test = self.execution_mode == model_fn_lib.ModeKeys.EVAL
            is_predict = self.execution_mode == model_fn_lib.ModeKeys.PREDICT

            # During train/eval, we infeed our features as well as labels.
            if is_training or is_test:
                infeed_layers = self.model._input_layers + self.model._output_layers
            else:
                infeed_layers = self.model._input_layers

            # Generate our infeed operation to read features & labels.
            infeed_tensors = tpu_ops.infeed_dequeue_tuple(
                dtypes=[spec.dtype for spec in input_specs],
                shapes=[spec.shape for spec in input_specs],
                name='infeed-%s' % self.execution_mode)

            assert len(infeed_tensors) == len(infeed_layers), (
                'Infeed inputs did not match model: %s vs %s' %
                (infeed_layers, infeed_tensors))

            tpu_targets = []
            tpu_input_map = {}

            # Sort infeed outputs into inputs and labels for calling our Keras model.
            for tensor, layer in zip(infeed_tensors, infeed_layers):
                if layer in self.model._input_layers:
                    tpu_input_map[layer.name] = tensor
                if layer in self.model._output_layers:
                    tpu_targets.append(tensor)

            # Clone our CPU model, running within the TPU device context.
            with TPURewriteContext(tpu_input_map):
                # TODO(power): Replicate variables.
                with ops.device('/device:TPU:0'):
                    self._cloned_model = models.clone_model(self.model)

            # Create a copy of the optimizer for this graph.
            if isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
                cloned_optimizer = keras_optimizers.TFOptimizer(
                    self.model.optimizer.optimizer)
            else:
                logging.info('Cloning %s %s',
                             self.model.optimizer.__class__.__name__,
                             self._optimizer_config)
                cloned_optimizer = self.model.optimizer.__class__.from_config(
                    self._optimizer_config)

            if is_training or is_test:
                self._cloned_model.compile(
                    optimizer=_replicated_optimizer(cloned_optimizer),
                    loss=self.model.loss,
                    loss_weights=self.model.loss_weights,
                    metrics=self.model.metrics,
                    weighted_metrics=self.model.weighted_metrics,
                    target_tensors=tpu_targets,
                )

            # Compute our outfeed depending on the execution mode
            if is_training:
                self._cloned_model._make_train_function()
                self._outfeed_spec = [
                    tensor_spec.TensorSpec(tensor.shape, tensor.dtype,
                                           tensor.name)
                    for tensor in self._cloned_model.train_function.outputs
                ]
                return [
                    self._cloned_model.train_function.updates_op,
                    tpu_ops.outfeed_enqueue_tuple(
                        self._cloned_model.train_function.outputs,
                        name='outfeed-enqueue-train')
                ]
            elif is_test:
                self._cloned_model._make_test_function()
                self._outfeed_spec = [
                    tensor_spec.TensorSpec(tensor.shape, tensor.dtype,
                                           tensor.name)
                    for tensor in self._cloned_model.test_function.outputs
                ]
                return [
                    tpu_ops.outfeed_enqueue_tuple(
                        self._cloned_model.test_function.outputs,
                        name='outfeed-enqueue-test')
                ]
            elif is_predict:
                self._cloned_model._make_predict_function()
                self._outfeed_spec = [
                    tensor_spec.TensorSpec(tensor.shape, tensor.dtype,
                                           tensor.name)
                    for tensor in self._cloned_model.predict_function.outputs
                ]
                return [
                    tpu_ops.outfeed_enqueue_tuple(
                        self._cloned_model.predict_function.outputs,
                        name='outfeed-enqueue-predict',
                    )
                ]
            else:
                assert False, 'Unexpected execution mode: %s' % self.execution_mode
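The core move in `_model_fn` above is cloning the CPU model inside a device context so the clone's ops land on the TPU. A standalone sketch of just that pattern, with a CPU device as a stand-in (a real `/device:TPU:0` placement needs TPU hardware and the surrounding `TPURewriteContext`):

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
# Variables and ops of the clone are placed on the device named here.
with tf.device('/device:CPU:0'):
    cloned = tf.keras.models.clone_model(model)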
Example #4
def clone_and_build_model(
    model, input_tensors=None, target_tensors=None, custom_objects=None,
    compile_clone=True, in_place_reset=False, optimizer_iterations=None):
  """Clone a `Model` and build/compile it with the same settings used before.

  This function can be run in the same graph or in a separate graph from the
  model. When using a separate graph, `in_place_reset` must be `False`.

  Args:
    model: `tf.keras.Model` object. Can be Functional, Sequential, or
      sub-classed.
    input_tensors: Optional list of input tensors to build the model upon. If
      not provided, placeholders will be created.
    target_tensors: Optional list of target tensors for compiling the model. If
      not provided, placeholders will be created.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions.
    compile_clone: Boolean, whether to compile model clone (default `True`).
    in_place_reset: Boolean, whether to reset the model in place. Only used if
      the model is not a graph network. If the model is a subclassed model, then
      this argument must be set to `True` (default `False`). To restore the
      original model, use the function
      `in_place_subclassed_model_state_restoration(model)`.
    optimizer_iterations: An iterations variable that will be incremented by the
      optimizer if the clone is compiled. This argument is used when a Keras
      model is cloned into an Estimator model function, because Estimators
      create their own global step variable.

  Returns:
    Clone of the model.

  Raises:
    ValueError: if trying to clone a subclassed model, and `in_place_reset` is
      set to False.
  """
  if model._is_graph_network:
    if custom_objects:
      with CustomObjectScope(custom_objects):
        clone = clone_model(model, input_tensors=input_tensors)
    else:
      clone = clone_model(model, input_tensors=input_tensors)
  else:
    if not in_place_reset:
      raise ValueError(
          'Model is not a graph network (usually means that it is a subclassed '
          'model). The model cannot be cloned, but there is a workaround where '
          'the model is reset in-place. To use this, please set the argument '
          '`in_place_reset` to `True`. This will reset the attributes in the '
          'original model. To restore the attributes, call '
          '`in_place_subclassed_model_state_restoration(model)`.')
    clone = model
    _in_place_subclassed_model_reset(clone)
    if input_tensors is not None:
      if isinstance(input_tensors, (list, tuple)) and len(input_tensors) == 1:
        input_tensors = input_tensors[0]
      clone._set_inputs(input_tensors)

  # Compile/Build model
  if not compile_clone:
    if isinstance(clone, Sequential):
      clone.build()
  elif model.optimizer:
    if isinstance(model.optimizer, optimizers.TFOptimizer):
      optimizer = optimizers.TFOptimizer(
          model.optimizer.optimizer, optimizer_iterations)
      K.track_tf_optimizer(optimizer)
    else:
      optimizer_config = model.optimizer.get_config()
      optimizer = model.optimizer.__class__.from_config(optimizer_config)
      if optimizer_iterations is not None:
        optimizer.iterations = optimizer_iterations

    clone.compile(
        optimizer,
        model.loss,
        metrics=metrics_module.clone_metrics(model.metrics),
        loss_weights=model.loss_weights,
        sample_weight_mode=model.sample_weight_mode,
        weighted_metrics=metrics_module.clone_metrics(model.weighted_metrics),
        target_tensors=target_tensors)

  return clone
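This variant raises only for subclassed models cloned without `in_place_reset`. A hedged sketch of that subclassed path, using the restoration helper named in the docstring (module path as assumed in the earlier sketches; `compile_clone` is disabled here to keep the sketch minimal):

import tensorflow as tf
from tensorflow.python.keras import models as keras_models

class MyModel(tf.keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.dense = tf.keras.layers.Dense(1)

    def call(self, inputs):
        return self.dense(inputs)

model = MyModel()
model.compile(optimizer='sgd', loss='mse')

# Subclassed models cannot be cloned directly: the model itself is reset
# in place and returned as the "clone" ...
clone = keras_models.clone_and_build_model(
    model, in_place_reset=True, compile_clone=False)
# ... and the original attributes can be restored afterwards.
keras_models.in_place_subclassed_model_state_restoration(model)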
Example #5
def clone_and_build_model(model,
                          input_tensors=None,
                          target_tensors=None,
                          custom_objects=None,
                          compile_clone=True,
                          in_place_reset=False,
                          optimizer_iterations=None,
                          optimizer_config=None):
    """Variant of `clone_and_build_model` that accepts an explicit
    `optimizer_config` and works around Horovod-wrapped optimizers."""
    orig_optimizer = model.optimizer
    if compile_clone and not orig_optimizer:
        raise ValueError(
            'Error when cloning model: compile_clone was set to True, but the '
            'original model has not been compiled.')

    if model._is_graph_network or isinstance(model, Sequential):
        if custom_objects:
            with CustomObjectScope(custom_objects):
                clone = models.clone_model(model, input_tensors=input_tensors)
        else:
            clone = models.clone_model(model, input_tensors=input_tensors)

        if all([
                isinstance(clone, Sequential), not clone._is_graph_network,
                getattr(model, '_build_input_shape', None) is not None
        ]):
            clone._set_inputs(
                K.placeholder(model._build_input_shape,
                              dtype=model.inputs[0].dtype))
    else:
        if not in_place_reset:
            raise ValueError(
                'Model is not a graph network (usually means that it is a subclassed '
                'model). The model cannot be cloned, but there is a workaround where '
                'the model is reset in-place. To use this, please set the argument '
                '`in_place_reset` to `True`. This will reset the attributes in the '
                'original model. To restore the attributes, call '
                '`in_place_subclassed_model_state_restoration(model)`.')
        clone = model
        _in_place_subclassed_model_reset(clone)
        if input_tensors is not None:
            if isinstance(input_tensors,
                          (list, tuple)) and len(input_tensors) == 1:
                input_tensors = input_tensors[0]
            clone._set_inputs(input_tensors)

    if compile_clone:
        if isinstance(orig_optimizer, optimizers.TFOptimizer):
            optimizer = optimizers.TFOptimizer(orig_optimizer.optimizer,
                                               optimizer_iterations)
            K.track_tf_optimizer(optimizer)
        else:
            optimizer_config = optimizer_config or orig_optimizer.get_config()
            #       print("orig_optimizer          :", orig_optimizer)
            #       print("orig_optimizer.c .      :",orig_optimizer.__class__)
            #       print("orig_optimizer.c.i .    :", orig_optimizer.__class__.__init__)
            #       print("optimizer_config        :", optimizer_config)
            #       print("orig_optimizer.c.i.args :", inspect.getargspec(orig_optimizer.__class__.__init__))
            #       print("orig_optimizer.c.i.dict :", orig_optimizer.__class__.__dict__)
            #       print("orig_optimizer.c._b_ .  :", orig_optimizer.__class__.__bases__)
            #orig_optimizer          : <horovod._keras.Adam object at 0x7f803812aac8>
            #orig_optimizer.c .      : <class 'horovod._keras.Adam'>
            #orig_optimizer.c.i .    : <function create_distributed_optimizer.<locals>._DistributedOptimizer.__init__ at 0x7f80480840d0>
            #optimizer_config        : {'name': 'Adam', 'learning_rate': 0.0014000001, 'decay': 0.0002, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False}
            #orig_optimizer.c.i.args : ArgSpec(args=['self', 'name', 'device_dense', 'device_sparse', 'compression', 'sparse_as_dense', 'config'], varargs=None, keywords=None, defaults=None)
            #orig_optimizer.c.i.dict : {'__module__': 'horovod._keras', '__init__': <function create_distributed_optimizer.<locals>._DistributedOptimizer.__init__ at 0x7f80480840d0>, 'get_gradients': <function create_distributed_optimizer.<locals>._DistributedOptimizer.get_gradients at 0x7f80480847b8>, '__doc__': None, '__abstractmethods__': frozenset(), '_abc_registry': <_weakrefset.WeakSet object at 0x7f8038112cf8>, '_abc_cache': <_weakrefset.WeakSet object at 0x7f803812ac50>, '_abc_negative_cache': <_weakrefset.WeakSet object at 0x7f803812a6d8>, '_abc_negative_cache_version': 51}
            if "horovod._keras" not in str(type(orig_optimizer)):
                optimizer = orig_optimizer.__class__.from_config(
                    optimizer_config)
            else:
                optimizer = orig_optimizer.__class__.__bases__[0].from_config(
                    optimizer_config)
            if optimizer_iterations is not None:
                optimizer.iterations = optimizer_iterations

        clone.compile(optimizer,
                      model.loss,
                      metrics=metrics_module.clone_metrics(
                          model._compile_metrics),
                      loss_weights=model.loss_weights,
                      sample_weight_mode=model.sample_weight_mode,
                      weighted_metrics=metrics_module.clone_metrics(
                          model._compile_weighted_metrics),
                      target_tensors=target_tensors)
    return clone
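The Horovod branch above works around a wrapper optimizer whose `__init__` signature is incompatible with `from_config` by rebuilding from the wrapper's base class. A standalone hedged sketch of that move (the `WrappedAdam` class is illustrative, not Horovod's):

import tensorflow as tf

class WrappedAdam(tf.keras.optimizers.Adam):
    # Mimics a wrapper whose __init__ signature differs from the base
    # optimizer's, so WrappedAdam.from_config(config) would raise TypeError.
    def __init__(self, inner_config):
        super(WrappedAdam, self).__init__(**inner_config)

opt = WrappedAdam({'learning_rate': 0.001})
config = opt.get_config()
# Rebuild from the first base class (plain Adam) rather than the wrapper.
rebuilt = opt.__class__.__bases__[0].from_config(config)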
Example #6
def clone_and_build_model(model,
                          input_tensors=None,
                          target_tensors=None,
                          custom_objects=None,
                          compile_clone=True,
                          in_place_reset=False,
                          optimizer_iterations=None):
    """1.13"""
    if compile_clone and not model.optimizer:
        raise ValueError(
            'Error when cloning model: compile_clone was set to True, but the '
            'original model has not been compiled.')

    if model._is_graph_network or isinstance(model, Sequential):
        if custom_objects:
            with CustomObjectScope(custom_objects):
                clone = models.clone_model(model, input_tensors=input_tensors)
        else:
            clone = models.clone_model(model, input_tensors=input_tensors)

        if all([
                isinstance(clone, Sequential),
                not clone._is_graph_network,
                getattr(model, '_build_input_shape', None) is not None
        ]):
            clone._set_inputs(
                K.placeholder(model._build_input_shape,
                              dtype=model.inputs[0].dtype))
    else:
        if not in_place_reset:
            raise ValueError(
                'Model is not a graph network (usually means that it is a subclassed '
                'model). The model cannot be cloned, but there is a workaround where '
                'the model is reset in-place. To use this, please set the argument '
                '`in_place_reset` to `True`. This will reset the attributes in the '
                'original model. To restore the attributes, call '
                '`in_place_subclassed_model_state_restoration(model)`.')
        clone = model
        _in_place_subclassed_model_reset(clone)
        if input_tensors is not None:
            if isinstance(input_tensors,
                          (list, tuple)) and len(input_tensors) == 1:
                input_tensors = input_tensors[0]
            clone._set_inputs(input_tensors)

    if compile_clone and model.optimizer:
        if isinstance(model.optimizer, optimizers.TFOptimizer):
            optimizer = optimizers.TFOptimizer(model.optimizer.optimizer,
                                               optimizer_iterations)
            K.track_tf_optimizer(optimizer)
        else:
            optimizer_config = model.optimizer.get_config()
            optimizer = model.optimizer.__class__.from_config(optimizer_config)
            if optimizer_iterations is not None:
                optimizer.iterations = optimizer_iterations

        clone.compile(optimizer,
                      model.loss,
                      metrics=metrics_module.clone_metrics(
                          model._compile_metrics),
                      loss_weights=model.loss_weights,
                      sample_weight_mode=model.sample_weight_mode,
                      weighted_metrics=metrics_module.clone_metrics(
                          model._compile_weighted_metrics),
                      target_tensors=target_tensors)

    return clone
Example #7
def clone_and_build_model(model,
                          input_tensors=None,
                          target_tensors=None,
                          custom_objects=None,
                          compile_clone=True,
                          in_place_reset=False,
                          optimizer_iterations=None,
                          optimizer_config=None):
    """Clone a `Model` and build/compile it with the same settings used before.

  This function can be run in the same graph or in a separate graph from the
  model. When using a separate graph, `in_place_reset` must be `False`.

  Note that, currently, the clone produced from this function may not work with
  TPU DistributionStrategy. Try at your own risk.

  Args:
    model: `tf.keras.Model` object. Can be Functional, Sequential, or
      sub-classed.
    input_tensors: Optional list or dictionary of input tensors to build the
      model upon. If not provided, placeholders will be created.
    target_tensors: Optional list of target tensors for compiling the model. If
      not provided, placeholders will be created.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions.
    compile_clone: Boolean, whether to compile model clone (default `True`).
    in_place_reset: Boolean, whether to reset the model in place. Only used if
      the model is a subclassed model. In the case of a subclassed model,
      this argument must be set to `True` (default `False`). To restore the
      original model, use the function
      `in_place_subclassed_model_state_restoration(model)`.
    optimizer_iterations: An iterations variable that will be incremented by the
      optimizer if the clone is compiled. This argument is used when a Keras
      model is cloned into an Estimator model function, because Estimators
      create their own global step variable.
    optimizer_config: Optimizer config dictionary or list of dictionary
      returned from `get_config()`. This argument should be defined if
      `clone_and_build_model` is called in a different graph or session from
      the original model, and the optimizer is an instance of `OptimizerV2`.

  Returns:
    Clone of the model.

  Raises:
    ValueError: Cloning fails in the following cases
      - cloning a subclassed model with `in_place_reset` set to False.
      - compiling the clone when the original model has not been compiled.
  """
    # Grab optimizer now, as we reset-in-place for subclassed models, but
    # want to maintain access to the original optimizer.
    orig_optimizer = model.optimizer
    if compile_clone and not orig_optimizer:
        raise ValueError(
            'Error when cloning model: compile_clone was set to True, but the '
            'original model has not been compiled.')

    if compile_clone:
        compile_args = model._get_compile_args()  # pylint: disable=protected-access
        # Allows this method to be robust to switching graph and eager classes.
        model._get_compile_args = lambda: compile_args

    with CustomObjectScope(custom_objects or {}):
        if model._is_graph_network:
            clone = clone_model(model, input_tensors=input_tensors)
        elif isinstance(model, Sequential):
            clone = clone_model(model, input_tensors=input_tensors)
            if (not clone._is_graph_network
                    and model._build_input_shape is not None):
                if ops.executing_eagerly_outside_functions():
                    clone.build(model._build_input_shape)
                else:
                    clone._set_inputs(
                        K.placeholder(model._build_input_shape,
                                      dtype=model.inputs[0].dtype))
        else:
            try:
                # Prefer cloning the model if serialization/deserialization
                # logic is implemented for the subclassed model.
                clone = model.__class__.from_config(model.get_config())
            except NotImplementedError:
                logging.warning(
                    'This model is a subclassed model. Please implement '
                    '`get_config` and `from_config` to better support '
                    'cloning the model.')
                if not in_place_reset:
                    raise ValueError(
                        'This model is a subclassed model. '
                        'Such a model cannot be cloned, but there is a workaround where '
                        'the model is reset in-place. To use this, please set the '
                        'argument `in_place_reset` to `True`. This will reset the '
                        'attributes in the original model. To restore the attributes, '
                        'call `in_place_subclassed_model_state_restoration(model)`.'
                    )
                clone = model
                _in_place_subclassed_model_reset(clone)
            if input_tensors is not None:
                if isinstance(input_tensors,
                              (list, tuple)) and len(input_tensors) == 1:
                    input_tensors = input_tensors[0]
                clone._set_inputs(input_tensors)

    if compile_clone:
        if isinstance(orig_optimizer, optimizers.TFOptimizer):
            optimizer = optimizers.TFOptimizer(orig_optimizer.optimizer,
                                               optimizer_iterations)
            K.track_tf_optimizer(optimizer)
        else:
            if not isinstance(orig_optimizer, (tuple, list)):
                orig_optimizer = [orig_optimizer]
            if optimizer_config is None:
                optimizer = [
                    opt.__class__.from_config(opt.get_config())
                    for opt in orig_optimizer
                ]
            elif isinstance(optimizer_config, dict):
                optimizer = [
                    orig_optimizer[0].__class__.from_config(optimizer_config)
                ]
            else:
                # optimizer config is list of dict, same order as orig_optimizer.
                optimizer = [
                    opt.__class__.from_config(opt_config)
                    for (opt,
                         opt_config) in zip(orig_optimizer, optimizer_config)
                ]
            if optimizer_iterations is not None:
                for opt in optimizer:
                    opt.iterations = optimizer_iterations

            if len(optimizer) == 1:
                optimizer = optimizer[0]

        compile_args['optimizer'] = optimizer
        if target_tensors is not None:
            compile_args['target_tensors'] = target_tensors
        # Ensure Metric objects in new model are separate from existing model.
        compile_args['metrics'] = metrics_module.clone_metrics(
            compile_args['metrics'])
        compile_args['weighted_metrics'] = metrics_module.clone_metrics(
            compile_args['weighted_metrics'])
        clone.compile(**compile_args)

    return clone
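A hedged sketch of the `optimizer_config` argument introduced in this version: capture the `OptimizerV2` config while the original model's graph is current, then pass it when cloning elsewhere (module path as assumed in the earlier sketches):

import tensorflow as tf
from tensorflow.python.keras import models as keras_models

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=tf.keras.optimizers.Adam(0.001), loss='mse')

# Capture the optimizer config in the original graph/session ...
opt_config = model.optimizer.get_config()

# ... so the clone can rebuild the optimizer without reading hyperparameter
# variables from the original graph.
clone = keras_models.clone_and_build_model(
    model, compile_clone=True, optimizer_config=opt_config)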
Example #8
def _replicated_optimizer(opt):
  """Wrap the optimizer `opt` with CrossShardOptimizer if applicable."""
  return keras_optimizers.TFOptimizer(
      optimizer=tpu_optimizer.CrossShardOptimizer(opt.optimizer))