# コード例 #1 (Code example #1, score: 0)
# File: hdf5_format.py — Project: rmlarsen/tensorflow
def load_model(filepath, custom_objects=None, compile=True):  # pylint: disable=redefined-builtin
  """Loads a model saved via `save_model`.

  Arguments:
      filepath: One of the following:
          - String, path to the saved model
          - `h5py.File` object from which to load the model
      custom_objects: Optional dictionary mapping names
          (strings) to custom classes or functions to be
          considered during deserialization.
      compile: Boolean, whether to compile the model
          after loading.

  Returns:
      A Keras model instance. If an optimizer was found
      as part of the saved model, the model is already
      compiled. Otherwise, the model is uncompiled and
      a warning will be displayed. When `compile` is set
      to False, the compilation is omitted without any
      warning.

  Raises:
      ImportError: if h5py is not available.
      ValueError: In case of an invalid savefile.
  """
  if h5py is None:
    raise ImportError('`load_model` requires h5py.')

  if not custom_objects:
    custom_objects = {}

  def convert_custom_objects(obj):
    """Handles custom object lookup.

    Arguments:
        obj: object, dict, or list.

    Returns:
        The same structure, where occurrences
            of a custom object name have been replaced
            with the custom object.
    """
    if isinstance(obj, list):
      deserialized = []
      for value in obj:
        deserialized.append(convert_custom_objects(value))
      return deserialized
    if isinstance(obj, dict):
      deserialized = {}
      for key, value in obj.items():
        deserialized[key] = convert_custom_objects(value)
      return deserialized
    if obj in custom_objects:
      return custom_objects[obj]
    return obj

  # Only open (and later close) the file ourselves when given a path; an
  # already-open `h5py.File` is the caller's responsibility.
  opened_new_file = not isinstance(filepath, h5py.File)
  if opened_new_file:
    f = h5py.File(filepath, mode='r')
  else:
    f = filepath

  model = None
  try:
    # instantiate model
    model_config = f.attrs.get('model_config')
    if model_config is None:
      raise ValueError('No model found in config file.')
    model_config = json.loads(model_config.decode('utf-8'))
    model = model_config_lib.model_from_config(model_config,
                                               custom_objects=custom_objects)

    # set weights
    load_weights_from_hdf5_group(f['model_weights'], model.layers)

    if compile:
      # instantiate optimizer
      training_config = f.attrs.get('training_config')
      if training_config is None:
        logging.warning('No training configuration found in save file: '
                        'the model was *not* compiled. Compile it manually.')
        return model
      training_config = json.loads(training_config.decode('utf-8'))
      optimizer_config = training_config['optimizer_config']
      optimizer = optimizers.deserialize(
          optimizer_config, custom_objects=custom_objects)

      # Recover loss functions and metrics.
      loss = convert_custom_objects(training_config['loss'])
      metrics = convert_custom_objects(training_config['metrics'])
      weighted_metrics = convert_custom_objects(
          training_config.get('weighted_metrics', None))
      sample_weight_mode = training_config['sample_weight_mode']
      loss_weights = training_config['loss_weights']

      # Compile model.
      model.compile(
          optimizer=optimizer,
          loss=loss,
          metrics=metrics,
          weighted_metrics=weighted_metrics,
          loss_weights=loss_weights,
          sample_weight_mode=sample_weight_mode)

      # Set optimizer weights.
      if 'optimizer_weights' in f:
        # Build train function (to get weight updates).
        # Models that aren't graph networks must wait until they are called
        # with data to _make_train_function() and so can't load optimizer
        # weights.
        if model._is_graph_network:  # pylint: disable=protected-access
          model._make_train_function()
          optimizer_weight_values = load_optimizer_weights_from_hdf5_group(f)
          try:
            model.optimizer.set_weights(optimizer_weight_values)
          except ValueError:
            logging.warning('Error in loading the saved optimizer '
                            'state. As a result, your model is '
                            'starting with a freshly initialized '
                            'optimizer.')
        else:
          # Fix: the original message concatenated to "model isstarting";
          # a trailing space was missing after 'your model is'.
          logging.warning('Sequential models without an `input_shape` '
                          'passed to the first layer cannot reload their '
                          'optimizer state. As a result, your model is '
                          'starting with a freshly initialized optimizer.')

  finally:
    if opened_new_file:
      f.close()
  return model
# コード例 #2 (Code example #2, score: 0)
def legacy_load_model(filepath, custom_objects=None, compile=True):  # pylint: disable=redefined-builtin
    """Load a Keras model saved in the legacy HDF5 format.

    Kept because pretrained models saved with older Keras/TensorFlow
    couldn't be loaded by newer versions of TensorFlow's `load_model`.

    Arguments:
        filepath: One of the following:
            - String, path to the saved model
            - `h5py.File` object from which to load the model
        custom_objects: Optional dictionary mapping names (strings) to
            custom classes or functions to be considered during
            deserialization.
        compile: Boolean, whether to compile the model after loading.

    Returns:
        A Keras model instance. If a training configuration is present in
        the file, the model is returned compiled; otherwise a warning is
        logged and the uncompiled model is returned. When `compile` is
        False, compilation is skipped without any warning.

    Raises:
        ImportError: if h5py is not available.
        ValueError: if no model configuration is found in the file.
    """
    if h5py is None:
        raise ImportError('`load_model` requires h5py.')

    if not custom_objects:
        custom_objects = {}

    def convert_custom_objects(obj):
        """Recursively replace custom-object names with the objects themselves.

        Walks lists and dicts; any leaf value that is a key in
        `custom_objects` is swapped for the mapped object, everything
        else is returned unchanged.
        """
        if isinstance(obj, list):
            deserialized = []
            for value in obj:
                deserialized.append(convert_custom_objects(value))
            return deserialized
        if isinstance(obj, dict):
            deserialized = {}
            for key, value in obj.items():
                deserialized[key] = convert_custom_objects(value)
            return deserialized
        if obj in custom_objects:
            return custom_objects[obj]
        return obj

    # Only open (and later close) the file ourselves when given a path; an
    # already-open `h5py.File` stays open for the caller.
    opened_new_file = not isinstance(filepath, h5py.File)
    if opened_new_file:
        f = h5py.File(filepath, mode='r')
    else:
        f = filepath

    try:
        # instantiate model
        model_config = f.attrs.get('model_config')
        if model_config is None:
            raise ValueError('No model found in config file.')
        model_config = json.loads(model_config.decode('utf-8'))
        model = model_from_config(model_config, custom_objects=custom_objects)

        # set weights
        load_weights_from_hdf5_group(f['model_weights'], model.layers)

        if compile:
            # instantiate optimizer
            training_config = f.attrs.get('training_config')
            if training_config is None:
                # Best effort: return the uncompiled model rather than fail.
                logging.warning(
                    'No training configuration found in save file: '
                    'the model was *not* compiled. Compile it manually.')
                return model
            training_config = json.loads(training_config.decode('utf-8'))
            optimizer_config = training_config['optimizer_config']
            optimizer = optimizers.deserialize(optimizer_config,
                                               custom_objects=custom_objects)

            # Recover loss functions and metrics.
            loss = convert_custom_objects(training_config['loss'])
            metrics = convert_custom_objects(training_config['metrics'])
            sample_weight_mode = training_config['sample_weight_mode']
            loss_weights = training_config['loss_weights']

            # Compile model.
            model.compile(optimizer=optimizer,
                          loss=loss,
                          metrics=metrics,
                          loss_weights=loss_weights,
                          sample_weight_mode=sample_weight_mode)

            # Set optimizer weights.
            if 'optimizer_weights' in f:
                # Build train function (to get weight updates).
                model._make_train_function()
                optimizer_weights_group = f['optimizer_weights']
                # Weight names are stored as bytes in the HDF5 attrs.
                optimizer_weight_names = [
                    n.decode('utf8')
                    for n in optimizer_weights_group.attrs['weight_names']
                ]
                # NOTE(review): these are h5py datasets, not arrays;
                # presumably `set_weights` accepts array-likes — confirm.
                optimizer_weight_values = [
                    optimizer_weights_group[n] for n in optimizer_weight_names
                ]
                try:
                    model.optimizer.set_weights(optimizer_weight_values)
                except ValueError:
                    # A shape/count mismatch is non-fatal: keep the fresh
                    # optimizer state instead of aborting the load.
                    logging.warning('Error in loading the saved optimizer '
                                    'state. As a result, your model is '
                                    'starting with a freshly initialized '
                                    'optimizer.')
    finally:
        if opened_new_file:
            f.close()
    # NOTE(review): `model` is only bound inside the try block; any
    # exception propagates before this return is reached.
    return model
def compile_args_from_training_config(training_config, custom_objects=None):
    """Return model.compile arguments from training config."""
    if custom_objects is None:
        custom_objects = {}

    optimizer = optimizers.deserialize(
        training_config['optimizer_config'], custom_objects=custom_objects)

    def _recover_loss(loss_config):
        # Loss may have been fed to compile as a list (one per output), a
        # dict keyed by output name (no 'class_name' key present), or a
        # single str / function / class instance.
        if isinstance(loss_config, list):
            return [losses.deserialize(lc, custom_objects)
                    for lc in loss_config]
        if isinstance(loss_config, dict) and 'class_name' not in loss_config:
            return {name: losses.deserialize(lc, custom_objects)
                    for name, lc in loss_config.items()}
        return losses.deserialize(loss_config, custom_objects)

    def _recover_metrics(metrics_config):
        # Metrics may have been fed to compile as a dict keyed by output
        # name, as a flat list, or not at all (None).
        if isinstance(metrics_config, dict):
            return {name: convert_output_metrics(mc, custom_objects)
                    for name, mc in metrics_config.items()}
        if isinstance(metrics_config, list):
            return [convert_output_metrics(mc, custom_objects)
                    for mc in metrics_config]
        return None

    return dict(
        optimizer=optimizer,
        loss=_recover_loss(training_config['loss']),
        metrics=_recover_metrics(training_config.get('metrics', None)),
        weighted_metrics=_recover_metrics(
            training_config.get('weighted_metrics', None)),
        loss_weights=training_config['loss_weights'],
        sample_weight_mode=training_config['sample_weight_mode'])
# コード例 #4 (Code example #4, score: 0)
 def from_config(cls, config):
     """Rebuild an instance from a serialized config dict.

     The 'optimizer' entry is popped and deserialized first; every
     remaining key is forwarded to the constructor as a keyword argument.
     """
     optimizer_config = config.pop('optimizer')
     restored_optimizer = optimizers.deserialize(optimizer_config)
     return cls(restored_optimizer, **config)
# コード例 #5 (Code example #5, score: 0)
 def parse_optimizer(self):
     """Deserialize `compile_config['optimizer']` when it is stored as a dict.

     A missing, empty, or already-instantiated optimizer entry is left
     untouched.
     """
     candidate = self.compile_config.get('optimizer')
     if isinstance(candidate, dict) and candidate:
         self.compile_config['optimizer'] = deserialize(candidate)
# コード例 #6 (Code example #6, score: 0)
# File: _default.py — Project: wujinke/MDNT
def load_model(filepath,
               headpath=None,
               optmpath=None,
               custom_objects=None,
               compile=True):  # pylint: disable=redefined-builtin
    """Loads a model saved via `save_model`.

    This revised version splits the configurations and weights of a model
    into two JSON files and an HDF5 file respectively. To learn why this
    technique is applied, check `mdnt.save_model`. This implementation
    requires users to use `mdnt.save_model` and `mdnt.load_model` together.

    Arguments:
        filepath: One of the following:
            - String, path to the saved model
            - `h5py.File` object from which to load the model
        headpath: One of the following:
            - String, path where to save the model configurations
            - `File` object where to save the model configurations
            - If set None, would get deduced from `filepath`
        optmpath: One of the following:
            - String, path where to save the model configurations
            - `File` object where to save the model configurations
            - If set None, would get deduced from `filepath`
            - In most cases, this variable could be left `None`
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.
        compile: Boolean, whether to compile the model
            after loading.

    Returns:
        A Keras model instance. If an optimizer was found
        as part of the saved model, the model is already
        compiled. Otherwise, the model is uncompiled and
        a warning will be displayed. When `compile` is set
        to False, the compilation is omitted without any
        warning.

    Raises:
        ImportError: if h5py is not available.
        ValueError: In case of an invalid savefile.
    """
    if h5py is None:
        raise ImportError('`load_model` requires h5py.')

    if not custom_objects:
        custom_objects = {}
    else:
        # Fix: copy so the `update` below does not mutate the caller's dict.
        custom_objects = dict(custom_objects)
    custom_objects.update(customObjects)

    def convert_custom_objects(obj):
        """Handles custom object lookup.
        Arguments:
            obj: object, dict, or list.
        Returns:
            The same structure, where occurrences
            of a custom object name have been replaced
            with the custom object.
        """
        if isinstance(obj, list):
            deserialized = []
            for value in obj:
                deserialized.append(convert_custom_objects(value))
            return deserialized
        if isinstance(obj, dict):
            # A serialized custom object (dict with a registered
            # 'class_name') is deserialized as a whole.
            if ('class_name' in obj) and (obj['class_name'] in custom_objects):
                return deserialize_keras_object(
                    obj,
                    module_objects=globals(),
                    custom_objects=custom_objects,
                    printable_module_name='loss function')
            deserialized = {}
            for key, value in obj.items():
                deserialized[key] = convert_custom_objects(value)
            return deserialized
        if obj in custom_objects:
            return custom_objects[obj]
        return obj

    # Examine the input type and open the HDF5 file.
    opened_new_file = not isinstance(filepath, h5py.File)
    if opened_new_file:
        psfx = os.path.splitext(filepath)[1].casefold()
        if (psfx != '.h5') and (psfx != '.hdf5'):
            filepath = filepath + '.h5'
        f = h5py.File(filepath, mode='r')
    else:
        f = filepath

    # Infer the headpath if set None: strip checkpoint suffixes such as
    # '-NN', '_loss...' or '_acc...' from the weight-file name.
    if headpath is None:
        headpath = f.filename
        if hasattr(headpath, 'decode'):
            headpath = headpath.decode('utf-8')
        headpath = os.path.splitext(headpath)[0]
        ind = headpath.rfind('-')
        if ind != -1:
            headpath = headpath[:ind]
        ind = headpath.find('_loss')
        if ind != -1:
            headpath = headpath[:ind]
        ind = headpath.find('_acc')
        if ind != -1:
            headpath = headpath[:ind]
        headpath = headpath + '.json'

    # Examine the input type and open the JSON file.
    opened_new_head = not isinstance(headpath, io.IOBase)
    if opened_new_head:
        psfx = os.path.splitext(headpath)[1].casefold()
        if (psfx != '.json'):
            headpath = headpath + '.json'
        fh = open(headpath, 'r')
    else:
        fh = headpath

    # Check optimizer configuration file when setting `compile`.
    if compile:
        if optmpath is None:
            optmpath = f.filename
            if hasattr(optmpath, 'decode'):
                optmpath = optmpath.decode('utf-8')
            optmpath = os.path.splitext(optmpath)[0] + '.json'

        # Avoid clashing with the model-config file.
        if isinstance(optmpath, str) and optmpath == headpath:
            optmpath = os.path.splitext(optmpath)[0] + '_opt.json'

        # Examine the file existence and open the JSON file.
        opened_new_optm = not isinstance(optmpath, io.IOBase)
        if opened_new_optm:
            psfx = os.path.splitext(optmpath)[1].casefold()
            if (psfx != '.json'):
                optmpath = optmpath + '.json'
            if os.path.isfile(optmpath):
                fo = open(optmpath, 'r')
            else:
                fo = None
        else:
            fo = optmpath
    else:
        fo = None

    model = None
    try:
        # Load all configurations from JSON file.
        json_dict = json.loads(fh.read())
        # instantiate model
        model_config = json_dict.get('model_config')
        if model_config is None:
            raise ValueError('No model found in config file.')
        model = model_from_config(model_config, custom_objects=custom_objects)

        # set weights
        load_weights_from_hdf5_group(f['model_weights'], json_dict,
                                     model.layers)

        if compile:
            # instantiate optimizer
            if fo is not None:
                json_optm_dict = json.loads(fo.read())
            else:
                json_optm_dict = dict()
            training_config = json_optm_dict.get('training_config')
            if training_config is None:
                logging.warning(
                    'No training configuration found in save file: '
                    'the model was *not* compiled. Compile it manually.')
                return model
            optimizer_config = training_config['optimizer_config']
            optimizer = optimizers.deserialize(optimizer_config,
                                               custom_objects=custom_objects)

            # Recover loss functions and metrics.
            loss = convert_custom_objects(training_config['loss'])
            metrics = convert_custom_objects(training_config['metrics'])
            weighted_metrics = convert_custom_objects(
                training_config.get('weighted_metrics', None))
            sample_weight_mode = training_config['sample_weight_mode']

            if 'loss_weights' in f:
                loss_weights = load_loss_weights_from_hdf5_group(
                    f['loss_weights'], json_optm_dict)
            else:  # Compatibility for old versions.
                loss_weights = training_config['loss_weights']

            # Compile model.
            model.compile(optimizer=optimizer,
                          loss=loss,
                          metrics=metrics,
                          weighted_metrics=weighted_metrics,
                          loss_weights=loss_weights,
                          sample_weight_mode=sample_weight_mode)

            # Set optimizer weights.
            if 'optimizer_weights' in f:
                # Build train function (to get weight updates).
                # Models that aren't graph networks must wait until they are called
                # with data to _make_train_function() and so can't load optimizer
                # weights.
                if model._is_graph_network:  # pylint: disable=protected-access
                    model._make_train_function()
                    optimizer_weights_group = f['optimizer_weights']
                    optimizer_weight_names = load_attributes_from_hdf5_group(
                        json_dict, optimizer_weights_group.name,
                        'weight_names')
                    optimizer_weight_values = [
                        optimizer_weights_group[n]
                        for n in optimizer_weight_names
                    ]
                    try:
                        model.optimizer.set_weights(optimizer_weight_values)
                    except ValueError:
                        logging.warning('Error in loading the saved optimizer '
                                        'state. As a result, your model is '
                                        'starting with a freshly initialized '
                                        'optimizer.')
                else:
                    # Fix: the original message concatenated to
                    # "model isstarting"; a trailing space was missing.
                    logging.warning(
                        'Sequential models without an `input_shape` '
                        'passed to the first layer cannot reload their '
                        'optimizer state. As a result, your model is '
                        'starting with a freshly initialized optimizer.')

    finally:
        if opened_new_file:
            f.close()
        if opened_new_head:
            fh.close()
        if fo is not None:
            if opened_new_optm:
                fo.close()
    return model