Beispiel #1
0
def _get_model_weight_metrics(model: keras.models.Model,
                              obs_horizon,
                              n_channels,
                              n_noise_channels,
                              n_actions=4):
    """Compute variance statistics over the model's first weight matrix.

    The flat weight matrix is unflattened via
    ``food_search_env._unflatten_weight_matrix`` into a 4-D tensor — assumed
    layout ``(obs_horizon, obs_horizon, n_channels, n_actions)``, TODO
    confirm against that helper — and variances are reported for the whole
    matrix, the trailing noise channels, the spatial border, and the
    spatial interior.

    Parameters
    ----------
    model : :class:`keras.models.Model`
        Model whose first weight matrix (``model.get_weights()[0]``) is
        analysed.
    obs_horizon : int
        Spatial extent of the observation window.
    n_channels : int
        Number of observation channels.
    n_noise_channels : int
        Number of trailing channels assumed to carry pure noise.
    n_actions : int, optional
        Number of actions the weight matrix maps to. Defaults to ``4`` to
        preserve the original hard-coded behaviour.

    Returns
    -------
    dict
        Keys ``'W_variance'``, ``'W_noise_part_variance'``,
        ``'W_outside_part_variance'`` and ``'W_inside_part_variance'``,
        each a scalar variance.
    """
    weight_matrix = model.get_weights()[0]
    W_variance = np.var(weight_matrix)
    weight_tensor = food_search_env._unflatten_weight_matrix(weight_matrix,
                                                             obs_horizon,
                                                             n_channels,
                                                             n_actions=n_actions)

    # Variance over only the trailing channels that contain noise.
    noise_part = weight_tensor[:, :, -n_noise_channels:, :]
    W_noise_part_variance = np.var(noise_part)

    # Border of the spatial grid: full first/last columns plus first/last
    # rows with their corner entries excluded (already covered above).
    outside_part = np.r_[
        weight_tensor[:, 0, :].ravel(),
        weight_tensor[:, -1, :].ravel(),
        weight_tensor[0, 1:-1, :].ravel(),
        weight_tensor[-1, 1:-1, :].ravel()
    ]
    W_outside_part_variance = np.var(outside_part)

    # Interior of the spatial grid (border rows/columns stripped).
    inside_part = weight_tensor[1:-1, 1:-1, :]
    W_inside_part_variance = np.var(inside_part)

    return {
        'W_variance': W_variance,
        'W_noise_part_variance': W_noise_part_variance,
        'W_outside_part_variance': W_outside_part_variance,
        'W_inside_part_variance': W_inside_part_variance
    }
Beispiel #2
0
    def check_model_precision(self,
                              model: keras.models.Model,
                              state: "State") -> keras.models.Model:
        """ Check the model's precision and rewrite it if required.

        Rewrite an existing model's training precision mode from mixed-float16 to float32 or
        vice versa.

        This is not easy to do in keras, so we edit the model's config to change the dtype policy
        for compatible layers. Create a new model from this config, then port the weights from the
        old model to the new model.

        Parameters
        ----------
        model: :class:`keras.models.Model`
            The original saved keras model to rewrite the dtype
        state: ~:class:`plugins.train.model._base.model.State`
            The State information for the model

        Returns
        -------
        :class:`keras.models.Model`
            The model with the datatype updated (or the original model unchanged when no
            rewrite is possible/needed)
        """
        if get_backend() == "amd":  # Mixed precision not supported on amd
            return model

        if self.use_mixed_precision and not state.mixed_precision_layers:
            # Switching to mixed precision on a model which was started in FP32 prior to the
            # ability to switch between precisions on a saved model is not supported as we
            # do not have the compatible layer names
            logger.warning("Switching from Full Precision to Mixed Precision is not supported on "
                           "older model files. Reverting to Full Precision.")
            return model

        config = model.get_config()

        if not self.use_mixed_precision and not state.mixed_precision_layers:
            # Switched to Full Precision, get compatible layers from model if not already stored
            state.add_mixed_precision_layers(self._get_mixed_precision_layers(config["layers"]))

        # Flip the dtype policy in-place on the config for every compatible layer
        self._switch_precision(config["layers"], state.mixed_precision_layers)

        # Rebuild a fresh model from the edited config and carry the weights across
        new_model = keras.models.Model().from_config(config)
        new_model.set_weights(model.get_weights())
        logger.info("Mixed precision has been updated from '%s' to '%s'",
                    not self.use_mixed_precision, self.use_mixed_precision)
        del model  # release the superseded model before returning the rebuilt one
        return new_model