Example #1
    def __init__(self,
                 file_writer: LogdirWriter,
                 model: Model,
                 minibatch_size: Optional[int] = 100,
                 display_progress: Optional[bool] = True) -> None:
        """
        :param file_writer: Event file writer object.
        :param model: The model whose full log marginal likelihood (LML) is computed.
        :param minibatch_size: Number of data points per minibatch.
        :param display_progress: If True, the task displays progress while computing the LML.
        """

        super().__init__(file_writer, model)
        self._minibatch_size = minibatch_size
        self._full_lml = tf.placeholder(settings.float_type, shape=())
        self._summary = tf.summary.scalar(model.name + '/full_lml',
                                          self._full_lml)

        self.wrapper = None  # type: Callable[[Iterator], Iterator]
        if display_progress:  # pragma: no cover
            try:
                import tqdm
                self.wrapper = tqdm.tqdm
            except ImportError:
                logger = settings.logger()
                if logger.isEnabledFor(logging.WARNING):
                    logger.warning(
                        "LML monitor task: to display progress install `tqdm`."
                    )
        if self.wrapper is None:
            self.wrapper = lambda x: x
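
A usage sketch for the task above, assuming this is the __init__ of GPflow 1.x's `LmlToTensorBoardTask` in `gpflow.training.monitor`; the model, log directory, and schedule below are placeholders, not part of the original snippet:

import gpflow
import gpflow.training.monitor as mon

# `model` is any gpflow.models.Model; './logs' is an arbitrary event-file directory.
file_writer = mon.LogdirWriter('./logs')
lml_task = mon.LmlToTensorBoardTask(file_writer, model, minibatch_size=200) \
    .with_name('full_lml') \
    .with_condition(mon.PeriodicIterationCondition(100))

session = model.enquire_session()
global_step = mon.create_global_step(session)
# Run the task every 100 iterations while optimising the model.
with mon.Monitor([lml_task], session, global_step) as monitor:
    gpflow.train.AdamOptimizer(0.01).minimize(model, maxiter=1000, step_callback=monitor)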
Example #2
    def __init__(self, file_writer: LogdirWriter, model: Model, minibatch_size: Optional[int] = 100,
                 display_progress: Optional[bool] = True) -> None:
        """
        :param file_writer: Event file writer object.
        :param model: The model whose full log marginal likelihood (LML) is computed.
        :param minibatch_size: Number of data points per minibatch.
        :param display_progress: If True, the task displays progress while computing the LML.
        """

        super().__init__(file_writer, model)
        self._minibatch_size = minibatch_size
        self._full_lml = tf.placeholder(settings.tf_float, shape=())
        self._summary = tf.summary.scalar(model.name + '/full_lml', self._full_lml)

        self.wrapper = None  # type: Callable[[Iterator], Iterator]
        if display_progress:  # pragma: no cover
            try:
                import tqdm
                self.wrapper = tqdm.tqdm
            except ImportError:
                logger = settings.logger()
                if logger.isEnabledFor(logging.WARNING):
                    logger.warning("LML monitor task: to display progress install `tqdm`.")
        if self.wrapper is None:
            self.wrapper = lambda x: x
Example #3
def restore_session(session, checkpoint_dir):
    """
    Restores a TensorFlow session from the latest checkpoint.
    :param session: The TF session to restore into.
    :param checkpoint_dir: Directory containing the checkpoint files.
    """
    checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
    logger = settings.logger()
    if logger.isEnabledFor(logging.INFO):
        logger.info("Restoring session from `%s`.", checkpoint_path)

    saver = tf.train.Saver(max_to_keep=1)
    saver.restore(session, checkpoint_path)
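
An illustrative call; the graph containing the model variables must already be built in this session so the Saver can match variable names (the checkpoint directory is a placeholder):

import tensorflow as tf

session = tf.Session()
# ... build the model graph here, matching the one that wrote the checkpoint ...
restore_session(session, './checkpoints')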
Example #4
def restore_session(session: tf.Session, checkpoint_dir: str,
                    saver: Optional[tf.train.Saver] = None) -> None:
    """
    Restores a TensorFlow session from the latest checkpoint.
    :param session: The TF session to restore into.
    :param checkpoint_dir: Directory containing the checkpoint files.
    :param saver: The saver object; if not provided, a default saver is created.
    """
    checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
    logger = settings.logger()
    if logger.isEnabledFor(logging.INFO):
        logger.info("Restoring session from `%s`.", checkpoint_path)

    saver = saver or get_default_saver()
    saver.restore(session, checkpoint_path)
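
`get_default_saver` is referenced but not shown. A minimal plausible definition, assuming it simply wraps tf.train.Saver over all saveable variables (the max_to_keep default is an assumption):

import tensorflow as tf

def get_default_saver(max_to_keep: int = 3) -> tf.train.Saver:
    # Hypothetical helper: keeps the `max_to_keep` most recent checkpoints.
    return tf.train.Saver(max_to_keep=max_to_keep)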
Example #5
import tensorflow as tf
from gpflow import settings

logger = settings.logger()

def conditional(Kmn, Kmm, Knn, f, *, full_cov=False, q_sqrt=None, white=False):
    """
    Given g1 and g2, and distributions p and q such that
      p(g2) = N(g2; 0, Kmm)
      p(g1) = N(g1; 0, Knn)
      p(g1|g2) = N(g1; Knm Kmm^{-1} g2, Knn - Knm Kmm^{-1} Kmn)
    and
      q(g2) = N(g2; f, q_sqrt q_sqrt^T),
    this method computes the mean and (co)variance of
      q(g1) = \int q(g2) p(g1|g2) dg2
    :param Kmn: P x M x N
    :param Kmm: M x M
    :param Knn: P x N x N  or P x N
    :param f: M x R
    :param full_cov: bool
    :param q_sqrt: R x M x M (lower triangular)
    :param white: bool
    :return: N x R  or R x N x N
    """
    logger.debug("base conditional")
    # Compute kernel quantities: number of latent functions and the Cholesky of Kmm.
    num_func = tf.shape(f)[1]  # R

    Lm = tf.cholesky(Kmm)

    def solve_A(MN_Kmn):