Example no. 1
    def __init__(self,
                 X,
                 Y,
                 W,
                 kern,
                 feat=None,
                 mean_function=None,
                 Z=None,
                 **kwargs):
        """
        X is a data matrix, size N x D
        Y is a data matrix, size N x R
        Z is a matrix of pseudo inputs, size M x D
        kern, mean_function are appropriate GPflow objects
        This method only works with a Gaussian likelihood.
        """
        X = DataHolder(X)
        Y = DataHolder(Y)
        likelihood = likelihoods.Gaussian()
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function, **kwargs)
        self.feature = features.inducingpoint_wrapper(feat, Z)
        self.num_data = X.shape[0]

        self.W_prior = tf.ones(W.shape, dtype=settings.float_type) / W.shape[1]
        self.W = Parameter(W)
        self.num_inducing = Z.shape[0] * W.shape[1]
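For orientation, here is a minimal NumPy sketch of the shapes this constructor works with. The sizes N, D, M, K and the Dirichlet draw for W are illustrative assumptions, not part of the original code; the only facts taken from the snippet are the uniform prior over W's columns and num_inducing = Z.shape[0] * W.shape[1].

import numpy as np

N, D, M, K = 100, 2, 10, 3                     # data points, input dim, inducing points, columns of W
X = np.random.randn(N, D)                      # N x D data matrix
Y = np.random.randn(N, 1)                      # N x R (here R = 1)
Z = np.random.randn(M, D)                      # M x D pseudo inputs
W = np.random.dirichlet(np.ones(K), size=N)    # N x K, rows sum to 1 (illustrative initial values)

W_prior = np.ones((N, K)) / K                  # uniform prior over the K columns, as in the constructor
num_inducing = M * K                           # Z.shape[0] * W.shape[1]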
Example no. 2
    def __init__(self, dim_in, dim_out,
                 kern, likelihood,
                 feat=None,
                 mean_function=None,
                 q_diag=False,
                 whiten=True,
                 Z=None,
                 num_data=None,
                 q_mu=None,
                 q_sqrt=None,
                 **kwargs):

        super(SVGP, self).__init__(**kwargs)

        self.dim_in = dim_in
        self.dim_out = dim_out
        self.num_latent = dim_out
        self.mean_function = mean_function or Zero(output_dim=self.num_latent)
        self.kern = kern
        self.likelihood = likelihood

        self.q_diag, self.whiten = q_diag, whiten
        self.feature = features.inducingpoint_wrapper(feat, Z)

        # init variational parameters
        num_inducing = len(self.feature)
        self._init_variational_parameters(num_inducing, q_mu, q_sqrt, q_diag)

        # Create placeholders: input means, per-point full input covariances,
        # targets, and a scalar data-scaling factor
        self.X_mu_ph = tf.placeholder(settings.float_type, [None, dim_in])
        self.X_var_ph = tf.placeholder(settings.float_type, [None, dim_in, dim_in])
        self.Y_ph = tf.placeholder(settings.float_type, [None, dim_out])
        self.data_scale = tf.placeholder(settings.float_type, [])
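Since this variant takes uncertain inputs through TF1 placeholders, the sketch below shows plausible shapes for the arrays one would feed at run time. The sizes and the reading of data_scale as a dataset-to-minibatch ratio are assumptions; only the placeholder shapes come from the snippet.

import numpy as np

N, dim_in, dim_out = 32, 3, 1
X_mu = np.random.randn(N, dim_in)                   # would feed X_mu_ph: input means, N x dim_in
X_var = np.tile(0.01 * np.eye(dim_in), (N, 1, 1))   # would feed X_var_ph: full covariances, N x dim_in x dim_in
Y = np.random.randn(N, dim_out)                     # would feed Y_ph: targets, N x dim_out
data_scale = 1000.0 / N                             # would feed data_scale (e.g. dataset size / batch size)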
Example no. 3
    def __init__(self, input_dim, output_dim, num_inducing, kernel,
                 mean_function=None, multitask=False, name=None):
        """
        input_dim is an integer
        output_dim is an integer
        num_inducing is the number of inducing inputs
        kernel is a kernel object (or list of kernel objects)
        """

        super(Layer, self).__init__(name=name)

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_inducing = num_inducing
        if multitask:
            Z = np.zeros((self.num_inducing, self.input_dim + 1))
        else:
            Z = np.zeros((self.num_inducing, self.input_dim))

        self.feature = inducingpoint_wrapper(None, Z)

        if isinstance(kernel, list):
            self.kernel = ParamList(kernel)
        else:
            self.kernel = kernel

        self.mean_function = mean_function or Zero(output_dim=self.output_dim)

        shape = (self.num_inducing, self.output_dim)

        self.q_mu = Parameter(np.zeros(shape))

        # stack of identity Cholesky factors, shape (output_dim, num_inducing, num_inducing)
        q_sqrt = np.vstack([np.expand_dims(np.eye(self.num_inducing), 0)
                            for _ in range(self.output_dim)])
        self.q_sqrt = Parameter(q_sqrt)
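The q_sqrt initialisation above builds one identity Cholesky factor per output. This standalone check, with illustrative sizes, confirms the resulting shape and that np.stack is equivalent to the vstack/expand_dims combination used here.

import numpy as np

M, P = 10, 2                                   # num_inducing, output_dim
q_mu = np.zeros((M, P))                        # variational means
q_sqrt = np.vstack([np.expand_dims(np.eye(M), 0) for _ in range(P)])
assert q_sqrt.shape == (P, M, M)               # one M x M Cholesky factor per output
assert np.array_equal(q_sqrt, np.stack([np.eye(M) for _ in range(P)]))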
Example no. 4
    def __init__(self, input_dim, output_dim, num_inducing, kernel_list,
                 share_Z=False, mean_function=None, multitask=False, name=None):

        if output_dim % len(kernel_list) != 0:
            raise ValueError("Output dimension must be a multiple of the number of kernels")

        super(MultikernelLayer, self).__init__(
            input_dim=input_dim,
            output_dim=output_dim,
            num_inducing=num_inducing,
            kernel=kernel_list,
            mean_function=mean_function,
            multitask=multitask,
            name=name)

        self.num_kernels = len(kernel_list)
        self._shared_Z = share_Z
        self.offset = self.output_dim // self.num_kernels  # outputs handled by each kernel

        if not self._shared_Z:
            del self.feature
            if multitask:
                Z = np.zeros((self.num_inducing, self.input_dim + 1))
            else:
                Z = np.zeros((self.num_inducing, self.input_dim))

            self.feature = ParamList([
                inducingpoint_wrapper(None, Z.copy())
                for _ in range(self.num_kernels)
            ])
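A small sketch of the bookkeeping in this constructor, with illustrative sizes: output_dim must divide evenly among the kernels, and with share_Z=False each kernel gets its own copy of Z.

import numpy as np

input_dim, output_dim, num_inducing = 2, 6, 10
num_kernels = 3
assert output_dim % num_kernels == 0                   # enforced by the ValueError above
offset = output_dim // num_kernels                     # outputs handled by each kernel
Z = np.zeros((num_inducing, input_dim))
Z_per_kernel = [Z.copy() for _ in range(num_kernels)]  # the share_Z=False case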
Example no. 5
    def __init__(self,
                 X,
                 Y,
                 kern,
                 likelihood,
                 feat=None,
                 mean_function=None,
                 num_latent=None,
                 q_diag=False,
                 whiten=True,
                 minibatch_size=None,
                 Z=None,
                 num_data=None,
                 q_mu=None,
                 q_sqrt=None,
                 **kwargs):
        """
        - X is a data matrix, size N x D
        - Y is a data matrix, size N x P
        - kern, likelihood, mean_function are appropriate GPflow objects
        - Z is a matrix of pseudo inputs, size M x D
        - num_latent is the number of latent processes to use; defaults to one.
        - q_diag is a boolean. If True, the covariance is approximated by a
          diagonal matrix.
        - whiten is a boolean. If True, we use the whitened representation of
          the inducing points.
        - minibatch_size, if not None, turns on mini-batching with that size.
        - num_data is the total number of observations; defaults to X.shape[0]
          (relevant when feeding in external minibatches)
        """
        # sort out the X, Y into MiniBatch objects if required.
        if minibatch_size is None:
            X = DataHolder(X)
            Y = DataHolder(Y)
        else:
            X = Minibatch(X, batch_size=minibatch_size, seed=0)
            Y = Minibatch(Y, batch_size=minibatch_size, seed=0)

        # init the super class, accept args
        if num_latent is None:
            num_latent = 1
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function,
                         num_latent, **kwargs)
        self.num_data = num_data or X.shape[0]
        self.num_classes = X.shape[1]
        self.q_diag, self.whiten = q_diag, whiten
        self.feature = features.inducingpoint_wrapper(feat, Z)

        # init variational parameters
        num_inducing = len(self.feature)

        self._init_variational_parameters(num_inducing, q_mu, q_sqrt, q_diag)
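The Minibatch objects above cycle through seeded random batches of X and Y. A rough NumPy approximation of the idea follows; the real class's shuffling scheme may differ, and the sizes are illustrative.

import numpy as np

N, D, minibatch_size = 100, 2, 16
X = np.random.randn(N, D)
rng = np.random.RandomState(0)                 # seed=0, matching the constructor
order = rng.permutation(N)
X_batch = X[order[:minibatch_size]]            # one minibatch of rows
scale = N / minibatch_size                     # num_data / batch size, to rescale minibatch likelihood sums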
Example no. 6
    def __init__(self,
                 X,
                 Y,
                 W1,
                 W1_index,
                 W2,
                 W2_index,
                 kern,
                 feat=None,
                 mean_function=None,
                 Z=None,
                 **kwargs):
        """
        X is a data matrix, size N x D
        Y is a data matrix, size N x R
        Z is a matrix of pseudo inputs, size M x D
        W1, size NxK
        W1_index PxL

        W2, size NxL
        W2_index PxL

        kern, mean_function are appropriate GPflow objects
        This method only works with a Gaussian likelihood.
        """
        X = DataHolder(X)
        Y = DataHolder(Y, fix_shape=True)
        likelihood = likelihoods.Gaussian()
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function, **kwargs)
        self.feature = features.inducingpoint_wrapper(feat, Z)
        self.num_data = X.shape[0]

        self.W1_prior = Parameter(
            np.log(np.ones(W1.shape[1], dtype=settings.float_type) / W1.shape[1]),
            trainable=False)
        self.W1 = Parameter(W1)
        self.W1_index = DataHolder(W1_index, dtype=np.int32, fix_shape=True)
        self.K = W1.shape[1]

        self.W2_prior = Parameter(
            np.log(np.ones(W2.shape[1], dtype=settings.float_type) / W2.shape[1]),
            trainable=False)
        self.W2 = Parameter(W2)
        self.W2_index = DataHolder(W2_index, dtype=np.int32, fix_shape=True)
        self.L = W2.shape[1]

        self.num_inducing = Z.shape[0]
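W1_prior and W2_prior above store the log of a uniform categorical distribution. This sketch, with an illustrative K, verifies that exponentiating recovers the uniform probabilities.

import numpy as np

K = 4                                          # W1.shape[1]
W1_prior = np.log(np.ones(K) / K)              # log-uniform prior, as in the constructor
p = np.exp(W1_prior)
assert np.allclose(p, 1.0 / K)                 # each component has probability 1/K
assert np.isclose(p.sum(), 1.0)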
Example no. 7
    def __init__(self, X, Y, W, kern, idx=None, feat=None, Z=None,
                 mean_function=None, q_diag=False, whiten=False,
                 q_mu=None, q_sqrt=None,
                 minibatch_size=None, num_latent=None, **kwargs):
        """
        X is a data matrix, size N x D
        Y is a data matrix, size N x R
        Z is a matrix of pseudo inputs, size M x D
        kern, mean_function are appropriate GPflow objects
        This method only works with a Gaussian likelihood.
        """
        num_data = X.shape[0]

        if minibatch_size is None:
            X = DataHolder(X, fix_shape=True)
            Y = DataHolder(Y, fix_shape=True)

        else:
            X = Minibatch(X, batch_size=minibatch_size, seed=0)
            Y = Minibatch(Y, batch_size=minibatch_size, seed=0)

        # init the super class; num_latent is set to one latent GP per column
        # of W, overriding any value passed in
        likelihood = likelihoods.Gaussian()
        num_latent = W.shape[1]
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function,
                         num_latent=num_latent, **kwargs)

        if minibatch_size is not None:
            idx = Minibatch(np.arange(num_data), batch_size=minibatch_size, seed=0, dtype=np.int32)

        self.idx = idx
        self.W = Parameter(W, trainable=False)
        self.K = self.W.shape[1]
        self.W_prior = Parameter(np.ones(self.K) / self.K, trainable=False)
        self.num_data = num_data
        self.feature = features.inducingpoint_wrapper(feat, Z)

        self.minibatch_size = minibatch_size
        self.q_diag, self.whiten = q_diag, whiten

        # init variational parameters
        num_inducing = len(self.feature)
        self._init_variational_parameters(
            num_inducing, q_mu, q_sqrt, q_diag)
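When minibatching is on, idx carries the row numbers of the current batch (drawn with the same seed as the X and Y minibatches) so that rows of the fixed assignment matrix W can be gathered to match. A NumPy sketch of that alignment follows; the sizes, the Dirichlet initialisation, and the simplified shuffling are illustrative assumptions.

import numpy as np

N, K, minibatch_size = 100, 3, 16
W = np.random.dirichlet(np.ones(K), size=N)    # fixed N x K assignments (illustrative values)
rng = np.random.RandomState(0)                 # same seed as the X/Y Minibatch objects
idx = rng.permutation(N)[:minibatch_size].astype(np.int32)
W_batch = W[idx]                               # W rows aligned with the X/Y minibatch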
Example no. 8
    def __init__(self,
                 X,
                 Y,
                 kern,
                 likelihood,
                 mean_function=None,
                 feat=None,
                 Z=None,
                 q_diag=False,
                 whiten=True,
                 minibatch_size=None,
                 num_data=None,
                 num_latent=None,
                 q_mu=None,
                 q_sqrt=None,
                 alpha=None,
                 alpha_tilde=None,
                 **kwargs):
        """
        - X is a data matrix, size N x D
        - Y contains the annotations: a numpy array of per-datapoint matrices
          with 2 columns, each row a pair (annotator, annotation).
        - kern, likelihood, mean_function are appropriate GPflow objects
        - feat and Z define the pseudo inputs; usually feat=None and Z is size M x D
        - q_diag, boolean indicating whether the posterior covariance must be diagonal
        - whiten, boolean indicating whether a whitened representation of the inducing points is used
        - minibatch_size, if not None, turns on mini-batching with that size
        - num_data is the total number of observations; defaults to X.shape[0] (relevant when feeding in external minibatches)
        - num_latent is the number of latent GPs to be used. For multi-class likelihoods, this equals the number of classes. However, for many binary likelihoods, num_latent=1.
        - q_mu (M x K), q_sqrt (M x K or K x M x M), alpha (A x K x K), alpha_tilde (A x K x K): initializations for these parameters (all but alpha are estimated).
        """
        if minibatch_size is None:
            X = DataHolder(X)
        else:
            X = Minibatch(X, batch_size=minibatch_size, seed=0)
        class_keys = np.unique(np.concatenate([y[:, 1] for y in Y]))
        num_classes = len(class_keys)
        num_latent = num_latent or num_classes
        GPModel.__init__(self, X, None, kern, likelihood, mean_function,
                         num_latent, **kwargs)
        self.class_keys = class_keys
        self.num_classes = num_classes
        self.num_latent = num_latent
        self.annot_keys = np.unique(np.concatenate([y[:, 0] for y in Y]))
        self.num_annotators = len(self.annot_keys)
        self.num_data = num_data or X.shape[0]
        self.q_diag, self.whiten = q_diag, whiten
        self.feature = features.inducingpoint_wrapper(feat, Z)
        self.num_inducing = len(self.feature)

        ###### Initializing Y_idxs as minibatch or placeholder (and the associated idxs to slice q_unn) ######
        startTime = time.time()
        Y_idxs = np.array([
            np.stack((np.array(
                [np.flatnonzero(v == self.annot_keys)[0] for v in y[:, 0]]),
                      np.array([
                          np.flatnonzero(v == self.class_keys)[0]
                          for v in y[:, 1]
                      ])),
                     axis=1) for y in Y
        ])  # same as Y, but with contiguous indices instead of raw keys
        S = np.max([v.shape[0] for v in Y_idxs])
        # Pad every annotation matrix to the same length S, using the sentinel
        # row [num_annotators, 0] rather than -1 (a CPU-friendly modification
        # noted as the "pmr modification" in the original source)
        aux = np.array([self.num_annotators, 0])
        Y_idxs_cr = np.array([
            np.concatenate((y, np.tile(aux, (S - y.shape[0], 1))), axis=0)
            for y in Y_idxs
        ]).astype(np.int16)  # N x S x 2

        if minibatch_size is None:
            self.Y_idxs_cr = DataHolder(Y_idxs_cr)
            self.idxs_mb = DataHolder(np.arange(self.num_data))
        else:
            self.Y_idxs_cr = Minibatch(Y_idxs_cr,
                                       batch_size=minibatch_size,
                                       seed=0)
            self.idxs_mb = Minibatch(np.arange(self.num_data),
                                     batch_size=minibatch_size,
                                     seed=0)
        print("Time taken in Y_idxs creation:", time.time() - startTime)

        ########## Initializing q #####################################
        startTime = time.time()
        q_unn = np.array(
            [np.bincount(y[:, 1], minlength=self.num_classes) for y in Y_idxs])
        q_unn = q_unn + np.ones(q_unn.shape)
        q_unn = q_unn / np.sum(q_unn, axis=1, keepdims=True)
        self.q_unn = Parameter(q_unn, transform=transforms.positive)  # N x K
        print("Time taken in q_unn initialization:", time.time() - startTime)

        ######## Initializing alpha (fixed) and alpha_tilde (trainable) ########
        if alpha is None:
            alpha = np.ones(
                (self.num_annotators, self.num_classes, self.num_classes),
                dtype=settings.float_type)  # A x K x K
        self.alpha = Parameter(alpha,
                               transform=transforms.positive,
                               trainable=False)

        startTime = time.time()
        alpha_tilde = self._init_behaviors(q_unn, Y_idxs)
        print("Time taken in alpha_tilde initialization:",
              time.time() - startTime)
        self.alpha_tilde = Parameter(
            alpha_tilde, transform=transforms.positive)  # A x K x K
        ##### Initializing the variational parameters #####
        self._init_variational_parameters(q_mu, q_sqrt)
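The densest step in this constructor is turning raw (annotator, annotation) pairs into contiguous indices and padding every per-datapoint matrix to the common length S with the sentinel row [num_annotators, 0]. Here is an isolated sketch of that transformation; the keys and sizes are made up for illustration.

import numpy as np

annot_keys = np.array([7, 9])                  # raw annotator ids (illustrative)
class_keys = np.array([10, 20, 30])            # raw class labels (illustrative)
y = np.array([[9, 30], [7, 10]])               # one datapoint's (annotator, annotation) pairs

# map raw keys to contiguous indices, as in the constructor
y_idx = np.stack((
    np.array([np.flatnonzero(a == annot_keys)[0] for a in y[:, 0]]),
    np.array([np.flatnonzero(c == class_keys)[0] for c in y[:, 1]]),
), axis=1)
assert y_idx.tolist() == [[1, 2], [0, 0]]

# pad to the maximum annotation count S with the sentinel row [num_annotators, 0]
S, num_annotators = 3, len(annot_keys)
pad = np.tile(np.array([num_annotators, 0]), (S - y_idx.shape[0], 1))
y_padded = np.concatenate((y_idx, pad), axis=0).astype(np.int16)  # S x 2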
Example no. 9
    def __init__(self, X, Y, W1, W2, kern, likelihood,
                 idx=None, W1_idx=None, W2_idx=None, feat=None,
                 mean_function=None,
                 num_latent=None,
                 q_diag=False,
                 whiten=True,
                 minibatch_size=None,
                 Z=None,
                 num_data=None,
                 q_mu=None,
                 q_sqrt=None,
                 **kwargs):
        """
        - X is a data matrix, size N x D
        - Y is a data matrix, size N x P
        - kern, likelihood, mean_function are appropriate GPflow objects
        - Z is a matrix of pseudo inputs, size M x D
        - num_latent is the number of latent processes to use; defaults to
          Y.shape[1]
        - q_diag is a boolean. If True, the covariance is approximated by a
          diagonal matrix.
        - whiten is a boolean. If True, we use the whitened representation of
          the inducing points.
        - minibatch_size, if not None, turns on mini-batching with that size.
        - num_data is the total number of observations; defaults to X.shape[0]
          (relevant when feeding in external minibatches)
        """
        # sort out the X, Y into MiniBatch objects if required.
        num_data = num_data or X.shape[0]  # respect an externally supplied num_data

        if minibatch_size is None:
            X = DataHolder(X)
            Y = DataHolder(Y)

            if W1_idx is not None:
                W1_idx = DataHolder(W1_idx, fix_shape=True)

            if W2_idx is not None:
                W2_idx = DataHolder(W2_idx, fix_shape=True)
        else:
            X = Minibatch(X, batch_size=minibatch_size, seed=0)
            Y = Minibatch(Y, batch_size=minibatch_size, seed=0)
            
            idx = Minibatch(np.arange(num_data), batch_size=minibatch_size, seed=0, dtype=np.int32)
            if W1_idx is not None:
                W1_idx = Minibatch(
                    W1_idx, batch_size=minibatch_size, seed=0, dtype=np.int32)

            if W2_idx is not None:
                W2_idx = Minibatch(
                    W2_idx, batch_size=minibatch_size, seed=0, dtype=np.int32)

        # init the super class; num_latent is one latent GP per (W1, W2) column
        # pair, overriding any value passed in
        num_latent = W1.shape[1] * W2.shape[1]
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function, num_latent, **kwargs)
        self.num_data = num_data or X.shape[0]
        self.q_diag, self.whiten = q_diag, whiten
        self.feature = features.inducingpoint_wrapper(feat, Z)

        self.idx = idx
        self.W1_idx = W1_idx
        self.W2_idx = W2_idx

        self.K1 = W1.shape[1]
        self.W1 = Parameter(W1, trainable=False, dtype=settings.float_type)
        self.W1_prior = Parameter(np.ones(self.K1) / self.K1, trainable=False)

        self.K2 = W2.shape[1]
        self.W2 = Parameter(W2, trainable=False, dtype=settings.float_type)
        self.W2_prior = Parameter(np.ones(self.K2) / self.K2, trainable=False)

        # init variational parameters
        num_inducing = len(self.feature)
        self._init_variational_parameters(num_inducing, q_mu, q_sqrt, q_diag)
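Finally, a shape sketch for the double mixing in this model: the number of latent GPs is the product of the column counts of W1 and W2, each with its own uniform prior. The sizes and the Dirichlet draws are illustrative assumptions; only the num_latent product and the uniform priors come from the constructor.

import numpy as np

N, K1, K2 = 100, 3, 2
W1 = np.random.dirichlet(np.ones(K1), size=N)  # N x K1 (illustrative initial values)
W2 = np.random.dirichlet(np.ones(K2), size=N)  # N x K2
num_latent = K1 * K2                           # one latent GP per (k1, k2) pair
W1_prior = np.ones(K1) / K1                    # uniform priors, matching the constructor
W2_prior = np.ones(K2) / K2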