Example #1
def fill_mat(m, n, i=None, j=None):
    """
    Fill a matrix m of size [a, b] into a larger one [p, q],
    according to given indices i, j.
    """
    m, n = np.atleast_2d(m), np.atleast_2d(n)
    a, b = m.shape
    p, q = n.shape
    i = np.arange(a) if i is None else np.atleast_1d(i)
    j = np.arange(b) if j is None else np.atleast_1d(j)

    if a > p or b > q:
        raise ValueError("Shape error!")
    if len(i) != a or len(j) != b:
        raise ValueError("Indices do not match!")
    if not (max(i) < p and max(j) < q):
        raise ValueError("Indices out of bounds!")

    Ti = np.zeros((p, a))
    Tj = np.zeros((b, q))
    for u, v in enumerate(i):
        Ti[v, u] = 1
    for u, v in enumerate(j):
        Tj[u, v] = 1
    return Ti @ m @ Tj + n
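A minimal usage sketch for fill_mat above (assuming numpy is imported as np; the values are illustrative): a 2x2 block is placed at rows (0, 2) and columns (1, 3) of a 4x4 base matrix.

import numpy as np

m = np.array([[1, 2],
              [3, 4]])
n = np.zeros((4, 4))
out = fill_mat(m, n, i=[0, 2], j=[1, 3])
# out[0, 1] == 1, out[0, 3] == 2, out[2, 1] == 3, out[2, 3] == 4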
Example #2
    def _unnormalized_posterior(self, observations, ps):
        ps = np.atleast_1d(ps)
        observations = np.atleast_1d(observations)

        likelihood_term = self.likelihood(observations, ps)
        prior_term = self.prior(ps)

        return likelihood_term * prior_term
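The method above relies on self.likelihood and self.prior supplied by the surrounding class. A self-contained sketch of the same pattern, with a hypothetical Bernoulli likelihood and Beta(2, 2) prior, could look like this:

import numpy as np
from scipy.stats import bernoulli, beta

def unnormalized_posterior(observations, ps):
    # evaluate likelihood(observations | p) * prior(p) on a grid of p values
    observations = np.atleast_1d(observations)
    ps = np.atleast_1d(ps)
    likelihood = bernoulli.pmf(observations[:, None], ps[None, :]).prod(axis=0)
    return likelihood * beta.pdf(ps, 2, 2)

unnormalized_posterior([1, 0, 1], ps=np.linspace(0.1, 0.9, 5))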
Example #3
 def __init__(self, mu, var):
     self.norm_const = - 0.5*np.log(2*np.pi)
     self.mu = np.atleast_1d(mu).flatten()
     self.var = np.atleast_1d(var).flatten() 
     self.dim = np.prod(self.var.shape)
     assert(self.mu.shape == self.var.shape)
     self.std = np.sqrt(self.var)
     self.logstd = np.log(self.std)
Example #4
def log_p(x, z):
    x = np.atleast_1d(x)
    z = np.atleast_1d(z)
    lp = -gammaln(a0) + a0 * np.log(b0) \
         + (a0 - 1) * np.log(z) - b0 * z
    ll = np.sum(-gammaln(x[:, None] + 1) - z[None, :] +
                x[:, None] * np.log(z[None, :]),
                axis=0)
    return lp + ll
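log_p above depends on module-level Gamma prior hyperparameters a0 and b0 and on scipy.special.gammaln; a minimal usage sketch with illustrative values (assuming log_p is defined in the same module) might be:

import numpy as np
from scipy.special import gammaln

a0, b0 = 2.0, 1.0                  # illustrative Gamma(shape, rate) prior hyperparameters
x = np.array([3, 1, 4])            # observed Poisson counts
z_grid = np.linspace(0.1, 10.0, 50)
log_post = log_p(x, z_grid)        # unnormalized log-posterior of the rate z on the grid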
Example #5
    def Descent(self,
                scales,
                nuggets,
                nsteps=10,
                tolerance=1e-5,
                progress=DefaultOutput):
        history_para = []
        scales = np.atleast_1d(scales)
        nuggets = np.atleast_1d(nuggets)
        para = np.concatenate([nuggets, scales])
        self.step_size = np.full((1, ), self.step_nuggets_size)
        self.step_size = np.concatenate(
            [self.step_size,
             np.full(scales.shape, self.step_scales_size)])

        pCalculator = GradientDescentForEmulator.ProgressCalculator()
        progress = 0
        for i in range(nsteps):
            new_para, grad = self.StepDescent(para)

            # stop updating parameters that reach their max values
            idCap = new_para > self.scaleMax
            new_para[idCap] = self.scaleMax
            grad[idCap] = 0

            para = new_para
            history_para.append(new_para)
            (scales, nuggets) = new_para[1:], new_para[0]

            mag = np.linalg.norm(grad * self.step_size)

            progress = pCalculator.Get(nsteps, i, mag, tolerance)
            pub.sendMessage(
                "GradientProgress",
                step=i,
                progress=progress,
                mag=mag,
                nuggets=nuggets,
                scales=scales,
            )
            # or mag < 0.5*(self.step_scales_size + self.step_nuggets_size):
            if (mag < tolerance):
                break
        if progress < 100:
            pub.sendMessage(
                "GradientProgress",
                step=i,
                progress=100,
                mag=mag,
                nuggets=nuggets,
                scales=scales,
            )
        pub.sendMessage("GradientEnd")
        return np.array(history_para)
Example #6
 def train(self, indata, outdata, epochs, step, gain):
     print("DoubleHump is training!")
     self.v = np.zeros(len(self.parameters), dtype=float)
     for epoch in range(epochs):
         loss_avg = 0.0
         for sample in np.random.permutation(len(indata)):
             loss_avg += self.correct(np.atleast_1d(indata[sample]),
                                      np.atleast_1d(outdata[sample]),
                                      step, gain)
         loss_avg /= len(indata)
         if epoch % 10 == 0:
             print("Epoch: {0} | Loss: {1}".format(epoch, loss_avg))
     print("DoubleHump done training!")
Example #7
def logpdf(x, mu, sigma2):
    """
    not really logpdf. we need to use the weights
    to keep track of normalizing factors that differ
    across clusters
    """
    mask = np.where(np.logical_not(np.isnan(x)))
    x = np.atleast_1d(x[mask])
    mu = np.atleast_1d(mu[mask])
    D = x.size

    if D == 0:
        return 0
    sigma2 = sigma2 * np.ones(D)
    return np.sum([norm.logpdf(x[d], mu[d], np.sqrt(sigma2[d])) for d in range(D)])
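A minimal usage sketch for logpdf above (assuming numpy and scipy.stats.norm are in scope as in the source module): NaN entries are masked out, so only the observed dimensions contribute to the sum.

import numpy as np

x = np.array([0.5, np.nan, -1.0])
mu = np.zeros(3)
logpdf(x, mu, sigma2=1.0)   # sums univariate normal log-densities over the observed dims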
Example #8
    def fold(self, flat_val, free=None, validate_value=None):
        free = self._free_with_default(free)
        flat_val = np.atleast_1d(flat_val)

        if flat_val.ndim != 1:
            raise ValueError('The argument to fold must be a 1d vector.')

        expected_length = self.flat_length(free=free)
        if flat_val.size != expected_length:
            error_string = \
                'Wrong size for array.  Expected {}, got {}'.format(
                    str(expected_length),
                    str(flat_val.size))
            raise ValueError(error_string)

        if free:
            constrained_array = \
                _constrain_array(flat_val, self._lb, self._ub)
            return constrained_array.reshape(self._shape)
        else:
            folded_val = flat_val.reshape(self._shape)
            valid, msg = self.validate_folded(folded_val, validate_value)
            if not valid:
                raise ValueError(msg)
            return folded_val
Example #9
    def fold(self, flat_val, free=None, validate_value=None):
        free = self._free_with_default(free)
        flat_val = np.atleast_1d(flat_val)
        if len(flat_val.shape) != 1:
            raise ValueError('The argument to fold must be a 1d vector.')
        if flat_val.size != self.flat_length(free):
            error_string = \
                'Wrong size for parameter.  Expected {}, got {}'.format(
                    str(self.flat_length(free)), str(flat_val.size))
            raise ValueError(error_string)

        flat_length = self.__base_pattern.flat_length(free)
        folded_array = np.array([
            self.__base_pattern.fold(flat_val[self._stacked_obs_slice(
                item, flat_length)],
                                     free=free,
                                     validate_value=validate_value)
            for item in itertools.product(*self.__array_ranges)
        ])

        folded_val = np.reshape(folded_array, self.__shape)

        if not free:
            valid, msg = self.validate_folded(folded_val,
                                              validate_value=validate_value)
            if not valid:
                raise ValueError(msg)
        return folded_val
Example #10
    def fold(self, flat_val, free=None, validate_value=None):
        free = self._free_with_default(free)
        flat_val = np.atleast_1d(flat_val)
        if len(flat_val.shape) != 1:
            raise ValueError('The argument to fold must be a 1d vector.')
        flat_length = self.flat_length(free)
        if flat_val.size != flat_length:
            error_string = \
                ('Wrong size for pattern dictionary {}.\n' +
                 'Expected {}, got {}.').format(
                    str(self), str(flat_length), str(flat_val.size))
            raise ValueError(error_string)

        # TODO: add an option to do this -- and other operations -- in place.
        folded_val = OrderedDict()
        offset = 0
        for pattern_name, pattern in self.__pattern_dict.items():
            pattern_flat_length = pattern.flat_length(free)
            pattern_flat_val = flat_val[offset:(offset + pattern_flat_length)]
            offset += pattern_flat_length
            # Containers must not mix free and non-free values, so do not
            # use default values for free.
            folded_val[pattern_name] = \
                pattern.fold(pattern_flat_val,
                             free=free,
                             validate_value=validate_value)
        if not free:
            valid, msg = self.validate_folded(folded_val,
                                              validate_value=validate_value)
            if not valid:
                raise ValueError(msg)
        return folded_val
Example #11
    def cond_prob_live(self, frequency, recency, T):
        """
        Conditional probability alive.

        Compute the probability that a customer with history (frequency,
        recency, T) is currently alive.
        From https://www.researchgate.net/publication/247219660_Empirical_validation_and_comparison_of_models_for_customer_base_analysis
        Appendix A, eq. (5)

        Parameters
        ----------
        frequency: array or float
            historical frequency of customer.
        recency: array or float
            historical recency of customer.
        T: array or float
            age of the customer.

        Returns
        -------
        array:
            value representing probability of being alive

        """
        r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")
        return np.atleast_1d(1.0 / (1 + (a / (b + frequency)) *
                                    ((alpha + T) /
                                     (alpha + recency))**(r + frequency)))
Example #12
    def _get_responsibilities(self, pi, g, beta, mu_ivp, alpha):
        """ Gets the posterior responsibilities for each comp. of the mixture.
        """
        probs = [[]]*len(self.N_data)
        for i, ifx in enumerate(self._ifix):

            zM = self._forward(g, beta, mu_ivp[i], ifx)

            for q, yq in enumerate(self.Y_train_):
                logprob = norm.logpdf(
                    yq, zM[self.data_inds[q], :, q], scale=1/np.sqrt(alpha))

                # sum over the dimension component
                logprob = logprob.sum(-1)

                if probs[q] == []:
                    probs[q] = logprob

                else:
                    probs[q] = np.column_stack((probs[q], logprob))
        probs = [lp - pi for lp in probs]
        # subtract the maximum for a numerically stable exp-normalization
        probs = [p - np.atleast_1d(p.max(axis=-1))[:, None]
                 for p in probs]
        probs = [np.exp(p) / np.exp(p).sum(-1)[:, None] for p in probs]

        return probs
Example #13
    def conditional_probability_alive(self, frequency, recency, T):
        """
        Compute conditional probability alive.

        Compute the probability that a customer with history
        (frequency, recency, T) is currently alive.

        From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf

        Parameters
        ----------
        frequency: array or scalar
            historical frequency of customer.
        recency: array or scalar
            historical recency of customer.
        T: array or scalar
            age of the customer.

        Returns
        -------
        array
            value representing a probability
        """

        r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")

        log_div = (r + frequency) * np.log(
            (alpha + T) /
            (alpha + recency)) + np.log(a / (b + np.maximum(frequency, 1) - 1))

        return np.atleast_1d(np.where(frequency == 0, 1.0, expit(-log_div)))
Example #14
 def get_single_par_kl(self, single_free_par, ind):
     free_par = np.concatenate(
         [ self.free_par[:ind],
           np.atleast_1d(single_free_par),
           self.free_par[(ind + 1):]])
     self.glmm_par.set_free(free_par)
     return model.get_kl()
Example #15
    def _get_responsibilities(self, pi, g, beta, mu_ivp, alpha):
        """ Gets the posterior responsibilities for each comp. of the mixture.
        """
        probs = [[]] * len(self.N_data)
        for i, ifx in enumerate(self._ifix):

            zM = self._forward(g, beta, mu_ivp[i], ifx)

            for q, yq in enumerate(self.Y_train_):
                logprob = norm.logpdf(yq,
                                      zM[self.data_inds[q], :, q],
                                      scale=1 / np.sqrt(alpha))

                # sum over the dimension component
                logprob = logprob.sum(-1)

                if probs[q] == []:
                    probs[q] = logprob

                else:
                    probs[q] = np.column_stack((probs[q], logprob))
        probs = [lp - pi for lp in probs]
        # subtract the maximum for a numerically stable exp-normalization
        probs = [p - np.atleast_1d(p.max(axis=-1))[:, None] for p in probs]
        probs = [np.exp(p) / np.exp(p).sum(-1)[:, None] for p in probs]

        return probs
Example #16
    def fisher_information_matrix(self, params):
        """
        Computes the Fisher Information Matrix.

        Returns
        -------
        fisher : ndarray
            Fisher Information Matrix
        """
        n_params = len(np.atleast_1d(params))
        fisher = np.zeros(shape=(n_params, n_params))

        if not hasattr(self.mean, 'gradient'):
            _grad = lambda mean, argnum, params: jacobian(mean, argnum=argnum)(*params)
        else:
            _grad = lambda mean, argnum, params: mean.gradient(*params)[argnum]

        grad_mean = [_grad(self.mean, i, params) for i in range(n_params)]

        for i in range(n_params):
            for j in range(i, n_params):
                fisher[i, j] = np.nansum(grad_mean[i] * grad_mean[j])
                fisher[j, i] = fisher[i, j]

        return fisher / self.var
Example #17
def get_data_log_lik_terms(glmm_par, x_mat, y_vec, y_g_vec, gh_x, gh_w):
    e_beta = glmm_par['beta'].e()
    var_beta = glmm_par['beta'].var()

    # atleast_1d is necessary for indexing by y_g_vec to work right.
    e_u = np.atleast_1d(glmm_par['u'].e())
    var_u = np.atleast_1d(glmm_par['u'].var())

    # Log likelihood from data.
    z_mean = e_u[y_g_vec] + np.squeeze(np.matmul(x_mat, e_beta))
    z_sd = np.sqrt(
        var_u[y_g_vec] +
        np.squeeze(np.einsum('nk,k,nk->n', x_mat, var_beta, x_mat)))
    return \
        y_vec * z_mean - \
        modeling.get_e_logistic_term_guass_hermite(
            z_mean, z_sd, gh_x, gh_w, aggregate_all=False)
Example #18
 def _cumulative_hazard(self, params, times):
     times = np.atleast_1d(times)
     n = times.shape[0]
     times = times.reshape((n, 1))
     bp = np.append(self.breakpoints, [np.inf])
     M = np.minimum(np.tile(bp, (n, 1)), times)
     M = np.hstack([M[:, tuple([0])], np.diff(M, axis=1)])
     return np.dot(M, 1 / params)
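The method above reads self.breakpoints from the surrounding regression class. A standalone sketch of the same piecewise-constant cumulative-hazard computation (hypothetical names, not the class API) could be:

import numpy as np

def piecewise_cumulative_hazard(times, breakpoints, params):
    # params holds one inverse-rate per interval (len(breakpoints) + 1 values)
    times = np.atleast_1d(times).reshape((-1, 1))
    bp = np.append(breakpoints, [np.inf])
    M = np.minimum(np.tile(bp, (times.shape[0], 1)), times)
    M = np.hstack([M[:, [0]], np.diff(M, axis=1)])  # time spent in each interval
    return np.dot(M, 1 / params)

piecewise_cumulative_hazard([0.5, 2.0, 5.0],
                            breakpoints=[1.0, 3.0],
                            params=np.array([1.0, 2.0, 4.0]))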
Example #19
 def freeing_jacobian(self, folded_val, sparse=True):
     jac_array = \
         _unconstrain_array_jacobian(folded_val, self._lb, self._ub)
     jac_array = np.atleast_1d(jac_array).flatten()
     if sparse:
         return osp.sparse.diags(jac_array)
     else:
         return np.diag(jac_array)
Example #20
def one_hot(z, K):
    z = np.atleast_1d(z).astype(int)
    assert np.all(z >= 0) and np.all(z < K)
    shp = z.shape
    N = z.size
    zoh = np.zeros((N, K))
    zoh[np.arange(N), np.arange(K)[np.ravel(z)]] = 1
    zoh = np.reshape(zoh, shp + (K,))
    return zoh
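A minimal usage sketch for one_hot above: integer labels are mapped to one-hot rows, and a trailing axis of length K is appended to the input shape.

import numpy as np

z = np.array([0, 2, 1])
one_hot(z, K=3)
# array([[1., 0., 0.],
#        [0., 0., 1.],
#        [0., 1., 0.]])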
Example #21
    def _test_transform_output(self, original_fun, patterns, free, retnums,
                               original_is_flat):
        # original_fun must take no arguments.

        fun_trans = paragami.TransformFunctionOutput(original_fun, patterns,
                                                     free, original_is_flat,
                                                     retnums)

        # Check that the flattened and original function are the same.
        def check_equal(orig_val, trans_val, pattern, free):
            # Use the flat representation to check that parameters are equal.
            if original_is_flat:
                assert_array_almost_equal(
                    orig_val, pattern.flatten(trans_val, free=free))
            else:
                assert_array_almost_equal(pattern.flatten(orig_val, free=free),
                                          trans_val)

        patterns_array = np.atleast_1d(patterns)
        free_array = np.atleast_1d(free)
        retnums_array = np.atleast_1d(retnums)

        orig_rets = original_fun()
        trans_rets = fun_trans()
        if isinstance(orig_rets, tuple):
            self.assertTrue(len(orig_rets) == len(trans_rets))

            # Check that the non-transformed return values are the same.
            for ind in range(len(orig_rets)):
                if not np.isin(ind, retnums):
                    assert_array_almost_equal(orig_rets[ind], trans_rets[ind])

            # Check that the transformed return values are the same.
            for ret_ind in range(len(retnums_array)):
                ind = retnums_array[ret_ind]
                check_equal(orig_rets[ind], trans_rets[ind],
                            patterns_array[ret_ind], free_array[ret_ind])
        else:
            check_equal(orig_rets, trans_rets, patterns_array[0],
                        free_array[0])

        # Check that the string method works.
        str(fun_trans)
Example #22
def get_mle_data_log_lik_terms(mle_par, x_mat, y_vec, y_g_vec):
    beta = mle_par['beta'].get()

    # atleast_1d is necessary for indexing by y_g_vec to work right.
    e_u = np.atleast_1d(mle_par['u'].get())

    # Log likelihood from data.
    z = e_u[y_g_vec] + np.squeeze(np.matmul(x_mat, beta))

    return y_vec * z - np.log1p(np.exp(z))
Example #23
def grouped_sum(x, groups, num_groups=None):
    """Sum the array `x` by its first index according to indices in `groups`.

    Parameters
    ------------
    x: numpy.ndarray
        An array of dimension (N, D1, ..., DK)
    groups:
        A length-N vector of zero-indexed integers mapping the first index
        of x to groups.
    num_groups:
        Optional, the total number of groups.  If unspecified, one plus the
        largest element of `groups` is used.

    Returns
    -----------
    A (num_groups, D1, ..., DK) dimensional array, where entry [g, ...]
    contains the sum of the entries `x[n, :]` where `groups[n] == g`.
    """
    x = np.atleast_1d(x)
    groups = np.atleast_1d(groups).astype('int64')
    if (groups.ndim > 1):
        raise ValueError('groups must be a vector.')

    n_obs = len(groups)
    if x.shape[0] != n_obs:
        raise ValueError('The first dimension of x must match the length of groups')
    max_group = np.max(groups)
    if num_groups is None:
        num_groups = max_group + 1
    else:
        if max_group >= num_groups:
            raise ValueError(
                'The largest group is >= the number of groups.')

    result = np.zeros((num_groups, ) + x.shape[1:])
    for n in range(n_obs):
        if x.ndim > 1:
            result[groups[n], :] += x[n, :]
        else:
            result[groups[n]] += x[n]
    return result
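A minimal usage sketch for grouped_sum above: each row of x is added into the output row selected by the corresponding entry of groups.

import numpy as np

x = np.array([[1., 2.],
              [3., 4.],
              [5., 6.]])
groups = np.array([0, 1, 0])
grouped_sum(x, groups)
# array([[6., 8.],
#        [3., 4.]])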
Example #24
 def flatten(self, folded_val, free=None, validate_value=None):
     free = self._free_with_default(free)
     folded_val = np.atleast_1d(folded_val)
     valid, msg = self.validate_folded(folded_val, validate_value)
     if not valid:
         raise ValueError(msg)
     if free:
         return \
             _unconstrain_array(folded_val, self._lb, self._ub).flatten()
     else:
         return folded_val.flatten()
Example #25
    def fisher_information_matrix(self, theta):
        n_params = len(np.atleast_1d(theta))
        fisher = np.empty(shape=(n_params, n_params))
        grad_mean = self.mean.gradient(*theta)
        mean = self.mean(*theta)

        for i in range(n_params):
            for j in range(i, n_params):
                fisher[i, j] = (grad_mean[i] * grad_mean[j] / mean).sum()
                fisher[j, i] = fisher[i, j]
        return len(self.data) * fisher / (1 - self.mean(*theta))
Example #26
    def __init__(self,
                 length_scales: np.ndarray,
                 sigma_f: Union[np.ndarray, float] = 1,
                 sigma_eps: Union[np.ndarray, float] = 1,
                 length_scale_pen: float = 100,
                 signal_to_noise: float = 500):
        """
        Gaussian Process Regression
        penalty parameters are for the modified log-likelihood optimization as in Deisenroth (2010)
        :param length_scales: prior for length scale values
        :param sigma_f: prior for signal variance
        :param sigma_eps: prior for noise variance
        :param length_scale_pen: penalty for lengthscales
        :param signal_to_noise: signal to noise ratio in order to trade off signal and noise variance
        """

        # kernel definition as in Deisenroth(2010), p.10
        self.kernel = RBFKernel() + WhiteNoiseKernel()

        # data of GP
        self.x = None
        self.y = None

        # hyperparameters of GP
        self.length_scales = length_scales
        self.sigma_f = np.atleast_1d(sigma_f)
        self.sigma_eps = np.atleast_1d(sigma_eps)

        # dimensionality of data points
        self.n_targets = None
        self.state_dim = None

        # parameters to penalize bad hyperparameter choices when optimizing the GP log-likelihood;
        # this is only required for the dynamics model
        self.length_scale_pen = length_scale_pen
        self.signal_to_noise = signal_to_noise

        # container for caching gram matrix, betas and inv of gram matrix
        self.K = None
        self.betas = None
        self.K_inv = None
Example #27
    def sample_invwishart(S, nu):
        n = S.shape[0]
        chol = np.linalg.cholesky(S)

        if (nu <= 81 + n) and (nu == np.round(nu)):
            x = npr.randn(nu, n)
        else:
            x = np.diag(np.sqrt(np.atleast_1d(chi2.rvs(nu - np.arange(n)))))
            x[np.triu_indices_from(x, 1)] = npr.randn(n*(n-1)//2)
        R = np.linalg.qr(x, 'r')
        T = solve_triangular(R.T, chol.T, lower=True).T
        return np.dot(T, T.T)
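A minimal usage sketch, treating sample_invwishart above as a standalone (static) function and assuming numpy.random as npr, scipy.stats.chi2, and scipy.linalg.solve_triangular are in scope as in the source module:

import numpy as np

S = np.eye(3)                       # scale matrix
Sigma = sample_invwishart(S, nu=5)  # one 3x3 positive-definite draw (nu > n - 1)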
Example #28
def mvt_ppf(component_cum_prob, mu, L, df):
    from scipy.stats import norm, chi2
    mu = np.atleast_1d(mu).flatten()
    assert(component_cum_prob.shape[1] == mu.size+1)
    L = np.atleast_2d(L)
    rval = []
    for r in range(component_cum_prob.shape[0]):
        samp_mvn_0mu = L.dot(norm.ppf(component_cum_prob[r, :-1]))
        samp_chi2 = chi2.ppf(component_cum_prob[r, -1], df)
        samp_mvt_0mu = samp_mvn_0mu * np.sqrt(df / samp_chi2)
        rval.append(mu + samp_mvt_0mu)
    return np.array(rval)
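A minimal usage sketch for mvt_ppf above: each row of component_cum_prob holds mu.size normal quantile levels plus one chi-square level, so an (n, dim + 1) array of probabilities yields n multivariate-t draws (values here are illustrative).

import numpy as np

mu = np.array([0.0, 1.0])
L = np.linalg.cholesky(np.array([[2.0, 0.3],
                                 [0.3, 1.0]]))
u = np.random.uniform(0.05, 0.95, size=(5, mu.size + 1))
mvt_ppf(u, mu, L, df=4)   # shape (5, 2)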
Example #29
File: mniw.py Project: lfywork/svae
    def sample_invwishart(S, nu):
        n = S.shape[0]
        chol = np.linalg.cholesky(S)

        if (nu <= 81 + n) and (nu == np.round(nu)):
            x = npr.randn(nu, n)
        else:
            x = np.diag(np.sqrt(np.atleast_1d(chi2.rvs(nu - np.arange(n)))))
            x[np.triu_indices_from(x, 1)] = npr.randn(n * (n - 1) // 2)
        R = np.linalg.qr(x, 'r')
        T = solve_triangular(R.T, chol.T, lower=True).T
        return np.dot(T, T.T)
Example #30
 def validate_folded(self, folded_val, validate_value=None):
     folded_val = np.atleast_1d(folded_val)
     shape_ok, err_msg = self._validate_folded_shape(folded_val)
     if not shape_ok:
         return shape_ok, err_msg
     if validate_value is None:
         validate_value = self.default_validate
     if validate_value:
         if (np.array(folded_val < self._lb)).any():
             return False, 'Value beneath lower bound.'
         if (np.array(folded_val > self._ub)).any():
             return False, 'Value above upper bound.'
     return True, ''
Example #31
    def unwrap_params(self, params):
        n_features = self.x.shape[0]

        split1 = self.state_dim * n_features  # split for RBF centers
        split2 = self.n_targets * n_features + split1  # split for training targets/weights

        x = params[:split1].reshape(n_features, self.state_dim)
        y = params[split1:split2].reshape(n_features, self.n_targets)
        length_scales = params[split2:-1]
        sigma_eps = params[-1]

        # ensure noise is a numpy array
        self.x, self.y, self.length_scales, self.sigma_eps = x, y, length_scales, np.atleast_1d(sigma_eps)
Example #32
 def gradient(self, params):
     # use the gradient if the model provides it.
     # if not, compute it using autograd.
     if not hasattr(self.mean, 'gradient'):
         _grad = lambda mean, argnum, params: jacobian(mean, argnum)(*params)
     else:
         _grad = lambda mean, argnum, params: mean.gradient(*params)[argnum]
     n_params = len(np.atleast_1d(params))
     grad_likelihood = np.array([])
     for i in range(n_params):
         grad = _grad(self.mean, i, params)
         grad_likelihood = np.append(grad_likelihood,
                                     np.nansum(grad * (1 - self.data / self.mean(*params))))
     return grad_likelihood
Example #33
    def flat_indices(self, folded_bool, free=None):
        # If no indices are specified, save time and return an empty array.
        if not np.any(folded_bool):
            return np.array([], dtype=int)

        free = self._free_with_default(free)
        folded_bool = np.atleast_1d(folded_bool)
        shape_ok, err_msg = self._validate_folded_shape(folded_bool)
        if not shape_ok:
            raise ValueError(err_msg)
        if free:
            return self.__free_folded_indices[folded_bool]
        else:
            return self.__nonfree_folded_indices[folded_bool]
Example #34
def NumberOfSteps(
    ax, clf, model_X, model_Y, training_idx, validation_idx, history_para
):
    training_scores = []
    validation_scores = []

    training_X = model_X[training_idx]
    training_Y = model_Y[training_idx]
    validation_X = model_X[validation_idx]
    validation_Y = model_Y[validation_idx]
    for idx, para in history_para.iterrows():
        pub.sendMessage(
            "NumberOfStepsProgress",
            progress=idx /
            history_para.shape[0])
        nuggets = []
        scales = []
        for idemu, emulator in enumerate(clf["Emulator"].emulators):
            nuggets.append(para["Nuggets%d" % idemu])
            scales.append(
                para[
                    [
                        "Scales%d_%d" % (idemu, idinput)
                        for idinput in range(model_X.shape[1])
                    ]
                ].values
            )
        clf["Emulator"].scales = np.atleast_2d(scales)
        clf["Emulator"].nuggets = np.atleast_1d(nuggets)

        clf.Fit(training_X, training_Y)
        # training_scores.append(clf.Score(training_X, training_Y))
        # validation_scores.append(clf.Score(validation_X, validation_Y))
        training_scores.append(clf.ChiSq(training_X, training_Y))
        validation_scores.append(clf.ChiSq(validation_X, validation_Y))

    ax.plot(
        range(history_para.shape[0]),
        training_scores,
        label=r"Training $\chi^2$/deg. free",
    )
    ax.plot(
        range(history_para.shape[0]),
        validation_scores,
        label=r"Validation $\chi^2$/deg. free",
    )
    ax.set_xlabel("Number of epochs")
    ax.set_ylabel(r"$\chi^2$/deg. free")
    ax.legend()
Example #35
 def __init__(self, mu, K, Ki = None, logdet_K = None, L = None): 
     mu = np.atleast_1d(mu).flatten()
     K = np.atleast_2d(K) 
     assert(np.prod(mu.shape) == K.shape[0] )
     assert(K.shape[0] == K.shape[1])
     
     self.mu = mu
     self.K = K
     (val, vec) = np.linalg.eigh(K)
     idx = np.arange(mu.size-1,-1,-1)
     (self.eigval, self.eigvec) = (np.diag(val[idx]), vec[:,idx])
     self.eig = self.eigvec.dot(np.sqrt(self.eigval))
     self.dim = K.shape[0]
     #(self.Ki, self.logdet) = (np.linalg.inv(K), np.linalg.slogdet(K)[1])
     (self.Ki, self.L, self.Li, self.logdet) = pdinv(K)
     
     self.lpdf_const = -0.5 * float(self.dim * np.log(2 * np.pi)
                                    + self.logdet)
Example #36
 def __init__(self, mu, K, df, Ki = None, logdet_K = None, L = None):
     mu = np.atleast_1d(mu).flatten()
     K = np.atleast_2d(K)
     assert(np.prod(mu.shape) == K.shape[0] )
     assert(K.shape[0] == K.shape[1])
     self.mu = mu
     self.K = K
     self.df = df
     self._freeze_chi2 = stats.chi2(df)
     self.dim = K.shape[0]
     self._df_dim = self.df + self.dim
     #(self.Ki,  self.logdet) = (np.linalg.inv(K), np.linalg.slogdet(K)[1])
     (self.Ki, self.L, self.Li, self.logdet) = pdinv(K)
     
     
     self.lpdf_const = float(gammaln((self.df + self.dim) / 2)
                             - (gammaln(self.df / 2)
                                + (log(self.df) + log(np.pi)) * self.dim * 0.5
                                + self.logdet * 0.5))
Example #37
def mvt_rvs(n, mu, L, df):
    from scipy.stats import uniform
    mu = np.atleast_1d(mu).flatten()
    return mvt_ppf(uniform.rvs(size = (n, mu.size+1)), mu, L, df)
Example #38
 def set_mu(self, mu):
     self.mu = np.atleast_1d(mu).flatten()