Example #1
File: t.py Project: ronrest/pyrpy
from scipy.stats import t


def dt(x, df=1, loc=0, scale=1, ncp=None, log=False):
    """
    Density Function for the t distribution.
    Returns the probability density value at the value x.

    ARGS:
    ---------------
    :param x (float, array of floats):
        the value(s) at which to evaluate the density
    :param df (float):
        degrees of freedom
    :param loc (float, array of floats):
        location parameter (default=0)
    :param scale (float):
        scale parameter (default=1)
    :param ncp (float):
        non-centrality parameter delta.
        Currently not implemented.
    :param log (bool):
        if True, return the log of the density

    RETURN:
    ---------------
    :return: density value(s) at x (the log-density if log=True)
    """
    # ==========================================================================
    if log:
        return t.logpdf(x, df=df, loc=loc, scale=scale)
    else:
        return t.pdf(x, df=df, loc=loc, scale=scale)
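A quick usage sketch (not from the original project), assuming the function and import above are in scope:

import numpy as np

x = np.array([-2.0, 0.0, 2.0])
print(dt(x, df=5))             # density values at each x
print(dt(x, df=5, log=True))   # the same values on the log scale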
Example #2
from numpy import array, empty, logaddexp, ndenumerate, ones
from scipy.stats import t


def discrete_trunc_t_logpdf(x, df, domain, loc=0, scale=1):
    # make sure is numpy array
    x = array(x)

    # map each value of x to its index in the domain
    indices = empty(x.shape, dtype=int)
    for index, val in ndenumerate(x):
        indices[index] = domain.index(val)

    # number of elements in domain
    n = len(domain)

    # the shape for each of the values in the domain
    shape = (n,) + x.shape

    # get the values from the unmodified t over the domain
    all_log_prob = empty(shape)
    for i in range(n):
        all_log_prob[i] = t.logpdf(domain[i]*ones(x.shape), df, loc=loc, scale=scale)

    # normalize these values
    total_log_prob = logaddexp.reduce(all_log_prob, axis=0)
    norm_log_prob = all_log_prob - total_log_prob

    # look up the normalized log-probability for each value of x
    log_prob = empty(x.shape)
    for i, val in ndenumerate(x):
        log_prob[i] = norm_log_prob[(indices[i],) + i]

    return log_prob
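A quick sanity check (a sketch, not part of the original project), assuming the imports above: the truncated distribution should sum to one over its domain.

import numpy as np

domain = [-1, 0, 1, 2]
lp = discrete_trunc_t_logpdf(domain, df=3, domain=domain)
print(np.exp(lp))        # probability of each domain point
print(np.exp(lp).sum())  # should be ~1.0 after normalization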
Example #3
    def evaluateLogLikelihoodHessian(self, par):

        print(par)

        df = par[0]
        self.par[1] = par[1]
        self.par[2] = par[2]
        self.par[3] = par[3]

        # Extract the degrees of freedom and the dimension
        p = (self.uhat).shape[1]
        n = (self.uhat).shape[0]

        self.constructCorrelationMatrix(p)

        # Apply the univariate t quantile (percentile) function to the pseudo-observations
        tppf_uhat = t.ppf(self.uhat, df)

        # Calculate the first part of the log-likelihood
        part1 = 0

        for ii in range(n):

            part1 += multiTLogPDF(tppf_uhat[ii, :], np.zeros(p), self.P, df, p)

        # Calculate the second part of the log-likelihood
        part2 = np.sum(t.logpdf(tppf_uhat, df))

        return part1 - part2
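Examples #3 and #4 both evaluate a Student-t copula log-likelihood: the multivariate-t log-density of the quantile-transformed data minus the sum of univariate t log-densities of its margins. A self-contained sketch of that decomposition, using scipy.stats.multivariate_t (available in SciPy >= 1.6) in place of the project's multiTLogPDF helper:

import numpy as np
from scipy.stats import t, multivariate_t

def t_copula_loglik(uhat, P, df):
    # uhat: (n, p) array of pseudo-observations in (0, 1); P: correlation matrix
    z = t.ppf(uhat, df)                      # t quantiles of the margins
    part1 = np.sum(multivariate_t.logpdf(z, loc=np.zeros(uhat.shape[1]),
                                         shape=P, df=df))
    part2 = np.sum(t.logpdf(z, df))          # univariate margins
    return part1 - part2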
Example #4
    def evaluateLogPosteriorBFGS(self, par):

        if hasattr(par, "__len__"):

            self.par[0] = par[0]

            if len(par) >= 2:
                self.par[1] = par[1]
            if len(par) >= 3:
                self.par[2] = par[2]
            if len(par) >= 4:
                self.par[3] = par[3]
        else:
            self.par[0] = par

        # Extract the degrees of freedom and the dimension
        p = (self.uhat).shape[1]
        n = (self.uhat).shape[0]

        self.constructCorrelationMatrix(p)

        # Apply the univariate t quantile (percentile) function to the pseudo-observations
        tppf_uhat = t.ppf(self.uhat, self.par[0])

        # Calculate the first part of the log-likelihood
        part1 = 0

        for ii in range(n):
            part1 += multiTLogPDF(tppf_uhat[ii, :],
                                  np.zeros(p), self.P, self.par[0], p)

        # Calculate the second part of the log-likelihood
        part2 = np.sum(t.logpdf(tppf_uhat, self.par[0]))

        out = part2 - part1 - self.logPrior()

        print((par, out))
        return out
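The method above returns a negative log-posterior, so it can be handed straight to a quasi-Newton optimizer. A hedged sketch, where model stands for an instance of this class (assumed constructed elsewhere):

from scipy.optimize import minimize

# `model` is an instance of the copula class above (assumed)
res = minimize(model.evaluateLogPosteriorBFGS,
               x0=[4.0],                  # initial degrees of freedom
               method='L-BFGS-B',
               bounds=[(2.1, 100.0)])     # keep df in a well-behaved range
print(res.x)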
Example #5
    def lnpdf(self, x):
        from scipy.stats import t
        return t.logpdf(x, self.nu, self.mu, self.sigma)
Example #6
def q_log_pdf(θold, θ):
    logpdf = t.logpdf((θold - θ) / scale_walk, loc=0, scale=1, df=ν)
    return np.sum(logpdf, axis=1, keepdims=True)
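Example #6 is the log-density of a random-walk proposal with independent scaled-t steps; the matching draw, with assumed values for the globals scale_walk and ν, might look like:

import numpy as np
from scipy.stats import t

scale_walk, ν = 0.1, 5.0         # assumed values for the globals used above

θold = np.zeros((4, 2))          # e.g. 4 chains, 2 parameters each
θnew = θold + scale_walk * t.rvs(ν, size=θold.shape)   # propose a step

print(q_log_pdf(θold, θnew))     # shape (4, 1): one log-density per chain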
Example #7
def log_likelihood(predictions):
    return np.sum(
        student_t.logpdf(predictions,
                         loc=expt_means,
                         scale=expt_uncs,
                         df=7))
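Example #7 scores model predictions against experimental means and uncertainties with heavy-tailed (df=7) errors. A usage sketch with made-up data (expt_means and expt_uncs are globals the function reads):

import numpy as np
from scipy.stats import t as student_t

expt_means = np.array([1.0, 2.0, 3.0])   # assumed experimental values
expt_uncs  = np.array([0.1, 0.2, 0.3])   # assumed 1-sigma uncertainties

print(log_likelihood(np.array([1.05, 1.90, 3.20])))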
Example #8
    #NOTE Returns ln(M), not log10(M)! This is how the formula is defined!
    mu = invLogLam(logLam0, a, b, B_lam, z, logRich)
    if sigma_mass == 0:
        return mu
    return np.array([t.rvs(df, loc=m, scale=sigma_mass, size=size)
                     for m in mu])  # shape: (logRich.shape[0], size)

#draw one set of samples, rather than re-drawing each cycle
#use truths as really really good guess. Can relax later.
logMassSamples = invLogLamSample(logL0_true, a_true, b_true, B_l_true, sigma_mass,
                                 redshift, logRichness, size=nSamples)
massSamples = np.exp(logMassSamples)

logPMass = log_n_approx(massSamples,redshift)
logPMass[massSamples<Mmin] = -np.inf

logPSample = np.array([t.logpdf(lms, df, loc=invLogLam(logL0_true, a_true, b_true, B_l_true, redshift, lr), scale=sigma_mass)
                       for lms, lr in zip(logMassSamples, logRichness)])


def log_liklihood(logL0, a,b, B_l, sigma, z, logRich):

    logPRich = np.array([norm.logpdf(lr, loc=logLam(logL0, a, b, B_l, z, ms), scale=sigma)
                         for lr, ms in zip(logRich, massSamples)])

    logL_k = logsumexp(logPRich + logPMass - logPSample, axis=1) - np.log(nSamples)  # log of the mean importance weight

    return np.sum(logL_k)

def log_posterior(theta, z, logRich):
    # print(theta)
    logL0, a, b, B_l, sigma = theta[:]
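The likelihood estimator above is a self-normalized importance-sampling average computed in log space; logsumexp is what keeps it numerically stable. A tiny standalone illustration of that pattern (the weights are made up):

import numpy as np
from scipy.special import logsumexp

log_w = np.array([-3.2, -1.1, -2.5, -0.7])           # log importance weights
log_mean_w = logsumexp(log_w) - np.log(log_w.size)   # log of the mean weight
print(log_mean_w)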
Example #9
    def forward_filter(self):

        T         = self.T         # Number of timesteps
        obs_dim   = self.obs_dim   # Dimension of observed data
        state_dim = self.state_dim # Dimension of state vector
        
        if self.obs_discount:
            self.gamma_n = np.zeros(T)
            self.s       = np.zeros(T)
            self.s[0]    = self.s0
           
        else:
            V = self.V # Dimensions of [obs_dim,obs_dim]
            
        self.r = np.zeros(T)       # For unknown obs. variance
        self.e = np.zeros([T,obs_dim])   # Forecast error
        self.f = np.zeros([T,obs_dim])   # Forecast mean
        self.m = np.zeros([T,state_dim])   # State vector/matrix posterior mean
        self.a = np.zeros([T,state_dim])   # State vector/matrix prior mean
        self.Q = np.zeros([T,obs_dim,obs_dim]) # Forecast covariance
        self.A = np.zeros([T,state_dim,obs_dim]) # Adaptive coefficient vector
        self.R = np.zeros([T,state_dim,state_dim]) # State vector prior variance
        self.C = np.zeros([T,state_dim,state_dim]) # State vector posterior variance
        self.B = np.zeros([T,state_dim,state_dim]) # Retrospective (smoothing) gain
        
        # If we want to change the tracked quantities all at once later,
        # it would be handy to be able to reference all of them at the 
        # same time.
        self.dynamic_names = ['F','Y','r' , 'e', 'f' ,'m' ,'a', 'Q', 'A', 'R','C','B']
        
        if self.obs_discount:
            self.dynamic_names = self.dynamic_names + ['gamma_n','s']
        if self.dynamic_G:
            self.dynamic_names = self.dynamic_names + ['G']

        # Forward filtering
        # For each time step, we ingest a new observation and update our priors
        # to posteriors.
        for t in range(T):
            self.t = t
            self.filter_step(t)
                        
        # After filtering, tabulate each step's contribution to the
        # overall log-likelihood.
        if self.calculate_ll:
            if self.obs_discount:
                # We need the shape parameters for the preceding time step in the current
                # timestep's calculation of the log likelihood. This just offsets the 
                # vector of shape parameters.
                shifted_gamma = np.roll(np.squeeze(self.gamma_n),1)
                shifted_gamma[0]  = 1.0
                self.log_likelihood = student_t.logpdf(np.squeeze(self.e),
                                                       shifted_gamma,
                                                       scale=np.squeeze(np.sqrt(self.Q)))
            else:
                self.log_likelihood = norm.logpdf(np.squeeze(self.e),
                                                  scale=np.squeeze(np.sqrt(self.Q)))

            # This is the marginal model likelihood.
            self.ll_sum = np.sum(self.log_likelihood)
        
        if self.nancheck:
            try:
                for arr in [self.A, self.C, self.Q, self.m, self.log_likelihood]:
                    assert not np.any(np.isnan(arr))

            except AssertionError:
                print('NaN values encountered in forward filtering.')
        
        self.populate_scores()
        
        self.is_filtered = True
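One detail worth calling out in the filter above: np.roll shifts the vector of shape parameters so that each forecast error at time t is scored against the shape parameter from time t-1. A tiny standalone check of that offset:

import numpy as np

gamma_n = np.array([2.0, 3.0, 4.0, 5.0])   # shape parameters after each update
shifted = np.roll(gamma_n, 1)              # move gamma_{t-1} into slot t
shifted[0] = 1.0                           # prior shape for the first step
print(shifted)                             # [1. 2. 3. 4.]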