Code Example #1
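All of the examples on this page use scipy.stats.truncnorm, whose truncation limits a and b are given in standard-deviation units relative to loc and scale rather than as raw bounds, so nearly every snippet performs the same conversion. A minimal sketch of that convention (assuming the usual imports, which the excerpts below omit):

import numpy as np
from scipy.stats import truncnorm

# raw bounds [low, upp] must be standardized against loc and scale
low, upp, loc, scale = 0.0, 1.0, 0.3, 0.2
a, b = (low - loc) / scale, (upp - loc) / scale
print(truncnorm.logpdf(0.5, a, b, loc=loc, scale=scale))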
def get_logprior(sample, alpha_prior, beta_prior):
    # priors are (mean, sd) pairs; the alpha parameters are truncated to
    # [0, 1], the beta parameter to [0, inf), with bounds standardized
    a_alpha = -alpha_prior[0] / alpha_prior[1]
    b_alpha = (1 - alpha_prior[0]) / alpha_prior[1]
    a_beta = -beta_prior[0] / beta_prior[1]
    b_beta = np.inf
    return truncnorm.logpdf(sample[0], a_alpha, b_alpha, alpha_prior[0], alpha_prior[1]) \
        + truncnorm.logpdf(sample[1], a_alpha, b_alpha, alpha_prior[0], alpha_prior[1]) \
        + truncnorm.logpdf(sample[2], a_beta, b_beta, beta_prior[0], beta_prior[1])
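A hypothetical call, assuming each prior is a (mean, sd) pair and sample holds two alpha-type parameters followed by one beta-type parameter (illustrative values only):

alpha_prior = (0.5, 0.1)  # truncated to [0, 1]
beta_prior = (2.0, 1.0)   # truncated to [0, inf)
lp = get_logprior([0.4, 0.6, 1.5], alpha_prior, beta_prior)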
Code Example #2
File: utils.py Project: weilin2018/climatology
    def log_q(self, h, new_h, step_h):
        f1, f2 = h[0], h[1]
        new_f1, new_f2 = new_h[0], new_h[1]
        log_q1 = truncnorm.logpdf(new_f1, (self.lower - f1) / step_h,
                                  (self.upper - f1) / step_h,
                                  loc=f1,
                                  scale=step_h)
        log_q2 = truncnorm.logpdf(new_f2, (self.lower - f2) / step_h,
                                  (self.upper - f2) / step_h,
                                  loc=f2,
                                  scale=step_h)
        return log_q1 + log_q2
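Because the standardized bounds move with the current point, this proposal is asymmetric, so a Metropolis-Hastings acceptance ratio must evaluate log_q in both directions (Code Example #22 below does the same thing in vectorized form). A sketch of that correction, where log_post is a hypothetical log-posterior not part of the excerpt:

log_alpha = (log_post(new_h) - log_post(h)
             + self.log_q(new_h, h, step_h)   # reverse move
             - self.log_q(h, new_h, step_h))  # forward move
accept = np.log(np.random.rand()) < log_alpha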
Code Example #3
def evaluate_truncated_normal(x, hyperparameters):
    mu, sigma, a, b = hyperparameters
    if a < x < b:
        # convert the raw bounds to standardized truncation limits
        ar, br = (a - mu) / sigma, (b - mu) / sigma
        return truncnorm.logpdf(x, ar, br, loc=mu, scale=sigma)
    else:
        return -np.inf
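Note that truncnorm.logpdf already returns -inf outside the truncation limits, so the explicit bounds check above is mainly defensive. A quick check of that behaviour:

mu, sigma, a, b = 0.0, 1.0, -1.0, 1.0
ar, br = (a - mu) / sigma, (b - mu) / sigma
print(truncnorm.logpdf(2.0, ar, br, loc=mu, scale=sigma))  # -inf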
Code Example #4
File: auxGradNew.py Project: Gabriel-Ducrocq/MCMC
def compute_log_proposal(cl_old, cl_new):
    # random-walk proposal truncated below at zero so cl stays positive
    clip_low = -cl_old / np.sqrt(config.variance_auxGrad_prop)
    return truncnorm.logpdf(cl_new,
                            a=clip_low,
                            b=np.inf,
                            loc=cl_old,
                            scale=np.sqrt(config.variance_auxGrad_prop))
Code Example #5
    def _logpdf(self, xi, givenx):
        if self._call_initial_proposal:
            return self.initial_proposal.logpdf(xi, givenx)
        # cache the intersects and width for repeated calls with givenx and xi
        x0 = [xi[p] for p in self.parameters]
        x1 = [givenx[p] for p in self.parameters]

        # the key is stored below as x1 + x0 (swapped), so the reverse-direction
        # call with xi and givenx exchanged can hit the cache
        if x0 + x1 == self._cache['hash']:
            in1, in2 = self._cache['intersects']
            width = self._cache['width']
        else:
            # calculate the intersections and width
            in1, in2 = self._intersects(givenx, self.eigvects[:, self._ind])
            width = numpy.linalg.norm([in2[p] - in1[p]
                                       for p in self.parameters])
            # update the cached data
            self._cache.update({'hash': x1 + x0, 'intersects': (in1, in2),
                                'width': width})
        # project both points onto distance along the eigenvector, measured
        # from the first intersection
        mu = numpy.linalg.norm([givenx[p] - in1[p] for p in self.parameters])
        xi = numpy.linalg.norm([xi[p] - in1[p] for p in self.parameters])

        a = - mu / self.eigvals[self._ind]
        b = (width - mu) / self.eigvals[self._ind]
        return truncnorm.logpdf(xi, a, b, loc=mu,
                                scale=self.eigvals[self._ind])
Code Example #6
    def ll_t(self, spec, pt, rt, qt, alt):
        r_loc = self.get_r_t(spec, rt, alt)
        # hard-reject parameter sets that produce negative rates
        for thing in r_loc:
            if thing < 0:
                return -(10**50)
        r_loc = np.array(r_loc)
        r_loc = r_loc.astype(np.float64)
        self.data[spec][self.sc] = np.array(self.data[spec][self.sc])
        self.data[spec][self.sc] = self.data[spec][self.sc].astype(np.float64)
        ret = np.array([0.])
        ret = ret.astype(np.float64)
        mylen = len(self.data[spec][self.sc])
        self.llnbLib.llnbinom(ctypes.c_double(np.float64(pt)),
                              ctypes.c_double(np.float64(qt)),
                              np.ctypeslib.as_ctypes(r_loc),
                              np.ctypeslib.as_ctypes(self.data[spec][self.sc]),
                              np.ctypeslib.as_ctypes(ret), ctypes.c_int(mylen))
        ret[0] = float(ret[0])
        ret[0] += mylen * (truncnorm.logpdf(rt,
                                            a=0,
                                            b=self.baseroot[0] * 100,
                                            loc=self.baseroot[0],
                                            scale=10 * self.baseroot[0]))
        # NOTE: alternative beta/gamma priors on pt were tried here but are
        # disabled.
        return ret[0]
Code Example #7
File: test_bounds.py Project: igresh/refnx
    def test_pdf(self):
        pdf = PDF(norm)

        # even if it's really far out it's still a valid value
        assert_equal(pdf.valid(1003), 1003)
        # logp
        assert_equal(pdf.logp(0), norm.logpdf(0))

        # examine dist with finite support
        pdf = PDF(truncnorm(-1, 1))
        assert_equal(pdf.logp(-2), -np.inf)
        assert_equal(pdf.logp(-0.5), truncnorm.logpdf(-0.5, -1, 1))

        # obtain a random value of a bounds instance
        vals = pdf.rvs(size=1000)
        assert_(np.min(vals) >= -1)
        assert_(np.max(vals) <= 1)

        # test a uniform distribution
        pdf = PDF(uniform(1, 9))
        assert_equal(pdf.logp(2), np.log(1.0 / 9.0))
        assert_equal(pdf.logp(10.0), np.log(1.0 / 9.0))

        # test the invcdf
        rando = np.random.uniform(size=10)
        pdf = PDF(truncnorm(-1, 1))
        assert_equal(pdf.invcdf(rando), truncnorm.ppf(rando, -1, 1))
Code Example #8
    def get_logpdf(self, vectors, normalizer=lambda x: x, sentence_start_indexes=None):
        # avoid a mutable default argument
        if sentence_start_indexes is None:
            sentence_start_indexes = []
        unpaddedX, unpaddedY = self.create_training_data(vectors)
        paddedX, paddedY = self.create_padded_training_data(vectors, sentence_start_indexes)
        X = np.concatenate((unpaddedX, paddedX))
        Y = np.concatenate((unpaddedY, paddedY))
        predictions = self.generator.predict(X)
        similarities = [normalizer(self.cosine_similarity(y, y_p)) for y, y_p in zip(Y, predictions)]
        similarities.extend([-1 * s for s in similarities])
        sd = np.std(similarities)
        # half-normal (zero mean, truncated below at zero) with scale sd
        return lambda s: truncnorm.logpdf(s, 0, np.inf, 0, sd)
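The zero-mean distribution truncated below at zero used here is exactly a half-normal, so the returned function matches scipy.stats.halfnorm. A quick equivalence check with illustrative values:

from scipy.stats import halfnorm
sd = 0.4
s = 0.25
print(truncnorm.logpdf(s, 0, np.inf, 0, sd))  # half-normal via truncation
print(halfnorm.logpdf(s, scale=sd))           # same value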
Code Example #9
def _log_normal_truncada_desplazada(x, limite_inferior, limite_superior, media,
                                    desvio):
    """
    Returns the log-density of a normal truncated to the given limits,
    with the supplied mean and standard deviation.
    """
    a, b = (limite_inferior - media) / desvio, (limite_superior -
                                                media) / desvio
    return truncnorm.logpdf(
        x, a, b, loc=media, scale=desvio
    )  # TODO The paper's version returns -Inf if x == limite_inferior or x == limite_superior
Code Example #10
    def score_samples(self, X):
        # TODO Array shape check
        samples = X.reshape(-1)
        return (logsumexp([
            truncnorm.logpdf(
                samples, a=lower, b=upper, loc=mean, scale=self.bandwidth) +
            log_weight for (mean, lower, upper, log_weight) in zip(
                self.points_, self.lowers_, self.uppers_, self.log_weights_)
        ], axis=0, return_sign=False) - self.log_normalizer_)
Code Example #11
def get_truncated_normal(mean=0, sd=1, low=0, upp=10, log=False):
    from scipy.stats import truncnorm

    # freeze the distribution with standardized truncation bounds
    dist = truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
    if log:
        # assumed intent: hand back the log-density function itself
        # (the original call passed only one truncation bound to logpdf)
        return dist.logpdf
    return dist
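A usage sketch for both branches, assuming the version above and illustrative values:

dist = get_truncated_normal(mean=5, sd=2, low=0, upp=10)
samples = dist.rvs(size=3)  # draw from the frozen distribution
logp = get_truncated_normal(mean=5, sd=2, low=0, upp=10, log=True)
print(logp(samples))        # evaluate the log-density at the draws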
Code Example #12
File: advancedpriors.py Project: pacargile/ThePayne
    def logp_age_from_feh(self, age, feh_mean=-0.2, max_age=13.8, min_age=0.,
                          feh_age_ctr=-0.5, feh_age_scale=0.5,
                          nsigma_from_max_age=2., max_sigma=4., min_sigma=1.):
        """
        Log-prior for the age in the disk component of the galaxy. Designed to
        follow the disk metallicity prior.
        Parameters
        ----------
        age : `~numpy.ndarray` of shape (N)
            The ages of the corresponding models whose `Z` has been provided.
        feh_mean : float, optional
            The mean metallicity. Default is `-0.2`.
        max_age : float, optional
            The maximum allowed mean age (in Gyr). Default is `13.8`.
        min_age : float, optional
            The minimum allowed mean age (in Gyr). Default is `0.`.
        feh_age_ctr : float, optional
            The mean metallicity where the mean age is halfway between
            `max_age` and `min_age`. Default is `-0.5`.
        feh_age_scale : float, optional
            The exponential scale-length at which the mean age approaches
            `max_age` or `min_age` as it moves to lower or higher mean metallicity,
            respectively. Default is `0.5`.
        nsigma_from_max_age : float, optional
            The number of sigma away the mean age should be from `max_age`
            (i.e. the mean age is `nsigma_from_max_age`-sigma lower
            than `max_age`). Default is `2.`.
        max_sigma : float, optional
            The maximum allowed sigma (in Gyr). Default is `4.`.
        min_sigma : float, optional
            The minimum allowed sigma (in Gyr). Default is `1.`.
        Returns
        -------
        logp : `~numpy.ndarray` of shape (N)
            The corresponding normalized ln(probability).
        """

        # Compute mean age.
        age_mean_pred = ((max_age - min_age)
                         / (1. + np.exp((feh_mean - feh_age_ctr) / feh_age_scale))
                         + min_age)

        # Compute age spread.
        age_sigma_pred = (max_age - age_mean_pred) / nsigma_from_max_age
        age_sigma_pred = min(max(age_sigma_pred, min_sigma), max_sigma)  # bound

        # Compute log-probability.
        a = (min_age - age_mean_pred) / age_sigma_pred
        b = (max_age - age_mean_pred) / age_sigma_pred
        lnprior = truncnorm.logpdf(age, a, b,
                                   loc=age_mean_pred, scale=age_sigma_pred)

        return lnprior
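Since truncnorm.logpdf broadcasts, the prior can be evaluated for a whole grid of ages in one call. A hypothetical invocation, where prior stands for an instance of the surrounding class:

ages = np.linspace(0.0, 13.8, 5)
lnp = prior.logp_age_from_feh(ages, feh_mean=-0.2)  # array of shape (5,)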
Code Example #13
    def test_normal_log_pdf(self):
        from scipy.stats import truncnorm
        mu = 3.0
        var = 2.0
        d = 1.0
        normal_log_pdf2 = self.get_theano_normal_log_pdf()
        for x in np.linspace(mu - d, mu + d, 50):
            expected = truncnorm.logpdf(x, -d / np.sqrt(var), d / np.sqrt(var),
                                        mu, np.sqrt(var))
            got1 = normal_log_pdf(x, mu, var, d)
            got2 = normal_log_pdf2(x, mu, var, d)
            self.assertAlmostEqual(got1, expected, places=6)
            self.assertAlmostEqual(got2, expected, places=6)
Code Example #14
File: test_bounds.py Project: brotwasme/refnx
    def test_pdf(self):
        pdf = PDF(norm)

        # even if it's really far out it's still a valid value
        assert_equal(pdf.valid(1003), 1003)
        # logp
        assert_equal(pdf.lnprob(0), norm.logpdf(0))

        # examine dist with finite support
        pdf = PDF(truncnorm(-1, 1), seed=1)
        assert_equal(pdf.lnprob(-2), -np.inf)
        assert_equal(pdf.lnprob(-0.5), truncnorm.logpdf(-0.5, -1, 1))

        # obtain a random value of a bounds instance
        vals = pdf.rvs(size=1000)
        assert_(np.min(vals) >= -1)
        assert_(np.max(vals) <= 1)
Code Example #15
    def log_pdf(self, old_position, new_position, sigma):
        """Evaluate the log pdf of the truncated normal proposal at the new
        position, with mean old_position and std dev sigma."""
        if not hasattr(sigma, "__len__"):
            sigma = np.ones_like(old_position) * sigma
        old_position = [
            old_position
        ] if not hasattr(old_position, "__len__") else old_position
        new_position = [
            new_position
        ] if not hasattr(new_position, "__len__") else new_position
        sigma = [sigma] if not hasattr(sigma, "__len__") else sigma

        tot_log_pdf = 0
        for i in range(len(old_position)):
            a, b = (self.lower_bound[i] - old_position[i]) / sigma[i], (
                self.upper_bound[i] - old_position[i]) / sigma[i]
            tot_log_pdf += truncnorm.logpdf(new_position[i],
                                            a,
                                            b,
                                            loc=old_position[i],
                                            scale=sigma[i])
        return tot_log_pdf
Code Example #16
    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return super(TruncatedNormal, self).log_prob(
            self._to_std_rv(value)) - self._log_scale


if __name__ == '__main__':
    from scipy.stats import truncnorm
    loc, scale, a, b = 1., 2., 1., 2.
    tn_pt = TruncatedNormal(loc, scale, a, b)
    mean_pt, var_pt = tn_pt.mean.item(), tn_pt.variance.item()
    alpha, beta = (a - loc) / scale, (b - loc) / scale
    mean_sp, var_sp = truncnorm.stats(alpha,
                                      beta,
                                      loc=loc,
                                      scale=scale,
                                      moments='mv')
    print('mean', mean_pt, mean_sp)
    print('var', var_pt, var_sp)
    print('cdf',
          tn_pt.cdf(1.4).item(),
          truncnorm.cdf(1.4, alpha, beta, loc=loc, scale=scale))
    print('icdf',
          tn_pt.icdf(0.333).item(),
          truncnorm.ppf(0.333, alpha, beta, loc=loc, scale=scale))
    print('logpdf',
          tn_pt.log_prob(1.5).item(),
          truncnorm.logpdf(1.5, alpha, beta, loc=loc, scale=scale))
    print('entropy', tn_pt.entropy.item(),
          truncnorm.entropy(alpha, beta, loc=loc, scale=scale))
Code Example #17
File: model_ez.py Project: adrn/hq-pe
def lntruncnorm(x, mu, sigma, clip_a, clip_b):
    a, b = (clip_a - mu) / sigma, (clip_b - mu) / sigma
    return truncnorm.logpdf(x, a, b, loc=mu, scale=sigma)
Code Example #18
def truncated_normal_logpdf(x, mean, var, lower=0, upper=1):
    std = np.sqrt(var)  # scipy parameterizes by standard deviation, not variance
    a = (lower - mean) / std
    b = (upper - mean) / std
    return truncnorm.logpdf(x, a, b, loc=mean, scale=std)
Code Example #19
File: truncreg.py Project: Py4Etrics/py4etrics
def _truncreg(y, x, left, right, beta, s):
    # s is the log of the scale, so np.exp(s) keeps the scale positive
    Xb = np.dot(x, beta)
    _l = (left - Xb) / np.exp(s)
    _r = (right - Xb) / np.exp(s)
    return truncnorm.logpdf(y, a=_l, b=_r, loc=Xb, scale=np.exp(s))
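In a full truncated regression these per-observation log-densities would be summed into a likelihood for the optimizer. A minimal sketch under that assumption (negll and theta are illustrative names, not the package's API):

def negll(theta, y, x, left, right):
    beta, s = theta[:-1], theta[-1]  # s parameterizes the log of the scale
    return -np.sum(_truncreg(y, x, left, right, beta, s))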
Code Example #20
File: distributions.py Project: mdsmith749/pyDE
def truncnorm_logpdf(x, loc, scale, a=0, b=float('inf')):
    # standardize the raw bounds (deliberately shadowing a and b)
    a = (a - loc) / scale
    b = (b - loc) / scale
    lp = truncnorm.logpdf(x=x, loc=loc, scale=scale, a=a, b=b)
    return lp
Code Example #21
def truncnorm_logpdf(x, a, b, mean, std):
    a_use = (a - mean) / std
    b_use = (b - mean) / std
    return truncnorm.logpdf(x, a_use, b_use, mean, std)
Code Example #22
    def mcmc(self, sessions_id, std_RW, nb_chains, nb_steps, initial_point, adaptive=True):
        '''
        Perform inference with MCMC.
        Params:
            sessions_id (array of int): gives the sessions to be used for the training (for instance, if you have 4 sessions
                and you want to train on only the first 3, put sessions_id = np.array([0, 1, 2])).
            std_RW (float) : standard deviation of the random walk for the proposal.
            nb_chains (int) : number of MCMC chains to run in parallel
            nb_steps (int) : number of MCMC steps
            initial_point (array of float of size nb_params): gives the initial point for all chains of MCMC. All chains
                start from the same point
            adaptive (bool) : whether to switch to an adaptive proposal after the burn-in (default True)
        '''
        if initial_point is None:
            if np.any(np.isinf(self.lb_params)) or np.any(np.isinf(self.ub_params)):
                assert False, 'because your bounds are infinite, an initial_point must be specified'
            # spread the chains over the bounded box with a Sobol sequence
            import sobol_seq
            grid = np.array(sobol_seq.i4_sobol_generate(self.nb_params, nb_chains))
            initial_point = self.lb_params + grid * (self.ub_params - self.lb_params)
        
        if nb_steps is None:
            nb_steps, early_stop = int(5000), True
            if self.nb_params <= 5:
                Nburn, nb_minimum = 500, 1000
            else:
                Nburn, nb_minimum = 1000, 2000
            print('Launching MCMC procedure with {} chains, {} max steps and {} std_RW. Early stopping is activated'.format(nb_chains, nb_steps, std_RW))
        else:            
            early_stop = False
            Nburn = int(nb_steps/2)
            print('Launching MCMC procedure with {} chains, {} steps and {} std_RW'.format(nb_chains, nb_steps, std_RW))
        
        if len(initial_point.shape) == 1:
            initial_point = np.tile(initial_point[np.newaxis], (nb_chains, 1))

        if adaptive:
            print('with adaptive MCMC...')

        print('initial point for MCMC is {}'.format(initial_point))

        adaptive_proposal=None
        lkd_list = [self.evaluate(initial_point, sessions_id, clean_up_gpu_memory=False)]
        self.R_list = []
        params_list = [initial_point]
        acc_ratios = np.zeros([nb_chains])
        for i in tqdm(range(int(nb_steps))):
            if adaptive_proposal is None:
                a, b = (self.lb_params - params_list[-1]) / std_RW, (self.ub_params - params_list[-1]) / std_RW
                proposal = truncnorm.rvs(a, b, params_list[-1], std_RW)
                a_p, b_p = (self.lb_params - proposal) / std_RW, (self.ub_params - proposal) / std_RW
                prop_liks = self.evaluate(proposal, sessions_id, clean_up_gpu_memory=False)
                # Metropolis-Hastings log ratio, correcting for the asymmetric
                # truncated-normal proposal in both directions
                log_alpha = (prop_liks - lkd_list[-1]
                            + truncnorm.logpdf(params_list[-1], a_p, b_p, proposal, std_RW).sum(axis=1)
                            - truncnorm.logpdf(proposal, a, b, params_list[-1], std_RW).sum(axis=1))
            else:
                proposal = adaptive_proposal(params_list[-1], Sigma, Lambda)
                valid = np.all((proposal > self.lb_params) * (proposal < self.ub_params), axis=1)
                proposal_modified = valid[:, np.newaxis] * proposal + (1 - valid[:, np.newaxis]) * initial_point
                prop_liks = self.evaluate(proposal_modified, sessions_id, clean_up_gpu_memory=False)
                log_alpha = prop_liks - lkd_list[-1]
                log_alpha[~valid] = -np.inf

            accep = np.expand_dims(log_alpha > np.log(np.random.rand(len(log_alpha))), -1)
            new_params = proposal * accep + params_list[-1] * (1 - accep)
            new_lkds   = prop_liks * np.squeeze(accep) + lkd_list[-1] * (1 - np.squeeze(accep))

            params_list.append(new_params)
            lkd_list.append(new_lkds)
            acc_ratios += np.squeeze(accep) * 1

            if early_stop and (i > Nburn) and (i > nb_minimum):
                R = self.inference_validated(np.array(params_list)[Nburn:])
                self.R_list.append(R)
                if i%100==0:
                    print('Gelman-Rubin factor is {}'.format(R))
                if np.all(np.abs(R - 1) < 0.15):
                    print('Early stopping criteria was validated at step {}. R values are: {}'.format(i, R))
                    break

            if adaptive and i>=Nburn: # Adaptive MCMC following Andrieu and Thoms 2008 or Baker 2014
                Gamma = (1/(i - Nburn + 1)**0.5)
                if i==Nburn:
                    print('Adaptive MCMC starting...')
                    from scipy.stats import multivariate_normal
                    params = np.array(params_list)[-int(Nburn/2):].reshape(-1, self.nb_params)
                    Mu = params.mean(axis=0)
                    Sigma = np.cov(params.T)
                    Lambda = np.ones(len(params_list[-1])) #(2.38**2)/self.nb_params
                    AlphaStar = 0.234
                    def adaptive_proposal(m, s, l, constrained=False):
                        list_proposals = []
                        if not constrained:
                            for k in range(len(l)):
                                list_proposals.append(multivariate_normal.rvs(mean=m[k], cov=l[k] * s))
                        else:
                            for k in range(len(l)):
                                while True:
                                    candidate = multivariate_normal.rvs(mean=m, cov=s)
                                    if np.all((candidate > self.lb_params) * (candidate < self.ub_params)):
                                        break
                                list_proposals.append(candidate)
                        return np.array(list_proposals)
                else:
                    param = params_list[-1].reshape(-1, self.nb_params)
                    Alpha_estimated = np.minimum((np.exp(log_alpha)), 1)#.mean()
                    if i%100==0:
                        print('acceptance is {}'.format(np.mean(acc_ratios/i)))
                    Lambda = Lambda * np.exp(Gamma * (Alpha_estimated - AlphaStar))
                    Mu = Mu + Gamma * (param.mean(axis=0) - Mu)
                    Sigma = Sigma + Gamma * (np.cov(param.T) - Sigma)

        print('final posterior_mean is {}'.format(np.array(params_list)[Nburn:].mean(axis=(0,1))))
        acc_ratios = acc_ratios/i
        if i==(nb_steps-1) and early_stop:
            print('Warning : inference has not converged according to Gelman-Rubin')

        if self.use_gpu: # clean up gpu memory
            torch.cuda.empty_cache()

        print('acceptance ratio is of {}. Careful, this ratio should be close to 0.234. If not, change the standard deviation of the random walk'.format(acc_ratios.mean()))
        return np.array(params_list), np.array(lkd_list), np.array(self.R_list)
Code Example #23
File: utils.py Project: weilin2018/climatology
    def log_q(self, h, new_h, step_h):
        return truncnorm.logpdf(new_h, (self.lower - h) / step_h,
                                (self.upper - h) / step_h,
                                loc=h,
                                scale=step_h)