Example #1
    def __init__(self, grid, occam=2.0):
        # test = Hypothesis('testGrid')
        self.grid = grid
        self.hypotheses = None
        self.occam = occam

        self.primitives = [self.And, self.Or, self.Then]
        self.objects = list(grid.objects.keys())  # materialise for len() and indexing under Python 3
        self.space = [self.primitives, self.objects]

        # Uniform distribution for sampling across objects and primitives
        self.pPrim = np.zeros(len(self.primitives))
        self.pPrim = uniform.pdf(self.pPrim)
        self.pPrim /= self.pPrim.sum()

        self.pObj = np.zeros(len(self.objects))
        self.pObj = uniform.pdf(self.pObj)
        self.pObj /= self.pObj.sum()

        self.p = [self.pPrim, self.pObj]

        self.primCount = 0
        self.primHypotheses = list()

        self.setBetaDistribution()
Example #2
def p(s1, s2, w, b):
    # un-normalised posterior: uniform priors times the Gaussian likelihood
    # (x and y are observed data assumed to be in scope)
    prior_s1 = uniform.pdf(s1, loc=0.5, scale=4.5)
    prior_s2 = uniform.pdf(s2, loc=0.5, scale=4.5)
    prior_w = uniform.pdf(w, loc=0., scale=2.)
    prior_b = uniform.pdf(b, loc=0., scale=2.)
    likl = np.prod(norm.pdf(y, loc=x * w + b, scale=[s1, s2]))
    return prior_s1 * prior_s2 * prior_w * prior_b * likl
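
A minimal usage sketch (hypothetical data; x and y are the globals the likelihood expects, with y holding two response columns so that scale=[s1, s2] broadcasts one noise level per column):

import numpy as np
from scipy.stats import norm, uniform

x = np.linspace(0., 1., 20).reshape(-1, 1)  # inputs, one column
y = 1.5 * x + 0.3 + np.random.normal(scale=[1., 2.], size=(20, 2))  # two noisy response columns
print(p(1.0, 2.0, 1.5, 0.3))  # un-normalised posterior density at a plausible point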
Example #3
    def test_system_ode_solver(self):
        # example taken from the paper by Hobson and Klimmek (2015)

        # nested supports: UNIFORM_SUPP[1][0] <= UNIFORM_SUPP[0][0]
        #                <= UNIFORM_SUPP[0][1] <= UNIFORM_SUPP[1][1]
        UNIFORM_SUPP = [[-1., 1.], [-2., 2.]]

        density_1 = lambda tt: uniform.pdf(tt,
                                           loc=UNIFORM_SUPP[0][0],
                                           scale=UNIFORM_SUPP[0][1] - UNIFORM_SUPP[0][0])
        density_2 = lambda tt: uniform.pdf(tt,
                                           loc=UNIFORM_SUPP[1][0],
                                           scale=UNIFORM_SUPP[1][1] - UNIFORM_SUPP[1][0])

        def density_mu(tt):
            return density_1(tt)

        def density_nu(tt):
            return density_2(tt)

        def density_eta(tt):
            return np.maximum(density_mu(tt) - density_nu(tt), 0)

        def density_gamma(tt):
            return np.maximum(density_nu(tt) - density_mu(tt), 0)

        def p_dash_open_formula(tt, xx, yy):
            return (tt - yy) / (yy - xx) * density_eta(tt) / density_gamma(xx)

        def q_dash_open_formula(tt, xx, yy):
            return (xx - tt) / (yy - xx) * density_eta(tt) / density_gamma(yy)

        tt = np.linspace(-1 * 0.999, 0.5, 1000)
        starting_points = [[1.99, -1.01], [1.01, -1.99]]
        # forward equation
        empirical = diff_eq.system_ODE_solver(tt, starting_points[0],
                                              [p_dash_open_formula, q_dash_open_formula],
                                              left_or_right="left")
        q, p = zip(*empirical)
        p = function_iterable.replace_nans_numpy(np.array(p))
        q = function_iterable.replace_nans_numpy(np.array(q))

        true_p = lambda tt: -1 / 2 * (np.sqrt(12. - 3. * tt * tt) + tt)
        true_q = lambda tt: 1 / 2 * (np.sqrt(12. - 3. * tt * tt) - tt)
        error = np.mean(np.abs(p - true_p(tt)))
        error += np.mean(np.abs(q - true_q(tt)))

        # backward equation
        tt = np.linspace(-0.5, 1 * 0.999, 2000)
        empirical = diff_eq.system_ODE_solver(tt, starting_points[1],
                                              [p_dash_open_formula, q_dash_open_formula],
                                              left_or_right="left")
        q, p = zip(*empirical)
        p = function_iterable.replace_nans_numpy(np.array(p))
        q = function_iterable.replace_nans_numpy(np.array(q))
        error += np.mean(np.abs(p - true_p(tt)))
        error += np.mean(np.abs(q - true_q(tt)))

        assert error < 0.1
Example #4
    def pdf(self, X):
        """
        generates random samples from the uniform prior, for 3 parameters.
        param:
                  Ndata, number of samples
                  left, left boundary
                  right, right boundary
        output:
                  theta, random samples of size (Ndata,3)

        """
        left = np.log(self.left)
        right = np.log(self.right)

        # left = loc
        # right = loc + scale
        loc = left
        scale = right - left

        k0 = uniform.pdf(X[:, 0], loc=loc[0], scale=scale[0])
        k1 = uniform.pdf(X[:, 1], loc=loc[1], scale=scale[1])
        k2 = uniform.pdf(X[:, 2], loc=loc[2], scale=scale[2])

        theta = np.vstack((k0, k1, k2)).T
        return np.prod(theta, axis=1)
Example #5
    def indiv_MHRW(self, theta, cov, epsilon, prev_gen_sample, prev_gen_eta):
        ''' 
        Return the Metropolis Hastings Random Walk Kernel updates of the thetas=(phi, eta, tau) 
        theta: (array-like) the prior to simulate the population from
        cov: ((3,3)-array): The covariance matrix of the thetas previously computed
        epsilon (float): The tolerance level of the current iteration
        prev_gen_sample (array-like): A sample previously generated
        prev_gen_eta (array-like): The eta associated to that sample
        -------------------------------------------------------------------------
        returns (array-like): the updated or not theta
        '''

        new_theta = theta + np.random.multivariate_normal(
            [0, 0, 0], ((2.38**2) / 3) * cov)  # 2.38^2/d random-walk scaling with d = 3
        new_theta = new_theta / new_theta.sum()  # renormalise so the components sum to one (is this the right reparametrisation?)

        new_pop, fail_to_gen_pop = self.simulate_population(new_theta,
                                                            verbose=False)
        if not fail_to_gen_pop:
            new_sample = np.random.choice(new_pop,
                                          self.sample_size,
                                          replace=True)
            new_gen_eta = self.compute_eta(new_sample)

            # ratio of ABC acceptance indicators; raises ZeroDivisionError if
            # the previous sample no longer satisfies the tolerance
            indicatrices_ratio = int(
                self.compute_rho(new_gen_eta) < epsilon) / int(
                    self.compute_rho(prev_gen_eta) < epsilon)
            proposal_ratio = 1  # the random-walk proposal is symmetric, so q(theta*,theta)/q(theta,theta*) = 1
            new_priors = np.array([
                gamma.pdf(x=new_theta[0], a=0.1),
                uniform.pdf(x=new_theta[1], loc=0, scale=theta[0]),
                truncnorm.pdf(x=new_theta[2],
                              a=0,
                              b=10**10,
                              loc=0.198,
                              scale=0.067352)
            ])
            old_priors = np.array([
                gamma.pdf(x=theta[0], a=0.1),
                uniform.pdf(x=theta[1], loc=0, scale=theta[0]),
                truncnorm.pdf(x=theta[2],
                              a=0,
                              b=10**10,
                              loc=0.198,
                              scale=0.067352)
            ])

            acceptance_probas = np.minimum(
                np.ones(len(theta)),
                indicatrices_ratio * proposal_ratio * new_priors / old_priors)
            unif_draws = uniform.rvs(size=len(theta))

            new_theta_accepted = unif_draws <= acceptance_probas
            final_theta = np.where(new_theta_accepted, new_theta, theta)

        else:
            final_theta = theta

        return final_theta  # Might return acceptance probas
Example #7
def unif():
    """
    均匀分布函数
    """
    x = np.arange(-0.01, 2.01, 0.01)
    y = uniform.pdf(x, loc=0.0, scale=1.0)
    y1 = uniform.pdf(x, loc=0.0, scale=2.0)
    return x, y, y1
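
A minimal plotting sketch for this helper (assuming matplotlib is available):

import matplotlib.pyplot as plt

x, y, y1 = unif()
plt.plot(x, y, label='U(0, 1)')
plt.plot(x, y1, label='U(0, 2)')
plt.legend()
plt.show()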
Example #8
    def pdf(self, x: Tuple[float]):
        """Find the PDF for a certain x value.

        Args:
            x (float): The value for which the PDF is needed.
        """
        return uniform.pdf(x[0], loc=self.x_lower_bound, scale=self.x_upper_bound - self.x_lower_bound) \
               * uniform.pdf(x[1], loc=self.y_lower_bound, scale=self.y_upper_bound - self.y_lower_bound)
Example #9
def model_priors(enableInteriorViscosity):
    kLink_prior = lambda sample: uniform.pdf(sample, 10.0, 290.0)
    kBend_prior = lambda sample: uniform.pdf(sample, 50.0, 350.0)
    viscosityRatio_prior = lambda sample: uniform.pdf(sample, 1.0, 14.0)

    if enableInteriorViscosity:
        return [kLink_prior, kBend_prior, viscosityRatio_prior]
    else:
        return [kLink_prior, kBend_prior]
Example #10
def model_prior(sample, enableInteriorViscosity):
    kLink_prior = uniform.pdf(sample[0], 10.0, 290.0)
    kBend_prior = uniform.pdf(sample[1], 50.0, 350.0)

    if enableInteriorViscosity:
        viscosityRatio_prior = uniform.pdf(sample[2], 1.0, 14.0)
        return np.prod([kLink_prior, kBend_prior, viscosityRatio_prior])
    else:
        return np.prod([kLink_prior, kBend_prior])
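
A quick sanity check, with parameter values chosen inside the supports U(10, 300), U(50, 400) and U(1, 15):

sample = [150.0, 200.0, 7.0]
print(model_prior(sample, enableInteriorViscosity=True))   # 1/290 * 1/350 * 1/14
print(model_prior(sample, enableInteriorViscosity=False))  # 1/290 * 1/350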
Example #11
    def inferPosterior(self, state, action):
        """
			Uses inference engine to compute posterior probability from the 
			likelihood and prior (beta distribution).
		"""

        # Beta Distribution
        # self.prior = np.linspace(.01,1.0,101)
        # self.prior = beta.pdf(self.prior,1.4,1.4)
        # self.prior /= self.prior.sum()

        # Shifted Exponential
        # self.prior = np.zeros(101)
        # for i in range(50):
        # 	self.prior[i + 50] = i * .02
        # self.prior[100] = 1.0
        # self.prior = expon.pdf(self.prior)
        # self.prior[0:51] = 0
        # self.prior *= self.prior
        # self.prior /= self.prior.sum()

        # # Shifted Beta
        # self.prior = np.linspace(.01,1.0,101)
        # self.prior = beta.pdf(self.prior,1.2,1.2)
        # self.prior /= self.prior.sum()
        # self.prior[0:51] = 0

        # Uniform
        self.prior = np.linspace(.01, 1.0, 101)
        self.prior = uniform.pdf(self.prior)
        self.prior /= self.prior.sum()
        self.prior[0:51] = 0

        self.posterior = self.likelihood * self.prior
        self.posterior /= self.posterior.sum()
Example #12
    def run():
        N = 10000  # number of experiments
        x_Values = sorted(genRandos(0, 11, N))  # generate sorted random numbers
        pdf_list = pdf(x_Values, N)  # empirical pdf values
        cdf_list = cdf(x_Values, N)  # empirical cdf values

        fig, axs = plt.subplots(2)  #created 2 graphs
        fig.suptitle("Part A")
        plt.setp(axs, xticks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])  #x marks
        axs[0].plot(list(x_Values),
                    pdf_list)  #sorted values with appended pdf list
        axs[0].set_title("Probability Distribution Function")
        axs[1].plot(list(x_Values),
                    cdf_list)  #sorted values with appended cdf list
        axs[1].set_title("Cumulative Distribution Function")

        #Using stats.uniform for pdf and cdf
        x = np.linspace(0, 11, 10000)  #spacing for graph points
        fig, axs = plt.subplots(2)
        fig.suptitle("Part B")
        plt.setp(axs, xticks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
        axs[0].plot(x, uniform.pdf(x, 1, 9))  #pre built function for pdf
        axs[0].set_title("Probability Distribution Function")
        axs[1].plot(x, uniform.cdf(x, 1, 9))  #prebuilt function for cdf
        axs[1].set_title("Cumulative Distribution Function")
        plt.show()
Example #13
def dunif(x, minimum=0, maximum=1):
    """
    Evaluates the uniform density at x (the R-style dunif).
    """
    from scipy.stats import uniform
    result = uniform.pdf(x=x, loc=minimum, scale=maximum - minimum)
    return result
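
For instance:

dunif(0.5)        # 1.0, the U(0, 1) density
dunif(5, 0, 10)   # 0.1, the U(0, 10) density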
Example #14
    def pdf(self, x):
        # joint density of independent uniforms, one per input dimension
        pdf = 1.0
        x = np.atleast_2d(x)
        for ii in range(self.input_dim):
            bd = self.domain[ii]
            pdf *= uniform.pdf(x[:, ii], bd[0], bd[1] - bd[0])
        return pdf
Example #15
def prior(p_in):
    """
    A function to return the prior density for a given value of p.
    """
    # density of U(0, 1) at p_in; the density integrates to 1 over the support
    return uniform.pdf(p_in, 0, 1)
Example #16
def mixture_model_pdf(x,
                      precision=STARTING_PRECISION,
                      guess_rate=STARTING_GUESS_RATE,
                      bias=STARTING_BIAS):
    """Returns a probability density function for a mixture model.
    
    Parameters
    ----------
    x : A list (or other iterable object) of values for the x axis. For example
        `range(-180, 181)` would generate the PDF for every relevant value.
    precision: The precision (or kappa) parameter. This is inversely related to
               the standard deviation, and is a value in degrees.
    guess_rate: The proportion of guess responses (0 - 1).
    bias: The bias (or loc) parameter in degrees.
    
    Returns
    -------
    An array with probability densities for each value of x.
    """

    x = np.radians(x)
    pdf_vonmises = vonmises.pdf(x=x,
                                kappa=np.radians(precision),
                                loc=np.radians(bias))
    pdf_uniform = uniform.pdf(x, loc=-np.pi, scale=2 * np.pi)
    return pdf_vonmises * (1 - guess_rate) + pdf_uniform * guess_rate
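
A hedged usage sketch (the STARTING_* defaults are module constants defined elsewhere, so explicit values are passed here):

import numpy as np
from scipy.stats import vonmises, uniform

x = np.arange(-180, 181)
densities = mixture_model_pdf(x, precision=60, guess_rate=0.2, bias=0)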
Example #17
def calculate_row(Estart):

    # Set up container arrays in polar coordinates
    rvals = np.linspace(0.01, L, N)
    thetavals = np.linspace(-.99 * np.pi / 2, .99 * np.pi / 2, N)
    phivals = np.linspace(0, 2 * np.pi, N)

    # Output array of energy deposition
    a = np.zeros([N, N, N], float)

    x, y, z, dEdV, total_energy = [], [], [], [], []

    # parameters for rms spread (Abril Table 1 & eq 5)
    a1 = -0.058
    a2 = -1.868
    b1 = 9.39E-3
    b2 = 1.56E-3
    C1 = b1 * (Estart**a1)
    C2 = b2 * (Estart**a2)

    # populate entries of array via a double loop
    for i, r in enumerate(rvals):
        if i % 10 == 0: print("starting i={0}".format(i))
        # determine dEdx for this depth
        dEdx_index = np.searchsorted(linear_x, r)
        if dEdx_index == len(linear_dEdx):
            dEdx_val = 0
            dE_val = 0
        else:
            dEdx_val = linear_dEdx[dEdx_index]
            dE_val = linear_dE[dEdx_index]

        # determine spread of Gaussian at this depth    (Abril eqn 4)
        sigma = 0.1 + (C1 * (r * 1E4) + C2 *
                       (r * 1E4)**2) / (r * 1E4)  # in radians

        for j, theta in enumerate(thetavals):
            for k, phi in enumerate(phivals):
                # store energy deposited at this theta, r, phi
                # We want dE/dV * (r**2 * sin(theta)), but the expression simplifies:
                # dE = (dE/dr)*dr*(norm()*dtheta)*(uniform()*dphi)
                # dV = r**2 *sin(theta) * dr * dtheta * dphi
                # Thus we end up with just (dE/dr)*(norm())*(uniform())
                a[i, j, k] += (norm.pdf(theta, 0, sigma) * dEdx_val
                               * uniform.pdf(phi, 0, 2 * np.pi))
                # and store Cartesian data for output
                x.append(r * np.sin(theta) * np.cos(phi))
                y.append(r * np.sin(theta) * np.sin(phi))
                z.append(r * np.cos(theta))
                total_energy.append(a[i, j, k])

    t = simps(simps(simps(a, phivals), thetavals), rvals)

    print(t)

    return x, y, z, total_energy
Example #18
def simulate_and_forward_density(distrib, par=None):
    if distrib == 'r':
        insertion_spot = expon.rvs()
        q = expon.pdf(insertion_spot)
    else:
        insertion_spot = uniform.rvs()
        branch_length = par
        q = uniform.pdf(insertion_spot * branch_length, scale=branch_length)
    return insertion_spot, q
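
A brief usage sketch (any value other than 'r' selects the uniform branch):

spot, q = simulate_and_forward_density('r')           # exponential insertion point
spot, q = simulate_and_forward_density('u', par=2.0)  # uniform over a branch of length 2.0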
Example #19
    def pdf(self, x: float):
        """Find the PDF for a certain x value.

        Args:
            x (float): The value for which the PDF is needed.
        """
        return uniform.pdf(x,
                           loc=self.lower_bound,
                           scale=self.upper_bound - self.lower_bound)
Example #20
    def density(self, box, samples):
        # joint density of independent uniforms over each box dimension
        samples_density = list()
        for mu in samples:
            p_mu = 1.0
            assert len(mu) == len(box)
            for (mu_j, box_j) in zip(mu, box):
                # scipy's uniform takes (loc, scale), so the width goes second
                p_mu *= uniform.pdf(mu_j, box_j[0], box_j[1] - box_j[0])
            samples_density.append(p_mu)
        return samples_density
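
A quick check of the corrected call (assuming an instance d of the enclosing class):

box = [(0.0, 1.0), (0.0, 2.0)]
samples = [[0.5, 1.0], [0.5, 3.0]]
print(d.density(box, samples))  # [0.5, 0.0]: 1/(1*2) inside the box, 0 outside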
Example #21
def expectation(n):
    est = 0
    for i in range(n):
        qsamp = np.random.uniform(-5, 5)
        fpq = qsamp**2 * (norm.pdf(qsamp, loc=0, scale=1) /
                          uniform.pdf(qsamp, loc=-5, scale=10))
        est += fpq

    return est / n
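
This is plain importance sampling with a U(-5, 5) proposal; since the integrand is negligible outside [-5, 5], the estimate should approach E[X^2] = 1 for X ~ N(0, 1):

print(expectation(100000))  # close to 1.0, the second moment of the standard normal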
Example #22
def plot_uniform(min_val, max_val):
    with open('uniform.csv', 'r') as f1:
        x_uniform = f1.read().split('\n')[:-1]
    x_uniform = [float(s) for s in x_uniform]
    X1 = np.arange(min_val - 1, max_val + 1, 0.1)
    Y1 = uniform.pdf(X1, loc=min_val, scale=max_val - min_val)  # scale is the width, not the upper bound
    plt.hist(x_uniform, bins=30, density=True)
    plt.plot(X1, Y1, color='r')
    plt.show()
Example #23
def expectation2(n):
    est = 0
    samples = []
    for i in range(n):
        qsamp = np.random.uniform(-1, 1)
        fpq = qsamp**2 * (p(qsamp) / uniform.pdf(qsamp, loc=-1, scale=2))
        samples.append(fpq)
        est += fpq

    return est / n, samples
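
Here the proposal is U(-1, 1) and the target density p must be defined externally; a hedged sketch of one valid target:

from scipy.stats import truncnorm

def p(q):
    # hypothetical target: a standard normal truncated to [-1, 1]
    return truncnorm.pdf(q, -1, 1)

est, samples = expectation2(100000)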
Example #24
def uniform_distribution(select_size,
                         loc=-m.sqrt(3),
                         scale=2 * m.sqrt(3),
                         asked=rvs,
                         x=0):
    if asked == rvs:
        return uniform.rvs(size=select_size, loc=loc, scale=scale)
    elif asked == pdf:
        return uniform.pdf(x, loc=loc, scale=scale)
    elif asked == cdf:
        return uniform.cdf(x, loc=loc, scale=scale)
Example #25
def get_pdf(dist, x, df=1, loc=0, scale=1):
    if dist == 'normal':
        y = norm.pdf(x, loc=loc, scale=scale)
    elif dist == 'uniform':
        y = uniform.pdf(x, loc=loc, scale=scale)
    elif dist == 'chi2':
        y = chi2.pdf(x, df)
    else:
        print("No distribution found with name {:s}".format(dist))
        y = np.zeros_like(x)
    return y
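
Usage is straightforward; for example:

x = np.linspace(-3, 3, 601)
y_norm = get_pdf('normal', x)                    # standard normal density
y_unif = get_pdf('uniform', x, loc=-1, scale=2)  # U(-1, 1) density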
Example #26
    def probability(self, x, log):

        if self.log:
            x = np.log(x)

        if log:
            out = np.squeeze(uniform.logpdf(x, self._min, self.scale))
            return np.sum(out) if self.multivariate else out
        else:
            out = np.squeeze(uniform.pdf(x, self._min, self.scale))
            return np.prod(out) if self.multivariate else out
Example #27
    def pdf(self, x):
        x = np.atleast_2d(restore_X(x))
        pdf = norm.pdf(x[:,0], loc=0.1, scale=0.0161812) \
            * lognorm.pdf(x[:,1], s=1.0056, scale=np.exp(7.71))

        lb, ub = map(list, zip(*self.domain))
        lb, ub = restore_X(np.array(lb)), restore_X(np.array(ub))

        for ii in range(2, self.input_dim):
            bd = self.domain[ii]
            pdf *= uniform.pdf(x[:,ii], lb[ii], ub[ii]-lb[ii])
        return pdf / np.prod(ub-lb)
Example #28
def posterior(theta, prior, ip_data):
    """
    Posterior density function.

    Parameters:
    -----------
    theta : float
        Unknown (logarithm of) thermal diffusivity.
    prior : array_like ([dist_type, param1, param2])
        Vector defining the prior distribution through the dist_type ('normal'
        or 'unif'), and two parameters (floats, mean and std for Gaussian, LHS
        and range for Uniform).
    ip_data : dictionary
        Parameters defining the inverse problem.

    Returns:
    --------
    posterior : nparray
        Value of the (un-normalised) posterior density evaluated at the input
        theta.
    prior : nparray
        Value of the prior density evaluated at the input theta.
    likelihood : nparray
        Value of the likelihood function evaluated at the input theta.
    """

    # Ensure input theta is an ndarray.
    theta = np.array(theta)


    # Prior density.
    if prior[0] == "unif":
        p0 = Unif.pdf(theta, prior[1], prior[2]-prior[1])  # Uniform pdf
    elif prior[0] == "normal":
        p0 = Norm.pdf(theta, prior[1], prior[2]) # Gaussian pdf


    # Single theta value.
    if theta.ndim == 0:
        F = uxt(ip_data['x_obs'], ip_data['t_obs'], np.exp(theta)).ravel()
        L = mvNorm.pdf(F, ip_data['d'], ip_data['sig_rho'])

    # Multiple theta values.
    elif theta.ndim == 1:
        F = uxt(ip_data['x_obs'], ip_data['t_obs'], np.exp(theta))
        F = F.reshape(-1, F.shape[2])

        L = np.stack([mvNorm.pdf(F[:,i], ip_data['d'], ip_data['sig_rho']) for i in range(theta.shape[0])])


    # Return evaluation of the posterior, prior and likelihood.
    return p0*L, p0, L
Example #29
def pdf(size=1e4, a=.004, b=.01):
    """Make a grid of IFR for plotting.
    
    Args:
        size (int): Number of values in grid.
        a,b (float): Parameters of the distribution U(a,b).
    Returns:
        np.array: Grid of x-axis.
        np.array: Values of U(a,b) PDF for the grid.
    """
    xgrid = np.linspace(a - (b - a) / 4, b + (b - a) / 4, num=int(size))
    fx = uniform.pdf(xgrid, a, b - a)
    return xgrid, fx
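
For example, to plot the default IFR prior U(0.004, 0.01):

import matplotlib.pyplot as plt

xgrid, fx = pdf()
plt.plot(xgrid, fx)
plt.show()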
Example #30
def compute_uniform_cdf(min_val, max_val, saving_folder):

    # range of values over which the PDF/CDF are computed (the upper bound of
    # 50 is arbitrary and should be high enough)
    x_range = np.linspace(0, 50, 1000)

    # Probability density and cumulative distribution functions
    pdf = uniform.pdf(x_range, loc=min_val, scale=(max_val - min_val))
    cdf = uniform.cdf(x_range, loc=min_val, scale=(max_val - min_val))

    # Storing cdf, pdf and x_range vectors together
    proba_mat = np.vstack([x_range, cdf, pdf]).T

    return proba_mat
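
A hedged usage sketch (the saving_folder argument is accepted but unused in the snippet shown):

proba_mat = compute_uniform_cdf(0.0, 10.0, saving_folder=None)
x_range, cdf_vals, pdf_vals = proba_mat.T  # columns: x, CDF, PDF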
Example #31
    def _plot_prior_posterior(self, chains, show_charts):
        n_bins = int(sqrt(chains.shape[0]))
        n_cols = int(self.n_param**0.5)
        n_rows = n_cols + 1 if self.n_param > n_cols**2 else n_cols
        subplot_shape = (n_rows, n_cols)

        plt.figure(figsize=(7 * 1.61, 7))

        for count, param in enumerate(list(self.params)):
            mu = self.prior_info.loc[str(param)]['mean']
            sigma = self.prior_info.loc[str(param)]['std']
            a = self.prior_info.loc[str(param)]['param a']
            b = self.prior_info.loc[str(param)]['param b']
            dist = self.prior_info.loc[str(param)]['distribution']

            ax = plt.subplot2grid(subplot_shape,
                                  (count // n_cols, count % n_cols))
            ax.hist(chains[str(param)],
                    bins=n_bins,
                    density=True,
                    color='royalblue',
                    edgecolor='black')
            ax.set_title(self.prior_dict[param]['label'])
            x_min, x_max = ax.get_xlim()
            x = linspace(x_min, x_max, n_bins)

            if dist == 'beta':
                y = beta.pdf(x, a, b)
                ax.plot(x, y, color='red')

            elif dist == 'gamma':
                y = gamma.pdf(x, a, scale=b)
                ax.plot(x, y, color='red')

            elif dist == 'invgamma':
                # equivalent to invgamma.pdf(x, a, scale=b)
                y = (b**a) * invgamma.pdf(x, a) * exp((1 - b) / x)
                ax.plot(x, y, color='red')

            elif dist == 'uniform':
                y = uniform.pdf(x, loc=a, scale=b - a)
                ax.plot(x, y, color='red')

            else:  # Normal
                y = norm.pdf(x, loc=mu, scale=sigma)
                ax.plot(x, y, color='red')

        plt.tight_layout()

        if show_charts:
            plt.show()
Example #32
	def inferPosterior(self, state, action, prior='uniform'):
		"""
			Uses inference engine to compute the posterior probability from the
			likelihood and the selected prior.
		"""

		if prior == 'beta':
			# Beta Distribution
			self.prior = np.linspace(.01,1.0,101)
			self.prior = beta.pdf(self.prior,1.4,1.4)
			self.prior /= self.prior.sum()

		elif prior == 'shiftExponential':
			# Shifted Exponential
			self.prior = np.zeros(101)
			for i in range(50):
				self.prior[i + 50] = i * .02
			self.prior[100] = 1.0
			self.prior = expon.pdf(self.prior)
			self.prior[0:51] = 0
			self.prior *= self.prior
			self.prior /= self.prior.sum()

		elif prior == 'shiftBeta':
			# Shifted Beta
			self.prior = np.linspace(.01,1.0,101)
			self.prior = beta.pdf(self.prior,1.2,1.2)
			self.prior /= self.prior.sum()
			self.prior[0:51] = 0

		elif prior == 'uniform':
			# Uniform
			self.prior = np.zeros(len(self.sims))	
			self.prior = uniform.pdf(self.prior)
			self.prior /= self.prior.sum()


		self.posterior = self.likelihood * self.prior
		self.posterior /= self.posterior.sum()
Example #34
    def build_para(self, charts=False):

        home = '/home/nealbob'
        folder = '/Dropbox/Thesis/STATS/chapter3/'
        folder2 = '/Dropbox/Thesis/STATS/chapter2/'
        folder7 = '/Dropbox/Thesis/STATS/chapter7/'
        out = '/Dropbox/Thesis/IMG/chapter3/'
        out7 = '/Dropbox/Thesis/IMG/chapter7/'
        img_ext = '.pdf'

        MDB_dams = pandas.read_csv(home + folder + 'MDB_dams.csv')
        AUS_RIVERS = pandas.read_csv(home + folder + 'AUS_RIVERS.csv')
        OMEGA = pandas.read_csv(home + folder7 + 'omega.csv')

        #MDB_table = pandas.read_csv(home + folder + 'MDB_table.csv')
        #open(home + folder + "MDB_table.tex", "w").write(MDB_table.to_latex(index=False, float_format=lambda x: '%10.1f' % x ))

        #################       WATER SUPPLY        ################

        #---------------#       K - capacity

        K = MDB_dams['K']
        sample = (K > 50000)
        K = K[sample] / 1000

        bins = np.linspace(min(K), max(K), 18)
        data = [K]

        chart = {'OUTFILE' : (home + out + 'K' + img_ext),
                 'XLABEL' : 'Storage capacity (GL)',
                 'XMIN' : min(K),
                 'XMAX' : max(K),
                 'BINS' : bins}

        if charts:
            self.build_chart(chart, data, chart_type='hist')

        #---------------#       I_over_K - mean inflow over storage capacity

        K = MDB_dams['K']
        E_I = MDB_dams['mean_I']
        sample = (K > 50000)
        I_over_K = E_I[sample] / K[sample]

        I_K_param = np.zeros(2)
        I_K_param[0] = np.percentile(I_over_K, 10)
        I_K_param[1] = np.percentile(I_over_K, 83)


        x = np.linspace(0.2, 5, 500)
        y = uniform.pdf(x, loc=I_K_param[0], scale=(I_K_param[1]-I_K_param[0]))

        bins = np.linspace(min(I_over_K), 4, 18)
        bins[17] = 15
        data = [I_over_K, x, y]
        data2 = [I_over_K]

        chart = {'OUTFILE' : (home + out + 'I_over_K' + img_ext),
                 'XLABEL' : 'Mean annual inflow over storage capacity',
                 'XMIN' : 0,
                 'XMAX' : 4,
                 'BINS' : bins}

        chart2 = {'OUTFILE' : (home + out + 'I_over_K_2' + img_ext),
                  'XLABEL' : 'Mean annual inflow over storage capacity',
                  'XMIN' : 0,
                  'XMAX' : 4,
                  'BINS' : bins}

        if charts:
            self.build_chart(chart, data, chart_type='hist')
            self.build_chart(chart2, data2, chart_type='hist')

        #---------------#       SD_over_I - SD of inflow over mean inflow
        sample = AUS_RIVERS['MAR'] < 700
        SD_over_I = AUS_RIVERS['Cv'][sample]

        bins = np.linspace(min(SD_over_I), max(SD_over_I), 13)
        x = np.linspace(min(SD_over_I), max(SD_over_I), 500)
        y = uniform.pdf(x, loc=0.43, scale=(1-0.4))

        data = [SD_over_I, x, y]
        data2 = [SD_over_I]

        chart = {'OUTFILE' : (home + out + 'SD_over_I' + img_ext),
                 'XLABEL' : 'Standard deviation of annual inflow over mean',
                 'XMIN' : 0,
                 'XMAX' : max(SD_over_I),
                 'BINS' : bins}

        chart2 = {'OUTFILE' : (home + out + 'SD_over_I_2' + img_ext),
                  'XLABEL' : 'Standard deviation of annual inflow over mean',
                  'XMIN' : 0,
                  'XMAX' : max(SD_over_I),
                  'BINS' : bins}

        if charts:
            self.build_chart(chart, data, chart_type='hist')
            self.build_chart(chart2, data2, chart_type='hist')

        #---------------#       SA_over_K - surface area over capacity

        SA = MDB_dams['sa']
        sample = (K > 50000)
        SA_over_K = SA[sample] / K[sample]
        bins = np.linspace(min(SA_over_K), max(SA_over_K), 28)
        SA_K_param = np.zeros(2)
        SA_K_param[0] = np.percentile(SA_over_K, 10)
        SA_K_param[1] = np.percentile(SA_over_K, 81.5)
        x = np.linspace(min(SA_over_K), max(SA_over_K), 500)
        y = uniform.pdf(x, loc=SA_K_param[0], scale=(SA_K_param[1] - SA_K_param[0]))

        data = [SA_over_K, x, y]
        data2 = [SA_over_K]

        chart = {'OUTFILE' : (home + out + 'SA_over_K' + img_ext),
                 'XLABEL' : 'Storage surface area over storage capacity',
                 'XMIN' : min(SA_over_K),
                 'XMAX' : max(SA_over_K),
                 'BINS' : bins}

        chart2 = {'OUTFILE' : (home + out + 'SA_over_K_2' + img_ext),
                  'XLABEL' : 'Storage surface area over storage capacity',
                  'XMIN' : min(SA_over_K),
                  'XMAX' : max(SA_over_K),
                  'BINS' : bins}

        if charts:
            self.build_chart(chart, data, chart_type='hist')
            self.build_chart(chart2, data2, chart_type='hist')

        #---------------#       evap - net evaporation rate

        evap = 0.75 * (MDB_dams['net_evap'] / 1000)
        evap = evap[sample]
        sample2 = evap > 0
        evap = evap[sample2]

        bins = np.linspace(min(evap), max(evap), 18)

        evap_param = np.zeros(2)
        evap_param[0] = np.percentile(evap, 15)
        evap_param[1] = np.percentile(evap, 85)

        x = np.linspace(min(evap), max(evap), 500)
        y = uniform.pdf(x, loc=evap_param[0], scale=(evap_param[1]-evap_param[0]))
        data = [evap, x, y]
        data2 = [evap]

        chart = {'OUTFILE' : (home + out + 'evap' + img_ext),
                 'XLABEL' : 'Average annual evaporation rate (meters)',
                 'XMIN' : min(evap),
                 'XMAX' : max(evap),
                 'BINS' : bins}

        chart2 = {'OUTFILE' : (home + out + 'evap_2' + img_ext),
                  'XLABEL' : 'Average annual evaporation rate (meters)',
                  'XMIN' : min(evap),
                  'XMAX' : max(evap),
                  'BINS' : bins}

        if charts:
            self.build_chart(chart, data, chart_type='hist')
            self.build_chart(chart2, data2, chart_type='hist')

        #---------------#      d_loss - delivery losses in irrigation areas

        d_loss = np.zeros([5, 2])

        # delivery-loss regressions, one row per irrigation area
        areas = ['Murray', 'Shep', 'Jemm', 'Coll', 'MIA']
        for i, area in enumerate(areas):
            area_data = pandas.read_csv(home + folder2 + area + '_loss.csv')
            y = area_data['Loss']
            N = len(y)
            X = np.vstack([area_data['Pumped'], np.ones(N)]).T
            d_loss[i,:] = np.linalg.lstsq(X, y)[0]
            d_loss[i,1] = d_loss[i,1] / np.mean(area_data['Pumped'])

        #---------------#   Inflow autocorrelation

        rho_param = [0.2, 0.3]

        #################       WATER DEMAND         ################


        #---------------#      Theta parameters (yield functions)

        theta_mu = np.zeros([6, 2])
        theta_mu[:,0] = np.array([154.7, 236.7, -35.8, 20.7, 14.9, -48.5])
        #theta_mu[:,1] = np.array([-1785.4, 2545.3, -157.2, 1924.3, -537.5, -118.9])
        theta_mu[:,1] = np.array([-1773.8, 2135.0, -133.3, 1597.1, -520.9, -100.8])

        #clow = (41.5+14.9+231.7)*0.1
        #chigh = (-76.5+2153.9-537.5)*0.1

        theta_sig = np.zeros([6, 2])
        #theta_sig[:,0] = np.array([0, 51.1, 16.8, 96.8, 38.8, 17.3])
        #theta_sig[:,1] = np.array([0, 306.2, 22.3, 2370.1, 952.2, 151.4])
        theta_sig[:,0] = np.array([0, 51.1, 16.8, 0, 0, 17.3])
        theta_sig[:,1] = np.array([0, 306.2, 22.3, 0, 0, 151.4])

        q_bar_limits = np.zeros([2,2])
        q_bar_limits[:, 0] = np.array([0.5, 6.5])
        q_bar_limits[:, 1] = np.array([5, 14])

        w_ha = np.linspace(0,3,100)
        profit_ha = theta_mu[0,0] + theta_mu[1,0]*w_ha + theta_mu[2,0] * w_ha**2 + theta_mu[3,0]*1 + theta_mu[4,0]*1**2 + theta_mu[5,0]*1*w_ha

        data = [[w_ha, profit_ha]]
        chart = {'OUTFILE' : (home + out + 'low_yield' + img_ext),
                 'XLABEL' : 'Water use per unit land (ML / HA)',
                 'XMIN' : min(w_ha),
                 'XMAX' : max(w_ha),
                 'YMIN' : 0,
                 'YMAX' : max(profit_ha)*1.05,
                 'YLABEL' : 'Profit per unit land (\$ / HA)'}
        if charts:
            self.build_chart(chart, data, chart_type='plot')

        w_ha = np.linspace(0,9,100)
        profit_ha = theta_mu[0,1] + theta_mu[1,1]*w_ha + theta_mu[2,1] * w_ha**2 + theta_mu[3,1]*1 + theta_mu[4,1]*1**2 + theta_mu[5,1]*1*w_ha

        data = [[w_ha, profit_ha]]
        chart = {'OUTFILE' : (home + out + 'high_yield' + img_ext),
                 'XLABEL' : 'Water use per unit land (ML / HA)',
                 'XMIN' : min(w_ha),
                 'XMAX' : max(w_ha),
                 'YMIN' : min(profit_ha)*1.5,
                 'YMAX' : max(profit_ha)*1.05,
                 'YLABEL' : 'Profit per unit land (\$ / HA)'}
        if charts:
            self.build_chart(chart, data, chart_type='plot')


        #---------------#       Epsilon parameters (yield functions)

        rho_eps_param = np.array([0.3, 0.5])
        sig_eta_param = np.array([0.1, 0.2])

        #################       Final Parameters       ################

        self.I_K_param = I_K_param
        self.SD_I_param = np.array([0.4, 1])
        self.rho_param = [0.2, 0.3]
        self.SA_K_param = SA_K_param
        self.evap_param = evap_param
        self.d_loss_param_a = np.array([0, 0.15])
        self.d_loss_param_b = np.array([0.15, 0.30])
        self.theta_mu = theta_mu
        self.theta_sig = theta_sig / 3
        self.q_bar_limits = q_bar_limits
        self.rho_eps_param = np.array([0.3, 0.5])
        self.sig_eta_param = np.array([0.1, 0.2])
        self.prop_high = np.array([0.05, 0.35])
        self.target_price =  10 #np.array([50,200])
        self.t_cost_param = np.array([10, 100])
        self.Lambda_high_param = np.array([1, 2])
        self.relative_risk_aversion = np.array([0, 3])

        #### Summer-Winter model - chapter 7
        self.ch7_param = {}
        self.ch7_param['omega_mu'] = [0.55, 0.75]
        self.ch7_param['omega_sig'] = [0.09, 0.12]
        self.ch7_param['omega_ab'] = [0.3, 0.90]
        self.ch7_param['delta_a'] = [0.02, 0.06]
        self.ch7_param['omegadelta'] = [0.22, 0.36]
        self.ch7_param['F_bar'] = [1, 1]
        self.ch7_param['delta_b'] = [0.3, 0.6]
        self.ch7_param['delta_Ea'] = [0, 0.1]
        self.ch7_param['delta_Eb'] = [0.1, 0.3]
        self.ch7_param['delta_R'] = [0, 0.2]
        self.ch7_param['b_1'] = [0, 0.6]
        self.ch7_param['b_value'] = [30, 130]
        self.ch7_param['Bhat_alpha'] = [0.1, 0.5]
        self.ch7_param['inflow_share'] = [0, 0.5]
        self.ch7_param['capacity_share'] = [0, 0.5]
        self.ch7_param['High'] = [1, 1]
        self.ch7_param['e_sig'] = [0, 1]

        para_dist = [self.I_K_param, self.SD_I_param, self.rho_param,
                     self.SA_K_param, self.evap_param, self.d_loss_param_a,
                     self.d_loss_param_b, self.t_cost_param,
                     self.Lambda_high_param, [100, 100], [30, 70], [30, 70],
                     self.theta_mu, self.theta_sig, self.q_bar_limits,
                     self.rho_eps_param, self.sig_eta_param, self.prop_high,
                     self.target_price, self.relative_risk_aversion,
                     self.ch7_param]

        with open('para_dist.pkl', 'wb') as f:
            pickle.dump(para_dist, f)

        rows = [r'$E[I_t]/K$', r'$c_v$', r'$\rho_I$', r'$\alpha K^{2/3} / K$',
                r'$\delta_0$', r'$\delta_{1a}$', r'$\delta_{1b}$', r'$\tau$',
                r'$\Lambda_{high}$', r'$n$', r'$n_{low}$', r'$n_{high}$']
        cols = ['Min', 'Central case', 'Max']
        n_rows = 12
        data = []
        for i in range(n_rows):
            record = {}
            record['Min'] = para_dist[i][0]
            record['Central case'] = np.mean(para_dist[i])
            record['Max'] = para_dist[i][1]
            data.append(record)
        tab = pandas.DataFrame(data)
        tab.index = rows
        tab_text = tab.to_latex(float_format =  '{:,.2f}'.format, columns=['Min', 'Central case', 'Max' ], index=True)
        with open(home + folder + "para_table.txt", "w") as f:
            f.write(tab_text)
            f.close()
Example #36
File: ex_10.py Project: amaggi/bda
def evidence(y, n_values, prior_pdf):
    # marginal likelihood: sum the likelihood weighted by the prior over the grid
    integrand = np.empty(len(n_values), dtype=float)
    for i in range(len(n_values)):
        integrand[i] = prior_pdf[i] * likelihood(y, n_values[i])
    return np.sum(integrand)
ev = evidence(Y_SEEN, n_values, prior_pdf)

# get the posterior
post_pdf = np.empty(len(n_values), dtype=float)
for i in range(len(n_values)):
    post_pdf[i] = likelihood(Y_SEEN, n_values[i]) * prior_pdf[i] / ev

post_mean = int(np.round(trapz(n_values * post_pdf, n_values)))
post_stdev = int(np.round(np.sqrt(trapz((n_values - post_mean)**2 * post_pdf, n_values))))

print(post_mean, post_stdev)

# redo for non-informative prior
prior_pdf_uniform = uniform.pdf(n_values, loc=1, scale=FACT*N_MEAN)
ev = evidence(Y_SEEN, n_values, prior_pdf_uniform)

# get the posterior
post_pdf_uniform = np.empty(len(n_values), dtype=float)
for i in range(len(n_values)):
    post_pdf_uniform[i] = likelihood(Y_SEEN, n_values[i]) * prior_pdf_uniform[i] / ev

post_mean = int(np.round(trapz(n_values * post_pdf_uniform, n_values)))
post_stdev = int(np.round(np.sqrt(trapz((n_values - post_mean)**2 * post_pdf_uniform, n_values))))

print(post_mean, post_stdev)

fig, axes = plt.subplots(1, 2)
plt.sca(axes[0])
plt.plot(n_values, prior_pdf, label='geometric')
Example #37
    def generateDistributionPlotValues(self, specification) :
            
        sample_number = 1000
        x_values = []
        y_values = []

        # Generate plot values from selected distribution via PDF
        distribution = specification['distribution']
        if distribution == 'uniform' :
            lower = specification['settings']['lower']
            upper = specification['settings']['upper']
            base = upper - lower
            incr = base/sample_number
            for i in range(sample_number) :
                x_values.append(lower+i*incr)
            y_values = uniform.pdf(x_values, loc=lower, scale=base).tolist()
        elif distribution == 'normal' :
            mean = specification['settings']['mean']
            std_dev = specification['settings']['std_dev']
            x_min = mean - 3*std_dev
            x_max = mean + 3*std_dev
            incr = (x_max - x_min)/sample_number
            for i in range(sample_number) :
                x_values.append(x_min+i*incr)
            y_values = norm.pdf(x_values, loc=mean, scale=std_dev).tolist()
        elif distribution == 'triangular' :
            a = specification['settings']['a']
            base = specification['settings']['b'] - a
            c_std = (specification['settings']['c'] - a)/base
            incr = base/sample_number
            for i in range(sample_number) :
                x_values.append(a+i*incr)
            y_values = triang.pdf(x_values, c_std, loc=a, scale=base).tolist()
        elif distribution == 'lognormal' :
            lower = specification['settings']['lower']
            scale = specification['settings']['scale']
            sigma = specification['settings']['sigma']
            x_max = lognorm.isf(0.01, sigma, loc=lower, scale=scale)
            incr = (x_max - lower)/sample_number
            for i in range(sample_number) :
                x_values.append(lower+i*incr)
            y_values = lognorm.pdf(x_values, sigma, loc=lower, scale=scale).tolist()
        elif distribution == 'beta' :
            lower = specification['settings']['lower']
            base = specification['settings']['upper'] - lower
            incr = base/sample_number
            for i in range(sample_number) :
                x_values.append(lower+i*incr)
            a = specification['settings']['alpha']
            b = specification['settings']['beta']
            y_values = beta.pdf(x_values, a, b, loc=lower, scale=base).tolist()

        # Remove any nan/inf values
        remove_indexes = []
        for i in range(sample_number) :
            if not np.isfinite(y_values[i]) :
                remove_indexes.append(i)
        x_values = np.delete(x_values, remove_indexes)
        y_values = np.delete(y_values, remove_indexes)

        return { 'x_values' : x_values, 'y_values' : y_values }
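
A hedged usage sketch (assuming obj is an instance of the enclosing class):

spec = {'distribution': 'uniform',
        'settings': {'lower': 0.0, 'upper': 2.0}}
plot_values = obj.generateDistributionPlotValues(spec)
# plot_values['y_values'] is constant at 0.5, the U(0, 2) density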