Example #1
def obj_fun(mu_param, log_sigma_param, log_c_param):
    global inputs
    G_input = log_unlormalized_pdf(inputs, mu_param, log_sigma_param,
                                   log_c_param) - np.log(
                                       norm.pdf(inputs, noise_mu, noise_sigma))
    G_noise = log_unlormalized_pdf(noise, mu_param, log_sigma_param,
                                   log_c_param) - np.log(
                                       norm.pdf(noise, noise_mu, noise_sigma))
    h_input = logistic_fn(G_input)
    h_noise = logistic_fn(G_noise)

    loss = np.log(h_input) + np.log(1 - h_noise)
    return -.5 * (1 / no_of_sample) * np.sum(loss)
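
The helpers and globals this loss relies on (logistic_fn, log_unlormalized_pdf, inputs, noise, noise_mu, noise_sigma, no_of_sample) are not shown above; a minimal runnable sketch, with assumed definitions and synthetic data (and keeping the snippet's own spelling of the helper name), could be:

import jax.numpy as np
from jax.scipy.stats import norm

def logistic_fn(x):
    # Standard logistic sigmoid.
    return 1. / (1. + np.exp(-x))

def log_unlormalized_pdf(x, mu_param, log_sigma_param, log_c_param):
    # One possible parameterization: an unnormalized Gaussian whose
    # log-normalizer log_c_param is learned jointly with mean and log-scale.
    return -0.5 * ((x - mu_param) / np.exp(log_sigma_param)) ** 2 - log_c_param

# Hypothetical globals used by obj_fun: data, noise samples, and the noise distribution.
no_of_sample = 100
noise_mu, noise_sigma = 0., 1.
inputs = np.linspace(-2., 2., no_of_sample)
noise = np.linspace(-3., 3., no_of_sample)

print(obj_fun(0.1, 0.0, 0.0))   # scalar NCE-style loss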
Example #2
    def normal(self, x, mu, sigma):
        r"""
        The probability density function of the Normal distribution evaluated
        at :code:`x` given parameters of mean of :code:`mu` and standard deviation
        of :code:`sigma`.

        Example:

            >>> import pyhf
            >>> pyhf.set_backend("jax")
            >>> pyhf.tensorlib.normal(0.5, 0., 1.)
            DeviceArray(0.35206533, dtype=float64)
            >>> values = pyhf.tensorlib.astensor([0.5, 2.0])
            >>> means = pyhf.tensorlib.astensor([0., 2.3])
            >>> sigmas = pyhf.tensorlib.astensor([1., 0.8])
            >>> pyhf.tensorlib.normal(values, means, sigmas)
            DeviceArray([0.35206533, 0.46481887], dtype=float64)

        Args:
            x (:obj:`tensor` or :obj:`float`): The value at which to evaluate the Normal distribution p.d.f.
            mu (:obj:`tensor` or :obj:`float`): The mean of the Normal distribution
            sigma (:obj:`tensor` or :obj:`float`): The standard deviation of the Normal distribution

        Returns:
            JAX ndarray: Value of Normal(x|mu, sigma)
        """
        return norm.pdf(x, loc=mu, scale=sigma)
Example #3
def EI(mean, std, best):
    # from https://people.orie.cornell.edu/pfrazier/Presentations/2011.11.INFORMS.Tutorial.pdf
    delta = -(mean - best)
    deltap = -(mean - best)
    deltap = np.clip(deltap, 0., None)
    Z = delta / std
    EI = deltap - np.abs(deltap) * norm.cdf(-Z) + std * norm.pdf(Z)
    return -EI[0]
Example #4
def EIC(mean, std, best):
    # Constrained expected improvement
    delta = -(mean[0, :] - best)
    deltap = -(mean[0, :] - best)
    deltap = np.clip(deltap, 0., None)
    Z = delta / std[0, :]
    EI = deltap - np.abs(deltap) * norm.cdf(-Z) + std[0, :] * norm.pdf(Z)  # use the objective row's std, matching Z
    constraints = np.prod(norm.cdf(mean[1:, :] / std[1:, :]), axis=0)
    return -EI[0] * constraints[0]
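
A minimal usage sketch for the two acquisition functions above, with hypothetical posterior statistics (in practice mean and std would come from a Gaussian-process surrogate; np and norm are assumed here to be jax.numpy and jax.scipy.stats.norm):

import jax.numpy as np
from jax.scipy.stats import norm

best = 0.3                               # incumbent (best observed) objective value
mean = np.array([[0.25], [0.10]])        # row 0: objective mean, rows 1+: constraint means
std = np.array([[0.05], [0.20]])         # matching standard deviations

print(EI(mean[0], std[0], best))         # plain expected improvement at one candidate point
print(EIC(mean, std, best))              # EI weighted by the probability the constraint is >= 0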
Example #5
def plot_component_norm_pdfs(
    log_component_weights, component_mus, log_component_scales, xmin, xmax, ax, title
):
    component_weights = normalize_weights(np.exp(log_component_weights))
    component_scales = np.exp(log_component_scales)
    x = np.linspace(xmin, xmax, 1000).reshape(-1, 1)
    pdfs = component_weights * norm.pdf(x, loc=component_mus, scale=component_scales)
    for component in range(pdfs.shape[1]):
        ax.plot(x, pdfs[:, component])
    ax.set_title(title)
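
plot_component_norm_pdfs relies on a normalize_weights helper that is not shown; a minimal usage sketch with an assumed helper and hypothetical mixture parameters:

import jax.numpy as np
from jax.scipy.stats import norm
import matplotlib.pyplot as plt

def normalize_weights(weights):
    # Assumed helper: rescale positive weights so they sum to one.
    return weights / np.sum(weights)

fig, ax = plt.subplots()
plot_component_norm_pdfs(
    log_component_weights=np.log(np.array([0.3, 0.7])),
    component_mus=np.array([-1., 2.]),
    log_component_scales=np.log(np.array([0.5, 1.0])),
    xmin=-4., xmax=6., ax=ax, title="Hypothetical two-component mixture")
plt.show()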
Example #6
def actor_loss(actor_params, fixed_critic_params, env_dynamics, batch):
    state, _, _, _, alpha = batch

    inputs = np.concatenate((state, alpha), 1)
    action = actor_forward(actor_params, inputs)

    inputs = np.concatenate((state, action, alpha), 1)
    q_s, upsilon_s = critic_forward(fixed_critic_params, inputs)

    cvar = q_s - (norm.pdf(alpha) / norm.cdf(alpha)) * np.sqrt(upsilon_s)

    return -cvar.mean()
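
actor_forward and critic_forward are not shown, so the loss cannot be run standalone; the cvar line itself just shifts the critic mean down by the norm.pdf(alpha) / norm.cdf(alpha) ratio times the predicted standard deviation, e.g. with hypothetical scalars:

import jax.numpy as np
from jax.scipy.stats import norm

q_s, upsilon_s, alpha = 1.0, 0.25, 0.05    # hypothetical critic mean, variance, and risk level
cvar = q_s - (norm.pdf(alpha) / norm.cdf(alpha)) * np.sqrt(upsilon_s)
print(cvar)                                 # the quantity whose batch mean is maximised above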
Example #7
def check_discretize_grad():

    def f(y, t): return -y + np.sin(2. * y) - np.cos(2. * t)
    def g(y, t): return np.exp(np.sin(2. * y) - np.cos(2. * t))

    init_ps = lambda x: norm.pdf(x, 0., 0.8)
    exact_grad_init_ps = vmap(grad(init_ps))
    exact_grad_grad_init_ps = vmap(grad(grad(init_ps)))
    ylims = [-2.1, 2.1]
    xs = np.linspace(ylims[0], ylims[1], 10000)
    pvals = init_ps(xs)
    density1 = build_fd_func(ylims[0], ylims[1], pvals)

    density = vmap(density1)
    density_grad = vmap(grad(density1))
    density_grad_grad = vmap(grad(grad(density1)))

    ylims2 = [-2.2, 2.2]
    xs2 = np.linspace(ylims2[0], ylims2[1], 30000)

    # Set up figure.
    fig = plt.figure(figsize=(8, 6), facecolor='white')
    ax = fig.add_subplot(111, frameon=False)
    plt.ion()
    plt.show(block=False)
    plt.plot(xs2, density(xs2), 'g')
    plt.plot(xs2, density_grad(xs2), 'b')
    plt.plot(xs2, exact_grad_init_ps(xs2), 'b--')
    plt.plot(xs2, density_grad_grad(xs2), 'r')
    plt.plot(xs2, exact_grad_grad_init_ps(xs2), 'r--')

    dp_dt = lambda x, t, p: fokker_planck(f, g, x, t, p)
    print(dp_dt(np.array(0.1), 0.2, density1))
    fp = vmap(lambda x: dp_dt(x, 0., density1))
    plt.plot(xs2, fp(xs2), 'k')

    ax.set_xlabel('x')
    ax.set_ylabel('p')
    plt.draw()
    plt.pause(100)
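
build_fd_func and fokker_planck come from elsewhere in this project and are not shown; as a rough sketch (an assumption, not the project's actual implementation), a Fokker-Planck right-hand side consistent with how fokker_planck(f, g, x, t, p) is called here could be:

import jax
import jax.numpy as np

def fokker_planck(f, g, x, t, p):
    # 1-D Fokker-Planck operator for dX = f(X, t) dt + g(X, t) dW:
    #   dp/dt = -d/dx[ f(x, t) p(x) ] + 0.5 d^2/dx^2[ g(x, t)^2 p(x) ]
    drift = lambda y: f(y, t) * p(y)
    diffusion = lambda y: 0.5 * g(y, t) ** 2 * p(y)
    return -jax.grad(drift)(x) + jax.grad(jax.grad(diffusion))(x)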
Example #8
    def variational_entropy(
        self, zeta: jnp.DeviceArray, phi: jnp.DeviceArray
    ) -> jnp.DeviceArray:
        probs = norm.pdf(zeta, loc=self.mu(phi), scale=self.omega(phi))
        return -(probs * jnp.log(probs)).sum()
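
self.mu(phi) and self.omega(phi) are defined elsewhere in the class; a standalone sanity check of the same entropy expression on a grid, with hypothetical Gaussian parameters:

import jax.numpy as jnp
from jax.scipy.stats import norm

mu, omega = 0.3, 1.2                         # hypothetical variational mean and scale
zeta = jnp.linspace(-5., 5., 1001)           # evaluation grid standing in for zeta
probs = norm.pdf(zeta, loc=mu, scale=omega)
raw_sum = -(probs * jnp.log(probs)).sum()    # same expression as the method above
entropy = raw_sum * (zeta[1] - zeta[0])      # Riemann-sum estimate of the differential entropy
print(entropy, 0.5 * jnp.log(2 * jnp.pi * jnp.e * omega ** 2))   # both roughly 1.60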
Example #9
    def prob(self, value):
        assert_array(value, shape=(..., ) + self.batch_shape)
        return norm.pdf(value, loc=self._loc, scale=self._scale)
Example #10
def f_jvp(primals, tangents):
    u, = primals
    u_dot, = tangents
    primal_out = ndtri_(u)
    tangent_out = (1 / norm.pdf(primal_out)) * u_dot
    return primal_out, tangent_out
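
f_jvp encodes the derivative d/du ndtri(u) = 1 / phi(ndtri(u)); a minimal sketch of registering it with jax.custom_jvp (using jax.scipy.special.ndtri as the primal, which is an assumption about what ndtri_ wraps):

import jax
from jax.scipy.special import ndtri
from jax.scipy.stats import norm

@jax.custom_jvp
def ndtri_(u):
    # Inverse CDF of the standard normal.
    return ndtri(u)

@ndtri_.defjvp
def f_jvp(primals, tangents):
    u, = primals
    u_dot, = tangents
    primal_out = ndtri_(u)
    tangent_out = (1 / norm.pdf(primal_out)) * u_dot
    return primal_out, tangent_out

print(jax.grad(ndtri_)(0.7))   # equals 1 / norm.pdf(ndtri(0.7))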
Example #11
                                         (n_samples, ))
Z_adf = sigmoid(jnp.einsum("mij,sm->sij", Phispace, adf_samples))
Z_adf = Z_adf.mean(axis=0)

# ** Plotting predictive distribution **
colors = ["black" if el else "white" for el in y]

## Add posterior marginal for ADF-estimated weights
for i in range(ndims):
    mean, std = mu_t[i], jnp.sqrt(tau_t[i])
    #fig = figures[f"weights_marginals_w{i}"]
    fig = figures[f"logistic_regression_weights_marginals_w{i}"]
    ax = fig.gca()
    x = jnp.linspace(mean - 4 * std, mean + 4 * std, 500)
    ax.plot(x,
            norm.pdf(x, mean, std),
            label="posterior (ADF)",
            linestyle="dashdot")
    ax.legend()

fig_adf, ax = plt.subplots()
title = "ADF Predictive distribution"
demo.plot_posterior_predictive(ax, X, Xspace, Z_adf, title, colors)
#figures["predictive_distribution_adf"] = fig_adf
#figures["logistic_regression_surface_adf"] = fig_adf
pml.savefig("logistic_regression_surface_adf.pdf")

# Posterior vs time

lcolors = ["black", "tab:blue", "tab:red"]
elements = mu_t_hist.T, tau_t_hist.T, w_laplace, lcolors
Example #12
    def get_marginals(self,
                      parameter_estimates=None,
                      invF=None,
                      ranges=None,
                      gridsize=None):
        """
        Creates list of 1D and 2D marginal distributions ready for plotting

        The marginal distribution lists are built from the full distribution
        array. For every parameter the full distribution is summed over every
        other parameter to get the 1D marginals, and for every pair of
        parameters the 2D marginals are calculated by summing over the
        remaining parameters. The list is made up of n_params lists, each
        containing n_columns objects. The value of the distribution comes from
        evaluating a Gaussian (1D) or multivariate Gaussian (2D) centred on the
        parameter estimates, with covariance given by the inverse Fisher
        information, over the parameter ranges.

        Parameters
        ----------
        parameter_estimates: float(n_targets, n_params) or None, default=None
            The parameter estimates for each set of target data. If None, the
            class instance parameter estimates are used
        invF: float(n_targets, n_params, n_params) or None, default=None
            The inverse Fisher information matrix for each target. If None the
            class instance inverse Fisher information matrices are used
        ranges : list or None, default=None
            A list of arrays containing the gridpoints of the marginal
            distribution for each parameter. If None, the class instance ranges
            (determined by the prior range) are used
        gridsize : list or None, default=None
            If using your own `ranges`, the gridsize for those ranges must also
            be passed (this is not checked)

        Returns
        -------
        list of lists:
            The 1D and 2D marginal distributions for each parameter (or pair)

        Todo
        ----
        Need to multiply the distribution by the prior to get the posterior
        Maybe move to TensorFlow probability?
        Make sure that using several Fisher estimates works
        """
        if parameter_estimates is None:
            parameter_estimates = self.parameter_estimates
        n_targets = parameter_estimates.shape[0]
        if invF is None:
            invF = self.invF
        if ranges is None:
            ranges = self.ranges
        if gridsize is None:
            gridsize = self.gridsize
        marginals = []
        for row in range(self.n_params):
            marginals.append([])
            for column in range(self.n_params):
                if column == row:
                    marginals[row].append(
                        jax.vmap(lambda mean, _invF: norm.pdf(
                            ranges[column], mean, np.sqrt(_invF)))(
                                parameter_estimates[:, column], invF[:, column,
                                                                     column]))
                elif column < row:
                    X, Y = np.meshgrid(ranges[row], ranges[column])
                    unravelled = np.vstack([X.ravel(), Y.ravel()]).T
                    marginals[row].append(
                        jax.vmap(lambda mean, _invF: multivariate_normal.pdf(
                            unravelled, mean, _invF).reshape(
                                ((gridsize[column], gridsize[row]))))(
                                    parameter_estimates[:, [row, column]],
                                    invF[:, [row, row, column, column],
                                         [row, column, row, column]].reshape(
                                             (n_targets, 2, 2))))
        return marginals
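
The fancy indexing on invF at the end pulls out the 2x2 sub-matrix of each target's inverse Fisher matrix for the (row, column) parameter pair; a small standalone illustration with a hypothetical 3x3 matrix:

import jax.numpy as np

invF = np.arange(9.).reshape(1, 3, 3)      # one target, three parameters
row, column = 2, 0
block = invF[:, [row, row, column, column],
             [row, column, row, column]].reshape((1, 2, 2))
print(block)                               # [[[8. 6.] [2. 0.]]] -- the (row, column) covariance block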
Example #13
    def to_quadrature(f, cur_y, cur_mean, cur_var):

        log_prob = bernoulli_probit_lik(cur_y, f)
        q = norm.pdf(f, cur_mean, jnp.sqrt(cur_var))

        return log_prob * q
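
to_quadrature builds the integrand log p(y | f) * N(f | m, v) for an expected log-likelihood; a minimal sketch of integrating it on a grid, treating to_quadrature as a free function and with an assumed probit Bernoulli log-likelihood standing in for bernoulli_probit_lik:

import jax.numpy as jnp
from jax.scipy.stats import norm

def bernoulli_probit_lik(y, f):
    # Assumed probit log-likelihood: log Phi(f) if y == 1, log Phi(-f) otherwise.
    return jnp.log(norm.cdf(jnp.where(y == 1, f, -f)))

cur_y, cur_mean, cur_var = 1, 0.3, 0.5        # hypothetical site
fs = jnp.linspace(-8., 8., 2001)              # grid over the latent f
vals = to_quadrature(fs, cur_y, cur_mean, cur_var)
print(jnp.sum(vals) * (fs[1] - fs[0]))        # grid estimate of E_{N(f|m,v)}[log p(y|f)]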
Example #14
def plot_fokker():

    def f(y, t): return -y + np.sin(2. * y) - np.cos(2. * t)
    def g(y, t): return 0.1#np.exp(np.sin(2. * y)) #- np.cos(2. * t))

    t0 = 0.1
    t1 = 0.2
    ts = np.linspace(t0, t1, 100)

    init_ps = lambda x: norm.pdf(x, 0., 0.3)
    ylims = [-2.1, 2.1]
    xs = np.linspace(ylims[0], ylims[1], 200)
    pvals = init_ps(xs)
    density1 = build_fd_func(ylims[0], ylims[1], pvals)

    #ylims2 = [-2., 2.]
    #xs2 = np.linspace(ylims2[0], ylims2[1], 3000)

    def dp_dt(x, t, p):
        return fokker_planck(f, g, x, t, p)

    # Set up figure.
    fig = plt.figure(figsize=(8, 6), facecolor='white')
    ax = fig.add_subplot(111, frameon=False)
    plt.ion()
    plt.show(block=False)
    ax.set_xlabel('x')
    ax.set_ylabel('p')

    p = init_ps(xs)
    if False:

        def dynamics(p, t, args):
            xs, ylims = args
            density1 = build_fd_func(ylims[0], ylims[1], p)
            fp = vmap(lambda x: dp_dt(x, t, density1))
            return fp(xs)
        full_density = odeint(dynamics, p, ts, (xs, ylims))

    if True:
        ps = [p]
        for i, _ in enumerate(ts[1:]):

            dt = 0.01
            density1 = build_fd_func(ylims[0], ylims[1], p)
            t = 0. #ts[i]
            fp = vmap(lambda x: dp_dt(x, t, density1))

            # Euler steps.  Todo: replace with odeint
            p = p + dt * fp(xs)
            ps.append(p)

            #plt.plot(xs, p, 'g')
            #plt.plot(xs, fp(xs), 'b')
            #plt.draw()
            #plt.pause(100)

        full_density = np.array(ps)


    # Set up figure.
    fig = plt.figure(figsize=(8, 6), facecolor='white')
    ax = fig.add_subplot(111, frameon=False)
    plt.ion()
    plt.show(block=False)
    #plt.cla()
    plot_gradient_field(ax, f, xlimits=[t0, t1], ylimits=ylims)

    X, T = np.meshgrid(xs, ts)
    ax.contour(T, X, full_density)

    ax.set_xlabel('t')
    ax.set_ylabel('y')

    #for i in range(3):
    #    rng = random.PRNGKey(i)
    #    ys = sdeint_ito(f, g, y0, ts, rng, fargs, dt=1e-3)
    #    ax.plot(ts, ys, 'g-')

    plt.draw()
    plt.pause(100)
Example #15
        num_samples, niters, optimizer)

    print(lower_bounds)
    Sigma = b @ b.T + jnp.diag(c**2)

    def is_pos_def(x):
        return jnp.all(jnp.linalg.eigvals(x) > 0)

    print(is_pos_def(Sigma))

    step = 0.001

    fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12, 8))
    for i, ax in enumerate(axes.flatten()):
        if i < 8:
            x = jnp.arange(mu[i] - 3 * jnp.sqrt(Sigma[i][i]),
                           mu[i] + 3 * jnp.sqrt(Sigma[i][i]) + step, step)

            y = norm.pdf(x, mu[i], jnp.sqrt(Sigma[i][i]))
            title = fr'$\Theta_{i}$'
            ax.set_title(title, fontsize=14)
            ax.plot(x, y, '-')
        else:
            ax.plot(lower_bounds)
            ax.set_title('Lower Bound')

    plt.tight_layout()
    pml.savefig("vb_gauss_lowrank_labour_force.pdf")
    pml.savefig("vb_gauss_lowrank_labour_force.png")
    plt.show()