def variational_objective(params, t):
    """Provides a stochastic estimate of the variational lower bound (ELBO)."""
    mean, log_std = unpack_params(params)
    # Reparameterization trick: draw from q = N(mean, diag(exp(log_std))^2).
    samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
    log_p = logprob(samples, t)  # evaluate the model's log-density once
    lower_bound = gaussian_entropy(log_std) + np.mean(log_p)
    print("loss is " + str(np.mean(log_p)))  # monitoring only
    return -lower_bound
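For context, the snippet closes over several names defined elsewhere. Below is a minimal sketch of that setup, modeled on autograd's black_box_svi.py example; the helper bodies are assumptions, not part of the snippet above.

import autograd.numpy as np
import autograd.numpy.random as npr

D = 2               # dimension of the latent variable (assumed)
num_samples = 100   # Monte Carlo samples per gradient estimate (assumed)
rs = npr.RandomState(0)

def unpack_params(params):
    # First D entries are the mean, the remaining D are log standard deviations.
    return params[:D], params[D:]

def gaussian_entropy(log_std):
    # Entropy of a diagonal Gaussian with the given log standard deviations.
    return 0.5 * D * (1.0 + np.log(2 * np.pi)) + np.sum(log_std)

def logprob(z, t):
    # Placeholder target log-density (a standard Gaussian); swap in your model.
    return -0.5 * np.sum(z**2, axis=1)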
Example 2
def variational_objective(params, t):
    """Provides a stochastic estimate of the variational lower bound."""
    mean, log_std = unpack_params(params)
    generated_sample = rs.randn(num_samples, D) * np.exp(log_std)
    samples = generated_sample + mean
    # samples: sampled weights
    # t: targets; logprob also uses the inputs the user generated initially
    log_value = logprob(samples, t)  # evaluate the log-density once, not three times
    lower_bound = gaussian_entropy(log_std) + np.mean(log_value)
    print("loss is " + str(np.mean(log_value)))  # monitoring only
    return -lower_bound
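A hedged usage sketch: the negated lower bound is minimized with autograd's Adam optimizer, which calls the gradient function with the iteration index as its second argument, matching the t parameter above. The initial values are illustrative assumptions.

from autograd import grad
from autograd.misc.optimizers import adam

objective_grad = grad(variational_objective)  # gradient w.r.t. params only
init_params = np.concatenate([np.zeros(D), -5 * np.ones(D)])  # [mean, log_std]
var_params = adam(objective_grad, init_params, step_size=0.1, num_iters=2000)
mean, log_std = unpack_params(var_params)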
Example 3

def fit_maxlike(x, r_guess):
    # Follows Wikipedia's section on negative binomial maximum likelihood.
    # A negative binomial can only fit overdispersed data (variance > mean).
    assert np.var(x) > np.mean(x), "Likelihood-maximizing parameters don't exist!"
    loglike = lambda r, p: np.sum(negbin_loglike(r, p, x))
    p = lambda r: np.sum(x) / np.sum(r + x)    # closed-form MLE of p for a given r
    rprime = lambda r: grad(loglike)(r, p(r))  # derivative of the profile log-likelihood
    r = newton(rprime, r_guess)                # its root is the MLE of r
    return r, p(r)
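fit_maxlike assumes a negbin_loglike plus the imports below. A sketch modeled on autograd's negative-binomial example follows; the exact definitions and the sampled data are assumptions.

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
from autograd.scipy.special import gammaln
from scipy.optimize import newton

def negbin_loglike(r, p, x):
    # Log-likelihood of counts x under NegBin(r, p) with
    # P(X=k) = C(k+r-1, k) * p**k * (1-p)**r.
    return gammaln(r + x) - gammaln(r) - gammaln(x + 1) \
        + x * np.log(p) + r * np.log(1 - p)

# Hypothetical data: NegBin(r=5, p=0.5) via its gamma-Poisson mixture.
data = npr.poisson(npr.gamma(5, 0.5 / (1 - 0.5), size=1000))
r, p = fit_maxlike(data, r_guess=1.0)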
Example 4
    def lift(vy): return np.mean(vy - init_vy)   # mean gain in y-velocity

    def objective(params):
        ...  # body truncated in the source snippet
Example 5
def drag(vx): return np.mean(init_vx - vx)   # mean loss of x-velocity
def lift(vy): return np.mean(vy - init_vy)   # mean gain in y-velocity
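A minimal usage sketch: drag is the mean loss of x-velocity and lift the mean gain in y-velocity, both relative to the initial fields. All names and values below are illustrative assumptions standing in for a real simulator's output.

import autograd.numpy as np

init_vx = np.ones((10, 10))    # hypothetical initial x-velocity field
init_vy = np.zeros((10, 10))   # hypothetical initial y-velocity field

def drag(vx): return np.mean(init_vx - vx)
def lift(vy): return np.mean(vy - init_vy)

final_vx, final_vy = 0.8 * init_vx, init_vy + 0.1  # stand-in simulation output
print(drag(final_vx), lift(final_vy))              # ~0.2, ~0.1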
Example 6
def frac_err(W_vect, X, T):
    # Fraction of examples whose predicted class (argmax of the scores)
    # disagrees with the true class (argmax of the one-hot targets T).
    return np.mean(np.argmax(T, axis=1) != np.argmax(pred_fun(W_vect, X), axis=1))
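A hedged usage sketch for frac_err: T holds one-hot targets and pred_fun returns per-class scores. The toy linear pred_fun and the random data below are assumptions, not the snippet's actual model.

import autograd.numpy as np
import autograd.numpy.random as npr

def pred_fun(W_vect, X):
    # Toy linear scorer: reshape flat weights into a (features, classes) matrix.
    return np.dot(X, W_vect.reshape(X.shape[1], -1))

rng = npr.RandomState(0)
X = rng.randn(5, 3)                    # 5 examples, 3 features
T = np.eye(4)[rng.randint(4, size=5)]  # one-hot labels over 4 classes
W = rng.randn(3 * 4)
print(frac_err(W, X, T))               # fraction misclassified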
Example 7
def distance_from_target_image(smoke):
    # Mean squared pixel error between the simulated smoke field and the target image.
    return np.mean((target - smoke)**2)
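A hedged usage sketch: target is a reference image the simulated smoke field should reproduce, and the mean squared pixel error is the quantity being minimized. Both arrays below are illustrative assumptions.

import autograd.numpy as np
import autograd.numpy.random as npr

target = np.zeros((8, 8))
target[2:6, 2:6] = 1.0      # hypothetical target image: a bright square
smoke = npr.rand(8, 8)      # hypothetical simulated smoke field
print(distance_from_target_image(smoke))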