Example #1
def find_weights(self):  # train the model's weights
    i = 1
    while i < self.tp.iter + 1:
        self.randomize_batches()
        for batchdata in self.make_batch():
            self.w1 = np.squeeze(self.w1)
            self.w2 = np.squeeze(self.w2)

            # Gradients of the loss with respect to each weight matrix
            gn1 = self.grad_func_w1(self.w1, self.w2, batchdata)
            gn2 = self.grad_func_w2(self.w1, self.w2, batchdata)

            # Update the weights and their moment estimates (mn, vn)
            (self.w1, self.tp.mn1, self.tp.vn1) = self.update_weights(
                self.w1, self.tp.mn1, self.tp.vn1, gn1, i)
            (self.w2, self.tp.mn2, self.tp.vn2) = self.update_weights(
                self.w2, self.tp.mn2, self.tp.vn2, gn2, i)

            # Record the loss at this iteration
            self.loss_array = np.concatenate(
                (self.loss_array,
                 [self.loss_func(self.w1, self.w2, self.data)]),
                axis=0)
            i = i + 1
            if i % 100 == 0:
                print(i)
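
The update_weights call above consumes per-weight first- and second-moment accumulators (mn, vn) together with the step index i, which matches an Adam-style update. A minimal sketch of such an update, assuming standard Adam hyperparameters (lr, beta1, beta2, eps are illustrative names, not from the original code):

import numpy as np

def update_weights(w, mn, vn, gn, i, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    """Adam-style update; returns new weights and moment estimates."""
    mn = beta1 * mn + (1 - beta1) * gn       # first-moment (mean) estimate
    vn = beta2 * vn + (1 - beta2) * gn**2    # second-moment (variance) estimate
    m_hat = mn / (1 - beta1**i)              # bias correction uses the step index i
    v_hat = vn / (1 - beta2**i)
    w = w - lr * m_hat / (np.sqrt(v_hat) + eps)
    return w, mn, vn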
Example #2
    def figure_subspace_update(self, T, x_true=None):
        self._fig_subspace.clf()

        if x_true is not None:
            X_true = np.array([x_true[k] for k in range(0, T + 1)])
            X_true = X_true.squeeze().T

        X_pred = np.array([self._mu[k] for k in range(0, T + 1)])
        X_pred = X_pred.squeeze().T
        X_pred = X_pred.reshape(self._r, T + 1)

        used = set()
        for l in range(self._r):
            ax = self._fig_subspace.add_subplot(2, int((self._r + 1) / 2),
                                                l + 1)
            if x_true is not None:
                pl = np.argmin([
                    np.linalg.norm(X_true[l] - X_pred[m])
                    if m not in used else np.inf
                    for m in range(self._r)
                ])
                ax.plot(np.squeeze(X_true[l, :]), color="#004488", alpha=0.7)
                ax.plot(np.squeeze(X_pred[pl, :]), "--", color="#bb5566")
                used.add(pl)
            else:
                ax.plot(np.squeeze(X_pred[l, :]), "--", color="#bb5566")
            ax.axis("off")

        plt.pause(0.01)
Example #3
def fwd_model_2d(arr, x1, x2, z, R, eps, varsigma=1):
    """
    Apply the forward model to a space-time array (which must live on a grid
    of spatial locations); predictions are made at the same time points as the
    input. Note: the input should be dense within the integral bounds to get
    good results. Output locations need not lie on a grid.
    :param arr: matrix (nx1, nx2, nt)
    :param x1: (nx1, 1) vector of observation spatial locations
    :param x2: (nx2, 1) vector of observation spatial locations
    :param z: (nz, 2) matrix of prediction spatial locations
    :param R: forward model parameter
    :param eps: forward model singularity parameter
    :param varsigma: scalar conductivity
    :return: matrix (nz, nt)
    """
    nt = arr.shape[2]
    nz = z.shape[0]
    res = np.zeros((nz, nt))
    for t in range(nt):
        arr_tmp = np.squeeze(arr[:, :, t]) # (nx1, nx2)
        for i in range(nz):
            deltax1 = z[i,0] - x1   # (nx1, 1)
            deltax2 = z[i,1] - x2.T # (1, nx2)
            wt = b_fwd_2d(deltax1, deltax2, R, eps)
            toint = wt * arr_tmp # (nx1, nx2)
            res[i, t] = scipy.integrate.trapz(
                scipy.integrate.trapz(toint, x=np.squeeze(x1), axis=0),
                x=np.squeeze(x2), axis=0)
    return res #/(4*np.pi*varsigma)
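
As a sanity check on the nested trapezoidal-rule pattern used above, here is a small self-contained example (not from the original project) integrating f(x1, x2) = x1 * x2 over the unit square, whose exact value is 0.25:

import numpy as np

x1 = np.linspace(0.0, 1.0, 101)        # dense grid along the first axis
x2 = np.linspace(0.0, 1.0, 101)        # dense grid along the second axis
f = np.outer(x1, x2)                   # f[i, j] = x1[i] * x2[j], shape (101, 101)

inner = np.trapz(f, x=x1, axis=0)      # integrate out x1 -> shape (101,)
total = np.trapz(inner, x=x2, axis=0)  # integrate out x2 -> scalar
print(total)                           # ~0.25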
Example #4
    def backward(self, x_seq, u_seq):
        # Value function and its derivatives at the final time step
        self.v[-1] = self.lf(x_seq[-1])
        self.v_x[-1] = self.lf_x(x_seq[-1])
        self.v_xx[-1] = self.lf_xx(x_seq[-1])
        k_seq = []
        kk_seq = []
        for t in range(self.pred_time - 1, -1, -1):
            f_x_t = self.f_x(x_seq[t], u_seq[t])
            f_u_t = self.f_u(x_seq[t], u_seq[t])
            # Q-function derivatives (Bellman backup)
            q_x = self.l_x(x_seq[t], u_seq[t]) + np.matmul(f_x_t.T, self.v_x[t + 1])
            q_u = self.l_u(x_seq[t], u_seq[t]) + np.matmul(f_u_t.T, self.v_x[t + 1])
            q_xx = self.l_xx(x_seq[t], u_seq[t]) + \
                np.matmul(np.matmul(f_x_t.T, self.v_xx[t + 1]), f_x_t) + \
                np.dot(self.v_x[t + 1], np.squeeze(self.f_xx(x_seq[t], u_seq[t])))
            tmp = np.matmul(f_u_t.T, self.v_xx[t + 1])
            q_uu = self.l_uu(x_seq[t], u_seq[t]) + np.matmul(tmp, f_u_t) + \
                np.dot(self.v_x[t + 1], np.squeeze(self.f_uu(x_seq[t], u_seq[t])))
            q_ux = self.l_ux(x_seq[t], u_seq[t]) + np.matmul(tmp, f_x_t) + \
                np.dot(self.v_x[t + 1], np.squeeze(self.f_ux(x_seq[t], u_seq[t])))
            inv_q_uu = np.linalg.inv(q_uu)
            # Feed-forward and feedback gains
            k = -np.matmul(inv_q_uu, q_u)
            kk = -np.matmul(inv_q_uu, q_ux)
            dv = 0.5 * np.matmul(q_u, k)
            self.v[t] += dv
            self.v_x[t] = q_x - np.matmul(np.matmul(q_u, inv_q_uu), q_ux)
            self.v_xx[t] = q_xx + np.matmul(q_ux.T, kk)
            k_seq.append(k)
            kk_seq.append(kk)
        k_seq.reverse()
        kk_seq.reverse()
        return k_seq, kk_seq
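
The gains returned here feed a companion forward pass that rolls the dynamics out with the updated controls. A minimal sketch of that step, assuming a one-step dynamics function f(x, u) (the name is illustrative; the original class has its own dynamics):

import numpy as np

def forward(f, x_seq, u_seq, k_seq, kk_seq):
    """Roll out the dynamics with the iLQR control update u + k + K(x - x_ref)."""
    x_seq_new = [x_seq[0]]
    u_seq_new = []
    for t in range(len(u_seq)):
        u = u_seq[t] + k_seq[t] + np.matmul(kk_seq[t], x_seq_new[t] - x_seq[t])
        u_seq_new.append(u)
        x_seq_new.append(f(x_seq_new[t], u))
    return x_seq_new, u_seq_new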
Example #5
def gen_marg_poiss(y_train, params, n_latents, n_neurons, coeffs, a1, N, D):
    len_sc, W = unpack_params(params)

    Da2W = (D * coeffs.T[2]) * W
    quad = 2 * Da2W @ W.T

    # Per-latent covariances, jittered for numerical stability
    C = [make_cov(N, i) + 1e-7 * np.eye(N) for i in len_sc]
    Cinv = [np.linalg.inv(C[i]) for i in np.arange(n_latents)]

    if n_latents == 1:
        sigma_inv = 2 * Da2W.T @ W + Cinv[0]
    else:
        sigma_inv = np.kron(quad, np.eye(N)) + block_diag(Cinv, n_latents)
    Wkron = np.kron(W, np.eye(N))

    second = Wkron @ (y_train - D * a1.T)
    mutot = np.squeeze(np.linalg.solve(sigma_inv, second))

    logl = np.squeeze((1 / 2) * mutot.T @ sigma_inv @ mutot)

    logdetC = 0
    for i in np.arange(n_latents):
        logdetC = logdetC - (1 / 2) * np.linalg.slogdet(C[i])[1]

    neglogpost = logdetC - (1 / 2) * np.linalg.slogdet(sigma_inv)[1]

    return -(neglogpost + logl)
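
block_diag above is called with a list of matrices plus a count, which differs from scipy.linalg.block_diag's varargs signature. A minimal helper with that calling convention might look like this (an assumption about the original, not its actual definition):

from scipy.linalg import block_diag as sp_block_diag

def block_diag(mats, n_latents):
    """Stack the first n_latents matrices in mats into one block-diagonal matrix."""
    return sp_block_diag(*mats[:n_latents])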
Example #6
def create_job(kwargs):
    import warnings
    warnings.filterwarnings("ignore")

    # pendulum env
    env = gym.make('Pendulum-TO-v0')
    env._max_episode_steps = 10000
    env.unwrapped.dt = 0.02
    env.unwrapped.umax = np.array([2.5])
    env.unwrapped.periodic = False

    dm_state = env.observation_space.shape[0]
    dm_act = env.action_space.shape[0]

    state = env.reset()
    init_state = tuple([state, 1e-4 * np.eye(dm_state)])
    solver = MBGPS(env,
                   init_state=init_state,
                   init_action_sigma=25.,
                   nb_steps=300,
                   kl_bound=.1,
                   action_penalty=1e-3,
                   activation={
                       'shift': 250,
                       'mult': 0.5
                   })

    solver.run(nb_iter=100, verbose=False)

    solver.ctl.sigma = np.dstack([1e-1 * np.eye(dm_act)] * 300)
    data = solver.rollout(nb_episodes=1, stoch=True, init=state)

    obs = np.squeeze(data['x'], axis=-1).T
    act = np.squeeze(data['u'], axis=-1).T
    return obs, act
Example #7
File: laplace.py  Project: as4529/gp3
    def search_step(self, obj_prev, min_obj, delta_alpha,
                    step_size, max_it, t, opt_step):
        """
        Executes one step of a backtracking line search
        Args:
            obj_prev (np.array): previous objective
            obj_search (np.array): current objective
            min_obj (np.array): current minimum objective
            delta_alpha (np.array): change in step size
            step_size (np.array): current step size
            max_it (int): maximum number of line search iterations
            t (np.array): current line search iteration
            opt_step (np.array): optimal step size until now

        Returns: updated parameters
        """
        alpha_search = np.squeeze(self.alpha + step_size * delta_alpha)
        f_search = np.squeeze(kron_mvp(self.Ks, alpha_search)) + self.mu

        if self.k_diag is not None:
            f_search += np.multiply(self.k_diag, alpha_search)

        obj_search = self.log_joint(f_search, alpha_search)

        if min_obj > obj_search:
            opt_step = step_size
            min_obj = obj_search
        step_size = self.tau * step_size

        t = t + 1

        return (obj_prev, min_obj, delta_alpha,
                step_size, max_it, t, opt_step)
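
For context, the shrink-by-tau pattern above is the core of a standard backtracking (Armijo) line search. A self-contained generic version, independent of this class:

import numpy as np

def backtracking(f, x, direction, gradient, step=1.0, tau=0.5, c=1e-4, max_it=50):
    """Shrink the step by tau until the Armijo sufficient-decrease test passes."""
    fx = f(x)
    slope = np.dot(gradient, direction)  # directional derivative, < 0 for descent
    for _ in range(max_it):
        if f(x + step * direction) <= fx + c * step * slope:
            break
        step *= tau
    return step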
Example #8
def debug_scp_iteration_plot( tx_next, u_next, xbar, ubar, x0, T, i_iter):

	unl = u_next
	x_curr = x0
	
	Xnl = []
	Vnl_nlx = []
	Vnl_lx = []
	tV_nlx = []
	tV_lx = []

	for k,t in enumerate(T):
		x_next = x_curr + dynamics.get_dxdt( x_curr, unl[:,k], t) * param.get('dt')
		R_k, w_k = dynamics.get_linear_lyapunov( xbar[:,k], ubar[:,k], t)
		Vnl_nlx.append( dynamics.get_V( x_curr, t))
		Vnl_lx.append( dynamics.get_V( tx_next[:,k],t))
		tV_nlx.append( np.matmul( R_k, x_curr) + w_k )
		tV_lx.append( np.matmul( R_k, tx_next[:,k]) + w_k)		
		Xnl.append( x_curr)
		x_curr = x_next

	Xnl = np.asarray(Xnl)
	Vnl_nlx = np.asarray(Vnl_nlx)
	Vnl_lx = np.asarray(Vnl_lx)
	tV_nlx = np.asarray(tV_nlx)
	tV_lx = np.asarray(tV_lx)

	plot_scp_iteration_state( Xnl, np.transpose(tx_next,(1,0,2)), \
		np.transpose(xbar,(1,0,2)), T, title = str(param.get('controller')) + ' State' + \
		'\nIteration: ' + str(i_iter) + '\nTime: ' + str(T[0]))

	plot_scp_iteration_lyapunov( np.squeeze(Vnl_nlx), np.squeeze(Vnl_lx), np.squeeze( tV_nlx), \
		np.squeeze( tV_lx), T, title = str(param.get('controller')) + ' Lyapunov' + \
		'\nIteration: ' + str(i_iter) + '\nTime: ' + str(T[0]))
Example #9
def sinkhorn(P):
    """Fit the diagonal matrices in Sinkhorn-Knopp's algorithm"""

    N = P.shape[0]
    max_thresh = 1 + 1e-3
    min_thresh = 1 - 1e-3
    _max_iter = 100

    # Initialize r and c, the diagonals of D1 and D2
    r = np.ones((N, 1))
    c = 1 / np.dot(P.T, r)
    r = 1 / np.dot(P, c)

    P_eps = P
    for _ in range(_max_iter):
        # Alternately rescale rows and columns
        c = 1 / np.dot(P.T, r)
        r = 1 / np.dot(P, c)

        _D1 = np.diag(np.squeeze(r))
        _D2 = np.diag(np.squeeze(c))
        P_eps = np.dot(np.dot(_D1, P), _D2)

        # Stop once all row and column sums are within tolerance of 1
        if (np.all(P_eps.sum(axis=1) > min_thresh)
                and np.all(P_eps.sum(axis=1) < max_thresh)
                and np.all(P_eps.sum(axis=0) > min_thresh)
                and np.all(P_eps.sum(axis=0) < max_thresh)):
            break

    return P_eps
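
A quick check of the routine above (illustrative usage, not from the original source): a strictly positive matrix should come back nearly doubly stochastic.

import numpy as np

np.random.seed(0)
P = np.random.rand(5, 5) + 0.1  # strictly positive, so the scaling behaves well
P_ds = sinkhorn(P)
print(P_ds.sum(axis=0))         # column sums ~ 1
print(P_ds.sum(axis=1))         # row sums ~ 1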
Example #10
    def loss(theta, X, y):
        y_hat = self.sigmoid(np.dot(X, theta))
        y_hat = np.squeeze(y_hat)
        y = np.squeeze(y)
        res = -np.sum(
            y.dot(np.log10(y_hat)) + (1 - y).dot(np.log10(1 - y_hat)))
        res = res / X.shape[0]
        return res
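
The two np.squeeze calls guard against a classic shape mismatch: a (n, 1) column vector does not dot cleanly with a (n,) vector. A small illustration:

import numpy as np

y = np.ones((3, 1))              # column vector, shape (3, 1)
w = np.full(3, 0.5)              # flat vector, shape (3,)

# np.dot(y, w) raises ValueError: shapes (3, 1) and (3,) are not aligned.
print(np.dot(np.squeeze(y), w))  # 1.5, the intended scalar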
Example #11
def lnpdf(theta):
    inputs = np.atleast_2d(theta)
    lnpdf.counter += len(inputs)
    output = np.empty(len(inputs))
    grad_out = np.empty((len(inputs), inputs.shape[1]))
    for i in range(len(inputs)):
        output[i] = tmp_lnpdf(inputs[i])
        grad_out[i, :] = grad(tmp_lnpdf)(inputs[i])
    return np.squeeze(output), np.squeeze(grad_out)
Example #12
    def loss(theta, X, y):
        y_hat = self.sigmoid(np.dot(X, theta))
        y_hat = np.squeeze(y_hat)
        y = np.squeeze(y)
        error = -np.sum(
            y.dot(np.log10(y_hat)) + (1 - y).dot(np.log10(1 - y_hat)))
        error = error / X.shape[0]
        error += self.l2_coef / (2 * X.shape[0]) * np.sum(np.square(theta))
        return error
Example #13
def loss_func(w1, w2, w3, data):
    xy = data[:, :-1]
    z = data[:, 2]
    w1 = np.squeeze(w1)
    w2 = np.squeeze(w2)
    w3 = np.squeeze(w3)
    z_pred = forward_pass(xy, w1, w2, w3)
    loss = np.sum((z_pred - z)**2)
    return loss
Example #14
def plot_V(V,T,title = 'Lyapunov Convergence'):
	fig, ax = plt.subplots()
	Vdot = np.gradient(V, param.get('dt'), axis = 0)

	ax.plot(T,np.squeeze(V),label = 'V')
	ax.plot(T,np.squeeze(Vdot),label = 'Vdot')
	plt.title(title)
	plt.legend()
	ax.grid(True)
Example #15
    def predict(self, X):

        m = X.shape[0]
        if self.fit_intercept:
            X = np.concatenate((np.ones(m).reshape(-1, 1), X), axis=1)
        pred = self.predicition(self.coef_, X)
        pred = np.squeeze(pred)
        res = np.squeeze(pred >= 0.5).astype(int)
        return pd.Series(res)
Example #16
def eq_constraint_elimination(func,
                              eqConstraintsMat,
                              optimizer,
                              initialX,
                              interval=[-1e15, 1e15],
                              ftol=1e-6,
                              maxIters=1e3,
                              maxItersLS=200):
    fevals = 0
    xLen = initialX.shape[0]  # n
    bLen = eqConstraintsMat['b'].shape[0]  # p

    F, x_hat = eq_constraint_elimination_composer(eqConstraintsMat)
    paramFunc = eq_constraint_elimination_func(func, F, x_hat)

    # Choose any initial Z
    initialZ = np.zeros((xLen - bLen, 1))  # Must be (n - p)x1

    # A scipy optimizer could be used here instead, e.g.:
    #   optimizer = spo.minimize(paramFunc, initialZ, method='BFGS', tol=ftol)

    # Find z* that minimizes the parametrized cost function
    algorithm = optimizer(paramFunc,
                          initialZ,
                          interval=interval,
                          ftol=ftol,
                          maxIters=maxIters,
                          maxItersLS=maxItersLS)

    zOpt, _, zFevals = algorithm.optimize()
    zOpt.shape = (xLen - bLen, 1)

    fevals += zFevals
    xOpt = np.squeeze(np.dot(F, zOpt) + x_hat)
    fOpt = np.squeeze(func(xOpt))

    return xOpt, fOpt, fevals
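
The composer above reduces the constrained problem min f(x) subject to Ax = b to an unconstrained one in z via x = Fz + x_hat, where the columns of F span the null space of A and x_hat is any particular solution. A minimal sketch of such a composer (an illustration of the technique, not the project's actual implementation):

import numpy as np
from scipy.linalg import null_space

def eq_constraint_elimination_composer(eqConstraintsMat):
    A = eqConstraintsMat['A']
    b = eqConstraintsMat['b']
    F = null_space(A)                             # columns span {x : Ax = 0}
    x_hat = np.linalg.lstsq(A, b, rcond=None)[0]  # one particular solution of Ax = b
    x_hat = x_hat.reshape(-1, 1)
    # Every x = F @ z + x_hat satisfies A @ x = b, for any z
    return F, x_hat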
Example #17
    def loss(self, theta, x, y):
        assert x.shape[0] == y.shape[0]
        pred = self.predicition(theta, x)
        pred = np.squeeze(pred)
        y = np.squeeze(y)
        res = -np.sum(y.dot(np.log10(pred)) + (1 - y).dot(np.log10(1 - pred)))
        res = res / x.shape[0]
        res += self.l2_coef / (2 * x.shape[0]) * np.sum(np.square(theta))
        res += self.l1_coef / (2 * x.shape[0]) * np.sum(np.abs(theta))
        return res
Example #18
def make_model(model_name):
    if model_name == "baseball":
        # baseball model and data
        lnpdf_named = baseball.lnpdf
        lnpdf_flat = baseball.lnpdf_flat
        lnpdft = lambda z, t: np.squeeze(lnpdf_flat(z, t))
        lnpdf = lambda z: np.squeeze(lnpdf_flat(z, 0))
        D = baseball.D
        return lnpdf, D, None
    elif model_name == "frisk":
        lnpdf, unpack, D, sdf, pnames = frisk.make_model_funs(precinct_type=1)
        return lnpdf, D, pnames
Example #19
def generate_mixture_data(num_obs, true_centroids, true_probs, x_covs):
    true_z = np.random.multinomial(1, true_probs, num_obs)
    true_z_ind = np.full(num_obs, -1)
    for row in np.argwhere(true_z):
        true_z_ind[row[0]] = row[1]

    x = np.array([
        np.random.multivariate_normal(
            mean=np.squeeze(true_centroids[true_z_ind[n], :]),
            cov=np.squeeze(x_covs[n, :])) for n in range(num_obs)
    ])

    return x, true_z, true_z_ind
Example #20
    def f(x):
        demo = demo_func(x)
        demo.set_mut_rate(1.0)
        configs = momi.build_config_list(demo.leafs, tuple(states), n_lins)
        sfs = demo.expected_sfs(configs, length=1, return_dict=False)
        branch_len = demo.expected_branchlen(
            dict(zip(configs.sampled_pops, configs.sampled_n)))
        if normalized:
            return np.squeeze(sfs / branch_len)
        return np.squeeze(sfs)
Example #21
def plot_test( V, LgV, LfV, U, T):
	# currently testing lyapunov and derivatives
	
	Vdot_n = np.gradient(V, param.get('dt'), axis = 0)
	Vdot_a = []
	for t in range(len(T)):
		Vdot_a.append( LfV[t] + np.matmul( LgV[t], U[t]))
	Vdot_a = np.asarray(Vdot_a)

	fig, ax = plt.subplots()
	plt.plot( T, np.squeeze(V), label = 'V')
	plt.plot( T, np.squeeze(Vdot_n), label = 'Vdot n')
	plt.plot( T, np.squeeze(Vdot_a), label = 'Vdot a')
	plt.legend()
	plt.title('Testing')
Example #22
def initialize_local_meanfield(node_potentials):
    # TODO maybe i should initialize the hmm states instead of the lds states...

    def compute_stats(sampled_states):
        out = np.outer
        get_init_stats = lambda x0: (out(x0, x0), x0, 1., 1.)
        get_pair_stats = lambda x1, x2: (out(x1, x1), out(x1, x2), out(x2, x2),
                                         1.)

        init_stats = get_init_stats(sampled_states[0])
        pair_stats = list(map(get_pair_stats, sampled_states[:-1],
                              sampled_states[1:]))

        return init_stats, list(map(np.array, zip(*pair_stats)))

    # construct random walk natparam
    N = node_potentials[0].shape[1]
    A = 0.9 * np.eye(N)
    init_params = -1. / 2 * np.eye(N), np.zeros(N), 0.
    pair_params = -1. / 2 * np.dot(A.T, A), A.T, -1. / 2 * np.eye(N), 0.
    natparam = init_params, pair_params

    sampled_states = np.squeeze(
        natural_lds_sample(natparam, node_potentials, num_samples=1))
    init_stats, pair_stats = compute_stats(sampled_states)

    return init_stats, pair_stats
Example #23
def linear_regression_online_update(m_km1, P_km1, H, m_obs, var_obs):
    # m_km1: Prior mean
    # P_km1: Prior cov
    # H: Link from latent to observed
    # m_obs: Mean of observation
    # var_obs: Variance of observation

    # We need to work with matrices here or the maths will be wrong
    assert all([len(x.shape) == 2 for x in [m_km1, H]]), \
        'm_km1 and H must have two dimensions each!'

    v_k = m_obs - H @ m_km1

    S_k = H @ P_km1 @ H.T + var_obs
    K_k = P_km1 @ H.T * (1 / S_k)
    m_k = m_km1 + K_k * v_k
    P_k = P_km1 - (K_k * S_k) @ K_k.T

    # Log marginal likelihood contribution: use a Cholesky-based
    # log-determinant for numerical stability
    logdet = logdet_via_cholesky(2 * np.pi * S_k)

    # Quadratic part of the Gaussian energy
    quadratic_term = 0.5 * v_k.T @ np.linalg.solve(S_k, v_k)
    energy_contrib = 0.5 * logdet + quadratic_term

    return m_k, P_k, np.squeeze(energy_contrib)
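
logdet_via_cholesky is assumed to be a small helper along these lines (a sketch, not necessarily the author's version), followed by an illustrative one-dimensional update:

import numpy as np

def logdet_via_cholesky(M):
    """log|M| from the Cholesky factor; stable for symmetric positive definite M."""
    L = np.linalg.cholesky(M)
    return 2.0 * np.sum(np.log(np.diag(L)))

# One observation of y = 2x at x = 3, observation noise variance 0.1
m, P = np.zeros((1, 1)), np.eye(1)  # prior over the scalar weight
H = np.array([[3.0]])               # design row for the input x = 3
m, P, energy = linear_regression_online_update(m, P, H, m_obs=6.0, var_obs=0.1)
print(m)                            # posterior mean pulled toward the true weight 2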
Example #24
def _composite_log_likelihood(data,
                              demo,
                              mut_rate=None,
                              truncate_probs=0.0,
                              vector=False,
                              p_missing=None,
                              use_pairwise_diffs=False,
                              **kwargs):
    try:
        sfs = data.sfs
    except AttributeError:
        sfs = data

    sfs_probs = np.maximum(
        expected_sfs(demo, sfs.configs, normalized=True, **kwargs),
        truncate_probs)
    log_lik = sfs._integrate_sfs(np.log(sfs_probs), vector=vector)

    # add on log likelihood of poisson distribution for total number of SNPs
    if mut_rate is not None:
        log_lik = log_lik + \
            _mut_factor(sfs, demo, mut_rate, vector,
                        p_missing, use_pairwise_diffs)

    if not vector:
        log_lik = np.squeeze(log_lik)
    return log_lik
Example #25
    def compute_MSE(self):
        """Used in the model-fitting lab to display MSE performance
        for hand-fitting exercises"""
        outputs = np.squeeze(self.funk(self.data_inputs))
        squared_errors = np.square(self.correct_outputs - outputs)
        MSE = np.mean(squared_errors)
        return MSE
Example #26
    def setUp(self):
        np.seterr(all='raise')
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(n)
        A = self.A = rnd.randn(n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y**2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y**2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (row vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y**2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        self.correct_hess = np.squeeze(np.array(Amat.dot(H)))

        self.backend = AutogradBackend()
Example #27
    def __init__(self, K, D, pi0=None, M=0):
        super(FixedInitialStateDistribution, self).__init__(K, D, M=M)
        if pi0 is not None:
            # Handle the case where the user passes an array of shape (K, 1) instead of (K,)
            pi0 = np.squeeze(np.array(pi0))
            assert len(pi0) == K, "Array passed as pi0 is of the wrong length"
            self.log_pi0 = np.log(pi0 + 1e-16)
Example #28
File: synth.py  Project: aasensio/DNHazel
    def get_spectra(self, T, mu, velocity):
        """
        Return the Kurucz spectrum for a value of the temperature, astrocentric angle and velocity. It linearly
        interpolates on the set of computed spectra
        Args:
            T (float) : temperature in K
            mu (float) : astrocentric angle in the range [0,1]
            velocity (float) : velocity shift of the spectrum in km/s
        
        Returns:
            spectrum (float) : interpolated and shifted spectrum
        """
        idT = np.searchsorted(self.T, T) - 1
        idmu = np.searchsorted(self.mus, mu) - 1

        # Simple bilinear interpolation in (T, mu)
        xd = (T - self.T[idT]) / (self.T[idT + 1] - self.T[idT])
        yd = (mu - self.mus[idmu]) / (self.mus[idmu + 1] - self.mus[idmu])

        c0 = self.spectrum[idT, idmu, :] * (1.0 - xd) + self.spectrum[idT + 1, idmu, :] * xd
        c1 = self.spectrum[idT, idmu + 1, :] * (1.0 - xd) + self.spectrum[idT + 1, idmu + 1, :] * xd

        tmp = np.squeeze(c0 * (1.0 - yd) + c1 * yd)

        # Velocity shift in pixel units
        shift_in_pxl = velocity / self.velocity_per_pxl

        return self.shift_spectrum(tmp, shift_in_pxl)
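
As a standalone check of the bilinear formula used above (illustrative, not project code): interpolating f(x, y) = x + 2y on a 2x2 grid reproduces the function exactly, since it is linear in each variable.

import numpy as np

xs = np.array([0.0, 1.0])
ys = np.array([0.0, 1.0])
f = xs[:, None] + 2.0 * ys[None, :]     # values on the corner grid

x, y = 0.3, 0.7
xd = (x - xs[0]) / (xs[1] - xs[0])
yd = (y - ys[0]) / (ys[1] - ys[0])
c0 = f[0, 0] * (1 - xd) + f[1, 0] * xd  # interpolate along x at y = ys[0]
c1 = f[0, 1] * (1 - xd) + f[1, 1] * xd  # interpolate along x at y = ys[1]
print(c0 * (1 - yd) + c1 * yd)          # 1.7 == 0.3 + 2 * 0.7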
Example #29
def loss_func(parameters, data):
    parameters = np.squeeze(parameters)
    x = data[:, :-1]
    y = data[:, -1]
    ai = sigmoid(parameters, x)
    loss = -np.sum(y * np.log(ai) + (1 - y) * np.log(1 - ai))
    return loss
Example #30
    def _update_params(self, x, result):
        self.fevals = self.fevals + 1

        # Autograd ArrayBox behaves differently from numpy, that fixes it.
        result_copy = remove_arraybox(result)
        x_copy = remove_arraybox(x)

        assert not np.any(np.isnan(result_copy)), "X out of domain"

        found_best = np.all(result_copy <= self.best_f)

        if self._has_eqc:
            if found_best:
                self.best_z = x_copy
            x_copy = np.squeeze(
                self._null_space_feasible_matrix @ np.reshape(x_copy, (-1, 1))
                + self._feasible_vector)

        self.all_evals += [result_copy]
        self.all_x += [x_copy]

        if found_best:
            self._best_x = x_copy
            self._best_f = result_copy
            self.all_best_x += [self.best_x]
            self.all_best_f += [self.best_f]
        return result
Example #31
    def f_scp(self, x, u):
        # input:
        # 	x, nd array, (n,)
        # 	u, nd array, (m,1)
        # output
        # 	sp1, nd array, (n,)

        # parameters
        m_p = self.mass_pole
        m_c = self.mass_cart
        l = self.length_pole
        g = self.g

        # s = [q,qdot], q = [x,th]
        u = agnp.reshape(u, (self.m, 1))
        q = agnp.reshape(x[0:2], (2, 1))
        qdot = agnp.reshape(x[2:], (2, 1))
        th = x[1]
        thdot = x[3]

        # EOM from learning+control@caltech
        # D = agnp.array([[m_c+m_p,m_p*l*agnp.cos(th)],[m_p*l*agnp.cos(th),m_p*(l**2)]])
        a = m_c + m_p
        b = m_p * l * agnp.cos(th)
        c = m_p * l * agnp.cos(th)
        d = m_p * (l**2)
        Dinv = 1 / (a * d - b * c) * agnp.array([[d, -b], [-c, a]])

        C = agnp.array([[0, -m_p * l * thdot * agnp.sin(th)], [0, 0]])
        G = agnp.array([[0], [-m_p * g * l * agnp.sin(th)]])
        B = agnp.array([[1], [0]])
        qdotdot = agnp.dot(Dinv, agnp.dot(B, u) - agnp.dot(C, qdot) - G)

        res = agnp.vstack([qdot, qdotdot])
        return agnp.squeeze(res)
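
The closed-form 2x2 inverse used for Dinv can be checked against numpy directly (a quick illustrative test, not part of the original class):

import numpy as np

a, b, c, d = 1.5, 0.4, 0.4, 0.25         # sample mass-matrix entries
D = np.array([[a, b], [c, d]])
Dinv = 1 / (a * d - b * c) * np.array([[d, -b], [-c, a]])
print(np.allclose(Dinv @ D, np.eye(2)))  # True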
Example #32
    def setUp(self):
        np.seterr(all='raise')
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(n)
        A = self.A = rnd.randn(n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (row vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        self.correct_hess = np.squeeze(np.array(Amat.dot(H)))

        self.backend = AutogradBackend()
Example #33
def test_elementwise_grad():
    def simple_fun(a):
        return a + np.sin(a) + np.cosh(a)

    A = npr.randn(10)

    exact = elementwise_grad(simple_fun)(A)
    numeric = np.squeeze(np.array([nd(simple_fun, A[i]) for i in range(len(A))]))
    check_equivalent(exact, numeric)
Example #34
def test_elementwise_grad_multiple_args():
    def simple_fun(a, b):
        return a + np.sin(a) + np.cosh(b)

    A = 0.9
    B = npr.randn(10)
    argnum = 1

    exact = elementwise_grad(simple_fun, argnum=argnum)(A, B)
    numeric = np.squeeze(np.array([nd(simple_fun, A, B[i])[argnum] for i in range(len(B))]))
    check_equivalent(exact, numeric)
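
Both tests above rely on a numerical-derivative helper nd. A central-difference version consistent with how it is called (returning one partial derivative per argument) might look like this sketch:

def nd(f, *args, eps=1e-6):
    """Central-difference partial derivatives of f with respect to each scalar argument."""
    partials = []
    for i in range(len(args)):
        plus, minus = list(args), list(args)
        plus[i] = args[i] + eps
        minus[i] = args[i] - eps
        partials.append((f(*plus) - f(*minus)) / (2 * eps))
    return tuple(partials)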
Example #35
def mog_logmarglike(x, means, covs, pis, ind=0):
    """ marginal x or y (depending on ind) """
    K = pis.shape[0]
    xx = np.atleast_2d(x)
    centered = xx.T - means[:,ind,np.newaxis].T
    logprobs = []
    for kk in range(K):
        quadterm  = centered[:,kk] * centered[:,kk] * (1./covs[kk,ind,ind])
        logprobsk = -.5*quadterm - .5*np.log(2*np.pi) \
                    -.5*np.log(covs[kk,ind,ind]) + np.log(pis[kk])
        logprobs.append(np.squeeze(logprobsk))
    logprobs = np.array(logprobs)
    logprob  = scpm.logsumexp(logprobs, axis=0)
    if np.isscalar(x):
        return logprob[0]
    else:
        return logprob 
Example #36
def rand_natparam(n, k):
    return np.squeeze(np.stack([rand_dirichlet(n) for _ in range(k)]))
Example #37
def rand_natparam(n, k):
    return np.squeeze(np.stack([rand_gaussian(n) for _ in range(k)]))
Example #38
def fun(x): return to_scalar(np.squeeze(x))
d_fun = lambda x: to_scalar(grad(fun)(x))
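
This fragment comes from autograd-style test code; to_scalar is assumed to be a smooth reduction that maps an array to a scalar so the result can be differentiated, for example:

import autograd.numpy as np

def to_scalar(x):
    """Collapse an array to a scalar smoothly, as autograd's test utilities do."""
    return np.sum(np.sin(x))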