Example #1
def plot_y_vs_u(xs_train, ys_train, P, xs_test=None, ys_test=None, xlim=None, ylim=None):
    s = 0.75
    alpha = 0.8
    us_train = np.dot(xs_train, P)
    if P.shape[1] == 1:
        fig, ax = plt.subplots()
        ax.scatter(us_train[:,0], ys_train, color='r', label='train', s=s, alpha=alpha)
        if ys_test is not None:
            us_test = np.dot(xs_test, P)
            ax.scatter(us_test[:,0], ys_test, color='b', label='test', s=s, alpha=alpha)
            ax.legend()
        ax.set_xlabel('u0')
        ax.set_ylabel('y')
        ax.set_title('y vs projected features')
        basic.display_fig_inline(fig)
    elif P.shape[1] == 2:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(us_train[:,0], us_train[:,1], ys_train, color='r', label='train', s=s, alpha=alpha)
        if ys_test is not None:
            us_test = np.dot(xs_test, P)
            ax.scatter(us_test[:,0], us_test[:,1], ys_test, color='b', label='test', s=s, alpha=alpha)
        ax.set_xlabel('u0')
        ax.set_ylabel('u1')
        ax.set_zlabel('y')
        ax.set_title('y vs projected features')
        basic.display_fig_inline(fig)
Example #2
File: mniw.py Project: WuCPMark/svae
def standard_to_natural(nu, S, M, K):
    Kinv = np.linalg.inv(K)
    A = Kinv
    B = np.dot(Kinv, M.T)
    C = S + np.dot(M, B)
    d = nu
    return (A, B, C, d)
Example #3
def get_KMM_ws_given_P(xs_train, xs_test, get_K, B_max, eps, P):

    # project data
    us_train = np.dot(xs_train, P)
    us_test = np.dot(xs_test, P)

    return get_KMM_ws(B_max, eps, get_K, us_train, us_test)
Example #4
    def normalizing_flows(z_0, norm_flow_params):
        '''
        z_0: [n_samples, D]
        u: [D,1]
        w: [D,1]
        b: [1]
        '''

        current_z = z_0
        all_zs = []
        all_zs.append(z_0)
        for params_k in norm_flow_params:

            u = params_k[0]
            w = params_k[1]
            b = params_k[2]

            # Appendix equations: m(x) = -1 + log(1 + e^x), applied to w^T u
            m_x = -1. + np.log(1. + np.exp(np.dot(w.T, u)))
            u_k = u + (m_x - np.dot(w.T, u)) * (w / np.linalg.norm(w))
            # u_k = u

            # [n_samples, 1]
            term1 = np.tanh(np.dot(current_z, w) + b)
            # [n_samples, D]
            term1 = np.dot(term1,u_k.T)
            # [n_samples, D]
            current_z = current_z + term1
            all_zs.append(current_z)

        return current_z, all_zs
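A minimal smoke test of the flow above (not from the original source), assuming plain NumPy and parameter shapes matching the docstring: u and w of shape [D, 1], b of shape [1].

import numpy as np

rs = np.random.RandomState(0)
z_0 = rs.randn(5, 3)                                     # [n_samples, D]
params = [(rs.randn(3, 1), rs.randn(3, 1), rs.randn(1))  # (u, w, b) per flow layer
          for _ in range(2)]
z_K, all_zs = normalizing_flows(z_0, params)
print(z_K.shape, len(all_zs))                            # (5, 3) and 3: z_0 plus one z per layer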
Example #5
def get_predictions(Aopt, inX, s_bp):
    s_bp_phi = ascdata.generate_phi(s_bp, d, A_phi, b_phi)
    y_pred = []
    for i in range(inX.shape[0]):
        wi = np.dot(Aopt, inX[i])
        y_pred.append(np.dot(wi.T, s_bp_phi[i]))
    return y_pred
Example #6
def mvt_logpdf(x, mu, Li, df):
    dim = Li.shape[0]
    Ki = np.dot(Li.T, Li)

    #determinant is just multiplication of diagonal elements of cholesky
    logdet = 2*log(1./np.diag(Li)).sum()
    lpdf_const = (gammaln((df + dim) / 2)
                                   -(gammaln(df/2)
                                     + (log(df)+log(np.pi)) * dim*0.5
                                     + logdet * 0.5)
                                   )

    x = np.atleast_2d(x)
    if x.shape[1] != mu.size:
        x = x.T
    assert(x.shape[1] == mu.size
               or x.shape[0] == mu.size)
    
    d = (x - mu.reshape((1 ,mu.size))).T
    
    Ki_d_scal = np.dot(Ki, d) /df          #vector
    d_Ki_d_scal_1 = diag_dot(d.T, Ki_d_scal) + 1. #scalar
    

    res_pdf = (lpdf_const 
               - 0.5 * (df + dim) * np.log(d_Ki_d_scal_1)).flatten() 
    if res_pdf.size == 1:
        res_pdf = float(res_pdf)
    return res_pdf
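The snippet relies on log, gammaln and a diag_dot helper that are not shown above; a hedged usage sketch with plausible stand-ins, treating Li as an upper-triangular factor of the precision matrix:

import numpy as np
from numpy import log
from scipy.special import gammaln

def diag_dot(A, B):
    # assumed helper: the diagonal of A @ B without forming the full product
    return np.sum(A * B.T, axis=1)

mu = np.zeros(3)
Li = np.linalg.cholesky(np.eye(3)).T   # precision factor, so Ki = Li.T @ Li = I
print(mvt_logpdf(np.zeros(3), mu, Li, df=5.0))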
Example #7
    def _cumulative_hazard(self, params, T, *Xs):
        alpha_params = params[self._LOOKUP_SLICE["alpha_"]]
        alpha_ = np.exp(np.dot(Xs[0], alpha_params))

        beta_params = params[self._LOOKUP_SLICE["beta_"]]
        beta_ = np.exp(np.dot(Xs[1], beta_params))
        return np.log1p((T / alpha_) ** beta_)
Example #8
def natural_condition_on(J, h, y, Jxx, Jxy, Jyy=None, logZ=None):
    # NOTE: assumes Jxy is *negative* definite, usually h - np.dot(Jxy, y.T).T
    J_cond, h_cond = J + Jxx, h + np.dot(Jxy, y.T).T

    if Jyy is None or logZ is None:
        return J_cond, h_cond
    return (J_cond, h_cond), logZ + np.dot(y, np.dot(Jyy, y.T))
Example #9
 def ns_loss_a(Wsub):
     h = Wsub[0, :N]
     vwo = Wsub[1, N:]
     vwi_negs = Wsub[2:, N:]
     vwo_h = npa.dot(vwo, h)
     vwi_negs_h = npa.dot(vwi_negs, h)
     return -npa.log(siga(vwo_h)) - npa.sum(npa.log(siga(-vwi_negs_h)))
Example #10
    def backward_pass(self, delta):
        if len(delta.shape) == 2:
            delta = delta[:, np.newaxis, :]
        n_samples, n_timesteps, input_shape = delta.shape
        p = self._params

        # Temporal gradient arrays
        grad = {k: np.zeros_like(p[k]) for k in p.keys()}

        dh_next = np.zeros((n_samples, input_shape))
        output = np.zeros((n_samples, n_timesteps, self.input_dim))

        # Backpropagation through time
        for i in reversed(range(n_timesteps)):
            dhi = self.activation_d(self.states[:, i, :]) * (delta[:, i, :] + dh_next)

            grad['W'] += np.dot(self.last_input[:, i, :].T, dhi)
            grad['b'] += delta[:, i, :].sum(axis=0)
            grad['U'] += np.dot(self.states[:, i - 1, :].T, dhi)

            dh_next = np.dot(dhi, p['U'].T)

            d = np.dot(delta[:, i, :], p['U'].T)
            output[:, i, :] = np.dot(d, p['W'].T)

        # Change actual gradient arrays
        for k in grad.keys():
            self._params.update_grad(k, grad[k])
        return output
Example #11
def cost(usv):
    delta = .5
    u = usv[0]
    s = usv[1]
    vt = usv[2]
    X = np.dot(np.dot(u, np.diag(s)), vt)
    return np.sum(np.sqrt((X - A)**2 + delta**2) - delta)
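A quick sanity check (assuming A is the module-level data matrix the cost refers to): reconstructing A from its exact SVD gives zero residual, so the pseudo-Huber cost should be close to 0.

import numpy as np

A = np.random.randn(4, 3)                        # assumed global used inside cost()
u, s, vt = np.linalg.svd(A, full_matrices=False)
print(cost((u, s, vt)))                          # approximately 0.0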
Example #12
    def mean(self, test_points, g=None):
        if g is None:
            g = np.concatenate([val for _, val in self.__obs])
        L, Lbar = calc_side_matrices(self.__operators, self.__operators_bar, self.__obs, test_points, self.__op_cache, self.__fun_args)
        mu_multiplier = np.dot(Lbar, self.__LLbar_inv)

        return np.dot(mu_multiplier, g)
Example #13
def magcal_residual(X, a, mb):
    """ residual from all observations given magnetometer eccentricity, bias,
    gyro bias, and gyro scale"""

    # (x-c)T A^T A (x-c) = 1
    # x^T Ax - 2x^T Ac + c^T Ac = 1

    # a b c | x' = ax + by + cz
    # 0 d e | y' = dy + ez
    # 0 0 f | z' = fz
    # z = 1/f z'
    # y = 1/d (y' - e/f z')
    # x = 1/a (x' - b/d(y' - e/f z') - c/f z')
    #   = 1/a (x' - b/d y' - (be/df - c/f) z')
    # (x-c) A^T A (x-c)
    # [(A x) - (A c)]^2 - 1 = 0

    # y = A(x-c)
    # y /= ||y||
    # q(x; A, c) = (A^-1 (y+c) - x)^2

    Y = np.dot(X - mb, Ainv(a)).T
    Y /= np.linalg.norm(Y, axis=0)
    # Y /= np.sqrt(np.sum(np.square(Y), axis=0))
    Y = np.dot(Y.T, Amatrix(a)) + mb
    return np.mean(np.sum(np.square(X - Y), axis=1))
Example #14
def scalar_log_lik(theta_1, theta_2, x):
    arg = (x - theta_1)
    lik1 = 1.0 / np.sqrt(2 * SIGMA_x ** 2 * np.pi) * np.exp(- np.dot(arg, arg) / (2 * SIGMA_x ** 2))
    arg = (x - theta_1 - theta_2)
    lik2 = 1.0 / np.sqrt(2 * SIGMA_x ** 2 * np.pi) * np.exp(- np.dot(arg, arg) / (2 * SIGMA_x ** 2))

    return np.log(0.5 * lik1 + 0.5 * lik2)
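SIGMA_x is a module-level constant that is not shown in the snippet; a hedged call with an assumed value:

import numpy as np

SIGMA_x = 2.0   # assumed global noise scale used by scalar_log_lik
# log density of an equal mixture of N(0, SIGMA_x**2) and N(1, SIGMA_x**2), evaluated at x = 0.5
print(scalar_log_lik(theta_1=0.0, theta_2=1.0, x=0.5))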
Example #15
def get_obj(use_yu, obj_from_ws_and_Ks, xs_train, xs_test, Ky, y, KMM_get_K, SDR_get_K, cvxopt_solver, A, b, P, B=None):

    # wrapper

    us_train = np.dot(xs_train, P)
    us_test = np.dot(xs_test, P)
#    pdb.set_trace()
    Ku, kappau = get_KMM_params(us_train, us_test, KMM_get_K)
#    pdb.set_trace()
    wsopt = cvxopt_solver(Ku, kappau, A, b)
    SDR_Ku = SDR_get_K(us_train, us_train)
    if B is None:
        if not use_yu:
            return obj_from_ws_and_Ks((Ky, SDR_Ku), wsopt)
        else:
            try:
                return obj_from_ws_and_Ks((Ky, SDR_Ku), (y, us_train), wsopt)
            except Exception as e:
                print(e)
                pdb.set_trace()
    else:
        if not use_yu:
            return obj_from_ws_and_Ks((Ky, SDR_Ku), wsopt, B) # NEW!
        else:
            return obj_from_ws_and_Ks((Ky, SDR_Ku), (y, us_train), wsopt, B)
Example #16
    def variational_log_density(params, samples):
        '''
        samples: [n_samples, D]
        u: [D,1]
        w: [D,1]
        b: [1]
        Returns: [num_samples]
        '''
        n_samples = len(samples)

        mean = params[0]
        log_std = params[1]
        u = params[2]
        w = params[3]
        b = params[4]

        # print (samples.shape)

        # samples = sample_diag_gaussian(mean, log_std, num_samples, rs)
        z_k = normalizing_flows(samples, u, w, b)

        logp_zk = logprob(z_k)
        logp_zk = np.reshape(logp_zk, [n_samples, 1])

        logq_z0 = diag_gaussian_log_density(samples, mean, log_std)
        logq_z0 = np.reshape(logq_z0, [n_samples, 1])

        # [n_samples, D]
        phi = np.dot((1.-np.tanh(np.dot(samples,w)+b)**2), w.T)

        # [n_samples, 1]
        sum_nf = np.log(abs(1+np.dot(phi, u)))

        # return logq_z0 - sum_nf
        return np.reshape(logq_z0 - sum_nf, [n_samples])
Example #17
def get_dobj_dP(use_yu, xs_train, xs_test, Ky, y, SDR_get_K, KMM_get_K, dobj_dP_thru_Ku, lin_solver, cvxopt_solver, df_dws, d_dP_df_dws, d_dws_df_dws, dobj_dwsopt, A, b, P, B=None):
    # wrapper
    # calculate intermediate stuff
    us_train = np.dot(xs_train, P)
    us_test = np.dot(xs_test, P)
    KMM_Ku, kappau = get_KMM_params(us_train, us_test, KMM_get_K)
    wsopt = cvxopt_solver(KMM_Ku, kappau, A, b)

    # gradient thru wsopt
    SDR_Ku = SDR_get_K(us_train, us_train)
    if B is None:
        if not use_yu:
            dobj_dP_thru_wsopt_val = get_dL_dp_thru_xopt(lin_solver, df_dws, d_dP_df_dws, d_dws_df_dws, dobj_dwsopt, A, b, wsopt, P, L_args=(Ky, SDR_Ku)) # L_args is the 2nd argument to dobj_dwsopt
        else:
            dobj_dP_thru_wsopt_val = get_dL_dp_thru_xopt(lin_solver, df_dws, d_dP_df_dws, d_dws_df_dws, dobj_dwsopt, A, b, wsopt, P, L_args=(Ky, SDR_Ku, y, us_train))
    else:
        if not use_yu:
            dobj_dP_thru_wsopt_val = get_dL_dp_thru_xopt(lin_solver, df_dws, d_dP_df_dws, d_dws_df_dws, dobj_dwsopt, A, b, wsopt, P, L_args=(Ky, SDR_Ku, B)) # NEW! means dobj_dwsopt will accept h in 2nd argument
        else:
            dobj_dP_thru_wsopt_val = get_dL_dp_thru_xopt(lin_solver, df_dws, d_dP_df_dws, d_dws_df_dws, dobj_dwsopt, A, b, wsopt, P, L_args=(Ky, SDR_Ku, y, us_train, B))

    # gradient thru Ku
    if B is None:
        dobj_dP_thru_Ku_val = dobj_dP_thru_Ku(P, wsopt)
    else:
        dobj_dP_thru_Ku_val = dobj_dP_thru_Ku(P, wsopt, B) # NEW!

    return dobj_dP_thru_wsopt_val + dobj_dP_thru_Ku_val
Example #18
File: linalg.py Project: HIPS/autograd
 def vjp(g):
     vjps = []
     q_vjp = solve_sylvester(anp.transpose(a), anp.transpose(b), g)
     if 0 in argnums: vjps.append(-anp.dot(q_vjp, anp.transpose(ans)))
     if 1 in argnums: vjps.append(-anp.dot(anp.transpose(ans), q_vjp))
     if 2 in argnums: vjps.append(q_vjp)
     return tuple(vjps)
Example #19
    def setUp(self):
        self.m = m = 5
        self.n = n = 2
        self.k = k = 1
        self.man = Grassmann(m, n, k=k)

        self.proj = lambda x, u: u - npa.dot(x, npa.dot(x.T, u))
Example #20
 def logprob(inA, inX, iny, ins_phi):
     RMS = 0
     for i in range(len(iny)):
         wi = np.dot(inA, inX[i])
         RMS_current = (iny[i] - np.dot(wi, ins_phi[i]))**2
         RMS += RMS_current
     return -RMS
Example #21
 def setUp(self):
     self.m = m = 20
     self.n = n = 2
     self.k = k = 1
     self.man = Stiefel(m, n, k=k)
     self.proj = lambda x, u: u - npa.dot(x, npa.dot(x.T, u) +
                                          npa.dot(u.T, x)) / 2
Example #22
def natural_predict_grad(g, ans, belief, J11, J12, J22, logZ):
    J, h = belief
    (g_J_predict, g_h_predict), g_lognorm = g

    # re-run forward pass (need to pass these things back here!)
    L, v, v2, temp, h, lognorm = natural_predict_forward_temps(J, J11, J12, h)
    J12 = -J12

    # run the backward pass
    # NEEDS: L, v, v2, temp
    # ALSO USES: h, lognorm
    g_temp = np.dot(temp, (g_J_predict + g_J_predict.T)/2.)
    g_L_1 = solve_triangular_grad_arg0(g_temp, temp, L, J12, 'N')

    g_a = -np.dot(J12, g_h_predict)
    g_L_2 = solve_triangular_grad_arg0(g_a, v2, L, v, 'T')
    g_v_1 = solve_triangular_grad_arg1(g_a, v2, L, v, 'T')

    g_L_3 = lognorm_grad_arg0(g_lognorm, lognorm, L, v)
    g_v_2 = lognorm_grad_arg1(g_lognorm, lognorm, L, v)

    g_L_4 = solve_triangular_grad_arg0(g_v_1 + g_v_2, v, L, h, 'N')
    # print 'high-level: {}'.format((g_v_1 + g_v_2, v, L))
    g_h   = solve_triangular_grad_arg1(g_v_1 + g_v_2, v, L, h, 'N')
    # print 'high_level: {}'.format(g_h)

    g_J = cholesky_grad(L, g_L_1 + g_L_2 + g_L_3 + g_L_4)

    return (-2*g_J, g_h)
Example #23
	def predict(self, x):
		if self.prob_func_ == "sigmoid":
			prob = (1.0 / (1.0 + np.exp(-np.dot(x, self.coef_) - self.intercept_)))[:,np.newaxis]
			prob = np.concatenate((1.0-prob, prob), axis=1)
		else: # self.prob_func_ == "softmax"
			prob = np.exp(np.dot(x, self.coef_.T) + self.intercept_)
			prob /= np.sum(prob, axis=1)[:,np.newaxis]
		return np.array([self.classes_[i] for i in np.argmax(prob, axis=1)])
Example #24
File: utils.py Project: fw0/domain_adapt
def ortho(P):
    ans = np.zeros(P.shape)
    x_dim, z_dim = P.shape
    for j in range(z_dim):
        temp = P[:,j] - np.dot(ans, np.dot(ans.T, P[:,j]))
        temp = temp / np.linalg.norm(temp)
        ans[:,j] = temp
    return ans
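A small check of the Gram-Schmidt routine above (assuming the columns of P are linearly independent): the returned columns should be orthonormal.

import numpy as np

P = np.random.randn(5, 3)
Q = ortho(P)
print(np.allclose(np.dot(Q.T, Q), np.eye(3)))   # expected: True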
Example #25
    def backward_pass(self, delta):
        dW = np.dot(self.last_input.T, delta)
        db = np.sum(delta, axis=0)

        # Update gradient values
        self._params.update_grad('W', dW)
        self._params.update_grad('b', db)
        return np.dot(delta, self._params['W'].T)
Example #26
    def _cumulative_hazard(self, params, T, *Xs):
        mu_params = params[self._LOOKUP_SLICE["mu_"]]
        mu_ = np.dot(Xs[0], mu_params)

        sigma_params = params[self._LOOKUP_SLICE["sigma_"]]
        sigma_ = np.exp(np.dot(Xs[1], sigma_params))
        Z = (np.log(T) - mu_) / sigma_
        return -logsf(Z)
Example #27
File: mniw.py Project: WuCPMark/svae
def natural_to_standard(natparam):
    A, B, C, d = natparam
    nu = d
    Kinv = A
    K = symmetrize(np.linalg.inv(Kinv))
    M = np.dot(K, B).T
    S = C - np.dot(M, B)
    return nu, S, M, K
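A hedged round-trip check combining this with standard_to_natural from Example #2, assuming symmetrize simply averages a matrix with its transpose (the original mniw.py helper may differ):

import numpy as np

def symmetrize(A):
    return 0.5 * (A + A.T)   # assumed definition of the helper used above

nu, S, M, K = 5.0, np.eye(2), np.random.randn(2, 3), np.eye(3)
natparam = standard_to_natural(nu, S, M, K)       # Example #2
nu2, S2, M2, K2 = natural_to_standard(natparam)
print(np.allclose(M, M2), np.allclose(S, S2), np.allclose(K, K2), nu == nu2)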
Example #28
def PhotometricError(iref, inew, R, T, points, D):
    # points is a tuple ([y], [x]); convert to homogeneous
    siz = iref.shape
    npoints = len(points[0])
    f = siz[1]  # focal length, FIXME
    Xref = np.vstack(((points[1] - siz[1]*0.5) / f,  # x
                      (siz[0]*0.5 - points[0]) / f,  # y (left->right hand)
                      np.ones(npoints)))             # z = 1
    # this is confusingly written -- i am broadcasting the translation T to
    # every column, but numpy broadcasting only works if it's rows, hence all
    # the transposes
    # print D * Xref
    Xnew = (np.dot(so3.exp(R), (D * Xref)).T + T).T
    # print Xnew
    # right -> left hand projection
    proj = Xnew[0:2] / Xnew[2]
    p = (-proj[1]*f + siz[0]*0.5, proj[0]*f + siz[1]*0.5)
    margin = 10  # int(siz[0] / 5)
    inwindow_mask = ((p[0] >= margin) & (p[0] < siz[0]-margin-1) &
                     (p[1] >= margin) & (p[1] < siz[1]-margin-1))
    npts_inw = sum(inwindow_mask)
    if npts_inw < 10:
        return 1e6, np.zeros(6 + npoints)
    # todo: filter points which are now out of the window
    oldpointidxs = (points[0][inwindow_mask],
                    points[1][inwindow_mask])
    newpointidxs = (p[0][inwindow_mask], p[1][inwindow_mask])
    origpointidxs = np.nonzero(inwindow_mask)[0]
    E = InterpolatedValues(inew, newpointidxs) - iref[oldpointidxs]
    # dE/dk ->
    # d/dk r_p^2 = d/dk (Inew(w(r, T, D, p)) - Iref(p))^2
    # = -2r_p dInew/dp dp/dw dw/dX dX/dk
    # = -2r_p * g(w(r, T, D, p)) * dw(r, T, D, p)
    # intensity gradients for each point
    Ig = InterpolatedGradients(inew, newpointidxs)
    # TODO: use tensors for this
    # gradients for R, T, and D
    gradient = np.zeros(6 + npoints)
    for i in range(npts_inw):
        # print 'newidx (y,x) = ', newpointidxs[0][i], newpointidxs[1][i]
        # Jacobian of w
        oi = origpointidxs[i]
        Jw = dw(Xref[0][oi], Xref[1][oi], D[oi], R, T)
        # scale back up into pixel space, right->left hand coords to get
        # Jacobian of p
        Jp = f * np.vstack((-Jw[1], Jw[0]))
        # print origpointidxs[i], 'Xref', Xref[:, i], 'Ig', Ig[:, i], \
        #     'dwdRz', Jw[:, 2], 'dpdRz', Jp[:, 2]
        # full Jacobian = 2*E + Ig * Jp
        J = np.sign(E[i]) * np.dot(Ig[:, i], Jp)
        # print '2 E[i]', 2*E[i], 'Ig*Jp', np.dot(Ig[:, i], Jp)
        gradient[:6] += J[:6]
        # print J[:6]
        gradient[6+origpointidxs[i]] += J[6]

    print(R, T, np.sum(np.abs(E)), npts_inw)
    # return ((0.2*(npoints - npts_inw) + np.dot(E, E)), gradient)
    return np.sum(np.abs(E)) / (npts_inw), gradient / (npts_inw)
Example #29
def neural_net_predict(params, inputs):
    """Params is a list of (weights, bias) tuples.
       inputs is an (N x D) matrix."""
    for W, b in params[:-1]:
        outputs = batch_normalize(np.dot(inputs, W) + b)
        inputs = relu(outputs)
    outW, outb = params[-1]
    outputs = np.dot(inputs, outW) + outb
    return outputs
Example #30
	def eval_log_properly(self, x):
		det = np.linalg.det(self.Sigma)
		const = (self.size/2.0)*np.log(2*np.pi)
		const = -0.5*np.log(det) - const
		prec = np.linalg.inv(self.Sigma)
		t = np.subtract(x, self.Mu)
		v = np.dot(np.transpose(t), prec)
		v = -0.5*np.dot(v, t)
		return const + v
Example #31
 def fun(A):
     rng = np.random.RandomState(0)
     z = np.dot(np.linalg.cholesky(A), rng.randn(A.shape[0]))
     return np.linalg.norm(z)
Example #32
def rand_psd(D):
    mat = npr.randn(D,D)
    return np.dot(mat, mat.T)
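rand_psd draws a square Gaussian matrix and returns mat @ mat.T, which is symmetric and positive semi-definite; a quick check:

import numpy as np
import numpy.random as npr

A = rand_psd(4)
print(np.allclose(A, A.T), np.linalg.eigvalsh(A).min() >= -1e-10)   # True True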
Example #33
 def model(self, x, w):
     a = w[0] + np.dot(x.T, w[1:])
     return a.T
Example #34
def log_gaussian_density(x, mu, sigma):

    b, log_det_sigma = solve_posdef(sigma, x - mu)
    const = x.shape[0] * np.log(2 * np.pi)  # Can remove if needed
    return -0.5 * (np.dot(x - mu, b) + log_det_sigma + const)
Example #35
 def loss(W_vect, X, T):
     log_prior = -L2_reg * np.dot(W_vect.T, W_vect)
     log_lik = np.sum(predictions(W_vect, X) * T)
     return -log_prior - log_lik
Example #36
def test_hessian_vector_product():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5)
    v = npr.randn(5)
    H = hessian(fun)(a)
    check_equivalent(np.dot(H, v), hessian_vector_product(fun)(a, v))
Example #37
    def outputs(weights,
                input_set,
                fence_set,
                output_set=None,
                return_pred_set=False):
        update_x_weights = parser.get(weights, 'update_x_weights')
        update_h_weights = parser.get(weights, 'update_h_weights')
        reset_x_weights = parser.get(weights, 'reset_x_weights')
        reset_h_weights = parser.get(weights, 'reset_h_weights')
        thidden_x_weights = parser.get(weights, 'thidden_x_weights')
        thidden_h_weights = parser.get(weights, 'thidden_h_weights')
        output_h_weights = parser.get(weights, 'output_h_weights')
        data_count = len(fence_set) - 1
        feat_count = input_set.shape[0]

        ll = 0.0
        n_i_track = -1
        fence_base = fence_set[0]
        interval = fence_set[1] - fence_set[0]
        pred_set = None

        if return_pred_set:
            pred_set = np.zeros(
                (output_count, int(input_set.shape[1] / interval)))
            print('Prediction set sized ', pred_set.shape)

        # loop through sequences and time steps
        for data_iter in range(data_count):

            # print('Executing iteration %d'%data_iter)

            hiddens = copy(parser.get(weights, 'init_hiddens'))

            fence_post_1 = fence_set[data_iter] - fence_base
            fence_post_2 = fence_set[data_iter + 1] - fence_base
            time_count = fence_post_2 - fence_post_1
            curr_input = input_set[:, fence_post_1:fence_post_2]

            for time_iter in range(time_count):
                hiddens = update(
                    np.expand_dims(np.hstack((curr_input[:, time_iter], 1)),
                                   axis=0), hiddens, update_x_weights,
                    update_h_weights, reset_x_weights, reset_h_weights,
                    thidden_x_weights, thidden_h_weights)

            n_i_track += 1

            if output_set is not None:
                # subtract a small number so -1
                out_proba = softmax_sigmoid(np.dot(hiddens, output_h_weights))
                out_lproba = safe_log(out_proba)
                ll += np.sum(output_set[:, n_i_track] * out_lproba)
            else:
                out_proba = softmax_sigmoid(np.dot(hiddens, output_h_weights))
                out_lproba = safe_log(out_proba)

            if return_pred_set:
                agm = np.argmax(out_lproba[0])
                pred_set[agm, n_i_track] = int(1)

        return ll, pred_set
Example #38
testx = np.copy(test['x'])
test_dict = pecmy.rewrap_y(testx)
test_dict["theta_m"]

# 07/23/2019
# originally feeding wrong war values to Lagrangian, checking everything again w/ equality constraints
# compare war vals to rcvs and make sure everything looks right
# still fails with equality constraints, trying with lower bounds on multipliers but epsilon ball around 0 in dGm_ji
# still returns nonsense after 230 minutes

war_diffs = np.array([-1, 1, 0])
np.where(war_diffs < 0, 0, war_diffs)

pecmy.rewrap_y(test['x'])

np.dot(np.array([1, 2, 3]), np.array([1, 2, 3]))

test1 = pecmy.rewrap_y(test['x'])
pecmy.rewrap_m(test1['m'])

# CLAIMS 'INEQUALITY CONSTRAINTS INCOMPATIBLE' after 8923 function evaluations, ~40 minutes of running
# only 14 jacobian evaluations
# attempting with equality constraints
# here we get 'Singular matrix C in LSQ subproblem' in first Jacobian evaluation, refers to constraint qualification problem, study up on this
# Might be due to starting at zero multipliers, or because derivative wrt own policy is linear combination of other derivatives when gamma = 1
# removing derivative wrt own allocation doesn't seem to help though
# problem seems to be that we weren't selecting self.M[j], rather selecting whole vector
# fixing this lets everything run, but gamma shrinks to zero and everything goes to shit
# got objective value down to 56 w/o gamma but hit same error at minute 111, not clear why here, estimates were pretty stable generally. SSE seemed to be monotonically decreasing in c_hat for most of estimation routine.
# try starting at zero lambda vector
# problem is that equality constraint jacobian inversion fails for weird trial parameters, try with upper and lower constraints?
Example #39
 def forward_pass(self, inputs, param_vector):
     params = self.parser.get(param_vector, 'params')
     biases = self.parser.get(param_vector, 'biases')
     if inputs.ndim > 2:
         inputs = inputs.reshape((inputs.shape[0], np.prod(inputs.shape[1:])))
     return self.nonlinearity(np.dot(inputs[:, :], params) + biases)
Example #40
 def vector_product(x, v):
     return np.sin(np.dot(v, df(x)))
Example #41
 def PNLL(self, W_vect, X, Y, N=None):
     '''Penalized negative log likelihood.'''
     self.num_obj_fun_calls += 1
     log_prior = -self.L2_reg * np.dot(W_vect, W_vect)
     log_lik = -self.NLL(W_vect, X, Y, N)
     return -(log_lik + log_prior)
Example #42
 def loss_jacobian(self, packed_coef_inter, X_batch, y_batch):
     y_pred = np.dot(X_batch, packed_coef_inter)  # svm decision function
     idx = np.argwhere(np.abs(y_pred - y_batch) > self.epsilon).ravel()
     return np.dot(y_batch[idx] - y_pred[idx], X_batch[idx])
Example #43
 def propose(self, theta):
     theta = np.atleast_1d(theta)
     if self.L.shape[1] != theta.shape[0]:
         raise ParameterError("theta and L have incompatible shapes")
     xi = np.random.normal(size=theta.shape)
     return theta + np.dot(self.L, xi), 0.0
Example #44
def squared_loss(y, y_hat):
    return np.dot((y - y_hat), (y - y_hat))
Example #45
def prior_kl(global_natparam, prior_natparam):
    expected_stats = flat(prior_expectedstats(global_natparam))
    natparam_difference = flat(global_natparam) - flat(prior_natparam)
    logZ_difference = prior_logZ(global_natparam) - prior_logZ(prior_natparam)
    return np.dot(natparam_difference, expected_stats) - logZ_difference
Example #46
 def predictions(W_vect, inputs):
     for W, b in unpack_layers(W_vect):
         outputs = np.dot(inputs, W) + b
         inputs = np.tanh(outputs)
     return outputs - logsumexp(outputs, axis=1, keepdims=True)
Example #47
def logistic_predictions(weights, inputs):
    return sigmoid(np.dot(inputs, weights))
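A short usage sketch; sigmoid is not shown in the snippet, so a common tanh-based stand-in is assumed here:

import numpy as np

def sigmoid(z):
    return 0.5 * (np.tanh(z / 2.) + 1)   # assumed helper; maps reals to (0, 1)

weights = np.array([0.5, -0.3, 0.8])
inputs = np.array([[0.5, 0.1, 0.2],
                   [1.0, -0.5, 0.3]])
print(logistic_predictions(weights, inputs))   # one probability per row of inputs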
Example #48
def RKF45_Integrator(t_start, t_stop, h0, x0, A):
	# An integrator using a 4(5) RKF method
	T_0 = time.time()
	"""
	x0 = initial conditions
	t_start = start time
	t_stop = end time
	h0 = initial step size
	A = A(t) matrix function
	"""
	Ndim = x0.size
	x_ = np.zeros((1, Ndim)) # set up the array of x values
	t_ = np.zeros(1)			# set up the array of t values
	t_[0] = t_start
	x_[0,:] = x0
	h = h0
	h_min = h0*(10**(-2))
	h_max = 5*h0
	n = 0
	t = t_start
	#
	S = 0.98				# safety factor
	#
	while t <= t_stop:
		x_n = x_[n,:].reshape(Ndim, 1)
		Err_small = False
		h_new = h
		while Err_small == False:
			# compute the predictions using 4th and 5th order RK methods
			k1 = np.dot(h*A(t), x_n)
			k2 = h*A(t + 0.25*h) @ (x_n + 0.25*k1)
			k3 = h*A(t + (3/8)*h) @ (x_n + (3/32)*k1 + (9/32)*k2)
			k4 = h*A(t + (12/13)*h) @ (x_n + (1932/2197)*k1 - (7200/2197)*k2 + (7296/2197)*k3)
			k5 = h*A(t + h) @ (x_n + (439/216)*k1 - 8*k2 + (3680/513)*k3 - (845/4104)*k4)
			k6 = h*A(t + 0.5*h) @ (x_n - (8/27)*k1 + 2*k2 - (3544/2565)*k3 + (1859/4104)*k4 - (11/40)*k5)
			y_np1 = x_n + (25/216)*k1 + (1408/2565)*k3 + (2197/4104)*k4 - (1/5)*k5  # 4th-order Fehlberg weights
			z_np1 = x_n + (16/135)*k1 + (6656/12825)*k3 + (28561/56430)*k4 - (9/50)*k5 + (2/55)*k6
			#
			Err =  ferr(y_np1, z_np1)
			"""
			Err_max = ε(rtol*|z_np1| + atol)
			"""
			Err_max = epsilon_RK*(rtol_RK*np.abs(z_np1) + atol_RK)
			Err_ratio = float(np.mean(Err / Err_max))
			#
			if Err_ratio <= 1:
				h_new = h*S*np.power(Err_ratio, -1.0/5)
				#Delta = max(np.asscalar(max(Err)), epsilon_RK*0.1)
				#h_new = h*(epsilon_RK*h/Delta)**(1/4)
				if h_new > 10*h:	# limit how fast the step size can increase
					h_new = 10*h
				if h_new > h_max:	# limit the maximum step size
					h_new = h_max
				Err_small = True # break loop
			elif Err_ratio > 1:
				h_new = h*S*np.power(np.abs(Err_ratio), -1.0/4)
				#h_new = h*(epsilon_RK*h/np.asscalar(max(Err)))**(1/4)
				if h_new < 0.2*h:	# limit how fast the step size decreases
					h_new = 0.2*h
				if h_new < h_min:	# limit the minimum step size
					h_new = h_min
					Err_small = True # break loop
				elif h_new >= h_min:
					h = h_new
		t = t + h
		x_ = np.vstack((x_,z_np1.reshape(1, Ndim))) # add x_n+1 to the array of x values
		t_ = np.append(t_, t) 						  # add t_n+1 to the array of t values
		n = n + 1
		h = h_new
		if True: #np.round(((t-t_start)/(t_stop-t_start))*100000) % 1000 == 0:
			print("\r" + "integrated {:.1%}".format((t-t_start)/(t_stop-t_start)), end='')
	T = time.time() - T_0
	print(" done in {:.5g}s".format(T))
	return (t_, x_, T)
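The integrator references ferr and the tolerances epsilon_RK, rtol_RK, atol_RK as module-level names; a hedged driver with assumed definitions, integrating a simple harmonic oscillator:

import time
import numpy as np

epsilon_RK, rtol_RK, atol_RK = 1.0, 1e-6, 1e-9   # assumed tolerances
def ferr(y, z):
    return np.abs(y - z)                         # assumed local error measure

# x'' = -x written as the first-order linear system x' = A x
A = lambda t: np.array([[0.0, 1.0],
                        [-1.0, 0.0]])
t_, x_, T = RKF45_Integrator(0.0, 10.0, 0.01, np.array([1.0, 0.0]), A)
print(x_.shape)                                  # (number of accepted steps + 1, 2)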
Example #49
 def fun(x):
     return np.dot(np.dot(x, H), x)
Example #50
 def conv_function(self,tensor_window):
     tensor_window = np.reshape(tensor_window,(np.shape(tensor_window)[0],np.shape(tensor_window)[1]*np.shape(tensor_window)[2]))
     t = np.dot(self.kernels,tensor_window.T)
     return t
Example #51
def neural_network(x, theta):
    w1, b1, w2, b2 = theta
    return np.tanh(np.dot((np.tanh(np.dot(x, w1) + b1)), w2) + b2)
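A shape check with hypothetical parameters (2 inputs, 4 hidden units, 1 output):

import numpy as np

rs = np.random.RandomState(0)
theta = (rs.randn(2, 4), rs.randn(4),   # w1, b1
         rs.randn(4, 1), rs.randn(1))   # w2, b2
x = rs.randn(5, 2)
print(neural_network(x, theta).shape)   # (5, 1)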
Example #52
 def fun(x):
     return np.sin(np.dot(x, randv))
Example #53
 def predict(self, X):
     X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
     check_is_fitted(self, ["coefs_"])
     return np.dot(X, self.coefs_)
Example #54
 def training_loss(weights):
     diff = np.abs(np.dot(X, weights) - y)
     if self.relative:
         diff = diff / y
     return np.mean(deadzone(diff))
Example #55
 def rayleigh_quotient(self, vec):
     hv_val = self.hess_dot_vec(self.params_flat, vec)
     rq = np.dot(hv_val, vec) / np.dot(vec, vec)
     return rq
Example #56
    def outputs(weights,
                input_set,
                fence_set,
                output_set=None,
                return_pred_set=False):
        update_x_weights = parser.get(weights, 'update_x_weights')
        update_h_weights = parser.get(weights, 'update_h_weights')
        reset_x_weights = parser.get(weights, 'reset_x_weights')
        reset_h_weights = parser.get(weights, 'reset_h_weights')
        thidden_x_weights = parser.get(weights, 'thidden_x_weights')
        thidden_h_weights = parser.get(weights, 'thidden_h_weights')
        output_h_weights = parser.get(weights, 'output_h_weights')

        data_count = len(fence_set) - 1
        feat_count = input_set.shape[0]

        ll = 0.0
        n_i_track = 0
        fence_base = fence_set[0]
        pred_set = None

        if return_pred_set:
            pred_set = np.zeros((output_count, input_set.shape[1]))

        # loop through sequences and time steps
        for data_iter in range(data_count):
            hiddens = copy(parser.get(weights, 'init_hiddens'))

            fence_post_1 = fence_set[data_iter] - fence_base
            fence_post_2 = fence_set[data_iter + 1] - fence_base
            time_count = fence_post_2 - fence_post_1
            curr_input = input_set[:, fence_post_1:fence_post_2]

            for time_iter in range(time_count):
                hiddens = update(
                    np.expand_dims(np.hstack((curr_input[:, time_iter], 1)),
                                   axis=0), hiddens, update_x_weights,
                    update_h_weights, reset_x_weights, reset_h_weights,
                    thidden_x_weights, thidden_h_weights)

                # IF WE WANT PREDICTION, WE HAVE TO TURN SIGMOID TO LINEAR
                if output_set is not None:
                    # subtract a small number so -1
                    out_proba = sigmoid(
                        np.sign(output_set[:, n_i_track] - 1e-3) *
                        np.dot(hiddens, output_h_weights))
                    out_lproba = safe_log(out_proba)
                    ll += np.sum(out_lproba)
                else:
                    out_proba = sigmoid(np.dot(hiddens, output_h_weights))
                    out_lproba = safe_log(out_proba)

                # if output_set is not None:
                #     # subtract a small number so -1
                #     out_proba = linear(np.sign(output_set[:, n_i_track] - 1e-3) *
                #                        np.dot(hiddens, output_h_weights))
                #     out_lproba = safe_log(out_proba)
                #     ll += np.sum(out_lproba)
                # else:
                #     out_proba = linear(np.dot(hiddens, output_h_weights))
                #     out_lproba = safe_log(out_proba)

                if return_pred_set:
                    pred_set[:, n_i_track] = out_lproba[0]

                n_i_track += 1

        return ll, pred_set
Example #57
File: JazNet.py Project: sbidari/EMPJ
    def test(self, InpsAndTargsFunc, testdelay=0, **kwargs):
        '''
		Function that tests a trained network. Relevant parameters in p start with 'test'
		Inputs:
			InpsAndTargsFunc: function used to generate time series (same as in train)
			testdelay: Amount of time to wait between plots (useful for debugging)
			**kwargs: arguments passed to InpsAndTargsFunc
		'''
        p = self.p

        self.initialize_act()
        print('Initializing', end="")
        for i in range(p['test_init_trials']):
            print('.', end="")
            inps_and_targs = InpsAndTargsFunc(dt=p['dt'], **kwargs)
            self.run(inps_and_targs['inps'])
        print('')

        inps_and_targs = InpsAndTargsFunc(dt=p['dt'], **kwargs)
        inp = inps_and_targs['inps']
        targ = inps_and_targs['targs']
        test_fig = plt.figure()
        ax = test_fig.add_subplot(1, 1, 1)
        tvec = np.expand_dims(np.arange(0, len(inp)) * p['dt'], axis=1)
        line_inp = plt.Line2D(np.repeat(tvec, inp.shape[1], axis=1).T,
                              inp.T,
                              linestyle='--',
                              color='g')
        line_targ = plt.Line2D(np.repeat(tvec, targ.shape[1], axis=1).T,
                               targ.T,
                               linestyle='--',
                               color='r')
        line_out = plt.Line2D(np.repeat(tvec, targ.shape[1], axis=1).T,
                              targ.T,
                              color='b')
        ax.add_line(line_inp)
        ax.add_line(line_targ)
        ax.add_line(line_out)
        ax.legend([line_inp, line_targ, line_out],
                  ['Input', 'Target', 'Output'],
                  loc=1)
        ax.set_title('RNN Testing: Wait')
        ax.set_xlim([0, p['dt'] * len(inp)])
        ax.set_ylim([-1.2, 1.2])
        ax.set_xlabel('Time (s)')
        test_fig.canvas.draw()

        E_out = 0  # Running squared error
        V_targ = 0  # Running variance of target
        print('Testing: %g trials' % p['test_trials'])
        for idx in range(p['test_trials']):
            print('.', end="")
            inps_and_targs = InpsAndTargsFunc(dt=p['dt'], **kwargs)
            inp = inps_and_targs['inps']
            targ = inps_and_targs['targs']
            targ_idx = inps_and_targs['targ_idx']

            tvec = np.expand_dims(np.arange(0, len(inp)) * p['dt'], axis=1)
            ax.set_xlim([0, p['dt'] * len(inp)])
            line_inp.set_xdata(np.repeat(tvec, inp.shape[1], axis=1).T)
            line_inp.set_ydata(inp.T)

            line_targ.set_xdata(np.repeat(tvec, targ.shape[1], axis=1).T)
            line_targ.set_ydata(targ.T)
            out = self.run(inp)[0]
            line_out.set_xdata(np.repeat(tvec, out.shape[1], axis=1).T)
            line_out.set_ydata(out.T)

            ax.set_title('RNN Testing, trial %g' % (idx + 1))
            test_fig.canvas.draw()

            E_out = E_out + np.trace(
                np.dot(
                    np.transpose(out[targ_idx] - targ[targ_idx]),
                    out[targ_idx] - targ[targ_idx])) / targ[targ_idx].shape[1]
            V_targ = V_targ + np.trace(
                np.dot(np.transpose(targ[targ_idx]),
                       targ[targ_idx])) / targ[targ_idx].shape[1]

            time.sleep(testdelay)
        print('')
        E_norm = E_out / V_targ
        print('Normalized error: %g' % E_norm)
        return E_norm
Example #58
def test_inv():
    def fun(x): return np.linalg.inv(x)
    D = 8
    mat = npr.randn(D, D)
    mat = np.dot(mat, mat) + 1.0 * np.eye(D)
    check_grads(fun)(mat)
Example #59
 def loss_jacobian(self, packed_coef_inter, X_batch, y_batch):
     y_pred = np.dot(X_batch, packed_coef_inter)  # svm decision function
     idx = np.argwhere(y_batch * y_pred < 1.).ravel()
     return np.dot(y_batch[idx], X_batch[idx])
Example #60
	def penalty_term(coefficients):
		return n.sum([(n.dot(p, c.flatten())**2).sum() for p, c in zip(penalties, coefficients)])