Example #1
import numpy as np
from scipy.linalg import null_space

# Closed 2D polygon: the last vertex repeats the first
px = np.asarray([0, .5, 2, 3, 1, 0])
py = np.asarray([0, 1, 1.5, .5, -.5, 0])
# pz = np.asarray([0, 2, -3.5, 2.8, -3.2, 0])

m = len(px) - 1  # number of edges
n = 2            # dimension
# Interior point used to orient the half-planes (here: the vertex centroid)
pxint, pyint = px[:-1].mean(), py[:-1].mean()

#%%
# generate A, b such that the polygon is {x : A x <= b}
A = np.zeros((m, n))
b = np.zeros(m)

for i in range(m):
    # Edge normal: null space of the 1x2 edge-direction matrix
    A[i, :] = null_space([[px[i + 1] - px[i], py[i + 1] - py[i]]]).ravel()
    # Offset: normal evaluated at the edge midpoint
    b[i] = A[i, :] @ (.5 * np.array([px[i + 1] + px[i], py[i + 1] + py[i]]))
    # Flip so the interior point satisfies A[i] x <= b[i]
    if A[i, :] @ np.array([pxint, pyint]) - b[i] > 0:
        A[i, :] = -A[i, :]
        b[i] = -b[i]
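
# Quick check: the interior point satisfies every half-plane constraint
print(np.all(A @ np.array([pxint, pyint]) <= b))  # True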

#%% Construct the problem.
# cvx_begin

#     variable B(n,n) symmetric
#     variable d(n)
#     maximize( det_rootn( B ) )
#     subject to
#        for i = 1:m
#            norm( B*A(i,:)', 2 ) + A(i,:)*d <= b(i);
#        end
# cvx_end
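
# A minimal CVXPY sketch of the commented CVX block above (maximum-volume
# inscribed ellipsoid {B u + d : ||u||_2 <= 1}), assuming cvxpy is installed
# and reusing A, b, m, n from Example #1; cp.log_det has the same maximizer
# as CVX's det_rootn.
import cvxpy as cp

B = cp.Variable((n, n), symmetric=True)
d = cp.Variable(n)
constraints = [cp.norm(B @ A[i, :], 2) + A[i, :] @ d <= b[i]
               for i in range(m)]
prob = cp.Problem(cp.Maximize(cp.log_det(B)), constraints)
prob.solve()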
def get_nulls(self, F):
    # Right and left null vectors of F (for a fundamental matrix these are
    # the epipoles), normalized so the third homogeneous coordinate is 1
    e_one = null_space(F)
    e_two = null_space(F.T)
    e_one = e_one / e_one[2]
    e_two = e_two / e_two[2]
    return e_one, e_two
Example #3
def t_test(self):
    """T statistics for a contrast in a univariate or multivariate model.

    Parameters
    ----------
    self : brainstat.stats.SLM.SLM
        SLM object on which linear_model has already been run.
    """

    if isinstance(self.contrast, Term):
        self.contrast = self.contrast.m.to_numpy()

    def null(A, eps=1e-15):
        u, s, vh = scipy.linalg.svd(A)
        null_mask = s <= eps
        null_space = np.compress(null_mask, vh, axis=0)
        return np.transpose(null_space)
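    # Note: this helper spans null(A) with the rows of vh whose singular
    # values fall below the absolute tolerance eps; scipy.linalg.null_space
    # (used further below) does the same with a relative tolerance.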

    if not isinstance(self.df, np.ndarray):
        self.df = np.array([self.df])

    if self.contrast.ndim == 1:
        self.contrast = np.reshape(self.contrast, (-1, 1))

    [n, p] = np.shape(self.X)
    pinvX = np.linalg.pinv(self.X)

    if len(self.contrast) <= p:
        c = np.concatenate(
            (self.contrast, np.zeros((1, p - np.shape(self.contrast)[1]))),
            axis=1).T

        if np.square(np.dot(null_space(self.X).T,
                            c)).sum() / np.square(c).sum() > np.spacing(1):
            sys.exit("Contrast is not estimable :-(")

    else:
        c = np.dot(pinvX, self.contrast)
        r = self.contrast - np.dot(self.X, c)

        if np.square(np.ravel(r, "F")).sum() / np.square(
                np.ravel(self.contrast, "F")).sum() > np.spacing(1):
            warnings.warn("Contrast is not in the model :-( ")

    self.c = c.T
    self.df = self.df[-1]

    if np.ndim(self.coef) == 2:
        k = 1
        self.k = k

        if self.r is None:
            # fixed effect
            if self.V is not None:
                Vmh = np.linalg.inv(cholesky(self.V).T)
                pinvX = np.linalg.pinv(np.dot(Vmh, self.X))
            Vc = np.sum(np.square(np.dot(c.T, pinvX)), axis=1)
        else:
            # mixed effect
            q1, v = np.shape(self.r)
            q = q1 + 1
            nc = np.shape(self.dr)[1]
            chunk = math.ceil(v / nc)
            irs = np.zeros((q1, v))

            for ic in range(1, nc + 1):
                v1 = 1 + (ic - 1) * chunk
                v2 = np.min((v1 + chunk - 1, v))

                vc = v2 - v1 + 1

                irs[:, int(v1 - 1):int(v2)] = np.around(
                    np.multiply(
                        self.r[:, int(v1 - 1):int(v2)],
                        np.tile(1 / self.dr[:, (ic - 1)], (1, vc)),
                    ))

            ur, ir, jr = np.unique(irs,
                                   axis=0,
                                   return_index=True,
                                   return_inverse=True)
            ir = ir + 1
            jr = jr + 1
            nr = np.shape(ur)[0]
            self.dfs = np.zeros((1, v))
            Vc = np.zeros((1, v))

            for ir in range(1, nr + 1):
                iv = (jr == ir).astype(int)
                rv = self.r[:, (iv - 1)].mean(axis=1)
                V = (1 - rv.sum()) * self.V[:, :, (q - 1)]

                for j in range(1, q1 + 1):
                    V = V + rv[(j - 1)] * self.V[:, :, (j - 1)]

                Vinv = np.linalg.inv(V)
                VinvX = np.dot(Vinv, self.X)
                Vbeta = np.linalg.pinv(np.dot(self.X.T, VinvX))
                G = np.dot(Vbeta, VinvX.T)
                Gc = np.dot(G.T, c)
                R = Vinv - np.dot(VinvX, G)
                E = np.zeros((q, 1))
                RVV = np.zeros((np.shape(self.V)))
                M = np.zeros((q, q))

                for j in range(1, q + 1):
                    E[(j - 1)] = np.dot(Gc.T, np.dot(self.V[:, :, (j - 1)],
                                                     Gc))
                    RVV[:, :, (j - 1)] = np.dot(R, self.V[:, :, (j - 1)])

                for j1 in range(1, q + 1):
                    for j2 in range(j1, q + 1):
                        M[(j1 - 1),
                          (j2 - 1)] = (RVV[:, :,
                                           (j1 - 1)] * RVV[:, :,
                                                           (j2 - 1)].T).sum()
                        M[(j2 - 1), (j1 - 1)] = M[(j1 - 1), (j2 - 1)]

                vc = np.dot(c.T, np.dot(Vbeta, c))
                iv = (jr == ir).astype(int)
                Vc[iv - 1] = vc
                self.dfs[iv - 1] = np.square(vc) / np.dot(
                    E.T, np.dot(np.linalg.pinv(M), E))

        self.ef = np.dot(c.T, self.coef)
        self.sd = np.sqrt(np.multiply(Vc, self.SSE) / self.df)
        self.t = np.multiply(np.divide(self.ef, (self.sd + (self.sd <= 0))),
                             self.sd > 0)

    else:
        # multivariate
        p, v, k = np.shape(self.coef)
        self.k = k
        self.ef = np.zeros((k, v))

        for j in range(0, k):
            self.ef[j, :] = np.dot(c.T, self.coef[:, :, j])

        j = np.arange(1, k + 1)
        jj = (np.multiply(j, j + 1) / 2) - 1
        jj = jj.astype(int)

        vf = np.divide(np.sum(np.square(np.dot(c.T, pinvX)), axis=1), self.df)
        self.sd = np.sqrt(vf * self.SSE[jj, :])

        if k == 2:
            det = np.multiply(self.SSE[0, :], self.SSE[2, :]) - np.square(
                self.SSE[1, :])

            self.t = (
                np.multiply(np.square(self.ef[0, :]), self.SSE[2, :]) +
                np.multiply(np.square(self.ef[1, :]), self.SSE[0, :]) -
                np.multiply(np.multiply(2 * self.ef[0, :], self.ef[1, :]),
                            self.SSE[1, :]))

        if k == 3:
            det = (np.multiply(
                self.SSE[0, :],
                (np.multiply(self.SSE[2, :], self.SSE[5, :]) -
                 np.square(self.SSE[4, :])),
            ) - np.multiply(self.SSE[5, :], np.square(self.SSE[1, :])) +
                   np.multiply(
                       self.SSE[3, :],
                       (np.multiply(self.SSE[1, :], self.SSE[4, :]) * 2 -
                        np.multiply(self.SSE[2, :], self.SSE[3, :])),
                   ))

            self.t = np.multiply(
                np.square(self.ef[0, :]),
                (np.multiply(self.SSE[2, :], self.SSE[5, :]) -
                 np.square(self.SSE[4, :])),
            )

            self.t = self.t + np.multiply(
                np.square(self.ef[1, :]),
                (np.multiply(self.SSE[0, :], self.SSE[5, :]) -
                 np.square(self.SSE[3, :])),
            )

            self.t = self.t + np.multiply(
                np.square(self.ef[2, :]),
                (np.multiply(self.SSE[0, :], self.SSE[2, :]) -
                 np.square(self.SSE[1, :])),
            )

            self.t = self.t + np.multiply(
                2 * self.ef[0, :],
                np.multiply(
                    self.ef[1, :],
                    (np.multiply(self.SSE[3, :], self.SSE[4, :]) -
                     np.multiply(self.SSE[1, :], self.SSE[5, :])),
                ),
            )

            self.t = self.t + np.multiply(
                2 * self.ef[0, :],
                np.multiply(
                    self.ef[2, :],
                    (np.multiply(self.SSE[1, :], self.SSE[4, :]) -
                     np.multiply(self.SSE[2, :], self.SSE[3, :])),
                ),
            )

            self.t = self.t + np.multiply(
                2 * self.ef[1, :],
                np.multiply(
                    self.ef[2, :],
                    (np.multiply(self.SSE[1, :], self.SSE[3, :]) -
                     np.multiply(self.SSE[0, :], self.SSE[4, :])),
                ),
            )

        if k > 3:
            sys.exit("Hotelling" "s T for k>3 not programmed yet")

        self.t = np.multiply(np.divide(self.t, (det + (det <= 0))),
                             (det > 0)) / vf
        self.t = np.multiply(np.sqrt(self.t + (self.t <= 0)), (self.t > 0))
    self.t = np.atleast_2d(self.t)
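
# Hypothetical usage sketch (names taken from the docstring above, assumed
# rather than verified against the brainstat API):
# slm = SLM(model, contrast)   # brainstat.stats.SLM.SLM
# slm.linear_model(data)       # t_test expects linear_model to have run
# t_test(slm)
# print(slm.t)                 # T statistic, shape (1, n_vertices)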
Example #4
def _full_from_partial(elems: Sequence, traceless: Union[None, bool]) -> Basis:
    """
    Internal function to parse the basis elements *elems*. By default,
    checks are performed for orthogonality and linear independence. If
    either fails an exception is raised. Returns a full hermitian and
    orthonormal basis.
    """
    elems = np.asanyarray(elems)
    if not isinstance(elems, Basis):
        # Convert elems to basis to have access to its handy attributes
        elems = normalize(elems.view(Basis))

    if not elems.isherm:
        warn("(Some) elems not hermitian! The resulting basis also won't be.")

    if not elems.isorthonorm:
        raise ValueError("The basis elements are not orthonormal!")

    if traceless is None:
        traceless = elems.istraceless
    else:
        if traceless and not elems.istraceless:
            raise ValueError("The basis elements are not traceless (up to " +
                             "an identity element) but a traceless basis " +
                             "was requested!")

    # Get a Generalized Gell-Mann basis to expand in (it fulfills the desired
    # properties, Hermiticity and orthonormality, hence so do linear
    # combinations, i.e. basis expansions, of it). Split off the identity so
    # that for traceless bases we can put it in the front.
    if traceless:
        Id, ggm = np.split(Basis.ggm(elems.d), [1])
    else:
        ggm = Basis.ggm(elems.d)

    coeffs = expand(elems, ggm, tidyup=True)

    # Throw out coefficient vectors that are all zero (should only happen for
    # the identity)
    coeffs = coeffs[(coeffs != 0).any(axis=1)]
    if coeffs.size != 0:
        # Get d**2 - len(coeffs) vectors spanning the nullspace of coeffs.
        # Those together with coeffs span the whole space, and therefore also
        # the linear combinations of GGMs weighted with the coefficients will
        # span the whole matrix space
        coeffs = np.concatenate((coeffs, sla.null_space(coeffs).T))
        # Our new basis is given by linear combinations of GGMs with coeffs
        basis = np.einsum('ij,jkl', coeffs, ggm)
    else:
        # Resulting array is of size zero, i.e. we can just return the GGMs
        basis = ggm

    # Add the identity again and normalize the new basis
    if traceless:
        basis = np.concatenate((Id, basis)).view(Basis)
    else:
        basis = basis.view(Basis)

    # Clean up
    basis.tidyup()

    return basis
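
# The completion step above in isolation: given orthonormal coefficient rows,
# sla.null_space supplies the remaining directions, yielding a full
# orthonormal set (a minimal sketch with a hypothetical 2-of-4 example):
import numpy as np
import scipy.linalg as sla

partial = np.array([[1., 0., 0., 0.],
                    [0., 1., 0., 0.]])
full = np.concatenate((partial, sla.null_space(partial).T))
print(np.allclose(full @ full.T, np.eye(4)))  # True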
Example #5
def polyfix(x, y, n, xfix, yfix, xder=[], dydx=[]):
    """Least-squares fit of a degree-n polynomial to (x, y), constrained to
    pass through the points (xfix, yfix) and, optionally, to have slopes
    dydx at the points xder."""
    nfit = len(x)
    if len(y) != nfit:
        raise ValueError('x and y must have the same size')

    nfix = len(xfix)
    if len(yfix) != nfix:
        raise ValueError('xfix and yfix must have the same size')

    # transform lists to column vectors
    x = np.vstack(x)
    y = np.vstack(y)
    xfix = np.vstack(xfix)
    yfix = np.vstack(yfix)

    # if derivatives are specified:
    if len(xder) != 0:
        xder = np.array([xder]).reshape(-1, 1)
        dydx = np.array([dydx]).reshape(-1, 1)
    else:
        # keep shapes consistent so the vstack below works with no derivatives
        xder = np.zeros((0, 1))
        dydx = np.zeros((0, 1))

    nder = len(xder)
    if len(dydx) != nder:
        raise ValueError('xder and dydx must have same size')

    nspec = nfix + nder
    specval = np.vstack((yfix, dydx))

    # first find A and pc such that A*pc = specval
    A = np.zeros((nspec, n + 1))
    # specified y values
    for i in range(n + 1):
        A[:nfix, i] = np.hstack(np.ones((nfix, 1)) * xfix**(n + 1 - (i + 1)))
    if nder > 0:
        for i in range(n):
            A[nfix:nder + nfix, i] = ((n - (i + 1) + 1) * np.ones(
                (nder, 1)) * xder**(n - (i + 1))).flatten()
    if nfix > 0:
        lastcol = n + 1
        nmin = nspec - 1
    else:
        lastcol = n
        nmin = nspec

    if n < nmin:
        raise ValueError(
            'Polynomial degree too low, cannot match all constraints')
    # find unique polynomial of degree nmin that fits the constraints
    firstcol = n - nmin
    pc0 = np.linalg.solve(A[:, firstcol:lastcol], specval)
    pc = np.zeros((n + 1, 1))
    pc[firstcol:lastcol] = pc0

    X = np.zeros((nfit, n + 1))
    for i in range(n + 1):
        X[:, i] = (np.ones((nfit, 1)) * x**(n + 1 - (i + 1))).flatten()

    yfit = y - np.polyval(pc, x)

    B = sl.null_space(A)
    z = np.linalg.lstsq(X @ B, yfit, rcond=None)[0]

    if len(z) == 0:
        return pc.flatten()
    else:
        p0 = B @ z

    p = p0 + pc
    return p.flatten()
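
# Usage sketch: force a cubic fit through (0, 0) and (1, 1); assumes numpy as
# np and scipy.linalg as sl, as the function body does.
x = np.linspace(0, 1, 50)
y = x**3 + 0.01 * np.random.randn(50)
p = polyfix(x, y, 3, [0, 1], [0, 1])
print(np.polyval(p, [0, 1]))  # ~[0. 1.]: the fixed points are matched exactly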
Example #6
# Program file Pex3_35.py
import numpy as np
from scipy.linalg import null_space
A = np.array([[1, -5, 2, -3], [5, 3, 6, -1], [2, 4, 2, 1]])
print("A的零空间(即基础解系)为:", null_space(A))
Example #7
import numpy as np
import scipy.linalg as linalg

# Create a random 4x4 matrix
A = np.random.randint(-9, 9, (4, 4))
print('Matrix A\n', A)

# Compute the determinant of A
detA = linalg.det(A)
print('determinant of A\n', detA)

# Inverse of A
invA = linalg.inv(A)
print('inverse of A\n', invA)

# Rank of A
rankA = np.linalg.matrix_rank(A)
print('rank of A\n', rankA)

# Null space of A (an empty (4, 0) array whenever A has full rank)
nullA = linalg.null_space(A)
print('Null space\n', nullA)

# solve the matrix equation Ax=b
b = np.random.randint(-9, 9, (4,1))
print('b vector\n', b)

x = linalg.solve(A, b)
print('solved x vector\n', x)

print('A @ x =\n', A @ x)
Example #8
def multiTrajOptEnd(robKine, traj_q, T, ite_num, obstacles, radii, eps, body_sizes, stomp_weights, angle_pos, angle_neg,
                    freeAxis, jointLimit_low, jointLimit_high, dim=3,
                    sampleNum=500, update_rate_chomp=0.04, opt_threshold = 2.0,
                    MaxSol=10, scale=20, seed_num=0):

    np.random.seed(seed_num)
    link_num = traj_q.shape[1]

    K = np.diag([1] * T, 0) + np.diag([-1] * (T-1), -1)
    K[0, 0] = 0
    K[1, 0] = 0

    A = K.T @ K
    A[-1, -1] = 2

    M_end = np.linalg.pinv(A)
    B = np.copy(A[:-1, 1:-1])
    R = np.linalg.inv(B.T @ B)

    zero_mean = np.zeros((T-2,))

    N = sampleNum
    g_seedNum = 400

    traj_sample_set = np.zeros((T, link_num, N))
    cost_set = np.zeros((N, 1))

    p_set = np.zeros((N, 1))
    zero_ini = np.zeros((1, link_num))

    p_ini = 1
    traj_hto = []
    modeNum = 1

    goal_config_given = traj_q[T-1,:]
    Nend = np.floor(g_seedNum * angle_pos / (angle_pos + angle_neg))
    goal_config = np.copy(goal_config_given)
    goal_task = np.copy(robKine.computeEndPosConfig(goal_config_given * 180/np.pi))

    # Prepare a set of possible goal configurations
    g_config_set = np.zeros((g_seedNum + 1, link_num))
    g_config_set[0,:] =  goal_config_given

    for i in range(1, g_seedNum):
        if i < Nend + 1:
            noise_ang = angle_pos / Nend
        else:
            noise_ang = angle_neg / (g_seedNum - Nend)

        noise_endRot = noise_ang * freeAxis
        noise_endRot = np.hstack([np.zeros((3, )), noise_endRot])

        robKine.computeJacobianConfig(goal_config.flatten()*180/np.pi)
        dq = robKine.ComputeJointVelfromCartVel(np.reshape(noise_endRot, (6,1)))

        if i < Nend + 1:
            goal_config = goal_config.flatten() - dq.flatten()
        else:
            goal_config = goal_config.flatten() + dq.flatten()

        if np.greater(jointLimit_low, goal_config).any() or np.greater(goal_config, jointLimit_high).any():
            print('goal_config', goal_config)
            print('joint limit, i: ',  i)
            break

        for ite in range(10):
            goal_config_in = goal_config.flatten()
            x, J = robKine.computeJacobianEndPosConfig(goal_config_in*180/np.pi)

            goal_now = np.zeros((1, 3))
            goal_now[:] = x
            goal_error = goal_task - goal_now

            if dim == 2:
                goal_error = np.vstack([goal_error.transpose(), np.zeros((4, 1))])
            else:
                goal_error = np.vstack([goal_error.transpose(), np.zeros((3, 1))])

            goal_config = goal_config + 0.2 * np.transpose(robKine.ComputeJointVelfromCartVel(goal_error))

            if np.linalg.norm(goal_error) < 0.01:
                break

        g_config_set[i,:] =  goal_config

        if i == Nend:
            goal_config = goal_config_given

    g_config_set = g_config_set[g_config_set.any(axis=1), :]

    D_transformed = []
    exp_cost_set = []
    z = []
    epsilon = 0.4

    for iteration in range(ite_num):
        explr_schedule = 1
        if ite_num > 1:
            explr_schedule = 1 - 0.5*iteration/(ite_num-1)

        M = explr_schedule * 3.0 * R / (R.max() * T)

        print('sampling trajectories...')
        for k in range(N):
            noise = np.random.multivariate_normal(zero_mean, M, link_num)
            p = multivariate_normal.pdf(noise, mean=zero_mean, cov=M)

            if iteration == 0 and k== 0:
                p_ini = p[0]
            p_set[k] = np.prod(p / p_ini)

            traj_noise = np.vstack([zero_ini, np.transpose(noise), zero_ini])

            traj_q_noise = []
            if iteration == 0:
                g_noise_ind = np.random.randint(low=0, high=g_config_set.shape[0]-1)
                g_noise_traj = np.zeros((T, link_num))

                if link_num > 6:
                    g_noise_config = g_config_set[g_noise_ind, :]
                    J = robKine.computeJacobianConfig(g_noise_config.flatten()*180/np.pi)
                    vector_null = null_space(J)
                    g_noise_traj[T - 1, :] = g_config_set[g_noise_ind, :] - goal_config_given + np.random.uniform(- epsilon, epsilon)* np.reshape(vector_null, (1, link_num))
                else:
                    g_noise_traj[T - 1, :] = g_config_set[g_noise_ind, :] - goal_config_given

                g_noise_prop = np.dot(M_end, g_noise_traj)

                traj_q_noise = traj_q + traj_noise + g_noise_prop
            else:

                m = np.mod(k, modeNum)
                if link_num > 6:
                    g_config_m = traj_hto[m,T-1,:]
                    J = robKine.computeJacobianConfig(g_config_m.flatten() * 180 / np.pi)
                    vector_null = null_space(J)
                    null_noise_traj = np.zeros((T, link_num))
                    null_noise_traj[T - 1, :] = np.random.uniform(-epsilon, epsilon) * np.reshape(vector_null, (1, link_num))
                    null_noise_prop = np.dot(M_end, null_noise_traj)
                    traj_q_noise = traj_hto[m, :, :] + traj_noise + null_noise_prop
                else:
                    traj_q_noise = traj_hto[m,:,:] + traj_noise

            cost_set[k, 0], _, _ = computeCost(robKine, traj_q_noise, body_sizes, obstacles, radii, eps, stomp_weights)
            traj_sample_set[:,:,k] = traj_q_noise


        D = []
        for k in range(N):
            D.append( np.reshape(traj_sample_set[:,:,k], (T*link_num, )) )

        exp_arg = -scale * (cost_set - cost_set.min()) / (cost_set.max() - cost_set.min())
        # importance-weight the exponentiated costs by the sampling density
        exp_cost_set = np.divide(np.exp(exp_arg), p_set)
        exp_cost_set = exp_cost_set / exp_cost_set.mean()

        print('performing dimensionality reduction...')
        embedding = SpectralEmbedding(n_components=3)
        D_transformed = embedding.fit_transform(np.asarray(D))
        max_D = np.max(D_transformed)
        D_transformed = D_transformed /max_D
        # print('D_transformed', D_transformed)

        print('fitting GMM...')
        z, modeNum = IWVBEMGMMfitting(D_transformed, exp_cost_set, 1000, MaxSol)
        z = np.reshape(z, (N,))
        # print(z)

        traj_hto = np.zeros((modeNum, T, link_num))
        cost_m = np.zeros((modeNum, 3))
        print('goal_task', goal_task)

        chomp_ite = 30
        opt_thre = opt_threshold
        if ite_num > 1:
            opt_thre = 3.0 + (opt_threshold - 3.0) * iteration / (ite_num - 1)
            chomp_ite = 15
        for m in range(modeNum):
            ind_m = (z == m)
            traj_sample_set_m = np.asarray(D)[ind_m, :]

            num_m = np.sum(ind_m)
            print('num of samples in this mode', num_m)
            weight_m = exp_cost_set[ind_m, :]

            weight_m_2d = np.matlib.repmat(weight_m, 1, T*link_num)
            mean_traj_m = np.sum( np.multiply(weight_m_2d, traj_sample_set_m), axis=0) / np.sum(weight_m)

            traj_m = np.reshape(mean_traj_m, (T, link_num))
            goal_error = []

            traj_m = chompend(robKine, goal_task, traj_m, T, obstacles, radii, eps, body_sizes, stomp_weights, freeAxis,
                     MaxIter=chomp_ite,  update_rate=update_rate_chomp, collision_threshold=opt_thre)

            cost, collision, smoothness = computeCost(robKine, traj_m, body_sizes, obstacles, radii, eps, stomp_weights)
            print('total: ', cost, 'collision: ', collision, 'smoothness: ', smoothness)
            cost_m[m, 0] = cost
            cost_m[m, 1] = collision
            cost_m[m, 2] = smoothness

            traj_hto[m, :, :] = traj_m

        check_thre = sum(cost_m[:, 1] < opt_threshold)
        print('number of solutions that satisfy the collision threshold: ', check_thre)
        if check_thre > 1:
            print('break')
            break


    # remove the solutions that exceed the threshold of the collision cost
    ind_ok = (cost_m[:, 1] < 7.0)
    modeNum = int(ind_ok.sum())
    traj_hto = traj_hto[ind_ok, :, :]
    cost_m = cost_m[ind_ok, :]

    return traj_hto, modeNum, cost_m, D_transformed, exp_cost_set, z
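
# The redundancy trick used above (null_space(J) for a redundant arm), as a
# standalone sketch with a toy Jacobian:
import numpy as np
from scipy.linalg import null_space

J = np.random.randn(6, 7)      # 6-D task space, 7 joints -> 1-D null space
dq = null_space(J)[:, 0]       # joint velocity with no task-space effect
print(np.allclose(J @ dq, 0))  # True: the end effector does not move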
def SINGTANGENTS(resfn, X, lam, mE, opt, ei=0):
    """SINGTANGENTS   : Returns the tangents from the two branches at singular point. Assumes simple bifurcation
    USAGE   :
    du1, du2, others = SINGTANGENTS(resfn, X, lam, mE, ei=0)
    INPUTS  :
    resfn, X, lam, mE, ei=0
    OUTPUTS :
    du1, du2, others
    """

    # pdb.set_trace()
    def mineigval(lam, u0, k=0):
        us = SPNEWTONSOLVER(lambda u: resfn(u, lam)[0:2], u0, opt)
        return np.sort(np.linalg.eigvals(us.fjac.todense()))[k]

    # 1. Find Critical Point
    # pdb.set_trace()
    cpi = np.where(np.array(mE)[:-1, ei] * np.array(mE)[1:, ei] < 0)[0][0] + 1
    mc = np.argmin([mE[cpi][0], mE[cpi - 1][0]])
    biflam = so.fsolve(lambda lmu: mineigval(lmu, X[cpi - mc], k=ei),
                       lam[cpi - mc])
    # biflam = so.bisect(lambda lmu: mineigval(lmu, X[cpi-1], k=ei), lam[cpi-1], lam[cpi])
    us = SPNEWTONSOLVER(lambda u: resfn(u, biflam)[0:2], X[cpi - mc], opt)

    Rb, dRdXb, dRdlb, d2RdXlb, d2RdX2b = resfn(us.x, biflam, d3=1)
    evals, evecs = np.linalg.eig(dRdXb.todense())
    evecs = np.asarray(evecs[:, np.argsort(evals)])
    evals = evals[np.argsort(evals)]

    # pdb.set_trace()
    # 2. Branch-Switching
    zv = evecs[:, ei]
    Lm = sn.null_space(zv[np.newaxis, :])
    LdL = Lm.T.dot(dRdXb.todense()).dot(Lm)
    yv = -Lm.dot(np.linalg.solve(LdL, Lm.T.dot(dRdlb)))
    # yv = -ss.linalg.spsolve(dRdXb, dRdlb)

    aval = zv.dot(sp.tensordot(d2RdX2b, zv, axes=1).dot(zv))
    bval = zv.dot(
        sp.tensordot(d2RdX2b, zv, axes=1).dot(yv) +
        sp.tensordot(d2RdX2b, yv, axes=1).dot(zv) + 2.0 * d2RdXlb.dot(zv))
    cval = zv.dot(d2RdX2b.dot(yv).dot(yv) + 2.0 * d2RdXlb.dot(yv) + 0.0)
    if np.abs(aval) > 1e-10:
        sig1 = (-bval - np.sqrt(bval**2 - 4 * aval * cval)) / (2.0 * aval)
        sig2 = (-bval + np.sqrt(bval**2 - 4 * aval * cval)) / (2.0 * aval)
    else:
        sig1 = 0.0
        sig2 = 1e10  # Some large number, representative of infty
    sig1, sig2 = (sig1, sig2)[np.argmin(
        (np.abs(sig1), np.abs(sig2)))], (sig1, sig2)[np.argmax(
            (np.abs(sig1), np.abs(sig2)))]
    du1 = (sig1 * zv + yv)  # Trivial branch
    if min(np.abs(sig1), np.abs(sig2)) == 0.0:
        du1 = du1 / np.linalg.norm(du1)
    al1 = 1.0 / np.sqrt(1.0 + du1.dot(du1))
    du2 = (sig2 * zv + yv)  # Bifurcated Branch
    if min(np.abs(sig1), np.abs(sig2)) == 0.0:
        du2 = du2 / np.linalg.norm(du2)
    al2 = 1.0 / np.sqrt(1.0 + du2.dot(du2))

    others = type(
        '', (), {
            'zv': zv,
            'yv': yv,
            'sig1': sig1,
            'sig2': sig2,
            'biflam': biflam,
            'cpi': cpi
        })()
    return du1, al1, du2, al2, others
def gordon_newell(K, n):

    s = [5, 20, 15, 15]  # in ms
    for i in range(1, K + 1):
        s.insert(len(s), 20)

    u = [1000 / si for si in s]  # in jobs/s
    dim = len(s)

    P0 = [0.1, 0.1, 0.1, 0.1]
    for i in range(1, K + 1):
        P0.insert(len(P0), 0.6 / K)

    P1 = [0.4, 0, 0, 0]
    P2 = [0.4, 0, 0, 0]
    P3 = [0.4, 0, 0, 0]

    for i in range(1, K + 1):
        P1.insert(len(P1), 0.6 / K)
        P2.insert(len(P2), 0.6 / K)
        P3.insert(len(P3), 0.6 / K)

    P = [
        P0, P1, P2, P3
    ]  # probability transition matrix, Pij is the probability of transition from server i+1 to server j+1

    Pi = [1, 0, 0, 0]

    for i in range(1, K + 1):
        Pi.insert(len(Pi), 0)

    for i in range(1, K + 1):
        P.insert(len(P), Pi)

    P = numpy.mat(P)
    # print("P:", "\n", P, "\n")

    A = numpy.mat(P).T - numpy.identity(dim)  # A = P.T-I
    # print("A = P.T - I :", "\n", A)

    U = [[0] * i + [ui] + [0] * (dim - i - 1) for i, ui in enumerate(u)
         ]  # diagonal matrix with u's as values on the main diagonal
    U = numpy.mat(U)
    # print("U:\n", U, "\n")

    # Now we need to solve (A U) X = 0 for X.
    # A * x = b -> solve(A, b)
    # Can't use X = np.linalg.solve(A * U, [0] * dim), because A * U is singular.
    # We can fix this by finding the null space of A * U instead!
    X = splin.null_space(A * U)
    X = X / X[0]  # normalization
    # print(X)
    if (n == 10):
        outputGordonNewellResults(K, X)

    # run for N = 3:
    G = buzen_optimized(X, n)
    # print(G)

    x = X.T[0]
    usages = [xi * G[n - 1] / G[n] for xi in x]
    productivities = [usagei * ui for usagei, ui in zip(usages, u)
                      ]  # Xi = Ui / si = Ui * ui

    J = []

    for i in range(K + 4):
        s = 0
        for j in range(1, n):
            s += pow(x[i], j) * G[n - j] / G[n]
        J.insert(len(J), s)

    Xsystem = 0.1 * productivities[0]
    R = n / Xsystem

    outputBuzenResults(K, n, usages, productivities, J, R)
def simplexAlgorithm(A_dash, b_dash, Z, del_x, m, n):
    optimal_found = False
    beta = 0.001
    unbounded = False
    firstTime = False
    t_end = time.time() + time_outer
    #Algorithm starts here
    while True:
        del_x = np.round(del_x, 3)
        A_tight = findTightConstraintMatrix(del_x, A_dash, b_dash)
        size = A_tight.shape
        if size[0] == n and size[1] == n:  # If Tight matrix is  of size nxn
            inv_flag = False
            print(
                "------------------------------------------------------------------"
            )
            print("\nOur vertex point is ", del_x)
            print("Tight constraints matrix is ")
            print(A_tight)
            try:
                A_inverse = np.linalg.inv(A_tight)
            except:
                inv_flag = True

            if inv_flag == False:
                alpha_values = np.dot(Z.T, A_inverse)
            else:
                print(
                    "Inverse is not possible, so we cannot proceed further"
                )
                break

            print("Alpha values are ", alpha_values)
            print(
                "------------------------------------------------------------------"
            )
            #checking for alpha values
            if alpha_values.size > 1:
                if all(x >= 0 for x in alpha_values):
                    print("\nAll alpha values are positive")
                    optimal_found = True
                    break
                negative = np.where(alpha_values < 0)[0]
                negative_i = negative[0]
            else:
                if alpha_values >= 0:
                    print("\nAll alpha values are positive ")
                    optimal_found = True
                    break
                else:
                    negative_i = 0
            try:
                A_inverse = np.linalg.inv(A_tight)
                k = np.array(A_inverse[:, negative_i] * -1, dtype=float)
            except:
                print("Problem is unbounded/Infeasible")
                exit()

            del_x = np.add(del_x, beta * k)
            previous_point = del_x
            p_end = time.time() + time_inner
            # finding the next vertex
            while (isInFeasibleRegian(A_dash, b_dash, del_x)):
                previous_point = del_x
                del_x = np.add(del_x, beta * k)
                if time.time() > p_end:
                    unbounded = True
                    break
            del_x = previous_point
            if unbounded:
                break

        else:  #If tight constrains are not equal to n
            if A_tight.size == 0:
                if not firstTime:
                    print(
                        "------------------------------------------------------------------"
                    )
                    if isInFeasibleRegian(A_dash, b_dash, del_x):
                        print("The point that we found is feasible")
                    else:
                        print(
                            "The point we found is infeasible, so take some random point and move towards one of the vertices"
                        )
                        del_x = np.zeros(n)
                    firstTime = True
                else:

                    k = del_x
                    # print("Some random direction")
                    previous_point = del_x
                    s_end = time.time() + time_inner
                    while (isInFeasibleRegian(A_dash, b_dash, del_x)):
                        previous_point = del_x
                        del_x = np.add(del_x, beta * k)
                        if time.time() > s_end:
                            unbounded = True
                            break
                    del_x = previous_point
                    if unbounded:
                        break

            else:

                ng = null_space(A_tight)
                try:
                    if ng.T.shape[0] == 1:
                        ns = ng.T[0]
                    else:
                        if ng.T.shape[0] == 0:
                            print("We are getting empty null space vector ")
                            print(
                                "Problem could be unbounded or infeasible. So move in some random direction"
                            )
                            ns = np.ones(n)
                        else:
                            ns = ng.T[0]
                except:
                    print("Null space is not found")
                    ns = np.ones(n)

                l = []
                for i in range(0, ns.size):
                    l.append(ns[i])
                l = np.array(l)
                # print("Direction  ",l)
                previous_point = del_x
                del_x = np.add(del_x, beta * l)
                tight_matrix_equations = np.isclose(np.dot(A_dash, del_x),
                                                    b_dash, 1e-05)
                if (tight_matrix_equations.sum() == n):
                    continue

                previous_point = del_x
                p_end = time.time() + time_inner
                while (isInFeasibleRegian(A_dash, b_dash, del_x)):
                    previous_point = del_x
                    del_x = np.add(del_x, beta * l)
                    if time.time() > p_end:
                        unbounded = True
                        break

                del_x = previous_point
                if unbounded:
                    break
        if time.time() > t_end:
            unbounded = True
            break
        if unbounded:
            break
    return del_x, unbounded, optimal_found
Example #12
def simple_pruno(kspace,
                 calib,
                 kernel_size=(5, 5),
                 coil_axis=-1,
                 sens=None,
                 ph=None,
                 kspace_ref=None):
    '''PRUNO (parallel reconstruction using null operations).'''

    # Move the coil axis to the back
    kspace = np.moveaxis(kspace, coil_axis, -1)
    calib = np.moveaxis(calib, coil_axis, -1)
    nx, ny, _nc = kspace.shape

    # Make a calibration matrix
    kx, ky = kernel_size[:]
    kx2, ky2 = int(kx / 2), int(ky / 2)
    nc = calib.shape[-1]

    # Pull out calibration matrix
    C = view_as_windows(calib, (kx, ky, nc)).reshape((-1, kx * ky * nc))

    # Get the nulling kernels
    n = null_space(C, rcond=1e-3)
    print(n.shape)
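    # Columns of n are "nulling kernels": C @ n is ~0 within the rcond
    # tolerance, i.e. each kernel annihilates every calibration patch.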

    # Test to see if nulling kernels do indeed null
    if sens is not None:
        ws = 8  # full width of sensitivity map spectra
        wd = kx
        wm = wd + ws - 1

        # Choose a target
        xx, yy = int(nx / 3), int(ny / 3)

        # Get source
        wm2 = int(wm / 2)
        S = ph[xx - wm2:xx + wm2, yy - wm2:yy + wm2].copy()
        assert (wm, wm) == S.shape

        # N: grid size of the (assumed square) sensitivity maps, used to
        # normalize the FFT
        N = sens.shape[0]
        sens_spect = 1 / np.sqrt(N**2) * np.fft.fftshift(np.fft.fft2(
            np.fft.ifftshift(sens, axes=(0, 1)), axes=(0, 1)),
                                                         axes=(0, 1))

        # Get the target
        ctr = int(sens_spect.shape[0] / 2)
        ws2 = int(ws / 2)
        T = []
        for ii in range(nc):
            sens0 = sens_spect[ctr - ws2:ctr + ws2, ctr - ws2:ctr + ws2,
                               ii].copy()
            T.append(convolve2d(S, sens0, mode='valid'))
        T = np.moveaxis(np.array(T), 0, -1)
        assert (wd, wd, nc) == T.shape

        # Find local encoding matrix
        #     E S = T
        #
        ShS = S.conj().T @ S
        print(ShS.shape, T.shape)
        ShT = S.conj().T @ T
        print(ShS.shape, ShT.shape)
        E = np.linalg.solve(ShS, ShT).T
        print(E.shape)
Example #13
grid[-1][-1]= L - t


i = 1
j = 0
while i<n:
    b[i] = g*m[j]
    i += 3
    j += 1

print("A: ")
print(grid,"\n")

ns = null_space(grid)
ns = ns*np.sign(ns[0,0])
print("Null space of A:",ns,"\n")

print("b:",b,"\n")

        
spring_c = np.linalg.lstsq(grid, b, rcond=None)[0]
# residual of the least-squares fit
error = np.linalg.norm(grid @ spring_c - b)

print("Least square error:")
print(error, "\n")
print("Spring stiffness vector for the 3n springs:")
s = spring_c[:-1]
Example #14
def covariance_eig(array,
                   epsilon=1.0,
                   norm=None,
                   dims=None,
                   eigvals_only=False):
    r"""
    Return the eigenvalues and eigenvectors of the covariance matrix of `array`, satisfying differential privacy.

    Paper link: http://papers.nips.cc/paper/9567-differentially-private-covariance-estimation.pdf

    Parameters
    ----------
    array : array-like, shape (n_samples, n_features)
        Matrix for which the covariance matrix is sought.

    epsilon : float, default: 1.0
        Privacy parameter :math:`\epsilon`.

    norm : float, optional
        The max l2 norm of any row of the input array.  This defines the spread of data that will be protected by
        differential privacy.

        If not specified, the max norm is taken from the data, but will result in a :class:`.PrivacyLeakWarning`, as it
        reveals information about the data.  To preserve differential privacy fully, `norm` should be selected
        independently of the data, i.e. with domain knowledge.

    dims : int, optional
        Number of eigenvectors to return.  If `None`, return all eigenvectors.

    eigvals_only : bool, default: False
        Only return the eigenvalue estimates.  If True, all the privacy budget is spent on estimating the eigenvalues.

    Returns
    -------
    w : (n_features) array
        The eigenvalues, each repeated according to its multiplicity.

    v : (n_features, dims) array
        The normalized (unit "length") eigenvectors, such that the column ``v[:,i]`` is the eigenvector corresponding to
        the eigenvalue ``w[i]``.

    """

    n_features = array.shape[1]
    dims = n_features if dims is None else min(dims, n_features)
    if not isinstance(dims, Integral):
        raise TypeError(
            f"Number of requested dimensions must be integer-valued, got {type(dims)}"
        )
    if dims < 0:
        raise ValueError(
            f"Number of requested dimensions must be non-negative, got {dims}")

    max_norm = np.linalg.norm(array, axis=1).max()
    if norm is None:
        warnings.warn(
            "Data norm has not been specified and will be calculated on the data provided.  This will result "
            "in additional privacy leakage. To ensure differential privacy and no additional privacy "
            "leakage, specify `data_norm` at initialisation.",
            PrivacyLeakWarning)
        norm = max_norm
    elif max_norm > norm and not np.isclose(max_norm, norm):
        raise ValueError(
            f"Rows of input array must have l2 norm of at most {norm}, got {max_norm}"
        )

    cov = array.T.dot(array) / (norm**2)
    eigvals = np.sort(np.linalg.eigvalsh(cov))[::-1]
    epsilon_0 = epsilon if eigvals_only else epsilon / (dims +
                                                        (dims != n_features))

    mech_eigvals = LaplaceBoundedDomain(epsilon=epsilon_0,
                                        lower=0,
                                        upper=float("inf"),
                                        sensitivity=2)
    noisy_eigvals = np.array(
        [mech_eigvals.randomise(eigval) for eigval in eigvals]) * (norm**2)

    if eigvals_only:
        return noisy_eigvals

    # When estimating all eigenvectors, we don't need to spend budget for the dth vector
    epsilon_i = epsilon / (dims + (dims != n_features))
    cov_i = cov
    proj_i = np.eye(n_features)

    theta = np.zeros((0, n_features))
    mech_cov = Bingham(epsilon=epsilon_i)

    for _ in range(dims):
        if cov_i.size > 1:
            u_i = mech_cov.randomise(cov_i)
        else:
            u_i = np.ones((1, ))

        theta_i = proj_i.T.dot(u_i)
        theta = np.vstack((theta, theta_i))

        if cov_i.size > 1:
            proj_i = null_space(theta).T
            cov_i = proj_i.dot(cov).dot(proj_i.T)

    return noisy_eigvals, theta.T
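
# Hedged usage sketch (assumes rows already clipped to the stated norm so the
# PrivacyLeakWarning is avoided):
# rng = np.random.default_rng(0)
# X = rng.normal(size=(200, 5))
# X /= np.maximum(1, np.linalg.norm(X, axis=1, keepdims=True))  # norm <= 1
# w, v = covariance_eig(X, epsilon=1.0, norm=1.0)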
Example #15
def pqmc_values(states, Q, pri):
    if len(Q) == 0:
        print("Q is null!")
        return

    super_operator_demension = list(Q.values())[0].dimension
    state_demension = np.max(states) + 1
    I_c = np.eye(state_demension, state_demension, dtype=complex)
    I_H = np.eye(super_operator_demension,
                 super_operator_demension,
                 dtype=complex)

    # compute epsilon_m
    epsilon_m = np.zeros([(super_operator_demension**2) * (state_demension**2),
                          (super_operator_demension**2) *
                          (state_demension**2)],
                         dtype=complex)
    for key, value in Q.items():
        E_i_kraus = value.kraus
        adjacency_matrix = np.zeros([state_demension, state_demension],
                                    dtype=complex)
        adjacency_matrix[key[1], key[0]] = 1.0
        for e in E_i_kraus:
            E = np.kron(adjacency_matrix, e)
            epsilon_m += np.kron(E, np.conjugate(E))

    # compute epsilon_m_infinity
    if verbose:
        print("epsilon_m:")
        print(epsilon_m.shape)
    epsilon_m_so = SuperOperator(kraus=[], matrix_representation=epsilon_m)

    # compute P_even
    P_even = np.zeros([
        super_operator_demension * state_demension,
        super_operator_demension * state_demension
    ],
                      dtype=complex)
    bscc_min_pri_key = []
    bscc_min_pri_value = []
    B = epsilon_m_so.get_bscc(np.kron(I_c, I_H))
    for b in B:
        C_b = []
        vecs = orth(b)
        for j in range(vecs.shape[1]):
            vec = vecs[:, j]
            nonzero_index = -1
            flag = False
            for i in range(state_demension):
                if not is_zero_array(vec[i * super_operator_demension:(1 + i) *
                                         super_operator_demension]):
                    if not flag:
                        flag = True
                        nonzero_index = i
                    else:
                        flag = False
                        break
            if flag:
                C_b.append(pri[nonzero_index])
        bscc_min_pri_key.append(b)
        bscc_min_pri_value.append(np.min(C_b))
    EP = set()
    for key, value in pri.items():
        if value % 2 == 0:
            EP.add(value)
    for k in EP:
        P_k = np.zeros([
            super_operator_demension * state_demension,
            super_operator_demension * state_demension
        ],
                       dtype=complex)
        for i in range(len(bscc_min_pri_key)):
            if bscc_min_pri_value[i] == k:
                P_k += bscc_min_pri_key[i]
        P_even += P_k
    if verbose:
        print("P_even:")
        print(P_even)

    P_vec = np.zeros([(super_operator_demension**2) * (state_demension**2), 1],
                     dtype=complex)

    for i in range(super_operator_demension):
        i_ket = np.matrix(I_H[i].reshape([super_operator_demension, 1]))
        for j in range(state_demension):
            s_ket = I_c[j].reshape([state_demension, 1])
            P_vec += np.kron(np.kron(s_ket, i_ket), np.kron(s_ket, i_ket))
    P_vec = np.dot(np.kron(np.kron(P_even, I_c), I_H), P_vec)
    if verbose:
        print("P_vec:")
        print(P_vec)

    eigen_values, eigen_vectors = np.linalg.eig(np.matrix(epsilon_m))
    Y = list()

    for i in range(len(eigen_values)):
        if complex_equal(eigen_values[i], 1.0 + 0.j):
            Y.append(np.array(eigen_vectors[:, i]).reshape([
                -1,
            ]))
    if verbose:
        print("Y")
        print(Y)

    v_ket = np.zeros([(super_operator_demension**2) * (state_demension**2), 1],
                     dtype=complex)
    for i in range(len(Y)):
        coefficient = list()
        for j in range(len(Y)):
            if j != i:
                coefficient.append(Y[j])
        T_dual = np.array(
            np.matrix(epsilon_m).H -
            np.eye(epsilon_m.shape[0], epsilon_m.shape[1], dtype=complex))
        for k in range(len(T_dual)):
            coefficient.append(T_dual[k])
        x_prim_ket = np.array(null_space(np.array(coefficient))[:,
                                                                0]).reshape([
                                                                    -1,
                                                                ])
        x_ket = x_prim_ket / np.inner(Y[i], x_prim_ket)
        v_ket += (np.inner(Y[i], P_vec.reshape([
            -1,
        ])) * x_ket).reshape([-1, 1])

    if verbose:
        print("v_ket:")
        print(v_ket)

    M = np.zeros([
        super_operator_demension * state_demension,
        super_operator_demension * state_demension
    ],
                 dtype=complex)
    for i in range(super_operator_demension):
        i_bra = I_H[i].reshape([1, super_operator_demension])
        for j in range(state_demension):
            s_bra = I_c[j].reshape([1, state_demension])
            M += np.kron(
                np.dot(
                    np.dot(np.kron(np.kron(np.kron(I_c, I_H), s_bra), i_bra),
                           v_ket), s_bra), i_bra)

    # M = epsilon_m_infinity_so.get_dual_super_operator().apply_on_operator(P_even)

    M = np.matrix(M)
    res = dict()
    for i in range(state_demension):
        E_s_ket = np.matrix(np.kron(I_c[i].reshape([state_demension, 1]), I_H))
        E_s_bra = np.matrix(np.kron(I_c[i].reshape([1, state_demension]), I_H))
        res[i] = E_s_bra * M * E_s_ket

    return res
def ransac(corresp1, corresp2):
    frac = 0
    niter = 0
    while (frac <= 0.75):

        #Generate 4 random numbers from the set
        Lmp = len(corresp1)
        r = rn.sample(range(0, Lmp), 4)
        a = [corresp1[r[i]] for i in range(0, len(r))]
        b = [corresp2[r[i]] for i in range(0, len(r))]
        #Take these 4 points and find homography
        #Fill in the matrix
        vsize = 9
        eqns = 4
        A = np.zeros((int(2 * eqns), vsize))
        #print(shape(A))
        #Loop to fill in the values
        for i in range(0, eqns):
            A[int(2 * i)][0] = b[i][0]
            A[int(2 * i)][1] = b[i][1]
            A[int(2 * i)][2] = 1
            A[int(2 * i)][3] = 0
            A[int(2 * i)][4] = 0
            A[int(2 * i)][5] = 0
            A[int(2 * i)][6] = -b[i][0] * a[i][0]
            A[int(2 * i)][7] = -b[i][1] * a[i][0]
            A[int(2 * i)][8] = -a[i][0]

            A[int(2 * i) + 1][0] = 0
            A[int(2 * i) + 1][1] = 0
            A[int(2 * i) + 1][2] = 0
            A[int(2 * i) + 1][3] = b[i][0]
            A[int(2 * i) + 1][4] = b[i][1]
            A[int(2 * i) + 1][5] = 1
            A[int(2 * i) + 1][6] = -b[i][0] * a[i][1]
            A[int(2 * i) + 1][7] = -b[i][1] * a[i][1]
            A[int(2 * i) + 1][8] = -a[i][1]

        #Find nullspace of the matrix
        h = null_space(A)
        #print(shape(h))
        #Put h in order
        H = h.reshape((3, 3))

        #Check with remaining points and see fraction
        C = []
        iterset = list(set(np.arange(0, Lmp)).difference(r))
        bvec = np.zeros((3, 1))
        avec = np.zeros((2, 1))
        eps = 12
        #12 0.75 good
        #16 0.75 works
        bvec[2] = 1
        for item in iterset:
            bvec[0] = corresp2[item][0]
            bvec[1] = corresp2[item][1]

            atemp = H @ bvec
            avec[0] = atemp[0] / atemp[2]
            avec[1] = atemp[1] / atemp[2]

            dist = np.sqrt(
                pow(corresp1[item][0] - avec[0], 2) +
                pow(corresp1[item][1] - avec[1], 2))
            if dist < eps:
                C.append(item)

        #Check how good in the consensus set
        frac = len(C) / len(iterset)
        niter = niter + 1
    return H, frac, niter, C
Example #17
l = 1
T = 1
mu = l / 5
a = 1 / (9 * T)
b = 1 / (T)

A = np.array([[-(mu + a), a], [b, -(mu + b + l)]])
B = np.array([[mu, 0], [0, mu]])
C = np.array([[0, 0], [0, l]])

A0 = np.array([[-a, a], [b, -(b + l)]])


def find_R():
    # Fixed-point iteration for R satisfying C + R^2 B + R A = 0
    R = np.zeros((2, 2))
    for i in range(1000):
        R = -np.dot((C + np.dot(np.dot(R, R), B)), np.linalg.inv(A))
    return R


R = find_R()

P0 = lin.null_space(np.transpose(A0 + np.dot(R, B)))
print(A0 + np.dot(R, B))

P0 = P0 / np.sum(P0)
print(P0)

Pi = lin.null_space(np.transpose(C + np.dot(R, B) + np.dot(R, np.dot(R, B))))
print(Pi)
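
# Sanity check: P0 lies in the null space of (A0 + R B)^T, i.e. it solves
# (A0 + R B)^T P0 = 0 up to floating error
print(np.allclose(np.transpose(A0 + np.dot(R, B)) @ P0, 0))  # True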
Example #18
def get_defined_functions(exe_num):
    if exe_num == 1:
        A = np.array([[1, 2, 1, 2], [1, 1, 2, 4]])
        b = np.array([3, 5])
        c = np.array([1, 1.5, 1, 1])

        F = null_space(A)

        # Getting initial x
        x0, _, _, _ = np.linalg.lstsq(A, b, rcond=None)

        def set_f_t(t):
            def f(z):
                f0 = t * np.dot(c, (np.dot(F, z) + x0))
                I_ = [
                    restricted_log(np.dot(F[i, :], z) + x0[i])
                    for i in range(4)
                ]

                return f0 - sum(I_)

            return f

        def set_get_f_t(t):
            def get_f(z0, d):
                def f(alpha):
                    z = z0 + alpha * d
                    f0 = np.dot((t * c), (np.dot(F, z) + x0))
                    I_ = [
                        restricted_log(np.dot(F[i, :], z) + x0[i])
                        for i in range(4)
                    ]

                    return f0 - sum(I_)

                return f

            return get_f

        def set_g_t(t):
            def g(z):
                logs_divs = [(x0[i] + F[i, 0] * z[0] + F[i, 1] * z[1])
                             for i in range(4)]
                logs_dev = np.array([
                    sum([F[i, 0] / logs_divs[i] for i in range(4)]),
                    sum([F[i, 1] / logs_divs[i] for i in range(4)])
                ])
                f0_dev = np.array([
                    sum([F[i, 0] * t * np.conj(c[i]) for i in range(4)]),
                    sum([F[i, 1] * t * np.conj(c[i]) for i in range(4)])
                ])

                return f0_dev - logs_dev

            return g

        def set_get_g_t(t):
            def get_g(z0, d):
                def g(alpha):
                    z = z0 + alpha * d
                    logs_dev = sum([
                        np.dot(F[i, :], z) / (np.dot(F[i, :], z) + x0[i])
                        for i in range(4)
                    ])
                    f0_dev = t * np.dot(c, np.dot(F, d))
                    return f0_dev - logs_dev

                return g

            return get_g

        def set_H_t(t):
            def H(z):
                logs_divs = [(x0[i] + F[i, 0] * z[0] + F[i, 1] * z[1])
                             for i in range(4)]
                logs_dev = np.array(
                    [[F[i, 0] / logs_divs[i] for i in range(4)],
                     [F[i, 1] / logs_divs[i] for i in range(4)]])
                aux = logs_dev**2
                dz1z1 = sum(aux[0])
                dz2z2 = sum(aux[1])
                dz1z2 = sum([(F[i, 0] * F[i, 1]) / logs_divs[i]**2
                             for i in range(4)])

                return np.array([[dz1z1, dz1z2], [dz1z2, dz2z2]])

            return H

        def original(z):
            min_x = np.dot(F, z) + x0
            min_fx = np.dot(c, min_x)

            return min_x, min_fx

        return x0, set_f_t, set_g_t, set_H_t, set_get_f_t, set_get_g_t, original

    elif exe_num == 2:

        def c1(x):
            A = [[0.25, 0], [0, 1.0]]
            b = [0.5, 0]
            aux1 = -1.0 * np.dot(np.dot(x, A), x)
            aux2 = np.dot(x[:2], b)

            return aux1 + aux2 + 3.0 / 4.0

        def c2(x):
            C = [[5.0, 3.0], [3.0, 5.0]]
            d = [11 / 2.0, 13 / 2.0]
            aux1 = -1.0 / 8 * np.dot(np.dot(x, C), x)
            aux2 = np.dot(x, d)

            return aux1 + aux2 - 35 / 2.0

        def set_f_t(t):
            def f(x):
                aux1 = (x[0] - x[2])**2 + (x[1] - x[3])**2
                aux2 = restricted_log(c1(x[:2])) + restricted_log(c2(x[2:]))

                return aux1 - aux2

            return f

        def set_get_f_t(t):
            def get_f(x0, d):
                def f(alpha):
                    x = x0 + alpha * d
                    aux1 = (x[0] - x[2])**2 + (x[1] - x[3])**2
                    aux2 = restricted_log(c1(x[:2])) + restricted_log(c2(
                        x[2:]))

                    return aux1 - aux2

                return f

            return get_f

        def set_g_t(t):
            def g(x):
                logs_div = [1 / c1(x[:2]), 1 / c2(x[2:])]
                dx0 = 2 * t * (x[0] - x[2]) - logs_div[0] * (-0.5 * x[0] + 0.5)
                dx1 = 2 * t * (x[1] - x[3]) - logs_div[0] * (-2 * x[1])
                dx2 = -2 * t * (x[0] - x[2]) - logs_div[1] * (
                    -(5 / 4.0) * x[2] - (3 / 4.0) * x[3] + 11 / 2.0)
                dx3 = -2 * t * (x[1] - x[3]) - logs_div[1] * (
                    -(3 / 4.0) * x[2] - (5 / 4.0) * x[3] + 13 / 2.0)

                return np.array([dx0, dx1, dx2, dx3])

            return g

        def set_get_g_t(t):
            def get_g(x0, d):
                def g(alpha):
                    x = x0 + alpha * d

                    ddist = 2 * t * ((x[0] - x[2]) * (d[0] - d[2]) +
                                     (x[1] - x[3]) * (d[1] - d[3]))
                    dc1 = (0.5 * x[0] * d[0] + 2 * x[1] * d[1] -
                           0.5 * d[0]) / c1(x[:2])
                    dc2 = ((5 / 4.0) * x[2] * d[2] + (3 / 4.0) *
                           (x[2] * d[3] + d[2] * x[3]) +
                           (5 / 4.0) * x[3] * d[3] - (11 / 2.0) * d[2] -
                           (13 / 2.0) * d[3]) / c2(x[2:])

                    return ddist + dc1 + dc2

                return g

            return get_g

        def set_H_t(t):
            def H(x):
                logs_div = [1 / c1(x[:2]), 1 / c2(x[2:])]
                dc1x1 = -0.5 * x[0] + 0.5
                dc1x2 = -2 * x[1]
                dc2x3 = -(5 / 4.0) * x[2] - (3 / 4.0) * x[3] + 11 / 2.0
                dc2x4 = -(3 / 4.0) * x[2] - (5 / 4.0) * x[3] + 13 / 2.0

                dx1x1 = 2 * t + (logs_div[0] * dc1x1)**2 + 0.5 * logs_div[0]
                dx1x2 = logs_div[0]**2 * dc1x1 * dc1x2
                dx1x3 = -2 * t
                dx1x4 = 0

                dx2x2 = 2 * t + (logs_div[0] * dc1x2)**2 + 2 * logs_div[0]
                dx2x3 = 0
                dx2x4 = -2 * t

                dx3x3 = 2 * t + (logs_div[1] *
                                 dc2x3)**2 + (5 / 4.0) * logs_div[1]
                dx3x4 = logs_div[1]**2 * dc2x3 * dc2x4 + (3 /
                                                          4.0) * logs_div[1]

                dx4x4 = 2 * t + (logs_div[1] *
                                 dc2x4)**2 + (5 / 4.0) * logs_div[1]

                return np.array([[dx1x1, dx1x2, dx1x3, dx1x4],
                                 [dx1x2, dx2x2, dx2x3, dx2x4],
                                 [dx1x3, dx2x3, dx3x3, dx3x4],
                                 [dx1x4, dx2x4, dx3x4, dx4x4]])

            return H

        def original(x):
            x = np.array(x)
            return x, np.linalg.norm(x[:2] - x[2:])

        return set_f_t, set_g_t, set_H_t, set_get_f_t, set_get_g_t, original

    elif exe_num == 3:
        A = np.array([[1, 1, 1]])
        b = np.array([3])
        C = np.array([[4, 0, 0], [0, 1, -1], [0, -1, 1]])
        d = np.array([-8, -6, -6])

        F = null_space(A)

        # Getting initial x
        x0, _, _, _ = np.linalg.lstsq(A, b, rcond=None)

        def set_f_t(t):
            def f(z):
                f0 = np.dot(np.dot(0.5 * (np.dot(F, z) + x0), C),
                            np.dot(F, z) + x0) + np.dot(np.dot(F, z) + x0, d)
                I_ = [
                    restricted_log(np.dot(F[i, :], z) + x0[i])
                    for i in range(3)
                ]

                return t * f0 - sum(I_)

            return f

        def set_get_f_t(t):
            def get_f(z0, d_):
                def f(alpha):
                    z = z0 + alpha * d_
                    f0 = np.dot(np.dot(0.5 * (np.dot(F, z) + x0), C),
                                np.dot(F, z) + x0) + np.dot(
                                    np.dot(F, z) + x0, d)
                    I_ = [
                        restricted_log(np.dot(F[i, :], z) + x0[i])
                        for i in range(3)
                    ]

                    return t * f0 - sum(I_)

                return f

            return get_f

        def set_g_t(t):
            def g(z):
                z_conj = np.conj(z)
                F_conj = np.conj(F)
                x0_conj = np.conj(x0)

                df01 = [
                    sum([
                        F[i, j] * (C[i, 0] *
                                   (x0_conj[i] + F_conj[i, 0] * z_conj[0] +
                                    F_conj[i, 1] * z_conj[1])) / 2.0
                        for i in range(3)
                    ]) for j in range(2)
                ]
                df02 = [
                    sum([d[i] * F_conj[i, j] for i in range(3)])
                    for j in range(2)
                ]
                df03 = [
                    x0[i] + F[i, 0] * z[0] + F[i, 1] * z[1] for i in range(3)
                ]
                df04 = [
                    sum([
                        df03[i] *
                        (C[0, i] * F_conj[0, j] + C[1, i] * F_conj[1, j] +
                         C[2, i] * F_conj[2, j]) / 2.0 - F[i, j] / df03[i]
                        for i in range(3)
                    ]) for j in range(2)
                ]

                return np.array(
                    np.array(df01) + np.array(df02) + np.array(df04))

            return g

        def set_get_g_t(t):
            def get_g(z0, d_):
                def g(alpha):
                    z = z0 + alpha * d_
                    z_conj = np.conj(z)
                    F_conj = np.conj(F)
                    x0_conj = np.conj(x0)

                    df01 = [
                        sum([
                            F[i, j] * (C[i, 0] *
                                       (x0_conj[i] + F_conj[i, 0] * z_conj[0] +
                                        F_conj[i, 1] * z_conj[1])) / 2.0
                            for i in range(3)
                        ]) for j in range(2)
                    ]

                    df02 = [
                        sum([d[i] * F_conj[i, j] for i in range(3)])
                        for j in range(2)
                    ]
                    df03 = [
                        x0[i] + F[i, 0] * z[0] + F[i, 1] * z[1]
                        for i in range(3)
                    ]
                    df04 = [
                        sum([
                            df03[i] *
                            (C[0, i] * F_conj[0, j] + C[1, i] * F_conj[1, j] +
                             C[2, i] * F_conj[2, j]) / 2.0 - F[i, j] / df03[i]
                            for i in range(3)
                        ]) for j in range(2)
                    ]

                    return np.array(
                        np.array(df01) + np.array(df02) + np.array(df04))

                return g

            return get_g

        def set_H_t(t):
            def H(z):
                F_conj = np.conj(F)
                der1 = np.array([[
                    sum([(C[i, j] * (F_conj[i, 0])) / 2.0 for i in range(3)]),
                    sum([(C[i, j] * (F_conj[i, 1])) / 2.0 for i in range(3)])
                ] for j in range(3)])
                der2 = [(x0[i] + F[i, 0] * z[0] + F[i, 1] * z[1])**2
                        for i in range(3)]

                dz1z1 = 2 * sum([F[i, 0] * der1[i, 0]
                                 for i in range(3)]) + sum(
                                     [F[i, 0]**2 / der2[i] for i in range(3)])
                dz1z2 = sum(F[i, 1] * der1[i, 0] + F[i, 0] * der1[i, 1]
                            for i in range(3)) + sum([
                                F[i, 0] * F[i, 1] / der2[i] for i in range(3)
                            ])
                dz2z2 = 2 * sum([F[i, 1] * der1[i, 1]
                                 for i in range(3)]) + sum(
                                     [F[i, 1]**2 / der2[i] for i in range(3)])

                return t * np.array([[dz1z1, dz1z2], [dz1z2, dz2z2]])

            return H

        def original(z):
            min_x = np.dot(F, z) + x0
            min_fx = 0.5 * np.dot(np.dot(min_x, C), min_x) + np.dot(min_x, d)

            return min_x, min_fx

        return x0, set_f_t, set_g_t, set_H_t, set_get_f_t, set_get_g_t, original

    elif exe_num == 4:
        F0 = np.array([[0.5, 0.55, 0.33, 2.38], [0.55, 0.18, -1.18, -0.4],
                       [0.33, -1.18, -0.94, 1.46], [2.38, -0.4, 1.46, 0.17]])

        F1 = np.array([[5.19, 1.54, 1.56, -2.8], [1.54, 2.2, 0.39, -2.5],
                       [1.56, 0.39, 4.43, 1.77], [-2.8, -2.5, 1.77, 4.06]])

        F2 = np.array([[-1.11, 0, -2.12, 0.38], [0, 1.91, -0.25, -0.58],
                       [-2.12, -0.25, -1.49, 1.45], [0.38, -0.58, 1.45, 0.63]])

        F3 = np.array([[2.69, -2.24, -0.21, -0.74], [-2.24, 1.77, 1.16, -2.01],
                       [-0.21, 1.16, -1.82, -2.79],
                       [-0.74, -2.01, -2.79, -2.22]])

        F4 = np.array([[0.58, -2.19, 1.69, 1.28], [-2.19, -0.05, -0.01, 0.91],
                       [1.69, -0.01, 2.56, 2.14], [1.28, 0.91, 2.14, -0.75]])

        c = np.array([1, 0, 2, -1])

        F = [F1, F2, F3, F4]

        x0 = np.zeros(4)
        while np.linalg.eigvalsh(F0 + sum([x0[i] * F[i]
                                           for i in range(4)]))[0] <= 0:
            x0 = np.random.randn(4)

        def set_f_t(t):
            def f(x):
                f0 = t * np.dot(c, x)
                I_ = restricted_log(
                    np.linalg.eigvalsh(F0 +
                                       sum([x[i] * F[i]
                                            for i in range(4)]))[0])

                return f0 - I_

            return f

        def set_get_f_t(t):
            def get_f(x0, d):
                def f(alpha):
                    x = x0 + alpha * d
                    f0 = t * np.dot(c, x)
                    I_ = restricted_log(
                        np.linalg.eigvalsh(F0 +
                                           sum([x[i] * F[i]
                                                for i in range(4)]))[0])

                    return f0 - I_

                return f

            return get_f

        def set_g_t(t):
            def g(x):
                # np.matrix is deprecated; a plain ndarray with scalar
                # multiplication yields the same adjugate
                F_sum = F0 + sum([x[i] * F[i] for i in range(4)])
                F_adf = np.linalg.inv(F_sum) * np.linalg.det(F_sum)
                F_dev = [np.trace(np.dot(F_adf, F[i])) for i in range(4)]
                log_dev = 1.0 / np.linalg.det(F_sum)

                return np.array(
                    [t * c[i] - log_dev * F_dev[i] for i in range(4)])

            return g

        def set_get_g_t(t):
            def get_g(x0, d):
                def g(alpha):
                    x = x0 + alpha * d
                    F_sum = F0 + sum([x[i] * F[i] for i in range(4)])
                    F_adf = np.linalg.inv(F_sum) * np.linalg.det(F_sum)
                    # use a matrix product here; elementwise `*` on ndarrays
                    # would compute the wrong trace (cf. set_H_t below)
                    F_dev = [np.trace(np.dot(F_adf, d[i] * F[i])) for i in range(4)]
                    log_dev = 1.0 / np.linalg.det(F_sum)

                    return t * np.dot(c, d) - sum(
                        [log_dev * F_dev[i] for i in range(4)])

                return g

            return get_g

        def set_H_t(t):
            def H(x):
                F_sum = F0 + sum([x[i] * F[i] for i in range(4)])
                F_adj = np.linalg.inv(F_sum) * np.linalg.det(F_sum)
                F_det = np.linalg.det(F_sum)
                F_dev = [np.trace(np.dot(F_adj, F[i])) for i in range(4)]
                der1 = np.dot(F_dev, F_dev) / F_det

                X_list = [F_adj.dot(Fi) for Fi in F]
                der2 = np.array([[
                    -np.trace(Xi) * np.trace(Xj) - np.sum(Xi.T * Xj)
                    for Xi in X_list
                ] for Xj in X_list])
                return (-der1 - der2) / F_det

            return H

        def original(x):
            return x, np.dot(c, x)

        return x0, set_f_t, set_g_t, set_H_t, set_get_f_t, set_get_g_t, original

    elif exe_num == 5:

        def set_f_t(t):
            def f(x):
                f0 = 100 * (x[0]**2 - x[1])**2 + (x[0] - 1)**2 + 90 * (
                    x[2]**2 - x[3])**2 + (x[2] - 1)**2 + 10.1 * (
                        (x[1] - 1)**2 +
                        (x[3] - 1)**2) + 19.8 * (x[1] - 1) * (x[3] - 1)
                I_ = sum([restricted_log(x[i] + 10) for i in range(4)]) + sum(
                    [restricted_log(-x[i] + 10) for i in range(4)])

                return t * f0 - I_

            return f

        def set_get_f_t(t):
            def get_f(x0, d):
                def f(alpha):
                    x = x0 + alpha * d
                    f0 = 100 * (x[0]**2 - x[1])**2 + (x[0] - 1)**2 + 90 * (
                        x[2]**2 - x[3])**2 + (x[2] - 1)**2 + 10.1 * (
                            (x[1] - 1)**2 +
                            (x[3] - 1)**2) + 19.8 * (x[1] - 1) * (x[3] - 1)
                    I_ = sum([
                        restricted_log(x[i] + 10) for i in range(4)
                    ]) + sum([restricted_log(-x[i] + 10) for i in range(4)])

                    return t * f0 - I_

                return f

            return get_f

        def set_g_t(t):
            def g(x):
                return np.array([
                    -1.0 / (x[0] - 10) - 1 / (x[0] + 10) - t *
                    (400 * x[0] * (-x[0]**2 + x[1]) - 2 * x[0] + 2), t *
                    (-200 * x[0]**2 + (1101 * x[1]) / 5 +
                     (99 * x[3]) / 5 - 40) - 1 / (x[1] - 10) - 1 / (x[1] + 10),
                    -1.0 / (x[2] - 10) - 1 / (x[2] + 10) - t *
                    (360 * x[2] * (-x[2]**2 + x[3]) - 2 * x[2] + 2),
                    t * (-180 * x[2]**2 + (99 * x[1]) / 5 +
                         (1001 * x[3]) / 5 - 40) - 1 / (x[3] - 10) - 1 /
                    (x[3] + 10)
                ])

            return g

        def set_get_g_t(t):
            def get_g(x0, d):
                def g(alpha):
                    x = x0 + alpha * d
                    d_f0 = 200 * (x[0]**2 - x[1]) * (
                        2 * x[0] * d[0] - d[1]
                    ) + 2 * (x[0] - 1) * d[0] + 180 * (x[2]**2 - x[3]) * (
                        2 * x[2] * d[2] -
                        d[3]) + 2 * (x[2] - 1) * d[2] + 10.1 * (
                            2 * (x[1] - 1) * d[1] + 2 *
                            (x[3] - 1) * d[3]) + 19.8 * (d[1] * (x[3] - 1) +
                                                         (x[1] - 1) * d[3])
                    d_logs = sum([
                        d[i] * (1.0 / (-x[i] - 10) - 1.0 / (x[i] - 10))
                        for i in range(4)
                    ])

                    # t multiplies the objective part, matching set_g_t above
                    return t * d_f0 + d_logs

                return g

            return get_g

        def set_H_t(t):
            def H(x):
                dx1x1 = 1 / (x[0] - 10)**2 + 1 / (x[0] + 10)**2 + t * (
                    1200 * x[0]**2 - 400 * x[1] + 2)
                dx1x2 = -400 * t * x[0]
                dx1x3 = dx1x4 = 0
                dx2x2 = (1101 * t) / 5 + 1 / (x[1] - 10)**2 + 1 / (x[1] +
                                                                   10)**2
                dx2x3 = 0
                dx2x4 = (99 * t) / 5
                dx3x3 = 1 / (x[2] - 10)**2 + 1 / (x[2] + 10)**2 + t * (
                    1080 * x[2]**2 - 360 * x[3] + 2)
                dx3x4 = -360 * t * x[2]
                dx4x4 = (1001 * t) / 5 + 1 / (x[3] - 10)**2 + 1 / (x[3] +
                                                                   10)**2

                return np.array([[dx1x1, dx1x2, dx1x3, dx1x4],
                                 [dx1x2, dx2x2, dx2x3, dx2x4],
                                 [dx1x3, dx2x3, dx3x3, dx3x4],
                                 [dx1x4, dx2x4, dx3x4, dx4x4]])

            return H

        def original(x):
            min_f = 100 * (x[0]**2 - x[1])**2 + (x[0] - 1)**2 + 90 * (
                x[2]**2 - x[3])**2 + (x[2] - 1)**2 + 10.1 * (
                    (x[1] - 1)**2 +
                    (x[3] - 1)**2) + 19.8 * (x[1] - 1) * (x[3] - 1)
            return x, min_f

        return set_f_t, set_g_t, set_H_t, set_get_f_t, set_get_g_t, original
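
These factories only assemble the barrier subproblem f_t together with its gradient and Hessian; solving the original constrained problem still needs an outer loop over an increasing barrier parameter t. A minimal sketch of such a driver, assuming a plain Newton inner solver (the function name, iteration counts, and the multiplier mu are illustrative, not from the source):

import numpy as np

def barrier_driver(set_f_t, set_g_t, set_H_t, x, t0=1.0, mu=10.0,
                   outer_iters=8, newton_iters=20):
    # Hedged sketch of a log-barrier outer loop; not part of the source.
    # x must be strictly feasible for the log barrier to be finite.
    t = t0
    for _ in range(outer_iters):
        g, H = set_g_t(t), set_H_t(t)
        for _ in range(newton_iters):
            step = np.linalg.solve(H(x), -g(x))  # Newton step on f_t
            x = x + step
            if np.linalg.norm(step) < 1e-10:
                break
        t *= mu  # tighten the barrier and re-solve
    return x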
Example no. 19
def PseudoArchlength_2D_AxiSym(T0in, X0in, mu, ds):
    """
    Pseudo-Arclength Continuation algorithm for 2D Axi Symmetric Periodic LPO.
    Suitable for IC = [x0 0 0 0 v0 0] (Lyapunov, DRO).

    :param T0in:
    :param X0in:
    :param mu:
    :param ds:
    :return:
    """

    # Initialization
    T0 = deepcopy(T0in)
    X0 = deepcopy(X0in)
    STM0 = reshape(eye(6), (1, 36))
    tol = 1e-12
    Iter = 0
    MaxIter = 350

    # Integrate the initial condition
    # Up to the next intersection with the xz-plane
    _, _, te, xxMMe, ie = odelay(STM,
                                 concatenate((X0, STM0), axis=None),
                                 linspace(0, T0),
                                 TOLERANCE=1e-014,
                                 events=[xzPlaneCrossing],
                                 args=(mu, ))
    if len(ie) == 0:
        sys.exit('Unable to compute first guess solution')

    # Pick the state at the half period
    tf = te[-1]
    Xf = xxMMe[-1, 0:6]
    STMf = reshape(xxMMe[-1, 6:42], (6, 6))

    # Find Null Space direction along the family
    ff = CR3BP(Xf, tf, mu)
    DFk = array([[STMf[1, 0], STMf[1, 4], ff[1]],
                 [STMf[3, 0], STMf[3, 4], ff[3]]])
    Fk = array([[Xf[1]], [Xf[3]]])
    k = linalg.null_space(DFk)

    # Augmented constraint
    x = array([[X0[0]], [X0[4]], [tf]])
    xc = array([[X0in[0]], [X0in[4]], [T0in / 2]])
    Gk = concatenate((Fk, transpose(x - xc) @ k - ds))
    DGk = concatenate((DFk, transpose(k)))
    delta = linalg.solve(DGk, Gk)  # [dx0, dv0, dt]

    # Update iteration parameters
    err = max(abs(Gk))

    # Pseudo-Arclength Continuation algorithm
    while err > tol and Iter < MaxIter:

        # Correct the initial state for the new iteration
        T0 = 2 * (tf + delta[2, 0])
        X0[0] -= delta[0, 0]
        X0[4] -= delta[1, 0]

        # Integrate the(k-1)-th corrected initial condition
        # Up to the next intersection with the xz-plane
        _, _, te, xxMMe, ie = odelay(STM,
                                     concatenate((X0, STM0), axis=None),
                                     linspace(0, T0),
                                     TOLERANCE=1e-014,
                                     events=[xzPlaneCrossing],
                                     args=(mu, ))
        if len(ie) == 0:
            sys.exit('Unable to compute corrected solution')

        # Pick the state at the half period of the corrected orbit
        # (without this refresh, the Newton-Raphson step below would keep
        # reusing the stale tf, Xf and STMf from the first guess)
        tf = te[-1]
        Xf = xxMMe[-1, 0:6]
        STMf = reshape(xxMMe[-1, 6:42], (6, 6))

        # Newton-Raphson Method
        ff = CR3BP(Xf, tf, mu)
        DFk = array([[STMf[1, 0], STMf[1, 4], ff[1]],
                     [STMf[3, 0], STMf[3, 4], ff[3]]])
        Fk = array([[Xf[1]], [Xf[3]]])

        # Augmented constraint
        x = array([[X0[0]], [X0[4]], [tf]])
        xc = array([[X0in[0]], [X0in[4]], [T0in / 2]])
        Gk = concatenate((Fk, transpose(x - xc) @ k - ds))
        DGk = concatenate((DFk, transpose(k)))
        delta = linalg.solve(DGk, Gk)  # [dx0, dv0, dt]

        # Update iteration parameters
        err = max(abs(Gk))
        Iter += 1
        print(err)
        print(Iter)

    # Convergence check
    if Iter == MaxIter:
        sys.exit('No convergence (maximum number of iterations reached)')

    return T0, X0, Iter, err
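
A hedged usage sketch (not from the source): stepping along an orbit family by chaining continuation calls, where the initial (T0, X0) come from a previously converged Lyapunov orbit; the mass ratio and step size below are illustrative values.

mu = 0.01215  # approximate Earth-Moon mass ratio (illustrative)
ds = 1e-3     # pseudo-arclength step size (illustrative)
family = [(T0, deepcopy(X0))]
for _ in range(10):
    T0, X0, Iter, err = PseudoArchlength_2D_AxiSym(T0, X0, mu, ds)
    family.append((T0, deepcopy(X0)))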
Example no. 20
def MetaQ(x0, N, Rs):
    
    ### pre-process data ###
    # problem size
    n   = len(N)
    dim = int(n*(n-1)/2)
    
    # get mapping array
    mp = np.array([[i,j] for i in range(n) for j in range(i+1,n)])
    
    # range differences RD (equivalent to TDOA)
    RD = Rs[mp[:,1]] - Rs[mp[:,0]]
    
    # ranges between stations 
    R  = (N[mp[:,1]] - N[mp[:,0]]).T
    Rd = la.norm(R, axis=0)
    
    
    
    ### calculate hyperbola function ###
    
    # foci of the hyperbolas (right in between stations)
    b  = ( N[mp[:,0]] + N[mp[:,1]] ) / 2
    
    # construct an orthonormal eigenvector basis for each hyperboloid
    v1 = R / Rd
    V = np.zeros([dim, 3, 3])
    V[:,:,0]  = v1.T
    V[:,:,1:] = np.array([sla.null_space(V[i, :, :].T) for i in range(dim)])
    
    # shortcuts for matrix
    lm = (Rd - RD)**2
    phim = lm / (2*Rd)
    #lp = (Rd + RD)**2
    #phip = lp / (2*Rd)
    
    # matrix for coefficient computation (named M here so it does not shadow
    # the station array N passed in)
    M = np.zeros([dim, 2, 2])
    M[:, :, :] = np.array([np.array([[(RD[i] / 2)**2, 0],
                                     [(Rd[i] / 2 - phim[i])**2, -phim[i]**2 + lm[i]]
                                     #[(Rd[i] / 2 - phip[i])**2, -phip[i]**2 + lp[i]]
                                     ])
                           for i in range(dim)])

    # compute coefficients by solving the batched linear systems; the target
    # contour value is assumed to be 1
    ABB = np.zeros([dim, 3])
    ABB[:, :2] = la.solve(M, np.ones([dim, 2]))
    ABB[:, 2] = ABB[:, 1]

    # diagonal matrix for correct scaling of the eigenvectors
    D        = np.zeros([dim, 3, 3])
    D[:,:,:] = np.array( [np.diag(ABB[i,:]) for i in range(dim) ])
    
    
    
    ### set up solution
    
    # A matrix of the quadratic form
    A   = V @ D @ V.swapaxes(1,2)
    
    # little helper to "label" points on the "wrong side" of the hyperboloid
    sig = np.sign(RD)

    # quadratic form F = x^T A x - 1; the sign is swapped to capture the
    # "correct" sheet of the hyperboloid
    def FJ(x, mode=0):
        sign_cond = np.array( [(np.dot((x - b[i]), V[i,:,0]) * sig[i] < 0) for i in range(dim)] )
        swapsign, add = zip(*[((1,1) if cond else (-1,0)) for cond in sign_cond])
        
        if mode == 0:
            R = np.array( [ \
                   ( ( np.dot((x - b[i]), np.dot(A[i,:,:], (x.T - b[i].T) ) ) ) \
                    * 1) * swapsign[i] + 0*add[i] - 1\
                    for i in range(dim)] )
        elif mode == 1:
            R = np.array( [ \
                   ( np.dot(A[i,:,:], (x - b[i]) ) ) \
                    * 1 + 0*swapsign[i]\
                for i in range(dim)] )
        return R
    
    """
    F   = lambda x: np.array( [\
                    ( ( np.dot((x - b[i]), np.dot(A[i,:,:], (x.T - b[i].T) ) ) ) \
                       * (1 if (np.dot((x - b[i]), V[i,:,0]) * sig[i] >= 0) else -1)\
                       ) - 0 \
                    for i in range(dim)] )
    # apparently DelF = 2 A x; same sign-trick applies
    J    = lambda x: np.array( [\
                    2  * ( np.dot(A[i,:,:], (x - b[i])) ) \
                       * (1 if (np.dot((x - b[i]), V[i,:,0]) * sig[i] >= 0) else -1) \
                    for i in range(dim)] )
    """
    
    
        
    ### finally, solve...
    
    xsol = sciop.least_squares(lambda x: FJ(x, 0), x0, method='dogbox')#, jac = J)#, method='lm', xtol=1e-8, x_scale='jac')
    
    # record results
    xn = xsol.x
    cost = xsol.cost
    opti = xsol.optimality
    fval = xsol.fun
    
    
    return xn, fval, FJ
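
A hedged usage sketch (not from the source): four synthetic stations with exact range measurements, following the argument layout above (N holds station positions row-wise, Rs the measured ranges; the station geometry and emitter are made up for illustration).

stations = np.array([[0., 0., 0.], [10., 0., 0.],
                     [0., 10., 0.], [0., 0., 10.]])
emitter = np.array([3., 4., 5.])
ranges = np.linalg.norm(stations - emitter, axis=1)
xn, fval, FJ = MetaQ(x0=np.zeros(3), N=stations, Rs=ranges)
print(xn)  # should be close to the emitter position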
Example no. 21
 def compute_stationary_distribution(self):
     p = np.real(null_space(self.P)[:, 0].flatten())
     p = p / np.sum(p)
     return p
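
The same trick works standalone: for a continuous-time chain with generator Q, the stationary p solves p @ Q = 0, i.e. Q.T @ p = 0, so self.P above is presumably stored transposed (an assumption). A hedged, self-contained example:

import numpy as np
from scipy.linalg import null_space

Q = np.array([[-0.3,  0.3],
              [ 0.5, -0.5]])          # 2-state CTMC generator
p = np.real(null_space(Q.T)[:, 0].flatten())
p = p / np.sum(p)                     # -> [0.625, 0.375]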
Example no. 22
def special_ortho_group_rvs(dim):
    # Reconstructed header/loop start (the snippet began mid-function): a
    # Householder-based sampler of random rotation matrices in SO(dim).
    H = np.eye(dim)
    D = np.ones((dim,))
    for n in range(1, dim):
        x = np.random.normal(size=(dim - n + 1,))
        D[n - 1] = np.sign(x[0])
        x[0] += D[n - 1] * np.sqrt((x * x).sum())
        # Householder transformation
        Hx = (np.eye(dim - n + 1) - 2. * np.outer(x, x) / (x * x).sum())
        mat = np.eye(dim)
        mat[n - 1:, n - 1:] = Hx
        H = np.dot(H, mat)
    # Fix the last sign such that the determinant is 1
    D[-1] = (-1)**(1 - (dim % 2)) * D.prod()
    # Equivalent to np.dot(np.diag(D), H) but faster, apparently
    H = (D * H.T).T
    return H
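
A quick, illustrative sanity check for the reconstructed sampler above: the output should be orthogonal with determinant +1.

import numpy as np

H = special_ortho_group_rvs(4)
assert np.allclose(H @ H.T, np.eye(4))
assert np.isclose(np.linalg.det(H), 1.0)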


import numpy as np
import tensorflow as tf  # the TF1-style API (InteractiveSession) is used below
from scipy.linalg import null_space

A = np.array([[1, 1], [1, 1]])
ns = null_space(A)
ns * np.sign(ns[0, 0])  # Remove the sign ambiguity of the vector
a = np.array([[1, 2], [3, 4]])
np.linalg.det(a)
# credits: https://www.tensorflow.org/api_docs/python/tf/Variable
A = tf.Variable(np.zeros((5, 5), dtype=np.float32), trainable=False)
new_part = tf.ones((2, 3))
update_A = A[2:4, 2:5].assign(new_part)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
print(update_A.eval())
##based on this address: https://stackoverflow.com/questions/46511017/plot-hyperplane-linear-svm-python
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
Example no. 23
def _full_from_partial(elems: Sequence, traceless: bool,
                       labels: Sequence[str]) -> Basis:
    """
    Internal function to parse the basis elements *elems*. By default,
    checks are performed for orthogonality and linear independence. If
    either fails an exception is raised. Returns a full hermitian and
    orthonormal basis.
    """
    # Convert elems to basis to have access to its handy attributes
    elems = Basis(elems)
    elems.normalize(copy=False)

    if not elems.isherm:
        warn("(Some) elems not hermitian! The resulting basis also won't be.")

    if not elems.isorthonorm:
        raise ValueError("The basis elements are not orthonormal!")

    if traceless is None:
        traceless = elems.istraceless
    else:
        if traceless and not elems.istraceless:
            raise ValueError(
                "The basis elements are not traceless (up to an identity element) "
                + "but a traceless basis was requested!")

    if labels is not None and len(labels) not in (len(elems), elems.d**2):
        raise ValueError(
            f'Got {len(labels)} labels but expected {len(elems)} or {elems.d**2}'
        )

    # Get a Generalized Gell-Mann basis to expand in (it fulfills the desired
    # properties of hermiticity and orthonormality, and therefore so do linear
    # combinations, i.e. basis expansions, of it). Split off the identity so
    # that for traceless bases we can put it in the front.
    if traceless:
        Id, ggm = np.split(Basis.ggm(elems.d), [1])
    else:
        ggm = Basis.ggm(elems.d)

    coeffs = expand(elems, ggm, hermitian=elems.isherm, tidyup=True)
    # Throw out coefficient vectors that are all zero (should only happen for
    # the identity)
    coeffs = coeffs[(coeffs != 0).any(axis=-1)]
    if coeffs.size != 0:
        # Get d**2 - len(coeffs) vectors spanning the nullspace of coeffs.
        # Those together with coeffs span the whole space, and therefore also
        # the linear combinations of GGMs weighted with the coefficients will
        # span the whole matrix space
        coeffs = np.concatenate((coeffs, sla.null_space(coeffs).T))
        # Our new basis is given by linear combinations of GGMs with coeffs
        basis = np.einsum('ij,jkl', coeffs, ggm)
    else:
        # Resulting array is of size zero, i.e. we can just return the GGMs
        basis = ggm

    # Add the identity again and normalize the new basis
    if traceless:
        basis = np.concatenate((Id, basis)).view(Basis)
    else:
        basis = basis.view(Basis)

    # Clean up
    basis.tidyup()

    if labels is not None and len(labels) == len(elems):
        # Fill up labels for newly generated elements
        labels = list(labels)
        if traceless:
            # sort Identity label to the front, default to first if not found
            # (should not happen since traceless checks that it is present)
            id_idx = next((i for i, elem in enumerate(elems)
                           if np.allclose(Id.view(ndarray),
                                          elem.view(ndarray),
                                          rtol=elems._rtol,
                                          atol=elems._atol)), 0)
            labels.insert(0, labels.pop(id_idx))

        labels.extend('$C_{{{}}}$'.format(i)
                      for i in range(len(labels), len(basis)))

    return basis, labels
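
The nullspace completion above is the heart of the routine: orthonormal coefficient rows plus an orthonormal basis of their nullspace together span the whole space. A hedged, numpy-only illustration of just that step (independent of the Basis machinery):

import numpy as np
from scipy.linalg import null_space

coeffs = np.array([[1., 0., 0., 0.],
                   [0., 1., 0., 0.]])                  # 2 orthonormal rows in R^4
full = np.concatenate((coeffs, null_space(coeffs).T))  # completed to 4 x 4
assert np.allclose(full @ full.T, np.eye(4))           # still orthonormal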
Example no. 24
def evans(yl,yr,lamda,s,p,m,e):

    if e['evans'] == "reg_reg_polar":
        muL = np.trace(np.dot(np.dot(np.conj((linalg.orth(yl)).T),
                            e['LA'](e['Li'][0],lamda,s,p)),linalg.orth(yl)))
        muR = np.trace(np.dot(np.dot(np.conj((linalg.orth(yr)).T),
                            e['RA'](e['Ri'][0],lamda,s,p)),linalg.orth(yr)))

        omegal,gammal = manifold_polar(e['Li'],linalg.orth(yl),lamda,e['LA'],
                                        s,p,m,e['kl'],muL)
        omegar,gammar = manifold_polar(e['Ri'],linalg.orth(yr),lamda,e['RA'],
                                        s,p,m,e['kr'],muR)

        out = (linalg.det(np.dot(np.conj(linalg.orth(yl).T),yl))*
               linalg.det(np.dot(np.conj(linalg.orth(yr).T),yr))*gammal*
               gammar*linalg.det(np.concatenate((omegal,omegar),axis=1)))

    elif e['evans'] == "adj_reg_polar":
        muL = np.trace(np.dot(np.dot(np.conj((linalg.orth(yl)).T),
                            e['LA'](e['Li'][0],lamda,s,p)),linalg.orth(yl)))
        muR = np.trace(np.dot(np.dot(np.conj((linalg.orth(yr)).T),
                            e['RA'](e['Ri'][0],lamda,s,p)),linalg.orth(yr)))

        omegal,gammal = manifold_polar(e['Li'],linalg.orth(yl),lamda,e['LA'],
                                        s,p,m,e['kl'],muL)
        omegar,gammar = manifold_polar(e['Ri'],linalg.orth(yr),lamda,e['RA'],
                                        s,p,m,e['kr'],muR)

        out = (np.conj(linalg.det(np.dot(np.conj(linalg.orth(yl).T),yl)))*
                linalg.det(np.dot(np.conj(linalg.orth(yr).T),yr))*
                np.conj(gammal)*gammar*linalg.det(
                np.conj(omegal.T).dot(omegar)))

    elif e['evans'] == "reg_adj_polar":
        muL = np.trace(np.dot(np.dot(np.conj((linalg.orth(yl)).T),
                        e['LA'](e['Li'][0],lamda,s,p)),linalg.orth(yl)))
        muR = np.trace(np.dot(np.dot(np.conj((linalg.orth(yr)).T),
                        e['RA'](e['Ri'][0],lamda,s,p)),linalg.orth(yr)))

        omegal,gammal = manifold_polar(e['Li'],linalg.orth(yl),lamda,e['LA'],
                                        s,p,m,e['kl'],muL)
        omegar,gammar = manifold_polar(e['Ri'],linalg.orth(yr),lamda,e['RA'],
                                        s,p,m,e['kr'],muR)

        out = ( linalg.det(np.dot(np.conj(linalg.orth(yl).T),yl))*
                    np.conj(linalg.det(np.dot(np.conj(linalg.orth(yr).T),yr)))*
                    np.conj(gammar)*gammal*linalg.det(
                    np.conj(omegar.T).dot(omegal)))


    elif e['evans'] == "adj_reg_compound":
        Lmani = manifold_compound(e['Li'],wedgieproduct(yl),lamda,s,p,m,
                                  e['LA'],e['kl'],1)
        Rmani = manifold_compound(e['Ri'],wedgieproduct(yr),lamda,s,p,m,
                                  e['RA'],e['kr'],-1)

        out = np.inner(np.conj(Lmani),Rmani)

    elif e['evans'] == "reg_adj_compound":
        Lmani = manifold_compound(e['Li'],wedgieproduct(yl),lamda,s,p,m,
                                  e['LA'],e['kl'],1)
        Rmani = manifold_compound(e['Ri'],wedgieproduct(yr),lamda,s,p,m,
                                  e['RA'],e['kr'],-1)

        out = np.inner(np.conj(Rmani),Lmani)

    elif e['evans'] == "reg_reg_bvp_cheb":
        VLa,VLb = bvp_basis_cheb(s,p,m,lamda,e['A_pm'],e['LA'],-1,e['kl'])
        VRa,VRb = bvp_basis_cheb(s,p,m,lamda,e['A_pm'],e['RA'],1,e['kr'])
        temp = linalg.null_space(yl.T)
        detCL = ( np.linalg.det(np.hstack([yl,temp]))
                 / np.linalg.det(np.hstack([VLa,temp])) )
        temp = linalg.null_space(yr.T)
        detCR = ( np.linalg.det(np.hstack([yr,temp]))
                 / np.linalg.det(np.hstack([VRb,temp])) )

        out = np.linalg.det(np.hstack([VLb,VRa]))*detCL*detCR

    elif e['evans'] == "regular_periodic":
        sigh = manifold_periodic(e['Li'],np.eye(e['kl']),lamda,s,p,m,e['kl'])
        out = np.zeros((1,len(kappa)),dtype=np.complex)
        for j in range(len(kappa)):
            out[j] = np.det(sigh-np.exp(1j*kappa[j]*p['X'])
                        *np.exp(e['kl']))

    elif e['evans'] == "balanced_periodic":
        sigh = manifold_periodic(e['Li'],np.eye(e['kl']),lamda,s,p,m,e['kl'])
        phi = manifold_periodic(e['Ri'],np.eye(e['kr']),lamda,s,p,m,e['kr'])
        out = np.zeros((1,len(kappa)),dtype=np.complex)
        for j in range(len(kappa)):
            out[j] = np.linalg.det(sigh-np.exp(1j*kappa[j]*p['X'])*phi)

    elif e['evans'] == "balanced_polar_scaled_periodic":
        kappa = yr
        Amatrix = e['A'](e['Li'][0],lamda,s,p)
        k, kdud = np.shape(Amatrix)
        egs = np.linalg.eigvals(Amatrix)
        real_part_egs = np.real(egs)
        cnt_pos = len(np.where(real_part_egs > 0)[0])
        cnt_neg = len(np.where(real_part_egs < 0)[0])
        if not (cnt_neg == e['dim_eig_R']):
            raise ValueError("consistent splitting failed")
        if not (cnt_pos == e['dim_eig_L']):
            raise ValueError("consistent splitting failed")
        index1 = np.argsort(-real_part_egs)
        muL = np.sum(egs[index1[0:e['dim_eig_L']]])
        index2 = np.argsort(real_part_egs)
        muR = np.sum(egs[index2[0:e['dim_eig_R']]])
        # Initializing vector
        ynot = linalg.orth(np.vstack([np.eye(k),np.eye(k)]))
        # Compute the manifolds
        sigh, gammal = manifold_polar(e['Li'],ynot,lamda,A_lift_matrix,s,p,m,k,muL)
        phi, gammar = manifold_polar(e['Ri'],ynot,lamda,A_lift_matrix,s,p,m,k,muR)
        out = np.zeros((1,len(kappa)),dtype=complex)  # np.complex was removed from modern numpy
        for j in range(1,len(kappa)+1):
            out[:,j-1] = gammal*gammar*np.linalg.det(np.vstack([np.concatenate(
                [sigh[:k,:k],np.exp(1j*kappa[j-1]*p['X'])*phi[:k,:k]],axis=1),
                np.concatenate([sigh[k:2*k,:k], phi[k:2*k,:k]],axis=1)]))

    elif e['evans'] == "bpspm":
        out = Struct()
        out.lamda = lamda
        muL = 0
        muR = 0
        # initializing vector
        ynot = linalg.orth(np.vstack([np.eye(e['kl']),np.eye(e['kr'])]))
        # compute the manifolds
        out.sigh,out.gammal = manifold_polar(e['Li'],ynot,lamda,A_lift_matrix,s,p,
                                             m,e['kl'],muL)
        out.phi,out.gammar = manifold_polar(e['Ri'],ynot,lamda,A_lift_matrix,s,p,
                                             m,e['kr'],muR)

    elif e['evans'] == "balanced_polar_periodic":
        kappa = yr
        muL = 0
        muR = 0
        k = e['kl']

        # initializing vector
        ynot = linalg.orth(np.vstack([np.eye(k), np.eye(k)]))

        # compute the manifolds
        sigh, gammal = manifold_polar(e['Li'],ynot,lamda,A_lift_matrix,s,p,m,
                                      k,muL)
        phi, gammar = manifold_polar(e['Ri'],ynot,lamda,A_lift_matrix,s,p,m,
                                     k,muR)
        out = np.zeros(len(kappa),dtype=complex)
        for j in range(len(kappa)):
            out[j] = gammal*gammar*np.linalg.det(np.vstack([np.concatenate(
                    [sigh[:k,:k],np.exp(1j*kappa[j]*p.X)*phi[:k,:k]],axis=1),
                    np.concatenate([sigh[k:2*k,:k], phi[k:2*k,:k]],axis=1)]))

    else:
        raise ValueError("e['evans'], '"+e['evans']+"', is not implemented.")

    return out
Example no. 25
def main():
    # 1. load dataset, train and valid
    train_dataset, valid_dataset = build_dataset(n_mels = n_mels, train_dataset = args.train_dataset, valid_dataset = args.valid_dataset, background_noise = args.background_noise)
    print('train ',len(train_dataset), 'val ', len(valid_dataset))

    weights = train_dataset.make_weights_for_balanced_classes()
    sampler = WeightedRandomSampler(weights, len(weights))
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, sampler=sampler,
                              pin_memory=use_gpu, num_workers=args.dataload_workers_nums)
    valid_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False,
                              pin_memory=use_gpu, num_workers=args.dataload_workers_nums)
    # a name used to save checkpoints etc.
    # 2. prepare the model, checkpoint
    full_name = '%s_%s_%s_bs%d_lr%.1e_wd%.1e' % (args.model, args.optim, args.lr_scheduler, args.batch_size, args.learning_rate, args.weight_decay)
    if args.comment:
        full_name = '%s_%s' % (full_name, args.comment)

    model = models.create_model(model_name=args.model, num_classes=len(CLASSES), in_channels=1)

    if use_gpu:
        model = torch.nn.DataParallel(model).cuda()

    criterion = torch.nn.CrossEntropyLoss()

    if args.optim == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    start_timestamp = int(time.time()*1000)
    start_epoch = 0
    best_accuracy = 0
    best_loss = 1e100
    global_step = 0

    if args.resume:
        print("resuming getShapeLista checkpoint '%s'" % args.resume)
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        model.float()
        optimizer.load_state_dict(checkpoint['optimizer'])

        best_accuracy = checkpoint.get('accuracy', best_accuracy)
        best_loss = checkpoint.get('loss', best_loss)
        start_epoch = checkpoint.get('epoch', start_epoch)
        global_step = checkpoint.get('step', global_step)

        del checkpoint  # reduce memory

    if args.lr_scheduler == 'plateau':
        lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=args.lr_scheduler_patience, factor=args.lr_scheduler_gamma)
    else:
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_scheduler_step_size, gamma=args.lr_scheduler_gamma, last_epoch=start_epoch-1)

    def get_lr():
        return optimizer.param_groups[0]['lr']

    writer = SummaryWriter(comment=('_speech_commands_' + full_name))

    #3. train and validation
    print("training %s for Google speech commands..." % args.model)
    since = time.time()
    #grad_client_list = [[]] * args.clients

    for epoch in range(start_epoch, args.max_epochs):
        print("epoch %3d with lr=%.02e" % (epoch, get_lr()))
        phase = 'train'
        writer.add_scalar('%s/learning_rate' % phase,  get_lr(), epoch)

        model.train()  # Set model to training mode

        running_loss = 0.0
        it = 0
        correct = 0
        total = 0
        A = 0
        #federated
        n = 0 # the length of squeezed gradient vector
        shape_list = []
        num_paddings = 0
        A_inv = 0
        S_i = 0
        S_j = 0
        u = 0
        sigma = 0
        vh = 0
        ns = 0
        global_grad_sum  = 0#= torch.zeros(84454500).cuda()
        global_grad_sum_actual = 0 #= torch.zeros(28151052).cuda()

        #compute for each client
        current_client = 0
        pbar = tqdm(train_dataloader, unit="audios", unit_scale=train_dataloader.batch_size, disable=True)
        for batch in pbar:

            inputs = batch['input']
            inputs = torch.unsqueeze(inputs, 1)
            targets = batch['target']
            #print(inputs.shape, targets.shape)
            if args.mixup:
                inputs, targets = mixup(inputs, targets, num_classes=len(CLASSES))

            inputs = Variable(inputs, requires_grad=True)
            targets = Variable(targets, requires_grad=False)
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda(non_blocking=True)  # `async` is a reserved word in Python 3.7+

            outputs = model(inputs)
            if args.mixup:
                loss = mixup_cross_entropy_loss(outputs, targets)
            else:
                loss = criterion(outputs, targets)
            optimizer.zero_grad()
            loss.backward()
            #generate gradient list
            current_client_grad = []
            for name, param in model.named_parameters():
                if param.requires_grad:
                    #print(name, param.grad.shape, param.grad.type())#, param.grad)
                    current_client_grad.append(param.grad)
            #break
            #print(len(current_client_grad), current_client_grad[0].shape, current_client_grad[-1].shape)
            #randomize the gradient, if in a new batch, generate the randomization matrix
            if (current_client == 0):
                # Generate matrix A
                # 1. first obtain n(the total length of gradient vector) (if n == 0, get it, or pass)
                
                n = getLenOfGradientVectorCuda(current_client_grad)
                global_grad_sum = torch.zeros(3 * math.ceil(float(n)/args.matrix_size) * args.matrix_size).cuda()
                global_grad_sum_actual = torch.zeros(n).cuda()
                shape_list = getShapeListCuda(current_client_grad)
                print("gradient vector of length", n)
                # 2. randomize a full rank matrix A, the elements are evenly distributed
                #A = np.random.randint(0, 10000,size = (n, n)) 
                # Memory is not enough to create such a large matrix : CURRENT SOLUTION use a small size matrix and 
                # iterate over the vector
                A = torch.randint(0, 10000, (args.matrix_size, args.matrix_size)).float().cuda()
                print("generating randomization matrix")
                A_inv = A.inverse()
                # two index set
                S_i = random.sample(range(0, 3*args.matrix_size), args.matrix_size)
                S_i.sort()
                S_j = random.sample(range(0, 2*args.matrix_size), args.matrix_size)
                S_j.sort()
                # extend to B
                B = torch.zeros(args.matrix_size, 3*args.matrix_size).cuda()
                for i in range(0, args.matrix_size):
                        B[:, S_i[i]:S_i[i]+1] = A[:, i:i+1]
                C = (10000 * torch.randn(2 * args.matrix_size, 3*args.matrix_size)).cuda()
                for i in range(0, args.matrix_size):
                    C[S_j[i] : S_j[i] + 1, :] = B[i:i+1 , :]
                #Does cuda speed up our calculation
                #C = C.cuda()
                # SVD
                # NOTE: torch.svd returns U, S, V with C = U @ diag(S) @ V.T,
                # so `vh` here actually holds V rather than V^H
                u, s, vh = torch.svd(C, some=False)
                #print('u', u.shape, 's', s.shape, 'vh', vh.shape)
                # recontruct sigma
                sigma = torch.zeros(C.shape[0], C.shape[1]).cuda()
                #print(C.shape[0], C.shape[1])
                sigma[:min(C.shape[0],C.shape[1]), :min(C.shape[0],C.shape[1])] = s.diag()
                #assert(torch.equal(torch.mm(u, torch.mm(sigma, vh)), C))
                # C = torch.mm(torch.mm(u, sigma), vh.t())
                
                # linearly independent group (null space of C)
                ns = null_space(C.cpu().numpy())  # shape (3*matrix_size, matrix_size)
                ns = torch.from_numpy(ns).cuda()
                #print('ns', ns.shape)
            # do the randomization, obtain a new gradient vector for gradient_client_list[current_client]
            flatterned_grad = transListOfArraysToArraysCuda(current_client_grad, n)
            global_grad_sum_actual += flatterned_grad
            ###TODO: 1. need padding, 2. how to recover
            # random numbers
            r = (10000*torch.randn(args.matrix_size, 1)).cuda()
            r_new =  torch.zeros(3 * args.matrix_size, 1).cuda()
            for i in range(args.matrix_size):
                r_new += r[i] * ns[:,i:i+1]
            num_paddings = math.ceil(float(n)/args.matrix_size) * args.matrix_size  - n
            #print('flatterned', n, 'need padding', num_paddings) #np.array
            # extent to 2n
            flatterned_grad_extended = torch.zeros(n + num_paddings).cuda()
            flatterned_grad_extended[:n] = flatterned_grad
            #print(flatterned_grad_extended[:20])
            current_client_grad_after_random = torch.zeros(3*flatterned_grad_extended.shape[0]).cuda()
            new_grad = (10000 * torch.randn(3 * args.matrix_size, 1)).cuda()
            for i in range(0, flatterned_grad_extended.shape[0], args.matrix_size):
                if ((i // args.matrix_size) % 5000 == 0):
                    print(i // args.matrix_size, flatterned_grad_extended.shape[0] // args.matrix_size)
                for j in range(args.matrix_size):
                    new_grad[S_i[j]] = flatterned_grad_extended[i + j]
                
                # compute the randomize gradient
                randomized_gradient = torch.mm(vh.t(), new_grad + r_new)
                #print("randomized gradient", randomized_gradient.shape)
                current_client_grad_after_random[3*i : 3*i + 3*args.matrix_size] = torch.squeeze(randomized_gradient)
            #print('after randomization', current_client_grad_after_random.shape)
            # transform the flatterned vector to matrix for model update
            if (current_client == args.clients - 1):
                print("client", current_client)
                global_grad_sum += current_client_grad_after_random
                # collect all the randomized gradient, cacluate the sum, and send to all clients
                # each client eliminate the randomness, and update the parameters according to the gradients
                # remove randomness
                res = torch.zeros(int(global_grad_sum.shape[0]/3)).cuda()
                alpha = torch.zeros(args.matrix_size, 1).cuda()
                for i in range(0, global_grad_sum.shape[0] , 3 * args.matrix_size):
                    tmp = torch.mm(torch.mm(u, sigma), global_grad_sum[i : i + 3*args.matrix_size].view(-1, 1))
                    for j in range(args.matrix_size): 
                        alpha[j] = tmp[S_j[j]]
                    res[int(i/3) : int(i/3) + args.matrix_size] = (torch.mm(A_inv, alpha)).squeeze()
                
                # set the gradient manually and update
                recovered_grad_in_cuda = transCudaArrayWithShapeList(res, shape_list)
                # check whether the same 
                print('dist', torch.dist(res[:n], global_grad_sum_actual))
                ind = 0
                #print(recovered_grad_in_cuda, recovered_grad_in_cuda[0].shape, r)
                for name, param in model.named_parameters():
                    if param.requires_grad:
                        #print(recovered_grad_in_cuda[ind].shape, recovered_grad_in_cuda[ind].type())
                        #print(recovered_grad_in_cuda[ind][:10])
                        param.grad = recovered_grad_in_cuda[ind]
                        ind+=1
                assert(ind == len(recovered_grad_in_cuda))
                optimizer.step()
                print("all clients finished")
                current_client = 0
                global_grad_sum.fill_(0)
                global_grad_sum_actual.fill_(0)
            else :
                print("client", current_client)
                global_grad_sum += current_client_grad_after_random
                current_client += 1

            # only update the parameters when current_client == args.clients - 1

            # statistics
            it += 1
            global_step += 1
            #running_loss += loss.data[0]
            running_loss += loss.item()
            pred = outputs.data.max(1, keepdim=True)[1]
            if args.mixup:
                targets = batch['target']
                targets = Variable(targets, requires_grad=False).cuda(non_blocking=True)
            correct += pred.eq(targets.data.view_as(pred)).sum()
            total += targets.size(0)

            writer.add_scalar('%s/loss' % phase, loss.item(), global_step)

            # update the progress bar
            pbar.set_postfix({
                'loss': "%.05f" % (running_loss / it),
                'acc': "%.02f%%" % (100*float(correct)/total)
            })
            print("loss\t ", running_loss / it, "\t acc \t", 100*float(correct)/total)
            #break

        accuracy = float(correct)/total
        epoch_loss = running_loss / it
        writer.add_scalar('%s/accuracy' % phase, 100*accuracy, epoch)
        writer.add_scalar('%s/epoch_loss' % phase, epoch_loss, epoch)
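
The masking scheme in this loop rests on one linear-algebra fact: adding any vector from null(C) to a padded gradient block leaves its image under C unchanged, so the server-side recovery of the summed gradient stays exact. A hedged, minimal numpy sketch of that invariant (dimensions simplified relative to the loop above):

import numpy as np
from scipy.linalg import null_space

C = np.random.randn(4, 6)              # wide matrix -> nontrivial null space
ns = null_space(C)                     # 6 x 2 orthonormal basis of null(C)
x = np.random.randn(6)                 # a client's flattened gradient block
r = ns @ np.random.randn(ns.shape[1])  # random masking vector in null(C)
masked = x + r                         # what the client would share
assert np.allclose(C @ masked, C @ x)  # indistinguishable under C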
Example no. 26
                    m2 = ndofV * icon[k2, iel] + i2
                    K_mat[m1, m2] += K_el[ikk, jkk]
            f_rhs[m1] += f_el[ikk]
            G_mat[m1, iel] += G_el[ikk, 0]
    h_rhs[iel] += h_el[0]

for i in range(NfemV):
    print(G_mat[i, 0], G_mat[i, 1], G_mat[i, 2], G_mat[i, 3], G_mat[i, 4])
    #print ("%3i  & %3i & %3i & %3i  \\\\ " %(int(round(G_mat[i,0])),int(round(G_mat[i,1])),int(round(G_mat[i,2])),int(round(G_mat[i,3])) ))

#print (G_mat)
G2 = np.zeros((8, NfemP), dtype=np.float64)  # matrix GT

print("----------------------------------------------")

G2[0:8, :] = G_mat[8:16, :]  # rows 8-15 of G_mat, copied in one slice

#for i in range (10):
#    print ("%3f %3f %3f %3f %3f %3f %3f %3f %3f " %(G2[i,0],G2[i,1],G2[i,2],G2[i,3],G2[i,4],G2[i,5],G2[i,6],G2[i,7],G2[i,8]))

ns = null_space(G2)

print(ns)
Example no. 27
def lyapunovSpectrum(sol,f,args=[],kwargs={},dist=1e-6,K=1,plotbool=False,plotaxis=None,savefigName=None):
    #This method approximates the first K values of the Lyapunov spectrum of a
    #sequence generated by a difference system. Variable sol is the inputted
    #sequence (organized first by dimension, then chronologically) and variable
    #f is the corresponding difference system. Variable dist is the distance
    #used between the initial conditions for each trajectory in each iteration.
    #Infrastructure for plotting the convergence of each exponent is available
    #(for visual verification). Variable savefigName allows for the resulting
    #graph to be saved under the name <savefigName>.png.
    dim=len(sol)
    solcopy=[sol[i][:-1] for i in range(dim)]
    n=len(solcopy[0])
    if any([len(solcopy[i])!=n for i in range(1,dim)]):
        print("Error: Inputted solution is inviable. Terminating process")
        for j in range(dim):
            print("Length of sol[%i]: %i"%(j,len(sol[j])))
        return 0.0
    
    K=max(min(K,dim),1)
    vect0=np.array([float(solcopy[i][0]) for i in range(dim)])
    vects=[np.copy(vect0) for i in range(K)]
    for i in range(K):
        vects[i][i]=vects[i][i]+dist
        
    def GramDet(vectors):
        size=len(vectors)
        Gramian=[[np.inner(vectors[i],vectors[j]) for j in range(size)] for i in range(size)]
        return npla.det(Gramian)
    
    Lambda=[0.0 for i in range(K)]
    if plotbool:
        plotLambda=[[] for i in range(K)]
    counts=[0 for i in range(K)]
    
    vols=[0.0 for i in range(K)]
    for i in range(1,n):
        vect0=np.array([float(solcopy[j][i]) for j in range(dim)])
        for j in range(K):
            vects[j]=f(vects[j],*args,**kwargs)
            if not isinstance(vects[j],np.ndarray):
                vects[j]=np.array(vects[j])
            vols[j]=math.sqrt(GramDet([vects[l]-vect0 for l in range(j+1)]))

        for j in range(K):
            if vols[j]!=0:
                counts[j]+=1
                Lambda[j]+=math.log(vols[j])-(j+1)*math.log(dist)
                if plotbool:
                    if j==0:
                        plotLambda[j].append(Lambda[j]/i)
                    else:
                        if counts[j-1]==0:
                            plotLambda[j].append(Lambda[j]/i)
                        else:
                            plotLambda[j].append(Lambda[j]/i-Lambda[j-1]/i)
            else:
                if plotbool:
                    counts[j]+=1
                    plotLambda[j].append(plotLambda[j][len(plotLambda[j])-1])
                    
        orthonormals=mtb.grammSchmidt([vects[j]-vect0 for j in range(K)],normalize=True,tol=min(dist*1e-3,1e-3),clean=False)
        adjusted=[]
        voided=[]
        for j in range(K):
            if any([elem!=0.0 for elem in orthonormals[j]]):
                adjusted.append(j)
                vects[j]=dist*orthonormals[j]+vect0
            else:
                voided.append(j)
        if len(voided)>0:
            if len(adjusted)>0:
                nullspace=np.transpose(spla.null_space(np.array([vects[a] for a in adjusted])))
                for j in range(len(voided)):
                    vects[voided[j]]=dist*nullspace[j]/npla.norm(nullspace[j])+vect0
            else:
                for j in range(K):
                    vects[j]=np.array([0 for k in range(j)]+[dist]+[0 for k in range(j+1,dim)])+vect0
    
    lmbda=[]
    if counts[0]!=0:
        lmbda.append(Lambda[0]/n)
    for i in range(1,K):
        if counts[i]!=0:
            if counts[i-1]==0:
                lmbda.append(Lambda[i]/n)
            else:
                lmbda.append(Lambda[i]/n-Lambda[i-1]/n)
    
    if plotbool:
        showbool=False
        if plotaxis is None:
            graphsize=9
            font = {"family": "serif",
                "color": "black",
                "weight": "bold",
                "size": "20"}
            plotfig=plt.figure(figsize=(graphsize,graphsize))
            plotaxis=plotfig.add_subplot(111)
            if K==1:
                plotaxis.set_title("Maximal Lyapunov Exponent Convergence",fontdict=font)
                plotaxis.set_xlabel("Iterations",fontsize=16,rotation=0)
                plotaxis.set_ylabel("$\\mathbf{\\lambda_{max}}$",fontsize=16,rotation=0)
            else:
                plotaxis.set_title("Lyapunov Spectrum Convergence",fontdict=font)
                plotaxis.set_xlabel("Iterations",fontsize=16,rotation=0)
                plotaxis.set_ylabel("$\\mathbf{\\lambda 's}$",fontsize=16,rotation=0)
            plotaxis.xaxis.set_tick_params(labelsize=16)
            plotaxis.yaxis.set_tick_params(labelsize=16)
            showbool=True
        color=ptb.colors(K)
        for i in range(K):
            plotaxis.plot([j for j in range(counts[i])],plotLambda[i],color=color[i])
        if showbool:
            if savefigName is not None and isinstance(savefigName,str):
                plt.savefig(savefigName+".png",bbox_inches="tight")
                plt.close()
            else:
                plt.show()
    return lmbda
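
A hedged usage sketch (not from the source, and assuming the helper modules this function imports are available): estimating the maximal exponent of the logistic map at r = 4, whose exact value is ln 2 ~ 0.693. The map is wrapped to match the vector-in/vector-out convention the function expects.

import numpy as np

def logistic(v):
    return np.array([4.0 * v[0] * (1.0 - v[0])])

xs = [0.3]
for _ in range(5000):
    xs.append(4.0 * xs[-1] * (1.0 - xs[-1]))

lmbda = lyapunovSpectrum([xs], logistic, K=1)
print(lmbda)  # should approach ln(2) ~ 0.693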
Example no. 28
def find_bott_index_at_time_old_method(u_values_initial,
                                       u_values_final,
                                       k_values,
                                       t,
                                       edges_tf=False):
    Lx = u_values_initial.size
    Ly = k_values.size

    edg = edges_tf

    xs_arr = np.arange(Lx)

    eiX = np.diag(np.exp((np.kron(xs_arr, [1, 1])) * 1j * 2 * np.pi / Lx))
    eiX_minus = np.diag(
        np.exp((np.kron(-xs_arr, [1, 1])) * 1j * 2 * np.pi / Lx))

    # create the hamiltonian on the other side of the array
    H_last_initial = cHam.create_hamiltonian(u_values_initial,
                                             k_values[-1],
                                             edges=edg)
    H_last_final = cHam.create_hamiltonian(u_values_final,
                                           k_values[-1],
                                           edges=edg)

    eiH_last = la.expm(1j * H_last_final * t)
    eiH_minus_last = la.expm(-1j * H_last_final * t)

    P_last = H_last_initial * 0

    eigs_last, vecs_last = la.eigh(H_last_initial)
    for j in range(Lx * 2):
        P_last += np.outer(vecs_last[:, j], np.conj(
            vecs_last[:, j])) if eigs_last[j] <= 0 else 0

    P_last_at_time = np.linalg.multi_dot([eiH_last, P_last, eiH_minus_last])

    P_current = H_last_final * 0
    matrix_out = np.zeros((Lx * 2, Lx * 2), dtype='complex')

    for i in range(Ly):
        H_current_initial = cHam.create_hamiltonian(u_values_initial,
                                                    k_values[i],
                                                    edges=edg)
        H_current_final = cHam.create_hamiltonian(u_values_final,
                                                  k_values[i],
                                                  edges=edg)

        eiH_current = la.expm(1j * H_current_final * t)
        eiH_minus_current = la.expm(-1j * H_current_final * t)

        eigs_current, vecs_current = la.eigh(H_current_initial)
        P_current = H_last_final * 0
        for j in range(Lx * 2):
            P_current += np.outer(
                vecs_current[:, j], np.conj(
                    vecs_current[:, j])) if eigs_current[j] <= 0 else 0

        P_current_at_time = np.linalg.multi_dot(
            [eiH_current, P_current, eiH_minus_current])

        # plt.pcolor((P_current_at_time-P_current).__abs__())
        # plt.colorbar()
        # plt.show()

        UVUV_time = np.linalg.multi_dot([
            P_current_at_time, eiX, P_current_at_time, P_last_at_time,
            eiX_minus, P_last_at_time, P_current_at_time
        ])
        ns = la.null_space(UVUV_time, 1e-8)
        sh = ns.shape
        Q = 0 * H_current_final
        for n in range(sh[1]):
            Q += np.outer(ns[:, n], np.conj(ns[:, n]))

        matrix_out += la.logm(UVUV_time + Q)

        P_last = P_current
        P_last_at_time = P_current_at_time

    bout = np.diag(matrix_out)
    bott_index = -Lx * np.imag(bout[::2] + bout[1::2]) / (2 * np.pi)

    return (bott_index)
Example no. 29
def GlowCS(args):
    if args.init_norms is None:
        args.init_norms = [None]*len(args.m)
    else:
        assert args.init_strategy == "random_fixed_norm", "init_strategy should be random_fixed_norm if init_norms is used"
    assert len(args.m) == len(args.gamma) == len(args.init_norms), "lengths of m, gamma and init_norms must match"
    loopOver = zip(args.m, args.gamma, args.init_norms)

    for m, gamma, init_norm in loopOver:
        skip_to_next = False # flag to skip to the next loop if recovery fails due to instability
        n                  = args.size*args.size*3
        modeldir           = "./trained_models/%s/glow"%args.model
        test_folder        = "./test_images/%s"%args.dataset
        save_path          = "./results/%s/%s"%(args.dataset,args.experiment)

        # loading dataset
        trans           = transforms.Compose([transforms.Resize((args.size,args.size)),transforms.ToTensor()])
        test_dataset    = datasets.ImageFolder(test_folder, transform=trans)
        test_dataloader = torch.utils.data.DataLoader(test_dataset,batch_size=args.batchsize,drop_last=False,shuffle=False)
        
        # loading glow configurations
        config_path = modeldir+"/configs.json"
        with open(config_path, 'r') as f:
            configs = json.load(f)
        
        # sensing matrix
        A = np.random.normal(0,1/np.sqrt(m), size=(n,m))
        A = torch.tensor(A,dtype=torch.float, requires_grad=False, device=args.device)
        
        # regularizor
        gamma     = torch.tensor(gamma, requires_grad=True, dtype=torch.float, device=args.device)
    
        # adding noise
        if  args.noise == "random_bora":
            noise = np.random.normal(0,1,size=(args.batchsize,m))
            noise = noise * 0.1/np.sqrt(m)
            noise = torch.tensor(noise,dtype=torch.float,requires_grad=False, device=args.device)
        else:
            noise = np.random.normal(0,1,size=(args.batchsize,m))
            noise = noise / (np.linalg.norm(noise,2,axis=-1, keepdims=True)) * float(args.noise)
            noise = torch.tensor(noise, dtype=torch.float, requires_grad=False, device=args.device)
        
        # start solving over batches
        Original = []; Recovered = []; Z_Recovered = []; Residual_Curve = []; Recorded_Z = []
        for i, data in enumerate(test_dataloader):
            x_test = data[0]
            x_test = x_test.clone().to(device=args.device)
            n_test = x_test.size()[0]
            assert n_test == args.batchsize, "please make sure the number of images is evenly divisible by the batch size"
            
            # loading glow model
            glow = Glow((3,args.size,args.size),
                        K=configs["K"],L=configs["L"],
                        coupling=configs["coupling"],
                        n_bits_x=configs["n_bits_x"],
                        nn_init_last_zeros=configs["last_zeros"],
                        device=args.device)
            glow.load_state_dict(torch.load(modeldir+"/glowmodel.pt"))
            glow.eval()
            
            # making a forward to record shapes of z's for reverse pass
            _ = glow(glow.preprocess(torch.zeros_like(x_test)))
            
            # initializing z from Gaussian with std equal to init_std
            if args.init_strategy == "random":
                z_sampled = np.random.normal(0,args.init_std,[n_test,n])
                z_sampled = torch.tensor(z_sampled,requires_grad=True,dtype=torch.float,device=args.device)
            # initializing z from Gaussian and scaling its norm to init_norm
            elif args.init_strategy == "random_fixed_norm":
                z_sampled = np.random.normal(0,1,[n_test,n])
                z_sampled = z_sampled / np.linalg.norm(z_sampled, axis=-1, keepdims=True)
                z_sampled = z_sampled * init_norm
                z_sampled = torch.tensor(z_sampled,requires_grad=True,dtype=torch.float,device=args.device)
                print("z intialized with a norm equal to = %0.1f"%init_norm)
            # initializing z from pseudo inverse
            elif args.init_strategy == "pseudo_inverse":
                x_test_flat = x_test.view([-1,n])
                y_true      = torch.matmul(x_test_flat, A) + noise
                A_pinv      = torch.pinverse(A)
                x_pinv      = torch.matmul(y_true, A_pinv)
                x_pinv      = x_pinv.view([-1,3,args.size,args.size])
                x_pinv      = torch.clamp(x_pinv,0,1)
                z, _, _     = glow(glow.preprocess(x_pinv*255,clone=True))
                z           = glow.flatten_z(z).clone().detach()
                z_sampled   = torch.tensor(z, requires_grad=True, dtype=torch.float, device=args.device)
            # initializing z from a solution of lasso-wavelet 
            elif args.init_strategy == "lasso_wavelet":
                new_args    = {"batch_size":n_test, "lmbd":0.01,"lasso_solver":"sklearn"}
                new_args    = easydict.EasyDict(new_args)   
                estimator   = celebA_estimators.lasso_wavelet_estimator(new_args)
                x_ch_last   = x_test.permute(0,2,3,1)
                x_ch_last   = x_ch_last.contiguous().view([-1,n])
                y_true      = torch.matmul(x_ch_last, A) + noise
                x_lasso     = estimator(np.sqrt(2*m)*A.data.cpu().numpy(), np.sqrt(2*m)*y_true.data.cpu().numpy(), new_args)
                x_lasso     = np.array(x_lasso)
                x_lasso     = x_lasso.reshape(-1,64,64,3)
                x_lasso     = x_lasso.transpose(0,3,1,2)
                x_lasso     = torch.tensor(x_lasso, dtype=torch.float, device=args.device)
                z, _, _     = glow(x_lasso - 0.5)
                z           = glow.flatten_z(z).clone().detach()
                z_sampled   = torch.tensor(z, requires_grad=True, dtype=torch.float, device=args.device)
                print("z intialized from a solution of lasso-wavelet")
            # initializing z from x perturbed along null(A.T), so the measurements x @ A are unchanged
            elif args.init_strategy == "null_space":
                x_test_flat    = x_test.view([-1,n])
                x_test_flat_np = x_test_flat.data.cpu().numpy()
                A_np        = A.data.cpu().numpy()
                nullA       = null_space(A_np.T)
                coeff       = np.random.normal(0,1,(args.batchsize, nullA.shape[1]))            
                x_null      = np.array([(nullA * c).sum(axis=-1) for c in coeff])
                pert_norm   = 5 # empirically, 5 gives a sufficiently perturbed yet stable initialization
                x_null      = x_null / np.linalg.norm(x_null, axis=1, keepdims=True) * pert_norm
                x_perturbed = x_test_flat_np + x_null
                # x_perturbed is not clipped, so the measurements y = x @ A are exactly preserved
                err         = np.matmul(x_test_flat_np,A_np) - np.matmul(x_perturbed,A_np)
                assert (err **2).sum() < 1e-6, "null-space perturbation does not preserve the measurements x @ A"
                x_perturbed = x_perturbed.reshape(-1,3,args.size,args.size)
                x_perturbed = torch.tensor(x_perturbed, dtype=torch.float, device=args.device)
                z, _, _     = glow(x_perturbed - 0.5)
                z           = glow.flatten_z(z).clone().detach()
                z_sampled   = torch.tensor(z, requires_grad=True, dtype=torch.float, device=args.device)
                print("z initialized from a point in null space of A")
            else:
                raise ValueError("initialization strategy not defined")
                        
            # selecting optimizer
            if args.optim == "adam":
                optimizer = torch.optim.Adam([z_sampled], lr=args.lr,)
            elif args.optim == "lbfgs":
                optimizer = torch.optim.LBFGS([z_sampled], lr=args.lr,)
            else:
                raise ValueError("optimizer not defined")
            
            # quantities to record over the iterations
            psnr_t    = torch.nn.MSELoss().to(device=args.device)
            residual  = []; recorded_z = []
            skip_to_next = False  # set to True below if the reverse pass fails
            # running optimizer steps
            for t in range(args.steps):
                def closure():
                    optimizer.zero_grad()
                    z_unflat    = glow.unflatten_z(z_sampled, clone=False)
                    x_gen       = glow(z_unflat, reverse=True, reverse_clone=False)
                    x_gen       = glow.postprocess(x_gen,floor_clamp=False)
                    x_test_flat = x_test.view([-1,n])
                    x_gen_flat  = x_gen.view([-1,n])
                    y_true      = torch.matmul(x_test_flat, A) + noise
                    y_gen       = torch.matmul(x_gen_flat, A) 
                    global residual_t  # expose the residual for logging outside the closure
                    residual_t = ((y_gen - y_true)**2).sum(dim=1).mean()
                    if args.z_penalty_squared:
                        z_reg_loss_t= gamma*(z_sampled.norm(dim=1)**2).mean()
                    else:
                        z_reg_loss_t= gamma*z_sampled.norm(dim=1).mean()
                    loss_t      = residual_t + z_reg_loss_t
                    psnr        = psnr_t(x_test, x_gen)
                    psnr        = 10 * np.log10(1 / psnr.item())
                    print("\rAt step=%0.3d|loss=%0.4f|residual=%0.4f|z_reg=%0.5f|psnr=%0.3f"%(t,loss_t.item(),residual_t.item(),z_reg_loss_t.item(), psnr),end="\r")
                    loss_t.backward()
                    return loss_t
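                # LBFGS re-evaluates the loss internally, so optimizer.step() must
                # be given the closure; Adam also accepts a closure, which keeps
                # the two optimizer paths uniform.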
                try:
                    optimizer.step(closure)
                    recorded_z.append(z_sampled.data.cpu().numpy())
                    residual.append(residual_t.item())
                except (KeyboardInterrupt, Exception):
                    # the reverse pass can be numerically unstable (or the user
                    # quit); skip the rest of this configuration.
                    skip_to_next = True
                    break
            
            if skip_to_next:
                break
            
            # getting recovered and true images
            with torch.no_grad():
                x_test_np = x_test.data.cpu().numpy().transpose(0,2,3,1)
                z_unflat  = glow.unflatten_z(z_sampled, clone=False)
                x_gen     = glow(z_unflat, reverse=True, reverse_clone=False)
                x_gen     = glow.postprocess(x_gen,floor_clamp=False)
                x_gen_np  = x_gen.data.cpu().numpy().transpose(0,2,3,1)
                x_gen_np  = np.clip(x_gen_np,0,1)
                z_recov   = z_sampled.data.cpu().numpy()
            Original.append(x_test_np)
            Recovered.append(x_gen_np)
            Z_Recovered.append(z_recov)
            Residual_Curve.append(residual)
            Recorded_Z.append(recorded_z)
                    
            # freeing up memory for second loop
            glow.zero_grad()
            optimizer.zero_grad()
            del x_test, x_gen, optimizer, psnr_t, z_sampled, glow
            torch.cuda.empty_cache()
            print("\nbatch completed")
        
        if skip_to_next:
            print("\nskipping current loop due to instability or user triggered quit")
            continue
    
        # collecting everything together 
        Original     = np.vstack(Original)
        Recovered    = np.vstack(Recovered)
        Z_Recovered  = np.vstack(Z_Recovered)
        Recorded_Z   = np.vstack(Recorded_Z) 
        psnr         = [compare_psnr(x, y) for x,y in zip(Original, Recovered)]
        z_recov_norm = np.linalg.norm(Z_Recovered, axis=-1)
        
        # print performance analysis
        printout = "+-"*10 + "%s"%args.dataset + "-+"*10 + "\n"
        printout = printout + "\t n_test        = %d\n"%len(Recovered)
        printout = printout + "\t n             = %d\n"%n
        printout = printout + "\t m             = %d\n"%m
        printout = printout + "\t gamma         = %0.6f\n"%gamma
        printout = printout + "\t optimizer     = %s\n"%args.optim
        printout = printout + "\t lr            = %0.3f\n"%args.lr
        printout = printout + "\t steps         = %0.3f\n"%args.steps
        printout = printout + "\t init_strategy = %s\n"%args.init_strategy
        printout = printout + "\t init_std      = %0.3f\n"%args.init_std
        if init_norm is not None:
            printout = printout + "\t init_norm     = %0.3f\n"%init_norm
        printout = printout + "\t z_recov_norm  = %0.3f\n"%np.mean(z_recov_norm)
        printout = printout + "\t PSNR          = %0.3f\n"%(np.mean(psnr))
        print(printout)
        
        # saving printout
        if args.save_metrics_text:
            with open("%s_cs_glow_results.txt"%args.dataset,"a") as f:
                f.write('\n' + printout)
    
        
        # setting folder to save results in 
        if args.save_results:
            gamma = gamma.item()
            file_names = [name[0].split("/")[-1] for name in test_dataset.samples]
            if args.init_strategy == "random":
                save_path_template = save_path + "/cs_m_%d_gamma_%0.6f_steps_%d_lr_%0.3f_init_std_%0.2f_optim_%s"
                save_path = save_path_template%(m,gamma,args.steps,args.lr,args.init_std,args.optim)
            elif args.init_strategy == "random_fixed_norm":
                save_path_template = save_path+"/cs_m_%d_gamma_%0.6f_steps_%d_lr_%0.3f_init_%s_%0.3f_optim_%s"
                save_path          = save_path_template%(m,gamma,args.steps,args.lr,args.init_strategy,init_norm, args.optim)
            else:
                save_path_template = save_path + "/cs_m_%d_gamma_%0.6f_steps_%d_lr_%0.3f_init_%s_optim_%s"
                save_path          = save_path_template%(m,gamma,args.steps,args.lr,args.init_strategy,args.optim)
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path_1 = save_path + "_1"
                if not os.path.exists(save_path_1):
                    os.makedirs(save_path_1)
                    save_path = save_path_1
                else:
                    save_path_2 = save_path + "_2"
                    if not os.path.exists(save_path_2):
                        os.makedirs(save_path_2)
                        save_path = save_path_2
            # saving results now
            _ = [sio.imsave(save_path+"/"+name, x) for x,name in zip(Recovered,file_names)]
            Residual_Curve = np.array(Residual_Curve).mean(axis=0)
            np.save(save_path+"/original.npy", Original)
            np.save(save_path+"/recovered.npy", Recovered)
            np.save(save_path+"/z_recovered.npy", Z_Recovered)
            np.save(save_path+"/residual_curve.npy", Residual_Curve)                
            if init_norm is not None:
                np.save(save_path+"/Recorded_Z_init_norm_%d.npy"%init_norm, Recorded_Z) 
        torch.cuda.empty_cache()
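
# A minimal, self-contained sketch of the null-space initialization trick used
# above: perturbing x along null(A.T) leaves the measurements x @ A unchanged.
# The sizes and the perturbation norm below are illustrative assumptions, not
# values taken from the script.
import numpy as np
from scipy.linalg import null_space

n_sig, m_meas = 64, 16
rng = np.random.default_rng(0)
A_demo = rng.normal(size=(n_sig, m_meas)) / np.sqrt(m_meas)  # y = x @ A_demo
x_demo = rng.random(n_sig)

nullA = null_space(A_demo.T)                 # basis of {v : v @ A_demo = 0}
v = nullA @ rng.normal(size=nullA.shape[1])  # random null-space direction
v = v / np.linalg.norm(v) * 5.0              # fixed perturbation norm, as above

err = (x_demo + v) @ A_demo - x_demo @ A_demo
assert np.square(err).sum() < 1e-9           # measurements are unchanged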
Example No. 30
0
    P1P2_augmented_rank = np.linalg.matrix_rank(
        P1P2_augmented.cpu().data.numpy())
    if P1P2_rank == P1P2_augmented_rank:
        intersect_yes_or_no[(r1, r2)] = 1
    else:
        intersect_yes_or_no[(r1, r2)] = 0

ints = [k for k, v in intersect_yes_or_no.items() if v == 1]
ints_in_eng = [(id2rel_dict[pair[0]], id2rel_dict[pair[1]]) for pair in ints]
three_bundles = [id2rel_dict[x] for x in [3, 26, 34, 49, 45]]
zero_bundles = [id2rel_dict[x] for x in [0, 82, 48, 51, 42, 58]]

#3. Find the null space of each P and shift it by +r (or -r)
from scipy.linalg import null_space, subspace_angles
null_spaces = {}
for r in range(122):
    null = null_space(unique_projmat[r].data.cpu().numpy())
    if null.shape[1] != 0:
        null_spaces[r] = null + np.tile(rel_embeddings[r].reshape(
            (-1, 1)), null.shape[1])  # each column x solves Px = Pr
    else:
        null_spaces[r] = rel_embeddings[r].reshape((-1, 1))

#4. Find the principal angles between the (shifted) null spaces
comb = combinations(range(122), 2)
angles = {}
for r1, r2 in list(comb):
    angles[(r1, r2)] = np.rad2deg(
        subspace_angles(null_spaces[r1], null_spaces[r2]))[0].item()
place_of_birth = rel2id_dict['/people/person/place_of_birth']
nationality = rel2id_dict['/people/person/nationality']
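
# A small sketch of the subspace_angles call in step 4 above: SciPy returns the
# principal angles in descending order, so index [0] is the largest angle. The
# shapes below are illustrative; the real code compares null spaces of the
# learned projection matrices.
import numpy as np
from scipy.linalg import null_space, subspace_angles

rng = np.random.default_rng(0)
P1 = rng.normal(size=(6, 10))
P2 = rng.normal(size=(6, 10))
N1, N2 = null_space(P1), null_space(P2)      # each of shape (10, 4)
largest = np.rad2deg(subspace_angles(N1, N2))[0].item()
print("largest principal angle: %.2f deg" % largest)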
Example No. 31
0
import numpy as np
from scipy.linalg import null_space
from time import time

# DEBUG, PROFILE, Node, vector, get_sign, color_edge, and the COLOR_AH_*
# color constants are assumed to be defined elsewhere in this module.
def increment_arrangement(a, b, I, eps=np.finfo(np.float32).eps):
    # Normalize halfspace, |a| = 1.
    a = np.reshape(a, (1, -1))
    b = np.reshape(b, (1, 1))
    norm_a = np.linalg.norm(a)
    a = a / norm_a
    b = b / norm_a
    I.add_halfspace(a, b)
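    # The three phases below appear to follow the standard incremental
    # construction of hyperplane arrangements (cf. Edelsbrunner, "Algorithms in
    # Combinatorial Geometry"): find a seed edge whose closure meets h,
    # flood-fill to color all affected faces, then split the red faces along h.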
    # ==========================================================================
    # Phase 1: Find an edge e₀ in 𝓐(H) such that cl(e₀) ∩ h ≠ ∅
    # ==========================================================================
    if DEBUG:
        print('PHASE 1')
    n = I.num_halfspaces()
    u = I.get(0, 0)
    # Find an edge e incident on u such that aff(e) is not parallel to h.
    for e in u.superfaces:
        v_e = vector(e)
        dist = np.linalg.norm(a @ v_e)  # TODO Check this
        if dist > eps:
            break
    if DEBUG:
        assert (dist > eps)
    # Find edge e₀ such that cl(e₀) ∩ h ≠ ∅.
    e0 = e
    v_e0 = vector(e0) / np.linalg.norm(vector(e0))
    while True:
        if color_edge(e0, a, b, eps) > COLOR_AH_WHITE:
            # if DEBUG:
            #     print(e0._sv_key)
            #     print('color', color_edge(e0, a, b, eps))
            break
        # Find the vertex of e0 closer to h.
        if len(e0.subfaces) == 2:
            v0 = e0.subfaces[0]
            v1 = e0.subfaces[1]
            d0 = np.abs(a @ v0.int_pt - b)
            d1 = np.abs(a @ v1.int_pt - b)
            if d0 < d1:
                v = v0
            else:
                v = v1
        elif len(e0.subfaces) == 1:
            v = e0.subfaces[0]
        # Find e in v such that aff(e0) == aff(e).
        e_min = None
        v_min = None
        min_dist = np.inf
        for e in v.superfaces:
            if e is e0:
                continue
            v_e = vector(e) / np.linalg.norm(vector(e))
            dist = np.linalg.norm(v_e - (v_e0.T @ v_e) * v_e0)
            if dist < min_dist:
                e_min = e
                v_min = v_e
                min_dist = dist
        e0 = e_min
        # if DEBUG:
        #     print('e0', color_edge(e0, a, b, eps))
        #     print('e0', e0._sv_key.astype(float))

    # ==========================================================================
    # Phase 2: Mark all faces f with cl(f) ∩ h ≠ ∅ pink, red, or crimson.
    # ==========================================================================
    if DEBUG:
        print('PHASE 2')
    # Add some 2-face incident upon e₀ to Q and mark it green.
    f = e0.superfaces[0]
    f.color = COLOR_AH_GREEN
    Q = [f]
    # Color vertices, edges, and 2-faces of 𝓐(H).
    d = a.shape[1]
    L = [[] for i in range(d + 1)]
    while Q:
        f = Q.pop()
        for e in f.subfaces:
            if e.color != COLOR_AH_WHITE:
                continue
            color_e = color_edge(e, a, b, eps)
            if color_e > COLOR_AH_WHITE:
                # Mark each white vertex v ∈ h crimson and insert v into L₀.
                for v in e.subfaces:
                    if v.color == COLOR_AH_WHITE:
                        color_v = color_vertex(v, a, b, eps)
                        if color_v == COLOR_AH_CRIMSON:
                            v.color = color_v
                            L[0].append(v)
                # Color e and insert e into L₁.
                e.color = color_e
                L[1].append(e)
                # Mark all white 2-faces green and put them into Q.
                for g in e.superfaces:
                    if g.color == COLOR_AH_WHITE:
                        g.color = COLOR_AH_GREEN
                        Q.append(g)
    # Color k-faces, 2 ≤ k ≤ d.
    for k in range(2, d + 1):
        for f in L[k - 1]:
            for g in f.superfaces:
                if g.color != COLOR_AH_WHITE and g.color != COLOR_AH_GREEN:
                    continue

                if f.color == COLOR_AH_PINK:
                    above = 0
                    below = 0
                    for f_g in g.subfaces:
                        if f_g.color == COLOR_AH_RED:
                            above = 1
                            below = 1
                            break
                        if f_g._sign_bit_n != n:
                            s = get_sign(a @ f_g.int_pt - b, eps)
                            f_g._sign_bit_n = n
                            f_g._sign_bit = s
                        else:
                            s = f_g._sign_bit
                        # if DEBUG:
                        #     print('f_g', s.astype(float))
                        if s > 0:
                            above = 1
                        elif s < 0:
                            below = 1
                    if above * below == 1:
                        g.color = COLOR_AH_RED
                    else:
                        g.color = COLOR_AH_PINK

                elif f.color == COLOR_AH_RED:
                    g.color = COLOR_AH_RED

                elif f.color == COLOR_AH_CRIMSON:
                    crimson = True
                    for f_g in g.subfaces:
                        if f_g.color != COLOR_AH_CRIMSON:
                            crimson = False
                            break
                    if crimson:
                        g.color = COLOR_AH_CRIMSON
                    else:
                        g.color = COLOR_AH_PINK

                else:
                    # print('f sv', f._sv_key.astype(float))
                    # print('f rank', f.rank)
                    # for u in f.subfaces:
                    #     print('u', u.color)
                    # print('f color', f.color)
                    assert (False)
                # In any case, insert g into Lₖ.
                L[k].append(g)
    if PROFILE:
        for k in range(0, d + 1):
            print('L_%d' % k, len(L[k]))

    # ==========================================================================
    # Phase 3: Update all marked faces.
    # ==========================================================================
    if DEBUG:
        print('PHASE 3')
    if PROFILE:
        step_1_time = 0
        step_2_time = 0
        step_3_time = 0
        step_4_time = 0
        step_4_hit = 0
        step_4_total = 0
        step_5_time = 0
        step_5_hit = 0
        step_5_total = 0
        step_6_time = 0
        red_count = 0
    for k in range(0, d + 1):
        f_k = len(L[k])
        for i in range(f_k):
            g = L[k][i]
            if g.color == COLOR_AH_PINK:
                g.color = COLOR_AH_GREY
                # Add to grey subfaces of superfaces
                for u in g.superfaces:
                    u._grey_subfaces.append(g)
            elif g.color == COLOR_AH_CRIMSON:
                g.color = COLOR_AH_BLACK
                # Add to black subfaces of superfaces
                for u in g.superfaces:
                    u._black_subfaces.append(g)
            elif g.color == COLOR_AH_RED:
                pt = (I.A @ g.int_pt - I.b).flatten().T
                g_sv = get_sign(pt, eps)

                if PROFILE:
                    red_count += 1
                    t_start = time()
                # Step 1. Create g_a = g ∩ h⁺ and g_b = g ∩ h⁻. Remove g from
                # 𝓐(H) and Lₖ and replace with g_a, g_b.
                g_a = Node(k)
                g_a.color = COLOR_AH_GREY
                g_a._sv_key = g_sv.copy()
                g_a._sv_key[-1] = 1
                g_b = Node(k)
                g_b.color = COLOR_AH_GREY
                g_b._sv_key = g_sv.copy()
                g_b._sv_key[-1] = -1
                I.remove_node(g)
                L[k][i] = g_a
                L[k].append(g_b)
                I.add_node(g_a)
                I.add_node(g_b)

                if PROFILE:
                    step_1_time += time() - t_start
                    t_start = time()
                # Step 2. Create the black face f = g ∩ h, connect it to g_a and
                # g_b, and put f into 𝓐(H) and Lₖ₋₁.
                f = Node(k - 1)
                f.color = COLOR_AH_BLACK
                f._sv_key = g_sv.copy()
                f._sv_key[-1] = 0
                f.superfaces = [g_a, g_b]
                g_a.subfaces = [f]
                g_b.subfaces = [f]
                g_a._black_subfaces = [f]
                g_b._black_subfaces = [f]
                L[k - 1].append(f)
                I.add_node(f)

                if PROFILE:
                    step_2_time += time() - t_start
                    t_start = time()
                # Step 3. Connect each red superface of g with g_a and g_b.
                for r in g.superfaces:
                    if DEBUG:
                        assert (r.color == COLOR_AH_RED or r.rank == d + 1)
                    g_a.superfaces.append(r)
                    g_b.superfaces.append(r)
                    r.subfaces.append(g_a)
                    r.subfaces.append(g_b)
                    r._grey_subfaces.append(g_a)
                    r._grey_subfaces.append(g_b)

                if PROFILE:
                    step_3_time += time() - t_start
                    t_start = time()
                # Step 4. Connect each white or grey subface of g with g_a if it
                # is in h⁺, and with g_b, otherwise.
                for u in g.subfaces:
                    if PROFILE:
                        step_4_total += 1
                    if u.color != COLOR_AH_WHITE and u.color != COLOR_AH_GREY:
                        if DEBUG:
                            assert (u.color == COLOR_AH_BLACK
                                    )  # FIXME Can there be black subfaces?
                        continue
                    if u._sign_bit_n != n:
                        if PROFILE:
                            step_4_hit += 1
                        s = get_sign(a @ u.int_pt - b, eps)
                        u._sign_bit_n = n
                        u._sign_bit = s
                    else:
                        s = u._sign_bit
                    if s == 1:
                        g_a.subfaces.append(u)
                        if u.color == COLOR_AH_GREY:
                            g_a._grey_subfaces.append(u)
                        u.superfaces.append(g_a)
                    elif s == -1:
                        g_b.subfaces.append(u)
                        if u.color == COLOR_AH_GREY:
                            g_b._grey_subfaces.append(u)
                        u.superfaces.append(g_b)
                    else:
                        assert (False)

                if PROFILE:
                    step_4_time += time() - t_start
                    t_start = time()
                # Step 5. If k = 1, connect f with the -1 face; otherwise,
                # connect f with the black subfaces of the grey subfaces of g.
                if k == 1:
                    zero = I.get(-1, 0)
                    f.subfaces.append(zero)
                    zero.superfaces.append(f)
                else:
                    # # VERSION 1
                    # V = dict()
                    # for u in g.subfaces:
                    #     if u.color != COLOR_AH_GREY:
                    #         continue
                    #     if DEBUG:
                    #         assert(u in g._grey_subfaces)
                    #     for v in u.subfaces:
                    #         if PROFILE:
                    #             step_5_total += 1
                    #         if v.color == COLOR_AH_BLACK:
                    #             if DEBUG:
                    #                 assert(v in u._black_subfaces)
                    #             V[tuple(v._sv_key)] = v
                    # V = list(V.values())

                    # # VERSION 2
                    # V = dict()
                    # for u in g._grey_subfaces:
                    #     for v in u._black_subfaces:
                    #         if PROFILE:
                    #             step_5_total += 1
                    #         V[tuple(v._sv_key)] = v
                    # V = list(V.values())

                    # VERSION 3: each black subface is visited once per incident
                    # grey subface (i.e., twice), so a toggle bit dedups without a dict.
                    V = list()
                    for u in g._grey_subfaces:
                        for v in u._black_subfaces:
                            if PROFILE:
                                step_5_total += 1
                            if v._black_bit == 0:
                                V.append(v)
                                v._black_bit = 1
                            else:
                                v._black_bit = 0

                    if PROFILE:
                        step_5_hit += len(V)
                    for v in V:
                        f.subfaces.append(v)
                        v.superfaces.append(f)

                if PROFILE:
                    step_5_time += time() - t_start
                    t_start = time()
                # Step 6. Update the interior points for f, g_a, and g_b.
                for u in [f, g_a, g_b]:
                    if u.rank == 0:
                        id0 = np.where(u._sv_key == 0)[0]
                        if DEBUG:
                            assert (len(id0) == d)
                        # TODO THIS IS OKAY I THINK
                        u.int_pt = np.linalg.solve(I.A[id0], I.b[id0])
                    elif u.rank == 1:
                        if len(u.subfaces) == 2:
                            p = np.array(
                                [v.int_pt.flatten() for v in u.subfaces]).T
                            u.int_pt = np.mean(p, axis=1, keepdims=True)
                        elif len(u.subfaces) == 1:
                            v = u.subfaces[0]
                            null = null_space(I.A[u._sv_key == 0], eps)
                            v_sv = get_sign(I.A @ v.int_pt - I.b, eps)
                            i = np.where(u._sv_key != v_sv)[0][0]
                            dot = I.A[i] @ null
                            if dot * u._sv_key[i] > 0:
                                u.int_pt = v.int_pt + null
                            else:
                                u.int_pt = v.int_pt - null
                        else:
                            assert (False)
                    else:
                        if DEBUG:
                            assert (len(u.subfaces) >= 2)
                        # p = np.array([v.int_pt.flatten() for v in u.subfaces[0:u.rank+1]]).T
                        p = np.array([v.int_pt.flatten()
                                      for v in u.subfaces]).T
                        u.int_pt = np.mean(p, axis=1, keepdims=True)
                    if DEBUG:
                        # Check that the interior point reproduces the face's sign vector.
                        pt = (I.A @ u.int_pt - I.b).flatten().T
                        sv = get_sign(pt, eps)
                        if not np.array_equal(u._sv_key, sv):
                            print('u', u._sv_key.astype(float))
                            print('u rank', u.rank)
                            if u.rank >= 1:
                                for v in u.subfaces:
                                    print('v', v._sv_key.astype(float))
                            print('p', sv.astype(float))
                        assert (np.array_equal(u._sv_key, sv))
                        # Check for duplicates in super/sub faces.

                if PROFILE:
                    step_6_time += time() - t_start
                    t_start = time()
    if PROFILE:
        print(' # red: %d' % (red_count, ))
        print('step 1: %0.8f' % (step_1_time))
        print('step 2: %0.8f' % (step_2_time))
        print('step 3: %0.8f' % (step_3_time))
        print('step 4: %0.8f %d/%d' % (step_4_time, step_4_hit, step_4_total))
        print('step 5: %0.8f %d/%d' % (step_5_time, step_5_hit, step_5_total))
        print('step 6: %0.8f' % (step_6_time))
        print('   k=0: %0.8f' % (0))
        print('   k=1: %0.8f' % (0))
        print('  k>=2: %0.8f' % (0))
    # Clear all colors, grey and black subface lists.
    for k in range(0, d + 1):
        for f in L[k]:
            f.color = COLOR_AH_WHITE
            f._grey_subfaces.clear()
            f._black_subfaces.clear()

    return I
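
# A toy sketch of the Step 6 rank-1 case above: an unbounded edge has a single
# vertex, its direction spans the null space of the constraints active on the
# edge, and the sign of A[i] @ null picks the ray pointing into the correct
# halfspace. The 2-line arrangement below is an illustrative assumption.
import numpy as np
from scipy.linalg import null_space

A_demo = np.array([[1.0, 0.0],
                   [0.0, 1.0]])  # two lines in the plane: x = 0 and y = 0
sv = np.array([0, 1])            # edge e = {x = 0, y > 0}
vertex = np.zeros((2, 1))        # its single subface: the origin

direction = null_space(A_demo[sv == 0])      # (2, 1): the y-axis, up to sign
i = 1                                        # a constraint with nonzero sign on e
dot = (A_demo[i] @ direction).item()
int_pt = vertex + direction if dot * sv[i] > 0 else vertex - direction
print(int_pt.ravel())                        # -> [0. 1.], an interior point of e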