def def_pos(self,distance,k):
		major_axis = True		# Project along major or minor axes?

		# Load Ellipticity Data for 100 Halos
		pkl_file = open(self.root+'/nkern/Stacking/Halo_Shape/100_halo_ellipticities.pkl','rb')
		input = pkl.Unpickler(pkl_file)
		d = input.load()
		eig_vec = d['eig_vec']
		eig_val = d['eig_val']

		# Define theta and phi for the unit vector and construct the unit vector in cartesian coordinates
		eig_vec = eig_vec[k][0] if major_axis else eig_vec[k][1]
		r = norm(eig_vec)
		theta = np.arccos(eig_vec[2]/r)
		phi = np.arctan(eig_vec[1]/eig_vec[0])

	#	if random.random()<.5:
	#		eig_vec *= -1.

		theta = random.normal(theta,.075)
		phi = random.normal(phi,.075)
		x = np.sin(theta)*np.cos(phi)
		y = np.sin(theta)*np.sin(phi)
		z = np.cos(theta)	

		unit = np.array([x,y,z])/norm(np.array([x,y,z]))
		return unit*distance	
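
As a standalone sketch of the sampling step above (a hypothetical helper assuming only NumPy, with a made-up axis vector in place of the halo eigenvector data):

import numpy as np
from numpy.linalg import norm

def jittered_direction(axis, distance, jitter=0.075, rng=np.random):
    # convert the axis to spherical angles
    r = norm(axis)
    theta = np.arccos(axis[2] / r)
    phi = np.arctan2(axis[1], axis[0])
    # add a small Gaussian scatter around the axis direction
    theta = rng.normal(theta, jitter)
    phi = rng.normal(phi, jitter)
    # back to a cartesian unit vector, scaled to the requested distance
    unit = np.array([np.sin(theta) * np.cos(phi),
                     np.sin(theta) * np.sin(phi),
                     np.cos(theta)])
    return unit / norm(unit) * distance

print(jittered_direction(np.array([1.0, 0.5, 0.2]), distance=1.5))
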
    def func(self, X, V):
        k = self.C.TFdata.k
        v1 = self.C.TFdata.v1
        w1 = self.C.TFdata.w1
        
        if k >=0:
            J_coords = self.F.sysfunc.J_coords
            w = sqrt(k)
        
            q = v1 - (1j/w)*matrixmultiply(self.F.sysfunc.J_coords,v1)
            p = w1 + (1j/w)*matrixmultiply(transpose(self.F.sysfunc.J_coords),w1)
            
            p /= linalg.norm(p)
            q /= linalg.norm(q)

            p = reshape(p,(p.shape[0],))
            q = reshape(q,(q.shape[0],))
            
            direc = conjugate(1/matrixmultiply(transpose(conjugate(p)),q))
            p = direc*p

            l1 = firstlyapunov(X, self.F.sysfunc, w, J_coords=J_coords, p=p, q=q)
            
            return array([l1])
        else:
            return array([1])
Example #3
def test_c_samples_scaling():
    """Test C scaling by n_samples
    """
    X = iris.data[iris.target != 2]
    y = iris.target[iris.target != 2]
    X2 = np.r_[X, X]
    y2 = np.r_[y, y]

    clfs = [svm.SVC(tol=1e-6, kernel='linear', C=0.1),
            svm.SVR(tol=1e-6, kernel='linear', C=100),
            svm.LinearSVC(tol=1e-6, C=0.1),
            linear_model.LogisticRegression(penalty='l1', tol=1e-6, C=100),
            linear_model.LogisticRegression(penalty='l2', tol=1e-6),
            svm.NuSVR(tol=1e-6, kernel='linear')]

    for clf in clfs:
        clf.set_params(scale_C=False)
        coef_ = clf.fit(X, y).coef_
        coef2_ = clf.fit(X2, y2).coef_
        error_no_scale = linalg.norm(coef2_ - coef_) / linalg.norm(coef_)
        assert_true(error_no_scale > 1e-3)

        clf.set_params(scale_C=True)
        coef_ = clf.fit(X, y).coef_
        coef2_ = clf.fit(X2, y2).coef_
        error_with_scale = linalg.norm(coef2_ - coef_) / linalg.norm(coef_)
        assert_true(error_with_scale < 1e-5)
Example #4
def poseLinearCalibration(objectPoints, imagePoints, cameraMatrix, distCoeffs, model, retMatrix=False):
    '''
    takes calibration points and linearly estimates the camera pose
    '''
    # map coordinates with z=0
    xm, ym = objectPoints.T[:2]
    # undistort ccd points, x,y homogenous undistorted
    xp, yp = cl.ccd2homUndistorted(imagePoints, cameraMatrix, distCoeffs, model)
    
    A = dataMatrixPoseCalib(xm, ym, xp, yp)
    
    _, s, v = ln.svd(A)
    m = v[-1] # select right singular vector of smaller singular value
    
    # normalize and ensure that points are in front of the camera
    m /= np.sqrt(ln.norm(m[:3])*ln.norm(m[3:6])) * np.sign(m[-1])
    
    # rearrange as rVec, tVec
    R = np.array([m[:3], m[3:6], np.cross(m[:3], m[3:6])]).T
    rVec = cv2.Rodrigues(R)[0]
    tVec = m[6:]
    
    if retMatrix:
        return rVec, tVec, A
    
    return rVec, tVec
 def fgmres(self,rhs,tol=1e-6,restrt=None,maxiter=None,callback=None):
     if maxiter == None:
         maxiter = len(rhs)
     if restrt == None:
         restrt = 2*maxiter
     # implemented as in [Saad, 1993]
     # start
     x = zeros(len(rhs))
     H = zeros((restrt+1, restrt))
     V = zeros((len(rhs),restrt))
     Z = zeros((len(rhs),restrt))
     # Arnoldi process (with modified Gramm-Schmidt)
     res = 1.
     j = 0
     r = rhs - self.point.matvec(x)
     beta = norm(r)
     V[:,0]=r/beta
     while j < maxiter and res > tol:
         Z[:,j] = self.point.psolve(V[:,j])
         w = self.point.matvec(Z[:,j])
         for i in range(j+1):
             H[i,j]=dot(w,V[:,i])
             w = w - H[i,j]*V[:,i]
         H[j+1,j] = norm(w)
         V[:,j+1]=w/H[j+1,j]
         e = zeros(j+2)
         e[0]=1.
         y, res, rank, sing_val = lstsq(H[:j+2,:j+1],beta*e)
         j += 1
         print("# GMRES| iteration :", j, "res: ", res/beta)
         self.resid = r_[self.resid,res/beta]
         Zy = dot(Z[:,:j],y)
     x = x + Zy
     info = 1
     return (x,info)
def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p):
    p = np.reshape(p, A.shape)
    p_norm = norm(p)
    perturbation = eps * p * (A_norm / p_norm)
    X_prime = expm(A + perturbation)
    scaled_relative_error = norm(X_prime - X) / (X_norm * eps)
    return -scaled_relative_error
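
A hedged usage sketch (assuming expm and norm come from scipy.linalg as in the snippet, and inventing a small test matrix): the helper scores one perturbation direction, and an optimizer would minimize this value to estimate the expm condition number.

import numpy as np
from scipy.linalg import expm, norm

A = np.array([[0.0, 1.0], [-1.0, 0.0]])
A_norm = norm(A)
X = expm(A)
X_norm = norm(X)
eps = 1e-6
p = np.random.randn(A.size)  # one random search direction, flattened
print(_help_expm_cond_search(A, A_norm, X, X_norm, eps, p))
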
Example #7
def pcosine(u, v):
    """Computes the Cosine distance (positive space) between 1-D arrays.

    The Cosine distance (positive space) between `u` and `v` is defined as

    .. math::

        d(u, v) = 1 - abs \\left( \\frac{u \\cdot v}{||u||_2 ||v||_2} \\right)

    where :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.

    Parameters
    ----------
    u : array
        Input array.
    v : array
        Input array.

    Returns
    -------
    cosine : float
        Cosine distance between `u` and `v`.

    """

    # validate vectors like scipy does
    u = ssd._validate_vector(u)
    v = ssd._validate_vector(v)

    dist = 1. - np.abs(np.dot(u, v) / (linalg.norm(u) * linalg.norm(v)))

    return dist
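
For example (assuming the numpy and scipy imports the snippet relies on), orthogonal vectors are at the maximum distance of 1, while parallel or anti-parallel vectors are at distance 0:

import numpy as np
print(pcosine(np.array([1.0, 0.0]), np.array([0.0, 1.0])))   # 1.0
print(pcosine(np.array([1.0, 0.0]), np.array([-2.0, 0.0])))  # 0.0 (sign is ignored)
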
Example #8
File: oPCA.py Project: asaich/sima
def power_iteration_oPCA(data, num_pcs, tolerance=0.01, max_iter=1000):
    """Compute OPCs recursively with the power iteration method.

    Parameters
    ----------
    data : array
        see offsetPCA
    num_pcs : int
        The number of oPCs to be computed.
    tolerance : float, optional
        The criterion for fractional difference between subsequent estimates
        used to determine when to terminate the algorithm. Default: 0.01.
    max_iter : int, optional
        The maximum number of iterations. Default: 1000.

    Returns
    -------
    see offsetPCA


    WARNING: INCOMPLETE!!!!!!!!!!!
    """

    for X in (data):
        U0 = X.reshape(1, -1)
        p = X.shape
        break
    eivects, eivals = [], []
    Z = np.zeros((1, p))
    for pc_idx in range(num_pcs):
        U = U0 / norm(U0)  # np.random.randn(num_pcs, p)
        iter_count = 0
        while True:
            print(iter_count)
            iter_count += 1
            if iter_count > max_iter:
                warnings.warn("max_iter reached by power_iteration_oPCA")
                break
            _opca._Z_update(Z, U, data)
            U_new = np.dot(np.dot(np.dot(U, U.T), inv(np.dot(Z, U.T))), Z)
            error = norm(U_new - U) / norm(U)
            U = U_new / norm(U_new)
            if error < tolerance:
                break
        eivects.append(U.T)
        eivals.append(float(np.dot(Z, U.T) /
                            np.dot(U, U.T)) / (2. * (X.shape[0] - 1.)))
        """
        XUtU = np.dot(np.dot(X, U.T), U)
        X -= XUtU
        OX[1:] -= XUtU[:-1]
        OX[:-1] -= XUtU[1:]  # TODO: isn't this wrong???
        """
        print('Eigenvalue', pc_idx + 1, 'found')
    eivects = np.concatenate(eivects, axis=1)
    for i in range(eivects.shape[1]):
        eivects[:, i] /= norm(eivects[:, i])
    eivals = np.array(eivals)
    idx = np.argsort(-eivals)
    return eivals[idx], eivects[:, idx], np.dot(X, eivects[:, idx])
Example #9
    def getVelocity(self,p, V, E, last=False):
        """
        This function calculates the velocity for the robot with RRT.
        The inputs are (given in order):
            p        = the current x-y position of the robot
            V        = points of the tree (2 x No. of vertices)
            E        = edges of the tree  (2 x No. of nodes on the tree)
            last     = True, if the current region is the last region
                     = False, if the current region is NOT the last region
        """

        pose     = mat(p).T

        #dis_cur = distance between current position and the next point
        dis_cur  = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose

        heading = E[1,self.E_current_column]        # index of the current heading point on the tree
        if norm(dis_cur) < 1.5*self.radius:         # go to next point
            if not heading == shape(V)[1]-1:
                self.E_current_column = self.E_current_column + 1
                dis_cur  = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
            #else:
            #    dis_cur  = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- vstack((V[1,E[0,self.E_current_column]],V[2,E[0,self.E_current_column]]))

        Vel = zeros([2,1])
        Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5                    #TUNE THE SPEED LATER
        return Vel
def test_lasso_lars_vs_lasso_cd(verbose=False):
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results.
    X = 3 * diabetes.data

    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert_less(error, 0.01)

    # similar test, with the classifiers
    for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
        clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
                                  normalize=False).fit(X, y)
        err = linalg.norm(clf1.coef_ - clf2.coef_)
        assert_less(err, 1e-3)

    # same test, with normalized data
    X = diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
                                  tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when early stopping is used.
    # (test : before, in the middle, and in the last part of the path)
    alphas_min = [10, 0.9, 1e-4]

    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)

    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
                                      tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)
Example #12
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
    assert_equal(cov.ch_names, cov_desired.ch_names)
    err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /
           linalg.norm(cov.data, ord='fro'))
    assert_true(err < tol, msg='%s >= %s' % (err, tol))
    if nfree:
        assert_equal(cov.nfree, cov_desired.nfree)
def test_rank_deficient_design():
    # consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with n_features < rank) in the same way
    # as coordinate descent Lasso
    y = [5, 0, 5]
    for X in ([[5, 0],
               [0, 5],
               [10, 10]],

              [[10, 10, 0],
               [1e-32, 0, 0],
               [0, 0, 1]],
              ):
        # To be able to use the coefs to compute the objective function,
        # we need to turn off normalization
        lars = linear_model.LassoLars(.1, normalize=False)
        coef_lars_ = lars.fit(X, y).coef_
        obj_lars = (1. / (2. * 3.)
                    * linalg.norm(y - np.dot(X, coef_lars_)) ** 2
                    + .1 * linalg.norm(coef_lars_, 1))
        coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
        coef_cd_ = coord_descent.fit(X, y).coef_
        obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
                  + .1 * linalg.norm(coef_cd_, 1))
        assert_less(obj_lars, obj_cd * (1. + 1e-8))
Example #14
 def calculate_examples(mean, sigma, weights, c = 2):
     from scipy.linalg import norm
     
     mean_p = mean + c * (weights/norm(weights)) * norm(sigma)
     mean_m = mean - c * (weights/norm(weights)) * norm(sigma)
     
     return np.array([mean_p, mean_m])
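
A quick usage sketch with made-up numbers: the function returns two points displaced from the mean along the normalized weight direction by c times the norm of sigma.

import numpy as np
mean = np.array([0.0, 0.0])
sigma = np.array([1.0, 1.0])
weights = np.array([3.0, 4.0])
print(calculate_examples(mean, sigma, weights, c=2))
# approximately [[ 1.697  2.263]
#                [-1.697 -2.263]]
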
Example #15
def get_neuromag_transform(lpa, rpa, nasion):
    """Creates a transformation matrix from RAS to Neuromag-like space

    Resets the origin to mid-distance of peri-auricular points with nasion
    passing through y-axis.
    (mne manual, pg. 97)

    Parameters
    ----------
    lpa : numpy.array, shape = (1, 3)
        Left peri-auricular point coordinate.
    rpa : numpy.array, shape = (1, 3)
        Right peri-auricular point coordinate.
    nasion : numpy.array, shape = (1, 3)
        Nasion point coordinate.

    Returns
    -------
    trans : numpy.array, shape = (3, 3)
        Transformation matrix to Neuromag-like space.
    """
    origin = (lpa + rpa) / 2
    nasion = nasion - origin
    lpa = lpa - origin
    rpa = rpa - origin
    axes = np.empty((3, 3))
    axes[1] = nasion / linalg.norm(nasion)
    axes[2] = np.cross(axes[1], lpa - rpa)
    axes[2] /= linalg.norm(axes[2])
    axes[0] = np.cross(axes[1], axes[2])

    trans = linalg.inv(axes)
    return trans
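
A hedged usage sketch with invented fiducial coordinates (assuming numpy and scipy.linalg are imported as in the snippet); the result is a 3x3 matrix mapping RAS coordinates into the Neuromag-like frame:

import numpy as np
lpa = np.array([[-7.1, 0.0, 0.0]])
rpa = np.array([[7.1, 0.0, 0.0]])
nasion = np.array([[0.0, 10.2, 0.0]])
trans = get_neuromag_transform(lpa, rpa, nasion)
print(trans.shape)         # (3, 3)
print(np.round(trans, 3))  # the identity for these symmetric points
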
Example #16
def test_orthogonal_procrustes():
    np.random.seed(1234)
    for m, n in ((6, 4), (4, 4), (4, 6)):
        # Sample a random target matrix.
        B = np.random.randn(m, n)
        # Sample a random orthogonal matrix
        # by computing eigh of a sampled symmetric matrix.
        X = np.random.randn(n, n)
        w, V = eigh(X.T + X)
        assert_allclose(inv(V), V.T)
        # Compute a matrix with a known orthogonal transformation that gives B.
        A = np.dot(B, V.T)
        # Check that an orthogonal transformation from A to B can be recovered.
        R, s = orthogonal_procrustes(A, B)
        assert_allclose(inv(R), R.T)
        assert_allclose(A.dot(R), B)
        # Create a perturbed input matrix.
        A_perturbed = A + 1e-2 * np.random.randn(m, n)
        # Check that the orthogonal procrustes function can find an orthogonal
        # transformation that is better than the orthogonal transformation
        # computed from the original input matrix.
        R_prime, s = orthogonal_procrustes(A_perturbed, B)
        assert_allclose(inv(R_prime), R_prime.T)
        # Compute the naive and optimal transformations of the perturbed input.
        naive_approx = A_perturbed.dot(R)
        optim_approx = A_perturbed.dot(R_prime)
        # Compute the Frobenius norm errors of the matrix approximations.
        naive_approx_error = norm(naive_approx - B, ord='fro')
        optim_approx_error = norm(optim_approx - B, ord='fro')
        # Check that the orthogonal Procrustes approximation is better.
        assert_array_less(optim_approx_error, naive_approx_error)
def hess(A):
    """Computes the upper Hessenberg form of A using Householder reflectors.
    input:  A, mxn array
    output: Q, orthogonal mxm array
            H, upper Hessenberg
            s.t. Q.T.dot(H).dot(Q) = A
    """
    # similar approach as the householder function.
    # again, not perfectly optimized, but good enough.
    Q = np.eye(A.shape[0]).T
    H = np.array(A, order="C")
    # initialize m and n for convenience
    m, n = H.shape
    # avoid reallocating v in the for loop
    v = np.empty(A.shape[1]-1)
    for k in range(n-2):
        # get a slice of the temporary array
        vk = v[k:]
        # fill it with corresponding values from R
        vk[:] = H[k+1:,k]
        # add in the term that makes the reflection work
        vk[0] += copysign(la.norm(vk), vk[0])
        # normalize it so it's an orthogonal transform
        vk /= la.norm(vk)
        # apply projection to H on the left
        H[k+1:,k:] -= 2 * np.outer(vk, vk.dot(H[k+1:,k:]))
        # apply projection to H on the right
        H[:,k+1:] -= 2 * np.outer(H[:,k+1:].dot(vk), vk)
        # Apply it to Q
        Q[k+1:] -= 2 * np.outer(vk, vk.dot(Q[k+1:]))
    return Q, H
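
A brief sanity check (a sketch assuming numpy, scipy.linalg as la, and copysign, e.g. from math, are imported as the snippet expects): H should be upper Hessenberg and Q orthogonal, so A is similar to H.

import numpy as np
A = np.random.randn(6, 6)
Q, H = hess(A)
print(np.allclose(np.tril(H, -2), 0.0))      # True: zero below the first subdiagonal
print(np.allclose(Q.dot(Q.T), np.eye(6)))    # True: Q is orthogonal
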
Example #18
 def test_bar3( self ):
     '''Clamped bar with recursive constraints (load at right end)
     [0]-[1]-[2]-[3]
     u[1] = 0.2 * u[2], u[2] = 0.2 * u[3], R[3] = 10
     '''
     self.domain.coord_max = (3,0,0)
     self.domain.shape = (3,)
     self.ts.bcond_list =  [BCDof(var='u', dof = 0, value = 0.),
                            BCDof(var='u', dof = 1, link_dofs = [2], link_coeffs = [0.5] ),
                            BCDof(var='u', dof = 2, link_dofs = [3], link_coeffs = [1.] ),
                            BCDof(var='f', dof = 3, value = 1 ) ]
     # system solver
     u = self.tloop.eval()
     # expected solution
     u_ex = array([-0. ,  0.1 , 0.2 , 0.2],
                   dtype = float )
     difference = sqrt( norm( u-u_ex ) )
     self.assertAlmostEqual( difference, 0 )
     return         
     #
     # '---------------------------------------------------------------'
     # 'Clamped bar with recursive constraints (displ at right end)'
     # 'u[1] = 0.5 * u[2], u[2] = 1.0 * u[3], u[3] = 1'
     self.ts.bcond_list =  [BCDof(var='u', dof = 0, value = 0.),
                            BCDof(var='u', dof = 1, link_dofs = [2], link_coeffs = [0.5] ),
                            BCDof(var='u', dof = 2, link_dofs = [3], link_coeffs = [1.] ),
                            BCDof(var='u', dof = 3, value = 1 ) ]
     u = self.tloop.eval()
     # expected solution
     u_ex = array([0. ,  0.5 , 1 ,  1], dtype = float )
     difference = sqrt( norm( u-u_ex ) )
     self.assertAlmostEqual( difference, 0 )
Example #19
def test_orthogonal_procrustes_skbio_example():
    # This transformation is also exact.
    # It uses translation, scaling, and reflection.
    #
    #   |
    #   | a
    #   | b
    #   | c d
    # --+---------
    #   |
    #   |       w
    #   |
    #   |       x
    #   |
    #   |   z   y
    #   |
    #
    A_orig = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], dtype=float)
    B_orig = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], dtype=float)
    B_standardized = np.array([
        [-0.13363062, 0.6681531],
        [-0.13363062, 0.13363062],
        [-0.13363062, -0.40089186],
        [0.40089186, -0.40089186]])
    A, A_mu = _centered(A_orig)
    B, B_mu = _centered(B_orig)
    R, s = orthogonal_procrustes(A, B)
    scale = s / np.square(norm(A))
    B_approx = scale * np.dot(A, R) + B_mu
    assert_allclose(B_approx, B_orig)
    assert_allclose(B / norm(B), B_standardized)
Example #20
def getRank(M, beta = 0.8, eps = 1e-6):
    ''' Loop until get right rank
    
    	Args: M: rank matrix; beta: non-teleport weight; eps: epsilon
    	
    	Returns: Each node's Rank
    '''
    # Preparation
    n1, n2 = M.shape
    r1 = 1.0 / n2 * np.ones((n2, 1))
    r0 = np.ones((n2, 1))
    n = 0
    print('|Loop|  epsilon|time(s)|')
    print('|----|---------|-------|')
    # Loop
    while norm(r1 - r0, 1) > eps:
        t0 = time.perf_counter()
        n += 1
        r0 = r1
        r1 = beta * M.dot(r0)
        r1 = r1 + (1 - beta) / n2
        sum_r1 = r1.sum()
        r1 = r1 + (1 - sum_r1) / n2
        t1 = time.perf_counter() - t0
        print('|%4d|%6.3e|%7.3f|' % (n, norm(r1 - r0, 1), t1))
    return r1
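
A tiny usage sketch (assuming numpy, time, and a norm that accepts an ord argument, e.g. numpy.linalg.norm, as the snippet expects) on a hypothetical 3-node link matrix:

import numpy as np
M = np.array([[0.0, 0.5, 0.5],
              [0.5, 0.0, 0.5],
              [0.5, 0.5, 0.0]])
print(getRank(M, beta=0.8, eps=1e-6).ravel())  # all ranks equal 1/3 by symmetry
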
Example #21
def initial_cond(coords, mass, dipole, temp, F):
    cm_coords = coords - tile(center_of_mass(coords, mass), (coords.shape[0], 1))

    print("computing inertia tensor and principal axes of inertia")

    mol_I, mol_Ix = eig(inertia_tensor(cm_coords, mass))
    mol_I.sort()

    print("principal moments of inertia are: ", mol_I)

    # compute the ratio of the dipole energy to the
    # rotational energy

    print("x = (mu*F / kB*T_R) = ", norm(dipole) * F / kB_au / temp)

    # random initial angular velocity vector
    # magnitude set so that 0.5 * I * w**2.0 = kT
    w_mag = sqrt(2.0 * kB_au * temp / mol_I.mean())
    w0 = 2.0 * rand(3) - 1.0
    w0 = w0 / norm(w0) * w_mag

    # random initial orientation / random unit quaternion
    q0 = 2.0 * rand(4) - 1.0
    q0 = q0 / norm(q0)

    return q0, w0
Example #22
def test_randomized_svd_power_iteration_normalizer():
    # randomized_svd with power_iteration_normalized='none' diverges for
    # large number of power iterations on this dataset
    rng = np.random.RandomState(42)
    X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
    X += 3 * rng.randint(0, 2, size=X.shape)
    n_components = 50

    # Check that it diverges with many (non-normalized) power iterations
    U, s, V = randomized_svd(X, n_components, n_iter=2,
                             power_iteration_normalizer='none')
    A = X - U.dot(np.diag(s).dot(V))
    error_2 = linalg.norm(A, ord='fro')
    U, s, V = randomized_svd(X, n_components, n_iter=20,
                             power_iteration_normalizer='none')
    A = X - U.dot(np.diag(s).dot(V))
    error_20 = linalg.norm(A, ord='fro')
    assert_greater(np.abs(error_2 - error_20), 100)

    for normalizer in ['LU', 'QR', 'auto']:
        U, s, V = randomized_svd(X, n_components, n_iter=2,
                                 power_iteration_normalizer=normalizer,
                                 random_state=0)
        A = X - U.dot(np.diag(s).dot(V))
        error_2 = linalg.norm(A, ord='fro')

        for i in [5, 10, 50]:
            U, s, V = randomized_svd(X, n_components, n_iter=i,
                                     power_iteration_normalizer=normalizer,
                                     random_state=0)
            A = X - U.dot(np.diag(s).dot(V))
            error = linalg.norm(A, ord='fro')
            assert_greater(15, np.abs(error_2 - error))
Example #23
def compute_innovationFactor(y, H, m_minus):
    innov = y - np.dot(H, m_minus)
    if la.norm(innov) > (1.0/3.0):
        innov_s = sigmaUnitSwitch(y) - np.dot(H, m_minus)
        if la.norm(innov_s) < la.norm(innov):
            innov = innov_s
    return innov
    def batched_decode(df):
        s = time.time()
        data = []
        r_data = []
        df = df.reindex(numpy.random.permutation(df.index))
        for start_i in range(0, len(df), batch_size):
            if verbose:
                print(start_i)
            batched_df = df[start_i:start_i+batch_size]
            text_embeddings, text_masks, hypothesis_embeddings, hypothesis_masks, labels = \
                prepare(batched_df, model['utable'], worddict, model['uoptions'], use_eos)
            uff = model['f_w2v'](text_embeddings, text_masks, hypothesis_embeddings, hypothesis_masks)
            r_uff = model['f_w2v'](hypothesis_embeddings, hypothesis_masks, text_embeddings, text_masks)

            text_embeddings, text_masks, hypothesis_embeddings, hypothesis_masks, labels = \
                prepare(batched_df, model['btable'], worddict, model['boptions'], use_eos)
            bff = model['f_w2v2'](text_embeddings, text_masks, hypothesis_embeddings, hypothesis_masks)
            r_bff = model['f_w2v2'](hypothesis_embeddings, hypothesis_masks, text_embeddings, text_masks)
            if use_norm:
                for j in range(len(uff)):
                    uff[j] /= norm(uff[j])
                    bff[j] /= norm(bff[j])
                    r_uff[j] /= norm(r_uff[j])
                    r_bff[j] /= norm(r_bff[j])
            ff = numpy.concatenate([uff, bff], axis=1)
            r_ff = numpy.concatenate([r_uff, r_bff], axis=1)
            data.append(ff)
            r_data.append(r_ff)
        data = numpy.concatenate(data)
        r_data = numpy.concatenate(r_data)
        print('used {0} seconds'.format(time.time() - s))
        return data, r_data, df.label.values
Example #25
def _admm_ips(S, support, rho=1., tau_inc=2., tau_decr=2., mu=None, tol=1e-6,
              max_iter=100, Xinit=None):
    """
    returns:
    -------
    Z       : numpy.ndarray
        the split variable with correct support

    r_      : list of floats
        normalised norm of difference between split variables

    s_      : list of floats
        convergence of the variable Z in normalised norm

        normalisation is based on division by the number of elements
    """
    p = S.shape[0]
    dof = np.count_nonzero(support)
    Z = (1 + rho) * np.identity(p)
    U = np.zeros((p, p))
    if Xinit is None:
        X = np.identity(p)
    else:
        X = Xinit
    r_ = list()
    s_ = list()
    f_vals_ = list()
    rho_ = [rho]
    r_.append(linalg.norm(X - Z) / dof)
    s_.append(np.inf)
    f_vals_.append(_pen_neg_log_likelihood(X, S))
    iter_count = 0
    while True:
        try:
            Z_old = Z.copy()
            # closed form optimization for X
            eigvals, eigvecs = linalg.eigh(rho * (Z - U) - S)
            eigvals = (eigvals + (eigvals ** 2 + rho) ** (1. / 2)) / rho
            X = eigvecs.dot(np.diag(eigvals).dot(eigvecs.T))
            # proximal operator for Z: projection on support
            Z = support * (X + U)
            # update scaled dual variable
            U = U + X - Z
            r_.append(linalg.norm(X - Z) / (p ** 2))
            s_.append(linalg.norm(Z - Z_old) / dof)
            func_val = -np.linalg.slogdet(support * X)[1] + \
                np.sum(S * X * support)
            f_vals_.append(func_val)

            if mu is not None:
                rho = _update_rho(U, rho, r_[-1], s_[-1],
                                  mu, tau_inc, tau_decr)
                rho_.append(rho)
            iter_count += 1
            if (_check_convergence(X, Z, Z_old, U, rho, tol_abs=tol) or
                    iter_count > max_iter):
                raise StopIteration
        except StopIteration:
            return X, Z, r_, s_, f_vals_, rho_
Example #26
def PicardTolerance(x,u_k,b_k,FSpaces,dim,NormType,iter):
    X = IO.vecToArray(x)
    uu = X[0:dim[0]]
    bb = X[dim[0]+dim[1]:dim[0]+dim[1]+dim[2]]

    u = Function(FSpaces[0])
    u.vector()[:] = u.vector()[:] + uu
    diffu = u.vector().array() - u_k.vector().array()

    b = Function(FSpaces[2])
    b.vector()[:] = b.vector()[:] + bb
    diffb = b.vector().array() - b_k.vector().array()
    if (NormType == '2'):
        epsu = splin.norm(diffu)/sqrt(dim[0])
        epsb = splin.norm(diffb)/sqrt(dim[0])
    elif (NormType == 'inf'):
        epsu = splin.norm(diffu, ord=np.Inf)
        epsb = splin.norm(diffb, ord=np.Inf)
    else:
        print("NormType must be 2 or inf")
        quit()

    print('iter=%d: u-norm=%g   b-norm=%g ' % (iter, epsu, epsb))
    u_k.assign(u)
    b_k.assign(b)


    return u_k,b_k,epsu,epsb
Example #27
def _check_dipoles(dipoles, fwd, stc, evoked, residual=None):
    src = fwd['src']
    pos1 = fwd['source_rr'][np.where(src[0]['vertno'] ==
                                     stc.vertices[0])]
    pos2 = fwd['source_rr'][np.where(src[1]['vertno'] ==
                                     stc.vertices[1])[0] +
                            len(src[0]['vertno'])]

    # Check the position of the two dipoles
    assert_true(dipoles[0].pos[0] in np.array([pos1, pos2]))
    assert_true(dipoles[1].pos[0] in np.array([pos1, pos2]))

    ori1 = fwd['source_nn'][np.where(src[0]['vertno'] ==
                                     stc.vertices[0])[0]][0]
    ori2 = fwd['source_nn'][np.where(src[1]['vertno'] ==
                                     stc.vertices[1])[0] +
                            len(src[0]['vertno'])][0]

    # Check the orientation of the dipoles
    assert_true(np.max(np.abs(np.dot(dipoles[0].ori[0],
                                     np.array([ori1, ori2]).T))) > 0.99)

    assert_true(np.max(np.abs(np.dot(dipoles[1].ori[0],
                                     np.array([ori1, ori2]).T))) > 0.99)

    if residual is not None:
        picks_grad = mne.pick_types(residual.info, meg='grad')
        picks_mag = mne.pick_types(residual.info, meg='mag')
        rel_tol = 0.02
        for picks in [picks_grad, picks_mag]:
            assert_true(linalg.norm(residual.data[picks], ord='fro') <
                        rel_tol *
                        linalg.norm(evoked.data[picks], ord='fro'))
Example #28
def power_method(A, N=20, tol=1e-12):
    """Compute the dominant eigenvalue of A and a corresponding eigenvector
    via the power method.

    Inputs:
        A ((n,n) ndarray): A square matrix.
        N (int): The maximum number of iterations.
        tol (float): The stopping tolerance.

    Returns:
        (float): The dominant eigenvalue of A.
        ((n, ) ndarray): An eigenvector corresponding to the dominant
            eigenvalue of A.
    """
    # Choose a random x_0 with norm 1.
    x = np.random.random(A.shape[0])
    x /= la.norm(x)

    for _ in range(N):
        # x_{k+1} = Ax_k / ||Ax_k||
        y = np.dot(A, x)
        x_new = y / la.norm(y)

        # Check for convergence.
        if la.norm(x_new - x) < tol:
            x = x_new
            break

        # Move to the next iteration.
        x = x_new

    return np.dot(x, np.dot(A, x)), x
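
A short usage sketch (assuming numpy and scipy.linalg as la, matching the snippet) on a small symmetric matrix with a known dominant eigenvalue:

import numpy as np
A = np.array([[2.0, 1.0],
              [1.0, 2.0]])     # eigenvalues are 3 and 1
eigval, eigvec = power_method(A, N=100, tol=1e-12)
print(round(eigval, 6))                              # 3.0
print(np.allclose(A.dot(eigvec), eigval * eigvec))   # True
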
Example #29
def dogleg(gk,Hk,rk):
    """Calculate the dogleg minimizer of the quadratic model function.
    
    Parameters:
        gk : ndarray of shape (n,)
            The current gradient of the objective function
        Hk : ndarray of shape (n,n)
            The current (or approximate) hessian
        rk : float
            The current trust region radius
    Returns:
        pk : ndarray of shape (n,)
            The dogleg minimizer of the model function.
    """
    pB = la.solve(-Hk,gk)
    pU = -(gk.T.dot(gk))/(gk.T.dot(Hk.dot(gk)))*gk
    
    if la.norm(pB) <= rk:
        return pB
    
    if la.norm(pU) >= rk:
        return rk*pU/la.norm(pU)
    
    a = pB.T.dot(pB) - 2.*pB.T.dot(pU) + pU.T.dot(pU)
    b = 2*pB.T.dot(pU) -2.*pU.T.dot(pU)
    c = pU.T.dot(pU) - rk**2
    t = 1. + (-b + np.sqrt(b**2-4.*a*c))/(2.*a)
    return pU + (t-1.)*(pB-pU)
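
A usage sketch on an invented quadratic model (assuming numpy and scipy.linalg as la, as in the snippet): a large trust radius returns the full Newton step, while a small radius clips the step to the boundary.

import numpy as np
from scipy import linalg as la

gk = np.array([1.0, -2.0])
Hk = np.array([[3.0, 0.0],
               [0.0, 1.0]])
print(dogleg(gk, Hk, rk=10.0))                    # full Newton step, about [-0.333, 2.0]
print(round(la.norm(dogleg(gk, Hk, rk=0.5)), 3))  # 0.5: clipped to the trust-region radius
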
Example #30
def test_cov_estimation_on_raw_segment():
    """Test estimation from raw on continuous recordings (typically empty room)
    """
    tempdir = _TempDir()
    raw = Raw(raw_fname, preload=False)
    cov = compute_raw_data_covariance(raw)
    cov_mne = read_cov(erm_cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro')
                / linalg.norm(cov.data, ord='fro') < 1e-4)

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_array_almost_equal(cov.data, cov_read.data)

    # test with a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_data_covariance(raw, picks=picks)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
                ord='fro') / linalg.norm(cov.data, ord='fro') < 1e-4)
    # make sure we get a warning with too short a segment
    raw_2 = raw.crop(0, 1)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov = compute_raw_data_covariance(raw_2)
    assert_true(len(w) == 1)
Example #31
def _gradient_descent(objective,
                      p0,
                      it,
                      n_iter,
                      objective_error=None,
                      n_iter_check=1,
                      n_iter_without_progress=50,
                      momentum=0.5,
                      learning_rate=1000.0,
                      min_gain=0.01,
                      min_grad_norm=1e-7,
                      min_error_diff=1e-7,
                      verbose=0,
                      args=None,
                      kwargs=None):
    """Batch gradient descent with momentum and individual gains.

    Parameters
    ----------
    objective : function or callable
        Should return a tuple of cost and gradient for a given parameter
        vector. When expensive to compute, the cost can optionally
        be None and can be computed every n_iter_check steps using
        the objective_error function.

    p0 : array-like, shape (n_params,)
        Initial parameter vector.

    it : int
        Current number of iterations (this function will be called more than
        once during the optimization).

    n_iter : int
        Maximum number of gradient descent iterations.

    n_iter_check : int
        Number of iterations before evaluating the global error. If the error
        is sufficiently low, we abort the optimization.

    objective_error : function or callable
        Should return a tuple of cost and gradient for a given parameter
        vector.

    n_iter_without_progress : int, optional (default: 30)
        Maximum number of iterations without progress before we abort the
        optimization.

    momentum : float, within (0.0, 1.0), optional (default: 0.5)
        The momentum generates a weight for previous gradients that decays
        exponentially.

    learning_rate : float, optional (default: 1000.0)
        The learning rate should be extremely high for t-SNE! Values in the
        range [100.0, 1000.0] are common.

    min_gain : float, optional (default: 0.01)
        Minimum individual gain for each parameter.

    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be aborted.

    min_error_diff : float, optional (default: 1e-7)
        If the absolute difference of two successive cost function values
        is below this threshold, the optimization will be aborted.

    verbose : int, optional (default: 0)
        Verbosity level.

    args : sequence
        Arguments to pass to objective function.

    kwargs : dict
        Keyword arguments to pass to objective function.

    Returns
    -------
    p : array, shape (n_params,)
        Optimum parameters.

    error : float
        Optimum.

    i : int
        Last iteration.
    """
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}

    p = p0.copy().ravel()
    update = np.zeros_like(p)
    gains = np.ones_like(p)
    error = np.finfo(np.float).max
    best_error = np.finfo(np.float).max
    best_iter = 0

    for i in range(it, n_iter):
        new_error, grad = objective(p, *args, **kwargs)
        grad_norm = linalg.norm(grad)

        inc = update * grad >= 0.0
        dec = np.invert(inc)
        gains[inc] += 0.05
        gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
        grad *= gains
        update = momentum * update - learning_rate * grad
        p += update

        if (i + 1) % n_iter_check == 0:
            if new_error is None:
                new_error = objective_error(p, *args)
            error_diff = np.abs(new_error - error)
            error = new_error

            if verbose >= 2:
                m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
                print(m % (i + 1, error, grad_norm))

            if error < best_error:
                best_error = error
                best_iter = i
            elif i - best_iter > n_iter_without_progress:
                if verbose >= 2:
                    print("[t-SNE] Iteration %d: did not make any progress "
                          "during the last %d episodes. Finished." %
                          (i + 1, n_iter_without_progress))
                break
            if grad_norm <= min_grad_norm:
                if verbose >= 2:
                    print("[t-SNE] Iteration %d: gradient norm %f. Finished." %
                          (i + 1, grad_norm))
                break
            if error_diff <= min_error_diff:
                if verbose >= 2:
                    m = "[t-SNE] Iteration %d: error difference %f. Finished."
                    print(m % (i + 1, error_diff))
                break

        if new_error is not None:
            error = new_error

    return p, error, i
Example #32
for j1 in range(6):
    mu=muinit*(ps_step**j1)
    opterr1=n_s*100
    end1=2
    optmu21=mu2init
    for j2 in range(6):
        mu2=mu2init*(ps_step**j2)
        err=0
        for k in range(1): #5 for full 5 fold CV
            train1_idx=SP.concatenate((train_idx[:int(n_train*k*0.2)],train_idx[int(n_train*(k+1)*0.2):n_train]))
            valid_idx=train_idx[int(n_train*k*0.2):int(n_train*(k+1)*0.2)]
            res1=lmm_lasso.train(X[train1_idx],K[train1_idx][:,train1_idx],y[train1_idx],mu,mu2,group,w0=w0)
            w1=res1['weights']
            w0=w1
            yhat = lmm_lasso.predict(y[train1_idx],X[train1_idx,:],X[valid_idx,:],K[train1_idx][:,train1_idx],K[valid_idx][:,train1_idx],res1['ldelta0'],w1)
            err += LA.norm(yhat-y[valid_idx])
            
        print(mu, mu2, err)
        if err<=opterr1:
            opterr1=err
            optmu21=mu2
            end1=2
        else:
            end1-=1
        if end1==0:
            break
    if opterr1<=opterr:
        opterr=opterr1
        optmu=mu
        optmu2=optmu21
        end=2
Example #33
def encode(model,
           X,
           use_norm=True,
           verbose=True,
           batch_size=128,
           use_eos=False):
    """
    Encode sentences in the list X. Each entry will return a vector
    """
    # first, do preprocessing
    X = preprocess(X)

    # word dictionary and init
    d = defaultdict(lambda: 0)
    for w in model['utable'].keys():
        d[w] = 1
    ufeatures = numpy.zeros((len(X), model['uoptions']['dim']),
                            dtype='float32')
    bfeatures = numpy.zeros((len(X), 2 * model['boptions']['dim']),
                            dtype='float32')

    # length dictionary
    ds = defaultdict(list)
    captions = [s.split() for s in X]
    for i, s in enumerate(captions):
        ds[len(s)].append(i)

    # Get features. This encodes by length, in order to avoid wasting computation
    for k in ds.keys():
        if verbose:
            print(k)
        numbatches = int(len(ds[k]) / batch_size + 1)  #changed by Debanjan

        for minibatch in range(int(numbatches)):
            caps = ds[k][minibatch::numbatches]

            if use_eos:
                uembedding = numpy.zeros(
                    (k + 1, len(caps), model['uoptions']['dim_word']),
                    dtype='float32')
                bembedding = numpy.zeros(
                    (k + 1, len(caps), model['boptions']['dim_word']),
                    dtype='float32')
            else:
                uembedding = numpy.zeros(
                    (k, len(caps), model['uoptions']['dim_word']),
                    dtype='float32')
                bembedding = numpy.zeros(
                    (k, len(caps), model['boptions']['dim_word']),
                    dtype='float32')
            for ind, c in enumerate(caps):
                caption = captions[c]
                for j in range(len(caption)):
                    if d[caption[j]] > 0:
                        uembedding[j, ind] = model['utable'][caption[j]]
                        bembedding[j, ind] = model['btable'][caption[j]]
                    else:
                        uembedding[j, ind] = model['utable']['UNK']
                        bembedding[j, ind] = model['btable']['UNK']
                if use_eos:
                    uembedding[-1, ind] = model['utable']['<eos>']
                    bembedding[-1, ind] = model['btable']['<eos>']
            if use_eos:
                uff = model['f_w2v'](uembedding,
                                     numpy.ones((len(caption) + 1, len(caps)),
                                                dtype='float32'))
                bff = model['f_w2v2'](bembedding,
                                      numpy.ones((len(caption) + 1, len(caps)),
                                                 dtype='float32'))
            else:
                uff = model['f_w2v'](uembedding,
                                     numpy.ones((len(caption), len(caps)),
                                                dtype='float32'))
                bff = model['f_w2v2'](bembedding,
                                      numpy.ones((len(caption), len(caps)),
                                                 dtype='float32'))
            if use_norm:
                for j in range(len(uff)):
                    uff[j] /= norm(uff[j])
                    bff[j] /= norm(bff[j])
            for ind, c in enumerate(caps):
                ufeatures[c] = uff[ind]
                bfeatures[c] = bff[ind]

    features = numpy.c_[ufeatures, bfeatures]
    return features
Example #34
Ny = utils.optimize_fftsize(Ny, fft_max_prime)

dx = Lx/np.float(Nx)
dy = Ly/np.float(Ny)

# X-Y vector - linspace can be problematic, refinement with arange
x = np.linspace(-Lx/2., Lx/2., Nx)
x = (np.arange(x.size) - x.size/2)  * (x[1]-x[0])
y = np.linspace(-Ly/2., Ly/2., Ny)
y = (np.arange(y.size) - y.size/2)  * (y[1]-y[0])
x, y = np.meshgrid(x, y)

# Currents
current = current_mag * np.array([np.cos(current_dir), np.sin(current_dir)])
U_eff_vec = (wind_U * np.array([np.cos(wind_dir), np.sin(wind_dir)]) - current)
wind_U_eff = linalg.norm(U_eff_vec)
wind_dir_eff = np.arctan2(U_eff_vec[1], U_eff_vec[0])

# Kx-Ky meshgrid 
kx = 2.*np.pi*np.fft.fftfreq(Nx, dx)
ky = 2.*np.pi*np.fft.fftfreq(Ny, dy)
kx, ky = np.meshgrid(kx, ky)

# Kx-Ky resolution
kx_res = kx[0, 1] - kx[0, 0]
ky_res = ky[1, 0] - ky[0, 0]

# K-theta meshgrid (Polar, wind direction shifted)
k = np.sqrt(kx**2 + ky**2)
good_k = np.where(k > np.min(np.array([kx_res, ky_res]))/2.0)
kinv = np.zeros(k.shape)
Example #35
File: mytsne.py Project: beartell/bismol
def _gradient_descent(objective,
                      p0,
                      it,
                      n_iter,
                      objective_error=None,
                      n_iter_check=1,
                      n_iter_without_progress=50,
                      momentum=0.5,
                      learning_rate=1000.0,
                      min_gain=0.01,
                      min_grad_norm=1e-7,
                      min_error_diff=1e-7,
                      verbose=0,
                      args=None,
                      kwargs=None,
                      urls=[],
                      text=[],
                      colors=[],
                      n_samples=0,
                      n_components=0):
    """Batch gradient descent with momentum and individual gains.
    Parameters
    ----------
    objective : function or callable
        Should return a tuple of cost and gradient for a given parameter
        vector. When expensive to compute, the cost can optionally
        be None and can be computed every n_iter_check steps using
        the objective_error function.
    p0 : array-like, shape (n_params,)
        Initial parameter vector.
    it : int
        Current number of iterations (this function will be called more than
        once during the optimization).
    n_iter : int
        Maximum number of gradient descent iterations.
    n_iter_check : int
        Number of iterations before evaluating the global error. If the error
        is sufficiently low, we abort the optimization.
    objective_error : function or callable
        Should return a tuple of cost and gradient for a given parameter
        vector.
    n_iter_without_progress : int, optional (default: 30)
        Maximum number of iterations without progress before we abort the
        optimization.
    momentum : float, within (0.0, 1.0), optional (default: 0.5)
        The momentum generates a weight for previous gradients that decays
        exponentially.
    learning_rate : float, optional (default: 1000.0)
        The learning rate should be extremely high for t-SNE! Values in the
        range [100.0, 1000.0] are common.
    min_gain : float, optional (default: 0.01)
        Minimum individual gain for each parameter.
    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be aborted.
    min_error_diff : float, optional (default: 1e-7)
        If the absolute difference of two successive cost function values
        is below this threshold, the optimization will be aborted.
    verbose : int, optional (default: 0)
        Verbosity level.
    args : sequence
        Arguments to pass to objective function.
    kwargs : dict
        Keyword arguments to pass to objective function.
    Returns
    -------
    p : array, shape (n_params,)
        Optimum parameters.
    error : float
        Optimum.
    i : int
        Last iteration.
    """
    #connect to database
    conn = r.connect(host="localhost", port=28015, db="messagedb")
    #keep track of total distance each point has moved
    global point_distances

    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}

    p = p0.copy().ravel()
    update = np.zeros_like(p)
    gains = np.ones_like(p)
    error = np.finfo(np.float).max
    best_error = np.finfo(np.float).max
    best_iter = 0

    for i in range(it, n_iter):
        new_error, grad = objective(p, *args, **kwargs)
        grad_norm = linalg.norm(grad)

        # track which keys to remove from dictionary
        to_remove = []
        # check client changes
        if (len(changes) > 0):
            for change in changes:
                index = urls.index(str(change["new_val"]["id"]))
                print(index)
                print(changes)
                print(p[index * 2])
                print(p[index * 2 + 1])
                print(change["old_val"]["x"])
                print(change["old_val"]["y"])
                print(change["new_val"]["x"])
                print(change["new_val"]["y"])
                p[index * 2] = change["new_val"]["x"]
                p[index * 2 + 1] = change["new_val"]["y"]
                to_remove.append(change)
            for item in to_remove:
                print(len(changes))
                changes.remove(item)
                print(len(changes))

        inc = update * grad >= 0.0
        dec = np.invert(inc)
        gains[inc] += 0.05
        gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
        grad *= gains
        update = momentum * update - learning_rate * grad
        p += update

        if (i + 1) % n_iter_check == 0:
            if new_error is None:
                new_error = objective_error(p, *args)
            error_diff = np.abs(new_error - error)
            error = new_error

            if verbose >= 2:
                m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
                print((m % (i + 1, error, grad_norm)))

            if error < best_error:
                best_error = error
                best_iter = i
            elif i - best_iter > n_iter_without_progress:
                if verbose >= 2:
                    print(("[t-SNE] Iteration %d: did not make any progress "
                           "during the last %d episodes. Finished." %
                           (i + 1, n_iter_without_progress)))
                break
            if grad_norm <= min_grad_norm:
                if verbose >= 2:
                    print(
                        ("[t-SNE] Iteration %d: gradient norm %f. Finished." %
                         (i + 1, grad_norm)))
                break
            if error_diff <= min_error_diff:
                if verbose >= 2:
                    m = "[t-SNE] Iteration %d: error difference %f. Finished."
                    print((m % (i + 1, error_diff)))
                break

        if (i + 1) % 5 == 0:
            #save to database every so often
            embedded_array = p.reshape(n_samples, n_components)

            #array of packaged message objects
            data = []

            #combine url, xy coords, and text into one object and add to data array
            for idx in range(len(urls)):
                urls[idx] = urls[idx].replace('\ufeff', '')
                tempUrl = int(urls[idx])

                if (len(colors) > 0):
                    data.append({
                        "id": tempUrl,
                        "x": embedded_array[idx][0],
                        "y": embedded_array[idx][1],
                        "distance": point_distances[idx],
                        "text": text[idx],
                        "color": colors[idx],
                        "modified_by": "server"
                    })
                else:
                    data.append({
                        "id": tempUrl,
                        "x": embedded_array[idx][0],
                        "y": embedded_array[idx][1],
                        "distance": point_distances[idx],
                        "text": text[idx],
                        "modified_by": "server"
                    })

            #insert data into the database, updating if it already exists
            results = r.table("messages").insert(
                data, conflict="update", return_changes="always").run(conn)

            for k in range(len(results["changes"])):
                if results["changes"][k]["old_val"] == None:
                    break
                delta = math.sqrt(
                    math.pow(
                        results["changes"][k]["new_val"]["x"] -
                        results["changes"][k]["old_val"]["x"], 2) + math.pow(
                            results["changes"][k]["new_val"]["y"] -
                            results["changes"][k]["old_val"]["y"], 2))
                point_distances[k] += delta

        if new_error is not None:
            error = new_error

    #close database connection
    conn.close()

    return p, error, i
def _test_lstsq_reg_single(k, d, r):
    """Do one test of utils._solver.lstsq_reg()."""
    A = np.random.random((k, d))
    b = np.random.random(k)
    B = np.random.random((k, r))
    I = np.eye(d)

    # VECTOR TEST
    x = la.lstsq(A, b)[0]

    # Ensure that the least squares solution is the usual one when P=0.
    x_ = roi.utils.lstsq_reg(A, b, P=0)[0]
    assert np.allclose(x, x_)

    # Ensure that the least squares solution is the usual one when P=0 (matrix)
    x_ = roi.utils.lstsq_reg(A, b, np.zeros((d, d)))[0]
    assert np.allclose(x, x_)

    # Check that the regularized least squares solution has decreased norm.
    x_ = roi.utils.lstsq_reg(A, b, P=2)[0]
    assert la.norm(x_) <= la.norm(x)

    x_ = roi.utils.lstsq_reg(A, b, P=2 * I)[0]
    assert la.norm(x_) <= la.norm(x)

    # MATRIX TEST
    X = la.lstsq(A, B)[0]

    # Ensure that the least squares solution is the usual one when P=0.
    X_ = roi.utils.lstsq_reg(A, B, P=0)[0]
    assert np.allclose(X, X_)

    # Ensure that the least squares solution is the usual one when P=0 (matrix)
    X_ = roi.utils.lstsq_reg(A, B, P=0)[0]
    assert np.allclose(X, X_)

    # Ensure that the least squares solution is the usual one when P=0 (list)
    X_ = roi.utils.lstsq_reg(A, B, P=[np.zeros((d, d))] * r)[0]
    assert np.allclose(X, X_)

    # Ensure that the least squares problem decouples correctly.
    Ps = [l * I for l in range(r)]
    X_ = roi.utils.lstsq_reg(A, B, P=Ps)[0]
    for j in range(r):
        xj_ = roi.utils.lstsq_reg(A, B[:, j], P=Ps[j])[0]
        assert np.allclose(xj_, X_[:, j])

    # Check that the regularized least squares solution has decreased norm.
    X_ = roi.utils.lstsq_reg(A, B, P=2)[0]
    assert la.norm(X_) <= la.norm(X)

    X_ = roi.utils.lstsq_reg(A, B, P=2)[0]
    assert la.norm(X_) <= la.norm(X)

    X_ = roi.utils.lstsq_reg(A, B, P=[2] * r)[0]
    assert la.norm(X_) <= la.norm(X)

    X_ = roi.utils.lstsq_reg(A, B, P=[2 * I] * r)[0]
    assert la.norm(X_) <= la.norm(X)

    # Test residuals actually give the Frobenius norm squared of the misfit.
    Acond = np.linalg.cond(A)
    X_, res, rnk, svdvals = roi.utils.lstsq_reg(A, B, P=0)
    assert np.isclose(np.sum(res), np.sum((A @ X_ - B)**2))
    assert np.isclose(abs(svdvals[0] / svdvals[-1]), Acond)

    # Ensure residuals larger, condition numbers smaller with regularization.
    X_, res, rnk, svdvals = roi.utils.lstsq_reg(A, B, P=2)
    assert np.sum(res) > np.sum((A @ X_ - B)**2)
    assert abs(svdvals[0] / svdvals[-1]) < Acond

    X_, res, rnk, svdvals = roi.utils.lstsq_reg(A, B, P=[2 * I] * r)
    assert np.sum(res) > np.sum((A @ X_ - B)**2)
    assert abs(svdvals[0] / svdvals[-1]) < Acond
Example #37
File: solver.py Project: thomasdes/tick
    def check_solver(self, solver, fit_intercept=True, model='logreg',
                     decimal=1):
        """Check solver instance finds same parameters as scipy BFGS

        Parameters
        ----------
        solver : `Solver`
            Instance of a solver to be tested

        fit_intercept : `bool`, default=True
            Model uses an intercept if `True`

        model : 'linreg' | 'logreg' | 'poisreg', default='logreg'
            Name of the model used to test the solver

        decimal : `int`, default=1
            Number of decimals required for the test
        """
        # Set seed for data simulation
        np.random.seed(12)
        n_samples = TestSolver.n_samples
        n_features = TestSolver.n_features

        coeffs0 = weights_sparse_gauss(n_features, nnz=5)
        if fit_intercept:
            interc0 = 2.
        else:
            interc0 = None

        if model == 'linreg':
            X, y = SimuLinReg(coeffs0, interc0, n_samples=n_samples,
                              verbose=False, seed=123).simulate()
            model = ModelLinReg(fit_intercept=fit_intercept).fit(X, y)
        elif model == 'logreg':
            X, y = SimuLogReg(coeffs0, interc0, n_samples=n_samples,
                              verbose=False, seed=123).simulate()
            model = ModelLogReg(fit_intercept=fit_intercept).fit(X, y)
        elif model == 'poisreg':
            X, y = SimuPoisReg(coeffs0, interc0, n_samples=n_samples,
                               verbose=False, seed=123).simulate()
            # Rescale features to avoid overflows in Poisson simulations
            X /= np.linalg.norm(X, axis=1).reshape(n_samples, 1)
            model = ModelPoisReg(fit_intercept=fit_intercept).fit(X, y)
        else:
            raise ValueError("``model`` must be either 'linreg', 'logreg' or"
                             " 'poisreg'")

        solver.set_model(model)

        strength = 1e-2
        prox = ProxL2Sq(strength, (0, model.n_features))

        if type(solver) is not SDCA:
            solver.set_prox(prox)
        else:
            solver.set_prox(ProxZero())
            solver.l_l2sq = strength

        coeffs_solver = solver.solve()
        # Compare with BFGS
        bfgs = BFGS(max_iter=100,
                    verbose=False).set_model(model).set_prox(prox)
        coeffs_bfgs = bfgs.solve()
        np.testing.assert_almost_equal(coeffs_solver, coeffs_bfgs,
                                       decimal=decimal)

        # We ensure that reached coeffs are not equal to zero
        self.assertGreater(norm(coeffs_solver), 0)

        self.assertAlmostEqual(
            solver.objective(coeffs_bfgs), solver.objective(coeffs_solver),
            delta=1e-2)
Example #38
def adaptive_graal_terminate(J,
                             F,
                             prox_g,
                             x1,
                             numb_iter=100,
                             phi=1.5,
                             tol=1e-6,
                             output=False):
    """ Adaptive Golden Ratio algorithm with termination criteria.

    Input 
    -----
    J : function that computes residual in every iteration.
        Takes x as input.
    F : main operator.
        Takes x as input.
    prox_g: proximal operator.
        Takes two parameters x and a scalar as input.
    x1: Starting point.
        np.array, be consistent with J, F and prox_g.
    numb_iter: number of iterations to run the algorithm.
    phi: a key parameter for the algorithm.
         Must be between 1 and the golden ratio, 1.618... Choice
         phi=1.5 seems to be one of the best.
    tol: a positive number.
        Required accuracy for termination of the algorithm.
    output: boolean.  
         If true, prints the length of a stepsize in every iteration. Useful
         for monitoring.
    Return
    ------
    values: 1d array
          Collects all values that were computed in every iteration with J(x).
    x : last iterate.
    i: positive integer, number of iteration to reach desired accuracy.
    time_list: list of time stamps in every iteration. 
          Useful for monitoring.
    """

    begin = perf_counter()

    x, x_ = x1.copy(), x1.copy()
    x0 = x + np.random.randn(x.shape[0])
    Fx = F(x)
    la = phi / 2 * LA.norm(x - x0) / LA.norm(Fx - F(x0))
    rho = 1. / phi + 1. / phi**2
    values = [J(x)]
    time_list = [perf_counter() - begin]
    th = 1

    i = 1
    while i <= numb_iter and values[-1] > tol:
        i += 1
        x1 = prox_g(x_ - la * Fx, la)
        Fx1 = F(x1)
        n1 = LA.norm(x1 - x)
        n2 = LA.norm(Fx1 - Fx)
        n1_div_n2 = np.exp(2 * (np.log(n1) - np.log(n2)))
        la1 = min(rho * la, 0.25 * phi * th / la * n1_div_n2, 1e6)
        x_ = ((phi - 1) * x1 + x_) / phi
        if output:
            print(i, la, LA.norm(x1), LA.norm(Fx), LA.norm(x_))

        th = phi * la1 / la
        x, la, Fx = x1, la1, Fx1
        values.append(J(x))
        time_list.append(perf_counter() - begin)

    end = perf_counter()

    print("Time execution of aGRAAL:", end - begin)
    return np.array(values), x, i, time_list
Example #39
def M(axis, theta):
    return expm(np.cross(np.eye(3), axis / norm(axis) * theta))
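
A minimal usage sketch (not part of the original snippet), assuming the names the one-liner relies on are in scope: numpy as np, norm (from numpy.linalg or scipy.linalg) and expm from scipy.linalg. Rotating the x-axis by 90 degrees about the z-axis should give the y-axis:

axis_z = np.array([0.0, 0.0, 1.0])
R = M(axis_z, np.pi / 2)                              # rotation by 90 degrees about z
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))     # expected: [0. 1. 0.]
print(np.allclose(R @ R.T, np.eye(3)))                # rotation matrices are orthogonal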
Example #40
def adaptive_graal(J, F, prox_g, x0, numb_iter=100, phi=1.5, output=False):
    """ Adaptive Golden Ratio algorithm.

    Input 
    -----
    J : function that computes residual in every iteration.
        Takes x as input.
    F : main operator.
        Takes x as input.
    prox_g: proximal operator.
        Takes two parameters x and a scalar as input.
    x0: Starting point.
        np.array, be consistent with J, F and prox_g.
    numb_iter: number of iterations to run the algorithm.
    phi: a key parameter for the algorithm.
         Must be between 1 and the golden ratio, 1.618... Choice
         phi=1.5 seems to be one of the best.
    output: boolean.  
         If true, prints the length of a stepsize in every iteration.  
         Useful for monitoring.

    Return
    ------
    values: 1d array
          Collects all values that were computed in every iteration with J(x)
    x, x_ : last iterates
    time_list: list of time stamps in every iteration. 
          Useful for monitoring.
    """

    begin = perf_counter()

    x, x_ = x0.copy(), x0.copy()
    x0 = x + np.random.randn(x.shape[0]) * 1e-9
    Fx = F(x)
    la = phi / 2 * LA.norm(x - x0) / LA.norm(Fx - F(x0))
    rho = 1. / phi + 1. / phi**2
    values = [J(x)]
    time_list = [perf_counter() - begin]
    th = 1

    for i in range(numb_iter):
        x1 = prox_g(x_ - la * Fx, la)
        Fx1 = F(x1)

        n1 = LA.norm(x1 - x)**2
        n2 = LA.norm(Fx1 - Fx)**2
        n1_div_n2 = np.exp(np.log(n1) - np.log(n2))
        la1 = min(rho * la, 0.25 * phi * th / la * n1_div_n2, 1e6)
        x_ = ((phi - 1) * x1 + x_) / phi
        if output:
            print(i, la)
        th = phi * la1 / la
        x, la, Fx = x1, la1, Fx1
        values.append(J(x))
        time_list.append(perf_counter() - begin)

    end = perf_counter()

    print("Time execution of adaptive GRAAL:", end - begin)
    return np.array(values), x, x_, time_list
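
A toy run of adaptive_graal (a sketch, not taken from the original source), assuming the function above and its module-level imports (numpy as np, numpy.linalg as LA, perf_counter) are available. With F(x) = Ax - b for a well-conditioned symmetric positive definite A and an identity prox, the iterates should approach the solution of Ax = b:

np.random.seed(0)
n = 20
B = np.random.randn(n, n)
A_mat = B.T @ B / n + np.eye(n)          # symmetric positive definite, well conditioned
b = np.random.randn(n)

F = lambda x: A_mat @ x - b              # monotone operator whose root solves Ax = b
J = lambda x: LA.norm(F(x))              # residual, used only for monitoring
prox_g = lambda z, la: z                 # unconstrained problem: prox of g = 0 is the identity

values, x, x_, times = adaptive_graal(J, F, prox_g, np.zeros(n), numb_iter=500)
print(LA.norm(x - np.linalg.solve(A_mat, b)))   # should be close to zero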
Example #41
def iqml_recon_ri(G, a, K, noise_level, max_ini=100, stop_cri='mse'):
    """
    Here we assume both the measurements a and the linear transformation G are real-valued
    """
    # make sure the assumption (real-valued) is true
    assert not np.iscomplexobj(a) and not np.iscomplexobj(G)
    compute_mse = (stop_cri == 'mse')
    GtG = np.dot(G.T, G)
    Gt_a = np.dot(G.T, a)

    two_Lp1 = G.shape[1]
    assert two_Lp1 % 2 == 1
    L = int((two_Lp1 - 1) / 2.)

    sz_T0 = 2 * (L - K + 1)
    sz_T1 = 2 * (K + 1)
    sz_G1 = 2 * L + 1
    sz_R0 = sz_T0
    sz_R1 = 2 * L + 1
    sz_coef = 2 * (K + 1)

    D = expansion_mtx_pos(L + 1)

    max_iter = 50
    min_error = float('inf')
    beta = linalg.lstsq(G, a)[0]

    Tbeta = Tmtx_ri_half(beta, K, D)  # has 2(K+1) columns
    rhs = np.concatenate((np.zeros(sz_T1 + sz_T0 + sz_G1), [1.]))
    rhs_bl = np.concatenate((Gt_a, np.zeros(sz_R0)))

    for ini in xrange(max_ini):
        c_ri = np.random.randn(2 * (K + 1))  # first half of c_ri: c_real, second half: c_imag
        c0 = c_ri.copy()
        error_seq = np.zeros(max_iter)
        # R has (2L + 1) columns
        R_loop = Rmtx_ri_half(c_ri, K, L + 1, D)

        for loop in xrange(max_iter):
            Mtx_loop = np.vstack((np.hstack((np.zeros((sz_T1, sz_T1)), Tbeta.T, np.zeros((sz_T1, sz_R1)),
                                             c0[:, np.newaxis])),
                                  np.hstack((Tbeta, np.zeros((sz_T0, sz_T0)), -R_loop, np.zeros((sz_T0, 1)))),
                                  np.hstack((np.zeros((sz_R1, sz_T1)), -R_loop.T, GtG, np.zeros((sz_G1, 1)))),
                                  np.hstack((c0[np.newaxis, :], np.zeros((1, sz_T0 + sz_R1 + 1))))
                                  ))
            # matrix should be Hermitian symmetric
            Mtx_loop = (Mtx_loop + Mtx_loop.T) / 2.
            c_ri = linalg.solve(Mtx_loop, rhs)[:sz_coef]

            R_loop = Rmtx_ri_half(c_ri, K, L + 1, D)

            Mtx_brecon = np.vstack((np.hstack((GtG, R_loop.T)),
                                    np.hstack((R_loop, np.zeros((sz_R0, sz_R0))))
                                    ))
            # matrix should be Hermitian symmetric
            Mtx_brecon = (Mtx_brecon + Mtx_brecon.T) / 2.
            b_recon_ri = linalg.solve(Mtx_brecon, rhs_bl)[:sz_G1]

            error_seq[loop] = linalg.norm(a - np.dot(G, b_recon_ri))
            if error_seq[loop] < min_error:
                min_error = error_seq[loop]
                b_opt = b_recon_ri[:L+1] + 1j * np.concatenate((np.array([0]),
                                                                b_recon_ri[L+1:]))
                c_opt = c_ri[:K+1] + 1j * c_ri[K+1:]
            if min_error < noise_level and compute_mse:
                break
        if min_error < noise_level and compute_mse:
            break

    return b_opt, min_error, c_opt, ini
Example #42
def _cholesky_omp(X,
                  y,
                  n_nonzero_coefs,
                  tol=None,
                  copy_X=True,
                  return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.

    y : ndarray of shape (n_samples,)
        Input targets.

    n_nonzero_coefs : int
        Targeted number of non-zero elements.

    tol : float, default=None
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_X : bool, default=True
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, default=False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : ndarray of shape (n_nonzero_coefs,)
        Non-zero elements of the solution.

    idx : ndarray of shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector.

    coef : ndarray of shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    if copy_X:
        X = X.copy("F")
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)

    min_float = np.finfo(X.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (X, ))
    (potrs, ) = get_lapack_funcs(("potrs", ), (X, ))

    alpha = np.dot(X.T, y)
    residual = y
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping

    max_features = X.shape[1] if tol is not None else n_nonzero_coefs

    L = np.empty((max_features, max_features), dtype=X.dtype)

    if return_path:
        coefs = np.empty_like(L)

    while True:
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam]**2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break

        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            linalg.solve_triangular(
                L[:n_active, :n_active],
                L[n_active, :n_active],
                trans=0,
                lower=1,
                overwrite_b=True,
                check_finite=False,
            )
            v = nrm2(L[n_active, :n_active])**2
            Lkk = linalg.norm(X[:, lam])**2 - v
            if Lkk <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = sqrt(Lkk)
        else:
            L[0, 0] = linalg.norm(X[:, lam])

        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1

        # solves LL'x = X'y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active],
                         alpha[:n_active],
                         lower=True,
                         overwrite_b=False)

        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual)**2 <= tol:
            break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
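
A toy sparse-recovery run for the helper above (a sketch; it assumes the module-level dependencies of _cholesky_omp are in place: numpy as np, scipy.linalg as linalg, get_lapack_funcs, sqrt, warnings and the `premature` warning message). The dictionary columns are normalized as the docstring requires, so with a noiseless target OMP should recover the true support:

rng = np.random.RandomState(0)
n_samples, n_features, n_nonzero = 30, 10, 3

X = rng.randn(n_samples, n_features)
X /= np.sqrt(np.sum(X ** 2, axis=0))     # unit-norm columns, as assumed by _cholesky_omp
support = rng.choice(n_features, n_nonzero, replace=False)
w = np.zeros(n_features)
w[support] = rng.randn(n_nonzero)
y = np.dot(X, w)

gamma, idx, n_active = _cholesky_omp(X, y, n_nonzero_coefs=n_nonzero)
print(sorted(idx), sorted(support))      # recovered support vs. true support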
Example #43
def do_solve(**kw):
    count[0] = 0
    x0, flag = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=6, tol=1e-14, **kw)
    count_0 = count[0]
    assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
    return x0, count_0
Example #44
# ## Linear Algebra Methods in SciPy

# In[98]:

import scipy.linalg as sl
A = array([[1,2], [-6,4]])
[LU,piv] = sl.lu_factor(A)


# In[99]:

bi = array([1,1])
xi=sl.lu_solve((LU,piv),bi)
xi


# ### Solving a least squares problem with SVD

# In[100]:

import scipy.linalg as sl
A = array([[1, 2], [-6, 4], [0, 8]])
b = array([1,1,1])
[U1, Sigma_1, VT] = sl.svd(A, full_matrices = False, compute_uv = True)
xast = dot(VT.T, dot(U1.T, b)/ Sigma_1)
r = dot(A, xast) - b  # computes the residual
nr = sl.norm(r, 2)     # computes the Euclidean norm of the residual
nr
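
# A quick cross-check (a sketch reusing the A and b defined above): the SVD
# solution should agree with scipy.linalg's own least squares solver.

# In[101]:

xast_lstsq = sl.lstsq(A, b)[0]
sl.norm(xast - xast_lstsq)    # should be on the order of machine precision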

Example #45
def grasp_callback(my_grasp):

    my_pose = geometry_msgs.msg.PoseStamped()
    my_pose.header.stamp = my_grasp.markers[0].header.stamp
    my_pose.header.frame_id = "/xtion_rgb_optical_frame"

    pose_target = geometry_msgs.msg.Pose()
    pose_target.position.x = my_grasp.markers[0].points[0].x
    pose_target.position.y = my_grasp.markers[0].points[0].y
    pose_target.position.z = my_grasp.markers[0].points[0].z

    ## Convert to quaternion

    u = [1, 0, 0]
    norm = linalg.norm([
        my_grasp.markers[0].points[0].x - my_grasp.markers[0].points[1].x,
        my_grasp.markers[0].points[0].y - my_grasp.markers[0].points[1].y,
        my_grasp.markers[0].points[0].z - my_grasp.markers[0].points[1].z
    ])
    v = asarray([
        my_grasp.markers[0].points[0].x - my_grasp.markers[0].points[1].x,
        my_grasp.markers[0].points[0].y - my_grasp.markers[0].points[1].y,
        my_grasp.markers[0].points[0].z - my_grasp.markers[0].points[1].z
    ]) / norm

    if (array_equal(u, v)):
        pose_target.orientation.w = 1
        pose_target.orientation.x = 0
        pose_target.orientation.y = 0
        pose_target.orientation.z = 0
    elif (array_equal(u, negative(v))):
        pose_target.orientation.w = 0
        pose_target.orientation.x = 0
        pose_target.orientation.y = 0
        pose_target.orientation.z = 1
    else:
        half = [u[0] + v[0], u[1] + v[1], u[2] + v[2]]
        pose_target.orientation.w = dot(u, half)
        temp = cross(u, half)
        pose_target.orientation.x = temp[0]
        pose_target.orientation.y = temp[1]
        pose_target.orientation.z = temp[2]
    norm = math.sqrt(pose_target.orientation.x * pose_target.orientation.x +
                     pose_target.orientation.y * pose_target.orientation.y +
                     pose_target.orientation.z * pose_target.orientation.z +
                     pose_target.orientation.w * pose_target.orientation.w)

    if norm == 0:
        norm = 1

    my_pose.pose.position = pose_target.position  # carry the grasp position over as well
    my_pose.pose.orientation.x = pose_target.orientation.x / norm
    my_pose.pose.orientation.y = pose_target.orientation.y / norm
    my_pose.pose.orientation.z = pose_target.orientation.z / norm
    my_pose.pose.orientation.w = pose_target.orientation.w / norm

    pose_target_trans = geometry_msgs.msg.PoseStamped()
    pose_target_trans.header.stamp = my_pose.header.stamp
    pose_target_trans.header.frame_id = "/map"
    now = rospy.Time.now()
    listener.waitForTransform("/map", "/xtion_rgb_optical_frame", now,
                              rospy.Duration(1.0))
    pose_target_trans = listener.transformPose("/map", my_pose)

    my_grasp_pub.publish(pose_target_trans)
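
The orientation block above is the usual shortest-arc quaternion taking the x-axis u onto the grasp approach direction v. A standalone numpy sketch of the same construction (illustrative only, independent of the ROS message types used above):

import numpy as np

def shortest_arc_quaternion(u, v):
    """Return (w, x, y, z) rotating unit vector u onto unit vector v."""
    u = np.asarray(u, dtype=float) / np.linalg.norm(u)
    v = np.asarray(v, dtype=float) / np.linalg.norm(v)
    if np.allclose(u, v):
        return np.array([1.0, 0.0, 0.0, 0.0])   # identical directions: identity rotation
    if np.allclose(u, -v):
        return np.array([0.0, 0.0, 0.0, 1.0])   # opposite directions: 180 degrees (about z, as in the callback)
    half = u + v                                 # unnormalized bisector of u and v
    q = np.concatenate(([np.dot(u, half)], np.cross(u, half)))
    return q / np.linalg.norm(q)

print(shortest_arc_quaternion([1, 0, 0], [0, 1, 0]))   # ~[0.707 0 0 0.707]: 90 degrees about z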
Example #46
qbhistvec = qbhistvec.astype(complex)

# %%
sqr2D = sqr2D.astype(complex)

# %%
#prob_detect_v = np.kron(np.eye(32), sqr2D) @ qbhistvec
#
# More direct route, without going through histories
i, j = find_best()
prob_detect_v = \
    (np.kron(EnergyCorrectionMatricesT[i], sqr2D) @ eigenvectors.T[i]) + \
    (np.kron(EnergyCorrectionMatricesT[j], sqr2D) @ eigenvectors.T[j])

# normalize
prob_detect_v = prob_detect_v / norm(prob_detect_v)

# %%
prob_detect = np.zeros(32)
for t_idx in range(32):
    prob_detect[t_idx] = \
        np.abs(prob_detect_v[2*t_idx])**2 + np.abs(prob_detect_v[2*t_idx+1])**2

# %%
plt.plot(times, prob_detect, 'bs')

# %%
detect_fft = \
    np.kron(F, np.eye(2)) @ prob_detect_v
detect_fft = detect_fft / norm(detect_fft)
Example #47
    def _run_modeling(cls, dataset_reader, **kwargs):
        def one_or_nan(x):
            y = np.ones(x.shape)
            y[np.isnan(x)] = float('nan')
            return y

        if 'subject_rejection' in kwargs and kwargs[
                'subject_rejection'] is True:
            assert False, 'SubjectAwareGenerativeModel must not and need not ' \
                          'apply subject rejection.'

        x_es = cls._get_opinion_score_2darray_with_preprocessing(
            dataset_reader, **kwargs)
        E, S = x_es.shape

        use_log = kwargs['use_log'] if 'use_log' in kwargs else False

        # === initialization ===

        mos = pd.DataFrame(x_es).mean(axis=1)

        x_e = mos  # use MOS as initial value for x_e
        b_s = np.zeros(S)

        r_es = x_es - np.tile(x_e, (S, 1)).T  # r_es: residual at e, s
        v_s = np.array(pd.DataFrame(r_es).std(axis=0, ddof=0))

        log_v_s = np.log(v_s)

        # === iteration ===

        MAX_ITR = 5000
        REFRESH_RATE = 0.1
        DELTA_THR = 1e-8

        print '=== Belief Propagation ==='

        itr = 0
        while True:

            x_e_prev = x_e

            # (8) b_s
            num = pd.DataFrame(x_es - np.tile(x_e, (S, 1)).T).sum(
                axis=0)  # sum over e
            den = pd.DataFrame(one_or_nan(x_es)).sum(axis=0)  # sum over e
            b_s_new = num / den
            b_s = b_s * (1.0 - REFRESH_RATE) + b_s_new * REFRESH_RATE

            a_es = x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))
            if use_log:
                # (9') log_v_s
                num = pd.DataFrame(-np.ones([E, S]) +
                                   a_es**2 / np.tile(v_s**2, (E, 1))).sum(
                                       axis=0)  # sum over e
                den = pd.DataFrame(-2 * a_es**2 / np.tile(v_s**2, (E, 1))).sum(
                    axis=0)  # sum over e
                log_v_s_new = log_v_s - num / den
                log_v_s = log_v_s * (1.0 -
                                     REFRESH_RATE) + log_v_s_new * REFRESH_RATE
                v_s = np.exp(log_v_s)
            else:
                # (9) v_s
                num = pd.DataFrame(2 * np.ones([E, S]) *
                                   np.tile(v_s**3, (E, 1)) -
                                   4 * np.tile(v_s, (E, 1)) * a_es**2).sum(
                                       axis=0)  # sum over e
                den = pd.DataFrame(
                    np.ones([E, S]) * np.tile(v_s**2, (E, 1)) -
                    3 * a_es**2).sum(axis=0)  # sum over e
                v_s_new = num / den
                v_s = v_s * (1.0 - REFRESH_RATE) + v_s_new * REFRESH_RATE
                # v_s = np.maximum(v_s, np.zeros(v_s.shape))

            # (7) x_e
            num = pd.DataFrame(
                (x_es - np.tile(b_s, (E, 1))) / np.tile(v_s**2, (E, 1))).sum(
                    axis=1)  # sum along s
            den = pd.DataFrame(one_or_nan(x_es) / np.tile(v_s**2, (E, 1))).sum(
                axis=1)  # sum along s
            x_e_new = num / den
            x_e = x_e * (1.0 - REFRESH_RATE) + x_e_new * REFRESH_RATE

            itr += 1

            delta_x_e = linalg.norm(x_e_prev - x_e)

            msg = 'Iteration {itr:4d}: change {delta_x_e}, mean x_e {x_e}, mean b_s {b_s}, mean v_s {v_s}'.\
                format(itr=itr, delta_x_e=delta_x_e, x_e=np.mean(x_e), b_s=np.mean(b_s), v_s=np.mean(v_s))
            sys.stdout.write(msg + '\r')
            sys.stdout.flush()
            # time.sleep(0.001)

            if delta_x_e < DELTA_THR:
                break

            if itr >= MAX_ITR:
                break

        sys.stdout.write("\n")

        result = {
            'quality_scores': list(x_e),
            'observer_bias': list(b_s),
            'observer_inconsistency': list(v_s),
        }

        try:
            observers = dataset_reader._get_list_observers()  # may not exist
            result['observers'] = observers
        except AssertionError:
            pass

        return result
Example #48
File: snapshot.py Project: mrc13/eTraGo
def get_medoids(df):
    """

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe returned by `cluster` function

    Returns
    --------
     Nested dictionary, first key is cluster id. Nested keys are:

        'data': with the representative data for each cluster (medoid)
        'size' : size of the cluster in days/weeks (weight)
        'dates': pandas.datetimeindex with dates of original data of clusters.
    """
    # calculate hours of the group (e.g. 24 for day, 168 for week etc)
    hours = int(len(df) / len(set(df.index.get_level_values('group'))))

    # Finding medoids, clustersizes, etc.
    cluster_group = {}
    cluster_size = {}
    medoids = {}

    # this is necessary for weeks, because there might be no cluster for
    # elements inside the dataframe, i.e. no complete weeks
    cluster_ids = [
        i for i in df.index.get_level_values('cluster_id').unique()
        if not np.isnan(i)
    ]
    for c in cluster_ids:
        print('Computing medoid for cluster: ', c, '...')
        # days in the cluster is the df subset indexed by the cluster id 'c'
        cluster_group[c] = df.loc[c]
        # the size for daily clusters is the length of all hourly vals / 24
        cluster_size[c] = len(df.loc[c]) / hours

        # store the cluster 'days' i.e. all observations of cluster in 'cluster'
        # TODO: Maybe rather use copy() to keep cluster_days untouched (reference problem)
        cluster = cluster_group[c]

        #pdb.set_trace()
        # Finding medoids (this is a little hackish but should work correctly):
        # 1) create empty distance matrix with size of cluster
        # 2) loop through matrix and add the distance between two 'days'
        # 3) As we want days, we have to slice 24*i...
        Yc = np.empty((
            int(cluster_size[c]),
            int(cluster_size[c]),
        ))
        Yc[:] = np.NAN
        for i in range(len(Yc)):
            for j in range(len(Yc[i, :])):
                A = cluster.iloc[hours * i:hours * i + hours].values
                B = cluster.iloc[hours * j:hours * j + hours].values
                Yc[i, j] = norm(A - B, ord='fro')
        # taking the index with the minimum summed distance as medoid
        mid = np.argmin(Yc.sum(axis=0))

        # store data about medoids
        medoids[c] = {}
        # find medoid
        medoids[c]['data'] = cluster.iloc[hours * mid:hours * mid + hours]
        # size ( weight)
        medoids[c]['size'] = cluster_size[c]
        # dates from original data
        medoids[c]['dates'] = medoids[c]['data'].index.get_level_values(
            'datetime')

    return medoids
Example #49
 def cost_func(y):
     dists = np.array([norm(x - y) for x in X])
     return np.sum(dists)
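
The method above is the geometric-median objective: the sum of Euclidean distances from y to the points in X. A self-contained sketch of minimizing such a cost (the points, starting value and optimizer here are illustrative assumptions, not from the original source):

import numpy as np
from numpy.linalg import norm
from scipy.optimize import minimize

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [5.0, 5.0]])

def cost_func(y):
    dists = np.array([norm(x - y) for x in X])
    return np.sum(dists)

res = minimize(cost_func, X.mean(axis=0), method='Nelder-Mead')
print(res.x)   # geometric median; less pulled toward the outlier than the plain mean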
Example #50
    def _run_modeling(cls, dataset_reader, **kwargs):

        # mode: DEFAULT - subject and content-aware
        #       NO_SUBJECT - subject-unaware
        #       NO_CONTENT - content-unaware

        if 'subject_rejection' in kwargs and kwargs[
                'subject_rejection'] is True:
            assert False, '{} must not and need not apply subject rejection.'.format(
                cls.__name__)

        gradient_method = kwargs[
            'gradient_method'] if 'gradient_method' in kwargs else cls.DEFAULT_GRADIENT_METHOD
        assert gradient_method == 'simplified' or gradient_method == 'original' or gradient_method == 'numerical'

        def sum_over_content_id(xs, cids):
            assert len(xs) == len(cids)
            num_c = np.max(cids) + 1
            for cid in set(cids):
                assert cid in range(num_c), \
                    'content id must be in [0, {num_c}), but is {cid}'.format(num_c=num_c, cid=cid)
            sums = np.zeros(num_c)
            for x, cid in zip(xs, cids):
                sums[cid] += x
            return sums

        def std_over_subject_and_content_id(x_es, cids):
            assert x_es.shape[0] == len(cids)
            num_c = np.max(cids) + 1
            for cid in set(cids):
                assert cid in range(num_c), \
                    'content id must be in [0, {num_c}), but is {cid}'.format(num_c=num_c, cid=cid)
            ls = [[] for _ in range(num_c)]
            for idx_cid, cid in enumerate(cids):
                ls[cid] = ls[cid] + list(x_es[idx_cid, :])
            stds = []
            for l in ls:
                stds.append(pd.Series(l).std(ddof=0))
            return np.array(stds)

        def one_or_nan(x):
            y = np.ones(x.shape)
            y[np.isnan(x)] = float('nan')
            return y

        x_es = cls._get_opinion_score_2darray_with_preprocessing(
            dataset_reader, **kwargs)
        E, S = x_es.shape
        C = dataset_reader.max_content_id_of_ref_videos + 1

        # === initialization ===

        mos = np.array(
            MosModel(dataset_reader).run_modeling()['quality_scores'])
        r_es = x_es - np.tile(mos, (S, 1)).T  # r_es: residual at e, s
        sigma_r_s = pd.DataFrame(r_es).std(axis=0, ddof=0)  # along e
        sigma_r_c = std_over_subject_and_content_id(
            r_es, dataset_reader.content_id_of_dis_videos)

        x_e = mos  # use MOS as initial value for x_e
        b_s = np.zeros(S)
        v_s = np.zeros(S) if cls.mode == 'NO_SUBJECT' else sigma_r_s
        a_c = np.zeros(C) if cls.mode == 'NO_CONTENT' else sigma_r_c

        x_e_std = None
        b_s_std = None
        v_s_std = None
        a_c_std = None

        # === iterations ===

        MAX_ITR = 10000
        REFRESH_RATE = 0.1
        DELTA_THR = 1e-8
        EPSILON = 1e-3

        print '=== Belief Propagation ==='

        itr = 0
        while True:

            x_e_prev = x_e

            # ==== (12) b_s ====

            if gradient_method == 'simplified':
                a_c_e = np.array(
                    map(lambda i: a_c[i],
                        dataset_reader.content_id_of_dis_videos))
                num_num = x_es - np.tile(x_e, (S, 1)).T
                num_den = np.tile(v_s**2, (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
                num = pd.DataFrame(num_num / num_den).sum(axis=0)  # sum over e
                den_num = one_or_nan(x_es)  # 1 and nan
                den_den = num_den
                den = pd.DataFrame(den_num / den_den).sum(axis=0)  # sum over e
                b_s_new = num / den
                b_s = b_s * (1.0 - REFRESH_RATE) + b_s_new * REFRESH_RATE
                b_s_std = 1.0 / np.sqrt(den)  # calculate std of x_e

            elif gradient_method == 'original':
                a_c_e = np.array(
                    map(lambda i: a_c[i],
                        dataset_reader.content_id_of_dis_videos))
                vs2_add_ace2 = np.tile(v_s**2,
                                       (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
                order1 = (x_es - np.tile(x_e, (S, 1)).T -
                          np.tile(b_s, (E, 1))) / vs2_add_ace2
                order1 = pd.DataFrame(order1).sum(axis=0)  # sum over e
                order2 = -one_or_nan(x_es) / vs2_add_ace2
                order2 = pd.DataFrame(order2).sum(axis=0)  # sum over e
                b_s_new = b_s - order1 / order2
                b_s = b_s * (1.0 - REFRESH_RATE) + b_s_new * REFRESH_RATE
                b_s_std = 1.0 / np.sqrt(-order2)  # calculate std of x_e

            elif gradient_method == 'numerical':
                axis = 0  # sum over e
                order1 = (cls.loglikelihood_fcn(
                    x_es,
                    x_e,
                    b_s + EPSILON / 2.0,
                    v_s,
                    a_c,
                    dataset_reader.content_id_of_dis_videos,
                    axis=axis) - cls.loglikelihood_fcn(
                        x_es,
                        x_e,
                        b_s - EPSILON / 2.0,
                        v_s,
                        a_c,
                        dataset_reader.content_id_of_dis_videos,
                        axis=axis)) / EPSILON
                order2 = (cls.loglikelihood_fcn(
                    x_es,
                    x_e,
                    b_s + EPSILON,
                    v_s,
                    a_c,
                    dataset_reader.content_id_of_dis_videos,
                    axis=axis) - 2 * cls.loglikelihood_fcn(
                        x_es,
                        x_e,
                        b_s,
                        v_s,
                        a_c,
                        dataset_reader.content_id_of_dis_videos,
                        axis=axis) + cls.loglikelihood_fcn(
                            x_es,
                            x_e,
                            b_s - EPSILON,
                            v_s,
                            a_c,
                            dataset_reader.content_id_of_dis_videos,
                            axis=axis)) / EPSILON**2
                b_s_new = b_s - order1 / order2
                b_s = b_s * (1.0 - REFRESH_RATE) + b_s_new * REFRESH_RATE
                b_s_std = 1.0 / np.sqrt(-order2)  # calculate std of x_e

            else:
                assert False

            if cls.mode == 'NO_SUBJECT':
                b_s = np.zeros(S)  # forcing zero, hence disabling
                b_s_std = np.zeros(S)

            # ==== (14) v_s ====

            if gradient_method == 'simplified':
                a_c_e = np.array(
                    map(lambda i: a_c[i],
                        dataset_reader.content_id_of_dis_videos))
                a_es = x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))
                vs2_add_ace2 = np.tile(v_s**2,
                                       (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
                vs2_minus_ace2 = np.tile(v_s**2,
                                         (E, 1)) - np.tile(a_c_e**2, (S, 1)).T
                num = -np.tile(v_s, (E, 1)) / vs2_add_ace2 + np.tile(
                    v_s, (E, 1)) * a_es**2 / vs2_add_ace2**2
                num = pd.DataFrame(num).sum(axis=0)  # sum over e
                poly_term = np.tile(a_c_e**4, (S, 1)).T \
                      - 3 * np.tile(v_s**4, (E, 1)) \
                      - 2 * np.tile(v_s**2, (E, 1)) * np.tile(a_c_e**2, (S, 1)).T
                den = vs2_minus_ace2 / vs2_add_ace2**2 + a_es**2 * poly_term / vs2_add_ace2**4
                den = pd.DataFrame(den).sum(axis=0)  # sum over e
                v_s_new = v_s - num / den
                v_s = v_s * (1.0 - REFRESH_RATE) + v_s_new * REFRESH_RATE
                # calculate std of v_s
                lpp = pd.DataFrame(vs2_minus_ace2 / vs2_add_ace2**2 +
                                   a_es**2 * poly_term / vs2_add_ace2**4).sum(
                                       axis=0)  # sum over e
                v_s_std = 1.0 / np.sqrt(-lpp)

            elif gradient_method == 'original':
                a_c_e = np.array(
                    map(lambda i: a_c[i],
                        dataset_reader.content_id_of_dis_videos))
                a_es = x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))
                vs2_add_ace2 = np.tile(v_s**2,
                                       (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
                vs2_minus_ace2 = np.tile(v_s**2,
                                         (E, 1)) - np.tile(a_c_e**2, (S, 1)).T
                poly_term = np.tile(a_c_e**4, (S, 1)).T \
                      - 3 * np.tile(v_s**4, (E, 1)) \
                      - 2 * np.tile(v_s**2, (E, 1)) * np.tile(a_c_e**2, (S, 1)).T
                order1 = -np.tile(v_s, (E, 1)) / vs2_add_ace2 + np.tile(
                    v_s, (E, 1)) * a_es**2 / vs2_add_ace2**2
                order1 = pd.DataFrame(order1).sum(axis=0)  # sum over e
                order2 = vs2_minus_ace2 / vs2_add_ace2**2 + a_es**2 * poly_term / vs2_add_ace2**4
                order2 = pd.DataFrame(order2).sum(axis=0)  # sum over e
                v_s_new = v_s - order1 / order2
                v_s = v_s * (1.0 - REFRESH_RATE) + v_s_new * REFRESH_RATE
                v_s_std = 1.0 / np.sqrt(-order2)  # calculate std of v_s

            elif gradient_method == 'numerical':
                axis = 0  # sum over e
                order1 = (cls.loglikelihood_fcn(
                    x_es,
                    x_e,
                    b_s,
                    v_s + EPSILON / 2.0,
                    a_c,
                    dataset_reader.content_id_of_dis_videos,
                    axis=axis) - cls.loglikelihood_fcn(
                        x_es,
                        x_e,
                        b_s,
                        v_s - EPSILON / 2.0,
                        a_c,
                        dataset_reader.content_id_of_dis_videos,
                        axis=axis)) / EPSILON
                order2 = (cls.loglikelihood_fcn(
                    x_es,
                    x_e,
                    b_s,
                    v_s + EPSILON,
                    a_c,
                    dataset_reader.content_id_of_dis_videos,
                    axis=axis) - 2 * cls.loglikelihood_fcn(
                        x_es,
                        x_e,
                        b_s,
                        v_s,
                        a_c,
                        dataset_reader.content_id_of_dis_videos,
                        axis=axis) + cls.loglikelihood_fcn(
                            x_es,
                            x_e,
                            b_s,
                            v_s - EPSILON,
                            a_c,
                            dataset_reader.content_id_of_dis_videos,
                            axis=axis)) / EPSILON**2
                v_s_new = v_s - order1 / order2
                v_s = v_s * (1.0 - REFRESH_RATE) + v_s_new * REFRESH_RATE
                v_s_std = 1.0 / np.sqrt(-order2)  # calculate std of v_s

            else:
                assert False

            # force non-negative
            v_s = np.maximum(v_s, 0.0 * np.ones(v_s.shape))

            if cls.mode == 'NO_SUBJECT':
                v_s = np.zeros(S)  # forcing zero, hence disabling
                v_s_std = np.zeros(S)

            # ==== (15) a_c ====

            if gradient_method == 'simplified':
                a_c_e = np.array(
                    map(lambda i: a_c[i],
                        dataset_reader.content_id_of_dis_videos))
                a_es = x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))
                vs2_add_ace2 = np.tile(v_s**2,
                                       (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
                vs2_minus_ace2 = np.tile(v_s**2,
                                         (E, 1)) - np.tile(a_c_e**2, (S, 1)).T
                num = -np.tile(a_c_e, (S, 1)).T / vs2_add_ace2 + np.tile(
                    a_c_e, (S, 1)).T * a_es**2 / vs2_add_ace2**2
                num = pd.DataFrame(num).sum(axis=1)  # sum over s
                num = sum_over_content_id(
                    num, dataset_reader.content_id_of_dis_videos
                )  # sum over e:c(e)=c
                poly_term = np.tile(v_s**4, (E, 1)) \
                      - 3 * np.tile(a_c_e**4, (S, 1)).T \
                      - 2 * np.tile(v_s**2, (E, 1)) * np.tile(a_c_e**2, (S, 1)).T
                den = -vs2_minus_ace2 / vs2_add_ace2**2 + a_es**2 * poly_term / vs2_add_ace2**4
                den = pd.DataFrame(den).sum(axis=1)  # sum over s
                den = sum_over_content_id(
                    den, dataset_reader.content_id_of_dis_videos
                )  # sum over e:c(e)=c
                a_c_new = a_c - num / den
                a_c = a_c * (1.0 - REFRESH_RATE) + a_c_new * REFRESH_RATE
                # calculate std of a_c
                lpp = sum_over_content_id(
                    pd.DataFrame(-vs2_minus_ace2 / vs2_add_ace2**2 +
                                 a_es**2 * poly_term / vs2_add_ace2**4).sum(
                                     axis=1), dataset_reader.
                    content_id_of_dis_videos)  # sum over e:c(e)=c
                a_c_std = 1.0 / np.sqrt(-lpp)

            elif gradient_method == 'original':
                a_c_e = np.array(
                    map(lambda i: a_c[i],
                        dataset_reader.content_id_of_dis_videos))
                a_es = x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))
                vs2_add_ace2 = np.tile(v_s**2,
                                       (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
                vs2_minus_ace2 = np.tile(v_s**2,
                                         (E, 1)) - np.tile(a_c_e**2, (S, 1)).T
                poly_term = np.tile(v_s**4, (E, 1)) \
                      - 3 * np.tile(a_c_e**4, (S, 1)).T \
                      - 2 * np.tile(v_s**2, (E, 1)) * np.tile(a_c_e**2, (S, 1)).T
                order1 = -np.tile(a_c_e, (S, 1)).T / vs2_add_ace2 + np.tile(
                    a_c_e, (S, 1)).T * a_es**2 / vs2_add_ace2**2
                order1 = pd.DataFrame(order1).sum(axis=1)  # sum over s
                order1 = sum_over_content_id(
                    order1, dataset_reader.content_id_of_dis_videos
                )  # sum over e:c(e)=c
                order2 = -vs2_minus_ace2 / vs2_add_ace2**2 + a_es**2 * poly_term / vs2_add_ace2**4
                order2 = pd.DataFrame(order2).sum(axis=1)  # sum over s
                order2 = sum_over_content_id(
                    order2, dataset_reader.content_id_of_dis_videos
                )  # sum over e:c(e)=c
                a_c_new = a_c - order1 / order2
                a_c = a_c * (1.0 - REFRESH_RATE) + a_c_new * REFRESH_RATE
                a_c_std = 1.0 / np.sqrt(-order2)  # calculate std of a_c

            elif gradient_method == 'numerical':
                axis = 1  # sum over s
                order1 = (cls.loglikelihood_fcn(
                    x_es,
                    x_e,
                    b_s,
                    v_s,
                    a_c + EPSILON / 2.0,
                    dataset_reader.content_id_of_dis_videos,
                    axis=axis) - cls.loglikelihood_fcn(
                        x_es,
                        x_e,
                        b_s,
                        v_s,
                        a_c - EPSILON / 2.0,
                        dataset_reader.content_id_of_dis_videos,
                        axis=axis)) / EPSILON
                order2 = (cls.loglikelihood_fcn(
                    x_es,
                    x_e,
                    b_s,
                    v_s,
                    a_c + EPSILON,
                    dataset_reader.content_id_of_dis_videos,
                    axis=axis) - 2 * cls.loglikelihood_fcn(
                        x_es,
                        x_e,
                        b_s,
                        v_s,
                        a_c,
                        dataset_reader.content_id_of_dis_videos,
                        axis=axis) + cls.loglikelihood_fcn(
                            x_es,
                            x_e,
                            b_s,
                            v_s,
                            a_c - EPSILON,
                            dataset_reader.content_id_of_dis_videos,
                            axis=axis)) / EPSILON**2
                order1 = sum_over_content_id(
                    order1, dataset_reader.content_id_of_dis_videos
                )  # sum over e:c(e)=c
                order2 = sum_over_content_id(
                    order2, dataset_reader.content_id_of_dis_videos
                )  # sum over e:c(e)=c
                a_c_new = a_c - order1 / order2
                a_c = a_c * (1.0 - REFRESH_RATE) + a_c_new * REFRESH_RATE
                a_c_std = 1.0 / np.sqrt(-order2)  # calculate std of a_c

            else:
                assert False

            # force non-negative
            a_c = np.maximum(a_c, 0.0 * np.ones(a_c.shape))

            if cls.mode == 'NO_CONTENT':
                a_c = np.zeros(C)  # forcing zero, hence disabling
                a_c_std = np.zeros(C)

            # (11) ==== x_e ====

            if gradient_method == 'simplified':
                a_c_e = np.array(
                    map(lambda i: a_c[i],
                        dataset_reader.content_id_of_dis_videos))
                num_num = x_es - np.tile(b_s, (E, 1))
                num_den = np.tile(v_s**2, (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
                num = pd.DataFrame(num_num / num_den).sum(axis=1)  # sum over s
                den_num = one_or_nan(x_es)  # 1 and nan
                den_den = num_den
                den = pd.DataFrame(den_num / den_den).sum(axis=1)  # sum over s
                x_e_new = num / den
                x_e = x_e * (1.0 - REFRESH_RATE) + x_e_new * REFRESH_RATE
                x_e_std = 1.0 / np.sqrt(den)  # calculate std of x_e

            elif gradient_method == 'original':
                a_c_e = np.array(
                    map(lambda i: a_c[i],
                        dataset_reader.content_id_of_dis_videos))
                a_es = x_es - np.tile(x_e, (S, 1)).T - np.tile(b_s, (E, 1))
                vs2_add_ace2 = np.tile(v_s**2,
                                       (E, 1)) + np.tile(a_c_e**2, (S, 1)).T
                order1 = a_es / vs2_add_ace2
                order1 = pd.DataFrame(order1).sum(axis=1)  # sum over s
                order2 = -one_or_nan(x_es) / vs2_add_ace2
                order2 = pd.DataFrame(order2).sum(axis=1)  # sum over s
                x_e_new = x_e - order1 / order2
                x_e = x_e * (1.0 - REFRESH_RATE) + x_e_new * REFRESH_RATE
                x_e_std = 1.0 / np.sqrt(-order2)  # calculate std of x_e

            elif gradient_method == 'numerical':
                axis = 1  # sum over s
                order1 = (cls.loglikelihood_fcn(
                    x_es,
                    x_e + EPSILON / 2.0,
                    b_s,
                    v_s,
                    a_c,
                    dataset_reader.content_id_of_dis_videos,
                    axis=axis) - cls.loglikelihood_fcn(
                        x_es,
                        x_e - EPSILON / 2.0,
                        b_s,
                        v_s,
                        a_c,
                        dataset_reader.content_id_of_dis_videos,
                        axis=axis)) / EPSILON
                order2 = (cls.loglikelihood_fcn(
                    x_es,
                    x_e + EPSILON,
                    b_s,
                    v_s,
                    a_c,
                    dataset_reader.content_id_of_dis_videos,
                    axis=axis) - 2 * cls.loglikelihood_fcn(
                        x_es,
                        x_e,
                        b_s,
                        v_s,
                        a_c,
                        dataset_reader.content_id_of_dis_videos,
                        axis=axis) + cls.loglikelihood_fcn(
                            x_es,
                            x_e - EPSILON,
                            b_s,
                            v_s,
                            a_c,
                            dataset_reader.content_id_of_dis_videos,
                            axis=axis)) / EPSILON**2
                x_e_new = x_e - order1 / order2
                x_e = x_e * (1.0 - REFRESH_RATE) + x_e_new * REFRESH_RATE
                x_e_std = 1.0 / np.sqrt(-order2)  # calculate std of x_e

            else:
                assert False

            itr += 1

            delta_x_e = linalg.norm(x_e_prev - x_e)

            likelihood = np.sum(
                cls.loglikelihood_fcn(x_es,
                                      x_e,
                                      b_s,
                                      v_s,
                                      a_c,
                                      dataset_reader.content_id_of_dis_videos,
                                      axis=1))

            msg = 'Iteration {itr:4d}: change {delta_x_e}, likelihood {likelihood}, x_e {x_e}, b_s {b_s}, v_s {v_s}, a_c {a_c}'.\
                format(itr=itr, delta_x_e=delta_x_e, likelihood=likelihood, x_e=np.nanmean(x_e), b_s=np.nanmean(b_s), v_s=np.nanmean(v_s), a_c=np.nanmean(a_c))
            sys.stdout.write(msg + '\r')
            sys.stdout.flush()
            # time.sleep(0.001)

            if delta_x_e < DELTA_THR:
                break

            if itr >= MAX_ITR:
                break

        sys.stdout.write("\n")

        assert x_e_std is not None
        assert b_s_std is not None

        result = {
            'quality_scores': list(x_e),
            'quality_scores_std': list(x_e_std),
        }

        if cls.mode != 'NO_SUBJECT':
            result['observer_bias'] = list(b_s)
            result['observer_bias_std'] = list(b_s_std)

            result['observer_inconsistency'] = list(v_s)
            result['observer_inconsistency_std'] = list(v_s_std)

        if cls.mode != 'NO_CONTENT':
            result['content_ambiguity'] = list(a_c)
            result['content_ambiguity_std'] = list(a_c_std)

        try:
            observers = dataset_reader._get_list_observers()  # may not exist
            result['observers'] = observers
        except AssertionError:
            pass

        return result
Example #51
 def basicFunc(self, c, d, beta):
     return np.exp(beta * norm(c - d)**2)
def BinarySolver(func, h0, x0, rho, maxIter, sigma=100):

    """
    Use exact penalty method to solve optimization problem with binary constraints
        min_x func(x)
        s.t. x \in {-1,1}

    Inputs:
        - func: Function that needs to be minimized
        - x0:   Initialization
    
    This implementation uses a novel method invented by HKT Team
    Copyright 2018 HKT

    References: https://www.facebook.com/dangkhoasdc

    """

    n = len(x0)
    #xt, vt: Values of x and v at the previous iteration, which are used to update x and v at the current iteration, respectively
    #h0 = x0.copy()    
    xt = x0 #np.zeros(x0.shape)  #np.sign(x0)
    vt = np.ones(xt.shape) - xt  # initialize v consistent with the constraint v = 1 - x
    
    
    # Lagrangian duals
    y1 = np.zeros(x0.shape)
    y2 = np.zeros(x0.shape)

    def fcost(x): 
        return func(x) + sigma*np.sum((x-h0)**2) 
   
    print("Initial cost with sign: %f without sign = %f" %(fcost(xt), fcost(h0)))
    print("reconstruction: %f" %(func(x0)))
    print("Encoder error: %f" %(sigma*np.sum((x0-h0)**2)))
    # pdb.set_trace()

    def fx(x):  # Fix v, solve for x
        return (func(x) + sigma*np.sum(np.power(x-h0, 2))
                + np.dot(y1, vt - np.ones(xt.shape) + x) + 0.5*rho*(np.sum(np.power(vt + x - np.ones(xt.shape), 2)))
                + np.dot(y2, np.multiply(vt, x+np.ones(x0.shape))) + 0.5*rho*np.sum(np.power(np.multiply(vt, x+np.ones(x0.shape)), 2)))

    def fv(x):  # Fix x, solve for v
        return (np.dot(y1, x - np.ones(x0.shape) + xt) + 0.5*rho*np.sum(np.power(x - np.ones(x0.shape), 2))
                + np.dot(y2, np.multiply(x, np.ones(x0.shape)+xt)) + 0.5*rho*np.sum(np.power(np.multiply(x, np.ones(x0.shape)+xt), 2)))

    
    # Define the lower and upper bounds for fx, i.e., -1 <= x <= 1        
    xbounds= [(-1,1) for i in range(n)]
    vbounds = [(0, 2) for i in range(n)]
    # Now, let the iterations begin
    converged = False
    iter = 0
    while iter < maxIter and not converged:
        # Fix v, minimize x
        print('----Updating x ')               
        x_res = minimize(fx, xt, bounds = xbounds)
        x = x_res.x
        # print min(x), max(x)
        # Fix x, update v
        print('----Updating v')
        v_res = minimize(fv, vt, bounds = vbounds)
        v = v_res.x

        # print min(v), max(v)
        y1 = y1 + rho*(v + x - np.ones(x0.shape))
        y2 = y2 + rho*(np.multiply(v, np.ones(x0.shape) + x))

        print("Iter: %d , fx = %.3f, prev_fx = %.3f, x diff: %.3f, rho = %.3f reconstruction: %f" 
              %(iter, fcost(x), fcost(xt), norm(x - xt), rho, func(x)))
        print("reconstruction: %f" %(func(x)))
        print("Encoder error: %f" %(sigma*np.sum((x-h0)**2)))
        # Check for convergence
        # if iter > 4 and ((norm(v - vt) < 1e-6 and abs(func(x) - func(xt) < 1e-6)) or (n-np.dot(xt, vt))**2<1.5):
        if iter > 1 and ( (norm(x - xt) < 1e-6  or                           
                          abs(fcost(x)-fcost(xt))<1e-6  ) ):
            converged = True
            print('--------Using LINF  - Converged---------')            
            return xt #np.ones(x0.shape) - vt
        
        #print (xt)
        rho = rho*1.05
        xt = x
        vt = v

        
        iter = iter + 1

    return  xt #np.ones(x0.shape) - vt
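
A toy invocation of BinarySolver (a sketch, not from the original source; it assumes the function above together with numpy as np, norm and scipy.optimize.minimize are importable). The reconstruction term is a simple least-squares fit to a known binary vector:

np.random.seed(0)
n = 8
A = np.random.randn(5, n)
target = np.sign(np.random.randn(n))           # binary vector we hope to recover
b = A.dot(target)

func = lambda x: np.sum((A.dot(x) - b) ** 2)   # reconstruction cost to minimize
h0 = target + 0.1 * np.random.randn(n)         # continuous code close to the target
x0 = np.sign(h0)                               # binary initialization

x_bin = BinarySolver(func, h0, x0, rho=1.0, maxIter=50, sigma=10)
print(np.sign(x_bin), target)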
Example #53
    uop_sd.set_uniq_amps_(x_rand)
    upsi = uop_sd (psi)
    upsi_h = fockspace.fock2hilbert (upsi, norb, nelec)
    uTupsi = uop_sd (upsi, transpose=True)
    for ix in range (2**(2*norb)):
        if np.any (np.abs ([psi[ix], upsi[ix], uTupsi[ix]]) > 1e-8):
            print (pbin (ix), psi[ix], upsi[ix], uTupsi[ix])
    print ("<psi|psi> =",psi.dot (psi), "<psi|U|psi> =",psi.dot (upsi),"<psi|U'U|psi> =",upsi.dot (upsi))
    print ("<psi|S**2|psi> =",spin_square (psi, norb)[0],
           "<psi|U'S**2U|psi> =",spin_square (upsi, norb)[0], spin_op.spin_square (upsi_h, norb, nelec)[0])

    ndet = cistring.num_strings (norb, nelec//2)
    np.random.seed (0)
    tpsi = 1-(2*np.random.rand (ndet))
    tpsi = np.multiply.outer (tpsi, tpsi).ravel ()
    tpsi /= linalg.norm (tpsi)
    tpsi = fockspace.hilbert2fock (tpsi, norb, (nelec//2, nelec//2)).ravel ()
    from scipy import optimize
    def obj_test (uop_test):
        def obj_fun (x):
            uop_test.set_uniq_amps_(x)
            upsi = uop_test (psi)
            ut = upsi.dot (tpsi)
            err = upsi.dot (upsi) - (ut**2)
            jac = np.zeros (uop_test.ngen)
            for ix, dupsi in enumerate (uop_test.gen_deriv1 (psi)):
                jac[ix] += 2*dupsi.dot (upsi - ut*tpsi) 
            jac = uop_test.product_rule_pack (jac)
            print (err, linalg.norm (jac))
            return err, jac
Example #54
a = inner(q(u_k)*nabla_grad(u), nabla_grad(v))*dx
f = Constant(0.0)
L = f*v*dx

# Picard iterations
u = Function(V)     # new unknown function
eps = 1.0           # error measure ||u-u_k||
rho = 0             # convergence rate
tol = 1.0E-6        # tolerance
iter = 0            # iteration counter
maxiter = 25        # max no of iterations allowed
for iter in range(maxiter):
    solve(a == L, u, bcs)
    diff = u.vector().array() - u_k.vector().array()
    oldeps = eps
    eps = la.norm(diff, ord=np.Inf) / la.norm(u.vector().array(), ord=np.Inf)
    rho = eps/oldeps
    print 'iter=%d: norm=%g, rho=%g' % (iter, eps, rho)
    if eps < tol * (1.0 - rho):
        break
    u_k.assign(u)   # update for next iteration

convergence = 'convergence after %d Picard iterations' % iter
if iter >= maxiter:
    convergence = 'no ' + convergence
print convergence

# Find max error
u_exact = Expression('pow((pow(2, m+1)-1)*x[0] + 1, 1.0/(m+1)) - 1', m=m)
u_e = interpolate(u_exact, V)
diff = la.norm((u_e.vector().array() - u.vector().array()), ord=np.Inf)
Example #55
def compute_coordinate():
    '''
    Compute the magnet's position in cartesian space.
    
    INPUTS:
        - No inputs.

    OUTPUT:
        - position: The most current and updated position
                    of the magnet in cartesian space.
    '''

    global initialGuess  # Modify from within function
    start = time()  # Start timing this solve

    # Data acquisition
    (H1, H2, H3, H4, H5, H6) = getData(IMU)  # Get data from MCU

    # Compute norms
    HNorm = [
        float(norm(H1)),
        float(norm(H2)),  #
        float(norm(H3)),
        float(norm(H4)),  # Compute L2 vector norms
        float(norm(H5)),
        float(norm(H6))
    ]  #

    # Solve system of equations
    sol = root(
        LHS,
        initialGuess,
        args=(K, HNorm),
        method='lm',  # Invoke solver using the
        options={
            'ftol': 1e-10,
            'xtol': 1e-10,
            'maxiter': 1000,  # Levenberg-Marquardt 
            'eps': 1e-8,
            'factor': 0.001
        })  # Algorithm (aka LMA)

    # Store solution in array
    position = np.array(
        (
            sol.x[0] * 1000,  # x-axis
            sol.x[1] * 1000,  # y-axis
            sol.x[2] * 1000,  # z-axis
            time() - start),
        dtype='float64')  # time

    # Check value
    if (position[2] < 0): position[2] = -1 * position[2]  # Make sure z-value
    else: pass  # ... is ALWAYS +ve

    # Print solution (coordinates) to screen
    print("(x, y, z, t): (%.3f, %.3f, %.3f, %.3f)" %
          (position[0], position[1], position[2], position[3]))

    # Check if solution makes sense
    if (abs(sol.x[0] * 1000) > 500) or (abs(sol.x[1] * 1000) > 500) or (abs(
            sol.x[2] * 1000) > 500):
        initialGuess = findIG(
            getData(IMU))  # Determine initial guess based on magnet's location
        return (compute_coordinate())  # Recursive call of function()

    # Update initial guess with current position and feed back to solver
    else:
        initialGuess = np.array(
            (
                sol.x[0] + dx,
                sol.x[1] + dx,  # Update the initial guess as the
                sol.x[2] + dx),
            dtype='float64')  # current position and feed back to LMA

        return (position)  # Return position
Example #56
File: proj.py Project: jaeilepp/eggie
def sensitivity_map(fwd,
                    projs=None,
                    ch_type='grad',
                    mode='fixed',
                    exclude=[],
                    verbose=None):
    """Compute sensitivity map

    Such maps are used to know how much sources are visible by a type
    of sensor, and how much projections shadow some sources.

    Parameters
    ----------
    fwd : dict
        The forward operator.
    projs : list
        List of projection vectors.
    ch_type : 'grad' | 'mag' | 'eeg'
        The type of sensors to use.
    mode : str
        The type of sensitivity map computed. See manual. Should be 'free',
        'fixed', 'ratio', 'radiality', 'angle', 'remaining', or 'dampening'
        corresponding to the argument --map 1, 2, 3, 4, 5, 6 and 7 of the
        command mne_sensitivity_map.
    exclude : list of string | str
        List of channels to exclude. If empty do not exclude any (default).
        If 'bads', exclude channels in fwd['info']['bads'].
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Return
    ------
    stc : SourceEstimate
        The sensitivity map as a SourceEstimate instance for
        visualization.
    """
    # check strings
    if not ch_type in ['eeg', 'grad', 'mag']:
        raise ValueError("ch_type should be 'eeg', 'mag' or 'grad (got %s)" %
                         ch_type)
    if not mode in [
            'free', 'fixed', 'ratio', 'radiality', 'angle', 'remaining',
            'dampening'
    ]:
        raise ValueError('Unknown mode type (got %s)' % mode)

    # check forward
    if is_fixed_orient(fwd, orig=True):
        raise ValueError('fwd must be computed with free orientation')
    fwd = convert_forward_solution(fwd,
                                   surf_ori=True,
                                   force_fixed=False,
                                   verbose=False)
    if not fwd['surf_ori'] or is_fixed_orient(fwd):
        raise RuntimeError('Error converting solution, please notify '
                           'mne-python developers')

    # limit forward
    if ch_type == 'eeg':
        fwd = pick_types_forward(fwd, meg=False, eeg=True, exclude=exclude)
    else:
        fwd = pick_types_forward(fwd, meg=ch_type, eeg=False, exclude=exclude)

    gain = fwd['sol']['data']

    # Make sure EEG has average
    if ch_type == 'eeg':
        if projs is None or not _has_eeg_average_ref_proj(projs):
            eeg_ave = [make_eeg_average_ref_proj(fwd['info'])]
        else:
            eeg_ave = []
        projs = eeg_ave if projs is None else projs + eeg_ave

    # Construct the projector
    if projs is not None:
        proj, ncomp, U = make_projector(projs,
                                        fwd['sol']['row_names'],
                                        include_active=True)
        # do projection for most types
        if mode not in ['angle', 'remaining', 'dampening']:
            gain = np.dot(proj, gain)

    # can only use the last couple methods if there are projectors
    elif mode in ['angle', 'remaining', 'dampening']:
        raise ValueError('No projectors used, cannot compute %s' % mode)

    n_sensors, n_dipoles = gain.shape
    n_locations = n_dipoles // 3
    sensitivity_map = np.empty(n_locations)

    for k in range(n_locations):
        gg = gain[:, 3 * k:3 * (k + 1)]
        if mode != 'fixed':
            s = linalg.svd(gg, full_matrices=False, compute_uv=False)
        if mode == 'free':
            sensitivity_map[k] = s[0]
        else:
            gz = linalg.norm(gg[:, 2])  # the normal component
            if mode == 'fixed':
                sensitivity_map[k] = gz
            elif mode == 'ratio':
                sensitivity_map[k] = gz / s[0]
            elif mode == 'radiality':
                sensitivity_map[k] = 1. - (gz / s[0])
            else:
                if mode == 'angle':
                    co = linalg.norm(np.dot(gg[:, 2], U))
                    sensitivity_map[k] = co / gz
                else:
                    p = linalg.norm(np.dot(proj, gg[:, 2]))
                    if mode == 'remaining':
                        sensitivity_map[k] = p / gz
                    elif mode == 'dampening':
                        sensitivity_map[k] = 1. - p / gz
                    else:
                        raise ValueError('Unknown mode type (got %s)' % mode)

    # only normalize fixed and free methods
    if mode in ['fixed', 'free']:
        sensitivity_map /= np.max(sensitivity_map)

    vertices = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
    subject = _subject_from_forward(fwd)
    stc = SourceEstimate(sensitivity_map[:, np.newaxis],
                         vertices=vertices,
                         tmin=0,
                         tstep=1,
                         subject=subject)
    return stc
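
# A hedged usage sketch (not part of the original source): assuming this is
# MNE-Python's sensitivity map routine, a forward solution read from disk can be
# passed straight in.  The file name below is a placeholder.
import mne

fwd = mne.read_forward_solution('sample-fwd.fif')
stc = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
print(stc.data.shape)  # one sensitivity value per source location
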
def power_diagram(face, uv, h=None, dh=None):
    if h is None:
        h = np.zeros((uv.shape[0], 1))

    if dh is None:
        dh = h * 0

    nf = face.shape[0]
    c = 1

    while True:
        h = h - c * dh
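        # lift the 2-D sites to 3-D: z = |p|^2 - h; the lower convex hull of the
        # lifted points encodes the power diagram (regular triangulation)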
        pl = np.concatenate((uv, np.reshape(np.square(norm(uv, axis=1)), (-1, 1)) - h), axis=1)
        hull = ConvexHull(pl, qhull_options='Qt')
        face = hull.simplices
        # fix-ups for the convex hull, as the orientation may be inverted
        fn_from_hull = hull.equations[:, 2]
        fn = calculate_face_normal(face, pl)
        for i in range(face.shape[0]):
            if fn[i, 2] * fn_from_hull[i] < 0:  # orientation differs from the hull normal
                face[i, :] = face[i, [0, 2, 1]]

        for i in range(face.shape[0]):
            mif = np.argmin(face[i,:])
            face[i, :] = face[i, np.mod(np.arange(mif,mif+3),3)]
        face = face[np.argsort(face[:, 0] * np.max(face) + face[:, 1]), :]
        fn = calculate_face_normal(face, pl)
        ind = fn[:, 2] < 0

        if np.sum(ind) < nf:
            h = h + c * dh
            c = c / 2
        else:
            break

        if np.max(abs(dh)) == 0:
            break

    fn = calculate_face_normal(face, pl)
    ind = fn[:, 2] < 0
    face = face[ind, :]
    pd = dict()
    pd['face'] = face
    vr = compute_vertex_ring(face, uv, ordered=True)
    pd['uv'] = uv
    pd['dp'] = np.zeros((face.shape[0], 2))
    pd['cell'] = [[] for i in range(pl.shape[0])]

    for i in range(face.shape[0]):
        dp = face_dual_uv(pl[face[i,:],:])
        pd['dp'][i,:] = dp

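    # boundary handling: for each convex-hull edge of uv, shoot a ray from the edge
    # midpoint, perpendicular to the edge, and clip it against a bounding box so the
    # unbounded boundary cells can be closed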
    K =  ConvexHull(uv, qhull_options='Qt').vertices
    ks = np.argmin(K)
    K = np.concatenate((K[ks::],  K[0:ks]), axis=0)
    K = np.append(K,K[0])
    vb = np.zeros((K.shape[0] - 1, 2))
    mindp = np.min(pd["dp"], axis=0) - 1
    maxdp = np.max(pd["dp"], axis=0) + 1
    minx = mindp[0]
    miny = mindp[1]
    maxx = maxdp[0]
    maxy = maxdp[1]
    box = np.array([minx, miny, maxx, miny, maxx, maxy, minx, maxy, minx, miny]).reshape((-1,2))

    for i in range(K.shape[0]- 1):
        i1 = K[i]
        i2 = K[i + 1]
        vec = uv[i2,:] - uv[i1,:]
        vec = np.array([vec[1], -vec[0]])
        mid = (uv[i2,:] + uv[i1,:]) / 2.0
        intersect = intersectRayPolygon(mid, vec, box)
        vb[i,:] = intersect

    pd["dpe"] = np.concatenate((pd["dp"], vb), axis=0)

    vvif, _, _= compute_connectivity(face)

    for i in range(uv.shape[0]):
        vri = vr[i]
        pb = np.argwhere(K==i)
        if pb.size > 0 :
            pb = pb[0][0]
            fr = np.zeros((len(vri) + 1,)).astype(int)
            fr[-1] = face.shape[0] + pb
            if pb == 0:
                fr[0] = face.shape[0] + K.shape[0]-2
            else:
                fr[0] = face.shape[0] + pb - 1
            for j in range(len(vri) - 1):
                fr[j+1] = vvif[i, vri[j]]
        else:
            fr = np.zeros((len(vri),)).astype(int)
            for j in range(len(vri)):
                fr[j] = vvif[i, vri[j]]
        pd["cell"][i] = np.flip(fr)

    return pd, h
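
# A minimal, self-contained sketch (not from the original source) of the lifting idea
# power_diagram() relies on: each site (x, y) with weight h is lifted to
# (x, y, x^2 + y^2 - h); the downward-facing facets of the 3-D convex hull of the
# lifted points form the regular (weighted Delaunay) triangulation, and each such
# facet's supporting plane projects to one power-diagram vertex.  Only numpy/scipy
# are used here; the dual vertices are read off the facet planes instead of calling
# the face_dual_uv() helper above.
import numpy as np
from scipy.spatial import ConvexHull

rng = np.random.default_rng(0)
sites = rng.random((30, 2))
weights = 0.05 * rng.random((30, 1))

lifted = np.hstack([sites, np.sum(sites ** 2, axis=1, keepdims=True) - weights])
hull3d = ConvexHull(lifted, qhull_options='Qt')
lower = hull3d.equations[:, 2] < 0      # outward normal points down => lower hull
triangles = hull3d.simplices[lower]     # regular triangulation of the sites
# a lower facet lies on n_x*x + n_y*y + n_z*z + off = 0; its power vertex is
# (-n_x, -n_y) / (2 * n_z)
planes = hull3d.equations[lower]
power_vertices = -planes[:, :2] / (2.0 * planes[:, 2:3])
print(triangles.shape, power_vertices.shape)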
示例#58
0
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy as sc
from scipy import linalg
from numpy import linalg as linalg2

H5 = linalg.hilbert(5)
H10 = linalg.hilbert(10)
H20 = linalg.hilbert(20)
print('\nnorm(H5)=', linalg.norm(H5, np.inf))
print('k(H5)=', linalg2.cond(H5, np.inf))

print('\nnorm(H10)=', linalg.norm(H10, np.inf))
print('k(H10)=', linalg2.cond(H10, np.inf))

print('\nnorm(H20)=', linalg.norm(H20, np.inf))
print('k(H20)=', linalg2.cond(H20, np.inf))
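
# A small follow-up sketch (not part of the original snippet): the huge condition
# numbers printed above translate directly into loss of accuracy when solving a
# linear system with a Hilbert matrix.
x_true = np.ones(10)
b = H10 @ x_true
x_solved = linalg.solve(H10, b)
print('relative error solving H10 x = b:',
      linalg2.norm(x_solved - x_true) / linalg2.norm(x_true))
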
np.random.seed(0)
mask = np.ones([size, size], dtype=bool)

coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.0
coef[-roi_size:, -roi_size:] = 1.0

X = np.random.randn(n_samples, size ** 2)
for x in X:  # smooth data
    x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)

y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.0)) / linalg.norm(noise, 2)
y += noise_coef * noise  # add noise

# #############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2)  # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(location=cachedir, verbose=1)

# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity, memory=mem)
clf = Pipeline([("ward", ward), ("ridge", ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {"ward__n_clusters": [10, 20, 30]}, n_jobs=1, cv=cv)
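
# A hedged continuation (not shown in the original snippet): fit the grid search and
# map the agglomerated coefficients back to pixel space.  Step order follows the
# Pipeline defined above; `size` comes from the (omitted) setup of this script.
clf.fit(X, y)
ward_best = clf.best_estimator_.steps[0][1]     # fitted FeatureAgglomeration
ridge_best = clf.best_estimator_.steps[-1][1]   # fitted BayesianRidge
coef_agglo = ward_best.inverse_transform(ridge_best.coef_).reshape(size, size)
print(coef_agglo.shape)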
示例#60
0
def dmg_seed_50_1D(colnum):

    #INITIALIZING STUFF
    Nmitral = 50
    Ngranule = np.copy(Nmitral)  #number of granule cells     pg. 383 of Li/Hop
    Ndim = Nmitral + Ngranule  #total number of cells
    #    t_inh = 25 ; # time when inhalation starts
    #    t_exh = 205; #time when exhalation starts

    #    Ndamagetotal = Nmitral*2 + 1  #number of damage steps
    Ndamage = 3  #steps to reduce entire matrix to zero
    Ncols = int(Nmitral / 2)  #define number of columns to damage

    finalt = 395  # end time of the cycle

    #y = zeros(ndim,1);

    P_odor0 = np.zeros((Nmitral, 1))  #odor pattern, no odor
    P_odor1 = P_odor0 + .00429  #Odor pattern 1
    #    P_odor2 = 1/70*np.array([.6,.5,.5,.5,.3,.6,.4,.5,.5,.5])
    #    P_odor3 = 4/700*np.array([.7,.8,.5,1.2,.7,1.2,.8,.7,.8,.8])
    #control_odor = control_order + .00429

    #control_odor = np.zeros((Nmitral,1)) #odor input for adaptation

    #controllevel = 1 #1 is full adaptation

    H0 = np.zeros((Nmitral, Ngranule))  #weight matrix: to mitral from granule
    W0 = np.zeros((Ngranule, Nmitral))  #weights: to granule from mitral

    H0 = np.load('H0_50_53Hz.npy')  #load weight matrix

    W0 = np.load('W0_50_53Hz.npy')  #load weight matrix

    #H0 = H0 + H0*np.random.rand(np.shape(H0))
    #W0 = W0+W0*np.random.rand(np.shape(W0))

    M = 5  #average over 5 trials for each level of damage

    #initialize iterative variables
    d1it, d2it, d3it, d4it = np.zeros(M), np.zeros(M), np.zeros(M), np.zeros(M)
    IPRit, IPR2it, pnit = np.zeros(M), np.zeros(M), np.zeros(M)
    frequencyit = np.zeros(M)
    pwrit = np.zeros(M)
    yout2, Sh2 = np.zeros((finalt, Ndim)), np.zeros((finalt, Ndim))
    psi = np.copy(Sh2[:, :Nmitral])

    #initialize quantities to be returned at end of the process
    dmgpct1 = np.zeros(Ncols * (Ndamage - 1) + 1)
    eigfreq1 = np.zeros(Ncols * (Ndamage - 1) + 1)
    d11 = np.zeros(Ncols * (Ndamage - 1) + 1)
    d21 = np.zeros(Ncols * (Ndamage - 1) + 1)
    d31 = np.zeros(Ncols * (Ndamage - 1) + 1)
    d41 = np.zeros(Ncols * (Ndamage - 1) + 1)
    pwr1 = np.zeros(Ncols * (Ndamage - 1) + 1)
    IPR1 = np.zeros(Ncols * (Ndamage - 1) + 1)
    IPR2 = np.zeros(Ncols * (Ndamage - 1) + 1)
    pn1 = np.zeros(Ncols * (Ndamage - 1) + 1)
    freq1 = np.zeros(Ncols * (Ndamage - 1) + 1)
    cell_act = np.zeros((finalt, Ndim, Ncols * (Ndamage - 1) + 1))

    damage = 0
    dam = np.ones(Nmitral)

    # Get the base (undamaged) response first
    Omean1 = np.zeros((Nmitral, M)) + 0j
    Oosci1 = np.zeros((Nmitral, M)) + 0j
    Omeanbar1 = np.zeros(M) + 0j
    Ooscibar1 = np.zeros(M) + 0j
    for m in np.arange(M):
        yout,y0out,Sh,t,OsciAmp1,Omean1[:,m],Oosci1[:,m],Omeanbar1[m],\
            Ooscibar1[m],freq0,maxlam = olf_bulb_10(Nmitral,H0,W0,P_odor1,dam)

    counter = 0  #to get the right index for each of the measures
    damage = 0
    dam[colnum] += .5  #so it starts on zero damage
    for col in range(Ncols):
        cols = int(np.mod(colnum + col, Nmitral))
        for lv in np.arange(Ndamage):
            #reinitialize all iterative variables to zero (really only need to do for distance measures, but good habit)
            d1it, d2it, d3it, d4it = np.zeros(M), np.zeros(M), np.zeros(M), np.zeros(M)
            IPRit, IPR2it, pnit = np.zeros(M), np.zeros(M), np.zeros(M)
            frequencyit = np.zeros(M)
            pwrit = np.zeros(M)
            # if it's the 0th damage level for any column but the original, skip
            if not (lv == 0 and cols != colnum):
                dam[cols] = dam[cols] - .5
                dam[dam < 1e-10] = 0
                damage = np.sum(1 - dam)

                for m in np.arange(M):
                    # Then get the response of the damaged network
                    yout2[:,:],y0out2,Sh2[:,:],t2,OsciAmp2,Omean2,Oosci2,Omeanbar2,\
                    Ooscibar2,freq2,grow_eigs2 = olf_bulb_10(Nmitral,H0,W0,P_odor1,dam)
                    #calculate distance measures
                    print(time.time() - tm1)
                    for i in np.arange(M):
                        d1it[m] += 1 - Omean1[:, i].dot(Omean2) / (
                            lin.norm(Omean1[:, i]) * lin.norm(Omean2))
                        d2it[m] += 1 - lin.norm(Oosci1[:, i].dot(
                            np.conjugate(Oosci2))) / (lin.norm(Oosci1[:, i]) *
                                                      lin.norm(Oosci2))
                        d3it[m] += (Omeanbar1[i] - Omeanbar2) / (Omeanbar1[i] +
                                                                 Omeanbar2)
                        d4it[m] += np.real((Ooscibar1[i] - Ooscibar2) /
                                           (Ooscibar1[i] + Ooscibar2))

                    # average over comparisons with all control trials
                    d1it[m] = d1it[m] / M
                    d2it[m] = d2it[m] / M
                    d3it[m] = d3it[m] / M
                    d4it[m] = d4it[m] / M

                    #calculate spectral density and "wave function" to get average power and IPR
                    # calculate the spectral density only from t=125 to t=250,
                    # during the main oscillations
                    P_den = np.zeros((501, Nmitral))
                    for i in np.arange(Nmitral):
                        f, P_den[:, i] = signal.periodogram(Sh2[125:250, i],
                                                            nfft=1000,
                                                            fs=1000)
                    psi = np.zeros(Nmitral)
                    for p in np.arange(Nmitral):
                        psi[p] = np.sum(P_den[:, p])
                    psi = psi / np.sqrt(np.sum(psi**2))

                    psi2 = np.copy(OsciAmp2)
                    psi2 = psi2 / np.sqrt(np.sum(psi2**2))

                    maxAmp = np.max(OsciAmp2)
                    pnit[m] = len(OsciAmp2[OsciAmp2 > maxAmp / 2])

                    IPRit[m] = 1 / np.sum(psi**4)
                    IPR2it[m] = 1 / np.sum(psi2**4)
                    pwrit[m] = np.sum(P_den) / Nmitral

                    #get the frequency according to the adiabatic analysis
                    maxargs = np.argmax(P_den, axis=0)
                    argf = stats.mode(maxargs[maxargs != 0])
                    frequencyit[m] = f[argf[0][0]]
        #            print(cols)
        #            print(time.time()-tm1)
        #
        #        print('level',lv)
                # Get the returned variables for each level of damage.  These are
                # 1-D arrays ordered as: column 0 damage levels 0,1,...,Ndamage-1,
                # then column 1 damage levels 0,1,2,..., and so on.
                dmgpct1[counter] = damage / Nmitral
                IPR1[counter] = np.average(IPRit)
                pwr1[counter] = np.average(pwrit)
                freq1[counter] = np.average(frequencyit)
                #        IPRsd[counter]=np.std(IPRit)
                #        pwrsd[counter]=np.std(pwrit)
                #        freqsd[counter]=np.std(frequencyit)
                IPR2[counter] = np.average(IPR2it)
                pn1[counter] = np.average(pnit)

                d11[counter] = np.average(d1it)
                d21[counter] = np.average(d2it)
                d31[counter] = np.average(d3it)
                d41[counter] = np.average(d4it)
                #        d1sd[counter] =  np.std(d1it)
                #        d2sd[counter] = np.std(d2it)
                #        d3sd[counter]=np.std(d3it)
                #        d4sd[counter]=np.std(d4it)

                eigfreq1[counter] = np.copy(freq2)
                if (colnum == 0 or colnum == int(Nmitral / 2)):
                    cell_act[:, :, counter] = np.copy(yout2)

                counter += 1

    return dmgpct1, eigfreq1, d11, d21, d31, d41, pwr1, IPR1, IPR2, pn1, freq1, cell_act
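
# Hypothetical driver (not in the original source): run the damage sweep for one seed
# column and plot a distance measure against the damaged fraction.  It assumes the
# H0_50_53Hz.npy / W0_50_53Hz.npy weight files and olf_bulb_10() from the surrounding
# module are available.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    dmgpct, eigfreq, d1, d2, d3, d4, pwr, ipr, ipr2, pn, freq, acts = dmg_seed_50_1D(0)
    plt.plot(dmgpct, d1, marker='o')
    plt.xlabel('fraction of network damaged')
    plt.ylabel('d1 (distance from undamaged mean response)')
    plt.show()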