Example #1
    def __init__(self):
        N = 1000
        N1 = 10
        # random N x N sparse complex matrix with N1*N entries
        mat = sps.coo_matrix((random.random(N1 * N) + 1j * random.random(N1 * N),
                              (random.randint(0, N, N1 * N), random.randint(0, N, N1 * N))),
                             shape=(N, N))
        # make it Hermitian with a strong random diagonal
        mat = mat.T.conj() + mat + sps.diags(random.random(N), 0) * 10
        self.mat = mat
        self.M = sps.diags(random.random(N), 0)
Example #2
def order_components(A, C):
    """Order components based on their maximum temporal value and size

    Parameters
    -----------
    A:   sparse matrix (d x K)
         spatial components
    C:   matrix or np.ndarray (K x T)
         temporal components

    Returns
    -----------
    A_or:  np.ndarray
        ordered spatial components
    C_or:  np.ndarray
        ordered temporal components
    srt:   np.ndarray
        sorting mapping
    """
    A = np.array(A.todense())
    nA2 = np.sqrt(np.sum(A**2, axis=0))
    A = np.array(np.matrix(A) * diags(1 / nA2, 0))
    nA4 = np.sum(A**4, axis=0)**0.25
    C = np.array(diags(nA2, 0) * np.matrix(C))
    mC = np.ndarray.max(np.array(C), axis=1)
    srt = np.argsort(nA4**mC)[::-1]
    A_or = A[:, srt]
    C_or = C[srt, :]

    return A_or, C_or, srt
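# A minimal usage sketch of order_components (not from the original source):
# it assumes `numpy as np` and `diags` from scipy.sparse are in scope, as the
# function body requires, and uses toy random components purely for illustration.
import numpy as np
from scipy.sparse import csc_matrix, diags

np.random.seed(0)
d, K, T = 100, 5, 50                           # pixels, components, timesteps
A = csc_matrix(np.abs(np.random.randn(d, K)))  # toy spatial components
C = np.abs(np.random.randn(K, T))              # toy temporal components
A_or, C_or, srt = order_components(A, C)
print(srt)  # component indices, best-ranked first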
Example #3
def laplacian_reachable_filter(laplacian, reachable_indexes):
    """
    Transforms a matrix to make sure only reachable elements are kept.

    The only current alternative is usage of LU instead of Cholesky, which is
    computationally more difficult and also requires reach-dependent computation to get
    an in and out flow to different GO terms

    An alternative is the construction of the individual laplacian
    for each new application

    :param laplacian: initial laplacian of directionless orientation
    :param reachable_indexes: indexes that are reachable from
    the nodes for which we want to perform the computation.
    :return: laplacian where all the lines and columns for terms that are not reachable are null.
    """
    pad_array = [0] * laplacian.shape[0]
    for index in reachable_indexes:
        pad_array[index] = 1
    diagonal_pad = diags(pad_array, 0, format="lil")
    re_laplacian = copy(laplacian)
    re_laplacian = diagonal_pad.dot(re_laplacian.dot(diagonal_pad))
    re_laplacian = re_laplacian - \
        diags(re_laplacian.diagonal(), 0, format="lil")
    d = (-re_laplacian.sum(axis=0)).tolist()[0]
    re_laplacian = re_laplacian + diags(d, 0, format="lil")

    # print 're_laplacian', re_laplacian.shape

    return re_laplacian
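# A small sketch of laplacian_reachable_filter on a toy path-graph Laplacian
# (an illustration, not from the original project; it assumes `diags` from
# scipy.sparse and `copy` from the copy module, which the body requires).
import numpy as np
from copy import copy
from scipy.sparse import diags, lil_matrix

# Laplacian of the path graph 0-1-2-3
lap = lil_matrix(np.array([[ 1., -1.,  0.,  0.],
                           [-1.,  2., -1.,  0.],
                           [ 0., -1.,  2., -1.],
                           [ 0.,  0., -1.,  1.]]))
re_lap = laplacian_reachable_filter(lap, [0, 1, 2])
print(re_lap.toarray())  # row and column for node 3 are zeroed, degrees recomputed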
Example #4
def counts_to_ppmi(counts_csr, smoothing=0.75):
    """
    Converts a sparse matrix of co-occurrences into a sparse matrix of positive
    pointwise mutual information. Context distributional smoothing is applied
    to the resulting matrix.
    """
    # word_counts adds up the total amount of association for each term.
    word_counts = np.asarray(counts_csr.sum(axis=1)).flatten()

    # smooth_context_freqs represents the relative frequency of occurrence
    # of each term as a context (a column of the table).
    smooth_context_freqs = np.asarray(counts_csr.sum(axis=0)).flatten() ** smoothing
    smooth_context_freqs /= smooth_context_freqs.sum()

    # Divide each row of counts_csr by the word counts. We accomplish this by
    # multiplying on the left by the sparse diagonal matrix of 1 / word_counts.
    ppmi = sparse.diags(1 / word_counts).dot(counts_csr)

    # Then, similarly divide the columns by smooth_context_freqs, by the same
    # method except that we multiply on the right.
    ppmi = ppmi.dot(sparse.diags(1 / smooth_context_freqs))

    # Take the log of the resulting entries to give pointwise mutual
    # information. Discard those whose PMI is less than 0, to give positive
    # pointwise mutual information (PPMI).
    ppmi.data = np.maximum(np.log(ppmi.data), 0)
    ppmi.eliminate_zeros()
    return ppmi
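# A toy demonstration of counts_to_ppmi (illustrative only; assumes
# `numpy as np` and `scipy.sparse as sparse`, as the function body uses).
import numpy as np
from scipy import sparse

counts = sparse.csr_matrix(np.array([[10., 0., 2.],
                                     [0., 5., 1.],
                                     [2., 1., 8.]]))
ppmi = counts_to_ppmi(counts)
print(ppmi.toarray())  # surviving entries are positive PMI values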
Example #5
    def initialize(self):
        if self.method == 'landweber':
            self.AT = self.A.transpose()
        elif self.method == 'cimmino':
            self.A = self.A.tocsr()
            self.AT = self.A.transpose()
            rowInnerProduct = np.zeros(self.Nrow, dtype=np.float32)
            self.a = np.zeros(self.Ncol, dtype=np.float32)
            # Calculate row inner product, placeholder for matrix rows
            row = np.zeros(self.Ncol, dtype=np.float32)
            for i in range(self.Nrow):
                row[:] = self.A[i, :].toarray()
                rowInnerProduct[i] = np.dot(row, row)
            self.M = ss.diags(1/rowInnerProduct)
        elif self.method == 'component averaging':
            self.A = self.A.tocsr()
            self.AT = self.A.transpose()
            weightedRowProduct = np.zeros(self.Nrow, dtype=np.float32)
            self.a = np.zeros(self.Ncol, dtype=np.float32)

            # Calculate number of non-zero elements in each column
            s = np.zeros(self.Ncol, dtype=np.float32)

            for i in range(self.Ncol):
                s[i] = self.A[:, i].count_nonzero()

            # Calculate weighted row product
            row = np.zeros(self.Ncol) #placeholder for matrix rows
            for i in range(self.Nrow):
                row[:] = self.A[i, :].toarray()
                weightedRowProduct[i] = np.sum(row * row * s)
            self.M = ss.diags(1/weightedRowProduct)
        else:
            print("Invalid update method!")
Example #6
    def __init__(self, *, x_grid_dim, x_amplitude, v, **kwargs):
        """
         The following parameters must be specified
             x_grid_dim - the grid size
             x_amplitude - the maximum value of the coordinates
             v - a potential energy (as a function)
             kwargs - is ignored
         """
        # saving the properties
        self.x_grid_dim = x_grid_dim
        self.x_amplitude = x_amplitude
        self.v = v

        # get coordinate step size
        self.dx = 2. * self.x_amplitude / self.x_grid_dim

        # generate coordinate range
        self.x = (np.arange(self.x_grid_dim) - self.x_grid_dim / 2) * self.dx
        # The same as
        # self.x = np.linspace(-self.x_amplitude, self.x_amplitude - self.dx , self.x_grid_dim)

        # Construct the kinetic energy part as sparse matrix from diagonal
        self.hamiltonian = diags([1., -2., 1.], [-1, 0, 1], shape=(self.x_grid_dim, self.x_grid_dim))
        self.hamiltonian *= -0.5 / (self.dx ** 2)

        # Add diagonal potential energy
        self.hamiltonian += diags(self.v(self.x), 0)
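# For illustration only: a standalone sketch repeating the construction in
# __init__ above for a harmonic potential v(x) = x**2 / 2, then checking the
# ground-state energy (0.5 in these units). The eigsh call is an assumption of
# this sketch, not part of the original class.
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import eigsh

x_grid_dim, x_amplitude = 512, 10.
dx = 2. * x_amplitude / x_grid_dim
x = (np.arange(x_grid_dim) - x_grid_dim / 2) * dx

hamiltonian = diags([1., -2., 1.], [-1, 0, 1], shape=(x_grid_dim, x_grid_dim))
hamiltonian *= -0.5 / dx ** 2
hamiltonian += diags(x ** 2 / 2, 0)

print(eigsh(hamiltonian.tocsc(), k=1, which='SA')[0])  # approximately [0.5]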
Example #7
    def get_propagation(self, ham, psi_init, steps, dz, norm=False):
        """
        Get the time evolution.

        :param ham: sparse.csr_matrix. Tight-Binding Hamiltonian.
        :param psi_init: np.ndarray. Initial state.
        :param steps: Positive Integer. Number of steps.
        :param dz: Positive number. Step.
        :param norm: Boolean. Default value False. Normalize the norm to 1 at each step.
        """
        error_handling.empty_ham(ham)
        error_handling.ndarray(psi_init, "psi_init", self.lat.sites)
        error_handling.positive_int(steps, "steps")
        error_handling.positive_real(dz, "dz")
        error_handling.boolean(norm, "norm")
        self.steps = steps
        self.dz = dz
        self.prop = np.empty((self.lat.sites, self.steps), "c16")
        self.prop[:, 0] = psi_init
        diag = 1j * np.ones(self.lat.sites, "c16")
        A = (sparse.diags(diag, 0) - 0.5 * self.dz * ham).toarray()
        B = (sparse.diags(diag, 0) + 0.5 * self.dz * ham).toarray()
        mat = np.dot(LA.inv(A), B)
        for i in range(1, self.steps):
            self.prop[:, i] = np.dot(mat, self.prop[:, i - 1])
            if norm:
                self.prop[:, i] /= np.abs(self.prop[:, i]).sum()
Example #8
def evaluateHessian(x, lagrange, obj_factor, flag, user_data=None):
    # The original served the flag=True (structure) and flag=False (value)
    # requests with two identical branches; they are collapsed into one here.
    # Build helper array
    data.calcDose(x)
    values = np.zeros(int(data.numX * (data.numX + 1) / 2), float_)
    quadHelperAlphaBetas = (data.currentDose < quadHelperThresh) * 2 * quadHelperUnder
    quadHelperAlphaBetas += (data.currentDose >= quadHelperThresh) * 2 * quadHelperOver
    # generate Hessian using matrix multiplication
    abDmat = obj_factor * data.Dmat * sparse.diags(quadHelperAlphaBetas, 0) * data.Dmat.transpose()

    hessian = abDmat.todense()
    idx = 0
    for i in range(data.numX):
        for j in range(i):
            values[idx] = hessian[i, j]
            idx += 1
    return values
Example #9
def random_sparse_matrix(size, density=0.05):
    """Generate a random sparse similarity matrix.
    
    Values are bounded by [0, 1]. Diagonal is all ones. The final density is
    approximately 2*`density`.
    
    Parameters
    ----------
    size : int
        Shape of the matrix (`size` x `size`)
    
    density : float, optional, default=0.05
        The matrix's density will be approximately 2 * `density`
        
    Returns
    -------
    S : csr_matrix
        Random matrix
    """
    S = sparse.rand(size, size, density, 'csr')
    S += S.T
    S /= S.max()
    S -= sparse.diags(S.diagonal(), 0)
    S += sparse.diags(np.ones(size), 0)
    return S
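# A quick sanity check of random_sparse_matrix (a sketch; assumes `numpy as np`
# and `scipy.sparse as sparse` are imported, as the function requires).
import numpy as np

S = random_sparse_matrix(100, density=0.05)
print(S.shape, S.nnz)                  # roughly 2 * density * 100**2 nonzeros
print(np.allclose(S.diagonal(), 1.0))  # True: unit diagonal by construction
print(abs(S - S.T).max())              # 0.0: symmetric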
Example #10
def ncuts(A, k=16):
    assert A.shape[0] == A.shape[1]
    n = A.shape[0]
    offset = .5

    d = np.array(A.sum(1) + 2 * offset)
    dr = sp.diags(np.ones(n) * offset, 0, format='csc')
    A = A + dr
    dsqinv = (1. / np.sqrt(d + np.spacing(1))).flatten()

    P = sp.diags(dsqinv, 0, format='csc').dot(A).dot(sp.diags(dsqinv, 0, format='csc'))

    log.info('Solving for eigenvalues and eigenvectors...')

    Eval, Ev = eigsh(P, k=k)

    log.info('Solved!')

    # Sort vectors in descending order, leaving out the zero vector
    idx = np.argsort(-Eval)
    Ev = Ev[:, idx].real
    Eval = Eval[idx].real

    # Make vectors unit norm
    for i in range(k):
        Ev[:, i] /= np.linalg.norm(Ev[:, i])

    return Eval, Ev
Example #11
def extract_DF_F(Y, A, C, i=None):
    """Extract DF/F values from spatial/temporal components and background

     Parameters
     -----------
     Y: np.ndarray
           input data (d x T)
     A: sparse matrix or np.ndarray
           Set of spatial components including spatial background (d x K)
     C: matrix
           Set of temporal components including background (K x T)
     i: int, optional
           index of the background component (only used in the commented-out code below)

     Returns
     -----------
     C_df: matrix
          temporal components in the DF/F domain
     Df:  np.ndarray
          vector with baseline values for each trace
    """
    A2 = A.copy()
    A2.data **= 2
    nA2 = np.squeeze(np.array(A2.sum(axis=0)))
    A = A * diags(1 / nA2, 0)
    C = diags(nA2, 0) * C

    # if i is None:
    #    i = np.argmin(np.max(A,axis=0))

    Y = np.matrix(Y)
    Yf = A.transpose() * (Y - A * C)  # + A[:,i]*C[i,:])
    Df = np.median(np.array(Yf), axis=1)
    C_df = diags(1 / Df, 0) * C

    return C_df, Df
Example #12
    def __init__(self,domain,boundary='PBC',Nx=1000):
        ''' Define a 1-D space over a domain.

        domain: tuple of (xmin, xmax)
        boundary: type of boundary condition, periodic ('PBC') by default
        Nx: integer number of chunks to discretize space

        '''

        # Record properties of the space
        self.xmin=domain[0]
        self.xmax=domain[1]
        self.L=self.xmax-self.xmin
        self.Nx=Nx
        self.boundary=boundary
        
        # Discretize
        self.x=np.matrix(np.linspace(self.xmin,self.xmax,self.Nx,endpoint=False)).T
        self.dx=self.L/self.Nx
        
        # Define derivative matrix
        self.D=spar.diags([np.ones(Nx-1),-np.ones(Nx-1),np.array([-1]),
            np.array([1])],[1,-1,Nx-1,-(Nx-1)])/(2*self.dx)
            
        self.q=spar.diags([force_array(self.x)],[0])
        self.p=-1j*self.D/(2*np.pi)
Example #13
def create(m, n):
    mu = 1
    rho = 1
    sigma = 0.1

    A = problem_util.normalized_data_matrix(m, n, mu)
    x0 = sp.rand(n, 1, rho)
    x0.data = np.random.randn(x0.nnz)
    x0 = x0.toarray().ravel()

    b = np.sign(A.dot(x0) + sigma*np.random.randn(m))
    A[b>0,:] += 0.7*np.tile([x0], (np.sum(b>0),1))
    A[b<0,:] -= 0.7*np.tile([x0], (np.sum(b<0),1))

    P = la.block_diag(np.random.randn(n-1,n-1), 0)

    lam = 1
    x = cp.Variable(A.shape[1])

    # Straightforward formulation w/ no constraints
    # TODO(mwytock): Fix compiler so this works
    z0 = 1 - sp.diags([b],[0])*A*x + cp.norm1(P.T*x)
    f_eval = lambda: (lam*cp.sum_squares(x) + cp.sum_entries(cp.max_elemwise(z0, 0))).value

    # Explicit epigraph constraint
    t = cp.Variable(1)
    z = 1 - sp.diags([b],[0])*A*x + t
    f = lam*cp.sum_squares(x) + cp.sum_entries(cp.max_elemwise(z, 0))
    C = [cp.norm1(P.T*x) <= t]
    return cp.Problem(cp.Minimize(f), C), f_eval
Example #14
def solve(dx, dt):
    """
    Solve using the Crank-Nicolson scheme.
    """

    # init. lhs and rhs matrices:
    c = (D / 2.0) * (dt / (dx ** 2))
    mtrx1 = diags([-c, 2 * (1 + c), -c], [1, 0, -1],
        shape=(M - 2, M - 2)).toarray()
    mtrx2 = diags([c, 2 * (1 - c), c], [1, 0, -1],
        shape=(M - 2, M - 2)).toarray()

    # Init. initial temperature distribution:
    u = [(i * dx) * (1 - i * dx) for i in range(M)]
    u[0], u[-1] = 0, 0  # Dirichlet boundary conditions

    # Apply scheme for t = 1, 2, 3,...
    x = np.linspace(0, L, M)
    y = np.linspace(0, T, N)
    z = np.zeros((len(y), len(x)))
    z[0] = u
    rhs = mtrx2.dot(u[1:-1])
    for i in range(1, N):
        sol = [0] * M  # Init. solution vector
        sol[1:-1] = np.linalg.solve(mtrx1, rhs)
        rhs = mtrx2.dot(sol[1:-1])
        z[i] = sol
    return x, y, z
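# The solver above reads module-level globals; a hedged driver sketch with
# assumed values for the diffusion constant D, rod length L, final time T and
# the grid sizes M (space) and N (time). None of these values come from the
# original source.
import numpy as np
from scipy.sparse import diags

D, L, T = 1.0, 1.0, 0.1  # assumed physical parameters
M, N = 51, 100           # assumed grid sizes

x, y, z = solve(dx=L / (M - 1), dt=T / N)
print(z[-1].max())       # peak temperature at the final time step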
Example #15
def solve(dx, dt):
    """
    Solve using a (simple) explicit difference scheme. 
    """

    # init. sigma:
    sigma = ((C * dt) / dx) ** 2
    if sigma > 1:
        print("Warning: stability condition violated!")

    # init. matrices:
    mtrx1 = sigma * diags([-1, 2, -1], [1, 0, -1],
        shape=(M - 2, M - 2)).toarray()
    mtrx2 = diags([1], shape=(M - 2, M - 2)).toarray()
    mtrx3 = (2 * mtrx2 - mtrx1)

    # Calculate solution for t = 1, 2:
    u = [(i * dx) * (1 - i * dx) for i in range(M)]
    v = np.zeros(M, float)
    v[1:-1] = [u[i] + 0.5 * sigma * (u[i - 1] - 2 * u[i] + u[i + 1]) for i in
               range(1, M - 1)]

    # Apply scheme for t = 2, 3, 4,...
    x = np.linspace(0, L, M)
    y = np.linspace(0, T, N)
    z = np.zeros((len(y), len(x)))
    z[0], z[1] = u, v
    for i in range(1, N):
        sol = [0] * M  # Init. solution vector
        sol[1:-1] = mtrx3.dot(v[1:-1]) - u[1:-1]
        u, v = v, sol
        z[i] = sol
    return x, y, z
Example #16
    def _getH0matrix(self, xyz, pp):

        """
        Creates sparse matrix containing inducing field components
        for source pp

..        REQUIRED ARGUMENTS:
..
..        xyz: N X 3 array of locations to predict field
..
..        pp: Source index
..
..        OUTPUTS:
..
..        H0: A 3N X N sparse array containing Hx, Hy and Hz at all locations
..
        """

        srcObj = self.survey.srcList[pp]

        h0 = srcObj.getH0(xyz)

        hx0 = sp.diags(h0[:, 0], format="csr")
        hy0 = sp.diags(h0[:, 1], format="csr")
        hz0 = sp.diags(h0[:, 2], format="csr")

        h0 = sp.vstack([hx0, hy0, hz0])

        return h0
Example #17
def gradient(weights, k, W, sample_count, n_dk_samples, X, sigma):
    D, K = X.shape[0], W.shape[0]

    result = 0.0
    alpha = np.empty((BatchSize, K), dtype=np.float64)
    scale = np.empty((BatchSize,),   dtype=np.float64)
    for d in range(0, D, BatchSize):
        max_d = min(D, d + BatchSize)
        top   = max_d - d

        alpha[:top,:] = X[d:max_d,:].dot(W.T)
        alpha[:top,k] = X[d:max_d,:].dot(weights)
        np.exp(alpha[:top], out=alpha[:top])

        alpha_sum = alpha[:top].sum(axis=1)
        scale[:top]  = fns.digamma(alpha_sum)
        scale[:top] -= fns.digamma(alpha_sum[:,np.newaxis] + n_dk_samples[d:max_d,:,:sample_count].sum(axis=1)).sum(axis=1) / sample_count
        scale[:top] += fns.digamma(alpha[:top,k,np.newaxis] + n_dk_samples[d:max_d,k,:sample_count]).sum(axis=1) / sample_count
        scale[:top] -= fns.digamma(alpha[:top,k])

        P_1 = ssp.diags(alpha[:top,k], 0).dot(X[d:max_d,:])
        P_2 = ssp.diags(scale[:top], 0).dot(P_1)

        batch_result = np.array(P_2.sum(axis=0))
        result += batch_result

    result -= weights / sigma

    return -np.squeeze(np.asarray(result))
Example #18
def test_make_diff2_radial_real():
    n, m, h = 8, 8, 0.1
    r = arange(0, h * m - 1e-10, h)
    D1 = diags((1, -8, 0, 8, -1), (-2, -1, 0, 1, 2), shape=(m, m)).toarray()
    D2 = diags((-1, 16, -30, 16, -1), (-2, -1, 0, 1, 2), shape=(m, m)).toarray()
    r[0] = 1
    D1[0, :] = 0
    D1[1, 1] += 1
    D2[0, :3] = [-60, 64, -4]
    D2[1, 1] += -1
    D = D2 / (24 * h ** 2) + diag(1.0 / r).dot(D1)  / (12 * h)
    L = mod.make_diff2_radial_real(n, 5, h)
    tolerance = 5.0e-5
    for k in range(0, 3):
        err = max(abs(diag(D, 2 - k) - L[k, 2 - k:]))
        print('Diagonal # ', 2 - k, ':    ', err)
        if err >= tolerance:
            print(diag(D, 2 - k))
            print(L[k, 2 - k:])
    for k in range(1, 3):
        err = max(abs(diag(D, -k) - L[2 + k, :-k]))
        print('Diagonal #', -k, ':    ', err)
        if err >= tolerance:
            print(diag(D, -k))
            print(L[2 + k, :-k])
Example #19
    def __mul__(u, v):
        """Hadamard product u*v."""
        if isinstance(v, ADI):
            if len(u.val) == len(v.val):
                # Note: scipy.sparse.diags has changed parameters between
                # versions 0.16x and 0.17x. This code is only tested on 0.16x.
                # TODO test code in SciPy 0.17x
                uJv = [sps.diags([u.val.flat], [0]) * jv for jv in v.jac]  # MATRIX multiplication
                vJu = [sps.diags([v.val.flat], [0]) * ju for ju in u.jac]  # MATRIX multiplication
                jac = [a + b for (a, b) in zip(uJv, vJu)]
                return ADI(u.val * v.val, jac)
            if len(v.val) == 1:
                # Fix dimensions and recurse
                vval = np.tile(v.val, (u.val.shape[0], 1))
                vjac = [sps.bmat([[j]] * len(u.val)) for j in v.jac]
                return u.__mul__(ADI(vval, vjac))
            if len(u.val) == 1:
                # Fix dimensions and recurse
                uval = np.tile(u.val, (v.val.shape[0], 1))
                ujac = [sps.bmat([[j]] * len(v.val)) for j in u.jac]
                return ADI(uval, ujac).__mul__(v)
            raise ValueError("Dimension mismatch")
        else:
            v = np.atleast_2d(v)
            if len(u.val) == 1:
                val = u.val * v
                jac = [sps.diags(v.flat, 0) * sps.bmat([[j]] * len(v)) for j in u.jac]
                return ADI(val, jac)
            if len(v) == 1:
                return ADI(u.val * v, [v.flat[0] * ju for ju in u.jac])
            if len(u.val) == len(v):
                vJu = [sps.diags(v.flat, 0) * ju for ju in u.jac]  # MATRIX multiplication
                return ADI(u.val * v, vJu)
            raise ValueError("Dimension mismatch")
Example #20
    def predict(self):

        m, n = self.A.shape  # m observations

        convgraph = np.zeros(self.maxiter // 25)
        prevdist = 0.
        converged = False

        eps = 1e-6

        dd = np.array(self.A.sum(1))[:, 0]
        D = diags(dd, 0, format="csr")

        # random initialization, will initialize with K-means if told to
        H = csr_matrix(np.random.rand(m, self.k))

        EPS = csr_matrix(np.ones(H.shape) * eps)

        if self._embedding:
            # Apply eigenspace embedding K-means for initialization (Ng Weiss Jordan)

            Dz = diags(1 / (np.sqrt(dd) + eps), 0, format="csr")
            DAD = Dz.dot(self.A).dot(Dz)

            V = eigs(DAD, self.k)[1].real
            km_data = V / (np.linalg.norm(V, 2, axis=1).T * np.ones((self.k,1))).T

            km_predict = KMeans(n_clusters=self.k).fit_predict(km_data)

            indices = km_predict
            indptr = range(len(indices)+1)
            data = np.ones(len(indices))
            H = csr_matrix((data, indices, indptr))

        # Run separately for sparse and dense versions

        for i in range(self.maxiter):

            AH = self.A.dot(H)
            alpha = H.T.dot(AH)

            M1 = AH + EPS
            M2 = D.dot(H).dot(alpha) + EPS

            np.reciprocal(M2.data, out=M2.data)
            d1 = M1.multiply(M2).sqrt()

            H = H.multiply(d1)

            if i % 25 == 0:
                dist = sptrace(alpha)
                convgraph[i // 25] = dist

                diff = dist / prevdist - 1
                prevdist = dist

        return NMFResult((H.toarray(),), convgraph, prevdist)
Example #21
def build_offcentered_alpha(sh,alpha):
    # This computes the midpoints of alpha which will be used in the heterogenous laplacian
    nz=sh[-1]
    nx=sh[0]
    
    v1z,v2z,v3z=np.ones(nz),np.ones(nz-1),np.zeros(nz)
    v1z[-1],v3z[0]=2.0,2.0
    v1x,v2x,v3x=np.ones(nx),np.ones(nx-1),np.zeros(nx)
    v1x[-1],v3x[0]=2.0,2.0
    v3z=v3z.reshape(1,nz)
    v3x=v3x.reshape(1,nx)
    Lz1=np.array(spsp.diags([v1z,v2z],[0,1]).todense())
    Lx1=np.array(spsp.diags([v1x,v2x],[0,1]).todense())
    Lz=np.matrix(0.5*np.concatenate((v3z,Lz1),axis=0))
    Lx=np.matrix(0.5*np.concatenate((v3x,Lx1),axis=0))
    # Lz and Lx (of shapes (nz+1) x nz and (nx+1) x nx respectively) act on a vector and
    # return one that is one entry larger than before, with each entry a weighted average
    # of the two adjacent entries. Boundary values are preserved.
    P=build_permutation_matrix(nz,nx)
    
    alpha_perm=P*alpha
    alpha_z,alpha_x=list(),list()
    for i in range(nx):
        alpha_z.append(Lz * alpha[nz * i:nz * (i + 1)])
    for i in range(nz):
        alpha_x.append(Lx * alpha_perm[nx * i:nx * (i + 1)])
    return alpha_x, alpha_z
Example #22
    def __density_normalize__(self, kernelMat):
        """
        density normalization: Eq (4-5) of [1]

        Be very careful here: K is a sparse matrix, which behaves differently from a usual
        np.ndarray for the operators * and /
        """
        assert issparse(kernelMat), 'K must be sparse, multiplication behaves very differently for sparse/dense (elementwise vs mat-mult)'

        "calculate:  P_xy / Z(x)Z(y)"
        # rescale each column by Z and also each row by Z
        # easily done by multiplying with a diagonal matrix from the left (scaling rows) and right (scaling columns)
        # note that the row and column sums are the same, as the matrix is symmetric!

        # Z(x), kind of the partition function
        Z = np.array(kernelMat.sum(0)).flatten()  # a bit ugly: the sum is of type np.matrix, which one cannot cast into a 1d array, hence the detour via np.array
        scalingMat = diags(1.0 / Z)  # multiplying by this on the left divides each row by Z; on the right, each column
        P_tilde = scalingMat * kernelMat * scalingMat  # this is matrix multiplication!

        # Eq (5,6) of [1]
        # once again, the same trick with a diagonal matrix for rescaling
        Z_tilde = np.array(P_tilde.sum(0)).flatten()
        scalingMat = diags(1.0 / Z_tilde)
        P_tilde = P_tilde * scalingMat

        return P_tilde
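# A tiny sketch exercising the normalization above on a symmetric sparse
# kernel. The unbound call with self=None is purely illustrative (self is
# never used in the body); assumes diags/issparse from scipy.sparse.
import numpy as np
from scipy.sparse import csr_matrix, diags, issparse

K = csr_matrix(np.array([[1.0, 0.5, 0.0],
                         [0.5, 1.0, 0.2],
                         [0.0, 0.2, 1.0]]))
P = __density_normalize__(None, K)
print(np.asarray(P.sum(axis=0)).flatten())  # columns now sum to 1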
Example #23
def logreg(w, Z, regcoef, hess=False):
    """
    :param w: a D-dimensional vector at which the oracle is evaluated
    :param Z: an (N x D) matrix — numpy.ndarray or scipy.sparse.csr_matrix
    :param regcoef: regularization coefficient
    :param hess: a flag showing whether or not to evaluate the hessian
    :return f: logistic loss function value at w
    :return g: gradient of the logistic loss function at w, a D-dimensional vector
    :return H: hessian of the logistic loss function at w, a (D x D) matrix
    """
    if not(isinstance(w, np.ndarray)):
        raise TypeError("w should be a numpy.ndarray")
    if not(isinstance(Z, np.ndarray) or isinstance(Z, csr_matrix)):
        raise TypeError("Z should be a numpy.ndarray or a csr_matrix")
    if w.shape[0] != Z.shape[1]:
        raise ValueError("w.shape[0] and Z.shape[1] mast be equal")
    if regcoef <= 0:
        raise ValueError("Regularization coefficient must be greater then 0")

    w = w.reshape((w.size, 1))
    anc_var = np.exp(-Z.dot(w))
    f = np.sum(np.log(1 + np.exp(Z.dot(w))), axis=0) + regcoef * np.linalg.norm(w)**2 / 2
    if isinstance(Z, csr_matrix):
        g = np.sum(((diags([(1 / (1 + anc_var))[:, 0].tolist()], [0])).dot(Z)).toarray(), axis=0).reshape(w.shape)
        g += regcoef * w
    else:
        g = np.sum(Z / (1 + anc_var), axis=0).reshape(w.shape) + regcoef * w
    if hess:
        if isinstance(Z, csr_matrix):
            h = diags([(anc_var / np.square(1 + anc_var))[:, 0].tolist()], [0]).dot(Z)
        else:
            h = Z * (anc_var / np.square(1 + anc_var))
        h = Z.T.dot(h) + regcoef * np.eye(w.size)
        return f, g, h
    return f, g
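# A sketch of evaluating the logreg oracle on random dense data (illustrative;
# assumes `numpy as np` plus `diags` and `csr_matrix` from scipy.sparse, the
# names used in the body).
import numpy as np
from scipy.sparse import csr_matrix, diags

np.random.seed(0)
N, D = 200, 10
Z = np.random.randn(N, D)
w = np.zeros(D)
f, g, H = logreg(w, Z, regcoef=1.0, hess=True)
print(f, g.shape, H.shape)  # loss, (D, 1) gradient, (D, D) hessian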
Example #24
def influence_matrix(p, U, S2, N, w):
    '''
    returns the Hat (or influence) matrix for a given smoothing parameter p
    '''
    tmp = sparse.eye(N,N) - U.dot( S2 / (S2 + p/(1-p)/6.)).dot(U.T)
    D = sparse.diags(1./np.sqrt(w), 0, shape=(N,N))
    invD = sparse.diags(np.sqrt(w), 0, shape=(N,N))
    return D * tmp * invD
Example #25
    def __init__(self, m, n, dx, wn, ws, we, ww, id, neumann=None):
        """

        :param m: int, shape[0] of the room
        :param n: int, shape[1] of the room
        :param dx: float, discretization distance
        :param wn: float, temperature of north wall
        :param ws: float, temperature of south wall
        :param we: float, temperature of east wall
        :param ww: float, temperature of west wall
        :param id: int, rank of the calling thread
        :param neumann: str or None; 'east' or 'west' applies a Neumann condition on that wall
        """

        self.M = m
        self.N = n

        # create the whole array
        self.u = np.zeros((m, n))
        self.dx = dx

        # create references to the walls
        self.wn = self.u[-1, :]
        self.ws = self.u[0, :]
        self.we = self.u[:, -1]
        self.ww = self.u[:, 0]

        # setup a dictionary for shared items
        self.shared = dict()

        # assign values to the walls
        self.wn += wn
        self.ws += ws
        self.we += we
        self.ww += ww

        ub = self.u.copy()
        nan = np.empty((m, n))
        nan[:, :] = np.NaN
        self.ub = np.where(ub != 0, ub, nan)

        # set the id
        self.id = id  # try using some form of broadcast with a class variable?

        # deal with any boundary conditions when we create the A matrix
        Dn = sparse.diags([1, -2, 1], [-1, 0, 1], shape=(n, n))
        Dm = sparse.diags([1, -2, 1], [-1, 0, 1], shape=(m, m))

        if neumann == 'east':
            Dn = Dn.copy().tolil()
            Dn[-1, -1] += 1
            self.ub[1:-1, -1] = np.NaN
        elif neumann == 'west':
            Dn = Dn.copy().tolil()
            Dn[0, 0] += 1
            self.ub[1:-1, 0] = np.NaN

        self.A = sparse.kronsum(Dn, Dm)
Example #26
    def _build_base_matrix(self):
        n_samples = self.graph.shape[0]
        n_classes = self.y_.max() + 1
        B = np.zeros((n_samples, n_classes))
        B[self.x_, self.y_] = 1
        d = np.array(self.graph.sum(1).T)[0]
        Z = sparse.diags(1.0 / (d + self.lamb))
        Lamb = sparse.diags(self.lamb, shape=(n_samples, n_samples))
        return Z.dot(Lamb).dot(B)
Example #27
def KPMF(input_matrix, approx=50, iterations=30, learning_rate=.001, adjacency_width=5, adjacency_strength=.5):
    A = input_matrix
    Z = np.asarray(A > 0,dtype=np.int)
    A1d = np.ravel(A)
    mean = np.mean(A1d)
    A = A-mean
    K = approx
    R = itr = iterations
    l = learning_rate
    N = A.shape[0]
    M = A.shape[1]
    U = np.random.randn(N,K)
    V = np.random.randn(K,M)
    #KPMF using gradient descent as per paper
    #Kernelized Probabilistic Matrix Factorization: Exploiting Graphs and Side Information
    #T. Zhou, H. Shan, A. Banerjee, G. Sapiro
    #Using diffusion kernel
    #U are the rows, we use an adjacency matrix CU to reprent connectivity
    #This matrix connects rows +-adjacency_width
    #V are the columns, connected columns are CV
    #Operate on graph laplacian L, which is the degree matrix D - C
    #Applying the diffusion kernel to L, this forms a spatial smoothness graph
    bw = adjacency_width
    #Use scipy.sparse.diags to generate band matrix with bandwidth = 2*adjacency_width+1
    #Example of adjacency_width = 1, N = 4
    #[1 1 0 0]
    #[1 1 1 0]
    #[0 1 1 1]
    #[0 0 1 1]
    print "Running KPMF with:"
    print "learning rate=" + `l`
    print "bandwidth=" + `bw`
    print "beta=" + `b`
    print "approximation rank=" + `K`
    print "iterations=" + `R`
    print ""
    CU = sp.diags([1]*(2*bw+1),range(-bw,bw+1),shape=(N,N)).todense()
    DU = np.diagflat(np.sum(CU,1))
    CV = sp.diags([1]*(2*bw+1),range(-bw,bw+1),shape=(M,M)).todense()
    DV = np.diagflat(np.sum(CV,1))
    LU = DU - CU
    LV = DV - CV
    beta = adjacency_strength
    KU = sl.expm(beta*LU)
    KV = sl.expm(beta*LV)
    SU = np.linalg.pinv(KU)
    SV = np.linalg.pinv(KV)
    for r in range(R):
        for i in range(N):
            for j in range(M):
                if Z[i,j] > 0:
                    e = A[i,j] - np.dot(U[i,:],V[:,j])
                    U[i,:] = U[i,:] + l*(e*V[:,j] - np.dot(SU[i,:],U))
                    V[:,j] = V[:,j] + l*(e*U[i,:] - np.dot(V,SV[:,j]))
    A_ = np.dot(U,V)
    return A_+mean
Example #28
    def test_abs(self):
        x, y = initVariablesADI(np.array([[5, -2]]).T, np.array([[3]]).T)
        z1 = (x * y).abs()
        assert np.array_equal(z1.val, np.array([[15, 6]]).T)
        assert (z1.jac[0] - sps.diags([3, -3], 0)).nnz == 0
        assert np.array_equal(z1.jac[1].toarray(), np.array([[5], [2]]))
        z2 = npad.abs(x * y)
        assert np.array_equal(z2.val, np.array([[15, 6]]).T)
        assert (z2.jac[0] - sps.diags([3, -3], 0)).nnz == 0
        assert np.array_equal(z2.jac[1].toarray(), np.array([[5], [2]]))
Example #29
def diff_conv_1d_fem(n, a, b):
    diagonals = [-a * 2 * (n + 1) ** 2 * np.ones((n,)),
                 (a * (n + 1) ** 2 + b * (n + 1) / 2) * np.ones((n - 1,)),
                 (a * (n + 1) ** 2 - b * (n + 1) / 2) * np.ones((n - 1,))]
    A = sps.diags(diagonals, [0, -1, 1])
    diagonals = [2 / 3 * np.ones((n,)),
                 1 / 6 * np.ones((n - 1,)),
                 1 / 6 * np.ones((n - 1,))]
    E = sps.diags(diagonals, [0, -1, 1])
    return A, E
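# For context, a sketch instantiating the 1-D diffusion-convection FEM pair
# above and solving a steady problem (illustrative; assumes `numpy as np` and
# `scipy.sparse as sps` as the snippet does).
import numpy as np
import scipy.sparse as sps
from scipy.sparse.linalg import spsolve

A, E = diff_conv_1d_fem(n=100, a=1.0, b=0.5)  # diffusion a, convection b
u = spsolve(A.tocsr(), np.ones(100))          # steady response to a unit source
print(A.shape, E.shape, float(u.max()))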
Example #30
File: FHIM.py Project: boliu68/FHIM
	def gradient(self, x, y, variable):

        # the pseudo-gradient
		if self.loss == 'linear':
			pred = self.predict(x)
			err = (y - pred).reshape((-1,1)) #n * 1

			if variable == 'beta':

				if sparse.issparse(x):
					#err_diag = sparse.lil_matrix((x.shape[0], x.shape[0]))
					#err_diag.setdiag(err.flatten())
					err_diag = sparse.diags(err.flatten(), 0, format='csr')
					gd = - err_diag * x
					gd = np.asarray(gd.sum(0)).flatten()
				else:
					gd = - x * err # n * d multiply n * 1
					gd = np.sum(gd, 0).flatten()
				lbd = self.lbd_beta
				var = self.beta

			if variable == 'a':

				if sparse.issparse(x):
					xa = x.dot(self.a[:,-1])
					#xa_diag = sparse.lil_matrix((len(xa), len(xa)))
					#xa_diag.setdiag(xa)
					xa_diag = sparse.diags(xa, 0, format='csr')
					xxa = xa_diag * x
					#err_diag = sparse.lil_matrix((x.shape[0], x.shape[0]))
					#err_diag.setdiag(err.flatten())
					err_diag = sparse.diags(err.flatten(), 0, format='csr')
					gd = - err_diag * xxa
					gd = np.asarray(gd.sum(0)).flatten()
				else:
					gd = - ((x * np.dot(x, self.a[:, -1]).reshape((-1,1))) * err)
					gd = np.sum(gd, 0)
				lbd = self.lbd_alpha
				var = self.a[:, -1]

			#gd = gd / np.linalg.norm(gd)
			id1 = var > 0
			id2 = var < 0
			id3 = np.logical_and(var == 0, gd < -lbd)
			id4 = np.logical_and(var == 0, gd > lbd)
			id5 = np.logical_and(var == 0, np.logical_and(gd >= -lbd, gd <= lbd))
			#print "Mean Gradient:%f, Norm Gradient:%f, max grad:%f" % (np.min(gd), np.linalg.norm(gd), np.max(np.abs(gd)))

			gd[id1] = gd[id1] + lbd
			gd[id2] = gd[id2] - lbd
			gd[id3] = gd[id3] + lbd
			gd[id4] = gd[id4] - lbd
			gd[id5] = 0

		return gd
Example #31
def construir_matriz_tridiagonal(s, n):
    diagonales = [[-s]*(n-1), [1+2*s]*n, [-s]*(n-1)]
    return diags(diagonales, [-1, 0, 1], format='csr')
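# A short sketch of the tridiagonal builder above as one implicit Euler step of
# the heat equation (illustrative; assumes `diags` from scipy.sparse as the
# function requires).
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve

s = 0.5   # diffusivity * dt / dx**2
n = 10
M = construir_matriz_tridiagonal(s, n)
u = np.sin(np.linspace(0, np.pi, n))  # initial temperature profile
print(spsolve(M, u))                  # profile after one implicit step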
Example #32
def getRegularizationMatrix(srcxaxis, srcyaxis, mode="curvature"):
    import numpy

    if mode == "zeroth":
        return identity(srcxaxis.size * srcyaxis.size)

    else:
        from scipy.sparse import diags, csc_matrix, lil_matrix

    if mode == "gradient":
        mat = diags([-2, -2, 8, -2, -2],
                    [-srcxaxis.size, -1, 0, 1, srcxaxis.size],
                    shape=(srcxaxis.size * srcyaxis.size,
                           srcxaxis.size * srcyaxis.size))
        mat = lil_matrix(mat)

        #glitches are at left and right edges
        allcols = numpy.arange(srcxaxis.size * srcyaxis.size)
        leftedges = allcols[allcols % srcxaxis.size == 0]
        rightedges = allcols[allcols % srcxaxis.size == srcxaxis.size - 1]
        for el in leftedges:
            mat[el, el - 1] = 0
        for el in rightedges:
            if el != allcols.max():
                mat[el, el + 1] = 0

    elif mode == "curvatureOLD":
        mat = diags([2, 2, -8, -8, 24, -8, -8, 2, 2], [
            -2 * srcxaxis.size, -2, -srcxaxis.size, -1, 0, 1, srcxaxis.size,
            2 * srcxaxis.size, 2
        ],
                    shape=(srcxaxis.size * srcyaxis.size,
                           srcxaxis.size * srcyaxis.size))
        mat = lil_matrix(mat)

        #glitches are at left and right edges
        allcols = numpy.arange(srcxaxis.size * srcyaxis.size)
        leftedges = allcols[allcols % srcxaxis.size == 0]
        rightedges = allcols[allcols % srcxaxis.size == srcxaxis.size - 1]
        leftedgesinone = allcols[allcols % srcxaxis.size == 1]
        rightedgesinone = allcols[allcols % srcxaxis.size == srcxaxis.size - 2]

        for el in leftedges:
            mat[el, el - 1] = 0
            mat[el, el - 2] = 0
        for el in rightedges:
            if el != allcols.max():
                mat[el, el + 1] = 0
                mat[el, el + 2] = 0
        for el in leftedgesinone:
            mat[el, el - 2] = 0
        for el in rightedgesinone:
            if el != allcols.max() - 1:
                mat[el, el + 2] = 0

    elif mode == "curvature":
        I, J = srcxaxis.size, srcyaxis.size
        matrix = lil_matrix((I * J, I * J))
        for i in range(I - 2):
            for j in range(J):
                ij = i + j * J
                i1j = ij + 1
                i2j = ij + 2
                matrix[ij, ij] += 1.
                matrix[i1j, i1j] += 4
                matrix[i2j, i2j] += 1
                matrix[ij, i2j] += 1
                matrix[i2j, ij] += 1
                matrix[ij, i1j] -= 2
                matrix[i1j, ij] -= 2
                matrix[i1j, i2j] -= 2
                matrix[i2j, i1j] -= 2
        for i in range(I):
            for j in range(J - 2):
                ij = i + j * J
                ij1 = ij + J
                ij2 = ij + 2 * J
                matrix[ij, ij] += 1
                matrix[ij1, ij1] += 4
                matrix[ij2, ij2] += 1
                matrix[ij, ij2] += 1
                matrix[ij2, ij] += 1
                matrix[ij, ij1] -= 2
                matrix[ij1, ij] -= 2
                matrix[ij1, ij2] -= 2
                matrix[ij2, ij1] -= 2
        for i in range(I):
            iJ_1 = i + (J - 2) * J
            iJ = i + (J - 1) * J
            matrix[iJ_1, iJ_1] += 1
            matrix[iJ, iJ] += 1
            matrix[iJ, iJ_1] -= 1
            matrix[iJ_1, iJ] -= 1
        for j in range(J):
            I_1j = (I - 2) + j * J
            Ij = (I - 1) + j * J
            matrix[I_1j, I_1j] += 1
            matrix[Ij, Ij] += 1
            matrix[Ij, I_1j] -= 1
            matrix[I_1j, Ij] -= 1
        for i in range(I):
            iJ = i + (J - 1) * J
            matrix[iJ, iJ] += 1
        for j in range(J):
            Ij = (I - 1) + j * J
            matrix[Ij, Ij] += 1
        mat = matrix
    return mat.tocsc()
Example #33
def fit_regularized_spline_QR(X,
                              data,
                              delta,
                              tau,
                              constraint,
                              q,
                              T,
                              lambd_values,
                              anoms=True):
    """Fit regularized spline regression to the data.

    The model is coded to be for:
    q = constant + linear + spline + interaction(spline, linear)

    Parameters
    ----------
    X : numpy.ndarray
        The design matrix
    data : numpy.ndarray
        The 1D variable being modeled
    delta : numpy.ndarray
        The dx for the regularized spline term(s)
    tau : float
        Quantile of interest, in (0, 1)
    constraint : str
        Type of constraint to impose: 'None', 'Below', 'Above'.
        'Below' indicates no crossing of lower quantile (i.e. tau = 0.55 shouldn't cross tau = 0.5)
        'Above' indicates no crossing of upper quantile (i.e. tau = 0.45 shouldn't cross tau = 0.5)
        'Median' imposes no constraints beyond Td < T.
    q : numpy.ndarray or None
        The fitted quantile not to be crossed (if constraint is not None)
    T : numpy.ndarray
        The value of temperature not to be exceeded
    lambd_values : numpy.ndarray or float
        The initial set of lambda values to try, or a single lambda value to use
    anoms : bool
        If true, do not enforce noncrossing constraint

    Returns
    -------
    beta : numpy.ndarray
        Parameter coefficients for quantile regression model
    yhat : numpy.ndarray
        Conditional values of predictand for a given quantile
    best_lambda : float
        Selected value of lambda based on BIC.
    """
    N, K = X.shape
    lambd1 = cp.Parameter(nonneg=True)
    lambd2 = cp.Parameter(nonneg=True)

    diag_vec = 1 / delta
    off_diag_1 = -1 / delta[:-1] - 1 / delta[1:]
    off_diag_2 = 1 / delta[1:]

    diagonals = [diag_vec, off_diag_1, off_diag_2]
    D0 = sparse.diags(diagonals, [0, 1, 2], shape=(N - 2, N - 1))

    add_row = np.zeros((N - 1, ))
    add_row[-2] = 1 / delta[-2]
    add_row[-1] = -1 / delta[-1] - 1 / delta[-2]

    add_col = np.zeros((N - 1, 1))
    add_col[-2] = 1 / delta[-1]
    add_col[-1] = 1 / delta[-1]

    D0 = sparse.vstack((D0, add_row))
    D0 = sparse.hstack((D0, add_col))

    # Spline term 1
    D1 = sparse.hstack(
        (sparse.rand(N - 1, K - 2 * N,
                     density=0), D0, sparse.rand(N - 1, N, density=0)))
    # Spline term 2
    D2 = sparse.hstack((sparse.rand(N - 1, K - 2 * N,
                                    density=0), sparse.rand(N - 1,
                                                            N,
                                                            density=0), D0))

    # Cost function to be minimized (c.T@z)
    # np.repeat(0, 2*K): no penalty on coefficients themselves
    # tau*np.repeat(1, N), (1-tau)*np.repeat(1, N): weight on positive and negative residuals
    # lam*np.repeat(1, N-1): weight on positive and negative first and second derivatives
    # size: 2*K + 2*N + 2*(N - 1)
    c = np.concatenate(
        (np.repeat(0,
                   2 * K), tau * np.repeat(1, N), (1 - tau) * np.repeat(1, N)))

    c = cp.hstack((
        c,
        lambd1 *
        np.repeat(1, 2 *
                  (N - 1)),  # pos/neg second derivative of first spline term
        lambd2 *
        np.repeat(1, 2 *
                  (N - 1))))  # pos/neg second derivative of second spline term

    # Equality constraint: Az = b
    # Constraint ensures that fitted quantile trend + residuals = predictand
    A00 = X  # covariates for positive values of the variable
    A01 = -1 * X  # covariates for negative values of the variable
    A02 = sparse.eye(N)  # Positive residuals
    A03 = -1 * sparse.eye(N)  # Negative residuals
    A04 = sparse.rand(N, N - 1, density=0)
    A05 = sparse.rand(N, N - 1, density=0)
    A06 = sparse.rand(N, N - 1, density=0)
    A07 = sparse.rand(N, N - 1, density=0)

    # Additional constraint: D1@z - u + v = 0
    # Ensures that second derivative adds to u - v
    A10 = D1
    A11 = -1 * D1
    A12 = sparse.rand(N - 1, N, density=0)
    A13 = sparse.rand(N - 1, N, density=0)
    A14 = -1 * sparse.eye(N - 1)
    A15 = sparse.eye(N - 1)
    A16 = sparse.rand(N - 1, N - 1, density=0)
    A17 = sparse.rand(N - 1, N - 1, density=0)

    # Additional constraint: D2@z - u + v = 0
    # Ensures that second derivative adds to u - v
    A20 = D2
    A21 = -1 * D2
    A22 = sparse.rand(N - 1, N, density=0)
    A23 = sparse.rand(N - 1, N, density=0)
    A24 = sparse.rand(N - 1, N - 1, density=0)
    A25 = sparse.rand(N - 1, N - 1, density=0)
    A26 = -1 * sparse.eye(N - 1)
    A27 = sparse.eye(N - 1)

    A = sparse.vstack((sparse.hstack((A00, A01, A02, A03, A04, A05, A06, A07)),
                       sparse.hstack((A10, A11, A12, A13, A14, A15, A16, A17)),
                       sparse.hstack(
                           (A20, A21, A22, A23, A24, A25, A26, A27))))

    A = cp.Constant(A)
    b = np.hstack((data.T, np.zeros(2 * (N - 1))))

    # Determine if we have non-crossing constraints
    # Inequality constraints written Gx <= h
    # Always, constraint that all values of x are positive (> 0)
    n = A.shape[1]

    G1 = -1 * sparse.eye(n)
    if constraint == 'Median':
        n_constraints = 0  # Median is far enough from the Td < T constraint that we don't need to add it
        G = G1
        del G1
    elif constraint == 'Below':  # Constrain to be above lower quantile
        if anoms:
            n_constraints = len(q)
            G2 = sparse.hstack(
                (-X, X, sparse.rand(N, 2 * N + 4 * (N - 1), density=0)))
            G = sparse.vstack((G1, G2))
        else:  # additionally constrain to not cross T
            n_constraints = len(T) + len(q)  # rows of G2a plus rows of G2b
            G2a = sparse.hstack(
                (X, -X, sparse.rand(N, 2 * N + 4 * (N - 1), density=0)))
            G2b = sparse.hstack(
                (-X, X, sparse.rand(N, 2 * N + 4 * (N - 1), density=0)))
            G2 = sparse.vstack((G2a, G2b))
            del G2a, G2b
            G = sparse.vstack((G1, G2))
    elif constraint == 'Above':  # just constrain to be below upper quantiles
        n_constraints = len(q)
        G2 = sparse.hstack(
            (X, -X, sparse.rand(N, 2 * N + 4 * (N - 1), density=0)))
        G = sparse.vstack((G1, G2))
    else:
        raise NameError('Constraint must be Median, Above, or Below')

    G = cp.Constant(G)

    # Right hand side of inequality constraint
    h = np.zeros((n + n_constraints, ))
    if constraint == 'Below':
        if anoms:
            h[n:] = -q
        else:
            c1 = len(T)
            h[n:(n + c1)] = T
            h[(n + c1):] = -q
    elif constraint == 'Above':
        h[n:] = q

    z = cp.Variable(
        2 * K + 2 * N + 4 *
        (N - 1))  # parameters + residuals + second derivatives (all pos + neg)
    objective = cp.Minimize(c.T @ z)
    prob = cp.Problem(objective, [A @ z == b, G @ z <= h])

    lambd2_scale = 1
    if (isinstance(lambd_values, float) | isinstance(lambd_values, int)):
        lambd1.value = lambd_values
        lambd2.value = lambd2_scale * lambd_values
        best_lambda = lambd_values
    else:
        BIC = np.empty((len(lambd_values)))
        for ct_v, v in enumerate(lambd_values):
            lambd1.value = v
            lambd2.value = lambd2_scale * v

            try:
                prob.solve(solver=cp.ECOS, warm_start=True)
            except SolverError:  # try a second solver
                try:
                    prob.solve(solver=cp.SCS, warm_start=True)
                except SolverError:  # give up
                    print('Both ECOS and SCS failed.')
                    return 0

            beta = np.array(z.value[0:K] - z.value[K:2 * K])
            yhat = np.dot(X, beta)

            BIC[ct_v], df = calc_BIC(beta, yhat, data, tau, delta)
            if df > np.sqrt(len(data)):  # violating constraint of high dim BIC
                BIC[ct_v] = 1e6  # something large

        min_idx = np.argmin(BIC)
        new_idx = np.array([min_idx - 1, min_idx + 1])
        new_idx[new_idx < 0] = 0
        new_idx[new_idx > (len(BIC) - 1)] = (len(BIC) - 1)
        new_range = lambd_values[new_idx]
        delta_range = new_range[1] - new_range[0]
        new_range = np.logspace(np.log10(new_range[0] + 0.1 * delta_range),
                                np.log10(new_range[1] - 0.1 * delta_range), 6)
        BIC = np.empty((len(new_range)))
        # df_save = np.empty((len(new_range)))
        for ct_v, v in enumerate(new_range):
            lambd1.value = v
            lambd2.value = lambd2_scale * v
            try:
                prob.solve(solver=cp.ECOS, warm_start=True)
            except SolverError:  # try a second solver
                try:
                    prob.solve(solver=cp.SCS, warm_start=True)
                except SolverError:  # give up
                    print('Both ECOS and SCS failed.')
                    return 0

            beta = np.array(z.value[0:K] - z.value[K:2 * K])
            yhat = np.dot(X, beta)

            BIC[ct_v], df = calc_BIC(beta, yhat, data, tau, delta)
            if df > np.sqrt(len(data)):  # violating constraint of high dim BIC
                BIC[ct_v] = 1e6  # something large

            # df_save[ct_v] = df

        # df_final = df_save[np.argmin(BIC)]
        best_lambda = new_range[np.argmin(BIC)]

    lambd1.value = best_lambda
    lambd2.value = lambd2_scale * best_lambda

    try:
        prob.solve(solver=cp.ECOS, warm_start=True)
    except SolverError:  # try a second solver
        try:
            prob.solve(solver=cp.SCS, warm_start=True)
        except SolverError:  # give up
            print('Both ECOS and SCS failed.')
            return 0

    beta = np.array(z.value[0:K] - z.value[K:2 * K])
    yhat = np.dot(X, beta)

    return beta, yhat, best_lambda
Example #34
def multigrid(A,
              rhs,
              current_level,
              terminal_level,
              pMethod='identity',
              nMethod='given',
              pInput=None,
              nInput=None,
              useTerminalMPI=False,
              model=None,
              hack=False):
    # TODO: Should check if it's part of the enum
    if (pMethod == 'given'):
        if (len(pInput) != terminal_level - current_level):
            raise ValueError("Needs to be consistent")
        else:
            A = permute_sparse_matrix(A, pInput[0][0], pInput[0][1])
            rhs = rhs[pInput[0][0]]

            # TODO Should be a bit more dynamic but will set to identity:
            pMethod = 'identity'

    if (nMethod == 'given'):
        if (len(nInput) != terminal_level - current_level + 1):
            raise ValueError("Needs to be consistent")
        else:
            nrows = nInput[0]
            if (terminal_level != current_level):
                nInput = nInput[1:]

    # Split input matrix based on nrows
    # TODO: Actual algorithm for nrows.
    A = A.tocsr()
    B0 = A[:nrows, :nrows]
    F0 = A[:nrows, nrows:]
    E0 = A[nrows:, :nrows]
    C0 = A[nrows:, nrows:]

    # Same for f0. f0 is the stuff that can be computed independently,
    # g0 is the stuff that can't be
    f0 = rhs[:nrows]
    g0 = rhs[nrows:]

    # On this level, set L0 = I, and U0 = B
    # TODO: Should this always be the case for non -level 0?

    if (current_level == terminal_level):
        # Direct Solve via ILU

        ILU = sparse.linalg.spilu(B0)
        (L1, U1) = (ILU.L, ILU.U)
        G1 = sparse.linalg.spsolve_triangular(U1.T, (E0.T).todense())
        W1 = sparse.linalg.spsolve_triangular(L1, F0.todense())
        A2 = C0 - G1.T * W1

        # Backsolve
        f1_prime = sparse.linalg.spsolve_triangular(L1, f0)
        f1_prime = f1_prime.reshape(len(f1_prime), 1)
        inner = G1.T * f1_prime
        #       inner = inner.reshape((len(inner), 1))
        g1_prime = g0 - inner

        # More backsolve
        y1 = spsolve(A2, g1_prime)
        y1 = y1.reshape(len(y1), 1)
        u1 = sparse.linalg.spsolve_triangular(
            U1, (f1_prime.reshape(len(f1_prime), 1) - W1 * y1), False)
        u1 = u1.reshape(len(u1), 1)
        y0 = np.concatenate((u1, y1))
        # Stick them together
        return y0
    else:
        # Descend downwards
        # Todo: Shouldn't hardcode 9
        L0 = sparse.eye(B0.shape[0])
        U0 = B0
        # Since B0 is diagonal can do this
        inv_U0 = sparse.diags(1 / B0.diagonal())

        # Use Schur's complement
        inv_L0 = L0
        G0 = E0 * inv_U0
        W0 = inv_L0 * F0
        A1 = C0 - G0 * W0

        # Forward/backward substitution
        f0_prime = sparse.linalg.spsolve_triangular(L0, f0)
        f0_prime = f0_prime.reshape(len(f0_prime), 1)
        g0_prime = g0 - G0 * f0_prime

        y0 = multigrid(A1, g0_prime, current_level + 1, terminal_level,
                       'identity', 'given', pInput, nInput)
        u0 = sparse.linalg.spsolve_triangular(U0, (f0_prime - W0 * y0), False)
        u0 = u0.reshape(len(u0), 1)
        y0 = np.concatenate((u0, y0))

        if pInput is not None:
            y0 = repermute(y0, pInput[0][1])
        return y0
Example #35
    def __init__(self, n, seed=1):
        '''
        Generate problem in QP format and CVXPY format
        '''
        # Set random seed
        np.random.seed(seed)

        # Generate random dynamics
        self.nx = int(n)  # States
        self.nu = int(n / 2)  # Inputs

        self.A = spa.eye(self.nx) + .1 * spa.random(
            self.nx, self.nx, density=1.0, data_rvs=np.random.randn)

        # Restrict eigenvalues of A to be less than 1
        lambda_values, V = np.linalg.eig(self.A.todense())
        abs_lambda_values = np.abs(lambda_values)

        # Enforce eigenvalues to be maximum norm 1
        for i in range(len(lambda_values)):
            lambda_values[i] = lambda_values[i] \
                if abs_lambda_values[i] < 1 - 1e-02 else \
                lambda_values[i] / (abs_lambda_values[i] + 1e-02)

        # Reconstruct A = V * Lambda * V^{-1}
        self.A = spa.csc_matrix(
            V.dot(np.diag(lambda_values)).dot(np.linalg.inv(V)).real)

        self.B = spa.random(self.nx,
                            self.nu,
                            density=1.0,
                            data_rvs=np.random.randn)

        # Control penalty
        self.R = .1 * spa.eye(self.nu)
        ind07 = np.random.rand(self.nx) < 0.7  # random mask keeping ~70% of entries
        # Choose only 70% of nonzero elements
        diagQ = np.multiply(np.random.rand(self.nx), ind07)
        self.Q = spa.diags(diagQ)
        QN = sla.solve_discrete_are(self.A.todense(), self.B.todense(),
                                    self.Q.todense(), self.R.todense())
        self.QN = spa.csc_matrix(QN.dot(QN.T))

        # self.QN = spa.csc_matrix(QN.dot(QN))  # Ensure symmetric PSD
        # self.QN = 10 * self.Q

        # Input and state bounds
        self.umin = -1.0 * np.random.rand(self.nu)
        self.umax = -self.umin
        self.xmin = -1.0 - np.random.rand(self.nx)
        self.xmax = -self.xmin

        # Initial state (constrain to be within lower and upper bound)
        self.x0 = np.random.rand(self.nx)
        min_x0 = .5 * self.xmin
        max_x0 = .5 * self.xmax
        for i in range(self.nx):
            self.x0[i] = min_x0[i] + \
                self.x0[i] * (max_x0[i] - min_x0[i])

        # Horizon length
        self.T = 10

        self.qp_problem = self._generate_qp_problem()
        self.cvxpy_problem, self.cvxpy_variables, self.cvxpy_param = \
            self._generate_cvxpy_problem()
Example #36
    def A_matvec(x):
        x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        w, v = eigs(x, k=1)
        return v / w[0]
Example #37
    def __init__(self, strain_odb_file_name, boundary_conditions, step_name, instance_name='', set_name='',
                 strain_field_id='E'):
        print("Init calculator")
        self.odb_file_name = strain_odb_file_name
        self.work_directory = create_temp_dir_name(strain_odb_file_name)
        os.makedirs(self.work_directory)
        self.stain_field_id = strain_field_id
        self.instance_name = instance_name
        self.set_name = set_name
        parameter_pickle_file = self.work_directory + '/parameter_strain_pickle.pkl'
        data_pickle_file = self.work_directory + '/strain_pickle.pkl'
        parameter_dict = {'instance_name': self.instance_name, 'strain_odb_file_name': self.odb_file_name,
                          'set_name': self.set_name, 'strain_field_id': self.stain_field_id, 'step_name': step_name}
        with open(parameter_pickle_file, 'wb') as pickle_file:
            pickle.dump({'parameter_dict': parameter_dict, 'boundary_conditions': boundary_conditions}, pickle_file,
                        protocol=2)
        os.chdir('abaqus_functions')
        job = subprocess.Popen(abq + ' python write_data_for_def_calculation.py ' + parameter_pickle_file + ' '
                               + data_pickle_file, shell=True)
        job.wait()
        os.chdir('..')
        with open(data_pickle_file, 'rb') as pickle_file:
            data = pickle.load(pickle_file, encoding='latin1')
        os.remove(data_pickle_file)
        os.remove(parameter_pickle_file)

        print("Handling boundary conditions")
        bc_dofs = data['bc_dofs']
        bc_vals_dict = data['bc_vals_dict']
        self.nodal_displacements = data['nodal_displacements']
        elements = data['elements']
        b_components = data['b_components']
        strain_components = data['strain_components']
        displacement_components = data['displacement_components']
        self.bc_vals = 0.*bc_dofs
        for i, dof in enumerate(bc_dofs):
            self.bc_vals[i] = bc_vals_dict.get(dof, 0.)

        self.nodal_displacements[bc_dofs] = self.bc_vals
        row = np.zeros(b_components)
        col = np.zeros(b_components)
        values = np.zeros(b_components)
        self.gauss_point_volumes = np.zeros(strain_components)
        print("Assembling B-matrix")
        job_list = []
        for i, element in enumerate(elements):
            job_list.append((calculate_element_data, [element, i], {}))
        print("Starting multiprocessing of elements")
        b_data = multi_processer(job_list, cpus=12, delay=0., timeout=1e5)
        print("Assembling results")
        for i, data in enumerate(b_data):
            n = data[0].shape[0]
            col[i*n:(i + 1)*n] = data[0]
            row[i*n:(i + 1)*n] = data[1]
            values[i*n:(i + 1)*n] = data[2]
            gps = data[3].shape[0]//6
            self.gauss_point_volumes[i*gps*6:(i+1)*gps*6] = data[3]
        self.B_matrix = coo_matrix((values, (row, col)),
                                   shape=(strain_components, displacement_components)).tocsc()
        print("Shape of B-matrix:", self.B_matrix.shape)
        all_cols = np.arange(self.nodal_displacements.shape[0])
        self.bc_cols = np.where(np.in1d(all_cols, bc_dofs))[0]
        self.cols_to_keep = np.where(np.logical_not(np.in1d(all_cols, bc_dofs)))[0]
        print("Computing scale factors")
        scale_array = sp.diags([self.gauss_point_volumes], offsets=[0])
        self.B_matrix = scale_array*self.B_matrix
        self.B_red = self.B_matrix[:, self.cols_to_keep]

        print("Scaling B-matrix")
        self.scale_factors = norm(self.B_red, axis=0)
        scale_array = sp.diags([1./self.scale_factors], offsets=[0])
        self.B_red *= scale_array
        print("Init done")
        os.removedirs(self.work_directory)
Example #38
def normalize_adj_matrix(adj):
  """Normalize adjacency matrix."""
  rowsum = np.array(adj.sum(1))
  d_inv_sqrt = np.power(rowsum, -0.5).flatten()
  d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
  return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
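# A sketch of the symmetric normalization above, D^-1/2 A D^-1/2, on a toy
# graph with no isolated nodes (zero-degree rows would produce infs).
# Illustrative only; assumes `numpy as np` and `scipy.sparse as sp`.
import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.array([[0., 1., 1.],
                              [1., 0., 0.],
                              [1., 0., 0.]]))
print(normalize_adj_matrix(adj).toarray())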
Example #39
def spar_Jac(ncomp, nr, nz, ne):
    '''
    
    Helper function that predetermines the sparsity pattern. This is provided to
    solve_ivp so that solve_ivp does not have to determine it itself by repeatedly
    running the function's diffun, which is much slower.
    
    Called in PSDM.__init__()
    
    Parameters
    ----------
    ncomp : int
        number of compounds to consider.
    nr : int
        number of radial collocation points.
    nz : int
        number of axial collocation points.
    ne : int
        number of axial elements.

    Returns
    -------
    sparse matrix indicating the Jacobian sparsity pattern for solve_ivp.

    '''
    nz_tot = (nz * ne) - (ne - 1)
    cube_size = nz_tot * (nr + 1)
    bead_num = nr * nz_tot

    #Consistent features
    diags = np.ones((2 * (nr - 1) + 1, bead_num))
    diag_loc = np.arange(-((nr - 1) * nz_tot), (nr - 1) * nz_tot + 1, nz_tot)

    #bulk diagonal matrix
    mat1 = sparse.spdiags(diags, diag_loc, bead_num, bead_num)

    #always true
    low_diag = np.ones(bead_num)  #might be nz_tot-1
    low_diag[-nz_tot] = 0.  #drops out if not nz_tot
    lower_mat = sparse.spdiags(low_diag, (nr - 1) * (nz_tot), nz_tot, bead_num)

    mat2 = sparse.bmat([[mat1], [lower_mat]])
    right1 = sparse.bsr_matrix((bead_num - nz_tot, nz_tot))

    #features that change with sub-location
    for i in range(ncomp):
        for j in range(ncomp):
            if i == j:
                rightp = np.ones(nz_tot)
                rightp[0] = 0.
                right2 = sparse.diags(rightp, 0)
                right3 = np.ones((nz_tot, nz_tot))
                right3[0] = 0.
                right3[:, 0] = 0.
            else:
                right2 = sparse.bsr_matrix((nz_tot, nz_tot))
                right3 = right2
            right_mat = sparse.bmat([[right1], [right2]])
            #sparse.spdiags(low_diag, -(nr-1)*(nz_tot), bead_num, nz_tot)
            right_mat = sparse.bmat([[right_mat], [right3]])

            mat1 = sparse.bmat([[mat2, right_mat]])

            if j == 0:
                rowmat = mat1
            else:
                rowmat = sparse.bmat([[rowmat, mat1]])
        if i == 0:
            newmat = rowmat
        else:
            newmat = sparse.bmat([[newmat], [rowmat]])

    return newmat  # mat1, mat2
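
# Hedged usage sketch: the returned pattern is meant for solve_ivp's
# jac_sparsity option, which the implicit methods BDF, Radau and LSODA accept.
# diffun, y0 and t_end are placeholders from the surrounding project, so the
# call is left commented out:
# from scipy.integrate import solve_ivp
# sparsity = spar_Jac(ncomp=2, nr=8, nz=6, ne=3)
# sol = solve_ivp(diffun, (0., t_end), y0, method='BDF', jac_sparsity=sparsity)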
for i in range(len(run_values) - 1):
    i_start = run_starts[i]
    i_end = run_starts[i + 1]

    nbr_dict[run_values[i]] = list(a_mtx_nonzero[1][i_start:i_end])

nbr_dict[run_values[-1]] = list(a_mtx_nonzero[1][run_starts[-1]:])

np.random.seed(seed)
w_rand = np.random.rand(
    a_mtx_nonzero[0].shape[0]) * (ew_bnds[1] - ew_bnds[0]) + ew_bnds[0]
w_mtx = sparse.csr_matrix((w_rand, a_mtx_nonzero), shape=(n, n))
w_mtx = (w_mtx + w_mtx.T) / 2

k_nbrs = sparse.diags(np.ones(n)).tocsr()
if k > 0:
    k_nbrs += a_mtx_csr
    if k > 1:
        k_pow = sparse.csr_matrix(a_mtx_csr)
        for i in range(2, k + 1):
            k_pow = k_pow.dot(a_mtx_csr)
            k_nbrs += k_pow

k_nbrs_nonzero = k_nbrs.nonzero()

run_values, run_starts, run_lengths = find_runs(k_nbrs_nonzero[0])

knbr_dict = {}
for i in range(len(run_values) - 1):
    i_start = run_starts[i]
Example #41
0
MAXITER = 150
FREQ = 10
list_iters = list(range(0, MAXITER, FREQ))[1:]
# ----------------

norm = lambda M: np.linalg.norm(M, 'fro')
#norm = lambda M: np.linalg.norm(M, 1)
#norm = lambda M: np.linalg.norm(M, 2)
#norm = lambda M: np.linalg.norm(M, -1)

M = np.genfromtxt("My.txt")
M = csr_matrix(M)
diag = M.diagonal()
shift = 0
D = diags(diag, shift)
Z = M - D
n, m = M.shape

# ...


# ...
def cost0(c):
    a = c[:n]
    b = c[n:]
    T = toeplitz(a, b)
    nr = norm(M - T)
    return nr
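
# A hedged sketch of how cost0 could be minimized to fit the nearest Toeplitz
# matrix in the chosen norm (the optimizer and the starting point are
# assumptions, with M assumed square; toeplitz comes from scipy.linalg in the
# truncated imports above):
from scipy.optimize import minimize

c0 = np.concatenate([diag, np.zeros(m)])  # crude start: diagonal of M, zero row
res = minimize(cost0, c0, method='L-BFGS-B', options={'maxiter': MAXITER})
T_fit = toeplitz(res.x[:n], res.x[n:])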

Example #42
0
def Lisser(x, y, rhog):

    # visualization window
    xmin, xmax = min(x) - .02, max(x) + 0.02

    # Q0. Problem parameters
    ##########################
    # number of data points
    n = len(x) - 1
    # list of the steps h
    h = x[1:] - x[:-1]
    # number of points for plotting
    neval = 1201  # 1200 segments

    # smoothing strength
    rhoGlobal = rhog
    # weight of each data point relative to the others
    rhoRelatif = np.ones(len(x))
    # example:
    # rhoRelatif[4] = 100000
    # overall weights
    rho = rhoGlobal * rhoRelatif

    # 2. Determination of the 4-tuples of the smoothing spline
    #################################################################

    # a. compute the sigma''
    #==============================
    # i. build the linear system
    alphaj = 6. / (rho[2:n - 1] * h[1:n - 2] * h[2:n - 1])
    betaj = h[1:n - 1] - 6. * (h[1:n - 1] + h[2:n]) / (
        rho[2:n] * (h[1:n - 1]**2) * h[2:n]) - 6. * (
            h[0:n - 2] + h[1:n - 1]) / (rho[1:n - 1] *
                                        (h[1:n - 1]**2) * h[0:n - 2])
    gammaj = 2. * (h[0:n - 1] + h[1:n]) + 6. / (
        rho[2:n + 1] *
        h[1:n]**2) + 6. / (rho[0:n - 1] * h[0:n - 1]**2) + 6. * (
            (h[0:n - 1] + h[1:n])**2) / (rho[1:n] *
                                         (h[0:n - 1]**2) * h[1:n]**2)
    deltaj = betaj
    epsj = alphaj
    chij = 6. * ((y[2:] - y[1:n]) / h[1:n] - (y[1:n] - y[:n - 1]) / h[:n - 1])

    # A = np.diag(alphaj,-2)+np.diag(betaj,-1)+np.diag(gammaj)+np.diag(deltaj,1)+np.diag(epsj,2)
    # or better: in sparse form
    A = sp.diags([alphaj, betaj, gammaj, deltaj, epsj], [-2, -1, 0, +1, +2],
                 format="csc")
    B = chij

    # ii. solve the linear system
    # -> a direct solver is used this time, but Gaussian elimination could be recoded by hand
    sigma_seconde = np.zeros(n + 1)
    # sigma_seconde[1:-1] = npl.solve(A,B)
    # or better: in sparse form
    sigma_seconde[1:-1] = spl.spsolve(A, B)

    # b. compute the sigma'''
    #==============================
    sigma_tierce = np.zeros(n + 1)
    sigma_tierce[:-1] = (sigma_seconde[1:] - sigma_seconde[:-1]) / h

    # c. compute the sigma
    #==============================
    sigma = np.zeros(n + 1)
    sigma[0] = y[0] - sigma_seconde[1] / (rho[0] * h[0])
    sigma[n] = y[n] - sigma_seconde[n - 1] / (rho[n] * h[n - 1])
    sigma[1:n] = y[1:n] - (sigma_seconde[2:] - sigma_seconde[1:n]) / (
        rho[1:n] * h[1:n]) + (sigma_seconde[1:n] -
                              sigma_seconde[:n - 1]) / (rho[1:n] * h[:n - 1])

    # d. compute the sigma'
    #============================
    sigma_prime = np.zeros(n + 1)
    sigma_prime[:-1] = (sigma[1:] - sigma[:-1]) / h - h / 6 * (
        sigma_seconde[1:] + 2 * sigma_seconde[:-1])
    sigma_prime[-1] = sigma_prime[-2] + h[-1] * sigma_seconde[-2] + (
        h[-1]**2) / 2. * sigma_tierce[-2]

    # 3. Evaluation of the smoothing spline
    ##############################################

    # evaluate the spline at the neval points
    x_graphe = np.linspace(xmin, xmax, neval)
    sigma_graphe = eval_spline(x_graphe, x, sigma, sigma_prime, sigma_seconde,
                               sigma_tierce)

    return sigma, sigma_prime, sigma_seconde, sigma_tierce
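
# Hedged usage sketch on synthetic data (eval_spline, called inside Lisser, is
# defined elsewhere in the original script, so the call is left commented out):
# x = np.linspace(0., 1., 21)
# y = np.sin(2 * np.pi * x) + 0.05 * np.random.randn(x.size)
# sigma, sigma_prime, sigma_seconde, sigma_tierce = Lisser(x, y, rhog=100.)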
Example #43
0
		
	return np.clip(np.nan_to_num(KL), 0, 100)


x_test, y_test = load_svmlight_file('../out/test_distsim2.txt')
F = x_test.shape[1]  # number of features
x_train, y_train = load_svmlight_file('../out/train_distsim2.txt', n_features=F)

print('loaded')

x1, x2, y1, ytest = x_train[::2], x_train[1::2], y_train[::2], y_test[::2]
weights = KLDAggregate(F,x1,x2,y1)

print('KL weights computed')

scaler = diags([weights], [0])
scaled_x_train = x_train*scaler
scaled_x_test = x_test*scaler
	
print('rescaled')

model = NMF(n_components=100, init='nndsvd', shuffle=True)
model.fit(scaled_x_train)

print('NMF decomp fitted')

		
from sklearn import linear_model, svm, metrics

z_train = samples(scaled_x_train, model)
z_test = samples(scaled_x_test, model)
Example #44
0
    def __init__(self, MultiNet, MultiAttri, d, *varargs):
        """

        :param MultiNet: a list  of network matrices with shape of (n,n)
        :param MultiAttri: a list of attribute matrices with shape of (n,m)
        :param d: the dimension of the embedding representation
        :param varargs: 0:lambd,1:rho,2:maxiter,3:Att(use "Att" or "Net" for H's init), 4:splitnum
        :param self.H: a list of representation of layers (len: self.k)
        :param self.Z: a copy of self.H (len: self.k)
        :param self.U: a list of dual variables (len: self.k)
        :param self.V: window's representation (len: 1)
        :returns initialization of multiple core variables

        """
        self.window_len=8
        self.maxiter = 2  # Max num of iteration
        self.lambd = 0.05  # Initial regularization parameter
        self.rho = 5  # Initial penalty parameter
        self.k=len(MultiNet)#the num of layers of Multilayer Network
        print('k:',self.k)
        self.d = d
        self.worknum = 3
        splitnum = 1  # number of pieces we split the SA for limited cache
        [self.n, m] = MultiAttri[0].shape  # n = Total num of nodes, m = attribute category num
        print('MultiNet.shape:',MultiNet[0].shape)
        Nets=[]
        Attris=[]
        for Net in MultiNet:
            Net = sparse.lil_matrix(Net)
            Net.setdiag(np.zeros(self.n))
            Net = csc_matrix(Net)  # compress the sparse matrix for efficient scientific computation
            Nets.append(Net)
        # Nets=[csc_matrix(sparse.lil_matrix(Net).setdiag(np.zeros(self.n))) for Net in MultiNet]
        for Attri in MultiAttri:
            Attri=csc_matrix(Attri)
            Attris.append(Attri)
        # Attris=[csc_matrix(Attri) for Attri in MultiAttri]
        self.H=[]
        if len(varargs) >= 4 and varargs[3] == 'Att':
            # initialize H with the left unitary factor (n x d) of an SVD of the
            # attribute matrix with shuffled columns (n x m, or n x 10d)
            for Attri in Attris:
                sumcol = np.arange(m)  # [0, 1, ..., m-1]
                np.random.shuffle(sumcol)  # shuffle the column order
                self.H.append(svds(Attri[:, sumcol[0:min(10 * d, m)]], d)[0])
        else:
            # initialize H with the left unitary factor (n x d) of an SVD of the
            # topology matrix with columns sorted by descending column sum (n x n, or n x 10d)
            for Net in Nets:
                sumcol = Net.sum(0)  # sum Net down each column, giving a length-n row vector
                H_initial=svds(Net[:, sorted(range(0,self.n), key=lambda r: sumcol[0, r], reverse=True)[0:min(10 * d, self.n)]], d)[0]
                self.H.append(H_initial)
                # svds(Net[:, sorted(range(self.n), key=lambda r: sumcol[0, r], reverse=True)[0:min(10 * d, self.n)]], d)[0]
        if len(varargs) > 0:
            self.lambd = varargs[0]
            self.rho = varargs[1]
            if len(varargs) >= 3:
                self.maxiter = varargs[2]
                if len(varargs) >= 5:
                    splitnum = varargs[4]
        self.block = min(int(ceil(float(self.n) / splitnum)), 7575)  # split the n nodes into splitnum blocks, each treating at most 7575 nodes
        self.splitnum = int(ceil(float(self.n) / self.block))  # recompute splitnum
        with np.errstate(divide='ignore'):  # inf will be ignored; the computation below runs regardless
            self.Attri = [Attri.transpose() * sparse.diags(np.ravel(np.power(Attri.power(2).sum(1), -0.5))) for Attri in Attris]  # attribute matrices normalized by row L2 norm (transposed)
        self.Z = self.H.copy()
        self.affi = -1  # Index for affinity matrix sa
        self.U = [np.zeros((self.n, d)) for i in range(self.k)]  # each U is an n x d zero matrix
        self.V = self.H[0]  # V is initialized from H of the first layer
        self.nexidx = [np.split(Net.indices, Net.indptr[1:-1]) for Net in
                       Nets]  # split the nonzero row indices of each column into a list of sub-arrays
        self.Net = [np.split(Net.data, Net.indptr[1:-1]) for Net in Nets]  # split the nonzero values of each column into a list of sub-arrays
        beta = estimate_beta(XE, YE, params)

        # Estimate the results
        results[spl] = np.trace(np.transpose(XT, axes=(0, 2, 1)) @ beta,
                                axis1=1,
                                axis2=2)

    return results


mse = []
rsquared = []
# Compute the graph Laplacian
graph = load_graph()
degree = np.array(graph.sum(axis=0))
laplacian = ssp.diags(degree, offsets=[0]) - graph

# Parameters
params = {}
params["iterations"] = 1000
params["mu"] = 0.1
params["mu_min"] = 1e-7
# params["soft_thresh"] = 10e-3
params["soft_thresh"] = 0.0
params["delta"] = 0.0
params["graph"] = laplacian
if __name__ == "__main__":
    # 10-fold validation
    idx = np.arange(39)
    kf = KFold(n_splits=10)
    fold = 0
Example #46
0
def get_structural_holes_HAM(G, k, c, ground_truth_labels):
    '''Structural hole spanners detection via HAM method.

    Using HAM [1]_ to jointly detect SHS and communities.

    Parameters
    ----------
    G : easygraph.Graph
        An undirected graph.

    k : int
        top - k structural hole spanners

    c : int
        the number of communities

    ground_truth_labels : list of lists
        The label of each node's community.

    Returns
    -------
    top_k_nodes : list
        The top-k structural hole spanners. 

    SH_score : dict
        The structural hole spanners score for each node, given by HAM.

    cmnt_labels : dict
        The communities label of each node.


    Examples
    --------

    >>> get_structural_holes_HAM(G,
    ...                         k = 2, # To find top two structural holes spanners.
    ...                          c = 2,
    ...                          ground_truth_labels = [[0], [0], [1], [0], [1]] # The ground truth labels for each node - community detection result, for example.
    ...                         )

    References
    ----------
    .. [1] https://dl.acm.org/doi/10.1145/2939672.2939807

    '''

    G_index, _, node_of_index = G.to_index_node_graph(begin_index=1)

    A_mat = load_adj_matrix(G_index)
    A = A_mat  # adjacency matrix
    n = A.shape[0]  # the number of nodes

    epsilon = 1e-4  # smoothing value: epsilon
    max_iter = 50  # maximum iteration value
    seed = 5433
    np.random.seed(seed)
    topk = k

    # Inv of degree matrix D^-1
    invD = sps.diags((np.array(A.sum(axis=0))[0, :] + eps)**(-1.0), 0)
    # Laplacian matrix L = I - D^-1 * A
    L = (sps.identity(n) - invD.dot(A)).tocsr()
    # Initialize a random orthogonal matrix F
    F = sym(np.random.random((n, c)))

    # Algorithm 1
    for step in range(max_iter):
        Q = sps.identity(n).tocsr()
        P = L.dot(F)
        for i in range(n):
            Q[i, i] = 0.5 / (spl.norm(P[i, :]) + epsilon)

        R = L.T.dot(Q).dot(L)

        W, V = np.linalg.eigh(R.todense())
        Wsort = np.argsort(W)  # sort from smallest to largest
        F = V[:, Wsort[0:c]]  # select the smallest eigenvectors

    # find SH spanner
    SH = np.zeros((n, ))
    for i in range(n):
        SH[i] = np.linalg.norm(F[i, :])
    SHrank = np.argsort(SH)  # index of SH

    # METRICS BEGIN

    to_keep_index = np.sort(SHrank[topk:])
    A_temp = A[to_keep_index, :]
    A_temp = A_temp[:, to_keep_index]
    HAM_labels_keep = np.asarray(ground_truth_labels)[to_keep_index]
    allLabels = np.asarray(ground_truth_labels)

    cluster_matrix = F
    labelbook, distortion = kmeans(cluster_matrix[to_keep_index, :], c)
    HAM_labels, dist = vq(cluster_matrix[to_keep_index, :], labelbook)

    print("AMI")
    print('HAM: ' + str(
        metrics.adjusted_mutual_info_score(HAM_labels, HAM_labels_keep.T[0])))

    # classify SHS using majority voting
    predLabels = np.zeros(len(ground_truth_labels))
    predLabels[to_keep_index] = HAM_labels + 1

    HAM_predLabels = label_by_neighbors(A, predLabels)
    print('HAM_all: ' + str(
        metrics.adjusted_mutual_info_score(HAM_predLabels, allLabels.T[0])))

    print("NMI")
    print('HAM: ' + str(
        metrics.normalized_mutual_info_score(HAM_labels, HAM_labels_keep.T[0]))
          )
    print('HAM_all: ' + str(
        metrics.normalized_mutual_info_score(HAM_predLabels, allLabels.T[0])))

    print("Entropy")
    print('HAM: ' + str(avg_entropy(HAM_labels, HAM_labels_keep.T[0])))
    print('HAM_all: ' + str(avg_entropy(HAM_predLabels, allLabels.T[0])))

    # METRICS END

    SH_score = dict()
    for index, rank in enumerate(SHrank):
        SH_score[node_of_index[index + 1]] = int(rank)

    cmnt_labels = dict()
    for index, label in enumerate(HAM_predLabels):
        cmnt_labels[node_of_index[index + 1]] = int(label)

    # top-k SHS
    top_k_ind = np.argpartition(SHrank, -k)[-k:]
    top_k_ind = top_k_ind[np.argsort(SHrank[top_k_ind])[::-1][:k]]
    top_k_nodes = []
    for ind in top_k_ind:
        top_k_nodes.append(node_of_index[ind + 1])

    return top_k_nodes, SH_score, cmnt_labels
def local_schurs(inputs):
    (B, F, E, C, f, g, u) = inputs
    A1_local = C - E * sparse.diags(1 / B.diagonal()) * F
    g1_local = g - E * sparse.diags(1 / B.diagonal()) * f

    return ((A1_local, g1_local, u, B, F, f))
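
# Minimal self-contained sketch of the Schur complement formed above, assuming
# B is diagonal (which is what sparse.diags(1 / B.diagonal()) relies on); the
# small matrices are illustrative only:
import numpy as np
from scipy import sparse

B = sparse.diags([2., 4.]).tocsr()
E = sparse.csr_matrix(np.array([[1., 2.], [3., 4.]]))
F = sparse.csr_matrix(np.eye(2))
C = sparse.csr_matrix(np.eye(2))
S = C - E * sparse.diags(1 / B.diagonal()) * F  # Schur complement C - E B^-1 F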
    tridi_full_vector / np.linalg.norm(tridi_full_vector))
print('classical state:', classical_solution.state)

# It should not come as a surprise that `naive_hhl_solution` is exact because all the default methods used are exact. However, `tridi_solution` is exact only in the $2\times 2$ system size case. For larger matrices it will be an approximation, as shown in the slightly larger example below.

# In[10]:

from scipy.sparse import diags

num_qubits = 2
matrix_size = 2**num_qubits
# entries of the tridiagonal Toeplitz symmetric matrix
a = 1
b = -1 / 3

matrix = diags([b, a, b], [-1, 0, 1],
               shape=(matrix_size, matrix_size)).toarray()
vector = np.array([1] + [0] * (matrix_size - 1))

# run the algorithms
classical_solution = NumPyLinearSolver().solve(matrix,
                                               vector / np.linalg.norm(vector))
naive_hhl_solution = HHL().solve(matrix, vector)
tridi_matrix = TridiagonalToeplitz(num_qubits, a, b)
tridi_solution = HHL().solve(tridi_matrix, vector)

print('classical euclidean norm:', classical_solution.euclidean_norm)
print('naive euclidean norm:', naive_hhl_solution.euclidean_norm)
print('tridiagonal euclidean norm:', tridi_solution.euclidean_norm)

# We can also compare the difference in resources from the exact method and the efficient implementation. The $2\times 2$ system size is again special in that the exact algorithm requires less resources, but as we increase the system size, we can see that indeed the exact method scales exponentially in the number of qubits while `TridiagonalToeplitz` is polynomial.
def split_bigM(A, rhs, nrows, ncont, nprocess, model, inds):
    #print("Ncount is %d" % ncont)
    B0 = A[:nrows, :nrows]
    F0 = A[:nrows, nrows:]
    E0 = A[nrows:, :nrows]
    C0 = A[nrows:, nrows:]

    f0 = rhs[:nrows]
    g0 = rhs[nrows:]
    #    new_col_inds = shift_C0(C0, ncont, inds)
    #    C0 = C0[:, new_col_inds]
    #    F0 = F0[:, new_col_inds]
    (Bs, Fs, Es, Cs, gs, fs, us) = ([], [], [], [], [], [], [])

    (start_b, start_f_col, start_f_row, start_e_col, start_e_row, start_c_col,
     start_c_row) = (0, 0, 0, 0, 0, 0, 0)
    for k in range(0, ncont + 1):
        l_cut = len(inds['deltaLambda'][k])
        Bs.append(B0[start_b:start_b + l_cut, start_b:start_b + l_cut])
        us.append(f0[start_b:start_b + l_cut])
        start_b += l_cut
        th_cut = (len(inds['deltaTheta'][k]))

        Fs.append(F0[start_f_row:start_f_row + l_cut, :])
        fs.append(f0[start_f_row:start_f_row + l_cut])

        start_f_row += l_cut
        start_f_col += th_cut
        Es.append(E0[:, start_e_col:start_e_col + l_cut])

        start_e_row += l_cut
        start_e_col += l_cut
        temp_C0 = C0[start_c_row:start_c_row + th_cut, :]
        (r, c) = temp_C0.shape
        CO_above = sparse.csr_matrix((start_c_row, c))
        CO_below = sparse.csr_matrix((c - r - start_c_row, c))
        CO_all = sparse.vstack([
            CO_above,
            temp_C0,
            CO_below,
        ])
        Cs.append(CO_all)

        temp_g = g0[start_c_row:start_c_row + th_cut]
        # (r, c) = temp_g.shape
        g0_above = sparse.csr_matrix((start_c_row, 1))
        g0_below = sparse.csr_matrix((c - r - start_c_row, 1))
        g0_all = sparse.vstack([
            g0_above,
            sparse.csc_matrix(temp_g),
            g0_below,
        ])
        gs.append(g0_all)

        start_c_row += th_cut

    temp_C0 = C0[start_c_row:, :]
    (r, c) = temp_C0.shape
    CO_above = sparse.csr_matrix((start_c_row, c))
    CO_below = sparse.csr_matrix((c - r - start_c_row, c))
    CO_all = sparse.vstack([CO_above, temp_C0, CO_below])
    Cs.append(CO_all)

    temp_g = g0[start_c_row:]
    g0_above = sparse.csr_matrix((start_c_row, 1))
    g0_below = sparse.csr_matrix((c - r - start_c_row, 1))
    g0_all = sparse.vstack([
        g0_above,
        sparse.csc_matrix(temp_g),
        g0_below,
    ])
    gs.append(g0_all)

    # gs.append(g0[start_e_col])

    l_cut_p = len(inds['deltaLambdaP'])
    Bs.append(B0[start_b:start_b + l_cut_p, start_b:start_b + l_cut_p])
    us.append(f0[start_b:start_b + l_cut_p])

    Fs.append(F0[start_f_row:start_f_row + l_cut, :])
    fs.append(f0[start_f_row:start_f_row + l_cut])
    Es.append(E0[:, start_e_col:])
    print("BS length is %d" % len(Bs))
    to_send = [(B, F, E, C, f, g, u)
               for (B, F, E, C, f, g, u) in zip(Bs, Fs, Es, Cs, fs, gs, us)]

    As = []
    for i in range(0, len(Fs)):
        #  c = Cs[i]
        b = Bs[i]
        e = Es[i]
        f = fs[i]
        g = gs[i]
        #        temp_res = c - e * sparse.diags(1/b.diagonal()) * f
        temp_res = g - e * sparse.diags(1 / b.diagonal()) * f
        As.append(temp_res)
    return (to_send, As)
    yx = np.sum(y_sample_falseclassified.multiply(x_sample_falseclassified), axis=0)
    #print(yx.shape)
    
    
    gradient = np.dot(regPar, w) - yx.transpose()/B 
    nue = 1.0/(regPar*i)

    print("Iteration",t)
    predictionsTrain = np.where((yLabel_train.multiply(xData_train.dot(w)) < 1).todense())[0]
    print("Training Error", predictionsTrain.shape)
    #print(gradient.shape)
    #test_predictions = np.where((y_test.multiply(x_test.dot(w.transpose())) < 1).todense())[0]
    #print("Test Error", test_predictions.shape)
    
    diag_G = G.diagonal().reshape(1,rcv1.data.shape[1])
    G_inv = diags(np.reciprocal(diag_G),[0]).tocsr()
    w1 = w - nue*(G_inv.dot(gradient))
    w = min(1, 1/((linalg.norm(G*w1-w1))*np.sqrt(regPar))) * w1
    #print("hello")
    diag_G_ele = np.square(diag_G)
    gradient_ele = np.square(gradient)
    sum_grad = np.array(np.sqrt(diag_G_ele.transpose() + gradient_ele))
    G = diags(sum_grad.transpose(), [0]).tocsr()


# In[17]:




Example #51
0
def kron_reduction(G, ind):
    r"""Compute the Kron reduction.

    This function performs the Kron reduction of the weight matrix of the
    graph *G*, with boundary nodes labeled by *ind*. It creates a new graph
    with a weight matrix Wnew that contains only the boundary nodes and is
    computed as the Schur complement of the original matrix with respect to
    the selected indices.

    Parameters
    ----------
    G : Graph or sparse matrix
        Graph structure or weight matrix
    ind : list
        indices of the nodes to keep

    Returns
    -------
    Gnew : Graph or sparse matrix
        New graph structure or weight matrix


    References
    ----------
    See :cite:`dorfler2013kron`

    """
    if isinstance(G, graphs.Graph):

        if G.lap_type != 'combinatorial':
            msg = 'Unknown reduction for {} Laplacian.'.format(G.lap_type)
            raise NotImplementedError(msg)

        if G.is_directed():
            msg = 'This method only works for undirected graphs.'
            raise NotImplementedError(msg)

        L = G.L

    else:

        L = G

    N = np.shape(L)[0]
    ind_comp = np.setdiff1d(np.arange(N, dtype=int), ind)

    L_red = L[np.ix_(ind, ind)]
    L_in_out = L[np.ix_(ind, ind_comp)]
    L_out_in = L[np.ix_(ind_comp, ind)].tocsc()
    L_comp = L[np.ix_(ind_comp, ind_comp)].tocsc()

    Lnew = L_red - L_in_out.dot(linalg.spsolve(L_comp, L_out_in))

    # Make the laplacian symmetric if it is almost symmetric!
    if np.abs(Lnew - Lnew.T).sum() < np.spacing(1) * np.abs(Lnew).sum():
        Lnew = (Lnew + Lnew.T) / 2.

    if isinstance(G, graphs.Graph):
        # Should the diagonal be suppressed? This remains an open question.
        Wnew = sparse.diags(Lnew.diagonal(), 0) - Lnew
        Snew = Lnew.diagonal() - np.ravel(Wnew.sum(0))
        if np.linalg.norm(Snew, 2) >= np.spacing(1000):
            Wnew = Wnew + sparse.diags(Snew, 0)

        # Remove the diagonal for stability (subtracted as a diagonal matrix;
        # subtracting the raw 1-D array would broadcast over all rows)
        Wnew = Wnew - sparse.diags(Wnew.diagonal(), 0)

        coords = G.coords[ind, :] if len(G.coords.shape) else np.ndarray(None)
        Gnew = graphs.Graph(Wnew,
                            coords=coords,
                            lap_type=G.lap_type,
                            plotting=G.plotting)
    else:
        Gnew = Lnew

    return Gnew
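
# Hedged usage sketch on a raw combinatorial Laplacian (the weight-matrix
# branch, no Graph object; the module-level imports of the original source,
# sparse, linalg and graphs, are assumed in scope). For a 4-node path with
# unit weights, reducing onto the two endpoints yields the Laplacian of a
# single edge of weight 1/3, the series conductance of three unit edges:
import numpy as np
from scipy import sparse

W = sparse.csr_matrix(np.array([[0., 1., 0., 0.],
                                [1., 0., 1., 0.],
                                [0., 1., 0., 1.],
                                [0., 0., 1., 0.]]))
L_path = sparse.diags(np.asarray(W.sum(axis=0)).ravel()) - W
L_reduced = kron_reduction(L_path.tocsc(), [0, 3])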
Example #52
0
# files, without displaying anything (it can run on a headless server)

from scipy.sparse import eye, diags
from scipy.linalg import eigh


##Data:
N = 502
alpha = 0
beta = 1
gamma = 0

##Matrix construction
B = eye(N-1,N,1) - eye(N-1,N)
N3 = (N-1)//3
W = diags([alpha]*N3 + [beta]*N3 + [gamma]*N3)
L = -B.T @ W @ B

##Eigenpair computation
#D,U = eigsh(-L, k=500, which='SM')
D,U = eigh(-L.todense())
U_filt = U.T[D > 1e-30]
D_filt = D[D > 1e-30]

##Plots
num_shown = 8

vv = [ U_filt[i] for i in range(0,num_shown) ]

import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=num_shown, ncols=1)
Example #53
0
    def _multi_grid_solver(self, ):

        self.logger.propagate = False
        bx, by = np.ceil(1. * np.array(self.full_res) * self.pix_res /
                         self.aero_res)
        level_x = math.log(bx, 2)
        level_y = math.log(by, 2)
        level = int(min(level_x, level_y))
        scale_factors = 1. / 2**np.arange(level)[::-1]
        shapes = (np.array([bx, by])[..., None] *
                  scale_factors).astype(int).T  #[-5:]
        #shapes[0]     = np.array([3,3])
        shape_dict = dict(zip(range(len(shapes)), shapes))
        #order        = [0, 1, 2, 1, 0, 1, 2, 3, 4, 3, 2, 3, 4] + range(5, len(shapes))
        order = range(len(shapes))
        self.xap_emus = self.emus[0]  #[self.band_indexs]
        self.xbp_emus = self.emus[1]  #[self.band_indexs]
        self.xcp_emus = self.emus[2]  #[self.band_indexs]
        self.up_bounds = np.array([2.5, 7])
        self.bot_bounds = np.array([0.001, 0.])
        #self.up_bounds   = self.xap_emus[0].inputs[:,3:5].max(axis=0)
        #self.bot_bounds  = self.xap_emus[0].inputs[:,3:5].min(axis=0)
        #self.bot_bounds[:] = 0.

        self.logger.info('Total %d level of grids are going to be used.' %
                         (len(shapes)))
        #for ii, shape in enumerate(shapes):
        for _, ii in enumerate(order):
            shape = shape_dict[ii]
            self.logger.info(
                bcolors.BLUE +
                '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' +
                bcolors.ENDC)
            self.logger.info(bcolors.RED + 'Optimizing at grid level %d' %
                             (ii + 1) + bcolors.ENDC)
            self.num_blocks_x, self.num_blocks_y = shape
            self.control_variables = np.zeros(
                (self.boa.shape[0], 7, shape[0], shape[1]))
            if _ == 0:
                #self.aot, self.tcwv = self.aot_prior.copy(), self.tcwv_prior.copy()
                self.aot, self.tcwv = self._grid_conversion(
                    self.aot_prior.copy(),
                    shape), self._grid_conversion(self.tcwv_prior.copy(),
                                                  shape)
            if self.vza.ndim == 2:
                for i, parameter in enumerate([
                        self.sza, self.vza, self.raa, self.aot, self.tcwv,
                        self.tco3_prior, self.ele
                ]):
                    self.control_variables[:, i, :, :] = self._grid_conversion(
                        parameter, shape)

            elif self.vza.ndim == 3:

                same_ind = [0, 3, 4, 5, 6]
                same_var = [
                    self.sza, self.aot, self.tcwv, self.tco3_prior, self.ele
                ]
                for i, parameter in enumerate(same_var):
                    self.control_variables[:, same_ind[
                        i], :, :] = self._grid_conversion(parameter, shape)

                ang_ind = [1, 2]
                for j in range(len(self.vza)):
                    for i, parameter in enumerate([self.vza[j], self.raa[j]]):
                        self.control_variables[
                            j, ang_ind[i], :, :] = self._grid_conversion(
                                parameter, shape)

            self.prior_uncs = np.zeros((2, shape[0], shape[1]))
            for i, parameter in enumerate([self.aot_unc, self.tcwv_unc]):
                self.prior_uncs[i] = self._grid_conversion(parameter, shape)

            self.priors = np.zeros((2, shape[0], shape[1]))
            for i, parameter in enumerate([self.aot_prior, self.tcwv_prior]):
                self.priors[i] = self._grid_conversion(parameter, shape)

            #self._coarse_num = np.zeros(self.full_res)
            #self._coarse_num[self.Hx, self.Hy] = 1
            #subs = [np.array_split(sub, self.num_blocks_y, axis=1) for sub in np.array_split(self._coarse_num, self.num_blocks_x, axis=0)]
            #self._coarse_num = np.zeros((self.num_blocks_x, self.num_blocks_y))
            #for i in range(self.num_blocks_x):
            #    for j in range(self.num_blocks_y):
            #        self._coarse_num[i,j]        = subs[i][j].sum()
            nx, ny  = (np.ceil(np.array(self.full_res) / np.array([self.num_blocks_x, self.num_blocks_y])) \
                                  *  np.array([self.num_blocks_x, self.num_blocks_y])).astype(int)
            x_size, y_size = int(nx / self.num_blocks_x), int(
                ny / self.num_blocks_y)

            self._coarse_num = np.zeros((nx, ny))
            self._coarse_num[self.Hx, self.Hy] = 1

            self._coarse_num = self._coarse_num.reshape(
                self.num_blocks_x, x_size, self.num_blocks_y,
                y_size).sum(axis=(1, 3))
            #if ii < 0:
            #    self._coarse_mask = 1. * self._coarse_num / self._coarse_num.max() > 0.7
            #    pre_mask = self._coarse_mask
            #else:
            self._coarse_mask = self._coarse_num > 1

            #self.priors[0, ~self._coarse_mask] = self.control_variables[0, 3, self._coarse_mask].mean()
            #self.priors[1, ~self._coarse_mask] = self.control_variables[0, 3, self._coarse_mask].mean()

            self.priors = self.priors.reshape(2, -1)
            self.b_m_pixs = 4.  #self._coarse_num.max()#(self.aero_res / 500.)**2
            #self._coarse_mask = self._coarse_mask & self._grid_conversion(pre_mask, shape).astype(bool)
            #subs = [np.array_split(sub, self.num_blocks_y, axis=1) for sub in np.array_split(self.mask, self.num_blocks_x, axis=0)]
            #self._coarse_mask = np.zeros((self.num_blocks_x, self.num_blocks_y))
            #for i in range(self.num_blocks_x):
            #    for j in range(self.num_blocks_y):
            #        self._coarse_mask[i,j] = subs[i][j].sum()
            #self._coarse_mask = self._coarse_mask > 0.
            self.priors = self.control_variables[0,
                                                 [3, 4], :, :].reshape(2, -1)
            p0 = self.priors.copy()
            self.logger.info('Starting mean AOT : %.02f' % p0[0].mean())
            self.logger.info('Starting mean TCWV: %.02f' % p0[1].mean())
            bot = np.zeros_like(p0)
            up = np.zeros_like(p0)
            bot = np.ones(p0.shape) * self.bot_bounds[..., None]
            up = np.ones(p0.shape) * self.up_bounds[..., None]
            p0 = p0.ravel()
            bot = bot.ravel()
            up = up.ravel()
            bounds = np.array([bot, up]).T
            #import pdb; pdb.set_trace()
            #psolve = optimize.fmin_l_bfgs_b(self._cost, p0, approx_grad = 0, iprint = -1, m=20,\
            #                                maxiter=500, pgtol = 1e-3,factr=1e6, bounds = bounds,fprime=None)
            #res1 = optimize.minimize(self._cost, p0, jac=True, bounds = bounds, method='SLSQP', options={'disp': False})
            #P = np.ones(len(p0))#(self.prior_uncs**-2).ravel() #+ 2. * (1. / self.gamma)**-2
            #P[int(len(p0)/2.):] = 0.01
            #p0 = p0 / P
            mc = 100 if ii < 5 else 20
            mf = 20 if ii > 6 else 60
            mi = 20 if ii > 6 else 60
            factr = 1e8
            ftol = factr * np.finfo(float).eps
            psolve = optimize.minimize(self._cost,
                                       p0,
                                       jac=True,
                                       bounds=bounds,
                                       method='L-BFGS-B',
                                       options={
                                           'disp': False,
                                           'maxcor': mc,
                                           'gtol': 1e-02,
                                           'ftol': ftol,
                                           'maxfun': mf,
                                           'maxiter': mi
                                       })
            #res3 = optimize.minimize(self._cost, p0, jac=True, method='TNC', options={'disp': True})
            #res4 = optimize.minimize(self._cost, p0, jac=, method='COBYLA', options={'disp': True})
            #print res1, res2 #res3
            #psolve = res2
            self.logger.info(bcolors.GREEN + str(psolve['message'].decode()) +
                             bcolors.ENDC)
            self.logger.info(bcolors.GREEN +
                             'Iterations: %d' % int(psolve['nit']) +
                             bcolors.ENDC)
            self.logger.info(bcolors.GREEN +
                             'Function calls: %d' % int(psolve['nfev']) +
                             bcolors.ENDC)
            self.aot_prior, self.tcwv_prior = psolve['x'].reshape(
                2, self.num_blocks_x, self.num_blocks_y)
            #self.tcwv_prior = self.tcwv_prior
            if ii != len(shapes) - 1:
                mask = (self.aot_prior <= 0) | (self.aot_prior > 2.) | (
                    self.tcwv_prior <=
                    self.bot_bounds[1]) | (self.tcwv_prior > self.up_bounds[1])
                self.aot_prior[mask] = self.priors[0].reshape(shape)[mask]
                #self.aot_unc  [mask]  = self.prior_uncs[0].reshape(shape)[mask]
                self.tcwv_prior[mask] = self.priors[1].reshape(shape)[mask]
                #self.tcwv_unc  [mask] = self.prior_uncs[1].reshape(shape)[mask]
            else:
                self._obs_cost_test(psolve['x'], do_unc=True)
                nx, ny = self.prior_uncs.shape
                dtd = compose_dtd(1, ny)[0]
                #self.obs_unc[np.isnan(self.obs_unc)] = 0
                self.obs_unc[np.isnan(self.obs_unc)] = 0
                self.prior_uncs[np.isnan(self.prior_uncs)] = 0
                #self.prior_uncs[np.isnan(self.prior_uncs)] =  np.mean(self.prior_uncs[~np.isnan(self.prior_uncs)])
                #to_inv = np.nansum([sparse.diags((self.obs_unc[0]).ravel()), sparse.diags((self.prior_uncs[0]**-2).ravel()), self.gamma**2 * dtd], axis = 0)
                #to_inv = np.nansum([sparse.diags((self.obs_unc[0]).ravel()).toarray(), sparse.diags((self.prior_uncs[0]**-2).ravel()).toarray(), self.gamma**2 * dtd.toarray()], axis = 0).astype(np.float32)
                to_inv = sparse.diags(
                    (self.obs_unc[0]).ravel()) + sparse.diags(
                        (self.prior_uncs[0]**-2).ravel()) + self.gamma**2 * dtd
                aot_unc = (linalg.inv(to_inv).diagonal())**0.5

                #to_inv = np.nansum([sparse.diags((self.obs_unc[1]).ravel()), sparse.diags((self.prior_uncs[1]**-2).ravel()), self.gamma**2 * dtd], axis = 0)
                #to_inv = np.nansum([sparse.diags((self.obs_unc[1]).ravel()).toarray(), sparse.diags((self.prior_uncs[1]**-2).ravel()).toarray(), self.gamma**2 * dtd.toarray()], axis = 0).astype(np.float32)
                to_inv = sparse.diags(
                    (self.obs_unc[1]).ravel()) + sparse.diags(
                        (self.prior_uncs[1]**-2).ravel()) + self.gamma**2 * dtd
                tcwv_unc = (linalg.inv(to_inv).diagonal())**0.5

                unc = np.array([aot_unc, tcwv_unc])
                #unc = (np.nansum([self.obs_unc.reshape(nx, -1), self.prior_uncs**-2 ,  self.gamma**2], axis = 0)) ** -0.5
                self.aot_unc, self.tcwv_unc = unc.reshape(
                    nx, self.num_blocks_x, self.num_blocks_y)
        self.tco3_prior, self.tco3_unc = self._grid_conversion(
            self.tco3_prior,
            shape), self._grid_conversion(self.tco3_unc, shape)
        post_solved = np.array(
            [self.aot_prior, self.tcwv_prior, self.tco3_prior])
        post_unc = np.array([self.aot_unc, self.tcwv_unc, self.tco3_unc])
        handlers = self.logger.handlers[:]
        for handler in handlers:
            handler.close()
            self.logger.removeHandler(handler)
        return [post_solved, post_unc]
Example #54
0
    def __init__(self, \
        D = params.D, \
        K = params.K, \
        C = params.C, \
        xspace = params.x, \
        xnames = params.xnames, \
        name = params.name, \
        N = params.N, \
        dt = params.dt, \
        maxtime = params.maxtime, \
        iplot_time = params.iplot_time ):

        self.header = '''
        --------------------------------------------------------------
        Simex - Simulation of Extraction Processes
        --------------------------------------------------------------
        '''
        print(self.header)
        #----------------------------------
        # Extraction mechanism parameters defined via mex_param.py
        #----------------------------------
        self.D = D  #diffusion coefficients
        self.K = K  #partition coefficients
        self.C = C  #initial concentrations
        self.xspace = xspace  #Mechanism domain and interfaces points
        self.xnames = xnames  #Names of compartments

        self.ncomp = len(self.D)  #number of compartments
        self.nparts = len(self.K)  #number of interfaces
        self.domain_len = self.xspace[-1] - self.xspace[0]  #Domain size

        #output directory settings
        self.dir = "output"
        if not os.path.exists(self.dir):
            os.makedirs(self.dir)
        self.basename = name
        self.basedir = self.dir + "/" + self.basename
        if not os.path.exists(self.basedir):
            os.makedirs(self.basedir)

        self.basename = self.basedir + "/" + self.basename

        print("You defined a device with " + str(self.ncomp) +
              " compartment(s).")
        print("Mechanism layout/interfaces (x): ", self.xspace)
        print("Initial concentrations:", self.C)
        print("Diffusion coefficients:", self.D)
        print("Interface coefficients:", self.K)
        print("Output basename:", self.basename)
        print()

        #Check dimensions
        if self.nparts != self.ncomp + 1 and self.ncomp > 1:
            print(
                "Number of partitions must match the number of interfaces between spaces"
            )
            print("Please re-configure parameters in params.py")
            sys.exit(-1)

        #Pad the D vector with 0.0 at the first and last positions
        #This is just to simplify knowledge of the boundaries
        self.D = np.insert(self.D, 0, 0., axis=0)
        self.D = np.append(self.D, [0.])

        #Initialize compartment solvers
        #Create list of compartments
        self.compart = []
        for i in range(1, self.ncomp + 1):
            #Initialize compartment
            Dloc = np.array([self.D[i - 1], self.D[i], self.D[i + 1]])
            Kloc = np.array([self.K[i - 1], self.K[i]])
            xloc = np.array([self.xspace[i - 1], self.xspace[i]])
            self.compart.append(
                self.compartment(i - 1, Dloc, Kloc, xloc, xnames[i - 1]))

        #Discretize compartments and initialize the concentration on the grid
        self.Ninit = N
        print("Proposed number of control volumes (grid points): ", N)

        #check for adequate spatial resolution
        for i, comp in enumerate(self.compart):
            n = int(self.Ninit * (comp.len / self.domain_len)) - 1
            if n < 4:
                self.Ninit = int(6 * self.domain_len / comp.len)
                print("Warning: resolution not enough for this ", comp.name,
                      " compartment - Incresing total resolution!", self.Ninit)
                print(
                    "    Be aware that high resolution runs can take longer and use more memory!"
                )

        self.N = 0
        for i, comp in enumerate(self.compart):
            ni = self.N
            n = int(self.Ninit * (comp.len / self.domain_len)) - 1
            comp.init_disc(n, ni)  #configure
            self.N = self.N + comp.n
            #print("Compart: ", comp.icomp, " ini_index:", comp.ni, " deg_free",  comp.n)
        self.ndf = self.N
        self.N = self.N + self.nparts  #grid points, for plotting
        self.dx = (self.domain_len) / (self.N)

        self.x = np.linspace(self.xspace[0],
                             self.xspace[-1],
                             self.N,
                             endpoint=True)
        #self.x=self.x[:-1]

        print("Adjusted number of grid points: ", self.N)
        print("Number of degrees of freedom: ", self.ndf)

        #Define global tridiagonal matrix
        main = np.ones(self.ndf)
        lower = np.ones(self.ndf - 1)
        upper = np.ones(self.ndf - 1)

        #print(self.A.todense())
        for i, comp in enumerate(self.compart):
            comp.build_sys(main, lower, upper)

        #Fill matrix with compartment information (pre-computation)
        self.A = sparse.diags(diagonals=[main, lower, upper],
                              offsets=[0, -1, 1],
                              shape=(self.ndf, self.ndf),
                              format='csr')

        #print(self.A.todense())
        self.I = sparse.identity(self.ndf, format='csr')

        #Fill initial conditions
        self.u = np.zeros(self.ndf)
        for i, comp in enumerate(self.compart):
            self.u[comp.ni:comp.ni + comp.n] = np.full(comp.n, self.C[i])

        #Extend solution to interfaces and endpoints
        #self.extend_u()
        self.uext, self.mass = self.extend(self.u)
        self.mass_war = True
        self.mass_ini = self.mass

        #Time definition
        #Discretize time
        self.T = maxtime
        self.maxD = max(self.D)
        self.dt = dt  #0.1 #0.1*dx/maxD #0.25*dx*dx/maxD

        print()
        #Check if time discretization is fine enough

        dtdx_rel = self.dt * self.maxD / self.dx
        if dtdx_rel > 100:
            print(
                "Warning: reducing timestep size, as it seems too large for this resolution (rel, dt, dx)",
                dtdx_rel, self.dt, self.dx)
            self.dt = 100 * self.dx / self.maxD

        self.Nt = int(self.T / self.dt)
        self.time = np.linspace(0, self.T, self.Nt + 1)
        self.iplot = iplot_time
        print()
        print("Time-space info (dx, dt, Nt, maxD, dx/maxD):")
        print(self.dx, self.dt, self.Nt, self.maxD, self.dx / self.maxD)
        print()

        #Calculate equilibrium solution - reference
        self.equilibrium()
        self.diff_to_eq(0.0)

        #Precompute matrices
        self.Bplus = self.I + (0.5 * self.dt) * self.A
        self.Bminus = self.I - (0.5 * self.dt) * self.A
        #self.B=self.I-(self.dt)*self.A

        print("------------------------------------------------")
        print()
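
        # The precomputed Bplus/Bminus implement the Crank-Nicolson scheme:
        # each step of the time loop (which lives elsewhere in this class)
        # solves (I - dt/2 A) u_{n+1} = (I + dt/2 A) u_n, e.g. as a sketch,
        # assuming scipy.sparse.linalg.spsolve is imported:
        #   self.u = spsolve(self.Bminus, self.Bplus.dot(self.u))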
    def get_random_walk_distance(self,
                                 source=None,
                                 target=None,
                                 parameter=1,
                                 saveto=""):
        """
        Compute the random walk effective distance:
        F. Iannelli, A. Koher, P. Hoevel, I.M. Sokolov (in preparation)
        
        Parameters
        ----------
             source : int or None
                If source is None, the distances from all nodes to the target is calculated
                Otherwise the integer has to correspond to a node index
            
            target : int or None
                If target is None, the distances from the source to all other nodes is calculated
                Otherwise the integer has to correspond to a node index
                
            parameter : float
                compound delta which includes the infection and recovery rate alpha and beta, respectively,
                the mobility rate kappa and the Euler-Mascheroni constant lambda:
                    log[ (alpha-beta)/kappa - lambda ]
        
            saveto : string
                If empty, the result is saved internally in self.dominant_path_distance           
                
        Returns:
        --------
            random_walk_distance : ndarray or float
                If source and target are specified, a float value is returned that specifies the distance.
                
                If either source or target is None a numpy array is returned.
                The position corresponds to the node ID. 
                shape = (Nnodes,)
                
                If both are None a numpy array is returned.
                Each row corresponds to the node ID. 
                shape = (Nnodes,Nnodes)
        """

        assert (isinstance(parameter, float)
                or isinstance(parameter, int)) and parameter > 0
        assert isinstance(saveto, str)
        assert self.graph != None, "Load graph first."

        if hasattr(self.graph, "transition_rate"):
            P = adjacency_matrix(self.graph, weight="transition_rate").tocsc()
        else:
            P = adjacency_matrix(self.graph, weight="weight").tocsc()

        # assert np.all(np.isclose(P.sum(axis=1), 1, rtol=1e-15, equal_nan=True)), "If there are dim incompatibility issues, as nan == nan is false."
        assert np.all(np.isclose(
            P.sum(axis=1), 1,
            rtol=1e-15)), "The transition matrix has to be row normalized"

        one = eye(self.nodes, format="csc")
        Z = inv(one - P * np.exp(-parameter))
        D = diags(1. / Z.diagonal(), format="csc")
        """
        if np.any((Z.dot(D).toarray() == 0)):
            np.set_printoptions(suppress=True)
            print(f'A: \n {np.round(np.array(P.toarray()), 3)}')
            print(f'Z: \n {np.round(np.array(Z.toarray()), 3)}')
            print(f'D: \n {np.round(np.array(D.toarray()), 3)}')
            print(f'Z.dot(D): \n {np.round(np.array(Z.dot(D).toarray()), 3)}')
            utility_funcs.sum_matrix_signs(np.array(Z.toarray()))
            utility_funcs.sum_matrix_signs(np.array(D.toarray()))
            utility_funcs.sum_matrix_signs(np.array(Z.dot(D).toarray()))
            ZdotD = np.where(ZdotD == 0, 1e-10, ZdotD)
        """
        RWED = -np.log(Z.dot(D).toarray())
        # ZdotD = Z.dot(D).toarray()
        # ZdotD = np.where(ZdotD == 0, 1e-100, ZdotD)
        # RWED = -np.log(ZdotD)

        if source is not None:
            if target is not None:
                RWED = RWED[source, target]
            else:
                RWED = RWED[source, :]
        elif target is not None:
            RWED = RWED[:, target]

        if saveto is not "":
            save(saveto, RWED)

        return RWED
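
# Hedged standalone sketch of the same formula on a toy row-stochastic matrix
# (the names below are illustrative and independent of the class above):
import numpy as np
from scipy.sparse import csc_matrix, eye, diags
from scipy.sparse.linalg import inv

P = csc_matrix(np.array([[0., 1.], [1., 0.]]))  # row-normalized transitions
delta = 1.0
Z = inv(eye(2, format="csc") - P * np.exp(-delta))
D = diags(1. / Z.diagonal(), format="csc")
rwed = -np.log(Z.dot(D).toarray())  # zeros on the diagonal, positive elsewhere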
Example #56
0
def implicitHeat(eq, Ix, It, M=1000, N=1000):
    """
    Implicit method for solving the heat equation:
        u_{t} = D * u_{xx}
    With an initial condition for x, a Dirichlet left boundary condition and a Neumann
    right boundary condition for t, over an interval of space Ix = [a,b] and an interval
    of time It = [0,t]:
        u(x,0)     =    b(x)
        u(a,t)     =    l(t)
        u_{x}(b,t) =    r(t)
    The method uses backwards finite differences over N points to approximate u_{t} and
    centered finite differences over M points to approximate u_{xx}. We also approximate
    the values at the right boundary through the Taylor series:
        u_{t}(x,t)  = (u(x,t) - u(x,t-k)) / k
        u_{xx}(x,t) = (u(x+h,t) - 2u(x,t) + u(x-h,t)) / (h^2)
        u_{x}(x,t)  = (u(x-2h,t) - 4u(x-h,t) + 3u(x,t)) / (2h)
    This results in a linear system:
        Aw_{:,j+1} = b
    Where:
        - A is a tridiagonal matrix with the coefficients from the approximations.
        - b is a vector with the left boundary condition on the first entry, the right boundary condition
            on the last, and the respective approximations of x in every other entry.
    
    Parameters
    ----------
    eq : dictionary
        Description of the equation, containing:
            =========  =============================================
            Key        Value
            =========  =============================================
            D          int - Diffusion Coefficient
            ic         function - Initial Condition
            bcL        function - Left Boundary Condition (Dirichlet)
            bcR        function - Right Boundary Condition (Neumann)
            =========  =============================================
        
    Ix : tuple (x0 xf)
        The interval (x0, xf) of distance over which to approximate the solution.
    It : tuple (t0 tf)
        The interval (t0, tf) of time over which to approximate the solution.
    M : int, optional
        Number of steps for space in the approximation. Default is 1000.
    N : int, optional
        Number of steps for time in the approximation. Default is 1000.
    
    Returns
    -------
    W : (M + 1, N + 1) double ndarray
        Matrix with the approximations of the PDE.
    X : (M + 1) double ndarray
        Spatial grid points
    T : (N + 1) double ndarray
        Time grid points
    """

    #Generate equally spaced intervals for X and T
    X = np.linspace(Ix[0], Ix[1], M + 1)
    h = (Ix[1] - Ix[0]) / M

    T = np.linspace(It[0], It[1], N + 1)
    k = (It[1] - It[0]) / N

    #Initialize the approximation matrix with zeros, and the initial conditions of x in the first column
    W = np.zeros((M + 1, N + 1))
    W[:, 0] = eq['ic'](X)

    #Define sigma as Dk/h**2
    sigma = eq['D'] * k / (h**2)

    #Create the semi-tridiagonal sparse matrix, then calculate the inverse in order to save time when solving the system
    di = [[1]+[1+2*sigma]*(M-1)+[3/(2*h)], \
         [-sigma]*(M-1) + [-2/h], \
         [0] + [-sigma]*(M-1), \
         [0]*(M-2) + [1/(2*h)]]
    A = diags(di, [0, -1, 1, -2], (M + 1, M + 1), format='csc')
    Ainv = inv(A)

    for i in range(N):
        #Define b as a 0 vector with b[0] = l(t), b[-1] = r(t), and the previous approximations in every other entry
        b = np.zeros(M + 1)
        b[0] = eq['bcL'](T[i + 1])
        b[-1] = eq['bcR'](T[i + 1])
        b[1:M] = W[1:M, i]

        #Solve AW[:,i+1] = b
        W[:, i + 1] = Ainv.dot(b)

    return W, X, T
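
# Hedged usage sketch on a toy problem (the data are illustrative; numpy as np
# plus the truncated module imports of diags and inv are assumed):
eq = {'D': 1.0,
      'ic': lambda x: np.sin(np.pi * x),  # u(x, 0)
      'bcL': lambda t: 0.0,               # Dirichlet value at x = a
      'bcR': lambda t: 0.0}               # Neumann slope at x = b
W, X, T = implicitHeat(eq, Ix=(0.0, 1.0), It=(0.0, 0.1), M=50, N=100)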
    identity = test.compute_identity()
    '''
    # try for a square lattice

    fig = plt.figure()

    dim = 152
    dim1 = 152

    sinks = [
        i + dim * j for i in range(0, dim) for j in range(0, dim1)
        if (i == 0 or i == (dim - 1) or j == 0 or j == (dim1 - 1))
    ]

    if dim == dim1:
        toe = sparse.diags([1, 1], [1, -1], shape=(dim, dim), format='csr')
        adjacency = sparse.kron(toe, sparse.eye(dim)) + sparse.kron(
            sparse.eye(dim), toe)

        #plt.imshow(adjacency.todense())
        #plt.show()

        square_sandpile = Sandpile(adjacency, sinks)

        result = square_sandpile.stabilize(
            2 * square_sandpile.get_max_configuration())[0]

        #result2 = square_sandpile.stabilize(2*square_sandpile.get_max_configuration()-result)

        animation_helper = AnimateSquareSandpile(
            2 * square_sandpile.get_max_configuration(), square_sandpile,
Example #58
0
def explicitHeat(eq, Ix, It, M=1000, N=1000):
    """
    Explicit method for solving the heat equation:
        u_{t} = D * u_{xx}
    With an initial condition for x, a Dirichlet left boundary condition and a Neumann
    right boundary condition for t, over an interval of space Ix = [a,b] and an interval
    of time It = [0,t]:
        u(x,0)     =    b(x)
        u(a,t)     =    l(t)
        u_{x}(b,t) =    r(t)
    The method uses forward finite differences over N points to approximate u_{t} and
    centered finite differences over M points to approximate u_{xx}. We also approximate
    the values at the right boundary through the Taylor series:
        u_{t}(x,t)  = (u(x,t+k) - u(x,t)) / k
        u_{xx}(x,t) = (u(x+h,t) - 2u(x,t) + u(x-h,t)) / (h^2)
        u_{x}(x,t)  = (u(x-2h,t) - 4u(x-h,t) + 3u(x,t)) / (2h)
    This results in a linear system:
        Cw_{:,j+1} = Aw{:,j}+b
    Where:
        - A is a tridiagonal matrix with the coefficients from the approximations
        - C is the identity, except for the last row where it is the approximation for the right boundary
        - b is a zero vector with the left boundary condition on the first entry and the right boundary condition on the last.
        - w_{:,j} is the vector with the approximations for x at time j
    
    
    Parameters
    ----------
    eq : dictionary
        Description of the equation, containing:
            =========  =============================================
            Key        Value
            =========  =============================================
            D          int - Diffusion Coefficient
            ic         function - Initial Condition
            bcL        function - Left Boundary Condition (Dirichlet)
            bcR        function - Right Boundary Condition (Neumann)
            =========  =============================================
        
    Ix : tuple (x0 xf)
        The interval (x0, xf) of distance over which to approximate the solution.
    It : tuple (t0 tf)
        The interval (t0, tf) of time over which to approximate the solution.
    M : int, optional
        Number of steps for space in the approximation. Default is 1000.
    N : int, optional
        Number of steps for time in the approximation. Default is 1000.
    
    Returns
    -------
    W : (M + 1, N + 1) double ndarray
        Matrix with the approximations of the PDE.
    X : (M + 1) double ndarray
        Spatial grid points
    T : (N + 1) double ndarray
        Time grid points
    """

    #Generate equally spaced intervals for X and T
    X = np.linspace(Ix[0], Ix[1], M + 1)
    h = (Ix[1] - Ix[0]) / M

    T = np.linspace(It[0], It[1], N + 1)
    k = (It[1] - It[0]) / N

    #Initialize the approximation matrix with zeros, and the initial conditions of x in the first column
    W = np.zeros((M + 1, N + 1))
    W[:, 0] = eq['ic'](X)

    #Define sigma as Dk/h**2
    sigma = eq['D'] * k / (h**2)

    #Create the matrix A as a tridiagonal sparse matrix
    d = [[0]+[1-2*sigma]*(M-1)+[0], \
         [sigma]*(M-1) + [0], \
         [0] + [sigma]*(M-1)]
    A = diags(d, [0, -1, 1], (M + 1, M + 1))

    #Create Cinv, the inverse of the semi-identity matrix C.
    Cinv = np.eye(M + 1)
    Cinv[M, M - 2] = -1 / 3
    Cinv[M, M - 1] = 4 / 3
    Cinv[M, M] = 2 * h / 3

    #Iterate to fill all the columns of the matrix with the appropriate solutions
    for i in range(N):
        #Define b as a 0 vector with b[0] = l(t) and b[-1] = r(t)
        b = np.zeros(M + 1)
        b[0] = eq['bcL'](T[i + 1])
        b[-1] = eq['bcR'](T[i + 1])

        #Solve CW[:,i+1] = AW[:,i] + b
        b = b + A.dot(W[:, i])
        W[:, i + 1] = Cinv.dot(b)

    return W, X, T
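
# Stability note (standard von Neumann analysis, not stated in the original):
# unlike the implicit scheme above, this explicit scheme is only conditionally
# stable and requires sigma = D*k/h**2 <= 1/2, so M and N cannot be chosen
# independently. A quick guard before calling explicitHeat:
#   h = (Ix[1] - Ix[0]) / M
#   k = (It[1] - It[0]) / N
#   assert eq['D'] * k / h**2 <= 0.5, "explicit scheme unstable: increase N"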
Example #59
0
    def create_UVP_cov_matrix(self, var_img, option="ABS", m2pix=None):
        ''' -------------------------------------------------------------------
        generate the covariance matrix for the UV phase.

        For independent image noises, the covariance matrix of the imaginary
        part of the Fourier transform can be computed explicitly. To go from
        that to the covariance of the phase is possible in the high-Strehl
        regime where Arctan(imag/real) ~ imag/real.

        Possibilities for the real part:
        - model redundancy -> option="RED"
        - "real part"      -> option="REAL"
        - "modulus"        -> option="ABS"

        Parameters:
        ----------
        - var_img: a 2D image with variance per pixel

        Further remark:
        ------
        A more sophisticated implementation could include the computation
        of cross-terms between imaginary and real parts to be more exact?

        Note: Covariance matrix can also be computed via MC simulations, if
        you are unhappy with this one.

        Note: This algorithm was developed in part by Romain Laugier
        ------------------------------------------------------------------- '''

        ISZ = var_img.shape[0]
        try:
            _ = self.FF  # check to avoid recomputing existing arrays!

        except AttributeError:
            if m2pix is not None:
                self.FF = core.compute_DFTM1(self.kpi.UVC, m2pix, ISZ)
            else:
                print("Fourier matrix and/or m2pix are not available.")
                print("Please compute Fourier matrix.")
                return

        cov_img = diags(var_img.flat)  # image covariance matrix

        if option == "RED":
            BB = self.FF.imag / self.kpi.RED[:, None]
            BB *= self.kpi.TRM.sum() / var_img.sum()
            print("Covariance Matrix computed using model redundancy!")

        if option == "REAL":
            ft = self.FF.dot(var_img.flat)
            BB = self.FF.imag / ft.real[:, None]
            print("Covariance Matrix computed using the real part of FT!")

        if option == "ABS":
            ft = self.FF.dot(var_img.flat)
            BB = self.FF.imag / np.abs(ft)[:, None]
            print("Covariance Matrix computed using the modulus of FT!")

        # fourier phase covariance added to the KPO data structure
        self.phi_cov = BB.dot(cov_img.dot(BB.T))
        # kernel phase covariance added to the KPO data structure
        self.kp_cov = self.kpi.KPM.dot(self.phi_cov.dot(self.kpi.KPM.T))
        return self.phi_cov
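At its core this method is standard linear error propagation: with BB playing
the role of the Jacobian of phase with respect to pixels, the phase covariance
is BB @ Sigma @ BB.T, where Sigma = diag(var_img). A standalone toy sketch of
that identity follows; shapes and names are illustrative only, not part of the
class above:

import numpy as np
from scipy.sparse import diags

rng = np.random.default_rng(0)
BB = rng.normal(size=(5, 16))          # stand-in for FF.imag / |FT| rows
var_img = rng.uniform(0.5, 1.5, 16)    # per-pixel variances
Sigma = diags(var_img)                 # diagonal pixel covariance matrix
cov = BB @ (Sigma @ BB.T)              # covariance under the linear model
print(cov.shape)  # (5, 5)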
Example #60
def deconvolve(spec, specerr, lsf=None, eps=2500., smooth=None):
    """
    NAME:
       deconvolve
    PURPOSE:
       deconvolve the LSF
    INPUT:
       spec - spectrum (nwave)
       specerr - spectrum uncertainty array (nwave)
       lsf= (None) LSF to deconvolve, needs to be specified in non-sparse format
       eps= (2500.) smoothness parameter
       smooth= (None) if set to a resolution, smooth with a FWHM resolution of 'smooth' and return the spectrum on the apStar wavelength grid
    OUTPUT:
       high-resolution deconvolved spectrum, or smoothed deconvolved spectrum on the apStar wavelength grid if smooth= is set
    HISTORY:
       2015-04-24 - Written - Bovy (IAS)
    """
    # Parse LSF input
    if lsf is None:
        raise ValueError(
            "lsf= keyword with LSF in non-sparse format required for apogee.spec.lsf.deconvolve"
        )
    if isinstance(lsf, sparse.dia_matrix):
        raise ValueError(
            "lsf= keyword with LSF needs to be in non-sparse format")
    lsf[numpy.isnan(lsf)] = 0.
    # How much higher resolution is the LSF than the data?
    hires = int(round(lsf.shape[0] / 8575.))
    # Setup output
    out = numpy.zeros(lsf.shape[0])
    # Loop through the detectors and analyze each one separately
    for sindx, eindx in zip([140, 3450, 6250], [3370, 6200, 8450]):
        # Get the LSF for this detector
        slsf = sparsify(lsf[hires * sindx:hires * eindx])
        # Parse the spectrum and its error for this detector, normalize
        tspec = numpy.ones((eindx - sindx) * hires)
        tinvspecerr = numpy.zeros((eindx - sindx) * hires)
        norm = numpy.nanmean(spec[sindx:eindx])
        tspec[::hires] = spec[sindx:eindx] / norm
        tinvspecerr[::hires] = norm / specerr[sindx:eindx]
        # Deal with NaNs
        tinvspecerr[numpy.isnan(tspec)] = 0.
        tspec[numpy.isnan(tspec)] = 1.
        # Set up the necessary sparse matrices
        Cinv = sparse.diags([tinvspecerr**2.], [0])
        CinvL = Cinv.dot(slsf)
        LTCinvL = (slsf.T).dot(CinvL)
        # P: first-difference smoothness (regularization) matrix
        diags1 = -numpy.ones(slsf.shape[1])
        diags1[-1] = 0.
        diags2 = numpy.ones(slsf.shape[1] - 1)
        P = sparse.diags([diags1, diags2], [0, 1])
        A = LTCinvL + eps * (P.T).dot(P)
        # b
        Cinvs = Cinv.dot(tspec)
        b = (slsf.T).dot(Cinvs)
        tmp = scipy.sparse.linalg.bicg(A, b)
        if tmp[1] == 0:
            tmp = tmp[0]
        else:
            raise RuntimeError("Deconvolution did not converge")
        out[sindx * hires:eindx * hires] = tmp * norm
    if smooth is not None:
        wav = apStarWavegrid()
        l10wav = numpy.log10(wav)
        dowav = l10wav[1] - l10wav[0]
        sigvm = hires / dowav / smooth / numpy.log(10.) \
            / 2. / numpy.sqrt(2. * numpy.log(2.))
        out = ndimage.gaussian_filter1d(out, sigvm, mode='constant')[::hires]
    return out
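For reference, each detector block above assembles and solves a
Tikhonov-regularized least-squares problem, (L.T Cinv L + eps * P.T P) x =
L.T Cinv s, where L is the sparse LSF, Cinv the inverse noise covariance, and
P a first-difference operator. A self-contained toy version of those normal
equations, with illustrative sizes and values (eps = 2500. mirrors the
default above):

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import bicg

n = 50
L = sparse.diags([0.25, 0.5, 0.25], [-1, 0, 1], shape=(n, n))  # toy LSF
s = 1. + 0.1 * np.sin(np.linspace(0., 3., n))   # toy observed spectrum
Cinv = sparse.diags(np.ones(n))                 # inverse noise covariance
d0 = -np.ones(n)
d0[-1] = 0.                                     # mirrors diags1 above
P = sparse.diags([d0, np.ones(n - 1)], [0, 1])  # first differences
A = (L.T).dot(Cinv.dot(L)) + 2500. * (P.T).dot(P)
b = (L.T).dot(Cinv.dot(s))
x, info = bicg(A, b)
print(info)  # 0 means bicg converged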