def solve_hank(model, sparse_mat=True):
    GAM0, GAM1, PSI, PI, C = eq.eqcond(model)

#     GAM0, GAM1, PSI, PI, C, basis_redundant, inv_basis_redundant= \
#                 red.solve_static_conditions(GAM0, GAM1, PSI, PI, C)

    inv_basis_redundant = speye(GAM0.shape[0], format='csc')
    basis_redundant     = speye(GAM0.shape[0], format='csc')

    if sparse_mat:
        GAM0 = csr_matrix(GAM0)
        GAM1 = csr_matrix(GAM1)
        PSI  = csr_matrix(PSI)
        PI   = csr_matrix(PI)
        C    = csr_matrix(C.reshape(len(C), 1, order='F'))

    # krylov reduction
    if model.settings['reduce_state_vars'].value:
        GAM0, GAM1, PSI, PI, C, basis_kry, inv_basis_kry = \
                red.krylov_reduction(model, GAM0, GAM1, PSI, PI, C)


    # value function reduction via spline projection
    if model.settings['reduce_v'].value:
        GAM0, GAM1, PSI, PI, C, basis_spl, inv_basis_spl = \
                red.valuef_reduction(model, GAM0, GAM1, PSI, PI, C)

    # Compute inverse basis for Z matrix and IRFs transformation
    if model.settings['reduce_v'].value and model.settings['reduce_state_vars'].value:
        inverse_basis = inv_basis_redundant @ inv_basis_kry @ inv_basis_spl  # from_spline
        basis = basis_spl @ basis_kry @ basis_redundant
    elif model.settings['reduce_state_vars'].value:
        inverse_basis = inv_basis_redundant @ inv_basis_kry
        basis = basis_kry @ basis_redundant
    else:
        inverse_basis = inv_basis_redundant
        basis = basis_redundant

    # Solve LRE model
    G1, C, impact, _, _, eu = \
            gensys_hank(GAM1.toarray(), C.toarray(), PSI.toarray(), PI.toarray())

    if eu[0] != 1 or eu[1] != 1:
        raise util.GensysError()

    G1 = np.real(G1)
    impact = np.real(impact)
    C = np.real(C).flatten()

    return G1, impact, C, inverse_basis, basis
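The returned system is in standard gensys form; a minimal sketch of how the outputs are typically used downstream (the names below are assumptions, not part of the function):

# reduced-coordinate law of motion: s_t = C + G1 @ s_{t-1} + impact @ eps_t
# map a reduced state back to the full state space: full_state_t = inverse_basis @ s_t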
Example #2
    def check_eye(self):
        a = speye(2, 3)
        b = array([[1, 0, 0], [0, 1, 0]], dtype='d')
        assert_array_equal(a.toarray(), b)

        a = speye(3, 2)
        b = array([[1, 0], [0, 1], [0, 0]], dtype='d')
        assert_array_equal(a.toarray(), b)

        a = speye(3, 3)
        b = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='d')
        assert_array_equal(a.toarray(), b)
Example #3
def spline_basis(x, knots_dict):
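    # Build quadratic-spline interpolation matrices between the knot values and
    # the grid `x`: `from_knots` maps knot coefficients to values on `x`, and
    # `to_knots` is its least-squares left inverse (projects grid values back).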
    knots = knots_dict[0]
    n_a = len(x)
    n_knots = len(knots)

    first_interp_mat = np.zeros([n_a, n_knots + 1])
    aux_mat = np.zeros([n_a, n_knots])

    for i in range(n_a):
        loc = sum(knots <= x[i])
        if loc == n_knots:
            loc = n_knots - 1
        first_interp_mat[i, loc - 1] = 1 - (x[i] - knots[loc - 1])**2 / (
            knots[loc] - knots[loc - 1])**2
        first_interp_mat[i, loc] = (x[i] - knots[loc - 1])**2 / (
            knots[loc] - knots[loc - 1])**2
        aux_mat[i, loc - 1] = (x[i] - knots[loc - 1]) - (
            x[i] - knots[loc - 1])**2 / (knots[loc] - knots[loc - 1])

    aux_mat2 = spdiag(np.ones(n_knots), offsets=0, shape=(n_knots, n_knots), format="csc") \
              +spdiag(np.ones(n_knots), offsets=1, shape=(n_knots, n_knots), format="csc")
    aux_mat2[-1, -1] = 0
    aux_mat2[n_knots - 1, 0] = 1
    aux_mat3 = spdiag(np.hstack([-2/np.diff(knots), 0.0]), offsets=0, shape=(n_knots, n_knots+1), format="csc") \
              +spdiag(np.hstack([ 2/np.diff(knots), 1.0]), offsets=1, shape=(n_knots, n_knots+1), format="csc")

    from_knots = csc_matrix(first_interp_mat) + csc_matrix(aux_mat) * (spsolve(
        aux_mat2, aux_mat3))
    to_knots = spsolve((from_knots.conj().T * from_knots),
                       from_knots.conj().T) * speye(n_a, format="csc")

    return from_knots, to_knots
Example #4
def _make_morph_map(subject_from, subject_to, subjects_dir=None):
    """Construct morph map from one subject to another.

    Note that this is close, but not exactly like the C version.
    For example, parts are more accurate due to double precision,
    so expect some small morph-map differences!

    Note: This seems easily parallelizable, but the overhead
    of pickling all the data structures makes it less efficient
    than just running on a single core :(
    """
    subjects_dir = get_subjects_dir(subjects_dir)
    morph_maps = list()

    # add speedy short-circuit for self-maps
    if subject_from == subject_to:
        for hemi in ['lh', 'rh']:
            fname = op.join(subjects_dir, subject_from, 'surf',
                            '%s.sphere.reg' % hemi)
            n_pts = len(read_surface(fname, verbose=False)[0])
            morph_maps.append(speye(n_pts, n_pts, format='csr'))
        return morph_maps

    for hemi in ['lh', 'rh']:
        # load surfaces and normalize points to be on unit sphere
        fname = op.join(subjects_dir, subject_from, 'surf',
                        '%s.sphere.reg' % hemi)
        from_rr, from_tri = read_surface(fname, verbose=False)
        fname = op.join(subjects_dir, subject_to, 'surf',
                        '%s.sphere.reg' % hemi)
        to_rr = read_surface(fname, verbose=False)[0]
        _normalize_vectors(from_rr)
        _normalize_vectors(to_rr)

        # from surface: get nearest neighbors, find triangles for each vertex
        nn_pts_idx = _compute_nearest(from_rr, to_rr)
        from_pt_tris = _triangle_neighbors(from_tri, len(from_rr))
        from_pt_tris = [from_pt_tris[pt_idx] for pt_idx in nn_pts_idx]

        # find triangle in which point lies and assoc. weights
        tri_inds = []
        weights = []
        tri_geom = _get_tri_supp_geom(dict(rr=from_rr, tris=from_tri))
        for pt_tris, to_pt in zip(from_pt_tris, to_rr):
            p, q, idx, dist = _find_nearest_tri_pt(to_pt,
                                                   tri_geom,
                                                   pt_tris,
                                                   run_all=False)
            tri_inds.append(idx)
            weights.append([1. - (p + q), p, q])

        nn_idx = from_tri[tri_inds]
        weights = np.array(weights)

        row_ind = np.repeat(np.arange(len(to_rr)), 3)
        this_map = csr_matrix((weights.ravel(), (row_ind, nn_idx.ravel())),
                              shape=(len(to_rr), len(from_rr)))
        morph_maps.append(this_map)

    return morph_maps
Example #5
    def __init__(self, graph, tau, gamma, lamda=1e-3, epsilon=1e-6, alpha=None, use_tau_offset=True):
        """

        :param graph:
        :type graph: nx.Graph
        :param tau:
        :type tau: float
        :param gamma:
        :type gamma: float
        :param lamda:
        :type lamda: float
        :param epsilon:
        :type epsilon: float
        :param alpha:
        :type alpha: float
        """

        self.graph = graph
        self.tau = tau
        self.gamma = gamma
        self.lamda = lamda
        self.epsilon = epsilon
        if alpha is None:
            self.alpha = self.epsilon
        else:
            self.alpha = alpha
        self.use_tau_offset = use_tau_offset

        self.V = nx.linalg.laplacian_matrix(self.graph)
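        # ridge term: adding lamda * I makes the (positive semi-definite)
        # graph Laplacian strictly positive definite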
        self.V += speye(self.graph.number_of_nodes()) * self.lamda
        self.x = np.zeros(self.graph.number_of_nodes())
        self.n = np.zeros_like(self.x)
        self.mu_hat = np.ones_like(self.x) * self.tau
Example #6
def euler_backward(A, u, ht, f, g, start, start_halo, end, end_halo, N, comm):
    '''
    Carry out backward Euler's method for one time step
       
    Input
    -----
    A <CSR matrix>  : Discretization matrix of Poisson operator
    u <array>       : Current solution vector at previous time step
    ht <scalar>     : Time step size
    f <array>       : Current forcing vector
    g <array>       : Current vector containing boundary condition information
    start <int>     : First domain row owned by this processor (same as HW4)
    start_halo <int>: Halo region "below" this processor (same as HW4)
    end <int>       : Last domain row owned by this processor (same as HW4)
    end_halo <int>  : Halo region "above" this processor (same as HW4)
    N <int>         : Size of a domain row, excluding boundary points
                      ===> THAT IS, N = n-2
    comm            : MPI communicator

    Output
    ------
    u at the next time step
    '''

    rank = comm.Get_rank()

    # Task: Form the system matrix for backward Euler
    I = speye(A.shape[0], format='csr')
    G = I - ht * A
    b = u + ht * g + ht * f

    # Task: return solution from Jacobi
    return jacobi(G, b, u, 10**-9, 300, start, start_halo, end, end_halo, N,
                  comm)
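For reference, a minimal serial sketch of the same step solved directly with spsolve instead of Jacobi; the 1-D Poisson discretization below is an assumption for illustration:

import numpy as np
from scipy.sparse import diags, eye as speye
from scipy.sparse.linalg import spsolve

N = 50
h = 1.0 / (N + 1)
# second-difference discretization of the 1-D Poisson operator (assumed)
A = diags([1.0, -2.0, 1.0], [-1, 0, 1], shape=(N, N), format='csr') / h**2
u = np.sin(np.pi * h * np.arange(1, N + 1))  # solution at the previous step
ht, f, g = 1e-4, np.zeros(N), np.zeros(N)    # step size, forcing, boundary data

# backward Euler: (I - ht*A) u_next = u + ht*(f + g)
G = speye(N, format='csr') - ht * A
u_next = spsolve(G, u + ht * g + ht * f)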
Example #7
def _make_morph_map(subject_from, subject_to, subjects_dir=None):
    """Construct morph map from one subject to another.

    Note that this is close, but not exactly like the C version.
    For example, parts are more accurate due to double precision,
    so expect some small morph-map differences!

    Note: This seems easily parallelizable, but the overhead
    of pickling all the data structures makes it less efficient
    than just running on a single core :(
    """
    subjects_dir = get_subjects_dir(subjects_dir)
    morph_maps = list()

    # add speedy short-circuit for self-maps
    if subject_from == subject_to:
        for hemi in ['lh', 'rh']:
            fname = op.join(subjects_dir, subject_from, 'surf',
                            '%s.sphere.reg' % hemi)
            from_pts = read_surface(fname, verbose=False)[0]
            n_pts = len(from_pts)
            morph_maps.append(speye(n_pts, n_pts, format='csr'))
        return morph_maps

    for hemi in ['lh', 'rh']:
        # load surfaces and normalize points to be on unit sphere
        fname = op.join(subjects_dir, subject_from, 'surf',
                        '%s.sphere.reg' % hemi)
        from_pts, from_tris = read_surface(fname, verbose=False)
        n_from_pts = len(from_pts)
        _normalize_vectors(from_pts)
        tri_geom = _get_tri_supp_geom(dict(rr=from_pts, tris=from_tris))

        fname = op.join(subjects_dir, subject_to, 'surf',
                        '%s.sphere.reg' % hemi)
        to_pts = read_surface(fname, verbose=False)[0]
        n_to_pts = len(to_pts)
        _normalize_vectors(to_pts)

        # from surface: get nearest neighbors, find triangles for each vertex
        nn_pts_idx = _compute_nearest(from_pts, to_pts)
        from_pt_tris = _triangle_neighbors(from_tris, len(from_pts))
        from_pt_tris = [from_pt_tris[pt_idx] for pt_idx in nn_pts_idx]

        # find triangle in which point lies and assoc. weights
        nn_tri_inds = []
        nn_tris_weights = []
        for pt_tris, to_pt in zip(from_pt_tris, to_pts):
            p, q, idx, dist = _find_nearest_tri_pt(to_pt, tri_geom, pt_tris,
                                                   run_all=False)
            nn_tri_inds.append(idx)
            nn_tris_weights.extend([1. - (p + q), p, q])

        nn_tris = from_tris[nn_tri_inds]
        row_ind = np.repeat(np.arange(n_to_pts), 3)
        this_map = csr_matrix((nn_tris_weights, (row_ind, nn_tris.ravel())),
                              shape=(n_to_pts, n_from_pts))
        morph_maps.append(this_map)

    return morph_maps
Example #8
def solve_kfe(A: spmatrix,
              g0: np.ndarray,
              weight_mat: spmatrix,
              maxit_kfe: int = 1000,
              tol_kfe: float = 1e-12,
              d_kfe: float = 1e6):

    if weight_mat.shape != A.shape:
        raise ValueError('Dimension of weight matrix is incorrect.')

    weight_mat = csr_matrix(weight_mat)
    dim_size = A.shape[0]
    gg = g0.flatten(order='F')  # stack distribution matrix into vector

    # Solve linear system
    for ikfe in range(maxit_kfe):
        gg_tilde = weight_mat @ gg  # weight distribution points by their measure across wealth
        gg1_tilde = spsolve(
            (speye(dim_size, dtype=complex) - d_kfe * A.conj().T), gg_tilde)

        gg1_tilde = gg1_tilde / gg1_tilde.sum()

        gg1 = spsolve(weight_mat, gg1_tilde)

        # Check iteration for convergence
        err_kfe = np.max(np.abs(gg1 - gg))
        if err_kfe < tol_kfe:
            #print('converged!')
            break
        gg = gg1

    return gg.flatten(order='F')
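Each pass of the loop is one implicit (backward Euler) step of the Kolmogorov forward equation dg/dt = A'g, so the converged `gg` satisfies A'g ~ 0. A minimal check with a hypothetical two-state generator (rows of A sum to zero):

import numpy as np
from scipy.sparse import csr_matrix, identity

A_small = csr_matrix(np.array([[-0.5, 0.5],
                               [1.0, -1.0]]))
g = solve_kfe(A_small, np.array([0.5, 0.5]), identity(2, format='csr'))
print(A_small.conj().T @ g)  # ~ [0, 0]: g is the stationary distribution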
Example #9
def euler_backward(A, u, ht, f, g):
    '''
    Carry out backward Euler's method for one time step
    
    Input
    -----
    A <CSR matrix>  : Discretization matrix of Poisson operator
    u <array>       : Current solution vector at previous time step
    ht <scalar>     : Time step size
    f <array>       : Current forcing vector
    g <array>       : Current vector containing boundary condition information

    Output
    ------
    u at the next time step

    '''

    # Task: Form the system matrix for backward Euler
    I = speye(A.shape[0], format='csr')
    G = I - ht * A
    b = u + ht * g + ht * f

    # Task: return solution from Jacobi
    return jacobi(G, b, u, 10**-9, 300)
Example #10
def increment_mccf(A, B, X, y, nu=0.125, l=0.01, boundary='constant'):
    r"""
    Incremental Multi-Channel Correlation Filter (MCCF)
    """
    # number of images; number of channels, height and width
    n, k, hx, wx = X.shape
    x_shape = (hx, wx)

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # extended shape
    ext_h = hx + hy - 1
    ext_w = wx + wy - 1
    ext_shape = (ext_h, ext_w)
    # extended dimensionality
    ext_d = ext_h * ext_w

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for x in X:
        # extend image
        ext_x = pad(x, ext_shape, boundary=boundary)
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # store extended image fft as sparse diagonal matrix
        diag_fft_x = spdiags(fft_ext_x.reshape((k, -1)),
                             -np.arange(0, k) * ext_d, ext_d * k, ext_d).T
        # vectorize extended desired response fft
        diag_fft_y = fft_ext_y.ravel()

        # update auto and cross spectral energy matrices
        sXX += diag_fft_x.conj().T.dot(diag_fft_x)
        sXY += diag_fft_x.conj().T.dot(diag_fft_y)

    # combine old and new auto and cross spectral energy matrices
    sXY = (1 - nu) * A + nu * sXY
    sXX = (1 - nu) * B + nu * sXX
    # solve ext_d independent k x k linear systems (with regularization)
    # to obtain desired extended multi-channel correlation filter
    fft_ext_f = spsolve(sXX + l * speye(sXX.shape[-1]), sXY)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft
    ext_f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    # crop extended filter to match desired response shape
    f = crop(ext_f, y_shape)

    return f, sXY, sXX
Example #11
    def check_identiy(self, A):
        rows, cols = A.shape
        if rows != cols:
            return False
        if isinstance(A, sp.ndarray) and not (sp.diag(A) == 1).all():
            return False
        if isinstance(A, smat.spmatrix):
            return (smat.csr_matrix(A) - speye(rows)).nnz == 0

        return True
Example #12
    def test_mat_solve(self, seed: int, sparse: bool):
        A, B = _create_a_b_matrices(seed, sparse)
        A = (speye(*A.shape) if sparse else np.eye(*A.shape)) - A

        X = _petsc_mat_solve(A, B, tol=1e-8)
        if sparse:
            A = A.A
            B = B.A

        np.testing.assert_allclose(A @ X, B, rtol=1e-6)
Example #13
    def __init__(
        self,
        adata: AnnData,
        backward: bool = False,
    ):
        super().__init__(
            speye(adata.n_obs, format="csr"),
            adata,
            backward=backward,
            compute_cond_num=False,
        )
Example #14
def compute_stationary_income_distribution(P, n_income_states, iter_num=50):
    Pt = P.conj().T
    g_z = np.tile(1 / n_income_states, n_income_states)
    for n in range(iter_num):
        g_z_new = np.linalg.solve((speye(n_income_states) - Pt * 1000), g_z)
        diff = np.max(abs(g_z_new - g_z))
        if diff < 1e-5:
            break
        g_z = g_z_new

    return g_z
Example #15
def projection_for_subset(from_small, to_small, n_pre, n_post):
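    # Embed the reduction pair in a larger block-diagonal projection that leaves
    # the first n_pre and the last n_post coordinates untouched:
    #   from_approx = blkdiag(I_pre, from_small, I_post)
    #   to_approx   = blkdiag(I_pre, to_small,  I_post)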

    n_full, n_red = from_small.shape

    spdiag_internal = lambda n: (*spdiag(np.ones(n)).nonzero(), np.ones(n))
    I, J, V = spdiag_internal(n_red + n_pre)
    from_approx = csc_matrix((V, (I, J)),
                             shape=(n_full + n_pre, n_pre + n_red))
    I, J, V = spdiag_internal(n_red + n_pre)
    to_approx = csc_matrix((V, (I, J)), shape=(n_pre + n_red, n_full + n_pre))

    from_approx[n_pre:n_pre + n_full, n_pre:n_pre + n_red] = from_small
    to_approx[n_pre:n_pre + n_red, n_pre:n_pre + n_full] = to_small

    # Expand matrices and add needed values
    dim1_from_approx, dim2_from_approx = from_approx.shape
    from_approx = hstack([
        from_approx,
        csc_matrix(np.zeros((dim1_from_approx, n_post)), dtype=float)
    ],
                         format='csc')
    from_approx = vstack([
        from_approx,
        csc_matrix(np.zeros((n_post, dim2_from_approx + n_post)), dtype=float)
    ],
                         format='csc')

    to_approx = hstack([
        to_approx,
        csc_matrix(np.zeros((dim2_from_approx, n_post)), dtype=float)
    ],
                       format='csc')
    to_approx = vstack([
        to_approx,
        csc_matrix(np.zeros((n_post, dim1_from_approx + n_post)), dtype=float)
    ],
                       format='csc')
    from_approx[dim1_from_approx:, dim2_from_approx:] = speye(n_post)
    to_approx[dim2_from_approx:, dim1_from_approx:] = speye(n_post)

    return from_approx, to_approx
Example #16
File: SR3.py Project: stanleyjs/SR3
def tensor_incidence(X,
                     phi=None,
                     k=5,
                     as_sparse=True,
                     n_pca=10,
                     rank_threshold=None,
                     **kwargs):
    X = linalg.check_tensor(X)
    k = match_and_pad_like(k, X.shape)
    n_pca = match_and_pad_like(n_pca, X.shape)
    rank_threshold = match_and_pad_like(rank_threshold, np.ones(X.ndim))
    L = csr_matrix((np.prod(X.shape), np.prod(X.shape)))
    phis = []
    Ads = []
    for mode in range(X.dim()):
        if k[mode] == 0:
            continue
        else:
            Y = linalg.tenmat(X, mode)
            if phi is None:
                phi_bak, _ = approximate_nn_incidence(
                    Y,
                    k=k[mode],
                    n_pca=n_pca[mode],
                    rank_threshold=rank_threshold[mode],
                    **kwargs)
            else:
                phi_bak = phi[mode]
            phi_c = phi_bak.tocsc()
            # phi = coo_matrix_to_torch(phi)
            left_n = np.prod(
                np.array(X.shape)[np.flatnonzero(np.arange(X.dim()) > mode)])
            left_eye = speye(int(left_n))
            right_n = np.prod(
                np.array(X.shape)[np.flatnonzero(np.arange(X.dim()) < mode)])
            right_eye = speye(int(right_n))
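            # lift the mode-wise incidence to the full tensor:
            # Ad = kron(I_left, kron(phi_c, I_right))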
            Ad = kron(left_eye, kron(phi_c, right_eye))
            L = L + Ad.T.dot(Ad)
            phis.append(phi_bak)
            Ads.append(Ad.tocsc())
    return L.tocsc(), phis, Ads
Example #17
File: SR3.py Project: stanleyjs/SR3
    def fit(self, X, phi=None, mask=False):
        # construct a knn graph on X
        # TODO: make this take nans in X and convert them to
        # the missing data case.
        X = linalg.check_tensor(X)
        self.X = X
        self.L, self.phi, self.A = tensor_incidence(self.X, phi=phi, k=self.k)
        self.nd = self.ndim = self.X.ndim
        self.numel = self.L.shape[0]
        self.I = speye(self.numel).tocsc()
        self.solver = self.solver.build(A=self.nu * self.I + self.L)
        return self
Example #18
def hessian(basis_matrix, P, use_sample_average=True):
    """
    Hessian H of x'Hx+g'x

    P : int
        Number of eta variables per sample.
    """
    N, M = basis_matrix.shape
    H = lil_matrix((M+P*N, M+P*N))
    if use_sample_average:
        H[:M, :M] = basis_matrix.T.dot(basis_matrix)/N
    else:
        H[:M, :M] = speye(M, M)
    return H
Example #19
def hess7(x, lam, sigma=1):
    lmbda = asscalar( lam['eqnonlin'] )
    mu    = asscalar( lam['ineqnonlin'] )
    _, _, d2f = f7(x, True)

    Lxx = sigma * d2f + lmbda * 2 * speye(4, 4) - \
        mu * sparse([
            [        0.0, x[2] * x[3], x[2] * x[3], x[1] * x[2]],
            [x[2] * x[2],         0.0, x[0] * x[3], x[0] * x[2]],
            [x[1] * x[3], x[0] * x[3],         0.0, x[0] * x[1]],
            [x[1] * x[2], x[0] * x[2], x[0] * x[1],         0.0]
        ])
    return Lxx
Example #20
def hess7(x, lam, sigma=1):
    lmbda = asscalar(lam['eqnonlin'])
    mu = asscalar(lam['ineqnonlin'])
    _, _, d2f = f7(x, True)

    Lxx = sigma * d2f + lmbda * 2 * speye(4, 4) - \
        mu * sparse([
            [        0.0, x[2] * x[3], x[2] * x[3], x[1] * x[2]],
            [x[2] * x[2],         0.0, x[0] * x[3], x[0] * x[2]],
            [x[1] * x[3], x[0] * x[3],         0.0, x[0] * x[1]],
            [x[1] * x[2], x[0] * x[2], x[0] * x[1],         0.0]
        ])
    return Lxx
Example #21
def jacobi(A, b, x0, tol, maxiter):
    '''
    Carry out the Jacobi method to invert A

    Input
    -----
    A <CSR matrix>  : Matrix to invert
    b <array>       : Right hand side
    x0 <array>      : Initial solution guess
    tol <scalar>    : Halting tolerance on the relative residual norm
    maxiter <int>   : Maximum number of iterations

    Output
    ------
    x <array>       : Solution to A x = b
    '''

    # This useful function returns an array containing diag(A)
    D = A.diagonal()

    # compute initial residual norm
    r0 = ravel(b - A * x0)
    r0 = sqrt(dot(r0, r0))

    xk = x0
    I = speye(A.shape[0], format='csr')
    # Start Jacobi iterations
    # Task in serial: implement Jacobi formula and halting if tolerance is satisfied
    # Task in parallel: extend the matrix-vector multiply to the parallel setting.
    #                   Additionally, you'll have to compute a vector norm in parallel.
    #
    #                   It is suggested to write separate subroutines for the norm and
    #                   matrix-vector product, as this will make your code much, much
    #                   easier to debug and extend to CG.

    for i in range(maxiter):
        x0 = xk

        xk = x0 - (A * x0) / D + b / D

        # Carry out Jacobi
        rk = ravel(b - A * xk)
        rk = sqrt(dot(rk, rk))

        error = rk / r0
        if error < tol:
            return xk

    # Task: Print out if Jacobi did not converge.
    print("Jacobi did not converge")
    return xk
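A quick correctness check on a small strictly diagonally dominant system, for which Jacobi is guaranteed to converge (the test matrix is an assumption for illustration):

import numpy as np
from scipy.sparse import diags

A = diags([-1.0, 4.0, -1.0], [-1, 0, 1], shape=(8, 8), format='csr')
b = np.ones(8)
x = jacobi(A, b, np.zeros(8), 1e-9, 300)
print(np.linalg.norm(A @ x - b))  # small residual, roughly tol * ||b||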
Example #22
def simplify_contour(c, min_edge=1e-3, angle_threshold=2e-2, smooth=True):
    """
    Simplifies contours by merging small (short) segments and
    with only a small angle difference.

    Optionally applies smoothing to contour shape.

    Parameters
    ----------
    c: ndarray
        Array of vertex coordinates describing a closed loop.
    min_edge: float
        Minimum edge length. Edges shorter than this are merged.
    angle_threshold: float
        Minimum angle. Edges with smaller angle differences are merged.
    smooth: bool
        If True, apply smoothing to the polygon shapes.

    Returns
    -------
    c: ndarray or None
        Simplified vertex array, or None if all edges were removed.

    """
    # Remove small edges by threshold
    vals = [np.ones(c.shape[0]), -np.ones(c.shape[0]), np.ones(c.shape[0])]
    D = spdiags(vals, [1, 0, -c.shape[0] + 1], c.shape[0], c.shape[0])
    edges = D @ c
    c = c[np.linalg.norm(edges, axis=1) > min_edge]
    if len(c) == 0:
        return None

    # Remove nodes on straight lines
    D = spdiags(vals, [1, 0, -c.shape[0] + 1], c.shape[0], c.shape[0])
    H = spdiags(1 / np.linalg.norm(D @ c, axis=1), 0, c.shape[0], c.shape[0])
    DD = H @ D
    c = c[np.linalg.norm(D.T @ DD @ c, axis=-1) > angle_threshold]

    if smooth:
        D = spdiags(vals, [1, 0, -c.shape[0] + 1], c.shape[0], c.shape[0])
        H = spdiags(1 / np.linalg.norm(D @ c, axis=1), 0, c.shape[0],
                    c.shape[0])
        DD = H @ D
        lengths = np.linalg.norm(D @ c, axis=1)
        lengths = 0.5 * abs(D.T) @ lengths  # Mean of edges
        #            c = c - 0.2*lengths[:,None]*(D.T @ DD @ c)
        Nc = c.shape[0]
        c = spsolve(
            speye(Nc, Nc) + 1.0 * spdiags(lengths, 0, Nc, Nc) @ (D.T @ DD), c)

    return c
Example #23
def test_surface_source_morph_shortcut():
    """Test that our shortcut for smooth=0 works."""
    stc = mne.read_source_estimate(fname_smorph)
    morph_identity = compute_source_morph(stc,
                                          'sample',
                                          'sample',
                                          spacing=stc.vertices,
                                          smooth=0,
                                          subjects_dir=subjects_dir)
    stc_back = morph_identity.apply(stc)
    assert_allclose(stc_back.data, stc.data, rtol=1e-4)
    abs_sum = morph_identity.morph_mat - speye(len(stc.data), format='csc')
    abs_sum = np.abs(abs_sum.data).sum()
    assert abs_sum < 1e-4
Example #24
def change_basis(basis, inv_basis, GAM0, GAM1, PSI, PI, C, ignore_GAM0):

    g1 = basis @ GAM1 @ inv_basis

    if ignore_GAM0:
        g0 = speye(g1.shape[0], dtype=float, format='csc')
    else:
        g0 = basis @ GAM0 @ inv_basis

    c = basis @ C
    Psi = basis @ PSI
    Pi = basis @ PI

    return g0, g1, Psi, Pi, c
Example #25
def hpfilter(X, lamb):
    """ Code to implement a Hodrick-Prescott with smoothing parameter
    lambda. Code taken from statsmodels python package (easier than
    importing/installing, https://github.com/statsmodels/statsmodels """

    X = np.asarray(X, float)
    if X.ndim > 1:
        X = X.squeeze()
    nobs = len(X)
    I = speye(nobs, nobs)
    offsets = np.array([0, 1, 2])
    data = np.repeat([[1.], [-2.], [1.]], nobs, axis=1)
    K = dia_matrix((data, offsets), shape=(nobs - 2, nobs))

    trend = spsolve(I + lamb * K.T.dot(K), X, use_umfpack=True)
    return trend
Example #26
def sakurai(n):
    """ Example taken from
        T. Sakurai, H. Tadano, Y. Inadomi and U. Nagashima 
        A moment-based method for large-scale generalized eigenvalue problems 
        Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004) """

    A = speye( n, n )
    d0 = array(r_[5,6*ones(n-2),5])
    d1 = -4*ones(n)
    d2 =  ones(n)
    B = spdiags([d2,d1,d0,d1,d2],[-2,-1,0,1,2],n,n)

    k = arange(1,n+1)
    w_ex = sort(1./(16.*pow(cos(0.5*k*pi/(n+1)),4))) # exact eigenvalues

    return A,B, w_ex
Example #27
def userfcn_reserves_formulation(om, *args):
    """This is the 'formulation' stage userfcn callback that defines the
    user costs and constraints for fixed reserves. It expects to find
    a 'reserves' field in the ppc stored in om, as described above.
    By the time it is passed to this callback, ppc['reserves'] should
    have two additional fields:
        - C{igr}     C{1 x ngr}, indices of generators available for reserves
        - C{rgens}   C{1 x ng}, 1 if gen available for reserves, 0 otherwise
    It is also assumed that if cost or qty were C{ngr x 1}, they have been
    expanded to C{ng x 1} and that everything has been converted to
    internal indexing, i.e. all gens are on-line (by the 'ext2int'
    callback). The optional args are not currently used.
    """
    ## initialize some things
    ppc = om.get_ppc()
    r = ppc['reserves']
    igr = r['igr']                ## indices of gens available to provide reserves
    ngr = len(igr)                ## number of gens available to provide reserves
    ng  = ppc['gen'].shape[0]     ## number of on-line gens (+ disp loads)

    ## variable bounds
    Rmin = zeros(ngr)               ## bound below by 0
    Rmax = Inf * ones(ngr)          ## bound above by ...
    k = find(ppc['gen'][igr, RAMP_10])
    Rmax[k] = ppc['gen'][igr[k], RAMP_10] ## ... ramp rate and ...
    if 'qty' in r:
        k = find(r['qty'][igr] < Rmax)
        Rmax[k] = r['qty'][igr[k]]        ## ... stated max reserve qty
    Rmax = Rmax / ppc['baseMVA']

    ## constraints
    I = speye(ngr, ngr, format='csr')                     ## identity matrix
    Ar = hstack([sparse((ones(ngr), (arange(ngr), igr)), (ngr, ng)), I], 'csr')
    ur = ppc['gen'][igr, PMAX] / ppc['baseMVA']
    lreq = r['req'] / ppc['baseMVA']

    ## cost
    Cw = r['cost'][igr] * ppc['baseMVA']     ## per unit cost coefficients

    ## add them to the model
    om.add_vars('R', ngr, [], Rmin, Rmax)
    om.add_constraints('Pg_plus_R', Ar, [], ur, ['Pg', 'R'])
    om.add_constraints('Rreq', sparse( r['zones'][:, igr] ), lreq, [], ['R'])
    om.add_costs('Rcost', {'N': I, 'Cw': Cw}, ['R'])

    return om
Example #28
def userfcn_reserves_formulation(om, *args):
    """This is the 'formulation' stage userfcn callback that defines the
    user costs and constraints for fixed reserves. It expects to find
    a 'reserves' field in the ppc stored in om, as described above.
    By the time it is passed to this callback, ppc['reserves'] should
    have two additional fields:
        - C{igr}     C{1 x ngr}, indices of generators available for reserves
        - C{rgens}   C{1 x ng}, 1 if gen available for reserves, 0 otherwise
    It is also assumed that if cost or qty were C{ngr x 1}, they have been
    expanded to C{ng x 1} and that everything has been converted to
    internal indexing, i.e. all gens are on-line (by the 'ext2int'
    callback). The optional args are not currently used.
    """
    ## initialize some things
    ppc = om.get_ppc()
    r = ppc['reserves']
    igr = r['igr']  ## indices of gens available to provide reserves
    ngr = len(igr)  ## number of gens available to provide reserves
    ng = ppc['gen'].shape[0]  ## number of on-line gens (+ disp loads)

    ## variable bounds
    Rmin = zeros(ngr)  ## bound below by 0
    Rmax = Inf * ones(ngr)  ## bound above by ...
    k = find(ppc['gen'][igr, RAMP_10])
    Rmax[k] = ppc['gen'][igr[k], RAMP_10]  ## ... ramp rate and ...
    if 'qty' in r:
        k = find(r['qty'][igr] < Rmax)
        Rmax[k] = r['qty'][igr[k]]  ## ... stated max reserve qty
    Rmax = Rmax / ppc['baseMVA']

    ## constraints
    I = speye(ngr, ngr, format='csr')  ## identity matrix
    Ar = hstack([sparse((ones(ngr), (arange(ngr), igr)), (ngr, ng)), I], 'csr')
    ur = ppc['gen'][igr, PMAX] / ppc['baseMVA']
    lreq = r['req'] / ppc['baseMVA']

    ## cost
    Cw = r['cost'][igr] * ppc['baseMVA']  ## per unit cost coefficients

    ## add them to the model
    om.add_vars('R', ngr, [], Rmin, Rmax)
    om.add_constraints('Pg_plus_R', Ar, [], ur, ['Pg', 'R'])
    om.add_constraints('Rreq', sparse(r['zones'][:, igr]), lreq, [], ['R'])
    om.add_costs('Rcost', {'N': I, 'Cw': Cw}, ['R'])

    return om
Example #29
def compute_fundamental_matrix(P, fast=True, drop_tol=1e-5, fill_factor=1000):
    """Computes the fundamental matrix for an absorbing random walk.

    Parameters
    ----------
    P : scipy.sparse matrix
        The transition probability matrix of the absorbing random walk. To
        construct this matrix, you start from the original transition matrix
        and delete the rows that correspond to the absorbing nodes.

    fast : bool, optional
        If True (default), use the approximate incomplete-LU (SuperLU)
        solver from scipy.sparse.linalg.

    drop_tol : float, optional
        If `fast` is True, the `drop_tol` parameter of the SuperLU solver is
        set to this value (default is 1e-5).

    fill_factor: int, optional
        If `fast` is True, the `fill_factor` parameter of the SuperLU
        solver is set to this value (default is 1000).

    Returns
    -------
    F : scipy.sparse matrix
        The fundamental matrix of the random walk. Element (i,j) holds the
        expected number of times the random walk will be in state j before
        absorption, when it starts from state i. For more information, check
        [1]_.

    References
    ----------
    .. [1] Doyle, Peter G., and J. Laurie Snell.
       Random walks and electric networks.
       Carus mathematical monographs 22 (2000).
       https://math.dartmouth.edu/~doyle/docs/walks/walks.pdf

    """
    n = P.shape[0]
    F_inv = speye(n, format='csc') - P.tocsc()
    if fast:
        solver = spilu(F_inv, drop_tol=drop_tol, fill_factor=fill_factor)
        F = matrix(solver.solve(eye(n)))
    else:
        F = spinv(F_inv).todense()
    return F
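A small sanity check, assuming a hypothetical 2x2 transient block Q (rows and columns of the absorbing states already removed); the fundamental matrix should equal inv(I - Q):

import numpy as np
from scipy.sparse import csr_matrix

Q = csr_matrix(np.array([[0.50, 0.25],
                         [0.25, 0.50]]))
F = compute_fundamental_matrix(Q, fast=False)
print(np.allclose(F, np.linalg.inv(np.eye(2) - Q.toarray())))  # True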
Example #30
def compute_fundamental_matrix(P, fast=True, drop_tol=1e-5, fill_factor=1000):
    """Computes the fundamental matrix for an absorbing random walk.

    Parameters
    ----------
    P : scipy.sparse matrix
        The transition probability matrix of the absorbing random walk. To
        construct this matrix, you start from the original transition matrix
        and delete the rows that correspond to the absorbing nodes.

    fast : bool, optional
        If True (default), use the approximate incomplete-LU (SuperLU)
        solver from scipy.sparse.linalg.

    drop_tol : float, optional
        If `fast` is True, the `drop_tol` parameter of the SuperLU solver is
        set to this value (default is 1e-5).

    fill_factor: int, optional
        If `fast` is True, the `fill_factor` parameter of the SuperLU
        solver is set to this value (default is 1000).

    Returns
    -------
    F : scipy.sparse matrix
        The fundamental matrix of the random walk. Element (i,j) holds the
        expected number of times the random walk will be in state j before
        absorption, when it starts from state i. For more information, check
        [1]_.

    References
    ----------
    .. [1] Doyle, Peter G., and J. Laurie Snell.
       Random walks and electric networks.
       Carus mathematical monographs 22 (2000).
       https://math.dartmouth.edu/~doyle/docs/walks/walks.pdf

    """
    n = P.shape[0]
    F_inv = speye(n, format='csc') - P.tocsc()
    if fast:
        solver = spilu(F_inv, drop_tol=drop_tol, fill_factor=fill_factor)
        F = matrix(solver.solve(eye(n)))
    else:
        F = spinv(F_inv).todense()
    return F
Example #31
def _make_morph_map_hemi(subject_from, subject_to, subjects_dir, reg_from,
                         reg_to):
    """Construct morph map for one hemisphere."""
    from scipy.sparse import csr_matrix, eye as speye
    # add speedy short-circuit for self-maps
    if subject_from == subject_to and reg_from == reg_to:
        fname = op.join(subjects_dir, subject_from, 'surf', reg_from)
        n_pts = len(read_surface(fname, verbose=False)[0])
        return speye(n_pts, n_pts, format='csr')

    # load surfaces and normalize points to be on unit sphere
    fname = op.join(subjects_dir, subject_from, 'surf', reg_from)
    from_rr, from_tri = read_surface(fname, verbose=False)
    fname = op.join(subjects_dir, subject_to, 'surf', reg_to)
    to_rr = read_surface(fname, verbose=False)[0]
    _normalize_vectors(from_rr)
    _normalize_vectors(to_rr)

    # from surface: get nearest neighbors, find triangles for each vertex
    nn_pts_idx = _compute_nearest(from_rr, to_rr, method='cKDTree')
    from_pt_tris = _triangle_neighbors(from_tri, len(from_rr))
    from_pt_tris = [from_pt_tris[pt_idx].astype(int) for pt_idx in nn_pts_idx]
    from_pt_lens = np.cumsum([0] + [len(x) for x in from_pt_tris])
    from_pt_tris = np.concatenate(from_pt_tris)
    assert from_pt_tris.ndim == 1
    assert from_pt_lens[-1] == len(from_pt_tris)

    # find triangle in which point lies and assoc. weights
    tri_inds = []
    weights = []
    tri_geom = _get_tri_supp_geom(dict(rr=from_rr, tris=from_tri))
    weights, tri_inds = _find_nearest_tri_pts(to_rr,
                                              from_pt_tris,
                                              from_pt_lens,
                                              run_all=False,
                                              reproject=False,
                                              **tri_geom)

    nn_idx = from_tri[tri_inds]
    weights = np.array(weights)

    row_ind = np.repeat(np.arange(len(to_rr)), 3)
    this_map = csr_matrix((weights.ravel(), (row_ind, nn_idx.ravel())),
                          shape=(len(to_rr), len(from_rr)))
    return this_map
Example #32
def userfcn_dcline_formulation(om, args):
    """This is the 'formulation' stage userfcn callback that defines the
    user constraints for the dummy generators representing DC lines.
    It expects to find a 'dcline' field in the ppc stored in om, as
    described above. By the time it is passed to this callback,
    MPC.dcline should contain only in-service lines and the from and
    two bus columns should be converted to internal indexing. The
    optional args are not currently used.

    If Pf, Pt and Ploss are the flow at the "from" end, flow at the
    "to" end and loss respectively, and L0 and L1 are the linear loss
    coefficients, then the relationships between them are given by:
        Pf - Ploss = Pt
        Ploss = L0 + L1 * Pf
    If Pgf and Pgt represent the injections of the dummy generators
    representing the DC line injections into the network, then
    Pgf = -Pf and Pgt = Pt, and we can combine all of the above to
    get the following constraint on Pgf and Pgt:
        -Pgf - (L0 - L1 * Pgf) = Pgt
    which can be written:
        -L0 <= (1 - L1) * Pgf + Pgt <= -L0
    """
    ## define named indices into data matrices
    c = idx_dcline.c

    ## initialize some things
    ppc = om.get_ppc()
    dc = ppc['dcline']
    ndc = dc.shape[0]              ## number of in-service DC lines
    ng  = ppc['gen'].shape[0] - 2 * ndc  ## number of original gens/disp loads

    ## constraints
    nL0 = -dc[:, c['LOSS0']] / ppc['baseMVA']
    L1  =  dc[:, c['LOSS1']]

    Adc = hstack([sparse((ndc, ng)), spdiags(1 - L1, 0, ndc, ndc), speye(ndc, ndc)], format="csr")
    ## add them to the model
    om = om.add_constraints('dcline', Adc, nL0, nL0, ['Pg'])

    return om
Example #33
def userfcn_dcline_formulation(om, args):
    """This is the 'formulation' stage userfcn callback that defines the
    user constraints for the dummy generators representing DC lines.
    It expects to find a 'dcline' field in the ppc stored in om, as
    described above. By the time it is passed to this callback,
    MPC.dcline should contain only in-service lines and the from and
    to bus columns should be converted to internal indexing. The
    optional args are not currently used.

    If Pf, Pt and Ploss are the flow at the "from" end, flow at the
    "to" end and loss respectively, and L0 and L1 are the linear loss
    coefficients, then the relationships between them are given by:
        Pf - Ploss = Pt
        Ploss = L0 + L1 * Pf
    If Pgf and Pgt represent the injections of the dummy generators
    representing the DC line injections into the network, then
    Pgf = -Pf and Pgt = Pt, and we can combine all of the above to
    get the following constraint on Pgf and Pgt:
        -Pgf - (L0 - L1 * Pgf) = Pgt
    which can be written:
        -L0 <= (1 - L1) * Pgf + Pgt <= -L0
    """
    ## define named indices into data matrices
    c = idx_dcline.c

    ## initialize some things
    ppc = om.get_ppc()
    dc = ppc['dcline']
    ndc = dc.shape[0]  ## number of in-service DC lines
    ng = ppc['gen'].shape[0] - 2 * ndc  ## number of original gens/disp loads

    ## constraints
    nL0 = -dc[:, c['LOSS0']] / ppc['baseMVA']
    L1 = dc[:, c['LOSS1']]
    Adc = hstack(
        [sparse((ndc, ng)),
         spdiags(1 - L1, 0, ndc, ndc),
         speye(ndc, ndc)],
        format="csr")

    ## add them to the model
    om = om.add_constraints('dcline', Adc, nL0, nL0, ['Pg'])

    return om
Example #34
def _invert_matrix(mat, use_petsc: bool = True, **kwargs) -> np.ndarray:
    if use_petsc:
        try:
            import petsc4py  # noqa
        except ImportError:
            global _PETSC_ERROR_MSG_SHOWN
            if not _PETSC_ERROR_MSG_SHOWN:
                _PETSC_ERROR_MSG_SHOWN = True
                logg.warning(_PETSC_ERROR_MSG.format(_DEFAULT_SOLVER))
            kwargs["solver"] = _DEFAULT_SOLVER
            use_petsc = False

    if use_petsc:
        return _solve_lin_system(mat,
                                 speye(mat.shape[0]),
                                 use_petsc=True,
                                 **kwargs)

    return sinv(mat).toarray() if issparse(mat) else np.linalg.inv(mat)
Example #35
    def gram_matrix(self):
        '''
        Return the gram matrix of the basis.

        Returns
        -------
        numpy.ndarray
            The gram matrix of the basis.

        '''
        if self.is_orthonormal:
            M = speye(self.indices.cardinal())
        else:
            G = self.bases.gram_matrix()
            ind = self.indices.array
            M = G[0][np.ix_(ind[:, 0], ind[:, 0])]
            for i in np.arange(1, ind.shape[1]):
                M *= G[i][np.ix_(ind[:, i], ind[:, i])]
        return M
Example #36
def computePiMethod1():
    e0 = time.time()
    Q = sp.dok_matrix((size,size)) 
    fillOffDiagonal(Q)
    # Set the diagonal of Q such that the row sums are zero
    Q.setdiag( -Q*ones(size) )
    # Compute a suitable stochastic matrix by means of uniformization
    l = min(Q.values())*1.001  # avoid periodicity, see Trivedi's book
    P = sp.speye(size, size) - Q/l
    # compute Pi
    P =  P.tocsr()
    pi = zeros(size);  pi1 = zeros(size)
    pi[0] = 1;
    n = norm(pi - pi1,1); i = 0; 
    while n > 1e-3 and i < 1e5:
        pi1 = pi*P
        pi = pi1*P   # avoid copying pi1 to pi
        n = norm(pi - pi1,1); i += 1
    print "Method 1: ", time.time() - e0, i
    return pi
Example #37
def computePiMethod1():
    e0 = time.time()
    Q = sp.dok_matrix((size,size))
    fillOffDiagonal(Q)
    # Set the diagonal of Q such that the row sums are zero
    Q.setdiag( -Q*ones(size) )
    # Compute a suitable stochastic matrix by means of uniformization
    l = min(Q.values())*1.001  # avoid periodicity, see the book of Bolch et al.
    P = sp.speye(size, size) - Q/l
    # compute Pi
    P =  P.tocsr()
    pi = zeros(size);  pi1 = zeros(size)
    pi[0] = 1;
    n = norm(pi - pi1,1); i = 0;
    while n > 1e-3 and i < 1e5:
        pi1 = pi*P
        pi = pi1*P   # avoid copying pi1 to pi
        n = norm(pi - pi1,1); i += 1
    print "Method 1: ", time.time() - e0, i
    return pi
Example #38
def _make_morph_map_hemi(subject_from, subject_to, subjects_dir, reg_from,
                         reg_to):
    """Construct morph map for one hemisphere."""
    # add speedy short-circuit for self-maps
    if subject_from == subject_to and reg_from == reg_to:
        fname = op.join(subjects_dir, subject_from, 'surf', reg_from)
        n_pts = len(read_surface(fname, verbose=False)[0])
        return speye(n_pts, n_pts, format='csr')

    # load surfaces and normalize points to be on unit sphere
    fname = op.join(subjects_dir, subject_from, 'surf', reg_from)
    from_rr, from_tri = read_surface(fname, verbose=False)
    fname = op.join(subjects_dir, subject_to, 'surf', reg_to)
    to_rr = read_surface(fname, verbose=False)[0]
    _normalize_vectors(from_rr)
    _normalize_vectors(to_rr)

    # from surface: get nearest neighbors, find triangles for each vertex
    nn_pts_idx = _compute_nearest(from_rr, to_rr)
    from_pt_tris = _triangle_neighbors(from_tri, len(from_rr))
    from_pt_tris = [from_pt_tris[pt_idx] for pt_idx in nn_pts_idx]

    # find triangle in which point lies and assoc. weights
    tri_inds = []
    weights = []
    tri_geom = _get_tri_supp_geom(dict(rr=from_rr, tris=from_tri))
    for pt_tris, to_pt in zip(from_pt_tris, to_rr):
        p, q, idx, dist = _find_nearest_tri_pt(to_pt,
                                               tri_geom,
                                               pt_tris,
                                               run_all=False)
        tri_inds.append(idx)
        weights.append([1. - (p + q), p, q])

    nn_idx = from_tri[tri_inds]
    weights = np.array(weights)

    row_ind = np.repeat(np.arange(len(to_rr)), 3)
    this_map = csr_matrix((weights.ravel(), (row_ind, nn_idx.ravel())),
                          shape=(len(to_rr), len(from_rr)))
    return this_map
Example #39
def integer_left_basis_semipos(S):

    # TODO: check sparsity

    n,r = S.shape

    T = hstack((speye(n), S), format='lil')

    for j in range(n+r-1,n-1,-1):

        zero_rows,_ = T[:,j].nonzero()
        zero_ix = list(set(range(T.shape[0])).difference(zero_rows))
        Tnew = T[zero_ix, 0:j]
        posinds,_,_ = sfind(T[:,j]>0)
        neginds,_,_ = sfind(T[:,j]<0)

        lni = len(neginds)

        for i in posinds:
            nnz_rows, nnz_cols = (T[[i]*lni,:n+1] + T[neginds,:n+1]).nonzero()
            zero_cols = list(set(range(T.shape[1])).difference(nnz_cols))

            for k in range(lni):
                flags = T[:, zero_cols[k]]
                flags[i] = True
                flags[neginds[k]] = True

                if flags.nnz == flags.shape[0]:
                    newrow = -1*T[neginds[k],j]*T[i,:j] + T[i,j]*T[neginds[k],:j]
                    Tnew = vstack([Tnew, newrow])

        T = Tnew

    g = np.zeros((T.shape[0], 1))

    for c in range(len(g)):
        g[c] = local_gcd(T[c, :])

    return T / g, T, g
Example #40
def _make_morph_map_hemi(subject_from, subject_to, subjects_dir, reg_from,
                         reg_to):
    """Construct morph map for one hemisphere."""
    # add speedy short-circuit for self-maps
    if subject_from == subject_to and reg_from == reg_to:
        fname = op.join(subjects_dir, subject_from, 'surf', reg_from)
        n_pts = len(read_surface(fname, verbose=False)[0])
        return speye(n_pts, n_pts, format='csr')

    # load surfaces and normalize points to be on unit sphere
    fname = op.join(subjects_dir, subject_from, 'surf', reg_from)
    from_rr, from_tri = read_surface(fname, verbose=False)
    fname = op.join(subjects_dir, subject_to, 'surf', reg_to)
    to_rr = read_surface(fname, verbose=False)[0]
    _normalize_vectors(from_rr)
    _normalize_vectors(to_rr)

    # from surface: get nearest neighbors, find triangles for each vertex
    nn_pts_idx = _compute_nearest(from_rr, to_rr)
    from_pt_tris = _triangle_neighbors(from_tri, len(from_rr))
    from_pt_tris = [from_pt_tris[pt_idx] for pt_idx in nn_pts_idx]

    # find triangle in which point lies and assoc. weights
    tri_inds = []
    weights = []
    tri_geom = _get_tri_supp_geom(dict(rr=from_rr, tris=from_tri))
    for pt_tris, to_pt in zip(from_pt_tris, to_rr):
        p, q, idx, dist = _find_nearest_tri_pt(to_pt, tri_geom, pt_tris,
                                               run_all=False)
        tri_inds.append(idx)
        weights.append([1. - (p + q), p, q])

    nn_idx = from_tri[tri_inds]
    weights = np.array(weights)

    row_ind = np.repeat(np.arange(len(to_rr)), 3)
    this_map = csr_matrix((weights.ravel(), (row_ind, nn_idx.ravel())),
                          shape=(len(to_rr), len(from_rr)))
    return this_map
Example #41
def hpfilter(X, lamb=1600):
    """
    Hodrick-Prescott filter

    Parameters
    ----------
    X : array-like
        The 1d ndarray timeseries to filter of length (nobs,) or (nobs,1)
    lamb : float
        The Hodrick-Prescott smoothing parameter. A value of 1600 is
        suggested for quarterly data. Ravn and Uhlig suggest using a value
        of 6.25 (1600/4**4) for annual data and 129600 (1600*3**4) for monthly
        data.

    Returns
    -------
    cycle : array
        The estimated cycle in the data given lamb.
    trend : array
        The estimated trend in the data given lamb.

    Examples
    ---------
    >>> import statsmodels.api as sm
    >>> dta = sm.datasets.macrodata.load()
    >>> X = dta.data['realgdp']
    >>> cycle, trend = sm.tsa.filters.hpfilter(X,1600)

    Notes
    -----
    The HP filter removes a smooth trend, `T`, from the data `X` by solving

    min sum((X[t] - T[t])**2 + lamb*((T[t+1] - T[t]) - (T[t] - T[t-1]))**2)
     T   t

    Here we implemented the HP filter as a ridge-regression rule using
    scipy.sparse. In this sense, the solution can be written as

    T = inv(I + lamb*K'K)X

    where I is a nobs x nobs identity matrix, and K is a (nobs-2) x nobs matrix
    such that

    K[i,j] = 1 if j == i or j == i + 2
    K[i,j] = -2 if j == i + 1
    K[i,j] = 0 otherwise

    References
    ----------
    Hodrick, R.J, and E. C. Prescott. 1980. "Postwar U.S. Business Cycles: An
        Empirical Investigation." `Carnegie Mellon University discussion
        paper no. 451`.
    Ravn, M.O and H. Uhlig. 2002. "On Adjusting the Hodrick-Prescott
        Filter for the Frequency of Observations." `The Review of Economics and
        Statistics`, 84(2), 371-80.
    """
    _pandas_wrapper = _maybe_get_pandas_wrapper(X)
    X = np.asarray(X, float)
    if X.ndim > 1:
        X = X.squeeze()
    nobs = len(X)
    I = speye(nobs,nobs)
    offsets = np.array([0,1,2])
    data = np.repeat([[1.],[-2.],[1.]], nobs, axis=1)
    K = dia_matrix((data, offsets), shape=(nobs-2,nobs))

    import scipy
    if (X.dtype != np.dtype('<f8') and
            int(scipy.__version__[:3].split('.')[1]) < 11):
        #scipy umfpack bug on Big Endian machines, will be fixed in 0.11
        use_umfpack = False
    else:
        use_umfpack = True

    if scipy.__version__[:3] == '0.7':
        #doesn't have use_umfpack option
        #will be broken on big-endian machines with scipy 0.7 and umfpack
        trend = spsolve(I+lamb*K.T.dot(K), X)
    else:
        trend = spsolve(I+lamb*K.T.dot(K), X, use_umfpack=use_umfpack)
    cycle = X-trend
    if _pandas_wrapper is not None:
        return _pandas_wrapper(cycle), _pandas_wrapper(trend)
    return cycle, trend
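To make the penalty term concrete, a minimal sketch of the second-difference matrix K for nobs = 5, built exactly as in the code above:

import numpy as np
from scipy.sparse import dia_matrix

nobs = 5
data = np.repeat([[1.], [-2.], [1.]], nobs, axis=1)
K = dia_matrix((data, np.array([0, 1, 2])), shape=(nobs - 2, nobs))
print(K.toarray())
# [[ 1. -2.  1.  0.  0.]
#  [ 0.  1. -2.  1.  0.]
#  [ 0.  0.  1. -2.  1.]]
# each row of K @ T is a second difference T[t] - 2*T[t+1] + T[t+2]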
Example #42
def hpfilter(X, lamb=1600):
    """
    Hodrick-Prescott filter

    Parameters
    ----------
    X : array-like
        The 1d ndarray timeseries to filter of length (nobs,) or (nobs,1)
    lamb : float
        The Hodrick-Prescott smoothing parameter. A value of 1600 is
        suggested for quarterly data. Ravn and Uhlig suggest using a value
        of 6.25 (1600/4**4) for annual data and 129600 (1600*3**4) for monthly
        data.

    Returns
    -------
    cycle : array
        The estimated cycle in the data given lamb.
    trend : array
        The estimated trend in the data given lamb.

    Examples
    ---------
    >>> import scikits.statsmodels.api as sm
    >>> dta = sm.datasets.macrodata.load()
    >>> X = dta.data['realgdp']
    >>> cycle, trend = sm.tsa.filters.hpfilter(X,1600)

    Notes
    -----
    The HP filter removes a smooth trend, `T`, from the data `X` by solving

    min sum((X[t] - T[t])**2 + lamb*((T[t+1] - T[t]) - (T[t] - T[t-1]))**2)
     T   t

    Here we implemented the HP filter as a ridge-regression rule using
    scipy.sparse. In this sense, the solution can be written as

    T = inv(I + lamb*K'K)X

    where I is a nobs x nobs identity matrix, and K is a (nobs-2) x nobs matrix
    such that

    K[i,j] = 1 if j == i or j == i + 2
    K[i,j] = -2 if j == i + 1
    K[i,j] = 0 otherwise

    References
    ----------
    Hodrick, R.J, and E. C. Prescott. 1980. "Postwar U.S. Business Cycles: An
        Empirical Investigation." `Carnegie Mellon University discussion
        paper no. 451`.
    Ravn, M.O and H. Uhlig. 2002. "On Adjusting the Hodrick-Prescott
        Filter for the Frequency of Observations." `The Review of Economics and
        Statistics`, 84(2), 371-80.
    """
    X = np.asarray(X)
    if X.ndim > 1:
        X = X.squeeze()
    nobs = len(X)
    I = speye(nobs,nobs)
    offsets = np.array([0,1,2])
    data = np.repeat([[1],[-2],[1]], nobs, axis=1)
    K = dia_matrix((data, offsets), shape=(nobs-2,nobs))
    trend = spsolve(I+lamb*K.T.dot(K), X)
    cycle = X-trend
    return cycle, trend
Example #43
    def check_lil_eye(self):
        for dim in [(3, 5), (5, 3)]:
            for k in range(-5, 5):
                r, c = dim
                assert_array_equal(lil_eye(dim, k).todense(),
                                   speye(r, c, k).todense())
Example #44
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX

###########################################################################
if __name__ == '__main__':
    from scipy.sparse import spdiags, speye
    import time

##     def operatorB( vec ):
##         return vec

    n = 100
    vals = [nm.arange( n, dtype = nm.float64 ) + 1]
    operatorA = spdiags( vals, 0, n, n )
    operatorB = speye( n, n )
#    operatorB[0,0] = 0
    operatorB = nm.eye( n, n )
    Y = nm.eye( n, 3 )


#    X = sc.rand( n, 3 )
    xfile = {100 : 'X.txt', 1000 : 'X2.txt', 10000 : 'X3.txt'}
    X = nm.fromfile( xfile[n], dtype = nm.float64, sep = ' ' )
    X.shape = (n, 3)

    ivals = [1./vals[0]]
    def precond( x ):
        invA = spdiags( ivals, 0, n, n )
        y = invA  * x
        if sp.issparse( y ):
Example #45
def imccf(A, B, n_ab, X, y, l=0.01, boundary='constant', crop_filter=True,
          f=1.0):
    r"""
    Incremental Multi-Channel Correlation Filter (MCCF)

    Parameters
    ----------
    A :
    B :
    n_ab : `int`
        Total number of samples used to produce A and B.
    X : ``(n_images, n_channels, height, width)`` `ndarray`
        Training images.
    y : ``(1, height, width)`` `ndarray`
        Desired response.
    l : `float`, optional
        Regularization parameter.
    boundary : str {`constant`, `symmetric`}, optional
        Determines how the image is padded.
    crop_filter : `bool`, optional
    f : ``[0, 1]`` `float`, optional
        Forgetting factor that weights the relative contribution of new
        samples vs old samples. If 1.0, all samples are weighted equally.
        If <1.0, more emphasis is put on the new samples.

    Returns
    -------
    mccf : ``(1, height, width)`` `ndarray`
        Multi-Channel Correlation Filter (MCCF) filter associated to the
        training images.
    sXY :
    sXX :

    References
    ----------
    .. [1] David S. Bolme, J. Ross Beveridge,  Bruce A. Draper and Yui Man Lui.
    "Visual Object Tracking using Adaptive Correlation Filters". CVPR, 2010.
    .. [2] Hamed Kiani Galoogahi, Terence Sim,  Simon Lucey. "Multi-Channel
    Correlation Filters". ICCV, 2013.
    """
    # number of images; number of channels, height and width
    n_x, k, hz, wz = X.shape

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # multiply the number of samples used to produce the auto and cross
    # spectral energy matrices A and B by forgetting factor
    n_ab *= f
    # total number of samples
    n = n_ab + n_x
    # compute weighting factors
    nu_ab = n_ab / n
    nu_x = n_x / n

    # extended shape
    ext_h = hz + hy - 1
    ext_w = wz + wy - 1
    ext_shape = (ext_h, ext_w)
    # extended dimensionality
    ext_d = ext_h * ext_w

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # extend images
    ext_X = pad(X, ext_shape, boundary=boundary)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for ext_x in ext_X:
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # store extended image fft as sparse diagonal matrix
        diag_fft_x = spdiags(fft_ext_x.reshape((k, -1)),
                             -np.arange(0, k) * ext_d, ext_d * k, ext_d).T
        # vectorize extended desired response fft
        diag_fft_y = fft_ext_y.ravel()

        # update auto and cross spectral energy matrices
        sXX += diag_fft_x.conj().T.dot(diag_fft_x)
        sXY += diag_fft_x.conj().T.dot(diag_fft_y)

    # combine old and new auto and cross spectral energy matrices
    sXY = nu_ab * A + nu_x * sXY
    sXX = nu_ab * B + nu_x * sXX
    # solve ext_d independent k x k linear systems (with regularization)
    # to obtain desired extended multi-channel correlation filter
    fft_ext_f = spsolve(sXX + l * speye(sXX.shape[-1]), sXY)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft
    f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    if crop_filter:
        # crop extended filter to match desired response shape
        f = crop(f, y_shape)

    return f, sXY, sXX
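
# Usage sketch (not part of the original example): bootstrap A and B with the
# batch variant ``mccf`` (shown in the next example), then fold in a second,
# hypothetical batch incrementally. Data and shapes are made up.
import numpy as np

X1 = np.random.randn(10, 2, 24, 24)           # 10 two-channel 24x24 patches
X2 = np.random.randn(5, 2, 24, 24)            # 5 new patches
y = np.zeros((1, 9, 9))
y[0, 4, 4] = 1.0                              # impulse as desired response

_, A, B = mccf(X1, y)                         # initial cross/auto correlations
filt, A, B = imccf(A, B, n_ab=10, X=X2, y=y, f=0.95)  # discount old samples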
Exemplo n.º 46
def mccf(X, y, l=0.01, boundary='constant', crop_filter=True):
    r"""
    Multi-Channel Correlation Filter (MCCF).

    Parameters
    ----------
    X : ``(n_images, n_channels, height, width)`` `ndarray`
        Training images.
    y : ``(1, height, width)`` `ndarray`
        Desired response.
    l : `float`, optional
        Regularization parameter.
    boundary : ``{'constant', 'symmetric'}``, optional
        Determines how the image is padded.
    crop_filter : `bool`, optional
        If ``True``, the filter is cropped to the shape of the desired
        response; if ``False``, its shape is ``X[0].shape + y.shape - 1``.

    Returns
    -------
    f : ``(1, height, width)`` `ndarray`
        Multi-Channel Correlation Filter (MCCF) associated to the training
        images.
    sXY : ``(N,)`` `ndarray`
        The cross-correlation array, where ``N`` is the extended
        dimensionality times the number of channels.
    sXX : ``(N, N)`` `ndarray`
        The auto-correlation array, with the same ``N``.

    References
    ----------
    .. [1] H. K. Galoogahi, T. Sim, and S. Lucey. "Multi-Channel Correlation
        Filters". ICCV, 2013.
    """
    # number of images; number of channels, height and width
    n, k, hx, wx = X.shape

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # extended shape
    ext_h = hx + hy - 1
    ext_w = wx + wy - 1
    ext_shape = (ext_h, ext_w)
    # extended dimensionality
    ext_d = ext_h * ext_w

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # extend images
    ext_X = pad(X, ext_shape, boundary=boundary)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for ext_x in ext_X:
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # store extended image fft as sparse diagonal matrix
        diag_fft_x = spdiags(fft_ext_x.reshape((k, -1)),
                             -np.arange(0, k) * ext_d, ext_d * k, ext_d).T
        # vectorize extended desired response fft
        diag_fft_y = fft_ext_y.ravel()

        # update auto and cross spectral energy matrices
        sXX += diag_fft_x.conj().T.dot(diag_fft_x)
        sXY += diag_fft_x.conj().T.dot(diag_fft_y)

    # solve ext_d independent k x k linear systems (with regularization)
    # to obtain desired extended multi-channel correlation filter
    fft_ext_f = spsolve(sXX + l * speye(sXX.shape[-1]), sXY)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft
    f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))

    if crop_filter:
        # crop extended filter to match desired response shape
        f = crop(f, y_shape)

    return f, sXY, sXX
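
# Usage sketch (not part of the original example): train a single-channel
# filter on random patches against a centred Gaussian response. Data is
# made up; shapes follow the docstring above.
import numpy as np

X = np.random.randn(8, 1, 32, 32)                       # 8 training patches
gx, gy = np.meshgrid(np.arange(15) - 7, np.arange(15) - 7)
y = np.exp(-(gx**2 + gy**2) / (2 * 2.0**2))[None]       # (1, 15, 15) response

filt, sXY, sXX = mccf(X, y, l=0.01)
print(filt.shape)               # expected (1, 15, 15) when crop_filter=True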
Exemplo n.º 47
def default_scaling(x):
    n, = np.shape(x)
    return speye(n)
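
# Usage sketch (not part of the original example):
import numpy as np

D = default_scaling(np.zeros(4))    # 4x4 sparse identity scaling matrix
print(D.toarray())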
Exemplo n.º 48
File: ller.py Project: imgemp/ller
def ller(X, Y, n_neighbors, n_components, mu=0.5, gamma=None,
         reg=1e-3,eigen_solver='auto', tol=1e-6, max_iter=100,
         random_state=None):
    """
    Locally Linear Embedding for Regression (LLER)

    Parameters
    ----------
    X : ndarray, 2-dimensional
        The data matrix, shape (num_data_points, num_dims)

    Y : ndarray, 1 or 2-dimensional
        The response matrix, shape (num_response_points, num_responses).
        Y is assumed to provide responses for X[:num_response_points].

    n_neighbors : int
        Number of neighbors for kNN graph construction.

    n_components : int
        Number of dimensions for embedding.

    mu : float, optional
        Influence of the Y-similarity penalty.

    gamma : float, optional
        Scaling factor for RBF kernel on Y.
        Defaults to the inverse of the median distance between rows of Y.

    reg : float, optional
        Regularization constant used when solving for the local barycenter
        weights.

    eigen_solver : {'auto', 'arpack', 'dense'}, optional
        Eigendecomposition method used to compute the final embedding.

    tol : float, optional
        Convergence tolerance passed to the 'arpack' solver.

    max_iter : int, optional
        Maximum number of iterations for the 'arpack' solver.

    random_state : int, RandomState instance or None, optional
        Seed for the random initialization used by 'arpack'.

    Returns
    -------
    embedding : ndarray, 2-dimensional
        The embedding of X, shape (num_points, n_components)

    lle_error : float
        The embedding error of X (for a fixed reconstruction matrix W)

    ller_error : float
        The embedding error of X that takes Y into account.
    """
    if eigen_solver not in ('auto', 'arpack', 'dense'):
        raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)

    if Y.ndim == 1:
        Y = Y[:, None]

    if gamma is None:
        dists = pairwise_distances(Y)
        gamma = 1.0 / np.median(dists)

    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
    nbrs.fit(X)
    X = nbrs._fit_X

    Nx, d_in = X.shape
    Ny = Y.shape[0]

    if n_components > d_in:
        raise ValueError("output dimension must be less than or equal "
                         "to input dimension")
    if n_neighbors >= Nx:
        raise ValueError("n_neighbors must be less than number of points")
    if n_neighbors <= 0:
        raise ValueError("n_neighbors must be positive")
    if Nx < Ny:
        raise ValueError("X should have at least as many points as Y")

    M_sparse = (eigen_solver != 'dense')

    W = barycenter_kneighbors_graph(
        nbrs, n_neighbors=n_neighbors, reg=reg)

    if M_sparse:
        M = speye(*W.shape, format=W.format) - W
        M = (M.T * M).tocsr()
    else:
        M = (W.T * W - W.T - W).toarray()
        M.flat[::M.shape[0] + 1] += 1

    P = rbf_kernel(Y, gamma=gamma)
    L = laplacian(P, normed=False)
    M /= np.abs(M).max()  # optional scaling step
    L /= np.abs(L).max()
    if Nx > Ny:
        # zeros = csr_matrix((Nx-Ny,Nx-Ny),dtype=M.dtype)
        # L = bmat([[L, None], [None, zeros]])
        ones = csr_matrix(np.ones((Nx-Ny,Nx-Ny)),dtype=M.dtype)
        L = bmat([[L, None], [None, ones]])
    omega = M + mu * L
    embedding, lle_error = null_space(omega, n_components, k_skip=1,
                                      eigen_solver=eigen_solver, tol=tol,
                                      max_iter=max_iter,
                                      random_state=random_state)
    ller_error = np.trace(embedding.T.dot(L).dot(embedding))
    return embedding, lle_error, ller_error
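
# Usage sketch (not part of the original example): embed noisy 10-D points
# whose responses depend on the first coordinate. Data is synthetic.
import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(200, 10)
Y = X[:, :1] + 0.01 * rng.randn(200, 1)

emb, lle_err, ller_err = ller(X, Y, n_neighbors=8, n_components=2, mu=0.5)
print(emb.shape)                    # (200, 2)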
Exemplo n.º 49
def mccf(X, y, l=0.01, boundary='constant', crop_filter=True):
    r"""
    Multi-Channel Correlation Filter (MCCF).

    Parameters
    ----------
    X : ``(n_images, n_channels, image_h, image_w)`` `ndarray`
        The training images.
    y : ``(1, response_h, response_w)`` `ndarray`
        The desired response.
    l : `float`, optional
        Regularization parameter.
    boundary : ``{'constant', 'symmetric'}``, optional
        Determines how the image is padded.
    crop_filter : `bool`, optional
        If ``True``, the shape of the MOSSE filter is the same as the shape
        of the desired response. If ``False``, the filter's shape is equal to:
        ``X[0].shape + y.shape - 1``

    Returns
    -------
    f : ``(1, response_h, response_w)`` `ndarray`
        Multi-Channel Correlation Filter (MCCF) filter associated to the
        training images.
    sXY : ``(N,)`` `ndarray`
        The cross-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.
    sXX : ``(N, N)`` `ndarray`
        The auto-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.

    References
    ----------
    .. [1] H. K. Galoogahi, T. Sim, and Simon Lucey. "Multi-Channel
        Correlation Filters". IEEE Proceedings of International Conference on
        Computer Vision (ICCV), 2013.
    """
    # number of images; number of channels, height and width
    n, k, hx, wx = X.shape

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # extended shape
    ext_h = hx + hy - 1
    ext_w = wx + wy - 1
    ext_shape = (ext_h, ext_w)
    # extended dimensionality
    ext_d = ext_h * ext_w

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # extend images
    ext_X = pad(X, ext_shape, boundary=boundary)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for ext_x in ext_X:
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # store extended image fft as sparse diagonal matrix
        diag_fft_x = spdiags(fft_ext_x.reshape((k, -1)),
                             -np.arange(0, k) * ext_d, ext_d * k, ext_d).T
        # vectorize extended desired response fft
        diag_fft_y = fft_ext_y.ravel()

        # update auto and cross spectral energy matrices
        sXX += diag_fft_x.conj().T.dot(diag_fft_x)
        sXY += diag_fft_x.conj().T.dot(diag_fft_y)

    # solve ext_d independent k x k linear systems (with regularization)
    # to obtain desired extended multi-channel correlation filter
    fft_ext_f = spsolve(sXX + l * speye(sXX.shape[-1]), sXY)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft
    f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))

    if crop_filter:
        # crop extended filter to match desired response shape
        f = crop(f, y_shape)

    return f, sXY, sXX
Exemplo n.º 50
def imccf(A, B, n_ab, X, y, l=0.01, boundary='constant', crop_filter=True,
          f=1.0):
    r"""
    Incremental Multi-Channel Correlation Filter (MCCF)

    Parameters
    ----------
    A : ``(N,)`` `ndarray`
        The current cross-correlation array, where
        ``N = (patch_h+response_h-1) * (patch_w+response_w-1) * n_channels``.
    B : ``(N, N)`` `ndarray`
        The current auto-correlation array, where
        ``N = (patch_h+response_h-1) * (patch_w+response_w-1) * n_channels``.
    n_ab : `int`
        The current number of images.
    X : ``(n_images, n_channels, image_h, image_w)`` `ndarray`
        The training images (patches).
    y : ``(1, response_h, response_w)`` `ndarray`
        The desired response.
    l : `float`, optional
        Regularization parameter.
    boundary : ``{'constant', 'symmetric'}``, optional
        Determines how the image is padded.
    crop_filter : `bool`, optional
        If ``True``, the shape of the MOSSE filter is the same as the shape
        of the desired response. If ``False``, the filter's shape is equal to:
        ``X[0].shape + y.shape - 1``
    f : ``[0, 1]`` `float`, optional
        Forgetting factor that weights the relative contribution of new
        samples vs old samples. If ``1.0``, all samples are weighted equally.
        If ``<1.0``, more emphasis is put on the new samples.

    Returns
    -------
    f : ``(1, response_h, response_w)`` `ndarray`
        Multi-Channel Correlation Filter (MCCF) filter associated to the
        training images.
    sXY : ``(N,)`` `ndarray`
        The updated cross-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.
    sXX : ``(N, N)`` `ndarray`
        The updated auto-correlation array, where
        ``N = (image_h+response_h-1) * (image_w+response_w-1) * n_channels``.

    References
    ----------
    .. [1] D. S. Bolme, J. R. Beveridge, B. A. Draper, and Y. M. Lui. "Visual
        Object Tracking using Adaptive Correlation Filters", IEEE Proceedings
        of International Conference on Computer Vision and Pattern Recognition
        (CVPR), 2010.
    .. [2] H. K. Galoogahi, T. Sim, and Simon Lucey. "Multi-Channel
        Correlation Filters". IEEE Proceedings of International Conference on
        Computer Vision (ICCV), 2013.
    """
    # number of images; number of channels, height and width
    n_x, k, hz, wz = X.shape

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # multiply the number of samples used to produce the auto and cross
    # spectral energy matrices A and B by forgetting factor
    n_ab *= f
    # total number of samples
    n = n_ab + n_x
    # compute weighting factors
    nu_ab = n_ab / n
    nu_x = n_x / n

    # extended shape
    ext_h = hz + hy - 1
    ext_w = wz + wy - 1
    ext_shape = (ext_h, ext_w)
    # extended dimensionality
    ext_d = ext_h * ext_w

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # extend images
    ext_X = pad(X, ext_shape, boundary=boundary)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for ext_x in ext_X:
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # store extended image fft as sparse diagonal matrix
        diag_fft_x = spdiags(fft_ext_x.reshape((k, -1)),
                             -np.arange(0, k) * ext_d, ext_d * k, ext_d).T
        # vectorize extended desired response fft
        diag_fft_y = fft_ext_y.ravel()

        # update auto and cross spectral energy matrices
        sXX += diag_fft_x.conj().T.dot(diag_fft_x)
        sXY += diag_fft_x.conj().T.dot(diag_fft_y)

    # combine old and new auto and cross spectral energy matrices
    sXY = nu_ab * A + nu_x * sXY
    sXX = nu_ab * B + nu_x * sXX
    # solve ext_d independent k x k linear systems (with regularization)
    # to obtain desired extended multi-channel correlation filter
    fft_ext_f = spsolve(sXX + l * speye(sXX.shape[-1]), sXY)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft
    f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    if crop_filter:
        # crop extended filter to match desired response shape
        f = crop(f, y_shape)

    return f, sXY, sXX
Exemplo n.º 51
def ipoptopf_solver(om, ppopt):
    """Solves AC optimal power flow using IPOPT.

    Inputs are an OPF model object and a PYPOWER options vector.

    Outputs are a C{results} dict, C{success} flag and C{raw} output dict.

    C{results} is a PYPOWER case dict (ppc) with the usual C{baseMVA}, C{bus}
    C{branch}, C{gen}, C{gencost} fields, along with the following additional
    fields:
        - C{order}      see 'help ext2int' for details of this field
        - C{x}          final value of optimization variables (internal order)
        - C{f}          final objective function value
        - C{mu}         shadow prices on ...
            - C{var}
                - C{l}  lower bounds on variables
                - C{u}  upper bounds on variables
            - C{nln}
                - C{l}  lower bounds on nonlinear constraints
                - C{u}  upper bounds on nonlinear constraints
            - C{lin}
                - C{l}  lower bounds on linear constraints
                - C{u}  upper bounds on linear constraints

    C{success} is C{True} if solver converged successfully, C{False} otherwise

    C{raw} is a raw output dict in form returned by MINOS
        - C{xr}     final value of optimization variables
        - C{pimul}  constraint multipliers
        - C{info}   solver specific termination code
        - C{output} solver specific output information

    @see: L{opf}, L{pips}

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
    Autonoma de Manizales)
    @author: Richard Lincoln
    """
    import pyipopt

    ## unpack data
    ppc = om.get_ppc()
    baseMVA, bus, gen, branch, gencost = \
        ppc['baseMVA'], ppc['bus'], ppc['gen'], ppc['branch'], ppc['gencost']
    vv, _, nn, _ = om.get_idx()

    ## problem dimensions
    nb = shape(bus)[0]          ## number of buses
    ng = shape(gen)[0]          ## number of gens
    nl = shape(branch)[0]       ## number of branches
    ny = om.getN('var', 'y')    ## number of piece-wise linear costs

    ## linear constraints
    A, l, u = om.linear_constraints()

    ## bounds on optimization vars
    _, xmin, xmax = om.getv()

    ## build admittance matrices
    Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)

    ## try to select an interior initial point
    ll = xmin.copy(); uu = xmax.copy()
    ll[xmin == -Inf] = -2e19   ## replace Inf with numerical proxies
    uu[xmax ==  Inf] =  2e19
    x0 = (ll + uu) / 2
    Varefs = bus[bus[:, BUS_TYPE] == REF, VA] * (pi / 180)
    x0[vv['i1']['Va']:vv['iN']['Va']] = Varefs[0]  ## angles set to first reference angle
    if ny > 0:
        ipwl = find(gencost[:, MODEL] == PW_LINEAR)
#        PQ = r_[gen[:, PMAX], gen[:, QMAX]]
#        c = totcost(gencost[ipwl, :], PQ[ipwl])
        ## largest y-value in CCV data
        c = gencost.flatten('F')[sub2ind(shape(gencost), ipwl, NCOST + 2 * gencost[ipwl, NCOST])]
        x0[vv['i1']['y']:vv['iN']['y']] = max(c) + 0.1 * abs(max(c))
#        x0[vv['i1']['y']:vv['iN']['y']) = c + 0.1 * abs(c)

    ## find branches with flow limits
    il = find((branch[:, RATE_A] != 0) & (branch[:, RATE_A] < 1e10))
    nl2 = len(il)           ## number of constrained lines

    ##-----  run opf  -----
    ## build Jacobian and Hessian structure
    if A is not None and issparse(A):
        nA = A.shape[0]                ## number of original linear constraints
    else:
        nA = 0
    nx = len(x0)
    f = branch[:, F_BUS]                           ## list of "from" buses
    t = branch[:, T_BUS]                           ## list of "to" buses
    Cf = sparse((ones(nl), (arange(nl), f)), (nl, nb))      ## connection matrix for line & from buses
    Ct = sparse((ones(nl), (arange(nl), t)), (nl, nb))      ## connection matrix for line & to buses
    Cl = Cf + Ct
    Cb = Cl.T * Cl + speye(nb, nb)
    Cl2 = Cl[il, :]
    Cg = sparse((ones(ng), (gen[:, GEN_BUS], arange(ng))), (nb, ng))
    nz = nx - 2 * (nb + ng)
    nxtra = nx - 2 * nb
    if nz > 0:
        Js = vstack([
            hstack([Cb,      Cb,      Cg,              sparse((nb, ng)),   sparse((nb,  nz))]),
            hstack([Cb,      Cb,      sparse((nb, ng)),   Cg,              sparse((nb,  nz))]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),               sparse((nl2, nz))]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),               sparse((nl2, nz))])
        ], 'coo')
    else:
        Js = vstack([
            hstack([Cb,      Cb,      Cg,              sparse((nb, ng))]),
            hstack([Cb,      Cb,      sparse((nb, ng)),   Cg,          ]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),           ]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),           ])
        ], 'coo')

    if A is not None and issparse(A):
        Js = vstack([Js, A], 'coo')

    f, _, d2f = opf_costfcn(x0, om, True)
    Hs = tril(d2f + vstack([
        hstack([Cb,  Cb,  sparse((nb, nxtra))]),
        hstack([Cb,  Cb,  sparse((nb, nxtra))]),
        sparse((nxtra, nx))
    ]), format='coo')

    ## set options struct for IPOPT
#    options = {}
#    options['ipopt'] = ipopt_options([], ppopt)

    ## extra data to pass to functions
    userdata = {
        'om':       om,
        'Ybus':     Ybus,
        'Yf':       Yf[il, :],
        'Yt':       Yt[il, :],
        'ppopt':    ppopt,
        'il':       il,
        'A':        A,
        'nA':       nA,
        'neqnln':   2 * nb,
        'niqnln':   2 * nl2,
        'Js':       Js,
        'Hs':       Hs
    }

    ## check Jacobian and Hessian structure
    #xr                  = rand(x0.shape)
    #lmbda               = rand( 2 * nb + 2 * nl2)
    #Js1 = eval_jac_g(x, flag, userdata) #(xr, options.auxdata)
    #Hs1  = eval_h(xr, 1, lmbda, userdata)
    #i1, j1, s = find(Js)
    #i2, j2, s = find(Js1)
    #if (len(i1) != len(i2)) | (norm(i1 - i2) != 0) | (norm(j1 - j2) != 0):
    #    raise ValueError, 'something''s wrong with the Jacobian structure'
    #
    #i1, j1, s = find(Hs)
    #i2, j2, s = find(Hs1)
    #if (len(i1) != len(i2)) | (norm(i1 - i2) != 0) | (norm(j1 - j2) != 0):
    #    raise ValueError, 'something''s wrong with the Hessian structure'

    ## define variable and constraint bounds
    # n is the number of variables
    n = x0.shape[0]
    # xl is the lower bound of x as bounded constraints
    xl = xmin
    # xu is the upper bound of x as bounded constraints
    xu = xmax

    neqnln = 2 * nb
    niqnln = 2 * nl2

    # number of constraints
    m = neqnln + niqnln + nA
    # lower bound of constraint
    gl = r_[zeros(neqnln), -Inf * ones(niqnln), l]
    # upper bound of constraints
    gu = r_[zeros(neqnln),       zeros(niqnln), u]

    # number of nonzeros in the Jacobian matrix
    nnzj = Js.nnz
    # number of nonzeros in the Hessian matrix (may be set to 0)
    nnzh = Hs.nnz

    eval_hessian = True
    if eval_hessian:
        hessian = lambda x, lagrange, obj_factor, flag, user_data=None: \
                eval_h(x, lagrange, obj_factor, flag, userdata)

        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh,
                             eval_f, eval_grad_f, eval_g, eval_jac_g, hessian)
    else:
        nnzh = 0
        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh,
                             eval_f, eval_grad_f, eval_g, eval_jac_g)

    nlp.int_option('print_level', 5)
    nlp.num_option('tol', 1.0000e-12)
    nlp.int_option('max_iter', 250)
    nlp.num_option('dual_inf_tol', 0.10000)
    nlp.num_option('constr_viol_tol', 1.0000e-06)
    nlp.num_option('compl_inf_tol', 1.0000e-05)
    nlp.num_option('acceptable_tol', 1.0000e-08)
    nlp.num_option('acceptable_constr_viol_tol', 1.0000e-04)
    nlp.num_option('acceptable_compl_inf_tol', 0.0010000)
    nlp.str_option('mu_strategy', 'adaptive')

    iter_count_holder = {'count': 0}
    def intermediate_callback(algmod, iter_count, obj_value, inf_pr, inf_du,
            mu, d_norm, regularization_size, alpha_du, alpha_pr, ls_trials,
            user_data=None):
        # NOTE: the original assigned to a bare local ``iter``, which never
        # updated the enclosing counter (and shadowed the builtin); a mutable
        # holder makes the count visible outside the callback
        iter_count_holder['count'] = iter_count
        return True

    nlp.set_intermediate_callback(intermediate_callback)

    ## run the optimization
    # returns final solution x, upper and lower bound for multiplier, final
    # objective function obj and the return status of ipopt
    x, zl, zu, obj, status, zg = nlp.solve(x0, m, userdata)

    info = {'x': x, 'zl': zl, 'zu': zu, 'obj': obj, 'status': status, 'lmbda': zg}

    nlp.close()

    success = (status == 0) | (status == 1)

    output = {'iterations': iter_count_holder['count']}

    f, _ = opf_costfcn(x, om)

    ## update solution data
    Va = x[vv['i1']['Va']:vv['iN']['Va']]
    Vm = x[vv['i1']['Vm']:vv['iN']['Vm']]
    Pg = x[vv['i1']['Pg']:vv['iN']['Pg']]
    Qg = x[vv['i1']['Qg']:vv['iN']['Qg']]
    V = Vm * exp(1j * Va)

    ##-----  calculate return values  -----
    ## update voltages & generator outputs
    bus[:, VA] = Va * 180 / pi
    bus[:, VM] = Vm
    gen[:, PG] = Pg * baseMVA
    gen[:, QG] = Qg * baseMVA
    gen[:, VG] = Vm[gen[:, GEN_BUS].astype(int)]

    ## compute branch flows
    f_br = branch[:, F_BUS].astype(int)
    t_br = branch[:, T_BUS].astype(int)
    Sf = V[f_br] * conj(Yf * V)  ## cplx pwr at "from" bus, p.u.
    St = V[t_br] * conj(Yt * V)  ## cplx pwr at "to" bus, p.u.
    branch[:, PF] = Sf.real * baseMVA
    branch[:, QF] = Sf.imag * baseMVA
    branch[:, PT] = St.real * baseMVA
    branch[:, QT] = St.imag * baseMVA

    ## line constraint is actually on square of limit
    ## so we must fix multipliers
    muSf = zeros(nl)
    muSt = zeros(nl)
    if len(il) > 0:
        muSf[il] = 2 * info['lmbda'][2 * nb +       arange(nl2)] * branch[il, RATE_A] / baseMVA
        muSt[il] = 2 * info['lmbda'][2 * nb + nl2 + arange(nl2)] * branch[il, RATE_A] / baseMVA

    ## update Lagrange multipliers
    bus[:, MU_VMAX]  = info['zu'][vv['i1']['Vm']:vv['iN']['Vm']]
    bus[:, MU_VMIN]  = info['zl'][vv['i1']['Vm']:vv['iN']['Vm']]
    gen[:, MU_PMAX]  = info['zu'][vv['i1']['Pg']:vv['iN']['Pg']] / baseMVA
    gen[:, MU_PMIN]  = info['zl'][vv['i1']['Pg']:vv['iN']['Pg']] / baseMVA
    gen[:, MU_QMAX]  = info['zu'][vv['i1']['Qg']:vv['iN']['Qg']] / baseMVA
    gen[:, MU_QMIN]  = info['zl'][vv['i1']['Qg']:vv['iN']['Qg']] / baseMVA
    bus[:, LAM_P]    = info['lmbda'][nn['i1']['Pmis']:nn['iN']['Pmis']] / baseMVA
    bus[:, LAM_Q]    = info['lmbda'][nn['i1']['Qmis']:nn['iN']['Qmis']] / baseMVA
    branch[:, MU_SF] = muSf / baseMVA
    branch[:, MU_ST] = muSt / baseMVA

    ## package up results
    nlnN = om.getN('nln')

    ## extract multipliers for nonlinear constraints
    kl = find(info['lmbda'][:2 * nb] < 0)
    ku = find(info['lmbda'][:2 * nb] > 0)
    nl_mu_l = zeros(nlnN)
    nl_mu_u = r_[zeros(2 * nb), muSf, muSt]
    nl_mu_l[kl] = -info['lmbda'][kl]
    nl_mu_u[ku] =  info['lmbda'][ku]

    ## extract multipliers for linear constraints
    lam_lin = info['lmbda'][2 * nb + 2 * nl2 + arange(nA)]   ## lmbda for linear constraints
    kl = find(lam_lin < 0)                     ## lower bound binding
    ku = find(lam_lin > 0)                     ## upper bound binding
    mu_l = zeros(nA)
    mu_l[kl] = -lam_lin[kl]
    mu_u = zeros(nA)
    mu_u[ku] = lam_lin[ku]

    mu = {
        'var': {'l': info['zl'], 'u': info['zu']},
        'nln': {'l': nl_mu_l, 'u': nl_mu_u},
        'lin': {'l': mu_l, 'u': mu_u}
    }

    results = ppc
    results['bus'], results['branch'], results['gen'], \
        results['om'], results['x'], results['mu'], results['f'] = \
            bus, branch, gen, om, x, mu, f

    pimul = r_[
        results['mu']['nln']['l'] - results['mu']['nln']['u'],
        results['mu']['lin']['l'] - results['mu']['lin']['u'],
        -ones(ny > 0),
        results['mu']['var']['l'] - results['mu']['var']['u']
    ]
    raw = {'xr': x, 'pimul': pimul, 'info': info['status'], 'output': output}

    return results, success, raw
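
# Usage sketch (not part of the original example): this solver is normally
# reached through PYPOWER's high-level OPF entry points rather than called
# directly. The OPF_ALG code selecting IPOPT below is an assumption; check
# the ppoption documentation of your PYPOWER version.
from pypower.api import case9, ppoption, runopf

ppopt = ppoption(OPF_ALG=580)       # 580 selects the IPOPT-based OPF (assumed)
results = runopf(case9(), ppopt)
print(results['success'], results['f'])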
Exemplo n.º 52
    #print "Rp: ", result
    return result

def Rpp(v):
    """ Hessian """
    result = 2*(A-R(v)*B-outer(B*v,Rp(v))-outer(Rp(v),B*v))/dot(v.T,B*v)
    #print "Rpp: ", result
    return result
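
# The definition of R was also cut off by the example viewer. A plausible
# minimal reconstruction (the body is an assumption, not the original code;
# appending to ``data`` is what feeds the semilogy convergence plot below):
def R(v):
    """ Rayleigh quotient (assumed reconstruction) """
    rq = dot(v.T, A * v) / dot(v.T, B * v)
    data.append(rq)     # assumed: record each evaluation for plotting
    return rq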


A = io.mmread('nos4.mtx') # clustered eigenvalues
#B = io.mmread('bcsstm02.mtx.gz')
#A = io.mmread('bcsstk06.mtx.gz') # clustered eigenvalues
#B = io.mmread('bcsstm06.mtx.gz')
n = A.shape[0]
B = speye(n,n)
random.seed(1)
v_0=random.rand(n)

print("try fmin_bfgs")
full_output = 1
data = []
v, fopt, gopt, Hopt, func_calls, grad_calls, warnflag, allvecs = \
        optimize.fmin_bfgs(R, v_0, fprime=Rp, full_output=full_output, retall=1)
if warnflag == 0:
    plt.semilogy(np.arange(0, len(data)), data)
    print('Rayleigh quotient BFGS', R(v))


print("fmin_bfgs OK")
Exemplo n.º 53
# choose a Closest Point Method algorithm
cpm = 0

# choose timestep
if cpm == 0:
    dt = 0.2 * np.min(dx)**2
elif cpm == 1:
    dt = 0.2 * np.min(dx)**2
elif cpm == 2:
    dt = 0.5 * np.min(dx)

# build the vGMM matrix
if cpm == 1 or cpm == 2:
    #I = speye(L.shape[0], L.shape[1])
    I = speye(*L.shape)
    lamb = 4.0/np.min(dx)**2
    M = E*L - lamb*(I - E)
if cpm == 2:
    A = I - dt*M
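
# Not part of the original example: for cpm == 2 the same sparse system
# (I - dt*M) is solved at every step, so it pays to factor it once and reuse
# the factorization inside the time loop below (assumes SciPy's splu).
from scipy.sparse.linalg import splu

if cpm == 2:
    Afac = splu(A.tocsc())   # each implicit Euler step is then a cheap Afac.solve(...)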

Tf = 2
numtimesteps = int(Tf // dt + 1)


start_time = timeit.default_timer()
# serial implementation of the closest point method
next_out_time = 0.1
for kt in range(numtimesteps):
    if cpm == 0:
        # explicit Euler, Ruuth--Merriman style