Example #1
    def test_constant(self):
        """Test creating a constant.
        """
        # Scalar constant.
        size = (1, 1)
        mat = create_const(1.0, size)
        self.assertEqual(mat.size, size)
        self.assertEqual(len(mat.args), 0)
        self.assertEqual(mat.type, SCALAR_CONST)
        assert mat.data == 1.0

        # Dense matrix constant.
        size = (5, 4)
        mat = create_const(np.ones(size), size)
        self.assertEqual(mat.size, size)
        self.assertEqual(len(mat.args), 0)
        self.assertEqual(mat.type, DENSE_CONST)
        assert (mat.data == np.ones(size)).all()

        # Sparse matrix constant.
        size = (5, 5)
        mat = create_const(sp.eye(5), size, sparse=True)
        self.assertEqual(mat.size, size)
        self.assertEqual(len(mat.args), 0)
        self.assertEqual(mat.type, SPARSE_CONST)
        assert (mat.data.todense() == sp.eye(5).todense()).all()
Example #2
    def _K(self, g, beta, ifix=None):
        """ Returns the discretised integral operator
        """
        if ifix is None:
            ifix = self.ifix
    
        g = g.reshape(self.dim.R, self.dim.N).T        # set g as g_{nr}, n=1..N, r=1..R
        g = np.column_stack((np.ones(self.dim.N), g))  # append a col. of 1s to g

        # matrices A[r] = sum(b_rd * Ld)
        struct_mats = np.array([sum(brd*Ld for brd, Ld in zip(br, self.basis_mats))
                                for br in beta])
        # Add an axis for n=0,...,N-1
        At = struct_mats[None, :, :, :] * g[..., None, None]
        At = At.sum(1)

        W = weight_matrix(self.ttc, ifix) #self._weight_matrix
        K = sum(sparse.kron(sparse.kron(W[:, i][:, None],
                                        sparse.eye(1, self.dim.N, i)),
                            At[i, ...])
                for i in range(self.dim.N))
        I = sparse.eye(self.dim.K)

        # because x[ifix] is fixed by the integral transform
        # we need to add a column of identity matrices to K
        # - could do this by assignment
        eifix = sparse.eye(1, self.dim.N, ifix)
        K += sum(sparse.kron(sparse.kron(sparse.eye(1, self.dim.N, i).transpose(),
                                         eifix),
                             I)
                 for i in range(self.dim.N))
        return K
Example #3
def pauli_x(n, N):
    '''compute the pauli_x operator acting on the n^th spin of N'''

    px = Pauli['x']
    e1, e2 = sp.eye(2**(n-1)), sp.eye(2**(N-n))

    return sp.kron(e1, sp.kron(px, e2))
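
A minimal usage sketch for the snippet above. The module-level Pauli dictionary and the sp alias are assumptions standing in for the definitions the original relies on:

import numpy as np
import scipy.sparse as sp

# Hypothetical stand-in for the module-level Pauli dictionary used above.
Pauli = {'x': sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))}

op = pauli_x(2, 3)   # sigma_x acting on spin 2 of a 3-spin chain
print(op.shape)      # (8, 8), i.e. I_2 (x) sigma_x (x) I_2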
Example #4
    def solve_system(self, rhs, factor, u0, t):
        """
        Simple linear solver for (I-dtA)u = rhs

        Args:
            rhs: right-hand side for the linear system
            factor: abbrev. for the node-to-node stepsize (or any other factor required)
            u0: initial guess for the iterative solver (not used here so far)
            t: current time (e.g. for time-dependent BCs)

        Returns:
            solution as mesh
        """

        M1 = sp.hstack((sp.eye(self.nvars[1]), -factor * self.A))
        M2 = sp.hstack((-factor * self.A, sp.eye(self.nvars[1])))
        M = sp.vstack((M1, M2))

        b = np.concatenate((rhs.values[0, :], rhs.values[1, :]))

        sol = LA.spsolve(M, b)

        me = mesh(self.nvars)
        me.values[0, :], me.values[1, :] = np.split(sol, 2)

        return me
Example #5
def construct_hams(hs, Js, Jxs):
    '''Construct static Hamiltonians'''
    
    Nz = len(hs)     # number of zones
    
    H0s = [construct_H0(hs[i], Js[i]) for i in xrange(Nz)]
    Gammas = [gen_gamma(len(hs[i])) for i in xrange(Nz)]

    M = [H.size for H in H0s]   # number of states per zone
    C = [1]+list(np.cumprod(M))     # cumprod of H0 sizes

    # pad single zone Hamiltonians
    for i in xrange(Nz):
        H0s[i] = np.tile(np.repeat(H0s[i], C[-1]/C[i+1]), [1, C[i]])
        Gammas[i] = sp.kron(sp.eye(C[i]), sp.kron(Gammas[i], sp.eye(C[-1]/C[i+1])))
    
    # only need sum of H0s
    H0 = np.sum(np.array(H0s), axis=0)
    
    # add zone-zone interactions to H0
    for i,j in Jxs:
        Jx = Jxs[(i,j)]
        Hx = np.zeros([1, M[i]*M[j]], dtype=float)
        for n,m in zip(*Jx.nonzero()):
            Hx += Jx[n, m]*np.kron(pauli_z(n+1, Jx.shape[0]),
                                   pauli_z(m+1, Jx.shape[1]))
        if np.any(Hx):
            H0 += np.tile(np.repeat(Hx, C[i]), [1, C[-1]/C[j+1]])
    
    H0 = sp.diags(H0, [0])
    return H0, Gammas
Example #6
def multi_kron_sum(*mats):
    '''Compute the Kronecker sum of multiple matrices. Special case if all
    diagonal'''

    N = len(mats)

    diag = all(mat.ndim==1 for mat in mats)

    # cumulative products of the matrix sizes
    Cm = [1]
    for mat in mats:
        Cm.append(Cm[-1]*mat.shape[0])

    if diag:
        S = np.zeros(Cm[-1])

        for i, mat in enumerate(mats):
            S += np.tile(np.repeat(mat, Cm[N]/Cm[i+1]), Cm[i])
    else:
        S = sp.coo_matrix((Cm[-1],Cm[-1]))

        for i, mat in enumerate(mats):
            e1, e2 = sp.eye(Cm[i]), sp.eye(Cm[N]/Cm[i+1])
            S += multi_kron_prod(e1, mat, e2)

    return S
Example #7
 def test_reshape8(self):
   t1 = expr.sparse_diagonal((137, 113))
   t2 = expr.sparse_diagonal((113, 137))
   a = expr.reshape(t1, (113, 137))
   b = expr.reshape(t2, (137, 113))
   Assert.all_eq(a.glom().todense(), sp.eye(137, 113).tolil().reshape((113, 137)).todense())
   Assert.all_eq(b.glom().todense(), sp.eye(113, 137).tolil().reshape((137, 113)).todense())
Example #8
def connected_components(adj):
    """Find the connected components in an undirected graph

    Parameters :
        adj : array-like, shape = [n_nodes, n_nodes] :
            A dense matrix or rank-2 ndarray.

    Returns :
        cluster : np.ndarray, shape = [n_nodes, ] :
            The connected-component label of each node in the graph.

    Raises :
        None

    Notes :
        cluster = connected_components(adj)
    """
    A = sparse.csr_matrix(adj)
    n_node = A.shape[0]
    A = A + A.transpose()
    newA = A + sparse.eye(n_node, n_node)
    connected = A + sparse.eye(n_node, n_node)
    cl = 0
    cluster = np.zeros(n_node, dtype=int)
    for i in range(n_node - 1):
        connected = np.dot(connected, newA)
    for i in range(n_node):
        vars, redun = connected[:, i].nonzero()
        if vars.size != 0:
            cl += 1
            cluster[vars] = cl
            for var1 in vars:
                for var2 in vars:
                    connected[var1, var2] = 0
    return cluster
Example #9
 def test_transpose3(self):
   t1 = expr.sparse_diagonal((107, 401)).evaluate()
   t2 = expr.sparse_diagonal((401, 107)).evaluate()
   a = expr.transpose(t1)
   b = expr.transpose(t2)
   Assert.all_eq(a.glom().todense(), sp.eye(107, 401).transpose().todense())
   Assert.all_eq(b.glom().todense(), sp.eye(401, 107).transpose().todense())
Example #10
 def leslie(self,A):
     
     incidence_class    = [sp.csr_matrix((self.size,self.size)) for ii in range(self.memory+1)]
     incidence_class[0] = sp.eye(self.size,self.size,format='csr')
     cumulative      = sp.eye(self.size,self.size,format='csr')
     prevalence  = sp.eye(self.size,self.size,format='csr')
     
     mean_cumulative   = np.zeros((1,self.runtime))[0]
     mean_prevalence = np.zeros((1,self.runtime))[0]
     #=======================================================================
     # The time-dependent incidence matrix is computed
     #=======================================================================
     sparse_add = sp.compressed._cs_matrix.__add__
     memory     = self.memory
     for jj in range(0,self.runtime):
         
         incidence = A[jj].dot(prevalence)
         for ii in xrange(memory,0,-1):
             incidence_class[ii] = incidence_class[ii-1]
         incidence_class[0] = incidence
         
         prevalence  = reduce(sparse_add, incidence_class)
         cumulative      = cumulative + incidence
         cumulative.data = np.ones_like(cumulative.data)
         
         mean_cumulative[jj]   = cumulative.nnz
         mean_prevalence[jj] = prevalence.nnz
         
     self.mean_cumulative   = mean_cumulative.astype(float)/self.size**2
     self.mean_prevalence = mean_prevalence.astype(float)/self.size**2
     return
Example #11
def sparse_orth(d):
    """ Constructs a sparse orthogonal matrix.
    
    The method is described in:
    Gi-Sang Cheon et al., Constructions for the sparsest orthogonal matrices,
    Bull. Korean Math. Soc 36 (1999) No.1 pp.199-129
    """
    from scipy.sparse import eye
    from numpy import r_, pi, sin, cos

    if d % 2 == 0:
        seq = r_[0:d:2, 1:d - 1:2]
    else:
        seq = r_[0:d - 1:2, 1:d:2]
    Q = eye(d, d).tocsc()
    for i in seq:
        theta = random() * 2 * pi
        flip = (random() - 0.5) > 0
        Qi = eye(d, d).tocsc()
        Qi[i, i] = cos(theta)
        Qi[(i + 1), i] = sin(theta)
        if flip > 0:
            Qi[i, (i + 1)] = -sin(theta)
            Qi[(i + 1), (i + 1)] = cos(theta)
        else:
            Qi[i, (i + 1)] = sin(theta)
            Qi[(i + 1), (i + 1)] = -cos(theta)
        Q = Q * Qi
    return Q
Example #12
    def iteration(self, user, fixed_vecs):
        num_solve = self.num_users if user else self.num_items
        num_fixed = fixed_vecs.shape[0]
        YTY = fixed_vecs.T.dot(fixed_vecs)
        eye = sparse.eye(num_fixed)
        lambda_eye = self.reg_param * sparse.eye(self.num_factors)
        solve_vecs = np.zeros((num_solve, self.num_factors))
        if DETAIL_MODE:
            t = time.time()
        for i in xrange(num_solve):
            if user:
                counts_i = self.counts[i].toarray()
            else:
                counts_i = self.counts[:, i].T.toarray()
            CuI = sparse.diags(counts_i, [0])
            pu = counts_i.copy()
            pu[np.where(pu != 0)] = 1.0
            YTCuIY = fixed_vecs.T.dot(CuI).dot(fixed_vecs)
            YTCupu = fixed_vecs.T.dot(CuI + eye).dot(sparse.csr_matrix(pu).T)
            xu = spsolve(YTY + YTCuIY + lambda_eye, YTCupu)
            solve_vecs[i] = xu
            if DETAIL_MODE and i % 1000 == 0:
                print 'Solved %i vecs in %d seconds' % (i, time.time() - t)
                t = time.time()

        return solve_vecs
Example #13
def sparse_cH(terms, ldim=2):
    """Construct a sparse cyclic nearest-neighbour Hamiltonian

    :param terms: List of nearest-neighbour terms (square array or MPO,
        see return value of :func:`cXY_local_terms`)
    :param ldim: Local dimension

    :returns: The Hamiltonian as sparse matrix

    """
    H = 0
    N = len(terms)
    for pos, term in enumerate(terms[:-1]):
        if hasattr(term, 'lt'):
            # Convert MPO to regular matrix
            term = term.to_array_global().reshape((ldim**2, ldim**2))
        left = sp.eye(ldim**pos)
        right = sp.eye(ldim**(N - pos - 2))
        H += sp.kron(left, sp.kron(term, right))
    # The last term acts on the first and last site.
    cyc = terms[-1]
    middle = sp.eye(ldim**pos)
    for i in range(cyc.ranks[0]):
        H += sp.kron(cyc.lt[0][0, ..., i], sp.kron(middle, cyc.lt[1][i, ..., 0]))
    return H
Example #14
def heat_do_implicit_step(ux, u0, k, h):
    """
    Does one timestep for given initial conditions at u0 = u^(j) with spatial meshwidth h and temporal meshwidth k for
    the 1D heat equation. For notation and theory see
        "Karpfinger, Hoehere Mathematik in Rezepten, 2.Auflage, p.894 ff."
    Since we are using an implicit time stepping scheme, there is no stability criterion. We use
    a sparse linear solver (spsolve) for the implicit equation. Therefore we rearrange the implicit
    equation as follows:
        u1 = u0 + A_h * u1
    ->  (Id - A_h) * u1 = u0
    This is a system of linear equations: A * x = b with
        A = Id - A_h
        x = u1
        b = u0

    :param u0: solution u(x,t=t)
    :param k: temporal meshwidth
    :param h: spatial meshwidth
    :return: u1: solution u(x,t=t+k)
    """

    n = u0.shape[0]
    r = (c_heat ** 2) * k / (h ** 2)
    iteration_matrix = (r * sp.eye(n, n, -1) - 2 * r * sp.eye(n, n) + r * sp.eye(n, n, 1)).tocsr()
    iteration_matrix[0, 0] = 0  # enforcing dirichlet BC -> no change!
    iteration_matrix[0, 1] = 0
    iteration_matrix[n - 1, n - 1] = 0
    iteration_matrix[n - 1, n - 2] = 0
    iteration_matrix = -iteration_matrix
    iteration_matrix += sp.eye(n, n)
    u1 = lin.spsolve(iteration_matrix, u0)
    return u1
Example #15
def wave_do_explicit_step(u0, u1, k, h):
    """
    Does one timestep for given initial conditions at u0 = u^(j-1) and u1 = u^(j) with spatial meshwidth h and temporal
    meshwidth k for the 1D wave equation. For notation and theory see
        "Karpfinger, Hoehere Mathematik in Rezepten, 2.Auflage, p.904 ff."
    Stability criterion:
        r  = (k / h)**2 <= 1
    :param u0: solution u(x,t=t-k)
    :param u1: solution u(x,t=t)
    :param k: temporal meshwidth
    :param h: spatial meshwidth
    :return: u2: solution u(x,t=t+k)
    """

    n = u0.shape[0]
    r = (c_wave * k / h) ** 2
    a_h = (-2 * r * sp.eye(n, n) + r * sp.eye(n, n, -1) + r * sp.eye(n, n, 1)).tocsr()
    iteration_matrix1 = 2 * sp.eye(n, n) + a_h
    iteration_matrix1[0, 0] = 1
    iteration_matrix1[0, 1] = 0
    iteration_matrix1[n - 1, n - 1] = 1
    iteration_matrix1[n - 1, n - 2] = 0

    iteration_matrix0 = - 1 * sp.eye(n, n).tocsr()
    iteration_matrix0[0, 0] = 0
    iteration_matrix0[n - 1, n - 1] = 0

    u2 = iteration_matrix1.dot(u1) + iteration_matrix0.dot(u0)

    return u2
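
A hedged usage sketch for the explicit wave step above. c_wave is a module-level constant in the original, so the value below is only an assumption, and the time step is chosen to satisfy the stated stability criterion:

import numpy as np
import scipy.sparse as sp

c_wave = 1.0                    # assumed wave speed (module-level constant in the original)
x = np.linspace(0.0, 1.0, 101)
h = x[1] - x[0]
k = 0.5 * h                     # (c_wave * k / h)**2 = 0.25 <= 1, so the scheme is stable
u0 = np.sin(np.pi * x)          # u(x, t = -k)
u1 = np.sin(np.pi * x)          # u(x, t = 0)
u2 = wave_do_explicit_step(u0, u1, k, h)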
Example #16
    def test_3sum(self):
        nP = 90

        alpha1 = 0.3
        alpha2 = 0.6
        alpha3inv = 9

        phi1 = ObjectiveFunction.L2ObjectiveFunction(W=sp.eye(nP))
        phi2 = ObjectiveFunction.L2ObjectiveFunction(W=sp.eye(nP))
        phi3 = ObjectiveFunction.L2ObjectiveFunction(W=sp.eye(nP))

        phi = alpha1 * phi1 + alpha2 * phi2 + phi3 / alpha3inv

        m = np.random.rand(nP)

        self.assertTrue(
            np.all(phi.multipliers == np.r_[alpha1, alpha2, 1./alpha3inv])
        )

        self.assertTrue(
           np.allclose((alpha1*phi1(m) + alpha2*phi2(m) + phi3(m)/alpha3inv), phi(m))
        )

        self.assertTrue(len(phi.objfcts) == 3)

        self.assertTrue(phi.test())
Example #17
def _pade(A, m):
    n = np.shape(A)[0]
    c = _padecoeff(m)
    if m != 13:
        apows = [[] for jj in range(int(np.ceil((m + 1) / 2)))]
        apows[0] = sp.eye(n, n, format='csc')
        apows[1] = A * A
        for jj in range(2, int(np.ceil((m + 1) / 2))):
            apows[jj] = apows[jj - 1] * apows[1]
        U = sp.lil_matrix((n, n)).tocsc()
        V = sp.lil_matrix((n, n)).tocsc()
        for jj in range(m, 0, -2):
            U = U + c[jj] * apows[jj // 2]
        U = A * U
        for jj in range(m - 1, -1, -2):
            V = V + c[jj] * apows[(jj + 1) // 2]
        F = spla.spsolve((-U + V), (U + V))
        return F.tocsr()
    elif m == 13:
        A2 = A * A
        A4 = A2 * A2
        A6 = A2 * A4
        U = A * (A6 * (c[13] * A6 + c[11] * A4 + c[9] * A2) +
                 c[7] * A6 + c[5] * A4 + c[3] * A2 +
                 c[1] * sp.eye(n, n).tocsc())
        V = A6 * (c[12] * A6 + c[10] * A4 + c[8] * A2) + c[6] * A6 + c[4] * \
            A4 + c[2] * A2 + c[0] * sp.eye(n, n).tocsc()
        F = spla.spsolve((-U + V), (U + V))
        return F.tocsr()
Example #18
 def check(self,A):
     cumulative_SI = A[0].copy() + sp.eye(self.size, self.size, 0, format='csr')
     cumulative_SI.data = np.ones_like(cumulative_SI.data)
     
     incidence_class    = [sp.csr_matrix((self.size,self.size)) for ii in range(self.runtime)]
     incidence_class[0] = A[0]
     incidence_class[1] = sp.eye(self.size,self.size,format='csr')
     cumulative_SIR  = incidence_class[0] + incidence_class[1]
     prevalence  = incidence_class[0] + incidence_class[1]
     check       = np.empty(shape=(self.runtime,),dtype=bool)
      check[0]    = (cumulative_SI != cumulative_SIR).nnz > 0                        # check == 1 if at least one element differs
     
     for ii in range(1,self.runtime):
         try:
             cumulative_SI      = cumulative_SI + A[ii]*cumulative_SI
             cumulative_SI.data = np.ones_like(cumulative_SI.data)
             
             incidence      = A[ii].dot(prevalence)
             incidence.data = np.ones_like(incidence.data)
             incidence      = incidence - incidence.multiply(cumulative_SIR)
             
             prevalence = prevalence - incidence_class[-1]
             prevalence = prevalence + incidence
             cumulative_SIR = cumulative_SIR     + incidence
             
             for jj in xrange(self.runtime-1,0,-1):
                 incidence_class[jj] = incidence_class[jj-1]
             incidence_class[0]      = incidence
             check[ii]    = (cumulative_SI != cumulative_SIR).nnz > 0
             print ii, check[ii]
         except:
             print 'Break at t = ', ii
             break
     return check
Example #19
def solve_qp_constrained(P,q,nlabel,x0,**kwargs):
    import solver_qp_constrained as solver
    reload(solver)
    
    ## constrained solver
    nvar = q.size
    npixel = nvar/nlabel
    F = sparse.bmat([
        [sparse.bmat([[-sparse.eye(npixel,npixel) for i in range(nlabel-1)]])],
        [sparse.eye(npixel*(nlabel-1),npixel*(nlabel-1))],
        ])
        
    ## quadratic objective
    objective = solver.ObjectiveAPI(P, q, G=1, h=0, F=F,**kwargs)
    
    ## log barrier solver
    t0 = kwargs.pop('logbarrier_initial_t',1.0)
    mu = kwargs.pop('logbarrier_mu',20.0)
    epsilon = kwargs.pop('logbarrier_epsilon',1e-3)
    solver = solver.ConstrainedSolver(
        objective,
        t0=t0,
        mu=mu,
        epsilon=epsilon,
        )
    
    ## remove zero entries in initial guess
    xinit = x0.reshape((-1,nlabel),order='F')
    xinit[xinit<1e-10] = 1e-3
    xinit = (xinit/np.c_[np.sum(xinit, axis=1)]).reshape((-1,1),order='F')
    
    x = solver.solve(xinit, **kwargs)
    return x
Example #20
def roughening_matrix(num_cols):
        ''' Return a roughening (second-difference) matrix for the given number of columns. '''
        A = eye(num_cols-2, num_cols, k=0, dtype='float')
        B = -2. * eye(num_cols-2, num_cols, k=1, dtype='float')
        C = eye(num_cols-2, num_cols, k=2, dtype='float')
        return A+B+C
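
A small usage sketch, assuming eye in the snippet above is scipy.sparse.eye; each row of the result carries the second-difference stencil [1, -2, 1]:

from scipy.sparse import eye

R = roughening_matrix(5)
print(R.toarray())
# [[ 1. -2.  1.  0.  0.]
#  [ 0.  1. -2.  1.  0.]
#  [ 0.  0.  1. -2.  1.]]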
Example #21
def get_irt_learner(train_df, test_df=None, is_two_po=True,
                    single_concept=True, template_precision=None, item_precision=None):
    """ Make a 1PO or 2PO learner.

    :param pd.DataFrame train_df: Train data
    :param pd.DataFrame test_df: Optional test data
    :param bool is_two_po: Whether to make a 2PO learner
    :param bool single_concept: Should we train with a single theta per user (True)
        or a single theta per user per concept (False)
    :param float template_precision: The hierarchical IRT model has a model
        item_difficulty ~ N(template_difficulty, 1.0/item_precision) and
        template_difficulty ~ N(0, 1.0/template_precision). None just ignores
        templates.
    :param float|None item_precision: The precision of the Gaussian prior around items in a
        non-templated model. Or see `template_precision` for the templated case. If None, uses 1.0.
    :return: The learner
    :rtype: BayesNetLearner
    """
    correct = train_df[CORRECT_KEY].values.astype(bool)
    item_idx = train_df[ITEM_IDX_KEY].values
    is_held_out = np.zeros(len(train_df), dtype=bool)
    if test_df is not None:
        correct = np.concatenate((correct, test_df[CORRECT_KEY].values.astype(bool)))
        item_idx = np.concatenate((item_idx, test_df[ITEM_IDX_KEY].values))
        is_held_out = np.concatenate((is_held_out, np.ones(len(test_df), dtype=bool)))

    student_idx = compute_theta_idx(train_df, test_df=test_df, single_concept=single_concept)
    if not template_precision:
        learner_class = TwoPOLearner if is_two_po else OnePOLearner
        learner = learner_class(correct, student_idx=student_idx, item_idx=item_idx,
                                is_held_out=is_held_out, max_iterations=1000,
                                callback=ConvergenceCallback())
        for node in learner.nodes.itervalues():
            node.solver_pars.updater.step_size = 0.5
        if item_precision is not None:
            learner.nodes[OFFSET_COEFFS_KEY].cpd.precision = \
                item_precision * sp.eye(learner.nodes[OFFSET_COEFFS_KEY].data.size)
            LOGGER.info("Made a 1PO IRT learner with item precision %f", item_precision)
        else:
            LOGGER.info("Made a 1PO IRT learner with default item precision")
    else:
        template_idx = train_df[TEMPLATE_IDX_KEY]
        if test_df is not None:
            template_idx = np.concatenate((template_idx, test_df[TEMPLATE_IDX_KEY].values))
        problem_to_template = {item: template for item, template in zip(item_idx, template_idx)}
        problem_to_template = sorted(problem_to_template.items())
        template_idx = np.array([x for _, x in problem_to_template])
        learner = OnePOHighRT(correct, student_idx, item_idx, template_idx,
                              is_held_out=is_held_out, max_iterations=1000,
                              higher_precision=item_precision,
                              callback=ConvergenceCallback())
        if item_precision is not None:
            learner.nodes[HIGHER_OFFSET_KEY].cpd.precision = \
                template_precision * sp.eye(learner.nodes[HIGHER_OFFSET_KEY].data.size)
        for node in learner.nodes.itervalues():
            node.solver_pars.updater.step_size = 0.5
        LOGGER.info("Made a hierarchical IRT learner with item precision %f and template "
                    "precision %f", item_precision, template_precision)
    return learner
Example #22
def mult_by_monomial( alpha ):
    #produces multiplication operator for monomial
    global DIM, degree_max
    out = sparse.eye( degree_max, k=alpha[0], format='dia')
    for d in range(1,DIM):
        store = sparse.eye( degree_max, k=alpha[d], format='dia')
        out = sparse.kron( store , out )
    return out
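
A usage sketch; DIM and degree_max are module-level globals in the original, so the values below are assumptions:

import scipy.sparse as sparse

DIM, degree_max = 2, 4        # assumed globals read by mult_by_monomial
M = mult_by_monomial([1, 0])  # shift by one degree in the first variable
print(M.shape)                # (16, 16) == (degree_max**DIM, degree_max**DIM)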
Example #23
def BuildLaPoisson():
    """
    For the projection step:
    Laplacian matrix for phi
    with Neumann BC for phi.

    Issue: Neumann condition for phi
    ==> the solution is not unique.

    We need to fix the pressure at one point
    to lift the degeneracy: here [0][1]

    ==> need to build a correction matrix

    """
    ### do not take the ghost points into account (-2)
    NXi = nx
    NYi = ny

    ###### Definition of the 1D Laplace operator

    ###### X AXIS
    ### Diagonal terms
    dataNXi = [numpy.ones(NXi), -2*numpy.ones(NXi), numpy.ones(NXi)]   
    
    ### Boundary conditions: Neumann on the left, nothing on the right
    dataNXi[2][1]     = 2.  # SF left
    # dataNXi[0][NXi-2] = 2.  # SF right

    ###### Y AXIS
    ### Diagonal terms
    dataNYi = [numpy.ones(NYi), -2*numpy.ones(NYi), numpy.ones(NYi)] 
   
    ### Boundary conditions: Neumann
    dataNYi[2][1]     = 2.  # SF low
    dataNYi[0][NYi-2] = 2.  # SF top

    ###### Their positions
    offsets = numpy.array([-1,0,1])                    
    DXX = sp.dia_matrix((dataNXi,offsets), shape=(NXi,NXi)) * dx_2
    DYY = sp.dia_matrix((dataNYi,offsets), shape=(NYi,NYi)) * dy_2
    
    ####### 2D Laplace operator
    LAP = sp.kron(sp.eye(NYi,NYi), DXX) + sp.kron(DYY, sp.eye(NXi,NXi))
    
    ####### BUILD CORRECTION MATRIX

    ### Upper Diagonal terms
    dataNYNXi = [numpy.zeros(NYi*NXi)]
    offset = numpy.array([1])

    ### Fix coefficient: 2+(-1) = 1 ==> Dirichlet at one point (recovers the Laplacian)
    ### NOTE multiplicative coefficient: dx_2 for M(j,i), j along NY, i along NX
    dataNYNXi[0][1] = -1 * dx_2

    LAP0 = sp.dia_matrix((dataNYNXi,offset), shape=(NYi*NXi,NYi*NXi))
  
    return LAP + LAP0
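
A usage sketch for the Laplacian builder above. The function reads module-level globals (nx, ny, dx_2, dy_2), so the values below are assumptions:

import numpy
import scipy.sparse as sp

nx, ny = 8, 6          # assumed interior grid sizes
dx_2, dy_2 = 1.0, 1.0  # assumed 1/dx**2 and 1/dy**2

LAP = BuildLaPoisson()
print(LAP.shape)       # (nx*ny, nx*ny) = (48, 48)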
Example #24
def create_iFFT2mtx(nx, ny):
	"""
	Builds a sparse 2D inverse-FFT operator using the scipy.sparse library.

	:param nx: Number of samples along the first axis
	:type nx: int
	:param ny: Number of samples along the second axis
	:type ny: int

	returns
	:param sparse_iFFT2mtx: 2D iFFT operator for a matrix of shape (nx, ny).
	:type sparse_iFFT2mtx: scipy.sparse.csr.csr_matrix
	"""
	N = nx * ny

	iDFT1 = np.fft.fft(sparse.eye(nx).toarray().transpose()).conj().transpose()
	iDFT2 = np.fft.fft(sparse.eye(ny).toarray().transpose()).conj().transpose()

	# Create a sparse matrix with iDFT1 repeated ny times on the diagonal.

	# Initialize lil_matrix to write the diagonals in the correct way.
	tmp = sparse.lil_matrix((N,N), dtype='complex')
	row = 0
	for i in range(ny):
		for j in range(nx):
			tmp[row, (i)*nx:(i+1)*nx] = iDFT1[j,:]
			row += 1

		#Screen feedback.
		prcnt = 50*(i+1) / float(ny) -1
		if prcnt >=0 : print("%i %% done" % prcnt, end="\r")
		sys.stdout.flush()	

	# Export tmp to a diagonal sparse matrix.
	sparse_iDFT1 = tmp.tocsc()

	# Initialize lil_matrix for iDFT2 and export it to sparse.
	tmp = sparse.lil_matrix((N,N), dtype='complex')
	row = 0	
	for i in range(ny):
		for j in range(nx):
			indx = np.arange(j,N,nx)
			tmp[row,indx] = iDFT2[i,:]
			row += 1

		prcnt = (50*(i+1) / float(ny)) + 49
		print("%i %% done" % prcnt, end="\r")
		sys.stdout.flush()	
	
	sparse_iDFT2 = tmp.tocsc()
	print("\n")
	print("100 %% done \n")

	# Calculate matrix dot-product iDFT2 * iDFT1 and divide it 
	# by the number of all samples (nx * ny)
	sparse_iFFT2mtx = sparse_iDFT2.dot(sparse_iDFT1)/float(N)
	
	return sparse_iFFT2mtx
Example #25
 def testSub(self):
     N = 1000
     a = random(N)
     b = random(N)
     c = a - b
     self.assertEqual(0, (c.diff(a, 'tangent') - sp.eye(N,N)).nnz)
     self.assertEqual(0, (c.diff(a, 'adjoint') - sp.eye(N,N)).nnz)
     self.assertEqual(0, (c.diff(b, 'tangent') + sp.eye(N,N)).nnz)
     self.assertEqual(0, (c.diff(b, 'adjoint') + sp.eye(N,N)).nnz)
Example #26
 def SI_load(self,A):
     zm = [sp.csr_matrix((self.size,self.size),dtype=np.int8) for ii in range(self.runtime+1)]
     zm[0] = sp.eye(self.size,self.size,dtype=np.int8,format='csr')
     C = A[0] + sp.eye(self.size, self.size, 0, format='csr')
     for ii in range(1,self.runtime+1):
         C.data = np.int8(C.data>0)
         zm[ii] = C
         C = (A[ii-1] + sp.eye(self.size, self.size, 0, format='csr'))*C
     return zm
Example #27
  def LaplacianSmooth(self, reference_mesh):
    # placing this down here for now in case people are having numpy/scipy problems
    import numpy
    import scipy.sparse as sparse
    import scipy.sparse.linalg

    num_vertices = len(self.vertices)
    num_boundary_vertices = len(self.boundary_vertices)
    num_non_boundary_vertices = num_vertices - num_boundary_vertices
    L = sparse.lil_matrix((num_vertices, num_vertices))
    C = sparse.lil_matrix((num_boundary_vertices, num_vertices))
    Cbar = sparse.lil_matrix((num_non_boundary_vertices, num_vertices))

    non_boundary_vertices_seen = 0
    boundary_vertices_seen = 0
    for i in xrange(num_vertices):
      if i in self.boundary_vertices:
        C[boundary_vertices_seen, i] = 1.0
        boundary_vertices_seen += 1
      else:
        Cbar[non_boundary_vertices_seen, i] = 1.0
        non_boundary_vertices_seen += 1
    assert (num_boundary_vertices == boundary_vertices_seen)
    assert (num_non_boundary_vertices == non_boundary_vertices_seen)
    C = sparse.kron(C, sparse.eye(3, 3)).tocsr()
    Cbar = sparse.kron(Cbar, sparse.eye(3, 3)).tocsr()

    edge_boundary_vertices = 0
    edge_non_boundary_vertices = 0
    for v0, v1 in self.edges:
      if v0 in self.boundary_vertices:
        edge_boundary_vertices += 1
      else:
        edge_non_boundary_vertices += 1
      if v1 in self.boundary_vertices:
        edge_boundary_vertices += 1
      else:
        edge_non_boundary_vertices += 1

      weight = 1.0
      L[v0,v0] -= weight
      L[v0,v1] += weight

      L[v1,v1] -= weight
      L[v1,v0] += weight
    L = sparse.kron(L, sparse.eye(3, 3)).tocsr()

    xtilde = numpy.array(self.vertices).flatten()
    y = numpy.array(reference_mesh.vertices).flatten()
    CbarLTL = Cbar * (L.T * L)
    b = CbarLTL * (y - C.T * (C * xtilde))

    xbar, info = sparse.linalg.cg(CbarLTL * Cbar.T, b)

    x = Cbar.T * xbar + C.T * (C * xtilde)

    self.vertices = x.reshape(numpy.array(self.vertices).shape)
Example #28
def AlphaCoeffs(n, a):
    # Construct alpha operator
    Z = sps.csr_matrix([[1,0],[0,-1]])
    I = sps.eye(2)
    alpha = sps.eye(2**n)
    # Form alpha and beta operators
    for q1 in xrange(n):
        alpha = alpha + a[q1]*_formZi(n,q1,Z)
    return alpha
Example #29
def lap(shape, spacing):
    """
    This function generates the laplacian operator
    :param shape:
    :param spacing:
    :return:
    """
    n = shape[0]*shape[1]
    return (-4. * sparse.eye(n, n, 0)
            + sparse.eye(n, n, 1) + sparse.eye(n, n, -1)
            + sparse.eye(n, n, shape[1]) + sparse.eye(n, n, -shape[1])) / spacing**2
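
A minimal sketch of the operator above on a 3x3 grid with unit spacing (assuming sparse is scipy.sparse, as in the snippet):

import scipy.sparse as sparse

L = lap((3, 3), spacing=1.0)
print(L.shape)       # (9, 9)
print(L.diagonal())  # -4. on the main diagonal everywhere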
Example #30
def factor(X,rho):
    m,n = X.shape
    if m>=n:
       L = cholesky(X.T.dot(X)+rho*sparse.eye(n))
    else:
       L = cholesky(sparse.eye(m)+1./rho*(X.dot(X.T)))
    L = sparse.csc_matrix(L)
    U = sparse.csc_matrix(L.T)
    return L,U
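
A quick verification sketch, assuming cholesky in the snippet above is numpy.linalg.cholesky (lower-triangular factor) and sparse is scipy.sparse:

import numpy as np
from numpy.linalg import cholesky
import scipy.sparse as sparse

X = np.random.rand(20, 5)
L, U = factor(X, rho=1.0)
# L * U should reconstruct X^T X + rho*I when m >= n
err = np.abs((L @ U).toarray() - (X.T @ X + np.eye(5))).max()
print(err < 1e-8)    # True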
Example #31
def minimize_svrg(
    f_deriv,
    A,
    b,
    x0,
    step_size,
    alpha=0,
    prox=None,
    max_iter=500,
    tol=1e-6,
    verbose=False,
    callback=None,
):
    r"""Stochastic average gradient augmented (SAGA) algorithm.

    The SVRG algorithm can solve optimization problems of the form

        argmin_{x \in R^p} \sum_{i}^{n_samples} f(A_i^T x, b_i)
                           + alpha * ||x||_2^2 + beta * ||x||_1

    Args:
      f_deriv
          derivative of f

      x0: np.ndarray or None, optional
          Starting point for optimization.

      step_size: float or None, optional
          Step size for the optimization. If None is given, this will be
          estimated from the function f.

      n_jobs: int
          Number of threads to use in the optimization. A number higher than 1
          will use the Asynchronous SAGA optimization method described in
          [Pedregosa et al., 2017]

      max_iter: int
          Maximum number of passes through the data in the optimization.

      tol: float
          Tolerance criterion. The algorithm will stop whenever the norm of the
          gradient mapping (generalization of the gradient for nonsmooth
          optimization)
          is below tol.

      verbose: bool
          Verbosity level. True might print some messages.

      trace: bool
          Whether to trace convergence of the function, useful for plotting
          and/or debugging. If yes, the result will have extra members
          trace_func, trace_time.


    Returns:
      opt: OptimizeResult
          The optimization result represented as a
          ``scipy.optimize.OptimizeResult`` object. Important attributes are:
          ``x`` the solution array, ``success`` a Boolean flag indicating if
          the optimizer exited successfully and ``message`` which describes
          the cause of the termination. See `scipy.optimize.OptimizeResult`
          for a description of other attributes.


    References:
      The SAGA algorithm was originally described in

      Aaron Defazio, Francis Bach, and Simon Lacoste-Julien. `SAGA: A fast
      incremental gradient method with support for non-strongly convex composite
      objectives. <https://arxiv.org/abs/1407.0202>`_ Advances in Neural
      Information Processing Systems. 2014.

      The implementation has some improvements with respect to the original,
      like support for sparse datasets, and is described in

      Fabian Pedregosa, Remi Leblond, and Simon Lacoste-Julien.
      "Breaking the Nonsmooth Barrier: A Scalable Parallel Method
      for Composite Optimization." Advances in Neural Information
      Processing Systems (NIPS) 2017.
    """
    x = np.ascontiguousarray(x0).copy()
    n_samples, n_features = A.shape
    A = sparse.csr_matrix(A)

    if step_size is None:
        # then need to use line search
        raise ValueError

    if hasattr(prox, "__len__") and len(prox) == 2:
        blocks = prox[1]
        prox = prox[0]
    else:
        blocks = sparse.eye(n_features, n_features, format="csr")

    if prox is None:

        @utils.njit
        def prox(x, i, indices, indptr, d, step_size):
            pass

    A_data = A.data
    A_indices = A.indices
    A_indptr = A.indptr
    n_samples, n_features = A.shape

    rblocks_indices = blocks.T.tocsr().indices
    blocks_indptr = blocks.indptr
    bs_data, bs_indices, bs_indptr = _support_matrix(A_indices, A_indptr,
                                                     rblocks_indices,
                                                     blocks.shape[0])
    csr_blocks_1 = sparse.csr_matrix((bs_data, bs_indices, bs_indptr))

    # .. diagonal reweighting ..
    d = np.array(csr_blocks_1.sum(0), dtype=np.float64).ravel()
    idx = d != 0
    d[idx] = n_samples / d[idx]
    d[~idx] = 1

    @utils.njit
    def full_grad(x):
        grad = np.zeros(x.size)
        for i in range(n_samples):
            p = 0.0
            for j in range(A_indptr[i], A_indptr[i + 1]):
                j_idx = A_indices[j]
                p += x[j_idx] * A_data[j]
            grad_i = f_deriv(np.array([p]), np.array([b[i]]))[0]
            # .. gradient estimate (XXX difference) ..
            for j in range(A_indptr[i], A_indptr[i + 1]):
                j_idx = A_indices[j]
                grad[j_idx] += grad_i * A_data[j] / n_samples
        return grad

    @utils.njit(nogil=True)
    def _svrg_epoch(x, x_snapshot, idx, gradient_average, grad_tmp, step_size):

        # .. inner iteration ..
        for i in idx:
            p = 0.0
            p_old = 0.0
            for j in range(A_indptr[i], A_indptr[i + 1]):
                j_idx = A_indices[j]
                p += x[j_idx] * A_data[j]
                p_old += x_snapshot[j_idx] * A_data[j]

            grad_i = f_deriv(np.array([p]), np.array([b[i]]))[0]
            old_grad_i = f_deriv(np.array([p_old]), np.array([b[i]]))[0]
            for j in range(A_indptr[i], A_indptr[i + 1]):
                j_idx = A_indices[j]
                grad_tmp[j_idx] = (grad_i - old_grad_i) * A_data[j]

            # .. update coefficients ..
            # .. first iterate on blocks ..
            for h_j in range(bs_indptr[i], bs_indptr[i + 1]):
                h = bs_indices[h_j]
                # .. then iterate on features inside block ..
                for b_j in range(blocks_indptr[h], blocks_indptr[h + 1]):
                    bias_term = d[h] * (gradient_average[b_j] + alpha * x[b_j])
                    x[b_j] -= step_size * (grad_tmp[b_j] + bias_term)
            prox(x, i, bs_indices, bs_indptr, d, step_size)

    idx = np.arange(n_samples)
    grad_tmp = np.zeros(n_features)
    success = False
    if callback is not None:
        callback(locals())
    for it in range(max_iter):
        x_snapshot = x.copy()
        gradient_average = full_grad(x_snapshot)
        np.random.shuffle(idx)
        _svrg_epoch(x, x_snapshot, idx, gradient_average, grad_tmp, step_size)
        if callback is not None:
            callback(locals())

        if np.abs(x - x_snapshot).sum() < tol:
            success = True
            break
    message = ""
    return optimize.OptimizeResult(x=x,
                                   success=success,
                                   nit=it,
                                   message=message)
Example #32
 def identity(self, size):
     """Return an identity matrix.
     """
     return sp.eye(size, size, format="csc")
Example #33
    def solve(self, problem):
        """
        Solves optimization problem.

        Parameters
        ----------
        problem : Object
        """

        # Local vars
        norm2 = self.norm2
        norminf = self.norminf
        parameters = self.parameters

        # Parameters
        tol = parameters['tol']
        maxiter = parameters['maxiter']
        quiet = parameters['quiet']
        sigma = parameters['sigma']
        eps = parameters['eps']
        eps_cold = parameters['eps_cold']

        # Problem
        if not isinstance(problem, QuadProblem):
            problem = cast_problem(problem)
            quad_problem = QuadProblem(None,
                                       None,
                                       None,
                                       None,
                                       None,
                                       None,
                                       problem=problem)
        else:
            quad_problem = problem
        self.problem = problem
        self.quad_problem = quad_problem

        # Linsolver
        self.linsolver = new_linsolver(parameters['linsolver'], 'symmetric')

        # Reset
        self.reset()

        # Checks
        if not np.all(problem.l <= problem.u):
            raise OptSolverError_NoInterior(self)

        # Data
        self.H = quad_problem.H
        self.g = quad_problem.g
        self.A = quad_problem.A
        self.AT = quad_problem.A.T
        self.b = quad_problem.b
        self.l = quad_problem.l
        self.u = quad_problem.u
        self.n = quad_problem.H.shape[0]
        self.m = quad_problem.A.shape[0]
        self.e = np.ones(self.n)
        self.I = eye(self.n, format='coo')
        self.Onm = coo_matrix((self.n, self.m))
        self.Omm = coo_matrix((self.m, self.m))

        # Make interior
        d = np.abs(self.u - self.l)
        self.l = self.l - 1e-2 * tol * d - 1e-8
        self.u = self.u + 1e-2 * tol * d + 1e-8

        # Initial primal
        if quad_problem.x is None:
            self.x = (self.u + self.l) / 2.
        else:
            dul = eps * (self.u - self.l)
            self.x = np.maximum(np.minimum(quad_problem.x, self.u - dul),
                                self.l + dul)

        # Initial duals
        if quad_problem.lam is None:
            self.lam = np.zeros(self.m)
        else:
            self.lam = quad_problem.lam.copy()
        if quad_problem.mu is None:
            self.mu = np.ones(self.x.size) * eps_cold
        else:
            self.mu = np.maximum(quad_problem.mu, eps)
        if quad_problem.pi is None:
            self.pi = np.ones(self.x.size) * eps_cold
        else:
            self.pi = np.maximum(quad_problem.pi, eps)

        # Check interior
        try:
            assert (np.all(self.l < self.x))
            assert (np.all(self.x < self.u))
            assert (np.all(self.mu > 0))
            assert (np.all(self.pi > 0))
        except AssertionError:
            raise OptSolverError_Infeasibility(self)

        # Init vector
        self.y = np.hstack((self.x, self.lam, self.mu, self.pi))

        # Complementarity measures
        self.eta_mu = np.dot(self.mu, self.u - self.x) / self.x.size
        self.eta_pi = np.dot(self.pi, self.x - self.l) / self.x.size

        # Objective scaling
        fdata = self.func(self.y)
        self.obj_sca = np.maximum(norminf(self.g + self.H * self.x) / 10., 1.)
        self.H = self.H / self.obj_sca
        self.g = self.g / self.obj_sca
        fdata = self.func(self.y)

        # Header
        if not quiet:
            print('\nSolver: IQP')
            print('-----------')

        # Outer
        s = 0.
        self.k = 0
        while True:

            # Complementarity measures
            self.eta_mu = np.dot(self.mu, self.u - self.x) / self.x.size
            self.eta_pi = np.dot(self.pi, self.x - self.l) / self.x.size

            # Init eval
            fdata = self.func(self.y)
            fmax = norminf(fdata.f)
            gmax = norminf(fdata.GradF)

            # Done
            if fmax < tol and sigma * np.maximum(self.eta_mu,
                                                 self.eta_pi) < tol:
                self.set_status(self.STATUS_SOLVED)
                self.set_error_msg('')
                return

            # Target
            tau = sigma * norminf(fdata.GradF)

            # Header
            if not quiet:
                if self.k > 0:
                    print('')
                print('{0:^3s}'.format('iter'), end=' ')
                print('{0:^9s}'.format('phi'), end=' ')
                print('{0:^9s}'.format('fmax'), end=' ')
                print('{0:^9s}'.format('gmax'), end=' ')
                print('{0:^8s}'.format('cu'), end=' ')
                print('{0:^8s}'.format('cl'), end=' ')
                print('{0:^8s}'.format('s'))

            # Inner
            while True:

                # Eval
                fdata = self.func(self.y)
                fmax = norminf(fdata.f)
                gmax = norminf(fdata.GradF)
                compu = norminf(self.mu * (self.u - self.x))
                compl = norminf(self.pi * (self.x - self.l))
                phi = (0.5 * np.dot(self.x, self.H * self.x) +
                       np.dot(self.g, self.x)) * self.obj_sca

                # Show progress
                if not quiet:
                    print('{0:^3d}'.format(self.k), end=' ')
                    print('{0:^9.2e}'.format(phi), end=' ')
                    print('{0:^9.2e}'.format(fmax), end=' ')
                    print('{0:^9.2e}'.format(gmax), end=' ')
                    print('{0:^8.1e}'.format(compu), end=' ')
                    print('{0:^8.1e}'.format(compl), end=' ')
                    print('{0:^8.1e}'.format(s))

                # Done
                if gmax < tau:
                    break

                # Done
                if fmax < tol and np.maximum(compu, compl) < tol:
                    break

                # Maxiters
                if self.k >= maxiter:
                    raise OptSolverError_MaxIters(self)

                # Search direction
                ux = self.u - self.x
                xl = self.x - self.l
                D1 = spdiags(self.mu / ux, 0, self.n, self.n, format='coo')
                D2 = spdiags(self.pi / xl, 0, self.n, self.n, format='coo')
                fbar = np.hstack(
                    (-fdata.rd + fdata.ru / ux - fdata.rl / xl, fdata.rp))
                if self.A.shape[0] > 0:
                    Jbar = bmat(
                        [[tril(self.H) + D1 + D2, None], [-self.A, self.Omm]],
                        format='coo')
                else:
                    Jbar = bmat([[tril(self.H) + D1 + D2]], format='coo')
                try:
                    if not self.linsolver.is_analyzed():
                        self.linsolver.analyze(Jbar)
                    pbar = self.linsolver.factorize_and_solve(Jbar, fbar)
                except RuntimeError:
                    raise OptSolverError_BadLinSystem(self)
                px = pbar[:self.n]
                pmu = (-fdata.ru + self.mu * px) / ux
                ppi = (-fdata.rl - self.pi * px) / xl
                p = np.hstack((pbar, pmu, ppi))

                # Steplength bounds
                indices = px > 0
                s1 = np.min(
                    np.hstack(
                        ((1. - eps) * (self.u - self.x)[indices] / px[indices],
                         np.inf)))
                indices = px < 0
                s2 = np.min(
                    np.hstack(
                        ((eps - 1.) * (self.x - self.l)[indices] / px[indices],
                         np.inf)))
                indices = pmu < 0
                s3 = np.min(
                    np.hstack(((eps - 1.) * self.mu[indices] / pmu[indices],
                               np.inf)))
                indices = ppi < 0
                s4 = np.min(
                    np.hstack(((eps - 1.) * self.pi[indices] / ppi[indices],
                               np.inf)))
                smax = np.min([s1, s2, s3, s4])

                # Line search
                s, fdata = self.line_search(self.y, p, fdata.F, fdata.GradF,
                                            self.func, smax)

                # Update x
                self.y += s * p
                self.k += 1
                self.x, self.lam, self.mu, self.pi = self.extract_components(
                    self.y)

                # Check
                try:
                    assert (np.all(self.x < self.u))
                    assert (np.all(self.x > self.l))
                    assert (np.all(self.mu > 0))
                    assert (np.all(self.pi > 0))
                except AssertionError:
                    raise OptSolverError_Infeasibility(self)
Example #34
def minimize_saga(
    f_deriv,
    A,
    b,
    x0,
    step_size,
    prox=None,
    alpha=0,
    max_iter=500,
    tol=1e-6,
    verbose=1,
    callback=None,
):
    r"""Stochastic average gradient augmented (SAGA) algorithm.

    This algorithm can solve linearly-parametrized loss functions of the form

        minimize_x \sum_{i}^n_samples f(A_i^T x, b_i) + alpha ||x||_2^2 + g(x)

    where g is a function for which we have access to its proximal operator.

    .. warning::
        This function is experimental, API is likely to change.


    Args:
      f_deriv
          derivative of f

      x0: np.ndarray or None, optional
          Starting point for optimization.

      step_size: float or None, optional
          Step size for the optimization. If None is given, this will be
          estimated from the function f.

      max_iter: int
          Maximum number of passes through the data in the optimization.

      tol: float
          Tolerance criterion. The algorithm will stop whenever the norm of the
          gradient mapping (generalization of the gradient for nonsmooth
          optimization) is below tol.

      verbose: bool
          Verbosity level. True might print some messages.

      trace: bool
          Whether to trace convergence of the function, useful for plotting
          and/or debugging. If yes, the result will have extra members trace_func,
          trace_time.


    Returns:
      opt: OptimizeResult
          The optimization result represented as a
          ``scipy.optimize.OptimizeResult`` object. Important attributes are:
          ``x`` the solution array, ``success`` a Boolean flag indicating if
          the optimizer exited successfully and ``message`` which describes
          the cause of the termination. See `scipy.optimize.OptimizeResult`
          for a description of other attributes.


    References:
      This variant of the SAGA algorithm is described in:

      `"Breaking the Nonsmooth Barrier: A Scalable Parallel Method for Composite
      Optimization."
      <https://arxiv.org/pdf/1707.06468.pdf>`_, Fabian Pedregosa, Remi Leblond,
      and Simon Lacoste-Julien. Advances in Neural Information Processing Systems
      (NIPS) 2017.
    """
    # convert any input to CSR sparse matrix representation. In the future we
    # might want to implement also a version for dense data (numpy arrays) to
    # better exploit data locality
    x = np.ascontiguousarray(x0).copy()
    n_samples, n_features = A.shape
    A = sparse.csr_matrix(A)

    if step_size is None:
        # then need to use line search
        raise ValueError

    if hasattr(prox, "__len__") and len(prox) == 2:
        blocks = prox[1]
        prox = prox[0]
    else:
        blocks = sparse.eye(n_features, n_features, format="csr")

    if prox is None:

        @utils.njit
        def prox(x, i, indices, indptr, d, step_size):
            pass

    A_data = A.data
    A_indices = A.indices
    A_indptr = A.indptr
    n_samples, n_features = A.shape

    rblocks_indices = blocks.T.tocsr().indices
    blocks_indptr = blocks.indptr
    bs_data, bs_indices, bs_indptr = _support_matrix(A_indices, A_indptr,
                                                     rblocks_indices,
                                                     blocks.shape[0])
    csr_blocks_1 = sparse.csr_matrix((bs_data, bs_indices, bs_indptr))

    # .. diagonal reweighting ..
    d = np.array(csr_blocks_1.sum(0), dtype=np.float64).ravel()
    idx = d != 0
    d[idx] = n_samples / d[idx]
    d[~idx] = 1

    @utils.njit(nogil=True)
    def _saga_epoch(x, idx, memory_gradient, gradient_average, grad_tmp,
                    step_size):
        # .. inner iteration of the SAGA algorithm..
        for i in idx:

            # .. gradient estimate ..
            p = 0.0
            for j in range(A_indptr[i], A_indptr[i + 1]):
                j_idx = A_indices[j]
                p += x[j_idx] * A_data[j]
            grad_i = f_deriv(np.array([p]), np.array([b[i]]))[0]
            for j in range(A_indptr[i], A_indptr[i + 1]):
                j_idx = A_indices[j]
                grad_tmp[j_idx] = (grad_i - memory_gradient[i]) * A_data[j]

            # .. update coefficients ..
            # .. first iterate on blocks ..
            for h_j in range(bs_indptr[i], bs_indptr[i + 1]):
                h = bs_indices[h_j]
                # .. then iterate on features inside block ..
                for b_j in range(blocks_indptr[h], blocks_indptr[h + 1]):
                    bias_term = d[h] * (gradient_average[b_j] + alpha * x[b_j])
                    x[b_j] -= step_size * (grad_tmp[b_j] + bias_term)
            prox(x, i, bs_indices, bs_indptr, d, step_size)

            # .. update memory terms ..
            for j in range(A_indptr[i], A_indptr[i + 1]):
                j_idx = A_indices[j]
                tmp = (grad_i - memory_gradient[i]) * A_data[j]
                tmp /= n_samples
                gradient_average[j_idx] += tmp
                grad_tmp[j_idx] = 0
            memory_gradient[i] = grad_i

    # .. initialize memory terms ..
    memory_gradient = np.zeros(n_samples)
    gradient_average = np.zeros(n_features)
    grad_tmp = np.zeros(n_features)
    idx = np.arange(n_samples)
    success = False
    if callback is not None:
        callback(locals())
    for it in range(max_iter):
        x_old = x.copy()
        np.random.shuffle(idx)
        _saga_epoch(x, idx, memory_gradient, gradient_average, grad_tmp,
                    step_size)
        if callback is not None:
            callback(locals())

        diff_norm = np.abs(x - x_old).sum()
        if diff_norm < tol:
            success = True
            break
    return optimize.OptimizeResult(x=x, success=success, nit=it)
Example #35
def my_graph_multiresolution(G, levels, r=0.5, sparsify=True, sparsify_eps=None,
                          downsampling_method='largest_eigenvector',
                          reduction_method='kron', compute_full_eigen=False,
                          reg_eps=0.005):
    r"""Compute a pyramid of graphs (by Kron reduction).

    'graph_multiresolution(G,levels)' computes a multiresolution of
    graph by repeatedly downsampling and performing graph reduction. The
    default downsampling method is the largest eigenvector method based on
    the polarity of the components of the eigenvector associated with the
    largest graph Laplacian eigenvalue. The default graph reduction method
    is Kron reduction followed by a graph sparsification step.
    *param* is a structure of optional parameters.

    Parameters
    ----------
    G : Graph structure
        The graph to reduce.
    levels : int
        Number of levels of decomposition
    r : float
        Target reduction factor per level: each level keeps at least
        floor(N * (1 - r)) nodes of the original graph (default is 0.5).
    lambd : float
        Stability parameter. It adds self loop to the graph to give the
        algorithm some stability (default = 0.025). [UNUSED?!]
    sparsify : bool
        To perform a spectral sparsification step immediately after
        the graph reduction (default is True).
    sparsify_eps : float
        Parameter epsilon used in the spectral sparsification
        (default is min(10/sqrt(G.N),.3)).
    downsampling_method: string
        The graph downsampling method (default is 'largest_eigenvector').
    reduction_method : string
        The graph reduction method (default is 'kron')
    compute_full_eigen : bool
        To also compute the graph Laplacian eigenvalues and eigenvectors
        for every graph in the multiresolution sequence (default is False).
    reg_eps : float
        The regularized graph Laplacian is :math:`\bar{L}=L+\epsilon I`.
        A smaller epsilon may lead to better regularization, but will also
        require a higher order Chebyshev approximation. (default is 0.005)

    Returns
    -------
    Gs : list
        A list of graph layers.

    Examples
    --------
    >>> from pygsp import reduction
    >>> levels = 5
    >>> G = graphs.Sensor(N=512)
    >>> G.compute_fourier_basis()
    >>> Gs = reduction.graph_multiresolution(G, levels, sparsify=False)
    >>> for idx in range(levels):
    ...     Gs[idx].plotting['plot_name'] = 'Reduction level: {}'.format(idx)
    ...     Gs[idx].plot()

    """
    if sparsify_eps is None:
        sparsify_eps = min(10. / np.sqrt(G.N), 0.3)

    if compute_full_eigen:
        G.compute_fourier_basis()
    else:
        G.estimate_lmax()

    Gs = [G]
    Gs[0].mr = {'idx': np.arange(G.N), 'orig_idx': np.arange(G.N)}

    n_target = int(np.floor(G.N * (1-r)))
    
    for i in range(levels):
        if downsampling_method == 'largest_eigenvector':
            if hasattr(Gs[i], '_U'):
                V = Gs[i].U[:, -1]
            else:
                V = sp.sparse.linalg.eigs(Gs[i].L, 1)[1][:, 0]

            V *= np.sign(V[0])
            n  = max(int(Gs[i].N/2), n_target)            

            ind = np.argsort(V) # np.nonzero(V >= 0)[0] 
            ind = np.flip(ind,0)
            ind = ind[:n]

        else:
            raise NotImplementedError('Unknown graph downsampling method.')

        if reduction_method == 'kron':
            Gs.append(reduction.kron_reduction(Gs[i], ind))

        else:
            raise NotImplementedError('Unknown graph reduction method.')

        if sparsify and Gs[i+1].N > 2:
            Gs[i+1] = reduction.graph_sparsify(Gs[i+1], min(max(sparsify_eps, 2. / np.sqrt(Gs[i+1].N)), 1.))

        if Gs[i+1].is_directed():
            W = (Gs[i+1].W + Gs[i+1].W.T)/2
            Gs[i+1] = graphs.Graph(W, coords=Gs[i+1].coords)
            
        if compute_full_eigen:
            Gs[i+1].compute_fourier_basis()
        else:
            Gs[i+1].estimate_lmax()

        Gs[i+1].mr = {'idx': ind, 'orig_idx': Gs[i].mr['orig_idx'][ind], 'level': i}

        L_reg = Gs[i].L + reg_eps * sparse.eye(Gs[i].N)
        Gs[i].mr['K_reg'] = reduction.kron_reduction(L_reg, ind)
        Gs[i].mr['green_kernel'] = filters.Filter(Gs[i], lambda x: 1./(reg_eps + x))

    return Gs
Example #36
    def fit(self, biadjacency):
        """Embedding of bipartite graphs from a clustering obtained with Louvain.

        Parameters
        ----------
        biadjacency:
            Biadjacency matrix of the graph.

        Returns
        -------
        self: :class:`BiLouvainEmbedding`
        """
        bilouvain = BiLouvain(resolution=self.resolution,
                              modularity=self.modularity,
                              tol_optimization=self.tol_optimization,
                              tol_aggregation=self.tol_aggregation,
                              n_aggregations=self.n_aggregations,
                              shuffle_nodes=self.shuffle_nodes,
                              sort_clusters=True,
                              return_membership=True,
                              return_aggregate=True,
                              random_state=self.random_state)
        bilouvain.fit(biadjacency)

        self.labels_ = bilouvain.labels_

        embedding_row = bilouvain.membership_row_
        embedding_col = bilouvain.membership_col_

        if self.merge_isolated:
            _, counts_row = np.unique(bilouvain.labels_row_,
                                      return_counts=True)

            n_clusters_row = embedding_row.shape[1]
            n_isolated_nodes_row = (counts_row == 1).sum()
            if n_isolated_nodes_row:
                n_remaining_row = n_clusters_row - n_isolated_nodes_row
                indptr_row = np.zeros(n_remaining_row + 2, dtype=int)
                indptr_row[-1] = n_isolated_nodes_row
                combiner_row = sparse.vstack([
                    sparse.eye(n_remaining_row,
                               n_remaining_row + 1,
                               format='csr'),
                    sparse.csr_matrix((np.ones(n_isolated_nodes_row,
                                               dtype=int),
                                       np.full(n_isolated_nodes_row,
                                               n_remaining_row,
                                               dtype=int),
                                       np.arange(n_isolated_nodes_row + 1,
                                                 dtype=int)))
                ])
                embedding_row = embedding_row.dot(combiner_row)
                self.labels_[n_remaining_row +
                             1:] = self.labels_[n_remaining_row + 1]

            _, counts_col = np.unique(bilouvain.labels_col_,
                                      return_counts=True)
            n_clusters_col = embedding_col.shape[1]
            n_isolated_nodes_col = (counts_col == 1).sum()
            if n_isolated_nodes_col:
                n_remaining_col = n_clusters_col - n_isolated_nodes_col
                indptr_col = np.zeros(n_remaining_col + 2, dtype=int)
                indptr_col[-1] = n_isolated_nodes_col
                combiner_col = sparse.vstack([
                    sparse.eye(n_remaining_col,
                               n_remaining_col + 1,
                               format='csr'),
                    sparse.csr_matrix((np.ones(n_isolated_nodes_col,
                                               dtype=int),
                                       np.full(n_isolated_nodes_col,
                                               n_remaining_col,
                                               dtype=int),
                                       np.arange(n_isolated_nodes_col + 1,
                                                 dtype=int)))
                ])
                embedding_col = embedding_col.dot(combiner_col)

        self.embedding_row_ = embedding_row
        self.embedding_col_ = embedding_col
        self.embedding_ = self.embedding_row_

        return self
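The combiner assembled above is easiest to read at a toy size. A minimal sketch with hypothetical counts (two remaining clusters, three isolated ones) shows how every isolated cluster's embedding column is folded into one shared trailing column:

import numpy as np
from scipy import sparse

n_remaining, n_isolated = 2, 3   # hypothetical sizes, for illustration only
combiner = sparse.vstack([
    sparse.eye(n_remaining, n_remaining + 1, format='csr'),
    sparse.csr_matrix((np.ones(n_isolated, dtype=int),
                       np.full(n_isolated, n_remaining, dtype=int),
                       np.arange(n_isolated + 1, dtype=int)))
])
print(combiner.toarray())
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]
#  [0. 0. 1.]
#  [0. 0. 1.]]
# Right-multiplying a membership matrix by `combiner` keeps the first two
# cluster columns and merges the three isolated clusters into the last column.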
Example #37
0
    def fit(self,
            train,
            userGraph=None,
            itemGraph=None,
            method="random",
            U0=None,
            V0=None):
        """
        Learn factors from the training set.
        User and item factors are fitted alternately.

        train : array-like of three columns
            contains the row index, column index and value of the non-null entries
        userGraph : array-like of three columns
            the first two columns are the indices of the linked users,
            the third is the weight of the link
        itemGraph : same as userGraph for the item links
        method : string
            factor initialisation. Can be "random", "svd", or given in U0 and V0
        U0, V0 : array-like
            initial values for U and V. If not None, method is ignored
        """
        if self.seed is not None:
            np.random.seed(self.seed)

        self.userGraph = False
        self.itemGraph = False

        self.train = sparse_matrix(train, n=self.num_users, p=self.num_items)

        self.n_u = list(
            map(lambda u: self.train[u, :].nnz, range(self.num_users)))
        self.n_i = list(
            map(lambda i: self.train[:, i].nnz, range(self.num_items)))

        if userGraph is not None:
            self.userGraph = True
            self.A_user = sparse.csgraph.laplacian(
                sparse_matrix(userGraph,
                              n=self.num_users,
                              p=self.num_users,
                              w=(1 - self.mu) / self.mu))
            if self.reg == "weighted":
                self.A_user += sparse.diags(self.n_u)
            elif self.reg == "default":
                self.A_user += sparse.eye(self.num_users)
            #self.A_user_sqrt = kron(sqrtm(self.A_user.todense()).real,sparse.eye(self.d))
            self.A_user = kron(self.A_user, sparse.eye(self.d))
        else:
            if self.reg == "weighted":
                self.A_user = sparse.diags(self.n_u)
            elif self.reg == "default":
                self.A_user = sparse.eye(self.num_users)
            self.A_user = kron(self.A_user, sparse.eye(self.d))

        if itemGraph is not None:
            self.itemGraph = True
            self.A_item = sparse.csgraph.laplacian(
                sparse_matrix(itemGraph,
                              n=self.num_items,
                              p=self.num_items,
                              w=(1 - self.mu) / self.mu))
            if self.reg == "weighted":
                self.A_item += sparse.diags(self.n_i)
            elif self.reg == "default":
                self.A_item += sparse.eye(self.num_items)
            #self.A_item_sqrt = kron(sqrtm(self.A_item.todense()).real,sparse.eye(self.d))
            self.A_item = kron(self.A_item, sparse.eye(self.d))
        else:
            if self.reg == "weighted":
                self.A_item = sparse.diags(self.n_i)
            elif self.reg == "default":
                self.A_item = sparse.eye(self.num_items)
            self.A_item = kron(self.A_item, sparse.eye(self.d))

        self.U = np.random.normal(size=(self.num_users, self.d))
        self.V = np.random.normal(size=(self.num_items, self.d))

        for it in range(self.num_iters):
            if self.userGraph:
                self.U = self.user_iteration()
            else:
                for u in range(self.num_users):
                    indices = self.train[u].nonzero()[1]
                    if indices.size:
                        R_u = self.train[u, indices]
                        self.U[u, :] = self.update(indices, self.V,
                                                   R_u.toarray().T)
                    else:
                        self.U[u, :] = np.zeros(self.d)

            if self.itemGraph:
                self.V = self.item_iteration()
            else:
                for i in range(self.num_items):
                    indices = self.train[:, i].nonzero()[0]
                    if indices.size:
                        R_i = self.train[indices, i]
                        self.V[i, :] = self.update(indices, self.U,
                                                   R_i.toarray())
                    else:
                        self.V[i, :] = np.zeros(self.d)

            if self.verbose:
                print("end iteration " + str(it + 1))
# Simple case
test_solve_KKT_n = 3
test_solve_KKT_m = 4

test_solve_KKT_P = sparse.random(test_solve_KKT_n, test_solve_KKT_n,
                                 density=0.4, format='csc')
test_solve_KKT_P = test_solve_KKT_P.dot(test_solve_KKT_P.T).tocsc()
test_solve_KKT_A = sparse.random(test_solve_KKT_m, test_solve_KKT_n,
                                 density=0.4, format='csc')
test_solve_KKT_Pu = sparse.triu(test_solve_KKT_P, format='csc')

test_solve_KKT_rho = 4.0
test_solve_KKT_sigma = 1.0
test_solve_KKT_KKT = sparse.vstack([
                        sparse.hstack([test_solve_KKT_P + test_solve_KKT_sigma *
                        sparse.eye(test_solve_KKT_n), test_solve_KKT_A.T]),
                        sparse.hstack([test_solve_KKT_A,
                        -1./test_solve_KKT_rho * sparse.eye(test_solve_KKT_m)])
                        ], format='csc')
test_solve_KKT_rhs = np.random.randn(test_solve_KKT_m + test_solve_KKT_n)
test_solve_KKT_x = spla.splu(test_solve_KKT_KKT).solve(test_solve_KKT_rhs)

test_solve_KKT_x[test_solve_KKT_n:] = test_solve_KKT_rhs[test_solve_KKT_n:] + \
                                      test_solve_KKT_x[test_solve_KKT_n:] / test_solve_KKT_rho

# Generate test data and solutions
data = {'test_solve_KKT_n': test_solve_KKT_n,
        'test_solve_KKT_m': test_solve_KKT_m,
        'test_solve_KKT_A': test_solve_KKT_A,
        'test_solve_KKT_Pu': test_solve_KKT_Pu,
        'test_solve_KKT_rho': test_solve_KKT_rho,
Example #39
0
    def _map_variables(self, x: np.ndarray) -> np.ndarray:
        """
        Map variables from old to new grids in d[pp.STATE] and d[pp.STATE][pp.ITERATE].
        Also calls self.assembler.update_dof_count and updates the current
        solution vector accordingly.

        Newly created DOFs are assigned values by _initialize_new_variable_values,
        which for now returns zeros, but can be tailored for specific variables
        etc.

        Parameters
        ----------
        x: np.ndarray
            Solution vector, or other vector to be mapped.

        Raises
        ------
        NotImplementedError
            If a variable has face or node dofs; only cell-based dofs are handled.

        Returns
        -------
        np.ndarray
            The solution vector mapped to the new grids and extended with values
            for newly created dofs.

        """
        # Obtain old solution vector. The values are extracted in the first two loops
        # and mapped and updated in the last two, after update_dof_count has been called.
        for g, d in self.gb:
            # First check if cells and faces have been updated, by checking if index maps are
            # available. If this is not the case, there is no need to map variables.
            if not ("cell_index_map" in d and "face_index_map" in d):
                continue

            cell_map: sps.spmatrix = d["cell_index_map"]

            d[pp.STATE]["old_solution"] = {}
            for var, dofs in d[pp.PRIMARY_VARIABLES].items():
                # Copy old solution vector values
                d[pp.STATE]["old_solution"][var] = x[self.assembler.dof_ind(
                    g, var)]

                # Only cell-based dofs have been considered so far.
                # It should not be difficult to handle other types of variables,
                # but the need has not been there.
                face_dof: int = dofs.get("faces", 0)
                node_dof: int = dofs.get("nodes", 0)
                if face_dof != 0 or node_dof != 0:
                    raise NotImplementedError(
                        "Have only implemented variable mapping for face dofs")

                cell_dof: int = dofs.get("cells")

                # Map old solution
                mapping = sps.kron(cell_map, sps.eye(cell_dof))
                d[pp.STATE][var] = mapping * d[pp.STATE][var]

                # Initialize new values
                new_ind = self._new_dof_inds(mapping)
                new_vals = self._initialize_new_variable_values(
                    g, d, var, dofs)
                d[pp.STATE][var][new_ind] = new_vals

                # Repeat for iterate:
                if var in d[pp.STATE][pp.ITERATE].keys():
                    d[pp.STATE][pp.ITERATE][var] = (
                        mapping * d[pp.STATE][pp.ITERATE][var])
                    d[pp.STATE][pp.ITERATE][var][new_ind] = new_vals

        for e, d in self.gb.edges():

            # Check if the mortar grid geometry has been updated.
            if "cell_index_map" not in d:
                # No need to do anything
                continue

            d[pp.STATE]["old_solution"] = {}
            cell_map = d["cell_index_map"]

            for var, dofs in d[pp.PRIMARY_VARIABLES].items():
                # Copy old solution vector values
                d[pp.STATE]["old_solution"][var] = x[self.assembler.dof_ind(
                    e, var)]

                # Only cell-based dofs have been considered so far.
                cell_dof = dofs.get("cells")

                # Map old solution
                mapping = sps.kron(cell_map, sps.eye(cell_dof))
                d[pp.STATE][var] = mapping * d[pp.STATE][var]

                # Initialize new values
                new_ind = self._new_dof_inds(mapping)
                new_vals = self._initialize_new_variable_values(
                    e, d, var, dofs)
                d[pp.STATE][var][new_ind] = new_vals

                # Repeat for iterate
                if var in d[pp.STATE][pp.ITERATE].keys():
                    d[pp.STATE][pp.ITERATE][var] = (
                        mapping * d[pp.STATE][pp.ITERATE][var])
                    d[pp.STATE][pp.ITERATE][var][new_ind] = new_vals

        # Update the assembler's counting of dofs
        self.assembler.update_dof_count()

        x_new = np.zeros(self.assembler.num_dof())
        # For each grid-variable pair, map old solution and initialize for new
        # DOFs.
        for g, d in self.gb:
            # Check if there has been updates to this grid.
            if not ("cell_index_map" in d and "face_index_map" in d):
                continue

            cell_map = d["cell_index_map"]

            for var, dofs in d[pp.PRIMARY_VARIABLES].items():
                # Update consist of two parts: First map the old solution to the new
                # grid, second populate newly formed cells.

                # Mapping of old variables
                cell_dof = dofs.get("cells")
                mapping = sps.kron(cell_map, sps.eye(cell_dof))
                x_new[self.assembler.dof_ind(
                    g, var)] = (mapping * d[pp.STATE]["old_solution"][var])

                # Index of newly formed variables
                new_ind = self._new_dof_inds(mapping)
                # Values of newly formed variables
                new_vals = self._initialize_new_variable_values(
                    g, d, var, dofs)
                # Update newly formed variables
                x_new[self.assembler.dof_ind(g, var)[new_ind]] = new_vals

        for e, d in self.gb.edges():
            # Same procedure as for nodes, see above for comments
            if "cell_index_map" not in d:
                continue

            cell_map = d["cell_index_map"]

            for var, dofs in d[pp.PRIMARY_VARIABLES].items():
                cell_dof = dofs.get("cells")
                mapping = sps.kron(cell_map, sps.eye(cell_dof))
                x_new[self.assembler.dof_ind(
                    e, var)] = (mapping * d[pp.STATE]["old_solution"][var])
                new_ind = self._new_dof_inds(mapping)
                new_vals = self._initialize_new_variable_values(
                    e, d, var, dofs)
                x_new[self.assembler.dof_ind(e, var)[new_ind]] = new_vals

        # Return the mapped solution vector
        return x_new
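The central step above is sps.kron(cell_map, sps.eye(cell_dof)), which expands the cell-to-cell index map so that all dofs of a cell are moved together. A stand-alone toy illustration (the map below is made up, not an actual cell_index_map):

import numpy as np
import scipy.sparse as sps

cell_dof = 2
# Toy map from 3 old cells to 4 new cells: new cells 0 and 1 inherit old cells 0 and 2,
# new cells 2 and 3 are newly created.
cell_map = sps.csr_matrix((np.ones(2), ([0, 1], [0, 2])), shape=(4, 3))

mapping = sps.kron(cell_map, sps.eye(cell_dof))
old_state = np.array([10., 11., 20., 21., 30., 31.])   # 3 cells x 2 dofs
print(mapping * old_state)
# [10. 11. 30. 31.  0.  0.  0.  0.]  -> dofs of new cells start at zero,
# ready to be filled by _initialize_new_variable_values.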
Example #40
0
def normalized_laplacian(A):
    # Symmetric normalized Laplacian: L = I - D^{-1/2} A D^{-1/2}
    d = np.ravel(degree(A).data)
    d_inv_sqrt = sps.diags(d ** -0.5)
    L = sps.eye(len(d)) - d_inv_sqrt @ A @ d_inv_sqrt
    return L
Example #41
0
features_dim = feat.shape[1]
size_update = int(feat.shape[0] * size_update_ratio)
print("size_update: {}".format(size_update))

# Store original adjacency matrix (without diagonal entries) for later
adj_orig = adj
adj_orig = adj_orig - sp.dia_matrix(
    (adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
adj_orig.eliminate_zeros()

adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(
    adj)
adj = adj_train

adj_label = adj_train + sp.eye(adj_train.shape[0])

adj_norm = torch.from_numpy(preprocess_graph(adj))
adj_label = torch.from_numpy(adj_label.todense().astype(np.float32))
feat = torch.from_numpy(feat.todense().astype(np.float32))

############## init model ##############
gcn_vae = GraphVae(features_dim, hidden_dim, out_dim, bias=False, dropout=0.0)
optimizer_vae = torch.optim.Adam(gcn_vae.parameters(), lr=1e-3)

gcn_step = RecursiveGraphConvolutionStepAddOn(features_dim,
                                              hidden_dim,
                                              out_dim,
                                              dropout=0.0)
optimizer = torch.optim.Adam(gcn_step.parameters(),
                             lr=1e-3,
Example #42
0
u0 = 10.5916
umin = np.array([9.6, 9.6, 9.6, 9.6]) - u0
umax = np.array([13., 13., 13., 13.]) - u0
xmin = np.array([
    -np.pi / 6, -np.pi / 6, -np.inf, -np.inf, -np.inf, -1., -np.inf, -np.inf,
    -np.inf, -np.inf, -np.inf, -np.inf
])
xmax = np.array([
    np.pi / 6, np.pi / 6, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,
    np.inf, np.inf, np.inf, np.inf
])

# Objective function
Q = sparse.diags([0., 0., 10., 10., 10., 10., 0., 0., 0., 5., 5., 5.])
QN = Q
R = 0.1 * sparse.eye(4)

# Initial and reference states
# x0: initial state
x0 = np.zeros(12)
# xr: reference state
xr = np.array([0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.])

# Prediction horizon
N = 10

# Cast MPC problem to a QP: x = (x(0),x(1),...,x(N),u(0),...,u(N-1))
# - quadratic objective
P = sparse.block_diag(
    [sparse.kron(sparse.eye(N), Q), QN,
     sparse.kron(sparse.eye(N), R)],
Example #43
0
def preprocess_adj(adj):
    """Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
    adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
    return sparse_to_tuple(adj_normalized)
Example #44
0
def test_hash():
    """Test dictionary hashing and comparison functions."""
    # does hashing all of these types work:
    # {dict, list, tuple, ndarray, str, float, int, None}
    d0 = dict(a=dict(a=0.1, b='fo', c=1),
              b=[1, 'b'],
              c=(),
              d=np.ones(3),
              e=None)
    d0[1] = None
    d0[2.] = b'123'

    d1 = deepcopy(d0)
    assert len(object_diff(d0, d1)) == 0
    assert len(object_diff(d1, d0)) == 0
    assert object_hash(d0) == object_hash(d1)

    # change values slightly
    d1['data'] = np.ones(3, int)
    d1['d'][0] = 0
    assert object_hash(d0) != object_hash(d1)

    d1 = deepcopy(d0)
    assert object_hash(d0) == object_hash(d1)
    d1['a']['a'] = 0.11
    assert (len(object_diff(d0, d1)) > 0)
    assert (len(object_diff(d1, d0)) > 0)
    assert object_hash(d0) != object_hash(d1)

    d1 = deepcopy(d0)
    assert object_hash(d0) == object_hash(d1)
    d1['a']['d'] = 0  # non-existent key
    assert (len(object_diff(d0, d1)) > 0)
    assert (len(object_diff(d1, d0)) > 0)
    assert object_hash(d0) != object_hash(d1)

    d1 = deepcopy(d0)
    assert object_hash(d0) == object_hash(d1)
    d1['b'].append(0)  # different-length lists
    assert (len(object_diff(d0, d1)) > 0)
    assert (len(object_diff(d1, d0)) > 0)
    assert object_hash(d0) != object_hash(d1)

    d1 = deepcopy(d0)
    assert object_hash(d0) == object_hash(d1)
    d1['e'] = 'foo'  # non-None
    assert (len(object_diff(d0, d1)) > 0)
    assert (len(object_diff(d1, d0)) > 0)
    assert object_hash(d0) != object_hash(d1)

    d1 = deepcopy(d0)
    d2 = deepcopy(d0)
    d1['e'] = StringIO()
    d2['e'] = StringIO()
    d2['e'].write('foo')
    assert (len(object_diff(d0, d1)) > 0)
    assert (len(object_diff(d1, d0)) > 0)

    d1 = deepcopy(d0)
    d1[1] = 2
    assert (len(object_diff(d0, d1)) > 0)
    assert (len(object_diff(d1, d0)) > 0)
    assert object_hash(d0) != object_hash(d1)

    # generators (and other types) not supported
    d1 = deepcopy(d0)
    d2 = deepcopy(d0)
    d1[1] = (x for x in d0)
    d2[1] = (x for x in d0)
    pytest.raises(RuntimeError, object_diff, d1, d2)
    pytest.raises(RuntimeError, object_hash, d1)

    x = sparse.eye(2, 2, format='csc')
    y = sparse.eye(2, 2, format='csr')
    assert ('type mismatch' in object_diff(x, y))
    y = sparse.eye(2, 2, format='csc')
    assert len(object_diff(x, y)) == 0
    y[1, 1] = 2
    assert ('elements' in object_diff(x, y))
    y = sparse.eye(3, 3, format='csc')
    assert ('shape' in object_diff(x, y))
    y = 0
    assert ('type mismatch' in object_diff(x, y))

    # smoke test for gh-4796
    assert object_hash(np.int64(1)) != 0
    assert object_hash(np.bool_(True)) != 0
Example #45
0
    def train(self, data):
        num_nodes = data.x.shape[0]
        features = preprocess_features(data.x.cpu().numpy())
        features = torch.FloatTensor(features).unsqueeze(0).to(self.device)

        adj = sp.coo_matrix(
            (np.ones(data.edge_index.shape[1]), data.edge_index.cpu()),
            (num_nodes, num_nodes),
        )
        adj = normalize_adj(adj + sp.eye(adj.shape[0]))
        sp_adj = sparse_mx_to_torch_sparse_tensor(adj)

        sp_adj = sp_adj.to(self.device)

        best = 1e9
        cnt_wait = 0
        b_xent = nn.BCEWithLogitsLoss()
        optimizer = torch.optim.Adam(self.model.parameters(),
                                     lr=0.001,
                                     weight_decay=0.0)

        epoch_iter = tqdm(range(self.epochs))
        for epoch in epoch_iter:
            self.model.train()
            optimizer.zero_grad()

            idx = np.random.permutation(num_nodes)
            shuf_fts = features[:, idx, :]

            lbl_1 = torch.ones(1, num_nodes)
            lbl_2 = torch.zeros(1, num_nodes)
            lbl = torch.cat((lbl_1, lbl_2), 1)

            shuf_fts = shuf_fts.to(self.device)
            lbl = lbl.to(self.device)

            logits = self.model(features, shuf_fts, sp_adj, True, None, None,
                                None)

            loss = b_xent(logits, lbl)

            epoch_iter.set_description(
                f"Epoch: {epoch:03d}, Loss: {loss.item()}")

            if loss < best:
                best = loss
                cnt_wait = 0
            else:
                cnt_wait += 1

            if cnt_wait == self.patience:
                print("Early stopping!")
                break

            loss.backward()
            optimizer.step()
        embeds, _ = self.model.embed(features, sp_adj, True, None)

        opt = {
            "idx_train": data.train_mask,
            "idx_val": data.val_mask,
            "idx_test": data.test_mask,
            "num_classes": self.nclass,
        }
        result = LogRegTrainer().train(embeds[0], data.y, opt)
        return result
Example #46
0
connected_packages = max(nx.connected_components(dependencies.to_undirected()),
                         key=len)
conn_dependencies = nx.subgraph(dependencies, connected_packages)
package_names = np.array(conn_dependencies.nodes())  # array for multi-indexing
adjacency_matrix = nx.to_scipy_sparse_matrix(conn_dependencies,
                                             dtype=np.float64)
n = len(package_names)
np.seterr(divide='ignore')  # ignore division-by-zero errors
from scipy import sparse

degrees = np.ravel(adjacency_matrix.sum(axis=1))
degrees_matrix = sparse.spdiags(1 / degrees, 0, n, n, format='csr')
transition_matrix = (degrees_matrix @ adjacency_matrix).T
from scipy.sparse.linalg.isolve import bicg  # biconjugate gradient solver
damping = 0.85
I = sparse.eye(n, format='csc')
pagerank, error = bicg(I - damping * transition_matrix,
                       (1-damping) / n * np.ones(n),
                       maxiter=int(1e4))
print('error code: ', error)
top = np.argsort(pagerank)[::-1]

print([package_names[i] for i in top[:40]])
def power(trans, damping=0.85, max_iter=int(1e5)):
    n = trans.shape[0]
    r0 = np.full(n, 1/n)
    r = r0
    for _ in range(max_iter):
        rnext = damping * trans @ r + (1 - damping) / n
        if np.allclose(rnext, r):
            print('converged')
Example #47
0
def minimize_vrtos(
    f_deriv,
    A,
    b,
    x0,
    step_size,
    prox_1=None,
    prox_2=None,
    alpha=0,
    max_iter=500,
    tol=1e-6,
    callback=None,
    verbose=0,
):
    r"""Variance-reduced three operator splitting (VRTOS) algorithm.

    The VRTOS algorithm can solve optimization problems of the form

        argmin_{x \in R^p} \sum_{i=1}^{n_samples} f(A_i^T x, b_i)
                           + alpha * ||x||_2^2 + pen1(x) + pen2(x)

    Parameters
    ----------
    f_deriv
        derivative of f

    x0: np.ndarray or None, optional
        Starting point for optimization.

    step_size: float or None, optional
        Step size for the optimization. If None is given, this will be
        estimated from the function f.

    n_jobs: int
        Number of threads to use in the optimization. A number higher than 1
        will use the Asynchronous SAGA optimization method described in
        [Pedregosa et al., 2017]

    max_iter: int
        Maximum number of passes through the data in the optimization.

    tol: float
        Tolerance criterion. The algorithm will stop whenever the norm of the
        gradient mapping (generalization of the gradient for nonsmooth
        optimization)
        is below tol.

    verbose: bool
        Verbosity level. True might print some messages.

    trace: bool
        Whether to trace convergence of the function, useful for plotting and/or
        debugging. If yes, the result will have extra members trace_func,
        trace_time.

    Returns
    -------
    opt: OptimizeResult
        The optimization result represented as a
        ``scipy.optimize.OptimizeResult`` object. Important attributes are:
        ``x`` the solution array, ``success`` a Boolean flag indicating if
        the optimizer exited successfully and ``message`` which describes
        the cause of the termination. See `scipy.optimize.OptimizeResult`
        for a description of other attributes.

    References
    ----------
    Pedregosa, Fabian, Kilian Fatras, and Mattia Casotto. "Variance Reduced
    Three Operator Splitting." arXiv preprint arXiv:1806.07294 (2018).
    """

    n_samples, n_features = A.shape
    success = False

    # FIXME: just a workaround for now
    # FIXME: check if prox_1 is a tuple
    if hasattr(prox_1, "__len__") and len(prox_1) == 2:
        blocks_1 = prox_1[1]
        prox_1 = prox_1[0]
    else:
        blocks_1 = sparse.eye(n_features, n_features, format="csr")
    if hasattr(prox_2, "__len__") and len(prox_2) == 2:
        blocks_2 = prox_2[1]
        prox_2 = prox_2[0]
    else:
        blocks_2 = sparse.eye(n_features, n_features, format="csr")

    Y = np.zeros((2, x0.size))
    z = x0.copy()

    assert A.shape[0] == b.size

    if step_size < 0:
        raise ValueError("step_size must be non-negative")

    if prox_1 is None:

        @utils.njit
        def prox_1(x, i, indices, indptr, d, step_size):
            pass

    if prox_2 is None:

        @utils.njit
        def prox_2(x, i, indices, indptr, d, step_size):
            pass

    A = sparse.csr_matrix(A)
    epoch_iteration = _factory_sparse_vrtos(f_deriv, prox_1, prox_2, blocks_1,
                                            blocks_2, A, b, alpha, step_size)

    # .. memory terms ..
    memory_gradient = np.zeros(n_samples)
    gradient_average = np.zeros(n_features)
    x1 = x0.copy()
    grad_tmp = np.zeros(n_features)

    # warm up for the JIT
    epoch_iteration(
        Y,
        x0,
        x1,
        z,
        memory_gradient,
        gradient_average,
        np.array([0]),
        grad_tmp,
        step_size,
    )

    # .. iterate on epochs ..
    if callback is not None:
        callback(locals())
    for it in range(max_iter):
        epoch_iteration(
            Y,
            x0,
            x1,
            z,
            memory_gradient,
            gradient_average,
            np.random.permutation(n_samples),
            grad_tmp,
            step_size,
        )

        certificate = np.linalg.norm(x0 - z) + np.linalg.norm(x1 - z)
        if callback is not None:
            callback(locals())

    return optimize.OptimizeResult(x=z,
                                   success=success,
                                   nit=it,
                                   certificate=certificate)
Example #48
0
def preprocess_adj(adj, symmetric=True):
    adj = adj + sp.eye(adj.shape[0])
    adj = normalize_adj(adj, symmetric)
    adj = adj.todense()
    return adj
    def _rebuild_operators(self):
        if self.mesh.x.lbc.type == 'pml' and self.compact:
            # build intermediates for the compact operator
            raise ValidationFunctionError(
                " This solver is in construction and is not yet complete. For variable density and compact PML."
            )
            dof = self.mesh.dof(include_bc=True)

            oc = self.operator_components

            built = oc.get('_numpy_components_built', False)
            oc.M = make_diag_mtx(self.model_parameters.C.squeeze()**-2)
            # build the static components
            if not built:
                # build Dxx
                oc.Dxx = build_derivative_matrix(
                    self.mesh,
                    2,
                    self.spatial_accuracy_order,
                    dimension='x',
                    use_shifted_differences=self.spatial_shifted_differences)
                # build Dzz
                oc.Dzz = build_derivative_matrix(
                    self.mesh,
                    2,
                    self.spatial_accuracy_order,
                    dimension='z',
                    use_shifted_differences=self.spatial_shifted_differences)
                # build Dx
                oc.Dx = build_derivative_matrix(
                    self.mesh,
                    1,
                    self.spatial_accuracy_order,
                    dimension='x',
                    use_shifted_differences=self.spatial_shifted_differences)

                # build Dz
                oc.Dz = build_derivative_matrix(
                    self.mesh,
                    1,
                    self.spatial_accuracy_order,
                    dimension='z',
                    use_shifted_differences=self.spatial_shifted_differences)

                # build sigma
                oc.sx, oc.sz, oc.sxp, oc.szp = self._sigma_PML(self.mesh)

                oc._numpy_components_built = True
        else:
            # build intermediates for operator with auxiliary fields
            dof = self.mesh.dof(include_bc=True)

            oc = self.operator_components

            built = oc.get('_numpy_components_built', False)

            # build the static components
            if not built:
                # build sigmax
                sx = build_sigma(self.mesh, self.mesh.x)
                oc.sigmax = make_diag_mtx(sx)

                # build sigmaz
                sz = build_sigma(self.mesh, self.mesh.z)
                oc.sigmaz = make_diag_mtx(sz)

                # build Dx
                oc.minus_Dx = build_derivative_matrix(
                    self.mesh,
                    1,
                    self.spatial_accuracy_order,
                    dimension='x',
                    use_shifted_differences=self.spatial_shifted_differences)
                oc.minus_Dx.data *= -1

                # build Dz
                oc.minus_Dz = build_derivative_matrix(
                    self.mesh,
                    1,
                    self.spatial_accuracy_order,
                    dimension='z',
                    use_shifted_differences=self.spatial_shifted_differences)
                oc.minus_Dz.data *= -1

                # build other useful things
                oc.I = spsp.eye(dof, dof)
                oc.empty = spsp.csr_matrix((dof, dof))

                # useful intermediates
                oc.sigma_xz = make_diag_mtx(sx * sz)
                oc.sigma_xPz = oc.sigmax + oc.sigmaz

                oc.minus_sigma_zMx_Dx = make_diag_mtx((sz - sx)) * oc.minus_Dx
                oc.minus_sigma_xMz_Dz = make_diag_mtx((sx - sz)) * oc.minus_Dz

                oc._numpy_components_built = True

        kappa = self.model_parameters.kappa
        rho = self.model_parameters.rho
        oc.m1 = make_diag_mtx((kappa**-1).reshape(-1, ))
        oc.m2 = make_diag_mtx((rho**-1).reshape(-1, ))
        # build heterogeneous laplacian
        sh = self.mesh.shape(include_bc=True, as_grid=True)
        deltas = [self.mesh.x.delta, self.mesh.z.delta]
        oc.L = build_derivative_matrix_VDA(self.mesh,
                                           2,
                                           self.spatial_accuracy_order,
                                           alpha=rho**-1)

        # oc.L is a heterogeneous Laplacian operator. It computes div(m2 grad), where m2 = 1/rho.
        # Currently the creation of oc.L is slow because it implements a centered heterogeneous Laplacian.
        # To speed up the construction, we could build a (non-centered) div(m2 grad) operator by simply
        # composing a divergence operator, oc.m2, and a gradient operator; a short sketch of that
        # factored form follows this example.

        self.K = spsp.bmat([[
            oc.m1 * oc.sigma_xz - oc.L, oc.minus_Dx * oc.m2,
            oc.minus_Dz * oc.m2
        ], [oc.minus_sigma_zMx_Dx, oc.sigmax, oc.empty],
                            [oc.minus_sigma_xMz_Dz, oc.empty, oc.sigmaz]])

        self.C = spsp.bmat([[oc.m1 * oc.sigma_xPz, oc.empty, oc.empty],
                            [oc.empty, oc.I, oc.empty],
                            [oc.empty, oc.empty, oc.I]])

        self.M = spsp.bmat([[oc.m1, oc.empty, oc.empty],
                            [oc.empty, oc.empty, oc.empty],
                            [oc.empty, oc.empty, oc.empty]])
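As noted in the comment above, the centered heterogeneous Laplacian could be replaced by a factored div(m2 grad) operator. A hedged 1-D sketch of that idea (this is not build_derivative_matrix_VDA; the midpoint evaluation of m2 and the sign convention are assumptions):

import numpy as np
import scipy.sparse as spsp

def heterogeneous_laplacian_1d(m2_midpoints, h):
    # Approximate div(m2 grad) as -D^T diag(m2 at cell midpoints) D / h^2,
    # where D is a forward-difference (gradient) operator.
    n = len(m2_midpoints) + 1
    D = spsp.diags([-np.ones(n - 1), np.ones(n - 1)], [0, 1], shape=(n - 1, n))
    M = spsp.diags(m2_midpoints)
    return -(D.T @ M @ D) / h**2

# With m2 = 1 the interior rows reduce to the usual [1, -2, 1] / h^2 stencil.
print(heterogeneous_laplacian_1d(np.ones(4), h=1.0).toarray())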
def load_data(
        directory: str,
        dataset: str,
        verbose: bool = False) -> Tuple['torch.Tensor',  # adjacency matrix
                                        'torch.Tensor',  # features matrix
                                        'torch.Tensor',  # labels
                                        'torch.Tensor',  # train index
                                        'torch.Tensor',  # validation index
                                        'torch.Tensor',  # test index
                                        ]:
    """Loads data from a dataset

    Args:
        directory: directory with all datasets to load
        dataset: dataset to load. (cora, citeseer, pubmed)
        verbose: if True, prints loading information. Default is False.
    
    Returns:
        Tuple of data in the format (adjacency, features, labels,
            train_index, validation_index, test_index)
    """
    datadir = pathlib.Path(directory)

    if not datadir.is_dir():
        raise FileNotFoundError(directory)

    if verbose:
        print(f'Loading {datadir} dataset')

    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    x, y, tx, ty, allx, ally, graph = map(
        lambda name: load_file(datadir.joinpath(f'ind.{dataset}.{name}')),
        names)
    index = [
        int(line.strip()) for line in datadir.joinpath(
            f'ind.{dataset}.test.index').open().readlines()
    ]
    index_sorted = np.sort(index)

    if dataset == 'citeseer':
        if verbose:
            print('Adding test data for citeseer unlabeled test nodes')
        index_range = range(index_sorted[0], index_sorted[-1] + 1)
        tx_extended = sp.lil_matrix((len(index_range), x.shape[1]))
        tx_extended[index_sorted - index_sorted[0], :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(index_range), y.shape[1]))
        ty_extended[index_sorted - index_sorted[0], :] = ty
        ty = ty_extended

    if dataset == 'nell.0.001' or dataset == 'nell.0.01':
        index_range = range(allx.shape[0], len(graph))
        isolated_node_idx = np.setdiff1d(index_range, index)
        tx_extended = sp.lil_matrix((len(index_range), x.shape[1]))
        tx_extended[index_sorted - allx.shape[0], :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(index_range), y.shape[1]))
        ty_extended[index_sorted - allx.shape[0], :] = ty
        ty = ty_extended

        features = sp.vstack((allx, tx)).tolil()
        features[index, :] = features[index_sorted, :]

        features_file = dataset + ".features.npz"
        features_path = pathlib.Path("data", features_file)

        if features_path.is_file():
            features = load_csr_matrix(features_path)
        else:
            if verbose:
                print(
                    "features.npz not found. Creating feature vectors for node relations."
                )
            features_extended = sp.hstack(
                (features,
                 sp.lil_matrix((features.shape[0], len(isolated_node_idx)))),
                dtype=np.int32).todense()
            features_extended[isolated_node_idx, features.shape[1]:] = np.eye(
                len(isolated_node_idx))
            features = sp.csr_matrix(features_extended).astype(np.float16)
            if verbose:
                print("Saving features.npz")
            save_csr_matrix(features_path, features)

        features = normalize(features)
        features = sparse_mx_to_torch(features)
    else:
        # Process features
        features = sp.vstack((allx, tx)).tolil()
        features[index, :] = features[index_sorted, :]
        features = normalize(features)
        features = torch.FloatTensor(np.array(features.todense()))

    # Process labels
    labels = np.vstack((ally, ty))
    labels[index, :] = labels[index_sorted, :]
    labels = torch.argmax(torch.LongTensor(labels), dim=1)

    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph)).tocoo()
    # Make the directed graph undirected
    adj += adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    adj = normalize_adj(adj + sp.eye(adj.shape[0]))
    adj = sparse_mx_to_torch(adj)

    index_train = torch.LongTensor(range(len(y)))
    index_val = torch.LongTensor(range(len(y), len(y) + 500))
    index_test = torch.LongTensor(index_sorted)
    return adj, features, labels, index_train, index_val, index_test
Example #51
0
def nontuple_preprocess_adj(adj):
    adj_normalized = normalize_adj(sp.eye(adj.shape[0]) + adj)
    # adj_normalized = sp.eye(adj.shape[0]) + normalize_adj(adj)
    return adj_normalized.tocsr()
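normalize_adj is called by several of these snippets but never defined in them. A minimal sketch under the usual GCN convention (symmetric normalisation D^{-1/2} A D^{-1/2}, with zero-degree rows left at zero); the actual helper in each codebase may differ:

import numpy as np
import scipy.sparse as sp

def normalize_adj_sketch(adj):
    adj = sp.coo_matrix(adj)
    deg = np.ravel(adj.sum(axis=1))
    with np.errstate(divide='ignore'):
        d_inv_sqrt = np.power(deg, -0.5)
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return (d_mat_inv_sqrt @ adj @ d_mat_inv_sqrt).tocoo()

# Typical use, mirroring the examples above:
# adj_normalized = normalize_adj_sketch(adj + sp.eye(adj.shape[0]))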
Example #52
0
    RDM_env = np.trace(rho_tensor, axis1=0, axis2=2)
    return RDM_sys, RDM_env


### iDMRG Algorithm

while (sysBlock_Length + envBlock_Length) < LatticeLength:
    """ Construct a sysBlock_Ham which describes the sysBlock """
    # 1. Initialize the dimension of sysBlock = kron(sysBlock_Ham, eye(Dim))
    # 2. Include the interaction between the sites inside the sysBlock and the newly added site
    # 3. Describe their interactions as kron(sysBlock..., S...), i.e. sysBlock \otimes S(new site)
    """ Number operator: the meaning of the sysBlock operators is unchanged in this step.
    The number operator must be enlarged before sysBlock_Ham is, since the sysBlock_Ham
    dimension grows from 4**n to 4**(n+1) below.
    """
    sysBlock_N = kron(sysBlock_N, eye(Dim)) + kron(
        eye(sysBlock_Ham.shape[0]),
        a_up.conj().T @ a_up + a_down.conj().T @ a_down)

    # consider the sysBlock already have two sites
    """ Don't know why the on-site spin spin interaction term is not hermition 
    To force the Hamiltonian becomes Hermitian, we H = 0.5( H + H.conj().T) 

    Seems even in t is non-zero, the sysBlock_Ham is not hermitian already. But interestingly the envBlock is hermitian......
    mu is ok, but t and U terms f**k up...... 
    In the 1st loop: Total Length = 6, such weird thing happens 
    when t != 0,  sysBlock_ham is not hermitian,  envBlock_ham is hermitian
    when U != 0 , sysBlock_ham is hermitian , envBlock_ham is not hermitian  
    """
    sysBlock_Ham = kron(sysBlock_Ham, eye(Dim)) \
                   - t * (kron(sysBlock_a_up.conj().T @ sysBlock_F, a_up) + kron(sysBlock_a_down.conj().T, F @ a_down)) \
Example #53
0
def normalized_laplacian(adj, symmetric=True):
    adj_normalized = normalize_adj(adj, symmetric)
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    return laplacian
Example #54
0
    def solve(self,
              data: np.ndarray,
              maxiter: int = 150,
              tol: float = 5 * 10**(-4)):

        if self.reg_mode is not None:
            if len(self.domain_shape) > 2:
                grad = Gradient(dims=self.domain_shape,
                                edge=False,
                                dtype='float64')
            else:
                dx = sparse.diags([1, -1], [0, 1],
                                  shape=(self.domain_shape[1],
                                         self.domain_shape[1])).tocsr()
                dx[self.domain_shape[1] - 1, :] = 0
                dy = sparse.diags([-1, 1], [0, 1],
                                  shape=(self.domain_shape[0],
                                         self.domain_shape[0])).tocsr()
                dy[self.domain_shape[0] - 1, :] = 0

                grad = sparse.vstack(
                    (sparse.kron(dx,
                                 sparse.eye(self.domain_shape[0]).tocsr()),
                     sparse.kron(sparse.eye(self.domain_shape[1]).tocsr(),
                                 dy)))

            K = self.alpha * grad
            if not self.tau:
                long_run = np.prod(self.domain_shape) > 25000
                if long_run:
                    print("Start evaluating tau; this may take a long time.")
                if len(self.domain_shape) > 2:
                    norm = np.abs(K.eigs(neigs=1, which='LM')).item()  # np.asscalar is deprecated
                else:
                    norm = normest(K)
                sigma = 0.99 / norm
                print("Calc tau: " + str(sigma))
                tau = sigma
            else:
                tau = self.tau
                sigma = tau

            if self.reg_mode == 'tv':
                F_star = Projection(self.domain_shape, len(self.domain_shape))
            else:
                F_star = DatatermLinear()
                F_star.set_proxdata(0)
        else:
            tau = 0.99
            sigma = tau
            F_star = DatatermLinear()
            K = 0

        G = DatatermRecBregman(self.O)
        G.set_proxparam(tau)
        G.set_proxdata(data.ravel())
        F_star.set_proxparam(sigma)

        pk = np.zeros(self.domain_shape)
        pk = pk.T.ravel()
        plt.Figure()
        ulast = np.zeros(self.domain_shape)
        u01 = ulast
        i = 0

        while np.linalg.norm(self.O * u01.ravel() -
                             data.ravel()) > self.assessment:
            print(np.linalg.norm(self.O * u01.ravel() - data.ravel()))
            print(self.assessment)

            self.solver = PdHgm(K, F_star, G)
            self.solver.maxiter = maxiter
            self.solver.tol = tol

            G.set_proxdata(data.ravel())
            G.setP(pk)
            self.solver.solve()
            u01 = np.reshape(np.real(self.solver.var['x']), self.domain_shape)
            pk = pk - (1 / self.alpha) * np.real(
                self.O.H * (self.O * u01.ravel() - data.ravel()))
            i = i + 1
            if self.plot_iteration:
                plt.gray()
                plt.imshow(u01, vmin=0, vmax=1)
                plt.axis('off')
                #plt.title('RRE =' + str(round(RRE_breg, 2)), y=-0.1, fontsize=20)
                plt.savefig(self.data_output_path +
                            'Bregman_reconstruction_iter' + str(i) + '.png',
                            bbox_inches='tight',
                            pad_inches=0)
                plt.close()

        return np.reshape(self.solver.var['x'], self.domain_shape)
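The 2-D branch above assembles the discrete gradient by stacking Kronecker products of 1-D difference matrices. A quick stand-alone check of that construction (one x-difference and one y-difference per pixel; constant images map to zero):

import numpy as np
from scipy import sparse

rows, cols = 3, 4
dx = sparse.diags([1, -1], [0, 1], shape=(cols, cols)).tocsr()
dx[cols - 1, :] = 0
dy = sparse.diags([-1, 1], [0, 1], shape=(rows, rows)).tocsr()
dy[rows - 1, :] = 0
grad = sparse.vstack((sparse.kron(dx, sparse.eye(rows).tocsr()),
                      sparse.kron(sparse.eye(cols).tocsr(), dy)))

print(grad.shape)                                 # (24, 12): 2 differences per pixel
print(np.abs(grad @ np.ones(rows * cols)).max())  # 0.0: constants are annihilated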
Example #55
0
 def ranking_function(self, y, m):
     """Creates ranking function
     Parameters
     ----------
     y: ndarray
         query vertex vector
     m: float
         tradeoff
     Returns 
     -------
     f: ndarray
         ranking vector
     """
     #===============================================================================
     # RUNS OUT OF MEMORY
     #===============================================================================
     #===============================================================================
     #         if not self.rankFunc:
     #             print('...computing Dv...')
     #             Dv=self.vertex_degrees().tocsc()
     #
     #             print('...computing H...')
     #             H=self.incidence_matrix().tocsc()
     #
     #             print('...computing W...')
     #             W=self.weight_matrix().tocsc()
     #
     #             print('...computing De...')
     #             De=self.edge_degrees().tocsc()
     #
     #             print('...computing Dv rec inv...')
     #             Dv_rec_inv=spla.inv(Dv).sqrt()
     #
     #             print('...computing Theta...')
     #             Theta=Dv_rec_inv*H*W*(spla.inv(De))*(H.transpose())*Dv_rec_inv
     #
     #             print('...deleting unnecessary matrices...')
     #             del(Dv)
     #             del(H)
     #             del(W)
     #             del(De)
     #             del(Dv_rec_inv)
     #             gc.collect()
     #
     #             Theta=Theta.tocsc()
     #
     #             print('...computing rankFunc...')
     #             self.rankFunc=spla.inv((spsp.eye(*sp.shape(Theta)).tocsc()-(1/(1+m))*Theta).tocsc())
     #
     #             del(Theta)
     #             gc.collect()
     #
     #         return self.rankFunc*y
     #===============================================================================
     if self.Theta is None:
         self.theta_matrix()
     if self.rankFunc is None:
         self.rankFunc = spsp.eye(
             *sp.shape(self.Theta)) - (1.0 / (1.0 + m)) * self.Theta
     f = spla.spsolve(self.rankFunc, y, use_umfpack=True)
     return f
Example #56
0
def diamond_norm(choi, **kwargs):
    r"""Return the diamond norm of the input quantum channel object.

    This function computes the completely-bounded trace-norm (often
    referred to as the diamond-norm) of the input quantum channel object
    using the semidefinite-program from reference [1].

    Args:
        choi(Choi or QuantumChannel): a quantum channel object or
                                      Choi-matrix array.
        kwargs: optional arguments to pass to CVXPY solver.

    Returns:
        float: The completely-bounded trace norm
               :math:`\|\mathcal{E}\|_{\diamond}`.

    Raises:
        QiskitError: if CVXPY package cannot be found.

    Additional Information:
        The input to this function is typically *not* a CPTP quantum
        channel, but rather the *difference* between two quantum channels
        :math:`\|\Delta\mathcal{E}\|_\diamond` where
        :math:`\Delta\mathcal{E} = \mathcal{E}_1 - \mathcal{E}_2`.

    Reference:
        J. Watrous. "Simpler semidefinite programs for completely bounded
        norms", arXiv:1207.5726 [quant-ph] (2012).

    .. note::

        This function requires the optional CVXPY package to be installed.
        Any additional kwargs will be passed to the ``cvxpy.solve``
        function. See the CVXPY documentation for information on available
        SDP solvers.
    """
    _cvxpy_check('`diamond_norm`')  # Check CVXPY is installed

    if not isinstance(choi, Choi):
        choi = Choi(choi)

    def cvx_bmat(mat_r, mat_i):
        """Block matrix for embedding complex matrix in reals"""
        return cvxpy.bmat([[mat_r, -mat_i], [mat_i, mat_r]])

    # Dimension of input and output spaces
    dim_in = choi._input_dim
    dim_out = choi._output_dim
    size = dim_in * dim_out

    # SDP Variables to convert to real valued problem
    r0_r = cvxpy.Variable((dim_in, dim_in))
    r0_i = cvxpy.Variable((dim_in, dim_in))
    r0 = cvx_bmat(r0_r, r0_i)

    r1_r = cvxpy.Variable((dim_in, dim_in))
    r1_i = cvxpy.Variable((dim_in, dim_in))
    r1 = cvx_bmat(r1_r, r1_i)

    x_r = cvxpy.Variable((size, size))
    x_i = cvxpy.Variable((size, size))
    iden = sparse.eye(dim_out)

    # Watrous uses row-vec convention for his Choi matrix while we use
    # col-vec. It turns out row-vec convention is required for CVXPY too
    # since the cvxpy.kron function must have a constant as its first argument.
    c_r = cvxpy.bmat([[cvxpy.kron(iden, r0_r), x_r],
                      [x_r.T, cvxpy.kron(iden, r1_r)]])
    c_i = cvxpy.bmat([[cvxpy.kron(iden, r0_i), x_i],
                      [-x_i.T, cvxpy.kron(iden, r1_i)]])
    c = cvx_bmat(c_r, c_i)

    # Convert col-vec convention Choi-matrix to row-vec convention and
    # then take Transpose: Choi_C -> Choi_R.T
    choi_rt = np.transpose(
        np.reshape(choi.data, (dim_in, dim_out, dim_in, dim_out)),
        (3, 2, 1, 0)).reshape(choi.data.shape)
    choi_rt_r = choi_rt.real
    choi_rt_i = choi_rt.imag

    # Constraints
    cons = [
        r0 >> 0, r0_r == r0_r.T, r0_i == -r0_i.T,
        cvxpy.trace(r0_r) == 1, r1 >> 0, r1_r == r1_r.T, r1_i == -r1_i.T,
        cvxpy.trace(r1_r) == 1, c >> 0
    ]

    # Objective function
    obj = cvxpy.Maximize(
        cvxpy.trace(choi_rt_r @ x_r) + cvxpy.trace(choi_rt_i @ x_i))
    prob = cvxpy.Problem(obj, cons)
    sol = prob.solve(**kwargs)
    return sol
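A hedged usage sketch for the function above: the diamond distance between two nearby single-qubit channels (the Z-rotation example and the expected value are illustrative; requires qiskit.quantum_info and CVXPY):

import numpy as np
from qiskit.quantum_info import Choi, Operator

theta = 0.05
id_channel = Choi(Operator(np.eye(2)))
rz_channel = Choi(Operator(np.diag([1.0, np.exp(1j * theta)])))

delta = rz_channel - id_channel        # difference of two channels (not CPTP)
print(diamond_norm(delta))             # expected to be close to 2 * sin(theta / 2)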
Example #57
0
    rhs = f(Xint, Yint)  # evaluate f at interior points for right hand side
    # rhs is modified below for boundary
    usoln = np.zeros(X.shape)

    #        rhs[:,0] -= usoln[1:-1,0] / h**2
    #        rhs[:,-1] -= usoln[1:-1,-1] / h**2
    #        rhs[0,:] -= usoln[0,1:-1] / h**2
    #        rhs[-1,:] -= usoln[-1,1:-1] / h**2
    # set boundary conditions around edges of usoln array:

    # convert the 2d grid function rhs into a column vector for rhs of system:
    F = rhs.reshape((m[i] * m[i], 1))

    # form matrix A:
    I = sp.eye(m[i], m[i])
    g = np.ones(m[i])
    T = sp.spdiags([g, -4. * g, g], [-1, 0, 1], m[i], m[i])
    S = sp.spdiags([g, g], [-1, 1], m[i], m[i])
    A = (sp.kron(I, T) + sp.kron(S, I)) / (h[i]**2)
    A = A.tocsr()

    # Solve the linear system:
    uvec = spsolve(A, F)
    #        usoln[1:-1, 1:-1] = uvec.reshape( (m,m) )
    # reshape vector solution uvec as a grid function and
    # insert this interior solution into usoln for plotting purposes:
    # (recall boundary conditions in usoln are already set)

    usoln[1:-1, 1:-1] = uvec.reshape((m[i], m[i]))
    umax = usoln.max()
def calcSolution(h, show_matrix, show_result):
    ax = 0.0
    bx = 2.0
    ay = 0.0
    by = 1.0
    mx = int(round(2.0/h)) - 1
    my = int(round(1.0/h)) - 1
    x = np.linspace(ax,bx,mx+2)   # grid points x including boundaries
    y = np.linspace(ay,by,my+2)   # grid points y including boundaries

    X,Y = np.meshgrid(x,y)     # 2d arrays of x,y values
    X = X.T                    # transpose so that X(i,j),Y(i,j) are
    Y = Y.T                    # coordinates of (i,j) point

    Xint = X[1:-1,1:-1]        # interior points
    Yint = Y[1:-1,1:-1]
    rhs = f(Xint,Yint)         # evaluate f at interior points for right hand side
                           # rhs is modified below for boundary conditions.

    # set boundary conditions around edges of usoln array:

    usoln = np.zeros(X.shape)
    usoln[:,0] = u_exact(x,ay)
    usoln[:,-1] = u_exact(x,by)
    usoln[0,:] = u_exact(ax,y)
    usoln[-1,:] = u_exact(bx,y)

    # adjust the rhs to include boundary terms: 
    rhs[:,0] -= usoln[1:-1,0] / h**2
    rhs[:,-1] -= usoln[1:-1,-1] / h**2
    rhs[0,:] -= usoln[0,1:-1] / h**2
    rhs[-1,:] -= usoln[-1,1:-1] / h**2


    # convert the 2d grid function rhs into a column vector for rhs of system:
    F = rhs.reshape((mx*my,1))
    
    # form matrix A:
    Ix = sp.eye(mx,mx)
    Iy = sp.eye(my,my)
    ex = np.ones(mx)
    ey = np.ones(my)
    T = sp.spdiags([ey,-4.*ey,ey],[-1,0,1],my,my)
    S = sp.spdiags([ex,ex],[-1,1],mx,mx)
    A = (sp.kron(Ix,T) + sp.kron(S,Iy)) / h**2    
    A = A.tocsr()
    
    if show_matrix:
        pylab.figure()
        pylab.spy(A,marker='.')
        
    # Solve the linear system:
    tic = time.time()
    uvec = spsolve(A, F)
    toc = time.time()
    
    # reshape vector solution uvec as a grid function and
    # insert this interior solution into usoln for plotting purposes:
    # (recall boundary conditions in usoln are already set)
    
    usoln[1:-1, 1:-1] = uvec.reshape( (mx,my) )
    
    if show_result:
        # plot results:
        pylab.figure()
        ax = Axes3D(pylab.gcf())
        ax.plot_surface(X,Y,usoln, rstride=1, cstride=1, cmap=pylab.cm.jet)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('u')
        #pylab.axis([a, b, a, b])
        #pylab.daspect([1 1 1])
        pylab.title('Surface plot of computed solution')
        
        pylab.show(block=False)
Example #59
0
def bethe_hessian(A, r):
    D = degree(A)
    H = (r ** 2 - 1) * sps.eye(A.shape[0]) - r * A + D
    return H
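A hedged usage sketch: with the common choice r = sqrt(mean degree), the number of negative eigenvalues of the Bethe Hessian estimates the number of communities (Saade, Krzakala and Zdeborová, 2014). The snippet assumes degree(A) returns the sparse degree matrix used above:

import numpy as np
from scipy.sparse.linalg import eigsh

def estimate_num_communities(A, k=10):
    mean_deg = A.sum() / A.shape[0]
    H = bethe_hessian(A, np.sqrt(mean_deg))
    vals = eigsh(H, k=min(k, A.shape[0] - 1), which='SA',
                 return_eigenvectors=False)
    return int((vals < 0).sum())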
Example #60
0
    def fitImplicit(self,
                    train,
                    alpha=10.,
                    c="linear",
                    eps=1E-8,
                    userGraph=None,
                    itemGraph=None,
                    method="random",
                    U0=None,
                    V0=None):
        """
        Learn factors from the training set with the implicit formula of Koren,
        "Collaborative Filtering for Implicit Feedback Datasets".
        User and item factors are fitted alternately.

        train : array-like of three columns
            contains the row index, column index and value of the non-null entries
        alpha : float
            Confidence weight
        c : string
            if c="linear", C = 1 + alpha*R
            if c="log", C = 1 + alpha*log(1 + R/eps)
        eps : float
            used only if c="log"
        userGraph : array-like of three columns
            the first two columns are the indices of the linked users,
            the third is the weight of the link
        itemGraph : same as userGraph for the item links
        method : string
            factor initialisation. Can be "random", "svd", or given in U0 and V0
        U0, V0 : array-like
            initial values for U and V. If not None, method is ignored
        """
        if self.seed is not None:
            np.random.seed(self.seed)

        # We define a single function transfo that is either linear_transfo
        # or log_transfo. This avoids repeating the if/else check for each
        # user and each item at every iteration.
        if c == "linear":
            self.transfo = self.linear_transfo
        elif c == "log":
            self.transfo = self.log_transfo

        self.alpha = alpha
        self.c = c
        self.eps = eps

        self.userGraph = False
        self.itemGraph = False

        self.train = sparse_matrix(train, n=self.num_users, p=self.num_items)

        self.n_u = list(
            map(lambda u: self.train[u, :].nnz, range(self.num_users)))
        self.n_i = list(
            map(lambda i: self.train[:, i].nnz, range(self.num_items)))

        if userGraph is not None:
            self.userGraph = True
            self.A_user = sparse.csgraph.laplacian(
                sparse_matrix(userGraph,
                              n=self.num_users,
                              p=self.num_users,
                              w=(1 - self.mu) / self.mu))
            if self.reg == "weighted":
                self.A_user += sparse.diags(self.n_u)
            elif self.reg == "default":
                self.A_user += sparse.eye(self.num_users)
            #self.A_user_sqrt = kron(sqrtm(self.A_user.todense()).real,sparse.eye(self.d))
            self.A_user = kron(self.A_user, sparse.eye(self.d))

        if itemGraph is not None:
            self.itemGraph = True
            self.A_item = sparse.csgraph.laplacian(
                sparse_matrix(itemGraph,
                              n=self.num_items,
                              p=self.num_items,
                              w=(1 - self.mu) / self.mu))
            if self.reg == "weighted":
                self.A_item += sparse.diags(self.n_i)
            elif self.reg == "default":
                self.A_item += sparse.eye(self.num_items)
            #self.A_item_sqrt = kron(sqrtm(self.A_item.todense()).real,sparse.eye(self.d))
            self.A_item = kron(self.A_item, sparse.eye(self.d))

        self.U = np.random.normal(size=(self.num_users, self.d))
        self.V = np.random.normal(size=(self.num_items, self.d))

        for it in range(self.num_iters):
            VV = self.V.T.dot(self.V)
            if self.userGraph:
                self.U = self.implicit_user_iteration(VV)
            else:
                for u in range(self.num_users):
                    indices = self.train[u].nonzero()[1]
                    if indices.size:
                        R_u = self.train[u, indices]
                        self.U[u, :] = self.implicit_update(
                            indices, self.V, VV,
                            R_u.toarray()[0])
                    else:
                        self.U[u, :] = np.zeros(self.d)

            UU = self.U.T.dot(self.U)
            if self.itemGraph:
                self.V = self.implicit_item_iteration(UU)
            else:
                for i in range(self.num_items):
                    indices = self.train[:, i].nonzero()[0]
                    if indices.size:
                        R_i = self.train[indices, i]
                        self.V[i, :] = self.implicit_update(
                            indices, self.U, UU,
                            R_i.toarray().T[0])
                    else:
                        self.V[i, :] = np.zeros(self.d)

            if self.verbose:
                print("end iteration " + str(it + 1))