Code example #1
    def test_custom_filter(self):
        def kernel(x):
            return x / (1. + x)
        f = filters.Filter(self._G, kernels=kernel)
        self.assertEqual(f.Nf, 1)
        self.assertIs(f._kernels[0], kernel)
        self._test_methods(f, tight=False)
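
Outside the test class, the same single-kernel construction can be sketched as follows (assuming only the pygsp API exercised by the test; the Sensor graph and the random signal are illustrative choices):

import numpy as np
from pygsp import graphs, filters

G = graphs.Sensor(64, seed=42)
G.estimate_lmax()  # needed for Chebyshev filtering

# A Filter built from a single custom kernel; f.Nf == 1 as the test asserts.
f = filters.Filter(G, kernels=lambda x: x / (1. + x))
y = f.filter(np.random.default_rng(0).standard_normal(G.N))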
Code example #2
def compute_spectrogram(G, atom=None, M=100, **kwargs):
    r"""
    Compute the norm of the Tig for all nodes with a kernel shifted along the
    spectral axis.

    Parameters
    ----------
    G : Graph
        Graph on which to compute the spectrogram.
    atom : func
        Kernel to use in the spectrogram (default = exp(-M*(x/lmax)²)).
    M : int (optional)
        Number of samples on the spectral scale. (default = 100)
    kwargs : dict
        Additional parameters to be passed to the
        :func:`pygsp.filters.Filter.filter` method.
    """

    if not atom:
        def atom(x):
            return np.exp(-M * (x / G.lmax)**2)

    scale = np.linspace(0, G.lmax, M)
    spectr = np.empty((G.N, M))

    for shift_idx in range(M):
        shift_filter = filters.Filter(G, lambda x: atom(x - scale[shift_idx]))
        tig = compute_norm_tig(shift_filter, **kwargs).squeeze()**2
        spectr[:, shift_idx] = tig

    G.spectr = spectr
    return spectr
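
A minimal usage sketch, assuming this function is the one shipped as pygsp.features.compute_spectrogram:

from pygsp import graphs, features

G = graphs.Sensor(64, seed=0)
G.estimate_lmax()
spectr = features.compute_spectrogram(G, M=50)
print(spectr.shape)  # (G.N, 50): one column per spectral shift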
Code example #3
    def test_frame_bounds(self):
        # Not a frame: the Rectangular filter has a null space.
        g = filters.Rectangular(self._G)
        A, B = g.estimate_frame_bounds()
        self.assertEqual(A, 0)
        self.assertEqual(B, 1)
        # A constant kernel g(x) = 2 is tight: A = B = 2**2 = 4.
        g = filters.Filter(self._G, lambda x: np.full_like(x, 2))
        A, B = g.estimate_frame_bounds()
        self.assertEqual(A, 4)
        self.assertEqual(B, 4)
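
For context, a filter bank {g_k} is a frame with bounds A, B when A <= sum_k |g_k(lambda)|^2 <= B over the spectrum, and tight when A = B; estimate_frame_bounds samples that sum, which is why the constant kernel g(x) = 2 above yields A = B = 4. A quick check on a non-trivial bank (a sketch assuming pygsp's built-in MexicanHat filter bank):

from pygsp import graphs, filters

G = graphs.Sensor(64, seed=0)
G.estimate_lmax()
g = filters.MexicanHat(G, Nf=6)
A, B = g.estimate_frame_bounds()
print(B / A)  # the closer to 1, the tighter (better conditioned) the frame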
Code example #4
File: reduction.py | Project: BenJamesbabala/pygsp
def interpolate(G,
                f_subsampled,
                keep_inds,
                order=100,
                reg_eps=0.005,
                **kwargs):
    r"""Interpolate a graph signal.

    Parameters
    ----------
    G : Graph
    f_subsampled : ndarray
        A graph signal on the graph G.
    keep_inds : ndarray
        List of indices on which the signal is sampled.
    order : int
        Degree of the Chebyshev approximation (default = 100).
    reg_eps : float
        The regularized graph Laplacian is $\bar{L}=L+\epsilon I$.
        A smaller epsilon may lead to better regularization,
        but will also require a higher order Chebyshev approximation.

    Returns
    -------
    f_interpolated : ndarray
        Interpolated graph signal on the full vertex set of G.

    References
    ----------
    See :cite:`pesenson2009variational`

    """
    L_reg = G.L + reg_eps * sparse.eye(G.N)
    # G.mr is a dict (see graph_multiresolution), so getattr() never finds
    # these entries, and its default argument is evaluated eagerly anyway.
    # Look them up explicitly and compute the fallbacks only when missing.
    try:
        K_reg = G.mr['K_reg']
    except (AttributeError, KeyError):
        K_reg = kron_reduction(L_reg, keep_inds)
    try:
        green_kernel = G.mr['green_kernel']
    except (AttributeError, KeyError):
        green_kernel = filters.Filter(G, lambda x: 1. / (reg_eps + x))

    alpha = K_reg.dot(f_subsampled)

    try:
        Nv = np.shape(f_subsampled)[1]
        f_interpolated = np.zeros((G.N, Nv))
    except IndexError:
        f_interpolated = np.zeros((G.N))

    f_interpolated[keep_inds] = alpha

    return _analysis(green_kernel, f_interpolated, order=order, **kwargs)
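
A round-trip sketch, assuming pygsp's reduction module (the mr attribute read above is attached by graph_multiresolution, shown in code example #11):

import numpy as np
from pygsp import graphs, reduction

G = graphs.Sensor(256, seed=1)
Gs = reduction.graph_multiresolution(G, levels=1, sparsify=False)
keep = Gs[1].mr['idx']
f = np.random.default_rng(0).standard_normal(G.N)
f_hat = reduction.interpolate(Gs[0], f[keep], keep)  # back on all of G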
Code example #5
def generate_test_vectors(G,
                          num_vectors=10,
                          method="Gauss-Seidel",
                          iterations=5,
                          lambda_cut=0.1):

    L = G.L
    N = G.N
    X = np.random.randn(N, num_vectors) / np.sqrt(N)

    if method == "GS" or method == "Gauss-Seidel":

        L_upper = sp.sparse.triu(L, 1, format="csc")
        L_lower_diag = sp.sparse.triu(L, 0, format="csc").T

        for j in range(num_vectors):
            x = X[:, j]
            for t in range(iterations):
                x = -sp.sparse.linalg.spsolve_triangular(
                    L_lower_diag, L_upper @ x)
            X[:, j] = x
        return X

    if method == "JC" or method == "Jacobi":

        deg = G.dw.astype(np.float)
        D = sp.sparse.diags(deg, 0)
        deginv = deg**(-1)
        deginv[deginv == np.Inf] = 0
        Dinv = sp.sparse.diags(deginv, 0)
        M = Dinv.dot(D - L)

        for j in range(num_vectors):
            x = X[:, j]
            for t in range(iterations):
                x = 0.5 * x + 0.5 * M.dot(x)
            X[:, j] = x
        return X

    elif method == "Chebychev":
        from pygsp import filters

        f = filters.Filter(
            G, lambda x: ((x <= lambda_cut) * 1).astype(np.float32))
        return f.filter(X, method="chebyshev", order=50)
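
A usage sketch (assuming the function above is importable together with its numpy/scipy/pygsp dependencies): the returned vectors are algebraically smooth, i.e. they approximately satisfy L x ≈ 0, which is what makes them useful as test vectors for coarsening.

import numpy as np
from pygsp import graphs

G = graphs.Sensor(128, seed=0)
X = generate_test_vectors(G, num_vectors=4, method="Jacobi", iterations=10)
print(np.linalg.norm(G.L @ X) / np.linalg.norm(X))  # small after smoothing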
Code example #6
def multiresolution(G, levels, sparsify=True):
    # Condensed variant of graph_multiresolution (code example #11);
    # kronReduction and sparsifyGraph are assumed to be local aliases of
    # pygsp's kron_reduction and graph_sparsify.

    sparsify_eps = min(10. / np.sqrt(G.N), 0.3)
    reg_eps = 0.005

    G.estimate_lmax()

    Gs = [G]
    Gs[0].mr = {'idx': np.arange(G.N), 'orig_idx': np.arange(G.N)}

    for i in range(levels):

        if hasattr(Gs[i], '_U'):
            V = Gs[i].U[:, -1]
        else:
            V = linalg.eigs(Gs[i].L, 1)[1][:, 0]

        V *= np.sign(V[0])
        ind = np.nonzero(V >= 0)[0]

        Gs.append(kronReduction(Gs[i], ind))

        if sparsify and Gs[i + 1].N > 2:
            Gs[i + 1] = sparsifyGraph(
                Gs[i + 1], min(max(sparsify_eps, 2. / np.sqrt(Gs[i + 1].N)),
                               1.))

        Gs[i + 1].estimate_lmax()

        Gs[i + 1].mr = {
            'idx': ind,
            'orig_idx': Gs[i].mr['orig_idx'][ind],
            'level': i
        }

        L_reg = Gs[i].L + reg_eps * sparse.eye(Gs[i].N)
        Gs[i].mr['K_reg'] = kronReduction(L_reg, ind)
        Gs[i].mr['green_kernel'] = filters.Filter(Gs[i], lambda x: 1. /
                                                  (reg_eps + x))

    return Gs
Code example #7
    def test_regression_tikhonov_3(self, tau=3.5):
        """Solve a relaxed regression problem."""
        G = graphs.Sensor(100)
        G.estimate_lmax()

        # Create a smooth signal.
        filt = filters.Filter(G, lambda x: 1 / (1 + 10 * x))
        rs = np.random.RandomState(1)
        signal = filt.analyze(rs.normal(size=(G.n_vertices, 6)))

        # Make the input signal.
        mask = rs.uniform(0, 1, G.n_vertices) > 0.5
        measures = signal.copy()
        measures[~mask] = 18
        measures_bak = measures.copy()

        L = G.L.toarray()
        recovery = np.matmul(np.linalg.inv(np.diag(1 * mask) + tau * L),
                             (mask * measures.T).T)

        # Solve the problem.
        recovery0 = learning.regression_tikhonov(G, measures, mask, tau=tau)
        np.testing.assert_allclose(measures_bak, measures)
        recovery1 = np.zeros_like(recovery0)
        for i in range(recovery0.shape[1]):
            recovery1[:, i] = learning.regression_tikhonov(
                G, measures[:, i], mask, tau)
        np.testing.assert_allclose(measures_bak, measures)

        G = graphs.Graph(G.W.toarray())
        recovery2 = learning.regression_tikhonov(G, measures, mask, tau)
        recovery3 = np.zeros_like(recovery0)
        for i in range(recovery0.shape[1]):
            recovery3[:, i] = learning.regression_tikhonov(
                G, measures[:, i], mask, tau)

        np.testing.assert_allclose(recovery0, recovery, atol=1e-5)
        np.testing.assert_allclose(recovery1, recovery, atol=1e-5)
        np.testing.assert_allclose(recovery2, recovery, atol=1e-5)
        np.testing.assert_allclose(recovery3, recovery, atol=1e-5)
        np.testing.assert_allclose(measures_bak, measures)
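
For reference, the dense computation the test checks against is the closed form of the relaxed Tikhonov problem: with M = diag(mask),

    \min_x \|M(x - y)\|_2^2 + \tau\, x^\top L x
    \quad\Longrightarrow\quad
    x^\star = (M + \tau L)^{-1} M y,

which is exactly the np.linalg.inv(np.diag(1 * mask) + tau * L) line above.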
Code example #8
    def test_regression_tikhonov_2(self):
        """Solve a regression problem with a constraint."""
        G = graphs.Sensor(100)
        G.estimate_lmax()

        # Create a smooth signal.
        filt = filters.Filter(G, lambda x: 1 / (1 + 10 * x))
        rs = np.random.RandomState(1)
        signal = filt.analyze(rs.normal(size=(G.n_vertices, 5)))

        # Make the input signal.
        mask = rs.uniform(0, 1, [G.n_vertices]) > 0.5
        measures = signal.copy()
        measures[~mask] = np.nan
        measures_bak = measures.copy()

        # Solve the problem.
        recovery0 = learning.regression_tikhonov(G, measures, mask, tau=0)
        np.testing.assert_allclose(measures_bak, measures)

        recovery1 = np.zeros_like(recovery0)
        for i in range(recovery0.shape[1]):
            recovery1[:, i] = learning.regression_tikhonov(G,
                                                           measures[:, i],
                                                           mask,
                                                           tau=0)
        np.testing.assert_allclose(measures_bak, measures)

        G = graphs.Graph(G.W.toarray())
        recovery2 = learning.regression_tikhonov(G, measures, mask, tau=0)
        recovery3 = np.zeros_like(recovery0)
        for i in range(recovery0.shape[1]):
            recovery3[:, i] = learning.regression_tikhonov(G,
                                                           measures[:, i],
                                                           mask,
                                                           tau=0)

        np.testing.assert_allclose(recovery1, recovery0)
        np.testing.assert_allclose(recovery2, recovery0)
        np.testing.assert_allclose(recovery3, recovery0)
        np.testing.assert_allclose(measures_bak, measures)
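
With tau = 0 the problem becomes hard interpolation rather than regression: minimize the smoothness term subject to matching the measurements on the observed set,

    \min_x \; x^\top L x \quad \text{s.t.} \quad x_i = y_i \;\; \forall i \in \text{mask},

which is why the unobserved entries can be np.nan here: they are never read.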
Code example #9
def _pyramid_single_interpolation(G, ca, pe, keep_inds, h_filter, **kwargs):
    r"""Synthesize a single level of the graph pyramid transform.

    Parameters
    ----------
    G : Graph
        Graph structure on which the signal resides.
    ca : ndarray
        Coarse approximation of the signal on a reduced graph.
    pe : ndarray
        Prediction error that was made when forming the current coarse approximation.
    keep_inds : ndarray
        The indices of the vertices to keep when downsampling the graph and signal.
    h_filter : callable
        The filter kernel in use at this level.
    use_landweber : bool
        To use the Landweber iteration approximation in the least squares synthesis.
        Default is False.
    reg_eps : float
        Interpolation parameter. Default is 0.005.
    landweber_its : int
        Number of iterations in the Landweber approximation for least squares synthesis.
        Default is 50.
    landweber_tau : float
        Parameter for the Landweber iteration. Default is 1.

    Returns
    -------
    finer_approx :
        Coarse approximation of the signal on a higher resolution graph.

    """
    nb_ind = keep_inds.shape[0]  # .shape alone is a tuple, not a count
    N = G.N
    reg_eps = float(kwargs.pop('reg_eps', 0.005))
    use_landweber = bool(kwargs.pop('use_landweber', False))
    landweber_its = int(kwargs.pop('landweber_its', 50))
    landweber_tau = float(kwargs.pop('landweber_tau', 1.))

    # Sampling matrix (nb_ind x N): S[i, j] = 1 iff keep_inds[i] == j.
    S = sparse.csr_matrix(([1] * nb_ind, (range(nb_ind), keep_inds)),
                          shape=(nb_ind, N))

    if use_landweber:
        x = np.zeros(N)
        z = np.concatenate((ca, pe), axis=0)
        green_kernel = filters.Filter(G, lambda x: 1./(x+reg_eps))
        PhiVlt = _analysis(green_kernel, S.T, **kwargs).T
        filt = filters.Filter(G, h_filter, **kwargs)

        for iteration in range(landweber_its):
            h_filtered_sig = _analysis(filt, x, **kwargs)
            x_bar = h_filtered_sig[keep_inds]
            y_bar = x - interpolate(G, x_bar, keep_inds, **kwargs)
            z_delt = np.concatenate((x_bar, y_bar), axis=0)
            z_delt = z - z_delt
            alpha_new = PhiVlt * z_delt[nb_ind:]
            # Scatter the coarse part of the residual onto the kept
            # vertices. The original indexed column 1 of an N x 1 matrix
            # (out of bounds) and passed the full-length z_delt; this is
            # the likely intent.
            x_up = sparse.csr_matrix(
                (z_delt[:nb_ind], (keep_inds, [0] * nb_ind)), shape=(N, 1))
            reg_L = G.L + reg_eps * sparse.eye(N)  # was a typo: 'reg_esp'

            elim_inds = np.setdiff1d(np.arange(N, dtype=int), keep_inds)
            L_red = reg_L[np.ix_(keep_inds, keep_inds)]
            L_in_out = reg_L[np.ix_(keep_inds, elim_inds)]
            L_out_in = reg_L[np.ix_(elim_inds, keep_inds)]
            L_comp = reg_L[np.ix_(elim_inds, elim_inds)]

            next_term = L_red * alpha_new - L_in_out * linalg.spsolve(L_comp, L_out_in * alpha_new)
            next_up = sparse.csr_matrix(
                (next_term, (keep_inds, [0] * nb_ind)), shape=(N, 1))
            x += landweber_tau * _analysis(filt, x_up - next_up, **kwargs) + z_delt[nb_ind:]

        finer_approx = x

    else:
        # When the graph is small enough, we can do a full eigendecomposition
        # and compute the full analysis operator T_a
        H = G.U * sparse.diags(h_filter(G.e), 0) * G.U.T
        Phi = G.U * sparse.diags(1./(reg_eps + G.e), 0) * G.U.T
        Ta = np.concatenate((S * H, sparse.eye(G.N) - Phi[:, keep_inds] * linalg.spsolve(Phi[np.ix_(keep_inds, keep_inds)], S*H)), axis=0)
        finer_approx = linalg.spsolve(Ta.T * Ta,
                                      Ta.T * np.concatenate((ca, pe), axis=0))

    return finer_approx
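
Both branches target the same least-squares synthesis: with the analysis operator T_a stacking the coarse channel S H and the prediction-error channel,

    x^\star = \arg\min_x \|T_a x - [ca;\, pe]\|_2^2
            = (T_a^\top T_a)^{-1} T_a^\top [ca;\, pe],

computed directly in the dense branch and approached iteratively in the Landweber branch.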
Code example #10
def pyramid_analysis(Gs, f, **kwargs):
    r"""Compute the graph pyramid transform coefficients.

    Parameters
    ----------
    Gs : list of graphs
        A multiresolution sequence of graph structures.
    f : ndarray
        Graph signal to analyze.
    h_filters : list
        A list of filters used by the analysis and synthesis operators.
        If only one filter is given, it is used at every level.
        Default is h(x) = 1 / (2x + 1).

    Returns
    -------
    ca : ndarray
        Coarse approximation at each level
    pe : ndarray
        Prediction error at each level
    h_filters : list
        Graph spectral filters applied

    References
    ----------
    See :cite:`shuman2013framework` and :cite:`pesenson2009variational`.

    """
    if np.shape(f)[0] != Gs[0].N:
        raise ValueError("PYRAMID ANALYSIS: The signal to analyze should have the same dimension as the first graph.")

    levels = len(Gs) - 1

    # Check that the filters argument is a list of callables.
    h_filters = kwargs.pop('h_filters', lambda x: 1. / (2*x+1))

    if not isinstance(h_filters, list):
        if hasattr(h_filters, '__call__'):
            logger.warning('Converting filters into a list.')
            h_filters = [h_filters]
        else:
            raise ValueError('Filters must be a list of functions.')

    if len(h_filters) == 1:
        h_filters = h_filters * levels

    elif len(h_filters) != levels:
        message = 'The number of filters must be one or equal to {}.'.format(levels)
        raise ValueError(message)

    ca = [f]
    pe = []

    for i in range(levels):
        # Low pass the signal
        s_low = _analysis(filters.Filter(Gs[i], h_filters[i]), ca[i], **kwargs)
        # Keep only the coefficient on the selected nodes
        ca.append(s_low[Gs[i+1].mr['idx']])
        # Compute prediction
        s_pred = interpolate(Gs[i], ca[i+1], Gs[i+1].mr['idx'], **kwargs)
        # Compute errors
        pe.append(ca[i] - s_pred)

    return ca, pe
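
Each level performs one analysis step: low-pass filter, downsample, predict, and record the residual. With S_i the sampling matrix whose rows are indexed by Gs[i+1].mr['idx'],

    ca_{i+1} = S_i \, h_i(L_i) \, ca_i, \qquad
    pe_i = ca_i - \mathrm{interpolate}(G_i, ca_{i+1}),

so ca_i is exactly recoverable from ca_{i+1} and pe_i: the coarsest approximation plus the stack of prediction errors determines the signal.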
Code example #11
def graph_multiresolution(G, levels, sparsify=True, sparsify_eps=None,
                          downsampling_method='largest_eigenvector',
                          reduction_method='kron', compute_full_eigen=False,
                          reg_eps=0.005):
    r"""Compute a pyramid of graphs (by Kron reduction).

    'graph_multiresolution(G, levels)' computes a multiresolution of the
    graph G by repeatedly downsampling and performing a graph reduction.
    The default downsampling method is the largest-eigenvector method,
    based on the polarity of the components of the eigenvector associated
    with the largest graph Laplacian eigenvalue. The default reduction
    method is Kron reduction followed by a graph sparsification step.

    Parameters
    ----------
    G : Graph structure
        The graph to reduce.
    levels : int
        Number of levels of decomposition.
    sparsify : bool
        To perform a spectral sparsification step immediately after
        the graph reduction (default is True).
    sparsify_eps : float
        Parameter epsilon used in the spectral sparsification
        (default is min(10/sqrt(G.N),.3)).
    downsampling_method: string
        The graph downsampling method (default is 'largest_eigenvector').
    reduction_method : string
        The graph reduction method (default is 'kron')
    compute_full_eigen : bool
        To also compute the graph Laplacian eigenvalues and eigenvectors
        for every graph in the multiresolution sequence (default is False).
    reg_eps : float
        The regularized graph Laplacian is :math:`\bar{L}=L+\epsilon I`.
        A smaller epsilon may lead to better regularization, but will also
        require a higher order Chebyshev approximation. (default is 0.005)

    Returns
    -------
    Gs : list
        A list of graph layers.

    Examples
    --------
    >>> from pygsp import reduction
    >>> levels = 5
    >>> G = graphs.Sensor(N=512)
    >>> G.compute_fourier_basis()
    >>> Gs = reduction.graph_multiresolution(G, levels, sparsify=False)
    >>> for idx in range(levels):
    ...     Gs[idx].plotting['plot_name'] = 'Reduction level: {}'.format(idx)
    ...     Gs[idx].plot()

    """
    if sparsify_eps is None:
        sparsify_eps = min(10. / np.sqrt(G.N), 0.3)

    if compute_full_eigen:
        G.compute_fourier_basis()
    else:
        G.estimate_lmax()

    Gs = [G]
    Gs[0].mr = {'idx': np.arange(G.N), 'orig_idx': np.arange(G.N)}

    for i in range(levels):
        if downsampling_method == 'largest_eigenvector':
            if hasattr(Gs[i], '_U'):
                V = Gs[i].U[:, -1]
            else:
                V = linalg.eigs(Gs[i].L, 1)[1][:, 0]

            V *= np.sign(V[0])
            ind = np.nonzero(V >= 0)[0]

        else:
            raise NotImplementedError('Unknown graph downsampling method.')

        if reduction_method == 'kron':
            Gs.append(kron_reduction(Gs[i], ind))

        else:
            raise NotImplementedError('Unknown graph reduction method.')

        if sparsify and Gs[i+1].N > 2:
            Gs[i+1] = graph_sparsify(Gs[i+1], min(max(sparsify_eps, 2. / np.sqrt(Gs[i+1].N)), 1.))
            # TODO : Make in place modifications instead!

        if compute_full_eigen:
            Gs[i+1].compute_fourier_basis()
        else:
            Gs[i+1].estimate_lmax()

        Gs[i+1].mr = {'idx': ind, 'orig_idx': Gs[i].mr['orig_idx'][ind], 'level': i}

        L_reg = Gs[i].L + reg_eps * sparse.eye(Gs[i].N)
        Gs[i].mr['K_reg'] = kron_reduction(L_reg, ind)
        Gs[i].mr['green_kernel'] = filters.Filter(Gs[i], lambda x: 1./(reg_eps + x))

    return Gs
Code example #12
File: coarsening_utils.py | Project: sush1996/tgcn2
def plot_coarsening(Gall,
                    Call,
                    size=3,
                    edge_width=0.8,
                    node_size=20,
                    alpha=0.55,
                    title='',
                    smooth=0):
    """
    Plot a (hierarchical) coarsening
    
    Parameters
    ----------
    G_all : list of pygsp Graphs
    Call  : list of np.arrays
    
    Returns
    -------
    fig : matplotlib figure
    """
    # colors signify the size of a coarsened subgraph ('k' is 1, 'g' is 2, 'b' is 3, and so on)
    colors = ['k', 'g', 'b', 'r', 'y']

    n_levels = len(Gall) - 1
    if n_levels == 0: return None
    fig = plt.figure(figsize=(n_levels * size * 3, size * 2))

    for level in range(n_levels):

        G = Gall[level]
        edges = np.array(G.get_edge_list()[0:2])

        Gc = Gall[level + 1]
        #         Lc = C.dot(G.L.dot(C.T))
        #         Wc = sp.sparse.diags(Lc.diagonal(), 0) - Lc;
        #         Wc = (Wc + Wc.T) / 2
        #         Gc = gsp.graphs.Graph(Wc, coords=(C.power(2)).dot(G.coords))
        edges_c = np.array(Gc.get_edge_list()[0:2])
        C = Call[level]
        C = C.toarray()

        if level > 0 and smooth > 0:
            f = filters.Filter(G, lambda x: np.exp(-smooth * x))
            G.estimate_lmax()
            G.set_coordinates(f.filter(G.coords))

        if G.coords.shape[1] == 2:
            ax = fig.add_subplot(1, n_levels + 1, level + 1)
            ax.axis('off')
            ax.set_title(f'{title} | level = {level}, N = {G.N}')

            [x, y] = G.coords.T
            for eIdx in range(0, edges.shape[1]):
                ax.plot(x[edges[:, eIdx]],
                        y[edges[:, eIdx]],
                        color='k',
                        alpha=alpha,
                        linewidth=edge_width)  # matplotlib kwargs are case-sensitive
            for i in range(Gc.N):
                subgraph = np.arange(G.N)[C[i, :] > 0]
                ax.scatter(x[subgraph],
                           y[subgraph],
                           c=colors[np.clip(len(subgraph) - 1, 0, 4)],
                           s=node_size * len(subgraph),
                           alpha=alpha)

        elif G.coords.shape[1] == 3:
            ax = fig.add_subplot(1, n_levels + 1, level + 1, projection='3d')
            ax.axis('off')

            [x, y, z] = G.coords.T
            for eIdx in range(0, edges.shape[1]):
                ax.plot(x[edges[:, eIdx]],
                        y[edges[:, eIdx]],
                        zs=z[edges[:, eIdx]],
                        color='k',
                        alpha=alpha,
                        linewidth=edge_width)
            for i in range(Gc.N):
                subgraph = np.arange(G.N)[C[i, :] > 0]
                ax.scatter(x[subgraph],
                           y[subgraph],
                           z[subgraph],
                           c=colors[np.clip(len(subgraph) - 1, 0, 4)],
                           s=node_size * len(subgraph),
                           alpha=alpha)

    # The final (coarsest) graph.
    Gc = Gall[-1]
    edges_c = np.array(Gc.get_edge_list()[0:2])

    if smooth > 0:
        f = filters.Filter(Gc, lambda x: np.exp(-smooth * x))
        Gc.estimate_lmax()
        Gc.set_coordinates(f.filter(Gc.coords))

    if G.coords.shape[1] == 2:
        ax = fig.add_subplot(1, n_levels + 1, n_levels + 1)
        ax.axis('off')
        [x, y] = Gc.coords.T
        ax.scatter(x, y, c='k', s=node_size, alpha=alpha)
        for eIdx in range(0, edges_c.shape[1]):
            ax.plot(x[edges_c[:, eIdx]],
                    y[edges_c[:, eIdx]],
                    color='k',
                    alpha=alpha,
                    linewidth=edge_width)

    elif G.coords.shape[1] == 3:
        ax = fig.add_subplot(1, n_levels + 1, n_levels + 1, projection='3d')
        ax.axis('off')
        [x, y, z] = Gc.coords.T
        ax.scatter(x, y, z, c='k', s=node_size, alpha=alpha)
        for eIdx in range(0, edges_c.shape[1]):
            ax.plot(x[edges_c[:, eIdx]],
                    y[edges_c[:, eIdx]],
                    z[edges_c[:, eIdx]],
                    color='k',
                    alpha=alpha,
                    linewidth=edge_width)

    ax.set_title(f'{title} | level = {n_levels}, n = {Gc.N}')
    fig.tight_layout()
    return fig
Code example #13
def estimate_PSD(data, G, U, lamb, method="perraudin", plot=True):
        """Estimate the PSD of a graph signal with one of two methods:
        the maximum-likelihood estimator ("likelihood") or the method of
        "Stationary signal processing on graphs" (Perraudin 2017).

        Parameters
        ----------
        data : ndarray
            Matrix of size T x p, where T is the time horizon and p the
            signal dimension (number of vertices).
        G : Graph
            Graph over which the signal is defined.
        U : ndarray
            Eigenvectors of the GSO.
        lamb : ndarray
            Eigenvalues of the GSO.
        method : string
            Which algorithm to use to estimate the PSD.
        plot : bool
            Whether or not to plot the PSD estimate.

        Returns
        -------
        PSD : ndarray
            Power spectral density.
        """
        p = data.shape[1]
        N = data.shape[0]
        
        if method=="likelihood":
            
            
            PSD=np.diag(np.cov(U.transpose().dot(data.transpose())))
            if plot:
                M=300 #### Number of filters 
                m=np.arange(0,M)
                l_max=np.max(lamb)           
                tau=((M+1)*l_max)/M**2
                plt.plot(m*tau,PSD)
                plt.show()
            
            
            return(PSD)
            
        
        if method=="perraudin":
          
            ###### Parameters initialization
            
            
            M=100 #### Number of filters
            degree=15
            m=np.arange(0,M)
            l_max=np.max(lamb)
            noise=np.random.normal(size=(p,10))
            norm_filters=np.zeros(M)
            norm_localized_filters=np.zeros(M)
            PSD=np.zeros(M)
            tau=((M+1)*l_max)/M**2
            
           ##### Applying filters to noise 
            for i in m:
                G_filter=filters.Filter(G, lambda x: gaussian_filter(x,i,M,l_max))
                filter_noise=G_filter.filter(noise)
                norm_filters[i]=np.sum(np.apply_along_axis(lambda x: np.mean(x**2),1,filter_noise))
                localized_filter=G_filter.filter(data.transpose())
                norm_localized_filters[i]=np.sum(np.apply_along_axis(lambda x: np.mean(x**2),1,localized_filter))
                PSD[i]=norm_localized_filters[i]/norm_filters[i]
            
            PSD=PSD
            coeff=np.polyfit(m*tau,PSD,deg=degree)
            if plot:
                plt.plot(m*tau,PSD)
                plt.show()
                
            p = np.poly1d(coeff)
            PSD=p(lamb)
            index_PSD=np.where(PSD<=0)[0]
            PSD[index_PSD]=np.min(PSD[PSD>0])
            
            return(PSD)      
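
The loop above calls a gaussian_filter(x, i, M, l_max) helper that is not shown. A plausible reconstruction (an assumption, not the author's original code), consistent with the shifted Gaussian atoms of compute_spectrogram in code example #2 and with the spacing tau used above:

import numpy as np

def gaussian_filter(x, i, M, l_max):
    # Hypothetical helper: a Gaussian atom centered at the i-th of M
    # uniformly spaced shifts tau on [0, l_max].
    tau = ((M + 1) * l_max) / M**2
    return np.exp(-M * ((x - i * tau) / l_max)**2)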
Code example #14
def test_default_filters(G, fu):
    g = filters.Filter(G)
    g1 = filters.Filter(G, filters=fu)
Code example #15
    def generate_filter(self):
        """Define the filter that will be applied to white noise."""
        self.H = filters.Filter(self.G, lambda x: self.spectral_profile(x))
        self.PSD = (self.spectral_profile(self.G.e))**2
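
For context: if w is white noise with unit variance, the filtered signal x = H w = h(L) w is stationary on the graph with power spectral density

    S(\lambda_k) = |h(\lambda_k)|^2,

which is exactly what the last line evaluates at the graph eigenvalues G.e (available once the Fourier basis has been computed).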