Example #1
            def __init__(self, reconstruct_method):
                super().__init__()
                self.reconstruct_method = reconstruct_method

                method = {
                    'LinearRegression':
                    LinearRegression(fit_intercept=False, n_jobs=-1),
                    'Ridge':
                    Ridge(
                        alpha=1e-4,
                        fit_intercept=False,
                        tol=1e-2,
                    ),
                    'Lasso':
                    Lasso(
                        alpha=1e-5,
                        fit_intercept=False,
                        warm_start=False,
                        tol=1e-3,
                    ),
                    'ElasticNet':
                    ElasticNet(
                        alpha=1e-2,
                        l1_ratio=0.5,
                        fit_intercept=False,
                        tol=1e-3,
                    ),
                    'OMP':
                    OMP(n_nonzero_coefs=num_range * num_sample_per_batch,
                        fit_intercept=False),
                }
                self.clf = method[reconstruct_method]

                print('Reconstructor initialized with method %s' % reconstruct_method)
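Only the constructor of the reconstruction wrapper is shown above. Below is a minimal sketch of the pattern it encodes: pick one of the listed scikit-learn solvers, fit it on measurements y = A @ x with fit_intercept=False, and read the sparse code off coef_. The sensing matrix and signal here are made up for illustration.

import numpy as np
from sklearn.linear_model import Lasso

# Same idea as self.clf = method[reconstruct_method]: use the solver looked up by name
# (here the 'Lasso' entry from the dict above), fit it on y = A @ x, take coef_.
clf = Lasso(alpha=1e-5, fit_intercept=False, tol=1e-3)

A = np.random.randn(64, 256)
x_true = np.zeros(256)
x_true[:5] = 1.0
y = A @ x_true

clf.fit(A, y)
print(np.flatnonzero(np.abs(clf.coef_) > 1e-3)[:10])   # indices of recovered nonzeros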
Example #2
def GetNeighborDims(data, paras):
    # For each dimension, regress it on all remaining dimensions with OMP and
    # keep the kND-sparse coefficient vector as its neighbor weights.
    ndata, ndim = data.shape
    kND = paras["kND"]
    objOMP = OMP(n_nonzero_coefs=kND)
    idxDict = npy.ones(ndim, dtype=bool)
    w = npy.zeros((ndim - 1, ndim), dtype=npy.float32)
    for kk in range(ndim):
        idxDict.fill(True)
        idxDict[kk] = False
        objOMP.fit(data[:, idxDict], data[:, kk])
        w[:, kk] = objOMP.coef_.astype(npy.float32)
    return w
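A minimal usage sketch for GetNeighborDims, assuming numpy is imported as npy and OMP is scikit-learn's OrthogonalMatchingPursuit (as in the surrounding examples); the random data and the kND value are illustrative.

import numpy as npy
from sklearn.linear_model import OrthogonalMatchingPursuit as OMP

rng = npy.random.default_rng(0)
data = rng.standard_normal((200, 10)).astype(npy.float32)   # 200 samples, 10 dimensions
w = GetNeighborDims(data, {"kND": 3})
print(w.shape)   # (9, 10): one 3-sparse column of neighbor weights per dimension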
def _solver_OMP(A, b, K):
    """
    Find a K-sparse solution to Ax = b.

    @param K    Sparsity of the solution.
    """

    from sklearn.linear_model import OrthogonalMatchingPursuit as OMP
    omp = OMP(n_nonzero_coefs=K)
    omp.fit(A, b)
    x = omp.coef_

    return x
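_solver_OMP can be sanity-checked on synthetic data; the sizes, seed, and sparsity below are illustrative, not from the source.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((60, 200))                       # measurement matrix
x_true = np.zeros(200)
x_true[rng.choice(200, 5, replace=False)] = rng.standard_normal(5)
b = A @ x_true

x_hat = _solver_OMP(A, b, K=5)
print(np.flatnonzero(x_hat))    # typically matches np.flatnonzero(x_true)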
 def __init__(self,
              n_nonzero_coefs=None,
              tol=None,
              fit_intercept=True,
              normalize=False,
              precompute='auto'):
     self.n_nonzero_coefs = n_nonzero_coefs
     self.precompute = precompute
     self.fit_intercept = fit_intercept
     self.tol = tol
     self.normalize = normalize
     self.model = OMP(fit_intercept=self.fit_intercept,
                      n_nonzero_coefs=self.n_nonzero_coefs,
                      precompute=self.precompute,
                      normalize=self.normalize,
                      tol=self.tol)
# -----------------------------------
sys.stderr.write("Training classifiers... \r")
t0 = time.time()

# OMP: Orthogonal Matching Pursuit
from sklearn.linear_model import OrthogonalMatchingPursuit as OMP
from sklearn.feature_selection import f_classif, SelectKBest
from sklearn.pipeline import Pipeline

# Create as many OMP as voxels to predict
clfs = []
n_clfs = y_train.shape[1]
for i in range(y_train.shape[1]):
    sys.stderr.write("Training classifiers %03d/%d... \r" % (i + 1, n_clfs))
    clf = Pipeline([('selection', SelectKBest(f_classif, k=500)),
                    ('clf', OMP(n_nonzero_coefs=10))])
    clf.fit(X_train, y_train[:, i])
    clfs.append(clf)

sys.stderr.write("Training classifiers %03d/%d... Done (%.2fs).\n" %
                 (n_clfs, n_clfs, time.time() - t0))

############################################################################
# Here we run the prediction: the decoding itself
# ------------------------------------------------
sys.stderr.write("Calculating scores and outputs...")
t0 = time.time()

y_pred = []
for clf in clfs:
    y_pred.append(clf.predict(X_test))
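The snippet breaks off right after announcing the scoring step. Below is a hedged sketch of one way it could continue, assuming y_test exists alongside X_test and using per-voxel R² from scikit-learn; the original script's actual metric is not shown.

import numpy as np
from sklearn.metrics import r2_score

y_pred = np.column_stack(y_pred)              # shape (n_test_samples, n_clfs)
scores = [r2_score(y_test[:, i], y_pred[:, i]) for i in range(n_clfs)]
sys.stderr.write(" Done (%.2fs).\n" % (time.time() - t0))
print("Mean R^2 over predicted voxels: %.3f" % np.mean(scores))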
def shapelet_decomposition(N1=20, N2=20, basis='XY'):
    # Obtaining galaxy images
    cube_real = pyfits.getdata('../../data/cube_real.fits')
    cube_real_noiseless = pyfits.getdata('../../data/cube_real_noiseless.fits')
    background = 1.e6 * 0.16**2
    img = galsim.Image(78, 78)  # cube_real has 100, 78x78 images
    D = np.zeros((78 * 78, 4 * N1 * N2))  # alloc for Dictionary
    base_coefs = np.zeros((N1, N2))
    X = np.linspace(0, 77, 78)
    Y = np.linspace(0, 77, 78)

    for img_idx in [91]:
        cube_real[img_idx] -= background
        img = galsim.Image(cube_real[img_idx], xmin=0, ymin=0)
        shape = img.FindAdaptiveMom()
        x0, y0 = shape.moments_centroid.x, shape.moments_centroid.y  ## possible swap b/w x,y
        sigma = shape.moments_sigma

        Xv, Yv = np.meshgrid((X - x0), (Y - y0))
        R = np.sqrt(Xv**2 + Yv**2)

        Phi = np.zeros_like(R)
        for i in range(np.shape(Xv)[0]):
            for j in range(np.shape(Xv)[1]):
                Phi[i, j] = math.atan2(Yv[i, j], Xv[i, j])

        signal = cube_real[img_idx].flatten()
        shapelet_reconst = np.zeros_like(signal)
        k_p = 0
        if (basis == 'Polar'):

            for n in range(N1):
                for m in range(-n, n + 1, 2):
                    if (
                            n <= (78 / sigma - 1)
                    ):  # n_max ~ theta_max (image size) / theta_min (pixel or kernel smoothing size) -1
                        arr = p_shapelet.polar_shapelets_real(n, m, sigma)(
                            R, Phi).flatten()
                        arr_im = p_shapelet.polar_shapelets_imag(n, m, sigma)(
                            R, Phi).flatten()
                        #arr = shapelet2d(m,n,x0=x0,y0=y0,sx=sigma,sy=sigma)(X,Y).flatten()

                        #arr2 = shapelet2d(m,n,x0=x0,y0=y0,sx=0.5*sigma,sy=0.5*sigma)(X,Y).flatten()
                        #arr3 = shapelet2d(m,n,x0=x0,y0=y0,sx=1.5*sigma,sy=2.*sigma)(X,Y).flatten()
                        #arr4 = shapelet2d(m,n,x0=x0,y0=y0,sx=2.0*sigma,sy=2.0*sigma)(X,Y).flatten()

                        D[:, k_p] = arr
                        #D[:,k+N1*N2]=arr2; D[:,k+2*N1*N2]=arr3; D[:,k+3*N1*N2]=arr4
                        k_p += 1
                        arr_norm2 = np.dot(arr, arr)
                        arr_norm_im2 = np.dot(arr_im, arr_im)
                        coef_r = np.dot(arr, signal)
                        coef_im = np.dot(arr_im, signal)
                        print(coef_im)
                        #coef_im = np.dot(arr_im, signal)
                        if (coef_r == 0):
                            base_coefs[n, m] = 0
                        else:
                            base_coefs[n, m] = coef_r / np.sqrt(
                                arr_norm2
                            )  #coef_im/np.sqrt(arr_norm_im2)#np.sqrt(arr_norm2)#np.abs(coef/np.sqrt(arr_norm2) + coef_im/np.sqrt(arr_norm_im2))
                            shapelet_reconst = shapelet_reconst + (
                                coef_r * arr
                            ) / arr_norm2  #+ coef_im*arr_im/arr_norm_im2
                    else:
                        break
        elif (basis == 'XY'):
            for k in range(N1 * N2):
                m, n = k // N1, k % N1
                if (m + n <= (78 / sigma - 1)):
                    arr = shapelet2d(m, n, x0=x0, y0=y0, sx=sigma,
                                     sy=sigma)(X, Y).flatten()

                    #arr2 = shapelet2d(m,n,x0=x0,y0=y0,sx=0.5*sigma,sy=0.5*sigma)(X,Y).flatten()
                    #arr3 = shapelet2d(m,n,x0=x0,y0=y0,sx=1.5*sigma,sy=2.*sigma)(X,Y).flatten()
                    #arr4 = shapelet2d(m,n,x0=x0,y0=y0,sx=2.0*sigma,sy=2.0*sigma)(X,Y).flatten()

                    D[:, k] = arr
                    #D[:,k+N1*N2]=arr2; D[:,k+2*N1*N2]=arr3; D[:,k+3*N1*N2]=arr4
                    arr_norm2 = np.dot(arr, arr)
                    coef = np.dot(arr, signal)
                    #coef_im = np.dot(arr_im, signal)
                    if (coef == 0):
                        base_coefs[n, m] = 0
                    else:
                        base_coefs[n, m] = coef / np.sqrt(
                            arr_norm2
                        )  #coef_im/np.sqrt(arr_norm_im2)#np.sqrt(arr_norm2)#np.abs(coef/np.sqrt(arr_norm2) + coef_im/np.sqrt(arr_norm_im2))
                        shapelet_reconst = shapelet_reconst + (
                            coef *
                            arr) / arr_norm2  #+ coef_im*arr_im/arr_norm_im2
                else:
                    break

        residual = signal - shapelet_reconst
        residual_energy_fraction = np.sum(residual**2) / np.sum(signal**2)
        recovered_energy_fraction = np.sum(shapelet_reconst**2) / np.sum(signal**2)

        print "Comparing moments_amp to base_coefs[0,0]", base_coefs[
            0, 0], shape.moments_amp
        print "Base coefficients sum over signal", np.sum(base_coefs**2) / (
            np.sum(signal**2)), np.sum(residual**2) / np.sum(signal**2)

        fig, ax = plt.subplots(2, 2)
        coeff_plot2d(base_coefs, N1, N2, ax=ax[1, 1], fig=fig)
        vmin, vmax = min(shapelet_reconst.min(),
                         signal.min()), max(shapelet_reconst.max(),
                                            signal.max())

        im00 = ax[0, 0].imshow(cube_real[img_idx], vmin=vmin, vmax=vmax)
        im01 = ax[0, 1].imshow(shapelet_reconst.reshape(78, 78),
                               vmin=vmin,
                               vmax=vmax)
        im10 = ax[1, 0].imshow(residual.reshape(78, 78))
        fig.colorbar(im00, ax=ax[0, 0])
        fig.colorbar(im01, ax=ax[0, 1])
        fig.colorbar(im10, ax=ax[1, 0])
        ax[0, 0].set_title('Original (noisy) image')
        ax[0, 1].set_title('Reconstructed image - Frac. of energy = ' +
                           str(np.round(recovered_energy_fraction, 4)))
        ax[1, 0].set_title('Residual image - Frac. of energy = ' +
                           str(np.round(residual_energy_fraction, 4)))
        ax[1, 1].set_title('Rel. magnitude of coefficients')
        fig.suptitle('Shapelet Basis decomposition')
        plt.savefig('Decomp_cartesian.png')

        # Sparse solver
        omp = OMP(n_nonzero_coefs=N1 * N2 // 4)
        omp.fit(D, signal)
        sparse_coefs = omp.coef_
        sparse_idx = sparse_coefs.nonzero()
        sparse_reconst = np.dot(D, sparse_coefs)
        sparse_residual = signal - sparse_reconst

        residual_energy_fraction = np.sum(sparse_residual**2) / np.sum(signal**2)
        recovered_energy_fraction = np.sum(sparse_reconst**2) / np.sum(signal**2)

        fig2, ax2 = plt.subplots(2, 2)
        im00 = ax2[0, 0].imshow(cube_real[img_idx])
        im01 = ax2[0, 1].imshow(sparse_reconst.reshape(78, 78))
        im10 = ax2[1, 0].imshow(sparse_residual.reshape(78, 78))
        print(sparse_coefs.shape)
        sparse_coefs = sparse_coefs.reshape(2 * N1, 2 * N2)
        coeff_plot2d(sparse_coefs, N1 * 2, N2 * 2, ax=ax2[1, 1], fig=fig2)

        ax2[1, 1].grid(lw=2)
        fig2.colorbar(im00, ax=ax2[0, 0])
        fig2.colorbar(im01, ax=ax2[0, 1])
        fig2.colorbar(im10, ax=ax2[1, 0])
        ax2[0, 0].set_title('Original (noisy) image')
        ax2[0, 1].set_title('Reconstructed image - Frac. of energy = ' +
                            str(np.round(recovered_energy_fraction, 4)))
        ax2[1, 0].set_title('Residual image - Frac. of energy = ' +
                            str(np.round(residual_energy_fraction, 4)))
        ax2[1, 1].set_title('Rel. magnitude of coefficients - ' +
                            str(omp.n_nonzero_coefs))
        fig2.suptitle('Sparse decomposition from a semi-intelligent Dictionary :)')

        plt.show()
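A minimal invocation sketch. The function reads its FITS cubes from hard-coded relative paths and writes Decomp_cartesian.png, so it assumes the same directory layout as the original project.

# Decompose galaxy image index 91 in the Cartesian (XY) shapelet basis and
# then run the OMP sparse solver over the dictionary D built inside the function.
shapelet_decomposition(N1=20, N2=20, basis='XY')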
Example #7
# OMP: Orthogonal Matching Pursuit
from sklearn.linear_model import OrthogonalMatchingPursuit as OMP
from sklearn.feature_selection import f_classif, SelectKBest
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Create as many OMP as voxels to predict
clfs = []
n_clfs = y_train.shape[1]
for i in range(y_train.shape[1]):
    sys.stderr.write("Training classifiers %03d/%d... \r" % (i + 1, n_clfs))

    clf = Pipeline([('selection', SelectKBest(f_classif, k=500)),
                    ('scl', StandardScaler()),
                    ('clf', OMP(normalize=False, n_nonzero_coefs=10))])
    clf.fit(X_train, y_train[:, i])
    clfs.append(clf)

sys.stderr.write("Training classifiers %03d/%d... Done (%.2fs).\n" %
                 (n_clfs, n_clfs, time.time() - t0))

############################################################################
# Here we run the prediction: the decoding itself
# ------------------------------------------------
sys.stderr.write("Calculating scores and outputs...")
t0 = time.time()

y_pred = []
for clf in clfs:
    y_pred.append(clf.predict(X_test))
Example #8
#A=create_Amat_kron(Nx,Ny,index)
#A=create_Amat_1D(Nx,Ny,index)
A=create_Amat_rand(M,Nx,Ny,index)
#A=create_Amat_rand_Wave(M,Nx,Ny,index)
#A=create_Amat_JL(Nx,Ny,index)
#A=create_Amat_1DW(Nx,Ny,M)

print "# reconstruction #"  
val=.001
count =1 # 0.0001,0.001,0.01,0.1
while(count!=0):
	#7.OMP
	#'''
	non_Zcoeff = CountSparse(Simage_)
	omp = OMP()
	omp.fit(A, Y)
	print("omp done")
	recon = idct(omp.coef_).reshape((Nx, Ny))
	print(type(recon))
	print(omp.coef_)
	#'''


#6.Lasso
	'''
	lasso = Lasso(alpha=val)
	lasso.fit(A, Y)
	print(lasso.coef_.shape)
	val = val*10
	recons_sparse = coo_matrix(lasso.coef_)
	'''
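The snippet above is a fragment of a larger compressed-sensing script (A, Y, Simage_ and the create_Amat_* helpers are defined elsewhere in it). Below is a self-contained sketch of the same OMP-plus-inverse-DCT idea on synthetic 1-D data; the names, sizes and the choice of scipy.fftpack are assumptions for illustration.

import numpy as np
from scipy.fftpack import idct
from sklearn.linear_model import OrthogonalMatchingPursuit as OMP

rng = np.random.default_rng(0)
n, m, k = 256, 96, 8                         # signal length, measurements, sparsity
coeffs = np.zeros(n)
coeffs[rng.choice(n, k, replace=False)] = rng.standard_normal(k)
signal = idct(coeffs, norm='ortho')          # signal that is k-sparse in the DCT domain

Phi = rng.standard_normal((m, n))            # random sensing matrix
Psi = idct(np.eye(n), norm='ortho', axis=0)  # inverse-DCT synthesis matrix
A = Phi @ Psi                                # measurements as a function of DCT coefficients

y = Phi @ signal
omp = OMP(n_nonzero_coefs=k, fit_intercept=False)
omp.fit(A, y)
recon = idct(omp.coef_, norm='ortho')        # back to the signal domain
print(np.linalg.norm(recon - signal) / np.linalg.norm(signal))   # should be close to zero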
Example #9
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit as OMP


def sparse_code(Y, D, X=None):
    if X is None:
        y_cols, d_cols = Y.shape[1], D.shape[1]
        X = np.asmatrix(np.empty((d_cols, y_cols)))

    x_rows, x_cols = X.shape

    # Solve one OMP problem per column of Y against the dictionary D.
    # sklearn does not accept np.matrix inputs, so pass plain ndarrays.
    for k in range(x_cols):
        omp = OMP()
        omp.fit(np.asarray(D), np.asarray(Y)[:, k])
        X[:, k] = np.asmatrix(omp.coef_).T

    return X


"""
Forms a matrix for a given vector x to enforce that the new update x will be
sparse. Here N is the columns of Y. Returns the matrix omega.
"""
def form_omega(x, N):
    w = []
    for i, x_i in enumerate(np.nditer(x)):
        if abs(x_i) > 0:
            w.append((i, x_i))

    # One column per nonzero of x, with a 1 in the row that nonzero came from.
    W = np.asmatrix(np.zeros((N, len(w))))
    for col, (row, _) in enumerate(w):
        W[row, col] = 1

    return W
    

"""
Update the dictionary D and the matrix X (phase 2)
"""
def update_dictionary(Y, D, X):
    n, K = D.shape

    for k in range(K):
        # E_k: the approximation error when atom k is left out,
        # i.e. E_k = Y - sum_{j != k} d_j x_j
        E_k = Y.copy()
        for j in range(K):
            if j != k:
                E_k = E_k - D[:, j] * X[j, :]

        # Restrict E_k to the columns where row k of X is nonzero, so the
        # updated coefficients stay sparse. Skip atoms that are never used.
        omega_k = form_omega(X[k, :], Y.shape[1])
        if omega_k.shape[1] == 0:
            continue
        E_kr = E_k * omega_k

        # Rank-1 approximation of E_kr gives the new atom and its coefficients
        U, sig, V = np.linalg.svd(E_kr, full_matrices=True)

        x_kr = sig[0] * np.asmatrix(V)[0, :]
        D[:, k] = np.asmatrix(U)[:, 0]
        X[k, :] = x_kr * omega_k.T   # scatter the restricted row back to full length
def main():
	pass

if __name__ == '__main__':
	main()
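A minimal end-to-end sketch of a K-SVD-style loop built from the two functions above (sparse coding with OMP, then the atom-by-atom dictionary update); the data sizes, seed, and unit-norm initialization are illustrative assumptions, not from the source.

import numpy as np

rng = np.random.RandomState(0)
n, K, N = 20, 30, 200                        # signal dim, number of atoms, training signals
Y = np.asmatrix(rng.randn(n, N))
D0 = rng.randn(n, K)
D = np.asmatrix(D0 / np.linalg.norm(D0, axis=0))   # unit-norm initial atoms

for it in range(5):                          # a few alternating iterations
    X = sparse_code(Y, D)                    # phase 1: sparse coding with OMP
    update_dictionary(Y, D, X)               # phase 2: atom-by-atom dictionary update
    err = np.linalg.norm(Y - D * X, 'fro') / np.linalg.norm(Y, 'fro')
    print("iteration %d, relative error %.4f" % (it, err))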