def thomson(data, cube, u=.5, **kwargs):
    """
    Defines a Thomson scattering model for white-light coronagraphs.

    Parameters
    ----------
    data : 3D InfoArray
        Data stack.
    cube : 3D FitsArray
        Map cube.
    u : float, optional
        Coefficient forwarded to the pb Thomson scattering projector.

    Returns
    -------
    P : The projector with masking.
    D : Smoothness priors.
    obj_mask : Object mask array.
    data_mask : Data mask array.
    """
    # data mask
    data_mask = solar.define_data_mask(data, **kwargs)
    # projector
    pb = kwargs.get('pb', 'pb')
    if pb == 'pb':
        P = pb_thomson_lo(data, cube, u, mask=data_mask)
    else:
        raise ValueError('Only pb implemented for now.')
    # priors
    D = [lo.diff(cube.shape, axis=i) for i in xrange(cube.ndim)]
    # masks
    kwargs['remove_nan'] = True
    P, D, obj_mask = _apply_object_mask(P, D, cube, **kwargs)
    return P, D, obj_mask, data_mask
def stsrt(data, cube, **kwargs):
    """
    Smooth Temporal Solar Rotational Tomography.
    Assumes data is sorted by observation time 'DATE_OBS'.
    """
    # Parse kwargs.
    obj_rmin = kwargs.get('obj_rmin', None)
    obj_rmax = kwargs.get('obj_rmax', None)
    data_rmin = kwargs.get('data_rmin', None)
    data_rmax = kwargs.get('data_rmax', None)
    mask_negative = kwargs.get('mask_negative', None)
    # define temporal groups
    times = [secchi.convert_time(t) for t in data.header['DATE_OBS']]
    ## if no interval is given, separate every image
    dt_min = kwargs.get('dt_min', np.max(np.diff(times)) + 1)
    #groups = secchi.temporal_groups(data, dt_min)
    ind = secchi.temporal_groups_indexes(data, dt_min)
    n = len(ind)
    # 4d model
    cube_header = cube.header.copy()
    cube_header.update('NAXIS', 4)
    cube_header.update('NAXIS4', data.shape[-1])
    P = siddon4d_lo(data.header, cube_header, obstacle="sun")
    # define per-group summation of maps
    # define new 4D cube
    cube4 = cube.reshape(cube.shape + (1,)).repeat(n, axis=-1)
    cube4.header.update('NAXIS', 4)
    cube4.header.update('NAXIS4', cube4.shape[3])
    cube4.header.update('CRVAL4', 0.)
    cube4.header.update('CDELT4', dt_min)
    S = group_sum(ind, cube, data)
    P = P * S.T
    # priors
    D = [lo.diff(cube4.shape, axis=i) for i in xrange(cube.ndim)]
    # mask object
    if obj_rmin is not None or obj_rmax is not None:
        Mo, obj_mask = mask_object(cube, kwargs)
        obj_mask = obj_mask.reshape(obj_mask.shape + (1,)).repeat(n, axis=-1)
        Mo = lo.mask(obj_mask)
        P = P * Mo.T
        D = [Di * Mo.T for Di in D]
    else:
        obj_mask = None
    # mask data
    if (data_rmin is not None or data_rmax is not None
        or mask_negative is not None):
        data_mask = secchi.define_data_mask(data, Rmin=data_rmin,
                                            Rmax=data_rmax,
                                            mask_negative=mask_negative)
        Md = lo.mask(data_mask)
        P = Md * P
    else:
        data_mask = None
    return P, D, obj_mask, data_mask, cube4
def srt(data, cube, **kwargs):
    """
    Define Solar Rotational Tomography model with optional masking of
    data and map areas. Can also define priors.

    Parameters
    ----------
    data : InfoArray
        Data cube.
    cube : FitsArray
        Map cube.
    obj_rmin : float
        Object minimal radius. Areas below obj_rmin are masked out.
    obj_rmax : float
        Object maximal radius. Areas above obj_rmax are masked out.
    data_rmin : float
        Data minimal radius. Areas below data_rmin are masked out.
    data_rmax : float
        Data maximal radius. Areas above data_rmax are masked out.
    mask_negative : boolean
        If True, negative values in the data are masked out.

    Returns
    -------
    P : The projector with masking.
    D : Smoothness priors.
    obj_mask : Object mask array (or None).
    data_mask : Data mask array (or None).
    """
    # Parse kwargs.
    obj_rmin = kwargs.get('obj_rmin', None)
    obj_rmax = kwargs.get('obj_rmax', None)
    data_rmin = kwargs.get('data_rmin', None)
    data_rmax = kwargs.get('data_rmax', None)
    mask_negative = kwargs.get('mask_negative', None)
    # Model: it is Solar rotational tomography, so obstacle="sun".
    P = siddon_lo(data.header, cube.header, obstacle="sun")
    D = [lo.diff(cube.shape, axis=i) for i in xrange(cube.ndim)]
    # Define masking.
    if obj_rmin is not None or obj_rmax is not None:
        Mo, obj_mask = mask_object(cube, kwargs)
        P = P * Mo.T
        D = [Di * Mo.T for Di in D]
    else:
        obj_mask = None
    if (data_rmin is not None or data_rmax is not None
        or mask_negative is not None):
        data_mask = secchi.define_data_mask(data, Rmin=data_rmin,
                                            Rmax=data_rmax,
                                            mask_negative=mask_negative)
        Md = lo.mask(data_mask)
        P = Md * P
    else:
        data_mask = None
    return P, D, obj_mask, data_mask
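# Hypothetical usage sketch for srt (illustrative only, kept commented out):
# the data loading, masking radii and hyperparameter values are assumptions
# mirroring the SECCHI inversion scripts further down, not part of this
# module.
#data = secchi.read_data(path, bin_factor=4, time_window=time_window,
#                        time_step=4 * 3600.)
#cube = fa.zeros(3 * (128,), header=cube_header)
#P, D, obj_mask, data_mask = srt(data, cube, obj_rmin=1., obj_rmax=1.3)
#hypers = cube.ndim * (1e0,)
#x, info = lo.acg(P, data.flatten(), D, hypers, maxiter=100)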
def smoothness_prior(my_map, height_prior=False):
    """
    Defines a smoothness prior: one finite-difference operator per axis of
    the map. If height_prior is True, each operator is weighted by the
    radius map.
    """
    D = [lo.diff(my_map.shape, axis=i) for i in xrange(my_map.ndim)]
    if height_prior:
        r = _radius_map(my_map)
        R = lo.diag(r.ravel())
        D = [Di * R for Di in D]
    return D
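# Minimal self-contained sketch (plain numpy, illustrative only) of how the
# smoothness priors returned above enter the regularized inversions used in
# the example scripts below: the estimate minimizes
# ||P x - y||^2 + sum_i h_i ||D_i x||^2, i.e. solves the normal equations
# (P^T P + sum_i h_i D_i^T D_i) x = P^T y.
import numpy as np
n = 16
P_toy = np.random.randn(8, n)                    # toy projector
x_true = np.linspace(0., 1., n)                  # smooth toy map
y_toy = P_toy.dot(x_true)                        # toy data
D_toy = np.eye(n, k=1)[:-1] - np.eye(n)[:-1]     # 1d finite-difference prior
h = 1e-1                                         # smoothness hyperparameter
A_toy = P_toy.T.dot(P_toy) + h * D_toy.T.dot(D_toy)
x_toy = np.linalg.solve(A_toy, P_toy.T.dot(y_toy))  # dense solve; the scripts use lo.acg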
obj = tomograpy.centered_cubic_map(3, 32)
obj[:] = tomograpy.phantom.shepp_logan(obj.shape)
# data
radius = 200.
a = tomograpy.fov(obj.header, radius)
data = tomograpy.centered_stack(a, 128, n_images=60, radius=radius,
                                max_lon=np.pi)
# projector
P = tomograpy.lo(data.header, obj.header)
# projection
t = time.time()
data = tomograpy.projector(data, obj)
print("projection time : " + str(time.time() - t))
# data
y = data.flatten()
# backprojection
t = time.time()
x0 = P.T * y
bpj = x0.reshape(obj.shape)
print("backprojection time : " + str(time.time() - t))
# priors
Ds = [lo.diff(obj.shape, axis=i) for i in xrange(3)]
# inversion using scipy.sparse.linalg
t = time.time()
sol = lo.acg(P, y, Ds, 1e-2 * np.ones(3), maxiter=100, tol=1e-20)
sol = sol.reshape(bpj.shape)
print("inversion time : " + str(time.time() - t))
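# Optional sanity check (not part of the original script): compare the
# reconstruction with the Shepp-Logan phantom used to generate the data.
rel_err = np.linalg.norm(sol - obj) / np.linalg.norm(obj)
print("relative reconstruction error : " + str(rel_err))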
#!/usr/bin/env python
from tamasis import *
import numpy as np
import lo
import scipy.sparse.linalg as spl
# data
pacs = PacsObservation(filename=tamasis_dir + 'tests/frames_blue.fits',
                       fine_sampling_factor=1, keep_bad_detectors=False)
tod = pacs.get_tod()
# projector
projection = Projection(pacs, resolution=3.2, oversampling=False,
                        npixels_per_sample=6)
model = projection
# naive map
backmap = model.transpose(tod)
# transform to lo
P = lo.ndsubclass(backmap, tod, matvec=model.direct, rmatvec=model.transpose)
# priors
Dx = lo.diff(backmap.shape, axis=0, dtype=np.float64)
Dy = lo.diff(backmap.shape, axis=1, dtype=np.float64)
#Dw = lo.pywt_lo.wavedec2(backmap.shape, "haar")
# inversion
y = tod.flatten()
x = lo.iterative.acg(P, (Dx, Dy), (1e1, 1e1), y)
sol = backmap.zeros(backmap.shape)
sol[:] = x.reshape(sol.shape)
deglitch_l2mad(tod, projection)
# model
masking = Masking(tod.mask)
model = masking * projection
P = lo.aslinearoperator(model.aslinearoperator())
# derive filter
#tod = filter_median(tod, length=9999)
#kernel = filt.kernel_from_tod(tod, length=1000)
kernel = (1 + (10. / np.arange(500)) ** .25)
kernel = np.concatenate((kernel[::-1], kernel))
#kern = np.mean(kernel, axis=0)
N = filt.kernels_convolve(tod.shape, 1 / np.sqrt(kernel))
# apply to data
yn = N * tod.flatten()
# apply to model
M = N * P
# first map
backmap = model.transpose(tod)
# priors
Dx = lo.diff(backmap.shape, axis=0, dtype=np.float64)
Dy = lo.diff(backmap.shape, axis=1, dtype=np.float64)
#Dw = lo.pywt_lo.wavedec2(backmap.shape, "haar", level=3)
# inversion
x, conv = lo.rls(M, (Dx, Dy), (1e1, 1e1), yn, tol=1e-10)
sol = backmap.zeros(backmap.shape)
sol[:] = x.reshape(sol.shape)
# save
sol.writefits(os.path.join(os.getenv('HOME'), 'data', 'csh', 'output',
                           'ngc6946__filter_rls.fits'))
#!/usr/bin/env python
from tamasis import *
import numpy as np
import lo
import scipy.sparse.linalg as spl
# data
pacs = PacsObservation(filename=tamasis_dir + 'tests/frames_blue.fits',
                       fine_sampling_factor=1, keep_bad_detectors=False)
tod = pacs.get_tod()
# projector
projection = Projection(pacs, resolution=3.2, oversampling=False,
                        npixels_per_sample=6)
model = projection
# naive map
backmap = model.transpose(tod)
# transform to lo
P = lo.ndsubclass(backmap, tod, matvec=model.direct, rmatvec=model.transpose)
# priors
Dx = lo.diff(backmap.shape, axis=0)
Dy = lo.diff(backmap.shape, axis=1)
#Dw = lo.pywt_lo.wavedec2(backmap.shape, "haar")
# inversion
y = tod.flatten()
x = lo.iterative.npacg(P, (Dx, Dy), (1e1, 1e1), (2, 1.5, 1.5), y)
sol = backmap.zeros(backmap.shape)
sol[:] = x.reshape(sol.shape)
projection = tm.Projection(obs, npixels_per_sample=4)
P = lo.aslinearoperator(projection)
# simulate data
x0 = tm.gaussian(projection.shapein, 3)  # map with gaussian source
tod0 = projection(x0)  # data
n = np.random.randn(*tod0.shape)  # noise
nsr = 1e-2
tod = tod0 + nsr * n  # noisy data
y = tod.ravel()  # as 1d array
# load compression matrix
filename = os.path.join(os.getenv("HOME"), "data", "pacs",
                        "mmc_cam_angle_0_scan_angle_0_speed60.fits")
c = fa.FitsArray(file=filename).astype(np.float64)
cmm = csh.compression.AnyOperator(c)
C = cmm((projection.shapeout[0], projection.shapeout[1][0]))
# compress
z = C * y
# inversion
H = C * P
Ds = [lo.diff(x0.shape, axis=i) for i in (0, 1)]
x_inv = lo.acg(H, z, Ds, 1e-1 * np.ones(2), tol=1e-10, maxiter=100)
x_inv.resize(x0.shape)
# condition number
#M = H.T * H
#Md = M.todense()
#print np.linalg.cond(Md)
#print lo.iterative.utils.cond(H.T * H)
import lo
import csh.filter as filt
from time import time
import scipy.sparse.linalg as spl
import tamasis as tm
# data
pacs = tm.PacsObservation(filename=tm.tamasis_dir + 'tests/frames_blue.fits')
tod = pacs.get_tod()
# projector
model = tm.Projection(pacs, resolution=3.2, oversampling=False,
                      npixels_per_sample=6)
# naive map
backmap = model.transpose(tod)
# transform to lo
P = lo.aslinearoperator(model.aslinearoperator())
# derive filter
kernel = filt.kernel_from_tod(tod, length=10)
#kern = np.mean(kernel, axis=0)
N = filt.kernels_convolve(tod.shape, 1 / np.sqrt(kernel))
# apply to data
yn = N * tod.flatten()
# apply to model
M = N * P
# priors
Ds = [lo.diff(backmap.shape, axis=axis) for axis in xrange(backmap.ndim)]
#Ds.append(lo.pywt_lo.wavelet2(backmap.shape, "haar"))
# inversion
#y = tod.flatten()
x, conv = lo.rls(M, Ds, (1e1, 1e1, 1e-1), yn, spl.bicgstab)
sol = backmap.zeros(backmap.shape)
sol[:] = x.reshape(sol.shape)
# ---------------------------------------------
# convert LinearOperator to dense matrix (ndarray)
Hd = H.todense()
Hd = np.asmatrix(Hd)
# convert into my FitsArray:
Hd_fits = fa.FitsArray(data=Hd)
# save the model to defined filename
filename = os.path.join(os.getenv("HOME"), "data", "pacs",
                        "mmc_model_cam_angle_0_scan_angle_0_speed60.fits")
Hd_fits.tofits(filename)
# Define and store mini map-making matrix
# ---------------------------------------
# (H^T H + a D^T D)^{-1} H^T
# prior is "smoothness" along each axis (0 and 1)
D = [lo.diff(projection.shapein, axis=i) for i in (0, 1)]
# apply the same decimation to priors
D = [Di * M.T for Di in D]
# can sum LinearOperators
DD = sum([Di.T * Di for Di in D])
# convert to dense matrix (ndarray)
DDd = DD.todense()
# Use numpy routines to compute the exact dense map-making matrix.
# This is possible since we have only 8 frames here.
# Otherwise conjugate-gradient inversions are mandatory.
# The following line can take a while (approx. 1 minute).
H_inv = np.linalg.inv(Hd.T * Hd + DDd) * Hd.T
# save to FITS files
H_inv_fits = fa.FitsArray(data=H_inv)
filename = os.path.join(os.getenv("HOME"), "data", "pacs",
                        "mmc_cam_angle_0_scan_angle_0_speed60.fits")
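# Iterative counterpart (hedged sketch, not from the original script): for
# problems too large for the dense inverse above, the same regularized map
# estimate is obtained with a conjugate-gradient solver, as in the other
# example scripts. 'z' stands for a compressed data vector, which is not
# defined in this excerpt.
#x_iter = lo.acg(H, z, D, 1e-1 * np.ones(len(D)), tol=1e-10, maxiter=100)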
model = masking * projection
# naive map
backmap = model.transpose(tod)
# coverage map
weights = model.transpose(tod.ones(tod.shape))
# mask on map
mask = weights == 0
M = lo.mask(mask)
# preconditioner
iweights = 1 / weights
iweights[np.where(np.isfinite(iweights) == 0)] = 0.
M0 = lo.diag(iweights.flatten())
# transform to lo
P = lo.aslinearoperator(model.aslinearoperator())
# priors
Dx = lo.diff(backmap.shape, axis=0)
Dy = lo.diff(backmap.shape, axis=1)
# inversion
y = (masking.T * tod).flatten()
# algos
algos = [spl.cg, spl.cgs, spl.bicg, spl.bicgstab]
models = [P.T * P, P.T * P + Dx.T * Dx + Dy.T * Dy]
n_iterations = []
resid = []
for algo in algos:
    for A in models:
        for is_masked in (False, True):
            callback = lo.CallbackFactory(verbose=True)
            if is_masked:
                A = M * A * M.T
                b = M * P.T * y
# data
path = os.path.join(os.getenv('HOME'), 'data', '171dec08')
obsrvtry = 'STEREO_A'
time_window = ['2008-12-01T00:00:00.000', '2008-12-03T00:00:00.000']
# one image every time_step seconds
time_step = 4 * 3600.
data = siddon.secchi.read_data(path, bin_factor=4,
                               obsrvtry=obsrvtry,
                               time_window=time_window,
                               time_step=time_step)
# cube
shape = 3 * (128,)
header = {'CRPIX1': 64., 'CRPIX2': 64., 'CRPIX3': 64.,
          'CDELT1': 0.0234375, 'CDELT2': 0.0234375, 'CDELT3': 0.0234375,
          'CRVAL1': 0., 'CRVAL2': 0., 'CRVAL3': 0.}
cube = fa.zeros(shape, header=header)
# model
P = siddon.siddon_lo(data.header, cube.header)
D = [lo.diff(cube.shape, axis=i) for i in xrange(cube.ndim)]
hypers = cube.ndim * (1e0,)
# inversion
t = time.time()
A = P.T * P + np.sum([h * d.T * d for h, d in zip(hypers, D)])
b = P.T * data.flatten()
#callback = lo.iterative.CallbackFactory(verbose=True)
#x, info = spl.bicgstab(A, b, maxiter=100, callback=callback)
x, info = lo.acg(P, data.flatten(), D, hypers, maxiter=100)
sol = cube.copy()
sol[:] = x.reshape(cube.shape)
print(time.time() - t)
ctod = tm.filter_median(ctod, length=filter_length / factor)
cov = noise_covariance(ctod, obs)
S = cov.aslinearoperator()
# model with compression
M = lo.aslinearoperator(model.aslinearoperator())
M = C * M
# noise covariance
cov = noise_covariance(ctod, obs)
S = lo.aslinearoperator(cov.aslinearoperator())
#S = C * S * C.T
#S = None
# backprojection
backmap = (M.T * ctod.flatten()).reshape(projection.shapein)
# priors
Ds = [lo.diff(backmap.shape, axis=0, dtype=np.float64)]
Ds.append(lo.diff(backmap.shape, axis=1, dtype=np.float64))
# weights
weights = projection.transpose(tod.ones(tod.shape))
# masking the map
MM = lo.mask(weights == 0)
M = M * MM.T
Ds = [D * MM.T for D in Ds]
# inversion
x, conv = algo(M, Ds, hypers, ctod.flatten(), S=S, tol=1e-5)
# reshape map
sol = tm.Map(np.zeros(backmap.shape))
sol[:] = (MM.T * x).reshape(sol.shape)
sol.header = header
sol.writefits(os.path.join(output_path, 'sol_madmap.fits'))
mask = Masking(coverage < 10.)
# The model is the masking of the sky map followed by the projection.
# This is basically matrix multiplication.
model = projection * mask
# Performing inversion
# --------------------
# with TAMASIS
x_tm = mapper_rls(tod, model, hyper=1e-1, tol=1e-10, maxiter=100)
# with lo routines
# transform to lo
H = lo.aslinearoperator(model * mask)
# smoothness priors
Ds = [lo.diff(backmap.shape, axis=axis) for axis in (0, 1)]
# inversion
y = tod.ravel()  # requires 1d input
x_lo = lo.acg(H, y, Ds, 1e-1 * np.ones(3), tol=1e-10, maxiter=100)
x_lo.resize(backmap.shape)  # output is 1d so it needs reshaping
# with sparsity assumptions (using the Huber algorithm)
x_h = lo.hacg(H, y, Ds, 1e1 * np.ones(3),
              np.asarray((None, 1e-6, 1e-6, 1e-6)),
              x0=x_lo.flatten(), tol=1e-7, maxiter=200)
x_h.resize(backmap.shape)
tod = pacs.get_tod()
# projector
projection = tm.Projection(pacs, resolution=3.2, oversampling=True,
                           npixels_per_sample=6)
masking = tm.Masking(tod.mask)
compression = tm.CompressionAverage(pacs.compression_factor)
model = masking * compression * projection
# naive map
naive = model.transpose(tod)
# coverage map
coverage = model.transpose(tod.ones(tod.shape))
# noise covariance
length = 2 ** np.ceil(np.log2(np.array(tod.nsamples) + 200))
invNtt = tm.InvNtt(length, pacs.get_filter_uncorrelated())
fft = tm.Fft(length)
padding = tm.Padding(left=invNtt.ncorrelations,
                     right=length - tod.nsamples - invNtt.ncorrelations)
weight = padding.T * fft.T * invNtt * fft * padding
W = lo.aslinearoperator(weight.aslinearoperator())
# transform to lo
P = lo.aslinearoperator(model.aslinearoperator())
# priors
Ds = [lo.diff(naive.shape, axis=axis) for axis in xrange(naive.ndim)]
# inversion
hypers = [1e6, 1e6]
y = tod.flatten()
M = P.T * W * P + np.sum([h * D.T * D for h, D in zip(hypers, Ds)])
x, conv = spl.cgs(M, P.T * W * y,
                  callback=lo.CallbackFactory(verbose=True))
sol = naive.zeros(naive.shape)
sol[:] = x.reshape(sol.shape)
# create projector
projection = tm.Projection(obs, npixels_per_sample=4)
P = lo.aslinearoperator(projection)
# simulate data
x0 = tm.gaussian(projection.shapein, 3)  # map with gaussian source
tod0 = projection(x0)  # data
n = np.random.randn(*tod0.shape)  # noise
nsr = 1e-2
tod = tod0 + nsr * n  # noisy data
y = tod.ravel()  # as 1d array
# load compression matrix
filename = os.path.join(os.getenv("HOME"), "data", "pacs",
                        "mmc_cam_angle_0_scan_angle_0_speed60.fits")
c = fa.FitsArray(file=filename).astype(np.float64)
cmm = csh.compression.AnyOperator(c)
C = cmm((projection.shapeout[0], projection.shapeout[1][0]))
# compress
z = C * y
# inversion
H = C * P
Ds = [lo.diff(x0.shape, axis=i) for i in (0, 1)]
x_inv = lo.acg(H, z, Ds, 1e-1 * np.ones(2), tol=1e-10, maxiter=100)
x_inv.resize(x0.shape)
# condition number
#M = H.T * H
#Md = M.todense()
#print np.linalg.cond(Md)
#print lo.iterative.utils.cond(H.T * H)
coverage = projection.T(np.ones(tod.shape))
# naive map
naive = backmap / coverage
# mask according to coverage (everything covered by less than 10 samples)
mask = Masking(coverage < 10.)
# The model is the masking of the sky map followed by the projection.
# This is basically matrix multiplication.
model = projection * mask
# Performing inversion
# --------------------
# with TAMASIS
x_tm = mapper_rls(tod, model, hyper=1e-1, tol=1e-10, maxiter=100)
# with lo routines
# transform to lo
H = lo.aslinearoperator(model * mask)
# smoothness priors
Ds = [lo.diff(backmap.shape, axis=axis) for axis in (0, 1)]
# inversion
y = tod.ravel()  # requires 1d input
x_lo = lo.acg(H, y, Ds, 1e-1 * np.ones(3), tol=1e-10, maxiter=100)
x_lo.resize(backmap.shape)  # output is 1d so it needs reshaping
# with sparsity assumptions (using the Huber algorithm)
x_h = lo.hacg(H, y, Ds, 1e1 * np.ones(3),
              np.asarray((None, 1e-6, 1e-6, 1e-6)),
              x0=x_lo.flatten(), tol=1e-7, maxiter=200)
x_h.resize(backmap.shape)
data = siddon.simu.circular_trajectory_data(**image_header)
data[:] = np.zeros(data.shape)
# projector
P = siddon.siddon_lo(data.header, obj.header)
# projection
t = time.time()
data = siddon.projector(data, obj)
print("projection time : " + str(time.time() - t))
# data
y = data.flatten()
# backprojection
t = time.time()
x0 = P.T * y
bpj = x0.reshape(obj.shape)
print("backprojection time : " + str(time.time() - t))
# coverage map
weights = (P.T * np.ones(y.size)).reshape(obj.shape)
# priors
Ds = [lo.diff(obj.shape, axis=i) for i in xrange(3)]
hypers = 1e-2 * np.ones(3)
#Ds, hypers = [], []
# inversion using scipy.sparse.linalg
t = time.time()
tol = 1e-5
sol, info = lo.rls(P, y, Ds, hypers, maxiter=100, tol=tol)
sol = sol.reshape(bpj.shape)
if info != 0:
    print("Inversion algorithm did not converge to " + str(tol))
print("inversion time : " + str(time.time() - t))
# data
path = os.path.join(os.getenv('HOME'), 'data', '171dec08')
obsrvtry = 'STEREO_A'
time_window = ['2008-12-01T00:00:00.000', '2008-12-03T00:00:00.000']
# one image every time_step seconds
time_step = 4 * 3600.
data = tomograpy.secchi.read_data(path, bin_factor=4,
                                  obsrvtry=obsrvtry,
                                  time_window=time_window,
                                  time_step=time_step)
# cube
shape = 3 * (128,)
header = {'CRPIX1': 64., 'CRPIX2': 64., 'CRPIX3': 64.,
          'CDELT1': 0.0234375, 'CDELT2': 0.0234375, 'CDELT3': 0.0234375,
          'CRVAL1': 0., 'CRVAL2': 0., 'CRVAL3': 0.}
cube = fa.zeros(shape, header=header)
# model
P = tomograpy.lo(data.header, cube.header)
D = [lo.diff(cube.shape, axis=i) for i in xrange(cube.ndim)]
hypers = cube.ndim * (1e0,)
# inversion
t = time.time()
A = P.T * P + np.sum([h * d.T * d for h, d in zip(hypers, D)])
b = P.T * data.flatten()
#callback = lo.iterative.CallbackFactory(verbose=True)
#x, info = spl.bicgstab(A, b, maxiter=100, callback=callback)
x, info = lo.acg(P, data.flatten(), D, hypers, maxiter=100)
sol = cube.copy()
sol[:] = x.reshape(cube.shape)
print(time.time() - t)