Code example #1
def pipeline_dli(filename, output_file, keywords, verbose=False):
    import numpy as np
    import linear_operators as lo
    import tamasis as tm
    # verbosity
    tm.var.verbose = verbose
    # define observation
    obs = tm.PacsObservation(filename, **keywords["PacsObservation"])
    # extra masking
    tm.step_scanline_masking(obs, **keywords["scanline_masking"])
    # get data
    tod = obs.get_tod(**keywords["get_tod"])
    # deglitching
    tm.step_deglitching(obs, tod, **keywords["deglitching"])
    # median filtering
    tod = tm.filter_median(tod, **keywords["filter_median"])
    # define projector
    projection = tm.Projection(obs, **keywords["Projection"])
    # build instrument model
    response = tm.ResponseTruncatedExponential(
        obs.pack(obs.instrument.detector.time_constant) / obs.SAMPLING_PERIOD)
    compression = tm.CompressionAverage(obs.slice.compression_factor)
    masking = tm.Masking(tod.mask)
    model = masking * compression * response * projection
    # set tod masked values to zero
    tod = masking(tod)
    # N^-1 operator
    invntt = tm.InvNtt(obs)

    # for the dli algorithm
    M = map_mask(tod, model, **keywords["map_mask"])

    # recast model as a new-style LinearOperator from linear_operators package
    H = lo.aslinearoperator(model) * M.T
    N = lo.aslinearoperator(invntt)
    # vectorize data so it is accepted by LinearOperators
    y = tod.ravel()
    Ds = [tm.DiscreteDifference(axis=i, shapein=projection.shapein) for i in (0, 1)]
    Ds = [lo.aslinearoperator(D) for D in Ds]
    Ds = [D * M.T for D in Ds]
    D = lo.concatenate(Ds)
    # handle tau which needs to be an ndarray
    keywords["dli"]["tau"] *= np.ones(D.shape[0])
    algo = lo.DoubleLoopAlgorithm(H, y, D, noise_covariance=N,
                                  fmin_args=keywords["fmin_args"],
                                  lanczos=keywords["lanczos"],
                                  **keywords["dli"])
    # optimize
    xe = algo()
    # reshape
    xe = (M.T * xe).reshape(projection.shapein)
    # recast as tamasis map
    xe = tm.Map(xe)
    # save
    xe.save(output_file)
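
A minimal usage sketch for pipeline_dli. The nested keys mirror the lookups in the function body; the file names, the empty option dictionaries and the tau value are placeholders, not values from the original project.

# Hypothetical call of pipeline_dli (placeholder file names and options).
keywords = {
    "PacsObservation": {},
    "scanline_masking": {},
    "get_tod": {},
    "deglitching": {},
    "filter_median": {},
    "Projection": {},
    "map_mask": {},
    "fmin_args": {},
    "lanczos": {},
    "dli": {"tau": 1.0},  # a scalar tau is broadcast to an ndarray inside
}
pipeline_dli("frames_blue.fits", "map_dli.fits", keywords, verbose=True)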
Code example #2
def pipeline_photproject(filenames, output_file, keywords):
    """
    Perform naive map-making (photometric projection) with a simple
    PACS model.  The model includes the tod mask and the projection.
    Processing steps are as follows:

        - define PacsObservation instance
        - mask first scanlines to avoid drift
        - get Time Ordered Data (get_tod)
        - 2nd level deglitching (with standard projector and tod median
            filtering with a narrow window)
        - median filtering
        - define projection
        - perform naive map-making (mapper_naive)
        - save file

    Arguments
    ---------
    filenames: list of strings
        List of data filenames.
    output_file : string
        Name of the output fits file.
    keywords: dict
        Dictionary of dictionaries containing the options of each
        processing step.

    Returns
    -------
    Returns nothing. The result is saved as a FITS file.
    """
    from scipy.sparse.linalg import cgs
    import tamasis as tm
    # define observation
    obs = tm.PacsObservation(filenames, **keywords["PacsObservation"])
    # extra masking
    tm.step_scanline_masking(obs, **keywords["scanline_masking"])
    # get data
    tod = obs.get_tod(**keywords["get_tod"])
    # deglitching
    tm.step_deglitching(obs, tod, **keywords["deglitching"])
    # median filtering
    tod = tm.filter_median(tod, **keywords["filter_median"])
    # define projector
    projection = tm.Projection(obs, oversampling=False, **keywords["Projection"])
    # build instrument model
    masking = tm.Masking(tod.mask)
    model = masking * projection
    # set tod masked values to zero
    tod = masking(tod)
    # perform map-making
    map_naive = tm.mapper_naive(tod, model, **keywords["mapper_naive"])
    # save
    map_naive.save(output_file)
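
A similar usage sketch for pipeline_photproject, again with placeholder file names and empty option dictionaries; only the sections actually looked up in the function body are listed.

# Hypothetical call of pipeline_photproject (placeholders throughout).
keywords = {
    "PacsObservation": {},
    "scanline_masking": {},
    "get_tod": {},
    "deglitching": {},
    "filter_median": {},
    "Projection": {},
    "mapper_naive": {},
}
pipeline_photproject(["frames_blue.fits"], "map_naive.fits", keywords)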
Code example #3
def noise_covariance(tod, obs):
    """
    Build the inverse noise covariance operator (InvNtt applied in
    Fourier space, with padding and masking).
    """
    import numpy as np
    import tamasis as tm
    length = 2 ** np.ceil(np.log2(np.array(tod.nsamples) + 200))
    invNtt = tm.InvNtt(length, obs.get_filter_uncorrelated())
    fft = tm.Fft(length)
    padding = tm.Padding(left=invNtt.ncorrelations,
                         right=length - tod.nsamples - invNtt.ncorrelations)
    masking = tm.Masking(tod.mask)
    cov = masking * padding.T * fft.T * invNtt * fft * padding * masking
    return cov
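
A sketch of how the returned operator could be used, assuming obs, tod and model have already been built as in the pipeline examples; mapper_rls accepts such an operator through its invntt argument (see code example #9). The output file name is a placeholder.

import tamasis as tm
from scipy.sparse.linalg import cgs
# obs, tod and model are assumed to exist, built as in the pipelines above
invntt = noise_covariance(tod, obs)
map_rls = tm.mapper_rls(tod, model, invntt=invntt, solver=cgs)
map_rls.save("map_rls.fits")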
Code example #4
File: pacs_rls.py    Project: nbarbey/tamasis-map
def pipeline_rls(filenames, output_file, keywords, verbose=False):
    """
    Perform regularized least-squares inversion of a simple model (tod
    mask and projection). Processing steps are as follows:

        - define PacsObservation instance
        - get Time Ordered Data (get_tod)
        - define projection
        - perform inversion on model
        - save file

    Arguments
    ---------
    filenames: list of strings
        List of data filenames.
    output_file : string
        Name of the output fits file.
    keywords: dict
        Dictionary of dictionaries containing the options of each
        processing step.
    verbose: boolean (default False)
        Set verbosity.

    Returns
    -------
    Returns nothing. The result is saved as a FITS file.
    """
    from scipy.sparse.linalg import cgs
    import tamasis as tm
    # verbosity
    keywords["mapper_rls"]["verbose"] = verbose
    # define observation
    obs_keys = keywords.get("PacsObservation", {})
    obs = tm.PacsObservation(filenames, **obs_keys)
    # get data
    tod_keys = keywords.get("get_tod", {})
    tod = obs.get_tod(**tod_keys)
    # define projector
    proj_keys = keywords.get("Projection", {})
    projection = tm.Projection(obs, **proj_keys)
    # define mask
    masking_tod = tm.Masking(tod.mask)
    # define full model
    model = masking_tod * projection
    # perform map-making inversion
    mapper_keys = keywords.get("mapper_rls", {})
    map_rls = tm.mapper_rls(tod, model, solver=cgs, **mapper_keys)
    # save
    map_rls.save(output_file)
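
A minimal usage sketch for pipeline_rls. Because the option sections are fetched with keywords.get(..., {}), they may be omitted, but "mapper_rls" must be present since the verbose flag is written into it before the inversion; the file names are placeholders.

# Hypothetical minimal call of pipeline_rls.
keywords = {"mapper_rls": {}}
pipeline_rls(["frames_blue.fits"], "map_rls.fits", keywords, verbose=True)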
Code example #5
File: mapmaking.py    Project: pombredanne/csh
def __call__(self, data, state):
    # get parameters
    C = state.get('compression')
    factor = state.get('compression_factor', 1)
    # decompress
    uc_data = uncompress(data, C, factor)
    uc_data.mask = state['mask']
    # filter
    uc_data = tm.filter_median(uc_data, length=self.filter_length)
    tm.deglitch_l2mad(uc_data, state['model'])
    # define mask
    masking = tm.Masking(uc_data.mask)
    # update model
    state['deglitching_mask'] = uc_data.mask
    state['deglitching_mask_lo'] = masking
    state['model'] = masking * state['model']
    state['deglitching_filter_length'] = self.filter_length
    return data
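
A short summary, derived from the code above, of the state-dictionary protocol this deglitching step relies on: the keys it reads and the keys it writes back for later steps. The two lists below are purely illustrative.

# Keys read from `state` by this step and keys it adds or updates.
read_keys = ["compression", "compression_factor", "mask", "model"]
written_keys = ["deglitching_mask", "deglitching_mask_lo", "model",
                "deglitching_filter_length"]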
Code example #6
File: pacs_huber.py    Project: pombredanne/csh
def pipeline_huber(filenames, output_file, keywords, verbose=False):
    """
    Perform Huber-regularized inversion of a PACS model that includes
    tod mask, compression and projection.
    Processing steps are as follows:

        - define PacsObservation instance
        - mask first scanlines to avoid drift
        - get Time Ordered Data (get_tod)
        - 2nd level deglitching (with standard projector and tod median
            filtering with a narrow window)
        - median filtering
        - define projection
        - perform inversion on model
        - save file

    Arguments
    ---------
    filenames: list of strings
        List of data filenames.
    output_file : string
        Name of the output fits file.
    keywords: dict
        Dictionary of dictionaries containing the options of each
        processing step.
    verbose: boolean (default False)
        Set verbosity.

    Returns
    -------
    Returns nothing. The result is saved as a FITS file.
    """
    from scipy.sparse.linalg import cgs
    import tamasis as tm
    import linear_operators as lo
    # verbosity
    tm.var.verbose = verbose
    # define observation
    obs = tm.PacsObservation(filenames, **keywords["PacsObservation"])
    # extra masking
    tm.step_scanline_masking(obs, **keywords["scanline_masking"])
    # get data
    tod = obs.get_tod(**keywords["get_tod"])
    # deglitching
    # need to adapt deglitching to any compression model
    tm.step_deglitching(obs, tod, **keywords["deglitching"])
    # median filtering
    tod = tm.filter_median(tod, **keywords["filter_median"])
    # define projector
    projection = tm.Projection(obs, **keywords["Projection"])
    P = lo.aslinearoperator(projection)
    # build instrument model
    masking = tm.Masking(tod.mask)
    Mt = lo.aslinearoperator(masking)
    # compression (`compressions` maps a mode name to a compression
    # operator constructor and is defined elsewhere in the original file)
    mode = compressions[keywords["compression"].pop("mode")]
    factor = keywords["compression"].pop("factor")
    C = mode(tod, factor, **keywords["compression"])
    # define map mask
    M = map_mask(tod, model, **keywords["map_mask"])
    # recast model as a new-style LinearOperator from linear_operators package
    H = Mt * C * P * M.T
    # vectorize data so it is accepted by LinearOperators
    y = tod.ravel()
    Ds = [
        tm.DiscreteDifference(axis=i, shapein=projection.shapein)
        for i in (0, 1)
    ]
    Ds = [lo.aslinearoperator(D) for D in Ds]
    Ds = [D * M.T for D in Ds]
    # set tod masked values to zero
    tod = masking(tod)
    # perform map-making inversion
    hypers = (keywords["hacg"].pop("hyper"), ) * len(Ds)
    deltas = (keywords["hacg"].pop("delta"), ) * (len(Ds) + 1)
    map_huber = lo.hacg(H,
                        tod.ravel(),
                        priors=Ds,
                        hypers=hypers,
                        deltas=deltas,
                        **keywords["hacg"])
    # save
    map_huber = (M.T * map_huber).reshape(projection.shapein)
    map_huber = tm.Map(map_huber)
    map_huber.save(output_file)
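
A usage sketch for pipeline_huber. The key names follow the lookups in the function body; the compression "mode" must match an entry of the module-level compressions mapping (not shown in this snippet), and the mode name, numeric values and file names below are placeholders.

# Hypothetical call of pipeline_huber (mode name and values are placeholders).
keywords = {
    "PacsObservation": {},
    "scanline_masking": {},
    "get_tod": {},
    "deglitching": {},
    "filter_median": {},
    "Projection": {},
    "map_mask": {},
    "compression": {"mode": "average", "factor": 4},
    "hacg": {"hyper": 1.0, "delta": 1e-8},
}
pipeline_huber(["frames_blue.fits"], "map_huber.fits", keywords)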
Code example #7
C = lo.aslinearoperator(
    compression.aslinearoperator(shape=(tod.size / factor, tod.size)))
ctod = compression.direct(tod)
# uncompress for deglitching
uctod = tod.copy(tod.shape)
y0, t = lo.spl.cgs(C.T * C, C.T * ctod.flatten())
uctod[:] = y0.reshape(tod.shape)
# deglitching
projection = tm.Projection(pacs,
                           header=header,
                           resolution=3.,
                           npixels_per_sample=5)
uctod.mask = tm.deglitch_l2mad(uctod, projection)
ctod = compression.direct(uctod)
# model
masking = tm.Masking(uctod.mask)
model = compression * masking * projection
# remove drift
#ctod = tm.filter_median(ctod, length=3000 / 8.)
# first map
M = lo.aslinearoperator(model.aslinearoperator())
#P = lo.aslinearoperator(projection.aslinearoperator())
#C = csh.averaging(tod.shape, factor=8)
#I = lo.mask(uctod.mask)
#M = C * I.T * I * P
#M = C * P
backmap = model.transpose(ctod)
weights = model.transpose(ctod.ones(ctod.shape))
MM = lo.mask(weights == 0)
M = M * MM.T
# define algo
Code example #8
# reset pacs header to have a shape multiple of 4
#header = pacs.get_map_header()
#header['NAXIS1'] = 192
#header['NAXIS2'] = 192
#header['CRPIX1'] = 96
#header['CRPIX2'] = 96
# data
tod = pacs.get_tod()
# remove bad pixels (by updating the mask!)
#tod = remove_bad_pixels(tod)
# deglitching
#projection = tm.Projection(pacs, header=header, resolution=3., npixels_per_sample=6)
projection = tm.Projection(pacs, resolution=3., npixels_per_sample=6)
tm.deglitch_l2mad(tod, projection)
# model
masking = tm.Masking(tod.mask)
model = masking * projection
# remove drift
tod = tm.filter_median(tod, length=999)

model = masking * projection
# naive map
backmap = model.transpose(tod)
# coverage map
weights = model.transpose(tod.ones(tod.shape))
# mask on map
mask = weights == 0
M = lo.mask(mask)
# preconditioner
iweights = 1 / weights
iweights[np.where(np.isfinite(iweights) == 0)] = 0.
Code example #9
File: pacs_wrls.py    Project: nbarbey/tamasis-map
def pipeline_wrls(filenames, output_file, keywords, verbose=False):
    """
    Perform regularized least-squares inversion of the state-of-the-art
    PACS model.  The PACS model includes tod mask, compression,
    response, projection and noise covariance (invntt).
    Processing steps are as follows:

        - define PacsObservation instance
        - mask first scanlines to avoid drift
        - get Time Ordered Data (get_tod)
        - 2nd level deglitching (with standard projector and tod median
            filtering with a narrow window)
        - median filtering
        - define projection
        - perform inversion on model
        - save file

    Arguments
    ---------
    filenames: list of strings
        List of data filenames.
    output_file : string
        Name of the output fits file.
    keywords: dict
        Dictionary of dictionaries containing the options of each
        processing step.
    verbose: boolean (default False)
        Set verbosity.

    Returns
    -------
    Returns nothing. The result is saved as a FITS file.
    """
    from scipy.sparse.linalg import cgs
    import tamasis as tm
    # verbosity
    keywords["mapper_rls"]["verbose"] = verbose
    # define observation
    obs = tm.PacsObservation(filenames, **keywords["PacsObservation"])
    # extra masking
    tm.step_scanline_masking(obs, **keywords["scanline_masking"])
    # get data
    tod = obs.get_tod(**keywords["get_tod"])
    # deglitching
    tm.step_deglitching(obs, tod, **keywords["deglitching"])
    # median filtering
    tod = tm.filter_median(tod, **keywords["filter_median"])
    # define projector
    projection = tm.Projection(obs, **keywords["Projection"])
    # build instrument model
    response = tm.ResponseTruncatedExponential(
        obs.pack(obs.instrument.detector.time_constant) / obs.SAMPLING_PERIOD)
    compression = tm.CompressionAverage(obs.slice.compression_factor)
    masking = tm.Masking(tod.mask)
    model = masking * compression * response * projection
    # set tod masked values to zero
    tod = masking(tod)
    # N^-1 operator
    invntt = tm.InvNtt(obs)
    # perform map-making inversion
    map_rls = tm.mapper_rls(tod,
                            model,
                            invntt=invntt,
                            solver=cgs,
                            **keywords["mapper_rls"])
    # save
    map_rls.save(output_file)
Code example #10
File: compare_algos.py    Project: pombredanne/csh
#!/usr/bin/env python
import numpy as np
import tamasis as tm
import lo
import scipy.sparse.linalg as spl

# data
pacs = tm.PacsObservation(filename=tm.tamasis_dir + 'tests/frames_blue.fits')
tod = pacs.get_tod()
# projector
projector = tm.Projection(pacs,
                          resolution=3.2,
                          oversampling=False,
                          npixels_per_sample=6)
masking_tod = tm.Masking(tod.mask)
model = masking_tod * projector
# naive map
backmap = model.transpose(tod)
# coverage map
weights = model.transpose(tod.ones(tod.shape))
# mask on map
mask = weights == 0
M = lo.mask(mask)
# preconditioner
iweights = 1 / weights
iweights[np.where(np.isfinite(iweights) == 0)] = 0.
M0 = lo.diag(iweights.flatten())
# transform to lo
P = lo.aslinearoperator(model.aslinearoperator())
# priors
Dx = lo.diff(backmap.shape, axis=0)