Example #1
def pipeline_dli(filename, output_file, keywords, verbose=False):
    """
    Perform an inversion of the full PACS model (tod mask, compression,
    response, projection, noise covariance) with the DoubleLoopAlgorithm
    (DLI) from the linear_operators package, using discrete difference
    priors and a map mask.  Saves the result as a fits file.
    """
    import numpy as np
    import linear_operators as lo
    import tamasis as tm
    # verbosity
    tm.var.verbose = verbose
    # define observation
    obs = tm.PacsObservation(filename, **keywords["PacsObservation"])
    # extra masking
    tm.step_scanline_masking(obs, **keywords["scanline_masking"])
    # get data
    tod = obs.get_tod(**keywords["get_tod"])
    # deglitching
    tm.step_deglitching(obs, tod, **keywords["deglitching"])
    # median filtering
    tod = tm.filter_median(tod, **keywords["filter_median"])
    # define projector
    projection = obs.get_projection_operator(**keywords["Projection"])
    # build instrument model
    response = tm.ResponseTruncatedExponentialOperator(
        obs.pack(obs.instrument.detector.time_constant) /
        obs.instrument.SAMPLING_PERIOD)
    compression = tm.CompressionAverageOperator(obs.slice.compression_factor)
    masking = tm.MaskOperator(tod.mask)
    model = masking * compression * response * projection
    # set tod masked values to zero
    tod = masking(tod)
    # N^-1 operator
    invntt = tm.InvNttOperator(obs)

    # build the map mask for the DLI algorithm (map_mask is assumed to be
    # defined elsewhere in this module)
    M = map_mask(tod, model, **keywords["map_mask"])

    # recast model as a new-style LinearOperator from linear_operators package
    H = lo.aslinearoperator(model) * M.T
    N = lo.aslinearoperator(invntt)
    # vectorize data so it is accepted by LinearOperators
    y = tod.ravel()
    Ds = [tm.DiscreteDifferenceOperator(axis=i, shapein=projection.shapein)
          for i in (0, 1)]
    Ds = [lo.aslinearoperator(D) for D in Ds]
    Ds = [D * M.T for D in Ds]
    D = lo.concatenate(Ds)
    # handle tau which needs to be an ndarray
    keywords["dli"]["tau"] *= np.ones(D.shape[0])
    algo = lo.DoubleLoopAlgorithm(H,
                                  y,
                                  D,
                                  noise_covariance=N,
                                  fmin_args=keywords["fmin_args"],
                                  lanczos=keywords["lanczos"],
                                  **keywords["dli"])
    # optimize
    xe = algo()
    # reshape
    xe = (M.T * xe).reshape(projection.shapein)
    # recast as tamasis map
    xe = tm.Map(xe)
    # save
    xe.save(output_file)
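A hedged usage sketch for pipeline_dli follows. The nesting of the keywords dictionary (one sub-dictionary per processing step, plus top-level "fmin_args" and "lanczos" entries) is what the function above reads; the option values and the file names are placeholders, not tested settings.

# Hypothetical call of pipeline_dli; option values are placeholders.
keywords = {
    "PacsObservation": {"fine_sampling_factor": 1},
    "scanline_masking": {},
    "get_tod": {},
    "deglitching": {},
    "filter_median": {"length": 10000},
    "Projection": {"resolution": 3.},
    "map_mask": {},
    "fmin_args": {},
    "lanczos": {},
    "dli": {"tau": 1e-1},
}
pipeline_dli("frames_blue.fits", "map_dli.fits", keywords, verbose=True)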
Example #2
def pipeline_photproject(filenames, output_file, keywords):
    """
    Perform a simple (non-regularized) map-making of the PACS data: the
    model only includes the tod mask and the projection, and the map is
    produced by the naive mapper (tm.mapper_naive).
    Processing steps are as follows:
    
        - define PacsObservation instance
        - mask first scanlines to avoid drift
        - get Time Ordered Data (get_tod)
        - 2nd level deglitching (with standard projector and tod median
            filtering with a narrow window)
        - median filtering
        - define projection
        - compute the naive map
        - save file

    Arguments
    ---------
    filenames: list of strings
        List of data filenames.
    output_file : string
        Name of the output fits file.
    keywords: dict
        Dictionary containing options for all steps as dictionary.

    Returns
    -------
    Returns nothing. Saves the result as a fits file.
    """
    from scipy.sparse.linalg import cgs
    import tamasis as tm
    # define observation
    obs = tm.PacsObservation(filenames, **keywords["PacsObservation"])
    # extra masking
    tm.step_scanline_masking(obs, **keywords["scanline_masking"])
    # get data
    tod = obs.get_tod(**keywords["get_tod"])
    # deglitching
    tm.step_deglitching(obs, tod, **keywords["deglitching"])
    # median filtering
    tod = tm.filter_median(tod, **keywords["filter_median"])
    # define projector
    projection = obs.get_projection_operator(downsampling=True,
                                             **keywords["Projection"])
    # build instrument model
    masking = tm.MaskOperator(tod.mask)
    model = masking * projection
    # set tod masked values to zero
    tod = masking(tod)
    # perform map-making
    map_naive = tm.mapper_naive(tod, model, **keywords["mapper_naive"])
    # save
    map_naive.save(output_file)
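For intuition, the naive mapper used above is essentially a coverage-normalized backprojection. The sketch below only illustrates that idea, under the assumption that the model exposes a transpose; it is not the tm.mapper_naive implementation, which also handles units and weighting.

import numpy as np

def naive_map_sketch(tod, model):
    # Illustration only: backproject the data, then normalize by the
    # backprojected coverage (how often each sky pixel was observed).
    backmap = model.T(tod)
    coverage = model.T(np.ones_like(tod))
    with np.errstate(divide="ignore", invalid="ignore"):
        return np.where(coverage > 0, backmap / coverage, 0.)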
Example #3
def pipeline_rls(filenames, output_file, keywords, verbose=False):
    """
    Perform a regularized least-squares inversion of a simple model (tod
    mask and projection). Processing steps are as follows:
    
        - define PacsObservation instance
        - get Time Ordered Data (get_tod)
        - define projection
        - perform inversion on model
        - save file

    Arguments
    ---------
    filenames: list of strings
        List of data filenames.
    output_file : string
        Name of the output fits file.
    keywords: dict
        Dictionary containing options for all steps as dictionary.
    verbose: boolean (default False)
        Set verbosity.

    Returns
    -------
    Returns nothing. Saves the result as a fits file.
    """
    from scipy.sparse.linalg import cgs
    import tamasis as tm
    # verbosity
    keywords["mapper_rls"]["verbose"] = verbose
    # define observation
    obs_keys = keywords.get("PacsObservation", {})
    obs = tm.PacsObservation(filenames, **obs_keys)
    # get data
    tod_keys = keywords.get("get_tod", {})
    tod = obs.get_tod(**tod_keys)
    # define projector
    proj_keys = keywords.get("Projection", {})
    projection = tm.Projection(obs, **proj_keys)
    # define mask
    masking_tod = tm.Masking(tod.mask)
    # define full model
    model = masking_tod * projection
    # perform map-making inversion
    mapper_keys = keywords.get("mapper_rls", {})
    map_rls = tm.mapper_rls(tod, model, solver=cgs, **mapper_keys)
    # save
    map_rls.save(output_file)
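Conceptually, regularized least-squares map-making with a conjugate-gradient solver such as cgs amounts to solving the normal equations (H^T H + hyper * D^T D) x = H^T y, where H is the acquisition model and D stands for a smoothness prior weighted by a hyperparameter. The sketch below shows that structure with scipy only; H, D and hyper are stand-ins, not TAMASIS internals.

import numpy as np
from scipy.sparse.linalg import LinearOperator, cgs

def rls_solve_sketch(H, D, y, hyper=1.0):
    # H, D: scipy-style linear operators (model and prior); y: flattened data.
    n = H.shape[1]
    def normal_matvec(x):
        return H.rmatvec(H.matvec(x)) + hyper * D.rmatvec(D.matvec(x))
    A = LinearOperator((n, n), matvec=normal_matvec, dtype=np.float64)
    x, info = cgs(A, H.rmatvec(y))
    return x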
Example #4
def generate_compressed_data(filenames, **keywords):
    """
    Compress data to a given factor using a custom compression matrix
    or operator.
    """
    obs = tm.PacsObservation(filenames, **keywords["PacsObservation"])
    data = obs.get_tod(**keywords["get_tod"])
    # model
    A = tm.PacsConversionAdu(obs, **keywords["PacsConversionAdu"])
    mode = compressions[keywords["compression"].pop("mode")]
    factor = keywords["compression"].pop("factor")
    C = mode(data, factor, **keywords["compression"])
    # convert
    digital_data = A(data)
    # apply compression
    y = C * digital_data.ravel()
    # reshape and recast
    cshape = list(digital_data.shape)
    cshape[1] = y.size // cshape[0]
    compressed_data = fa.FitsArray(data=y.reshape(cshape))
    return compressed_data
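This snippet assumes a few module-level names that are not shown: tm (tamasis), fa (a module providing FitsArray) and compressions, a mapping from a mode name to a compression-operator constructor. A hedged usage sketch, with placeholder option values (the "ca" mode and factor 8 are taken from the other examples in this collection):

keywords = {
    "PacsObservation": {},
    "get_tod": {},
    "PacsConversionAdu": {},
    "compression": {"mode": "ca", "factor": 8},
}
compressed = generate_compressed_data(["frames_blue.fits"], **keywords)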
Example #5
def load_data(filename, header=None, resolution=3.):
    import tamasis as tm
    #mask = np.zeros((32, 64), dtype=np.int8)
    obs = tm.PacsObservation(filename=filename,
                             fine_sampling_factor=1,
                             #detector_mask=mask
                             )
    tod = obs.get_tod()
    if header is None:
        header = obs.get_map_header()
    header.update('CDELT1', resolution / 3600)
    header.update('CDELT2', resolution / 3600)
    npix = 5
    good_npix = False
    # increase npixels_per_sample until the projector can be built
    while not good_npix:
        try:
            projection = tm.Projection(obs, header=header,
                                       resolution=resolution,
                                       oversampling=False,
                                       npixels_per_sample=npix)
            good_npix = True
        except RuntimeError:
            npix += 1
    return tod, projection, header, obs
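A hedged usage sketch of load_data, backprojecting the returned tod through the projector in the same way the script examples below do (model.transpose); the filename is a placeholder.

tod, projection, header, obs = load_data("frames_blue.fits", resolution=3.)
backmap = projection.transpose(tod)   # naive backprojection, as in the later examples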
Example #6
def pipeline_huber(filenames, output_file, keywords, verbose=False):
    """
    Perform a Huber-regularized inversion of the PACS model.  The model
    used here includes the tod mask, compression, projection and a map
    mask (no response or noise covariance operator in this snippet).
    Processing steps are as follows:
    
        - define PacsObservation instance
        - mask first scanlines to avoid drift
        - get Time Ordered Data (get_tod)
        - 2nd level deglitching (with standard projector and tod median
            filtering with a narrow window)
        - median filtering
        - define projection
        - perform inversion on model
        - save file

    Arguments
    ---------
    filenames: list of strings
        List of data filenames.
    output_file : string
        Name of the output fits file.
    keywords: dict
        Dictionary containing options for all steps as dictionary.
    verbose: boolean (default False)
        Set verbosity.

    Returns
    -------
    Returns nothing. Saves the result as a fits file.
    """
    from scipy.sparse.linalg import cgs
    import tamasis as tm
    import linear_operators as lo
    # NB: the verbose argument is not used in this snippet
    # define observation
    obs = tm.PacsObservation(filenames, **keywords["PacsObservation"])
    # extra masking
    tm.step_scanline_masking(obs, **keywords["scanline_masking"])
    # get data
    tod = obs.get_tod(**keywords["get_tod"])
    # deglitching
    # need to adapt deglitching to any compression model
    tm.step_deglitching(obs, tod, **keywords["deglitching"])
    # median filtering
    tod = tm.filter_median(tod, **keywords["filter_median"])
    # define projector
    projection = tm.Projection(obs, **keywords["Projection"])
    P = lo.aslinearoperator(projection)
    # build instrument model
    masking = tm.Masking(tod.mask)
    Mt = lo.aslinearoperator(masking)
    # compression
    mode = compressions[keywords["compression"].pop("mode")]
    factor = keywords["compression"].pop("factor")
    C = mode(tod, factor, **keywords["compression"])
    # define map mask (map_mask is assumed to be defined elsewhere in this module)
    model = masking * projection
    M = map_mask(tod, model, **keywords["map_mask"])
    # recast model as a new-style LinearOperator from linear_operators package
    H = Mt * C * P * M.T
    # vectorize data so it is accepted by LinearOperators
    y = tod.ravel()
    Ds = [
        tm.DiscreteDifference(axis=i, shapein=projection.shapein)
        for i in (0, 1)
    ]
    Ds = [lo.aslinearoperator(D) for D in Ds]
    Ds = [D * M.T for D in Ds]
    # set tod masked values to zero
    tod = masking(tod)
    # perform map-making inversion
    hypers = (keywords["hacg"].pop("hyper"), ) * len(Ds)
    deltas = (keywords["hacg"].pop("delta"), ) * (len(Ds) + 1)
    map_huber = lo.hacg(H,
                        tod.ravel(),
                        priors=Ds,
                        hypers=hypers,
                        deltas=deltas,
                        **keywords["hacg"])
    # save
    map_huber = (M.T * map_huber).reshape(projection.shapein)
    map_huber = tm.Map(map_huber)
    map_huber.save(output_file)
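For reference, the Huber penalty that lo.hacg applies to the difference priors is quadratic for small gradients and linear beyond the threshold delta, which is what makes the inversion edge-preserving compared with a plain quadratic prior. A small self-contained illustration of that potential (not the linear_operators implementation):

import numpy as np

def huber_potential(t, delta):
    # Quadratic below the threshold, linear above it (standard Huber function).
    t = np.asarray(t, dtype=float)
    return np.where(np.abs(t) <= delta,
                    0.5 * t ** 2,
                    delta * (np.abs(t) - 0.5 * delta))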
Example #7
#!/usr/bin/env python
import numpy as np
import tamasis as tm
import lo
import csh.filter as filt
from time import time
import scipy.sparse.linalg as spl

# data
pacs = tm.PacsObservation(filename=tm.tamasis_dir+'tests/frames_blue.fits')
tod = pacs.get_tod()
# projector
model = tm.Projection(pacs, resolution=3.2, oversampling=False, npixels_per_sample=6)
# naive map
backmap = model.transpose(tod)
# transform to lo
P = lo.aslinearoperator(model.aslinearoperator())
# derive filter
kernel = filt.kernel_from_tod(tod, length=10)
#kern = np.mean(kernel, axis=0)
N = filt.kernels_convolve(tod.shape, 1 / np.sqrt(kernel))
# apply to data
yn = N * tod.flatten()
# apply to model
M = N * P
# priors
Ds = [lo.diff(backmap.shape, axis=axis) for axis in xrange(backmap.ndim)]
#Ds.append(lo.pywt_lo.wavelet2(backmap.shape, "haar"))
# inversion
#y = tod.flatten()
x, conv = lo.rls(M, Ds, (1e1, 1e1, 1e-1), yn, spl.bicgstab)
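The script stops at the solver call. A hedged follow-up, mirroring what the pipeline functions above do with their solutions, would be to reshape the solution vector back onto the sky grid and wrap it as a TAMASIS map; the output filename is a placeholder.

# Hedged follow-up: recast the solution as a map and save it.
x_map = tm.Map(x.reshape(backmap.shape))
x_map.save("map_rls_filtered.fits")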
Example #8
#!/usr/bin/env python
import numpy as np
import os
import tamasis as tm
import lo
import csh
# define data set
datadir = os.getenv('CSH_DATA')
filenames = [
    datadir + '/1342185454_blue_PreparedFrames.fits[5954:67617]',
    datadir + '/1342185455_blue_PreparedFrames.fits[5954:67617]'
]
pacs = tm.PacsObservation(filename=filenames,
                          fine_sampling_factor=1,
                          keep_bad_detectors=False)
# reset pacs header to have a shape multiple of 4
resolution = 3.
header = pacs.get_map_header()
#header['NAXIS1'] = 192
#header['NAXIS2'] = 192
#header['CRPIX1'] = 96
#header['CRPIX2'] = 96
header.update('CDELT1', resolution / 3600)
header.update('CDELT2', resolution / 3600)
# data
tod = pacs.get_tod()
y = tod.flatten()
# remove bad pixels (by updating mask !)
#tod = remove_bad_pixels(tod)
# compress data
factor = 8
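The script is cut off right after factor = 8. A hedged sketch of the compression step it appears to be setting up, based on the commented-out csh.averaging call in Example #10 and the compressed-tod pattern of Example #11:

# Hedged sketch only: average-compress the flattened tod by the chosen factor.
C = csh.averaging(tod.shape, factor=factor)
ctod = C * y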
Example #9
def pipeline_wrls(filenames, output_file, keywords, verbose=False):
    """
    Perform a regularized least-squares inversion of the state-of-the-art
    PACS model.  The PACS model includes tod mask, compression,
    response, projection and noise covariance (invntt).
    Processing steps are as follows:
    
        - define PacsObservation instance
        - mask first scanlines to avoid drift
        - get Time Ordered Data (get_tod)
        - 2nd level deglitching (with standard projector and tod median
            filtering with a narrow window)
        - median filtering
        - define projection
        - perform inversion on model
        - save file

    Arguments
    ---------
    filenames: list of strings
        List of data filenames.
    output_file : string
        Name of the output fits file.
    keywords: dict
        Dictionary containing options for all steps as dictionary.
    verbose: boolean (default False)
        Set verbosity.

    Returns
    -------
    Returns nothing. Saves the result as a fits file.
    """
    from scipy.sparse.linalg import cgs
    import tamasis as tm
    # verbosity
    keywords["mapper_rls"]["verbose"] = verbose
    # define observation
    obs = tm.PacsObservation(filenames, **keywords["PacsObservation"])
    # extra masking
    tm.step_scanline_masking(obs, **keywords["scanline_masking"])
    # get data
    tod = obs.get_tod(**keywords["get_tod"])
    # deglitching
    tm.step_deglitching(obs, tod, **keywords["deglitching"])
    # median filtering
    tod = tm.filter_median(tod, **keywords["filter_median"])
    # define projector
    projection = obs.get_projection_operator(**keywords["Projection"])
    # build instrument model
    response = tm.ResponseTruncatedExponentialOperator(
        obs.pack(obs.instrument.detector.time_constant) /
        obs.instrument.SAMPLING_PERIOD)
    compression = tm.CompressionAverageOperator(obs.slice.compression_factor)
    masking = tm.MaskOperator(tod.mask)
    model = masking * compression * response * projection
    # set tod masked values to zero
    tod = masking(tod)
    # N^-1 operator
    invntt = tm.InvNttOperator(obs)
    # perform map-making inversion
    map_rls = tm.mapper_rls(tod,
                            model,
                            invntt=invntt,
                            solver=cgs,
                            **keywords["mapper_rls"])
    # save
    map_rls.save(output_file)
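Relative to Example #3, the conceptual change here is the inverse noise covariance weighting: the normal equations become (H^T N^{-1} H + hyper * D^T D) x = H^T N^{-1} y. A hedged one-function sketch of the weighted normal-equation operator, with stand-in names (not TAMASIS internals):

def wrls_normal_matvec_sketch(H, invntt, D, hyper, x):
    # One application of the weighted normal-equation operator used conceptually
    # by the regularized least-squares mapper.
    return H.rmatvec(invntt.matvec(H.matvec(x))) + hyper * D.rmatvec(D.matvec(x))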
Example #10
    'csh',
    'output',
)
# compression modes
#compressions = ["", "ca", "cs"]
compressions = ["ca"]
# median filter length
filter_length = 10000
hypers = (1e10, 1e10)
resolution = 3.
factor = 4
ext = ".fits"
pre = "ngc6946_madmap1_"
# find a map for each compression and save it
obs = tm.PacsObservation(filename=filename,
                         fine_sampling_factor=1,
                         detector_policy='remove')
tod = obs.get_tod()
header = obs.get_map_header()
header.update('CDELT1', resolution / 3600)
header.update('CDELT2', resolution / 3600)
npix = 5
good_npix = False
projection = tm.Projection(obs,
                           header=header,
                           resolution=resolution,
                           oversampling=False,
                           npixels_per_sample=npix)
model = projection
#C = csh.averaging(tod.shape, factor=factor)
compression_shape = [
Example #11
#!/usr/bin/env python
import tamasis as tm
import csh
import csh.score
import numpy as np
import lo
import scipy.sparse.linalg as spl

# data
pacs = tm.PacsObservation(filename=tm.tamasis_dir+'tests/frames_blue.fits',
                          fine_sampling_factor=1, keep_bad_detectors=True)
tod = pacs.get_tod()
# compression model
#C = lo.binning(tod.shape, factor=8, axis=1, dtype=np.float64)
shape = (64, 32) + (tod.shape[1], )
C = csh.binning3d(shape, factors=(2, 2, 2))
# compress data
ctod = C * tod.flatten()
# projector
projection = tm.Projection(pacs, resolution=3.2, oversampling=False,
                           npixels_per_sample=6)
model = projection
# naive map
backmap = model.transpose(tod)
# transform to lo
#P = lo.ndsubclass(backmap, tod, matvec=model.direct, rmatvec=model.transpose)
P = lo.aslinearoperator(model.aslinearoperator())
# full model
A = C * P
# priors
Dx = lo.diff(backmap.shape, axis=0, dtype=np.float64)
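The snippet ends while the priors are still being defined. A hedged sketch of how it could continue, following the same steps as Example #7 (a second difference prior, then an lo.rls inversion of the compressed model against the compressed data); the hyperparameter values are illustrative only.

# Hedged continuation, modeled on Example #7; values are illustrative only.
Dy = lo.diff(backmap.shape, axis=1, dtype=np.float64)
x, conv = lo.rls(A, (Dx, Dy), (1e1, 1e1), ctod, spl.bicgstab)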