Example #1
def pipeline_dli(filename, output_file, keywords, verbose=False):
    import numpy as np
    import linear_operators as lo
    import tamasis as tm
    # verbosity
    tm.var.verbose = verbose
    # define observation
    obs = tm.PacsObservation(filename, **keywords["PacsObservation"])
    # extra masking
    tm.step_scanline_masking(obs, **keywords["scanline_masking"])
    # get data
    tod = obs.get_tod(**keywords["get_tod"])
    # deglitching
    tm.step_deglitching(obs, tod, **keywords["deglitching"])
    # median filtering
    tod = tm.filter_median(tod, **keywords["filter_median"])
    # define projector
    projection = obs.get_projection_operator(**keywords["Projection"])
    # build instrument model
    response = tm.ResponseTruncatedExponentialOperator(
        obs.pack(obs.instrument.detector.time_constant) /
        obs.instrument.SAMPLING_PERIOD)
    compression = tm.CompressionAverageOperator(obs.slice.compression_factor)
    masking = tm.MaskOperator(tod.mask)
    model = masking * compression * response * projection
    # set tod masked values to zero
    tod = masking(tod)
    # N^-1 operator
    invntt = tm.InvNttOperator(obs)

    # for the dli algorithm
    M = map_mask(tod, model, **keywords["map_mask"])

    # recast model as a new-style LinearOperator from linear_operators package
    H = lo.aslinearoperator(model) * M.T
    N = lo.aslinearoperator(invntt)
    # vectorize data so it is accepted by LinearOperators
    y = tod.ravel()
    Ds = [tm.DiscreteDifferenceOperator(axis=i, shapein=projection.shapein)
          for i in (0, 1)]
    Ds = [lo.aslinearoperator(D) for D in Ds]
    Ds = [D * M.T for D in Ds]
    D = lo.concatenate(Ds)
    # handle tau which needs to be an ndarray
    keywords["dli"]["tau"] *= np.ones(D.shape[0])
    algo = lo.DoubleLoopAlgorithm(H,
                                  y,
                                  D,
                                  noise_covariance=N,
                                  fmin_args=keywords["fmin_args"],
                                  lanczos=keywords["lanczos"],
                                  **keywords["dli"])
    # optimize
    xe = algo()
    # reshape
    xe = (M.T * xe).reshape(projection.shapein)
    # recast as tamasis map
    xe = tm.Map(xe)
    # save
    xe.save(output_file)
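
For context, pipeline_dli expects keywords to provide one sub-dictionary of options per processing step, keyed by the names used in the code above (map_mask is assumed to be a helper defined elsewhere in the same module). A minimal, hypothetical call could look like the following sketch; the individual option values are placeholders, not taken from the source:

# Hypothetical usage sketch (option values are placeholders):
keywords = {
    "PacsObservation": {},
    "scanline_masking": {},
    "get_tod": {},
    "deglitching": {},
    "filter_median": {},
    "Projection": {},
    "map_mask": {},
    "fmin_args": {},
    "lanczos": {},
    "dli": {"tau": 1.0},  # placeholder; tau is broadcast to an ndarray inside the pipeline
}
pipeline_dli("pacs_obs.fits", "map_dli.fits", keywords, verbose=True)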
Example #2
def __call__(self, data, state):
    M = state['model']
    Ds = state.get('prior_models', [])
    W = state.get('noise_model', None)
    x = self.algo(M,
                  np.asarray(data),
                  Ds=Ds,
                  W=W,
                  **self.kwargs)
    # reshape map
    map_shape = state['map_shape']
    if 'map_mask_model' in state:
        MM = state['map_mask_model']
        sol = (MM.T * x).reshape(map_shape)
    else:
        sol = x.reshape(map_shape)
    header = state['header']
    state['map'] = tm.Map(sol, header=header)
    return data
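
This step pulls its inputs from a shared state dictionary and writes the reconstructed map back into it (module-level imports of numpy as np and tamasis as tm, plus the surrounding class providing self.algo and self.kwargs, are assumed). The keys it relies on, with hypothetical contents for illustration:

# state = {
#     'model': H,                # acquisition model passed to self.algo
#     'prior_models': [Dx, Dy],  # optional list of prior operators (Ds)
#     'noise_model': N,          # optional noise model (W)
#     'map_mask_model': MM,      # optional map mask; MM.T unpacks the solution
#     'map_shape': shape,        # shape used to reshape the solution vector
#     'header': header,          # FITS header attached to the output tm.Map
# }
# The result is stored in state['map'] and the input data is returned unchanged.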
Example #3
def pipeline_huber(filenames, output_file, keywords, verbose=False):
    """
    Perform Huber-regularized inversion of the state-of-the-art
    PACS model. The PACS model includes the tod mask, compression,
    response, projection and noise covariance (invntt).
    Processing steps are as follows:

        - define a PacsObservation instance
        - mask the first scanlines to avoid drift
        - get the Time Ordered Data (get_tod)
        - 2nd-level deglitching (with the standard projector and tod median
          filtering with a narrow window)
        - median filtering
        - define the projection
        - perform the inversion on the model
        - save the result

    Arguments
    ---------
    filenames: list of strings
        List of data filenames.
    output_file: string
        Name of the output FITS file.
    keywords: dict
        Dictionary of options, one sub-dictionary per processing step.
    verbose: boolean (default False)
        Set verbosity.

    Returns
    -------
    Returns nothing. The result is saved as a FITS file.
    """
    from scipy.sparse.linalg import cgs
    import tamasis as tm
    import linear_operators as lo
    # verbosity
    tm.var.verbose = verbose
    # define observation
    obs = tm.PacsObservation(filenames, **keywords["PacsObservation"])
    # extra masking
    tm.step_scanline_masking(obs, **keywords["scanline_masking"])
    # get data
    tod = obs.get_tod(**keywords["get_tod"])
    # deglitching
    # need to adapt deglitching to any compression model
    tm.step_deglitching(obs, tod, **keywords["deglitching"])
    # median filtering
    tod = tm.filter_median(tod, **keywords["filter_median"])
    # define projector
    projection = tm.Projection(obs, **keywords["Projection"])
    P = lo.aslinearoperator(projection)
    # build instrument model
    masking = tm.Masking(tod.mask)
    Mt = lo.aslinearoperator(masking)
    # compression
    mode = compressions[keywords["compression"].pop("mode")]
    factor = keywords["compression"].pop("factor")
    C = mode(tod, factor, **keywords["compression"])
    # build the full acquisition model as a linear_operators LinearOperator
    model = Mt * C * P
    # define map mask
    M = map_mask(tod, model, **keywords["map_mask"])
    # restrict the model to the unmasked map pixels
    H = model * M.T
    # vectorize data so it is accepted by LinearOperators
    y = tod.ravel()
    Ds = [
        tm.DiscreteDifference(axis=i, shapein=projection.shapein)
        for i in (0, 1)
    ]
    Ds = [lo.aslinearoperator(D) for D in Ds]
    Ds = [D * M.T for D in Ds]
    # set tod masked values to zero
    tod = masking(tod)
    # perform map-making inversion
    hypers = (keywords["hacg"].pop("hyper"), ) * len(Ds)
    deltas = (keywords["hacg"].pop("delta"), ) * (len(Ds) + 1)
    map_huber = lo.hacg(H,
                        tod.ravel(),
                        priors=Ds,
                        hypers=hypers,
                        deltas=deltas,
                        **keywords["hacg"])
    # save
    map_huber = (M.T * map_huber).reshape(projection.shapein)
    map_huber = tm.Map(map_huber)
    map_huber.save(output_file)
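
As in the first example, pipeline_huber reads its per-step options from keywords; the compression and hacg sub-dictionaries additionally carry entries that are popped before the remaining options are forwarded. A hypothetical layout, with placeholder values:

# Hypothetical keywords layout (values are placeholders):
keywords = {
    "PacsObservation": {},
    "scanline_masking": {},
    "get_tod": {},
    "deglitching": {},
    "filter_median": {},
    "Projection": {},
    "compression": {"mode": "average", "factor": 8},  # "mode" must be a key of the module-level compressions dict
    "map_mask": {},
    "hacg": {"hyper": 1.0, "delta": 1e-5},  # "hyper" and "delta" are popped and replicated per prior
}
pipeline_huber(["pacs_obs.fits"], "map_huber.fits", keywords, verbose=True)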
Example #4
import os
import numpy as np
import linear_operators as lo
import tamasis as tm
# uctod, ctod, tod, C, projection and header are assumed to be defined
# earlier in the original script from which this fragment is taken
masking = tm.Masking(uctod.mask)
model = masking * projection
# remove drift
#ctod = tm.filter_median(ctod, length=3000 / 8.)
# first map
M = C * lo.aslinearoperator(model.aslinearoperator())
#P = lo.aslinearoperator(projection.aslinearoperator())
#C = csh.averaging(tod.shape, factor=8)
#I = lo.mask(uctod.mask)
#M = C * I.T * I * P
#M = C * P
backmap = (M.T * ctod.flatten()).reshape(projection.shapein)
#weights = (M.T * np.ones(ctod.size)).reshape(projection.shapein)
weights = projection.transpose(tod.ones(tod.shape))
MM = lo.mask(weights == 0)
M = M * MM.T
# define algo
# priors
Dx = lo.diff(backmap.shape, axis=0, dtype=np.float64) * MM.T
Dy = lo.diff(backmap.shape, axis=1, dtype=np.float64) * MM.T
#Dw = lo.pywt_lo.wavedec2(backmap.shape, "haar", level=3)
# inversion
x, conv = lo.rls(M, (Dx, Dy), (1e0, 1e0), ctod.flatten())
sol = tm.Map(np.zeros(backmap.shape))
sol[:] = (MM.T * x).reshape(sol.shape)
sol.header = header
# save
sol.writefits(
    os.path.join(os.getenv('HOME'), 'data', 'csh', 'output',
                 'ngc6946_cs_rls.fits'))
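
For reference, a regularized least-squares call with this argument pattern (model, priors, hyperparameters, data) is usually understood as minimizing the criterion sketched below; this is an assumption about lo.rls based on its arguments, not a description confirmed by the source:

# Assumed criterion for lo.rls(M, (Dx, Dy), (1e0, 1e0), ctod.flatten()):
#
#     J(x) = ||ctod - M x||^2 + 1e0 * ||Dx x||^2 + 1e0 * ||Dy x||^2
#
# with x the map restricted to the pixels kept by MM, unpacked afterwards
# through MM.T before being written to the FITS file.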