Example No. 1
def correct_ifgs(params: dict) -> None:
    """
    Top level function to perform PyRate workflow on given interferograms

    :param dict params: Dictionary of configuration parameters

    :return: None - correction results are stored in the params dict and
        written to disk by the individual correction steps
    """

    __validate_correct_steps(params)

    # housekeeping
    _update_params_with_tiles(params)
    _create_ifg_dict(params)
    params[cf.REFX_FOUND], params[cf.REFY_FOUND] = ref_pixel_calc_wrapper(
        params)

    # run through the correct steps in user specified sequence
    for step in params['correct']:
        correct_steps[step](params)
    log.info("Finished 'correct' step")
Example No. 2
def _save_stack(ifgs_dict, params, tiles, out_type):
    """
    Save stacking outputs
    """
    log.info('Merging PyRate outputs {}'.format(out_type))
    gt, md, wkt = ifgs_dict['gt'], ifgs_dict['md'], ifgs_dict['wkt']
    epochlist = ifgs_dict['epochlist']
    ifgs = [v for v in ifgs_dict.values() if isinstance(v, PrereadIfg)]
    dest = os.path.join(params[cf.OUT_DIR], out_type + ".tif")
    md[ifc.EPOCH_DATE] = epochlist.dates
    if out_type == 'stack_rate':
        md[ifc.DATA_TYPE] = ifc.STACKRATE
    elif out_type == 'stack_error':
        md[ifc.DATA_TYPE] = ifc.STACKERROR
    else:
        md[ifc.DATA_TYPE] = ifc.STACKSAMP

    rate = np.zeros(shape=ifgs[0].shape, dtype=np.float32)

    for t in tiles:
        rate_file = os.path.join(params[cf.TMPDIR],
                                 out_type + '_' + str(t.index) + '.npy')
        rate_file = Path(rate_file)
        rate_tile = np.load(file=rate_file)
        rate[t.top_left_y:t.bottom_right_y,
             t.top_left_x:t.bottom_right_x] = rate_tile
    shared.write_output_geotiff(md, gt, wkt, rate, dest, np.nan)
    npy_rate_file = os.path.join(params[cf.OUT_DIR], out_type + '.npy')
    np.save(file=npy_rate_file, arr=rate)

    log.debug('Finished PyRate merging {}'.format(out_type))
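Merging works by pasting each tile's saved array into its slice of a zero-initialised full-size array. A self-contained sketch of that assembly step, with a simplified stand-in for PyRate's `Tile` objects:

import numpy as np
from collections import namedtuple

# Simplified stand-in for the Tile objects used above (illustrative only).
Tile = namedtuple('Tile', 'index top_left_y bottom_right_y top_left_x bottom_right_x')

full = np.zeros((4, 6), dtype=np.float32)
tiles = [Tile(0, 0, 4, 0, 3), Tile(1, 0, 4, 3, 6)]
tile_data = {0: np.full((4, 3), 1.0), 1: np.full((4, 3), 2.0)}  # pretend per-tile .npy contents

for t in tiles:
    full[t.top_left_y:t.bottom_right_y, t.top_left_x:t.bottom_right_x] = tile_data[t.index]

print(full[0])   # [1. 1. 1. 2. 2. 2.]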
Example No. 3
def __save_ifgs_dict_with_headers_and_epochs(dest_tifs, ifgs_dict, params,
                                             process_tifs):
    tmpdir = params[cf.TMPDIR]
    if not os.path.exists(tmpdir):
        shared.mkdir_p(tmpdir)

    preread_ifgs_file = Configuration.preread_ifgs(params)
    nifgs = len(dest_tifs)
    # add some extra information that's also useful later
    gt, md, wkt = shared.get_geotiff_header_info(
        process_tifs[0].tmp_sampled_path)
    epochlist = algorithm.get_epochs(ifgs_dict)[0]
    log.info('Found {} unique epochs in the {} interferogram network'.format(
        len(epochlist.dates), nifgs))
    ifgs_dict['epochlist'] = epochlist
    ifgs_dict['gt'] = gt
    ifgs_dict['md'] = md
    ifgs_dict['wkt'] = wkt
    # dump ifgs_dict file for later use
    with open(preread_ifgs_file, 'wb') as f:
        cp.dump(ifgs_dict, f)

    for k in ['gt', 'epochlist', 'md', 'wkt']:
        ifgs_dict.pop(k)

    return ifgs_dict
Example No. 4
def _calc_svd_time_series(ifg_paths: List[str], params: dict,
                          preread_ifgs: dict, tiles: List[Tile]) -> np.ndarray:
    """
    Helper function to obtain time series for spatio-temporal filter
    using SVD method
    """
    # Are there other existing functions that can perform this same job?
    log.info('Calculating incremental time series via SVD method for APS '
             'correction')
    # copy params temporarily
    new_params = deepcopy(params)
    new_params[C.TIME_SERIES_METHOD] = 2  # use SVD method

    process_tiles = mpiops.array_split(tiles)

    nvels = None
    for t in process_tiles:
        log.debug(f'Calculating time series for tile {t.index} during APS '
                  f'correction')
        ifgp = [shared.IfgPart(p, t, preread_ifgs, params) for p in ifg_paths]
        mst_tile = np.load(Configuration.mst_path(params, t.index))
        tsincr = time_series(ifgp, new_params, vcmt=None, mst=mst_tile)[0]
        np.save(file=os.path.join(params[C.TMPDIR],
                                  f'tsincr_aps_{t.index}.npy'),
                arr=tsincr)
        nvels = tsincr.shape[2]

    nvels = mpiops.comm.bcast(nvels, root=0)
    mpiops.comm.barrier()
    # need to assemble tsincr from all processes
    tsincr_g = _assemble_tsincr(ifg_paths, params, preread_ifgs, tiles, nvels)
    log.debug('Finished calculating time series for spatio-temporal filter')
    return tsincr_g
Example No. 5
def _stack_calc(ifg_paths, params, vcmt, tiles, preread_ifgs):
    """
    MPI wrapper for stacking calculation
    """
    process_tiles = mpiops.array_split(tiles)
    log.info('Calculating rate map from stacking')
    output_dir = params[cf.TMPDIR]
    for t in process_tiles:
        log.info('Stacking of tile {}'.format(t.index))
        ifg_parts = [
            shared.IfgPart(p, t, preread_ifgs, params) for p in ifg_paths
        ]
        mst_grid_n = np.load(
            os.path.join(output_dir, 'mst_mat_{}.npy'.format(t.index)))
        rate, error, samples = stack.stack_rate_array(ifg_parts, params, vcmt,
                                                      mst_grid_n)
        # declare file names
        np.save(file=os.path.join(output_dir,
                                  'stack_rate_{}.npy'.format(t.index)),
                arr=rate)
        np.save(file=os.path.join(output_dir,
                                  'stack_error_{}.npy'.format(t.index)),
                arr=error)
        np.save(file=os.path.join(output_dir,
                                  'stack_samples_{}.npy'.format(t.index)),
                arr=samples)
    mpiops.comm.barrier()
    log.debug("Finished stack rate calc!")
Example No. 6
def main(params):
    """
    Main workflow function for preparing interferograms for PyRate.

    :param dict params: Parameters dictionary read in from the config file
    """
    # TODO: it looks like ifg_paths are ordered according to the ifg list.
    # This probably won't be a problem because the input list won't be
    # reordered (and the original GAMMA-generated list is ordered). This may
    # not affect the important PyRate steps anyway, but might affect
    # gen_thumbs.py. Going to assume ifg_paths is ordered correctly.
    # pylint: disable=too-many-branches
    shared.mpi_vs_multiprocess_logging("prepifg", params)

    ifg_paths = params[cf.INTERFEROGRAM_FILES]
    if params[cf.DEM_FILE] is not None:  # optional DEM conversion
        ifg_paths.append(params[cf.DEM_FILE_PATH])

    shared.mkdir_p(params[cf.OUT_DIR])  # create output dir

    process_ifgs_paths = np.array_split(ifg_paths, mpiops.size)[mpiops.rank]

    gtiff_paths = [p.converted_path for p in process_ifgs_paths]
    do_prepifg(gtiff_paths, params)
    mpiops.comm.barrier()
    log.info("Finished prepifg")
Example No. 7
def spatial_low_pass_filter(ts_hp: np.ndarray, ifg: Ifg,
                            params: dict) -> np.ndarray:
    """
    Filter time series data spatially using a Gaussian low-pass
    filter defined by a cut-off distance. If the cut-off distance is
    defined as zero in the parameters dictionary then it is calculated for
    each time step using the pyrate.covariance.cvd_from_phase method.
    :param ts_hp: Array of temporal high-pass time series data, shape (ifg.shape, n_epochs)
    :param ifg: pyrate.core.shared.Ifg Class object.
    :param params: Dictionary of PyRate configuration parameters.
    :return: ts_lp: Low-pass filtered time series data of shape (ifg.shape, n_epochs).
    """
    log.info('Applying spatial low-pass filter')

    nvels = ts_hp.shape[2]
    cutoff = params[C.SLPF_CUTOFF]
    # nanfill = params[cf.SLPF_NANFILL]
    # fillmethod = params[cf.SLPF_NANFILL_METHOD]
    if cutoff == 0:
        r_dist = RDist(ifg)()  # only needed for cvd_from_phase
    else:
        r_dist = None
        log.info(f'Gaussian spatial filter cutoff is {cutoff:.3f} km for all '
                 f'{nvels} time-series images')

    process_nvel = mpiops.array_split(range(nvels))
    process_ts_lp = {}

    for i in process_nvel:
        process_ts_lp[i] = _slpfilter(ts_hp[:, :, i], ifg, r_dist, params)

    ts_lp_d = shared.join_dicts(mpiops.comm.allgather(process_ts_lp))
    ts_lp = np.dstack([v[1] for v in sorted(ts_lp_d.items())])
    log.debug('Finished applying spatial low pass filter')
    return ts_lp
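Each process filters only its share of the time-series slices; the per-slice results are gathered from all ranks, merged into one dict and re-stacked in index order. A sketch of that reassembly with plain dicts standing in for the `allgather` output:

import numpy as np

# Pretend per-process result dicts as returned by mpiops.comm.allgather:
# keys are time-slice indices, values are filtered 2D arrays.
gathered = [{0: np.zeros((2, 2)), 2: np.full((2, 2), 2.0)},
            {1: np.ones((2, 2))}]

merged = {}
for d in gathered:            # equivalent of shared.join_dicts
    merged.update(d)

ts_lp = np.dstack([v for _, v in sorted(merged.items())])
print(ts_lp.shape)            # (2, 2, 3) - slices back in index order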
Example No. 8
def main(params):
    """
    Main workflow function for preparing interferograms for PyRate.

    :param dict params: Parameters dictionary read in from the config file
    """
    # TODO: it looks like ifg_paths are ordered according to the ifg list.
    # This probably won't be a problem because the input list won't be
    # reordered (and the original GAMMA-generated list is ordered). This may
    # not affect the important PyRate steps anyway, but might affect
    # gen_thumbs.py. Going to assume ifg_paths is ordered correctly.
    # pylint: disable=too-many-branches
    shared.mpi_vs_multiprocess_logging("prepifg", params)

    ifg_paths = params[cf.INTERFEROGRAM_FILES]
    if params[cf.DEM_FILE] is not None:  # optional DEM conversion
        ifg_paths.append(params[cf.DEM_FILE_PATH])

    if params[cf.COH_MASK]:
        ifg_paths.extend(params[cf.COHERENCE_FILE_PATHS])

    shared.mkdir_p(params[cf.OUT_DIR])  # create output dir

    user_exts = (params[cf.IFG_XFIRST], params[cf.IFG_YFIRST], params[cf.IFG_XLAST], params[cf.IFG_YLAST])
    xlooks, ylooks, crop = cf.transform_params(params)
    ifgs = [prepifg_helper.dem_or_ifg(p.converted_path) for p in ifg_paths]
    exts = prepifg_helper.get_analysis_extent(crop, ifgs, xlooks, ylooks, user_exts=user_exts)

    process_ifgs_paths = np.array_split(ifg_paths, mpiops.size)[mpiops.rank]
    do_prepifg(process_ifgs_paths, exts, params)
    mpiops.comm.barrier()
    log.info("Finished prepifg")
Example No. 9
def coherence_masking(input_gdal_dataset: Dataset, coherence_file_path: str,
                      coherence_thresh: float) -> None:
    """
    Perform coherence masking on raster in-place.

    Based on gdal_calc formula provided by Nahidul:
    gdal_calc.py -A 20151127-20151209_VV_8rlks_flat_eqa.cc.tif
    -B 20151127-20151209_VV_8rlks_eqa.unw.tif
    --outfile=test_v1.tif --calc="B*(A>=0.8)-999*(A<0.8)"
    --NoDataValue=-999
    """

    coherence_ds = gdal.Open(coherence_file_path, gdalconst.GA_ReadOnly)
    coherence_band = coherence_ds.GetRasterBand(1)
    src_band = input_gdal_dataset.GetRasterBand(1)
    ndv = np.nan
    coherence = coherence_band.ReadAsArray()
    src = src_band.ReadAsArray()
    var = {"coh": coherence, "src": src, "t": coherence_thresh, "ndv": ndv}
    formula = "where(coh>=t, src, ndv)"
    res = ne.evaluate(formula, local_dict=var)
    src_band.WriteArray(res)
    # update metadata
    input_gdal_dataset.GetRasterBand(1).SetNoDataValue(ndv)
    input_gdal_dataset.FlushCache()  # write on the disc
    log.info(f"Applied coherence masking using coh file {coherence_file_path}")
Example No. 10
def mst_calc_wrapper(params):
    """
    MPI wrapper function for MST calculation
    """

    log.info('Calculating minimum spanning tree matrix')

    def _save_mst_tile(tile: Tile, params: dict) -> None:
        """
        Convenient inner loop for mst tile saving
        """
        preread_ifgs = params[C.PREREAD_IFGS]
        dest_tifs = [
            ifg_path.tmp_sampled_path
            for ifg_path in params[C.INTERFEROGRAM_FILES]
        ]
        mst_file_process_n = Configuration.mst_path(params, index=tile.index)
        if mst_file_process_n.exists():
            return
        mst_tile = mst_multiprocessing(tile, dest_tifs, preread_ifgs, params)
        # locally save the mst_mat
        np.save(file=mst_file_process_n, arr=mst_tile)

    tiles_split(_save_mst_tile, params)

    log.debug('Finished minimum spanning tree calculation')
Example No. 11
def mask_rate(rate, error, maxsig):
    """
    Function to mask pixels in the rate and error arrays when the error
    is greater than the error threshold 'maxsig'.

    :param ndarray rate: array of pixel rates derived by stacking
    :param ndarray error: array of errors for the pixel rates
    :param int maxsig: error threshold for masking (in millimetres).

    :return: rate: Masked rate (velocity) map
    :rtype: ndarray
    :return: error: Masked error (standard deviation) map
    :rtype: ndarray
    """
    # initialise mask of valid (non-NaN) pixels
    mask = ~isnan(error)
    # count of valid pixels before masking
    orig = np.count_nonzero(mask)
    # determine where error is larger than the maximum sigma threshold
    mask[mask] &= error[mask] > maxsig
    # replace values with NaNs
    rate[mask] = nan
    error[mask] = nan
    # calculate percentage of masked pixels
    nummasked = int(np.count_nonzero(mask) / orig * 100)
    log.info('Percentage of pixels masked = {}%'.format(nummasked))

    return rate, error
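A standalone sketch of the same masking rule on tiny synthetic arrays (the threshold and shapes are illustrative only), showing that only valid pixels whose error exceeds `maxsig` are newly set to NaN:

import numpy as np

rate = np.array([[10.0, 20.0], [30.0, np.nan]])
error = np.array([[0.5, 5.0], [1.5, np.nan]])
maxsig = 2.0

mask = ~np.isnan(error)              # valid (non-NaN) pixels
mask[mask] &= error[mask] > maxsig   # of those, flag errors above the threshold
rate[mask] = np.nan
error[mask] = np.nan
print(rate)   # [[10. nan]
              #  [30. nan]]  - only pixel (0, 1) was newly masked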
Example No. 12
def wrap_spatio_temporal_filter(ifg_paths, params, tiles, preread_ifgs):
    """
    A wrapper for the spatio-temporal filter so it can be tested.
    See docstring for spatio_temporal_filter.
    """
    if params[cf.APSEST]:
        log.info('Doing APS spatio-temporal filtering')
    else:
        log.info('APS spatio-temporal filtering not required')
        return

    # perform some checks on existing ifgs
    log.debug('Checking APS correction status')
    if mpiops.run_once(shared.check_correction_status, ifg_paths,
                       ifc.PYRATE_APS_ERROR):
        log.debug('Finished APS correction')
        return  # nothing more to do; APS correction already applied

    tsincr = _calc_svd_time_series(ifg_paths, params, preread_ifgs, tiles)
    mpiops.comm.barrier()

    ifg = Ifg(ifg_paths[0])  # just grab any for parameters in slpfilter
    ifg.open()
    spatio_temporal_filter(tsincr, ifg, params, preread_ifgs)
    ifg.close()
    mpiops.comm.barrier()
Example No. 13
def _geotiff_multiprocessing(unw_path: MultiplePaths,
                             params: dict) -> Tuple[str, bool]:
    """
    Multiprocessing wrapper for full-res geotiff conversion
    """
    # TODO: Need a more robust method for identifying coherence files.
    dest = unw_path.converted_path
    processor = params[C.PROCESSOR]  # roipac or gamma

    # Create full-res geotiff if not already on disk
    if not os.path.exists(dest):
        if processor == GAMMA:
            header = gamma.gamma_header(unw_path.unwrapped_path, params)
        elif processor == ROIPAC:
            log.info(
                "Warning: ROI_PAC support will be deprecated in a future PyRate release"
            )
            header = roipac.roipac_header(unw_path.unwrapped_path, params)
        else:
            raise PreprocessError('Processor must be ROI_PAC (0) or GAMMA (1)')
        header[ifc.INPUT_TYPE] = unw_path.input_type
        shared.write_fullres_geotiff(header,
                                     unw_path.unwrapped_path,
                                     dest,
                                     nodata=params[C.NO_DATA_VALUE])
        Path(dest).chmod(0o444)  # readonly output
        return dest, True
    else:
        log.warning(
            f"Full-res geotiff already exists in {dest}! Returning existing geotiff!"
        )
        return dest, False
Example No. 14
def _merge_linrate(params: dict) -> None:
    """
    Merge linear rate outputs
    """
    shape, tiles, ifgs_dict = mpiops.run_once(__merge_setup, params)

    log.info('Merging and writing Linear Rate product geotiffs')

    # read and assemble tile outputs
    out_types = [
        'linear_' + x
        for x in ['rate', 'rsquared', 'error', 'intercept', 'samples']
    ]
    process_out_types = mpiops.array_split(out_types)
    for p_out_type in process_out_types:
        out = assemble_tiles(shape,
                             params[C.TMPDIR],
                             tiles,
                             out_type=p_out_type)
        __save_merged_files(ifgs_dict,
                            params,
                            out,
                            p_out_type,
                            savenpy=params["savenpy"])
    mpiops.comm.barrier()
Example No. 15
def __drop_ifgs_if_not_part_of_any_loop(ifg_files: List[str],
                                        loops: List[WeightedLoop],
                                        params: dict) -> List[str]:
    """
    Check if an ifg is part of any of the loops, otherwise drop it from the list of interferograms for further PyRate
    processing.
    """
    loop_ifgs = set()
    for weighted_loop in loops:
        for edge in weighted_loop.loop:
            loop_ifgs.add(Edge(edge.first, edge.second))

    ifgs = [Ifg(i) for i in ifg_files]
    for i in ifgs:
        i.open()
        i.nodata_value = params[C.NO_DATA_VALUE]
    selected_ifg_files = []
    for i, f in zip(ifgs, ifg_files):
        if Edge(i.first, i.second) in loop_ifgs:
            selected_ifg_files.append(f)
    if len(ifg_files) != len(selected_ifg_files):
        log.info(
            f'Only {len(selected_ifg_files)} (out of {len(ifg_files)}) ifgs participate in '
            f'one or more closure loops, and are selected for further PyRate analysis'
        )
    return selected_ifg_files
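Membership testing relies on `Edge` comparing by value, so an interferogram is kept whenever its (first, second) date pair appears in any loop. A tiny sketch of that idea with a namedtuple standing in for PyRate's `Edge`:

from collections import namedtuple

# Illustrative Edge holding the first/second acquisition dates of an ifg.
Edge = namedtuple('Edge', 'first second')

loop_ifgs = {Edge('2020-01-01', '2020-01-13'), Edge('2020-01-13', '2020-01-25')}
candidates = [('2020-01-01', '2020-01-13'), ('2020-01-01', '2020-01-25')]

selected = [c for c in candidates if Edge(*c) in loop_ifgs]
print(selected)   # only the first pair participates in a loop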
Example No. 16
def main(params):
    """
    Parse parameters and prepare files for conversion.

    :param dict params: Parameters dictionary read in from the config file
    """
    # TODO: it looks like base_ifg_paths are ordered according to the ifg list.
    # This probably won't be a problem because the input list won't be
    # reordered (and the original GAMMA-generated list is ordered). This may
    # not affect the important PyRate steps anyway, but might affect
    # gen_thumbs.py. Going to assume base_ifg_paths is ordered correctly.
    # pylint: disable=too-many-branches

    if params[cf.PROCESSOR] == 2:  # if geotif
        log.warning("'conv2tif' step not required for geotiff!")
        return

    mpi_vs_multiprocess_logging("conv2tif", params)

    base_ifg_paths = params[cf.INTERFEROGRAM_FILES]

    if params[cf.COH_FILE_LIST] is not None:
        base_ifg_paths.extend(params[cf.COHERENCE_FILE_PATHS])

    if params[cf.DEM_FILE] is not None:  # optional DEM conversion
        base_ifg_paths.append(params[cf.DEM_FILE_PATH])

    process_base_ifgs_paths = np.array_split(base_ifg_paths, mpiops.size)[mpiops.rank]
    gtiff_paths = do_geotiff(process_base_ifgs_paths, params)
    mpiops.comm.barrier()
    log.info("Finished 'conv2tif' step")
    return gtiff_paths
Example No. 17
    def _inner(proc_ifgs, phase_data_sum):
        if not isinstance(proc_ifgs[0], Ifg):
            proc_ifgs = [Ifg(ifg_path) for ifg_path in proc_ifgs]

        for ifg in proc_ifgs:
            if not ifg.is_open:
                ifg.open(readonly=False)

        comp = np.isnan(phase_data_sum)
        comp = np.ravel(comp, order='F')

        if params[cf.PARALLEL]:
            phase_data = [i.phase_data for i in proc_ifgs]
            log.info("Calculating ref phase using multiprocessing")
            ref_phs = Parallel(n_jobs=params[cf.PROCESSES],
                               verbose=joblib_log_level(cf.LOG_LEVEL))(
                                   delayed(_est_ref_phs_method1)(p, comp)
                                   for p in phase_data)
            for n, ifg in enumerate(proc_ifgs):
                ifg.phase_data -= ref_phs[n]
        else:
            log.info("Calculating ref phase")
            ref_phs = np.zeros(len(proc_ifgs))
            for n, ifg in enumerate(proc_ifgs):
                ref_phs[n] = _est_ref_phs_method1(ifg.phase_data, comp)
                ifg.phase_data -= ref_phs[n]

        for ifg in proc_ifgs:
            _update_phase_metadata(ifg)
            ifg.close()

        return ref_phs
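When `PARALLEL` is enabled, the per-interferogram reference phase is computed with joblib's `Parallel`/`delayed`. A minimal sketch of that pattern with a toy reference-phase function in place of `_est_ref_phs_method1`:

import numpy as np
from joblib import Parallel, delayed

def toy_ref_phase(phase, comp):
    # Hypothetical stand-in: median phase of the pixels flagged usable by comp.
    return np.nanmedian(phase[comp])

phase_data = [np.random.rand(4, 4) for _ in range(3)]
comp = np.ones((4, 4), dtype=bool)

ref_phs = Parallel(n_jobs=2)(delayed(toy_ref_phase)(p, comp) for p in phase_data)
corrected = [p - r for p, r in zip(phase_data, ref_phs)]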
Example No. 18
def spatial_low_pass_filter(ts_lp, ifg, params):
    """
    Filter time series data spatially using either a Butterworth or Gaussian
    low pass filter defined by a cut-off distance. If the cut-off distance is
    defined as zero in the parameters dictionary then it is calculated for
    each time step using the pyrate.covariance.cvd_from_phase method.

    :param ndarray ts_lp: Array of time series data, the result of a temporal
                low pass filter operation. shape (ifg.shape, n_epochs)
    :param shared.Ifg instance ifg: interferogram object
    :param dict params: Dictionary of configuration parameters

    :return: ts_lp: filtered time series data of shape (ifg.shape, n_epochs)
    :rtype: ndarray
    """
    log.info('Applying spatial low-pass filter')
    if params[cf.SLPF_NANFILL] == 0:
        ts_lp[np.isnan(ts_lp)] = 0  # need it here for cvd and fft
    else:
        # optionally interpolate, operation is inplace
        _interpolate_nans(ts_lp, params[cf.SLPF_NANFILL_METHOD])
    r_dist = RDist(ifg)()
    for i in range(ts_lp.shape[2]):
        ts_lp[:, :, i] = _slpfilter(ts_lp[:, :, i], ifg, r_dist, params)
    log.debug('Finished applying spatial low pass filter')
    return ts_lp
Example No. 19
def maxvar_vcm_calc_wrapper(params):
    """
    MPI wrapper for maxvar and vcmt computation
    """
    preread_ifgs = params[cf.PREREAD_IFGS]
    ifg_paths = [ifg_path.tmp_sampled_path for ifg_path in params[cf.INTERFEROGRAM_FILES]]
    log.info('Calculating the temporal variance-covariance matrix')

    def _get_r_dist(ifg_path):
        """
        Get RDist class object
        """
        ifg = Ifg(ifg_path)
        ifg.open()
        r_dist = RDist(ifg)()
        ifg.close()
        return r_dist

    r_dist = mpiops.run_once(_get_r_dist, ifg_paths[0])
    prcs_ifgs = mpiops.array_split(list(enumerate(ifg_paths)))
    process_maxvar = {}
    for n, i in prcs_ifgs:
        log.debug(f'Calculating maxvar for ifg {n} ({len(prcs_ifgs)} ifgs on this process, {len(ifg_paths)} total)')
        process_maxvar[int(n)] = cvd(i, params, r_dist, calc_alpha=True, write_vals=True, save_acg=True)[0]
    maxvar_d = shared.join_dicts(mpiops.comm.allgather(process_maxvar))
    maxvar = [v[1] for v in sorted(maxvar_d.items(), key=lambda s: s[0])]

    vcmt = mpiops.run_once(get_vcmt, preread_ifgs, maxvar)
    log.debug("Finished maxvar and vcm calc!")
    params[cf.MAXVAR], params[cf.VCMT] = maxvar, vcmt
    np.save(Configuration.vcmt_path(params), arr=vcmt)
    return maxvar, vcmt
Example No. 20
def temporal_low_pass_filter(tsincr, epochlist, params):
    """
    Filter time series data temporally using either a Gaussian, triangular
    or mean low pass filter defined by a cut-off time period (in years).

    :param ndarray tsincr: Array of incremental time series data of shape
                (ifg.shape, n_epochs)
    :param list epochlist: List of shared.EpochList class instances
    :param dict params: Dictionary of configuration parameters

    :return: tsfilt_incr: filtered time series data, shape (ifg.shape, nepochs)
    :rtype: ndarray
    """
    log.info('Applying temporal low-pass filter')
    nanmat = ~isnan(tsincr)
    tsfilt_incr = np.full_like(tsincr, np.nan, dtype=np.float32)
    intv = np.diff(epochlist.spans)  # time interval for the neighboring epoch
    span = epochlist.spans[:tsincr.shape[2]] + intv / 2  # accumulated time
    rows, cols = tsincr.shape[:2]
    cutoff = params[cf.TLPF_CUTOFF]
    method = params[cf.TLPF_METHOD]
    threshold = params[cf.TLPF_PTHR]
    if method == 1:  # gaussian filter
        func = gauss
    elif method == 2:  # triangular filter
        func = _triangle
    else:
        func = mean_filter

    _tlpfilter(cols, cutoff, nanmat, rows, span, threshold, tsfilt_incr,
               tsincr, func)
    log.debug("Finished applying temporal low-pass filter")
    return tsfilt_incr
Example No. 21
def correct_ifgs(config: Configuration) -> None:
    """
    Top level function to perform PyRate workflow on given interferograms
    """
    params = config.__dict__
    __validate_correct_steps(params)

    # work out the tiling and add to params dict
    _update_params_with_tiles(params)

    # create the preread_ifgs dict for use with tiled data
    _create_ifg_dict(params)

    ifg_paths = [ifg_path.tmp_sampled_path for ifg_path in params[C.INTERFEROGRAM_FILES]]

    # create initial tiled phase_data numpy files on disc
    save_numpy_phase(ifg_paths, params)

    params[C.REFX_FOUND], params[C.REFY_FOUND] = ref_pixel_calc_wrapper(params)

    # run through the correct steps in user specified sequence
    for step in params['correct']:
        if step == 'phase_closure':
            correct_steps[step](params, config)
        else:
            correct_steps[step](params)
    log.info("Finished 'correct' step")
Example No. 22
def _calc_svd_time_series(ifg_paths, params, preread_ifgs, tiles):
    """
    Helper function to obtain time series for spatio-temporal filter
    using SVD method
    """
    # Are there other existing functions that can perform this same job?
    log.info('Calculating time series via SVD method for APS correction')
    # copy params temporarily
    new_params = deepcopy(params)
    new_params[cf.TIME_SERIES_METHOD] = 2  # use SVD method

    process_tiles = mpiops.array_split(tiles)

    nvels = None
    for t in process_tiles:
        log.debug('Calculating time series for tile {} during APS '
                  'correction'.format(t.index))
        ifg_parts = [
            shared.IfgPart(p, t, preread_ifgs, params) for p in ifg_paths
        ]
        mst_tile = np.load(
            os.path.join(params[cf.TMPDIR], 'mst_mat_{}.npy'.format(t.index)))
        tsincr = time_series(ifg_parts, new_params, vcmt=None, mst=mst_tile)[0]
        np.save(file=os.path.join(params[cf.TMPDIR],
                                  'tsincr_aps_{}.npy'.format(t.index)),
                arr=tsincr)
        nvels = tsincr.shape[2]

    nvels = mpiops.comm.bcast(nvels, root=0)
    mpiops.comm.barrier()
    # need to assemble tsincr from all processes
    tsincr_g = mpiops.run_once(_assemble_tsincr, ifg_paths, params,
                               preread_ifgs, tiles, nvels)
    log.debug('Finished calculating time series for spatio-temporal filter')
    return tsincr_g
Example No. 23
def _prepifg_multiprocessing(path, xlooks, ylooks, exts, thresh, crop, params):
    """
    Multiprocessing wrapper for prepifg
    """
    processor = params[cf.PROCESSOR]  # roipac, gamma or geotif
    if (processor == GAMMA) or (processor == GEOTIF):
        header = gamma.gamma_header(path, params)
    elif processor == ROIPAC:
        log.info("Warning: ROI_PAC support will be deprecated in a future PyRate release")
        header = roipac.roipac_header(path, params)
    else:
        raise PreprocessError('Processor must be ROI_PAC (0), GAMMA (1) or GEOTIF (2)')

    # If we're performing coherence masking, find the coherence file for this IFG.
    if params[cf.COH_MASK] and shared._is_interferogram(header):
        coherence_path = cf.coherence_paths_for(path, params, tif=True)
        coherence_thresh = params[cf.COH_THRESH]
    else:
        coherence_path = None
        coherence_thresh = None

    if params[cf.LARGE_TIFS]:
        op = output_tiff_filename(path, params[cf.OUT_DIR])
        looks_path = cf.mlooked_path(op, ylooks, crop)
        return path, coherence_path, looks_path
    else:
        prepifg_helper.prepare_ifg(path, xlooks, ylooks, exts, thresh, crop, out_path=params[cf.OUT_DIR],
                                   header=header, coherence_path=coherence_path, coherence_thresh=coherence_thresh)
Example No. 24
def spatio_temporal_filter(params: dict) -> None:
    """
    Applies a spatio-temporal filter to remove the atmospheric phase screen
    (APS) and saves the corrected interferograms. Firstly the incremental
    time series is computed using the SVD method, before a cascade of temporal
    then spatial Gaussian filters is applied. The resulting APS corrections are
    saved to disc before being subtracted from each interferogram.

    :param params: Dictionary of PyRate configuration parameters.
    """
    if params[C.APSEST]:
        log.info('Doing APS spatio-temporal filtering')
    else:
        log.info('APS spatio-temporal filtering not required')
        return
    tiles = params[C.TILES]
    preread_ifgs = params[C.PREREAD_IFGS]
    ifg_paths = [
        ifg_path.tmp_sampled_path for ifg_path in params[C.INTERFEROGRAM_FILES]
    ]

    # perform some checks on existing ifgs
    log.debug('Checking APS correction status')
    if mpiops.run_once(shared.check_correction_status, ifg_paths,
                       ifc.PYRATE_APS_ERROR):
        log.debug('Finished APS correction')
        return  # nothing more to do; APS correction already applied

    aps_paths = [MultiplePaths.aps_error_path(i, params) for i in ifg_paths]
    if all(a.exists() for a in aps_paths):
        log.warning('Reusing APS errors from previous run')
        _apply_aps_correction(ifg_paths, aps_paths, params)
        return

    # obtain the incremental time series using SVD
    tsincr = _calc_svd_time_series(ifg_paths, params, preread_ifgs, tiles)
    mpiops.comm.barrier()

    # get lists of epochs and ifgs
    ifgs = list(OrderedDict(sorted(preread_ifgs.items())).values())
    epochlist = mpiops.run_once(get_epochs, ifgs)[0]

    # first perform temporal high pass filter
    ts_hp = temporal_high_pass_filter(tsincr, epochlist, params)

    # second perform spatial low pass filter to obtain APS correction in ts domain
    ifg = Ifg(ifg_paths[0])  # just grab any for parameters in slpfilter
    ifg.open()
    ts_aps = spatial_low_pass_filter(ts_hp, ifg, params)
    ifg.close()

    # construct APS corrections for each ifg
    _make_aps_corrections(ts_aps, ifgs, params)

    # apply correction to ifgs and save ifgs to disc.
    _apply_aps_correction(ifg_paths, aps_paths, params)

    # update/save the phase_data in the tiled numpy files
    shared.save_numpy_phase(ifg_paths, params)
Example No. 25
def remove_orbital_error(ifgs: List, params: dict) -> None:
    """
    Wrapper function for PyRate orbital error removal functionality.

    NB: the ifg data is modified in situ, rather than create intermediate
    files. The network method assumes the given ifgs have already been reduced
    to a minimum spanning tree network.
    """
    ifg_paths = [i.data_path
                 for i in ifgs] if isinstance(ifgs[0], Ifg) else ifgs
    degree = params[C.ORBITAL_FIT_DEGREE]
    method = params[C.ORBITAL_FIT_METHOD]
    orbfitlksx = params[C.ORBITAL_FIT_LOOKS_X]
    orbfitlksy = params[C.ORBITAL_FIT_LOOKS_Y]

    # Sanity check of the orbital params
    if type(orbfitlksx) != int or type(orbfitlksy) != int:
        msg = f"Multi-look factors for orbital correction should be of type: int"
        raise OrbitalError(msg)
    if degree not in [PLANAR, QUADRATIC, PART_CUBIC]:
        msg = "Invalid degree of %s for orbital correction" % C.ORB_DEGREE_NAMES.get(
            degree)
        raise OrbitalError(msg)
    if method not in [NETWORK_METHOD, INDEPENDENT_METHOD]:
        msg = "Invalid method of %s for orbital correction" % C.ORB_METHOD_NAMES.get(
            method)
        raise OrbitalError(msg)

    # Give informative log messages based on selected options
    log.info(
        f'Calculating {__degrees_as_string(degree)} orbital correction using '
        f'{__methods_as_string(method)} method')
    if orbfitlksx > 1 or orbfitlksy > 1:
        log.info(f'Multi-looking interferograms for orbital correction with '
                 f'factors of X = {orbfitlksx} and Y = {orbfitlksy}')

    if method == INDEPENDENT_METHOD:
        iterable_split(independent_orbital_correction, ifg_paths, params)

    elif method == NETWORK_METHOD:
        # Here we do all the multilooking in one process, but in memory
        # could use multiple processes if we write data to disc during
        # remove_orbital_error step
        # TODO: performance comparison of saving multilooked files on
        # disc vs in-memory single-process multilooking
        #
        # The gdal swig bindings prevent us from doing multi-looking in parallel
        # when using multiprocessing because the multilooked ifgs are held in
        # memory using in-memory tifs. Parallelism using MPI is possible.
        # TODO: Use a flag to select mpi parallel vs multiprocessing in the
        # iterable_split function, which will use mpi but can fall back on
        # single process based on the flag for the multiprocessing side.
        if mpiops.rank == MAIN_PROCESS:
            mlooked = __create_multilooked_datasets(params)
            _validate_mlooked(mlooked, ifg_paths)
            network_orbital_correction(ifg_paths, params, mlooked)
    else:
        raise OrbitalError("Unrecognised orbital correction method")
Example No. 26
def __wrap_closure_check(config: Configuration) -> \
        Tuple[
            List[str],
            NDArray[(Any, Any), Float32],
            NDArray[(Any, Any, Any), UInt16],
            NDArray[(Any,), UInt16],
            List[WeightedLoop]]:
    """
    This wrapper function returns the closure check outputs for a single iteration of closure check.

    :param config: Configuration class instance
    For return variables see docstring in `sum_phase_closures`.
    """
    params = config.__dict__
    ifg_files = [
        ifg_path.tmp_sampled_path for ifg_path in params[C.INTERFEROGRAM_FILES]
    ]
    ifg_files.sort()
    log.debug(f"The number of ifgs in the list is {len(ifg_files)}")
    sorted_signed_loops = mpiops.run_once(sort_loops_based_on_weights_and_date,
                                          params)
    log.info(
        f"Total number of selected closed loops with up to MAX_LOOP_LENGTH = "
        f"{params[C.MAX_LOOP_LENGTH]} edges is {len(sorted_signed_loops)}")

    if len(sorted_signed_loops) < 1:
        return None

    retained_loops = mpiops.run_once(discard_loops_containing_max_ifg_count,
                                     sorted_signed_loops, params)
    ifgs_with_loops = mpiops.run_once(__drop_ifgs_if_not_part_of_any_loop,
                                      ifg_files, retained_loops, params)

    msg = f"After applying MAX_LOOP_REDUNDANCY = {params[C.MAX_LOOP_REDUNDANCY]} criteria, " \
          f"{len(retained_loops)} loops are retained"
    if len(retained_loops) < 1:
        return None
    else:
        log.info(msg)

    closure, ifgs_breach_count, num_occurences_each_ifg = sum_phase_closures(
        ifgs_with_loops, retained_loops, params)

    if mpiops.rank == 0:
        closure_ins = config.closure()
        np.save(closure_ins.closure, closure)
        np.save(closure_ins.ifgs_breach_count, ifgs_breach_count)
        np.save(closure_ins.num_occurences_each_ifg, num_occurences_each_ifg)
        np.save(closure_ins.loops, retained_loops, allow_pickle=True)

    selected_ifg_files = mpiops.run_once(__drop_ifgs_exceeding_threshold,
                                         ifgs_with_loops, ifgs_breach_count,
                                         num_occurences_each_ifg, params)

    # update the ifg list in the parameters dictionary
    params[C.INTERFEROGRAM_FILES] = \
        mpiops.run_once(update_ifg_list, selected_ifg_files, params[C.INTERFEROGRAM_FILES])
    return selected_ifg_files, closure, ifgs_breach_count, num_occurences_each_ifg, retained_loops
Example No. 27
def process_ifgs(ifg_paths, params, rows, cols):
    """
    Top level function to perform PyRate workflow on given interferograms

    :param list ifg_paths: List of interferogram paths
    :param dict params: Dictionary of configuration parameters
    :param int rows: Number of sub-tiles in y direction
    :param int cols: Number of sub-tiles in x direction

    :return: refpt: tuple of reference pixel x and y position
    :rtype: tuple
    :return: maxvar: array of maximum variance values of interferograms
    :rtype: ndarray
    :return: vcmt: Variance-covariance matrix array
    :rtype: ndarray
    """

    if mpiops.size > 1:  # turn off multiprocessing during MPI jobs
        params[cf.PARALLEL] = False
    outdir = params[cf.TMPDIR]
    if not os.path.exists(outdir):
        shared.mkdir_p(outdir)

    tiles = mpiops.run_once(get_tiles, ifg_paths[0], rows, cols)

    preread_ifgs = _create_ifg_dict(ifg_paths, params=params)

    # validate user supplied ref pixel
    refpixel.validate_supplied_lat_lon(params)
    refpx, refpy = _ref_pixel_calc(ifg_paths, params)

    # remove non ifg keys
    _ = [preread_ifgs.pop(k) for k in ['gt', 'epochlist', 'md', 'wkt']]

    multi_paths = params[cf.INTERFEROGRAM_FILES]
    _orb_fit_calc(multi_paths, params, preread_ifgs)

    _ref_phase_estimation(ifg_paths, params, refpx, refpy)

    shared.save_numpy_phase(ifg_paths, tiles, params)
    _mst_calc(ifg_paths, params, tiles, preread_ifgs)

    # spatio-temporal aps filter
    wrap_spatio_temporal_filter(ifg_paths, params, tiles, preread_ifgs)

    maxvar, vcmt = _maxvar_vcm_calc(ifg_paths, params, preread_ifgs)
    # save phase data tiles as numpy array for timeseries and stackrate calc

    shared.save_numpy_phase(ifg_paths, tiles, params)

    _timeseries_calc(ifg_paths, params, vcmt, tiles, preread_ifgs)

    _stack_calc(ifg_paths, params, vcmt, tiles, preread_ifgs)

    log.info('PyRate workflow completed')
    return (refpx, refpy), maxvar, vcmt
Example No. 28
def independent_orbital_correction(ifg, params):
    """
    Calculates and removes an orbital error surface from a single independent
    interferogram.

    Warning: This will write orbital error corrected phase_data to the ifg.

    :param Ifg class instance ifg: the interferogram to be corrected
    :param dict params: dictionary of configuration parameters; the model
        degree (PLANAR / QUADRATIC / PART_CUBIC) and offset option are read
        from this dictionary

    :return: None - interferogram phase data is updated and saved to disk
    """
    degree = params[cf.ORBITAL_FIT_DEGREE]
    offset = params[cf.ORBFIT_OFFSET]
    orbfit_correction_on_disc = MultiplePaths.orb_error_path(
        ifg.data_path, params)
    if not ifg.is_open:
        ifg.open()

    shared.nan_and_mm_convert(ifg, params)
    if orbfit_correction_on_disc.exists():
        log.info(
            f'Reusing already computed orbital fit correction for {ifg.data_path}'
        )
        orbital_correction = np.load(file=orbfit_correction_on_disc)
    else:
        # vectorise, keeping NODATA
        vphase = reshape(ifg.phase_data, ifg.num_cells)
        dm = get_design_matrix(ifg, degree, offset)

        # filter NaNs out before getting model
        clean_dm = dm[~isnan(vphase)]
        data = vphase[~isnan(vphase)]
        model = lstsq(clean_dm, data)[0]  # first arg is the model params

        # calculate forward model & morph back to 2D
        if offset:
            fullorb = np.reshape(np.dot(dm[:, :-1], model[:-1]),
                                 ifg.phase_data.shape)
        else:
            fullorb = np.reshape(np.dot(dm, model), ifg.phase_data.shape)

        if not orbfit_correction_on_disc.parent.exists():
            shared.mkdir_p(orbfit_correction_on_disc.parent)
        offset_removal = nanmedian(np.ravel(ifg.phase_data - fullorb))
        orbital_correction = fullorb - offset_removal
        # dump to disc
        np.save(file=orbfit_correction_on_disc, arr=orbital_correction)

    # subtract orbital error from the ifg
    ifg.phase_data -= orbital_correction
    # set orbfit meta tag and save phase to file
    _save_orbital_error_corrected_phase(ifg)
    ifg.close()
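The correction itself is a least-squares fit of a polynomial surface to the observed phase, with the forward-modelled surface then subtracted. A sketch of the planar case with a hand-built design matrix (the NaN filtering and separate offset column of `get_design_matrix` are simplified here):

import numpy as np

rows, cols = 20, 30
y, x = np.mgrid[0:rows, 0:cols]
phase = 0.02 * x + 0.05 * y + np.random.normal(0, 0.01, (rows, cols))  # synthetic orbital ramp

# Planar design matrix: one row per pixel, columns for x, y and a constant term.
dm = np.column_stack([x.ravel(), y.ravel(), np.ones(rows * cols)])
model = np.linalg.lstsq(dm, phase.ravel(), rcond=None)[0]

fullorb = (dm @ model).reshape(rows, cols)   # forward-modelled orbital surface
corrected = phase - fullorb                  # residual phase after removal
print(np.round(model, 3))                    # approximately [0.02, 0.05, 0.]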
Example No. 29
    def __reuse_ref_pixel_file_if_exists():
        if ref_pixel_file.exists():
            refx, refy = np.load(ref_pixel_file)
            log.info('Reusing pre-calculated ref-pixel values: ({}, {}) from file {}'.format(
                refx, refy, ref_pixel_file.as_posix()))
            log.warning("Reusing ref-pixel values from previous run!!!")
            params[C.REFX_FOUND], params[C.REFY_FOUND] = int(refx), int(refy)
            return int(refx), int(refy)
        else:
            return None, None
Example No. 30
def _copy_mlooked(params):
    log.info(
        "Copying input files into tempdir for manipulation during 'correct' steps"
    )
    mpaths = params[cf.INTERFEROGRAM_FILES]
    process_mpaths = mpiops.array_split(mpaths)
    for p in process_mpaths:
        shutil.copy(p.sampled_path, p.tmp_sampled_path)
        # assign write permission, as prepifg output is read-only
        Path(p.tmp_sampled_path).chmod(0o664)
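The copies are made writable again because prepifg leaves its outputs read-only. A small sketch, on a POSIX filesystem and with a made-up file name, of what the two permission modes mean:

import tempfile
from pathlib import Path

# 0o444 = read-only for everyone (how prepifg leaves its outputs);
# 0o664 = owner/group read+write, others read, so the working copy can be edited in place.
tmp = Path(tempfile.mkdtemp()) / 'demo.tif'
tmp.write_bytes(b'')
tmp.chmod(0o444)                        # mimic the read-only prepifg output
tmp.chmod(0o664)                        # restore write permission on the copy
print(oct(tmp.stat().st_mode & 0o777))  # 0o664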