Code example #1
def find_min_mean(mean_sds, grid):
    """
    Determine the ref pixel block with minimum mean value

    :param list mean_sds: List of mean standard deviations from each
        reference pixel grid
    :param list grid: List of ref pixel coordinates tuples

    :return: Tuple of (refy, refx) with minimum mean
    :rtype: tuple    
    """
    log.debug('Ranking ref pixel candidates based on mean values')
    try:
        refp_index = np.nanargmin(mean_sds)
        return grid[refp_index]
    except ValueError as v:
        # np.nanargmin raises ValueError on an all-NaN slice; the caller
        # (see ref_pixel_calc_wrapper) checks for a returned ValueError
        log.error(v)
        return v
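A minimal usage sketch (values and candidate grid are hypothetical; in PyRate they come from the reference pixel search). It also shows the all-NaN failure mode that the try/except above guards against: np.nanargmin raises ValueError on an all-NaN input.

import numpy as np

mean_sds = [0.7, np.nan, 0.3]          # candidate mean values (hypothetical)
grid = [(10, 12), (20, 24), (30, 36)]  # matching (refy, refx) candidates (hypothetical)
print(grid[np.nanargmin(mean_sds)])    # (30, 36): candidate with smallest non-NaN mean

try:
    np.nanargmin([np.nan, np.nan])     # an all-NaN slice...
except ValueError as e:
    print(e)                           # ...is the error path handled above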
Code example #2
def __calc_time_series_for_tile(tile, params):
    """
    Wrapper for time series calculation on a single tile
    """
    preread_ifgs = params[cf.PREREAD_IFGS]
    vcmt = params[cf.VCMT]
    ifg_paths = [
        ifg_path.tmp_sampled_path
        for ifg_path in params[cf.INTERFEROGRAM_FILES]
    ]
    output_dir = params[cf.TMPDIR]
    log.debug(f"Calculating time series for tile {tile.index}")
    ifg_parts = [
        shared.IfgPart(p, tile, preread_ifgs, params) for p in ifg_paths
    ]
    mst_tile = np.load(Configuration.mst_path(params, tile.index))
    tsincr, tscuml, _ = time_series(ifg_parts, params, vcmt, mst_tile)
    np.save(file=os.path.join(output_dir, 'tscuml_{}.npy'.format(tile.index)),
            arr=tscuml)
    # optional save of tsincr npy tiles
    if params["savetsincr"] == 1:
        np.save(file=os.path.join(output_dir,
                                  'tsincr_{}.npy'.format(tile.index)),
                arr=tsincr)
    tscuml = np.insert(tscuml, 0, 0, axis=2)  # add zero epoch to tscuml 3D array
    log.info('Calculating linear regression of cumulative time series')
    linrate, intercept, r_squared, std_err, samples = linear_rate_array(
        tscuml, ifg_parts, params)
    np.save(file=os.path.join(output_dir,
                              'linear_rate_{}.npy'.format(tile.index)),
            arr=linrate)
    np.save(file=os.path.join(output_dir,
                              'linear_intercept_{}.npy'.format(tile.index)),
            arr=intercept)
    np.save(file=os.path.join(output_dir,
                              'linear_rsquared_{}.npy'.format(tile.index)),
            arr=r_squared)
    np.save(file=os.path.join(output_dir,
                              'linear_error_{}.npy'.format(tile.index)),
            arr=std_err)
    np.save(file=os.path.join(output_dir,
                              'linear_samples_{}.npy'.format(tile.index)),
            arr=samples)
Code example #3
def _ts_to_ifgs(tsincr, preread_ifgs):
    """
    Function that converts an incremental displacement time series into
    interferometric phase observations. Used to re-construct an interferogram
    network from a time series.

    :param ndarray tsincr: incremental time series array of size
                (ifg.shape, nepochs-1)
    :param dict preread_ifgs: Dictionary of shared.PrereadIfg class instances

    :return: None, interferograms are saved to disk
    """
    log.debug('Reconstructing interferometric observations from time series')
    ifgs = list(OrderedDict(sorted(preread_ifgs.items())).values())
    _, n = get_epochs(ifgs)
    index_master, index_slave = n[:len(ifgs)], n[len(ifgs):]
    for i, ifg in enumerate(ifgs):
        phase = np.sum(tsincr[:, :, index_master[i]:index_slave[i]], axis=2)
        _save_aps_corrected_phase(ifg.path, phase)
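A toy illustration of this reconstruction (shapes and epoch indices are hypothetical; in PyRate the first/second indices come from get_epochs): summing the incremental slices between an interferogram's first and second epoch recovers its phase observation.

import numpy as np

rows, cols, n_incr = 2, 2, 4
tsincr = np.random.rand(rows, cols, n_incr)  # incremental displacements (hypothetical)

# an ifg spanning epoch 1 -> epoch 3 is the sum of increments 1 and 2
index_first, index_second = 1, 3
phase = np.sum(tsincr[:, :, index_first:index_second], axis=2)
assert np.allclose(phase, tsincr[:, :, 1] + tsincr[:, :, 2])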
Code example #4
def temporal_high_pass_filter(tsincr: np.ndarray, epochlist: EpochList,
                              params: dict) -> np.ndarray:
    """
    Isolate high-frequency components of time series data by subtracting
    low-pass components obtained using a Gaussian filter defined by a
    cut-off time period (in days).
    :param tsincr: Array of incremental time series data of shape (ifg.shape, n_epochs).
    :param epochlist: A pyrate.core.shared.EpochList Class instance.
    :param params: Dictionary of PyRate configuration parameters.
    :return: ts_hp: Filtered high frequency time series data; shape (ifg.shape, nepochs).
    """
    log.info('Applying temporal high-pass filter')
    threshold = params[C.TLPF_PTHR]
    cutoff_day = params[C.TLPF_CUTOFF]
    if not isinstance(cutoff_day, int) or cutoff_day < 1:
        raise ValueError(f'tlpf_cutoff must be an integer greater than or '
                         f'equal to 1 day. Value provided = {cutoff_day}')

    # convert cutoff in days to years
    cutoff_yr = cutoff_day / ifc.DAYS_PER_YEAR
    log.info(f'Gaussian temporal filter cutoff is {cutoff_day} days '
             f'({cutoff_yr:.4f} years)')

    intv = np.diff(epochlist.spans)  # time interval for the neighboring epochs
    span = epochlist.spans[:tsincr.shape[2]] + intv / 2  # accumulated time
    rows, cols = tsincr.shape[:2]

    tsfilt_row = {}
    process_rows = mpiops.array_split(list(range(rows)))

    for r in process_rows:
        tsfilt_row[r] = np.full(tsincr.shape[1:], np.nan, dtype=np.float32)
        for j in range(cols):
            # Result of gaussian filter is low frequency time series
            tsfilt_row[r][j, :] = gaussian_temporal_filter(
                tsincr[r, j, :], cutoff_yr, span, threshold)

    tsfilt_combined = shared.join_dicts(mpiops.comm.allgather(tsfilt_row))
    tsfilt = np.array([v for _, v in sorted(tsfilt_combined.items())])  # order rows by index
    log.debug("Finished applying temporal high-pass filter")
    # Return the high-pass time series by subtracting low-pass result from input
    return tsincr - tsfilt
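The core identity of the filter can be sketched with a stand-in low-pass kernel; here scipy.ndimage.gaussian_filter1d is a hypothetical substitute for PyRate's gaussian_temporal_filter (which additionally handles NaNs and irregular epoch spacing). The high-pass series is simply the input minus its low-pass version.

import numpy as np
from scipy.ndimage import gaussian_filter1d

t = np.linspace(0, 4, 200)                                         # years (hypothetical)
ts = np.sin(2 * np.pi * t / 2.0) + 0.05 * np.random.randn(t.size)  # slow signal + noise
ts_lp = gaussian_filter1d(ts, sigma=10)                            # low-frequency component
ts_hp = ts - ts_lp                                                 # high-frequency residual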
Code example #5
def maxvar_vcm_calc_wrapper(params):
    """
    MPI wrapper for maxvar and vcmt computation
    """
    preread_ifgs = params[C.PREREAD_IFGS]
    ifg_paths = [
        ifg_path.tmp_sampled_path for ifg_path in params[C.INTERFEROGRAM_FILES]
    ]
    log.info('Calculating the temporal variance-covariance matrix')

    def _get_r_dist(ifg_path):
        """
        Get RDist class object
        """
        ifg = Ifg(ifg_path)
        ifg.open()
        r_dist = RDist(ifg)()
        ifg.close()
        return r_dist

    r_dist = mpiops.run_once(_get_r_dist, ifg_paths[0])
    prcs_ifgs = mpiops.array_split(list(enumerate(ifg_paths)))
    process_maxvar = {}
    for n, i in prcs_ifgs:
        log.debug(f'Calculating maxvar for ifg {n} '
                  f'({len(prcs_ifgs)} ifgs in this process, {len(ifg_paths)} in total)')
        process_maxvar[int(n)] = cvd(i,
                                     params,
                                     r_dist,
                                     calc_alpha=True,
                                     write_vals=True,
                                     save_acg=True)[0]
    maxvar_d = shared.join_dicts(mpiops.comm.allgather(process_maxvar))
    maxvar = [v[1] for v in sorted(maxvar_d.items(), key=lambda s: s[0])]

    vcmt = mpiops.run_once(get_vcmt, preread_ifgs, maxvar)
    log.debug("Finished maxvar and vcm calc!")
    params[C.MAXVAR], params[C.VCMT] = maxvar, vcmt
    np.save(Configuration.vcmt_path(params), arr=vcmt)
    return maxvar, vcmt
Code example #6
def _save_merged_files(ifgs_dict,
                       outdir,
                       array,
                       out_type,
                       index=None,
                       savenpy=None):
    """
    Convenience function to save PyRate geotiff and numpy array files
    """
    log.debug('Saving PyRate outputs {}'.format(out_type))
    gt, md, wkt = ifgs_dict['gt'], ifgs_dict['md'], ifgs_dict['wkt']
    epochlist = ifgs_dict['epochlist']

    if out_type in ('tsincr', 'tscuml'):
        epoch = epochlist.dates[index + 1]
        dest = join(outdir, out_type + "_" + str(epoch) + ".tif")
        npy_file = join(outdir, out_type + "_" + str(epoch) + ".npy")
        # sequence position; first time slice is #0
        md['SEQUENCE_POSITION'] = index + 1
        md[ifc.EPOCH_DATE] = epoch
    else:
        dest = join(outdir, out_type + ".tif")
        npy_file = join(outdir, out_type + '.npy')
        md[ifc.EPOCH_DATE] = epochlist.dates

    if out_type == 'stack_rate':
        md[ifc.DATA_TYPE] = ifc.STACKRATE
    elif out_type == 'stack_error':
        md[ifc.DATA_TYPE] = ifc.STACKERROR
    elif out_type == 'stack_samples':
        md[ifc.DATA_TYPE] = ifc.STACKSAMP
    elif out_type == 'tsincr':
        md[ifc.DATA_TYPE] = ifc.INCR
    else:  #tscuml
        md[ifc.DATA_TYPE] = ifc.CUML

    shared.write_output_geotiff(md, gt, wkt, array, dest, np.nan)
    if savenpy:
        np.save(file=npy_file, arr=array)

    log.debug('Finished saving {}'.format(out_type))
Code example #7
File: aps.py Project: woxin5295/PyRate
def wrap_spatio_temporal_filter(params):
    """
    A wrapper for the spatio-temporal filter so it can be tested.
    See docstring for spatio_temporal_filter.
    """
    if params[cf.APSEST]:
        log.info('Doing APS spatio-temporal filtering')
    else:
        log.info('APS spatio-temporal filtering not required')
        return
    tiles = params[cf.TILES]
    preread_ifgs = params[cf.PREREAD_IFGS]
    ifg_paths = [
        ifg_path.tmp_sampled_path
        for ifg_path in params[cf.INTERFEROGRAM_FILES]
    ]

    # perform some checks on existing ifgs
    log.debug('Checking APS correction status')
    if mpiops.run_once(shared.check_correction_status, ifg_paths,
                       ifc.PYRATE_APS_ERROR):
        log.debug('Finished APS correction')
        return  # all ifgs already corrected; nothing to do

    aps_error_files_on_disc = [
        MultiplePaths.aps_error_path(i, params) for i in ifg_paths
    ]
    if all(a.exists() for a in aps_error_files_on_disc):
        log.warning("Reusing APS errors from previous run!!!")
        for ifg_path, a in mpiops.array_split(
                list(zip(ifg_paths, aps_error_files_on_disc))):
            phase = np.load(a)
            _save_aps_corrected_phase(ifg_path, phase)
    else:
        tsincr = _calc_svd_time_series(ifg_paths, params, preread_ifgs, tiles)
        mpiops.comm.barrier()

        spatio_temporal_filter(tsincr, ifg_paths, params, preread_ifgs)
    mpiops.comm.barrier()
    shared.save_numpy_phase(ifg_paths, params)
Code example #8
File: gamma.py Project: sixy6e/PyRate
def parse_baseline_header(path: str) -> dict:
    """
    Returns dictionary of Baseline metadata required for PyRate.
    Will read the Precise baseline estimate, if available,
    otherwise will read the Initial baseline estimate.

    :param path: Full path to GAMMA base.par file

    :return: bdict: Dictionary of baseline values
    """
    lookup = _parse_header(path)  # read file contents into a dict

    # split the initial and precise baselines
    initial = lookup[GAMMA_INITIAL_BASELINE]
    initial_rate = lookup[GAMMA_INITIAL_BASELINE_RATE]
    precise = lookup[GAMMA_PRECISION_BASELINE]
    precise_rate = lookup[GAMMA_PRECISION_BASELINE_RATE]

    # read the initial baseline if all precise components are zero
    # (indicates that the precise baseline estimation was not run in the GAMMA workflow)
    if float(precise[0]) == 0.0 and float(precise[1]) == 0.0 and float(
            precise[2]) == 0.0:
        log.debug('Reading Initial GAMMA baseline values')
        baseline, baseline_rate = initial, initial_rate
    else:
        log.debug('Reading Precise GAMMA baseline values')
        baseline, baseline_rate = precise, precise_rate

    # Extract and return a dict of baseline values
    bdict = {}

    # baseline vector (along Track, aCross track, Normal to the track)
    bdict[ifc.PYRATE_BASELINE_T] = float(baseline[0])
    bdict[ifc.PYRATE_BASELINE_C] = float(baseline[1])
    bdict[ifc.PYRATE_BASELINE_N] = float(baseline[2])
    bdict[ifc.PYRATE_BASELINE_RATE_T] = float(baseline_rate[0])
    bdict[ifc.PYRATE_BASELINE_RATE_C] = float(baseline_rate[1])
    bdict[ifc.PYRATE_BASELINE_RATE_N] = float(baseline_rate[2])

    return bdict
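For illustration, a minimal stand-in for _parse_header (the 'key: value ...' text layout is an assumption about GAMMA .par files; PyRate's real parser lives in gamma.py):

def parse_par_text(text: str) -> dict:
    """Split 'key:  v1 v2 ...' lines into {key: [v1, v2, ...]}."""
    lookup = {}
    for line in text.splitlines():
        if ':' not in line:
            continue
        key, _, rest = line.partition(':')
        lookup[key.strip()] = rest.split()
    return lookup

sample = "initial_baseline(TCN):  0.0  -112.4  25.3  m  m  m"  # hypothetical values
print(parse_par_text(sample)['initial_baseline(TCN)'][:3])     # ['0.0', '-112.4', '25.3']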
Code example #9
File: mst_closure.py Project: sixy6e/PyRate
def __find_closed_loops(edges: List[Edge],
                        max_loop_length: int) -> List[List[date]]:
    g = nx.Graph()
    edges = [(we.first, we.second) for we in edges]
    g.add_edges_from(edges)
    A = nx.adjacency_matrix(g)
    graph = np.asarray(A.todense())

    loops = []

    for n in range(3, max_loop_length + 1):
        log.debug(
            f"Searching for loops of length {n} using Depth First Search")
        _, all_loops = find_loops(graph=graph, loop_length=n)
        loops_ = dedupe_loops(all_loops)
        log.debug(
            f"Selected number of loops of length {n} after deduplication is {len(loops_)}"
        )
        loops.extend(loops_)

    node_list = g.nodes()
    node_list_dict = {i: n for i, n in enumerate(node_list)}
    # map adjacency-matrix node indices back to their dates
    loop_subset = [[node_list_dict[ll] for ll in l] for l in loops]

    log.debug(f"Total number of loops is {len(loop_subset)}")

    return loop_subset
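A rough sketch of the loop search on a tiny graph, using networkx.cycle_basis as a stand-in for PyRate's find_loops/dedupe_loops and integers in place of dates:

import networkx as nx

g = nx.Graph()
g.add_edges_from([(1, 2), (2, 3), (3, 1), (3, 4), (4, 1)])  # hypothetical ifg network
for loop in nx.cycle_basis(g):
    print(loop)  # two independent triangles, e.g. [2, 3, 1] and [4, 3, 1]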
Code example #10
def _create_ifg_dict(params):
    """
    1. Convert ifg phase data into numpy binary files.
    2. Save the preread_ifgs dict with information about the ifgs that are
    later used for fast loading of Ifg files in IfgPart class

    :param dict params: Config dictionary; the destination tifs are read
        from params[cf.INTERFEROGRAM_FILES]

    :return: preread_ifgs: Dictionary containing information regarding
                interferograms that are used later in workflow
    :rtype: dict
    """
    dest_tifs = list(params[cf.INTERFEROGRAM_FILES])
    ifgs_dict = {}
    process_tifs = mpiops.array_split(dest_tifs)
    for d in process_tifs:
        ifg = shared._prep_ifg(d.sampled_path, params)
        ifgs_dict[d.tmp_sampled_path] = PrereadIfg(
            path=d.sampled_path,
            tmp_path=d.tmp_sampled_path,
            nan_fraction=ifg.nan_fraction,
            first=ifg.first,
            second=ifg.second,
            time_span=ifg.time_span,
            nrows=ifg.nrows,
            ncols=ifg.ncols,
            metadata=ifg.meta_data)
        ifg.close()
    ifgs_dict = join_dicts(mpiops.comm.allgather(ifgs_dict))

    ifgs_dict = mpiops.run_once(__save_ifgs_dict_with_headers_and_epochs,
                                dest_tifs, ifgs_dict, params, process_tifs)

    params[cf.PREREAD_IFGS] = ifgs_dict
    log.debug('Finished converting phase_data to numpy in process {}'.format(
        mpiops.rank))
    return ifgs_dict
Code example #11
File: closure_check.py Project: sixy6e/PyRate
def discard_loops_containing_max_ifg_count(loops: List[WeightedLoop],
                                           params) -> List[WeightedLoop]:
    """
    Discard a loop when every ifg participating in it has already appeared in
    more than MAX_LOOP_REDUNDANCY accepted loops.

    :param loops: list of loops
    :param params: params dict
    :return: selected loops satisfying MAX_LOOP_REDUNDANCY criteria
    """
    selected_loops = []
    ifg_counter = defaultdict(int)
    for loop in loops:
        edge_appearances = np.array([ifg_counter[e] for e in loop.edges])
        if not np.all(edge_appearances > params[C.MAX_LOOP_REDUNDANCY]):
            selected_loops.append(loop)
            for e in loop.edges:
                ifg_counter[e] += 1
        else:
            log.debug(
                f"Loop {loop.loop} ignored: all constituent ifgs have been in a loop "
                f"{params[C.MAX_LOOP_REDUNDANCY]} times or more")
    return selected_loops
Code example #12
File: process.py Project: bopopescu/PyRate
def _mst_calc(dest_tifs, params, tiles, preread_ifgs):
    """
    MPI wrapper function for MST calculation
    """
    process_tiles = mpiops.array_split(tiles)
    log.info('Calculating minimum spanning tree matrix')

    def _save_mst_tile(tile, i, preread_ifgs):
        """
        Convenient inner loop for mst tile saving
        """
        mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs,
                                           params)
        # locally save the mst_mat
        mst_file_process_n = join(params[cf.TMPDIR],
                                  'mst_mat_{}.npy'.format(i))
        np.save(file=mst_file_process_n, arr=mst_tile)

    for t in process_tiles:
        _save_mst_tile(t, t.index, preread_ifgs)
    log.debug('Finished mst calculation for process {}'.format(mpiops.rank))
    mpiops.comm.barrier()
Code example #13
def mst_matrix_networkx(ifgs):
    """
    Generates MST network for a single pixel for the given ifgs using
    NetworkX-package algorithms.

    :param list ifgs: Sequence of interferogram objects

    :yield: tuple of (y, x, mst), where y and x are the pixel coordinates
        (int) and mst is the list of edge tuples in the minimum spanning
        tree for that pixel (or nan if all ifg values at the pixel are nan)
    """
    # make default MST to optimise result when no Ifg cells in a stack are nans
    edges_with_weights = [(i.master, i.slave, i.nan_fraction) for i in ifgs]
    edges, g_nx = _minimum_spanning_edges_from_mst(edges_with_weights)
    # TODO: memory efficiencies can be achieved here with tiling

    list_of_phase_data = [i.phase_data for i in ifgs]
    log.debug("list_of_phase_data length: " + str(len(list_of_phase_data)))
    for row in list_of_phase_data:
        log.debug("row length in list_of_phase_data: " + str(len(row)))
        log.debug("row in list_of_phase_data: " + str(row))
    data_stack = array(list_of_phase_data, dtype=float32)

    # create MSTs for each pixel in the ifg data stack
    nifgs = len(ifgs)

    for y, x in product(range(ifgs[0].nrows), range(ifgs[0].ncols)):
        values = data_stack[:, y, x]  # vertical stack of ifg values for a pixel
        nan_count = nsum(isnan(values))

        # optimisations: use pre-created results for all nans/no nans
        if nan_count == 0:
            yield y, x, edges
            continue
        elif nan_count == nifgs:
            yield y, x, nan
            continue

        # dynamically modify graph to reuse a single graph: this should avoid
        # repeatedly creating new graph objs & reduce RAM use
        ebunch_add = []
        ebunch_delete = []
        for value, edge in zip(values, edges_with_weights):
            if not isnan(value):
                if not g_nx.has_edge(edge[0], edge[1]):
                    ebunch_add.append(edge)
            else:
                if g_nx.has_edge(edge[0], edge[1]):
                    ebunch_delete.append(edge)
        if ebunch_add:
            g_nx.add_weighted_edges_from(ebunch_add)
        if ebunch_delete:
            g_nx.remove_edges_from(ebunch_delete)
        yield y, x, nx.minimum_spanning_tree(g_nx).edges()
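Per pixel, the computation reduces to a standard NetworkX minimum spanning tree over the surviving (non-NaN) edges. A small sketch with hypothetical dates and nan fractions as edge weights:

import networkx as nx

g = nx.Graph()
g.add_weighted_edges_from([
    ('2006-08-28', '2006-10-02', 0.1),  # (first, second, nan_fraction) - hypothetical
    ('2006-10-02', '2006-11-06', 0.3),
    ('2006-08-28', '2006-11-06', 0.2),
])
print(sorted(nx.minimum_spanning_tree(g).edges()))  # the two lowest-weight edges survive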
Code example #14
 def convert_to_radians(self):
     """
     Convert phase_data units from millimetres to radians.
     Note: converted phase_data held in memory and not written to disc
     (see shared.write_modified_phase)
     """
     if self.meta_data[ifc.DATA_UNITS] == MILLIMETRES:
         self.phase_data = convert_mm_to_radians(self.phase_data,
                                                 wavelength=self.wavelength)
         self.meta_data[ifc.DATA_UNITS] = RADIANS
         self.mm_converted = False
         msg = '{}: converted phase units to radians'.format(self.data_path)
         log.debug(msg)
         return
     elif self.meta_data[ifc.DATA_UNITS] == RADIANS:
         self.mm_converted = False
         msg = '{}: ignored as phase units are already ' \
               'radians'.format(self.data_path)
         log.debug(msg)
         return
     else:  # pragma: no cover
         msg = 'Phase units are not millimetres or radians'
         raise IfgException(msg)
Code example #15
File: closure_check.py Project: sixy6e/PyRate
def mask_pixels_with_unwrapping_errors(ifgs_breach_count: NDArray[(Any, Any, Any), UInt16],
                                       num_occurrences_each_ifg: NDArray[(Any,), UInt16],
                                       params: dict) -> None:
    """
    Find pixels in the phase data that breach closure_thr, and mask
    (assign NaNs) to those pixels in those ifgs.
    :param ifgs_breach_count: unwrapping issues at pixels in all loops
    :param num_occurrences_each_ifg:  frequency of ifgs appearing in all loops
    :param params: params dict
    """
    log.debug("Masking phase data of retained ifgs")

    for i, m_p in enumerate(params[C.INTERFEROGRAM_FILES]):
        pix_index = ifgs_breach_count[:, :, i] == num_occurrences_each_ifg[i]
        ifg = Ifg(m_p.tmp_sampled_path)
        ifg.open()
        nan_and_mm_convert(ifg, params)
        ifg.phase_data[pix_index] = np.nan
        ifg.write_modified_phase()

    log.info(f"Masked phase data of {i + 1} retained ifgs after phase closure")
    return None
Code example #16
def _make_aps_corrections(ts_aps: np.ndarray, ifgs: List[Ifg],
                          params: dict) -> None:
    """
    Function to convert the time series APS filter output into interferometric
    phase corrections and save them to disc.

    :param ts_aps: Incremental APS time series array.
    :param ifgs:   List of Ifg class objects.
    :param params: Dictionary of PyRate configuration parameters.
    """
    log.debug('Reconstructing interferometric observations from time series')
    # get first and second image indices
    _, n = mpiops.run_once(get_epochs, ifgs)
    index_first, index_second = n[:len(ifgs)], n[len(ifgs):]

    num_ifgs_tuples = mpiops.array_split(list(enumerate(ifgs)))
    for i, ifg in [(int(num), ifg) for num, ifg in num_ifgs_tuples]:
        # sum time slice data from first to second epoch
        ifg_aps = np.sum(ts_aps[:, :, index_first[i]:index_second[i]], axis=2)
        aps_error_on_disc = MultiplePaths.aps_error_path(ifg.tmp_path, params)
        np.save(file=aps_error_on_disc, arr=ifg_aps)  # save APS as numpy array

    mpiops.comm.barrier()
Code example #17
File: mst.py Project: woxin5295/PyRate
def mst_calc_wrapper(params):
    """
    MPI wrapper function for MST calculation
    """

    log.info('Calculating minimum spanning tree matrix')

    def _save_mst_tile(tile: Tile, params: dict) -> None:
        """
        Convenient inner loop for mst tile saving
        """
        preread_ifgs = params[cf.PREREAD_IFGS]
        dest_tifs = [ifg_path.tmp_sampled_path for ifg_path in params[cf.INTERFEROGRAM_FILES]]
        mst_file_process_n = Configuration.mst_path(params, index=tile.index)
        if mst_file_process_n.exists():
            return
        mst_tile = mst_multiprocessing(tile, dest_tifs, preread_ifgs, params)
        # locally save the mst_mat
        np.save(file=mst_file_process_n, arr=mst_tile)

    tiles_split(_save_mst_tile, params)

    log.debug('Finished minimum spanning tree calculation')
Code example #18
def _merge_timeseries(params: dict, tstype: str) -> None:
    """
    Merge tiled time series outputs
    """
    log.info('Merging {} time series outputs'.format(tstype))
    shape, tiles, ifgs_dict = __merge_setup(params)

    # load the first time series file to determine the number of time series tifs
    ts_file = join(params[C.TMPDIR], tstype + '_0.npy')
    ts = np.load(file=ts_file)
    # pylint: disable=no-member
    no_ts_tifs = ts.shape[2]
    process_tifs = mpiops.array_split(range(no_ts_tifs))
    # depending on nvelpar, this will not fit in memory
    # e.g. nvelpar=100, nrows=10000, ncols=10000, 32bit floats need 40GB memory
    # 32 * 100 * 10000 * 10000 / 8 bytes = 4e10 bytes = 40 GB
    # the double for loop helps us overcome the memory limit
    log.info('Process {} writing {} {} time series tifs of '
             'total {}'.format(mpiops.rank, len(process_tifs), tstype,
                               no_ts_tifs))
    for i in process_tifs:
        ts_arr = assemble_tiles(shape,
                                params[C.TMPDIR],
                                tiles,
                                out_type=tstype,
                                index=i)
        __save_merged_files(ifgs_dict,
                            params,
                            ts_arr,
                            out_type=tstype,
                            index=i,
                            savenpy=params["savenpy"])

    mpiops.comm.barrier()
    log.debug('Process {} finished writing {} {} time series tifs of '
              'total {}'.format(mpiops.rank, len(process_tifs), tstype,
                                no_ts_tifs))
Code example #19
File: shared.py Project: wzmucas/PyRate
 def convert_to_mm(self):
     """
     Convert phase data units from radians to millimetres.
     """
     self.mm_converted = True
     if self.dataset.GetMetadataItem(ifc.DATA_UNITS) == MILLIMETRES:
         msg = '{}: ignored as previous phase unit conversion ' \
               'already applied'.format(self.data_path)
         log.debug(msg)
         self.phase_data = self.phase_data
         return
     elif self.dataset.GetMetadataItem(ifc.DATA_UNITS) == RADIANS:
         self.phase_data = convert_radians_to_mm(self.phase_data,
                                                 self.wavelength)
         self.meta_data[ifc.DATA_UNITS] = MILLIMETRES
         # self.write_modified_phase()
         # otherwise NaN's don't write to bytecode properly
         # and numpy complains
         # self.dataset.FlushCache()
         msg = '{}: converted phase units to millimetres'.format(self.data_path)
         log.debug(msg)
     else:  # pragma: no cover
         msg = 'Phase units are not millimetres or radians'
         raise IfgException(msg)
Code example #20
 def convert_to_nans(self):
     """
     Convert phase data of given value to NaN
     """
     if (self._nodata_value is None) \
             or (self.dataset is None):  # pragma: no cover
          msg = 'nodata value needs to be set for nan conversion. ' \
                'Use ifg.nodata_value = NoDataValue to set nodata_value'
         log.warning(msg)
         raise RasterException(msg)
     if ((self.dataset.GetMetadataItem(ifc.NAN_STATUS) == ifc.NAN_CONVERTED)
             or self.nan_converted):
         self.phase_data = self.phase_data
         self.nan_converted = True
         msg = '{}: ignored as previous nan ' \
               'conversion detected'.format(self.data_path)
         log.debug(msg)
         return
     else:
         self.phase_data = where(
             isclose(self.phase_data, self._nodata_value, atol=1e-6), nan,
             self.phase_data)
         self.meta_data[ifc.NAN_STATUS] = ifc.NAN_CONVERTED
         self.nan_converted = True
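The conversion itself is a single vectorised numpy step; a minimal sketch with a hypothetical nodata value:

import numpy as np

nodata = 0.0
phase = np.array([[0.0, 1.5], [2.5, 0.0]], dtype=np.float32)  # hypothetical phase data
phase = np.where(np.isclose(phase, nodata, atol=1e-6), np.nan, phase)
print(phase)  # nodata cells become NaN; real observations are untouched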
Code example #21
File: process.py Project: bopopescu/PyRate
def _orb_fit_calc(multi_paths: List[MultiplePaths],
                  params,
                  preread_ifgs=None) -> None:
    """
    MPI wrapper for orbital fit correction
    """
    if not params[cf.ORBITAL_FIT]:
        log.info('Orbital correction not required!')
        return
    log.info('Calculating orbital correction')

    ifg_paths = [p.sampled_path for p in multi_paths]
    if preread_ifgs:  # don't check except for mpi tests
        # perform some general error/sanity checks
        log.debug('Checking Orbital error correction status')
        if mpiops.run_once(shared.check_correction_status, ifg_paths,
                           ifc.PYRATE_ORBITAL_ERROR):
            log.debug(
                'Orbital error correction not required as all ifgs are already corrected!'
            )
            return  # all ifgs already corrected; nothing to do

    if params[cf.ORBITAL_FIT_METHOD] == 1:
        prcs_ifgs = mpiops.array_split(ifg_paths)
        orbital.remove_orbital_error(prcs_ifgs, params, preread_ifgs)
    else:
        # Here we do all the multilooking in one process, but in memory
        # can use multiple processes if we write data to disc during
        # remove_orbital_error step
        # A performance comparison should be made for saving multilooked
        # files on disc vs in memory single process multilooking
        if mpiops.rank == MASTER_PROCESS:
            headers = [find_header(p, params) for p in multi_paths]
            orbital.remove_orbital_error(ifg_paths,
                                         params,
                                         headers,
                                         preread_ifgs=preread_ifgs)
    mpiops.comm.barrier()
    log.debug('Finished Orbital error correction')
Code example #22
File: shared.py Project: wzmucas/PyRate
def check_correction_status(ifgs, meta):  # pragma: no cover
    """
    Generic function for checking if a correction has already been performed
    in a previous run by interrogating PyRate meta data entries

    :param ifgs: List of interferogram file paths or Ifg objects
    :param str meta: Meta data flag to check for

    :return: True if correction has been performed, otherwise False
    :rtype: bool
    """
    def close_all(ifgs):    
        for ifg in ifgs:
            ifg.close()
    
    if not isinstance(ifgs[0], Ifg):
        ifgs = [Ifg(ifg_path) for ifg_path in ifgs]

    for ifg in ifgs:
        if not ifg.is_open:
            ifg.open()

    flags = [meta in ifg.meta_data for ifg in ifgs]
    if all(flags):
        log.info('Skipped: interferograms already corrected')
        close_all(ifgs)
        return True
    elif any(flags):
        log.debug('Detected mix of corrected and uncorrected interferograms')
        # log the status of every ifg before cleaning up and raising
        for i, flag in zip(ifgs, flags):
            if flag:
                log.debug('{}: correction detected'.format(i.data_path))
            else:
                log.debug('{}: correction NOT detected'.format(i.data_path))
        close_all(ifgs)
        raise CorrectionStatusError(
            'Mix of corrected and uncorrected interferograms found')
    else:
        log.debug('Calculating corrections')
        close_all(ifgs)
        return False
Code example #23
def _merge_timeseries(rows, cols, params):
    """
    Merge time series output
    """
    xlks, _, crop = cf.transform_params(params)

    base_unw_paths = []

    for p in Path(params[OUT_DIR]).rglob("*rlks_*cr.tif"):
        if "dem" not in str(p):
            base_unw_paths.append(str(p))

    if "tif" in base_unw_paths[0].split(".")[1]:
        dest_tifs = base_unw_paths  # cf.get_dest_paths(base_unw_paths, crop, params, xlks)
        for i, dest_tif in enumerate(dest_tifs):
            dest_tifs[i] = dest_tif.replace("_tif", "")
    else:
        dest_tifs = base_unw_paths  # cf.get_dest_paths(base_unw_paths, crop, params, xlks)

    output_dir = params[cf.TMPDIR]
    # load previously saved preread_ifgs dict
    preread_ifgs_file = join(output_dir, 'preread_ifgs.pk')
    ifgs = cp.load(open(preread_ifgs_file, 'rb'))

    # metadata and projections
    gt, md, wkt = ifgs['gt'], ifgs['md'], ifgs['wkt']
    epochlist = ifgs['epochlist']
    ifgs = [v for v in ifgs.values() if isinstance(v, PrereadIfg)]

    tiles = shared.get_tiles(dest_tifs[0], rows, cols)

    # load the first tsincr file to determine the number of time series tifs
    tsincr_file = os.path.join(output_dir, 'tsincr_0.npy')

    tsincr = np.load(file=tsincr_file)

    # pylint: disable=no-member
    no_ts_tifs = tsincr.shape[2]
    # we create 2 x no_ts_tifs as we are splitting tsincr and tscuml
    # to all processes.
    process_tifs = mpiops.array_split(range(2 * no_ts_tifs))

    # depending on nvelpar, this will not fit in memory
    # e.g. nvelpar=100, nrows=10000, ncols=10000, 32bit floats need 40GB memory
    # 32 * 100 * 10000 * 10000 / 8 bytes = 4e10 bytes = 40 GB
    # the double for loop helps us overcome the memory limit
    log.info('Process {} writing {} timeseries tifs of '
             'total {}'.format(mpiops.rank, len(process_tifs), no_ts_tifs * 2))
    for i in process_tifs:
        if i < no_ts_tifs:
            tscum_g = np.empty(shape=ifgs[0].shape, dtype=np.float32)
            for n, t in enumerate(tiles):
                _assemble_tiles(i, n, t, tscum_g, output_dir, 'tscuml')
            md[ifc.EPOCH_DATE] = epochlist.dates[i + 1]
            # sequence position; first time slice is #0
            md['SEQUENCE_POSITION'] = i + 1
            dest = os.path.join(
                params[cf.OUT_DIR],
                'tscuml' + "_" + str(epochlist.dates[i + 1]) + ".tif")
            md[ifc.DATA_TYPE] = ifc.CUML
            shared.write_output_geotiff(md, gt, wkt, tscum_g, dest, np.nan)
        else:
            tsincr_g = np.empty(shape=ifgs[0].shape, dtype=np.float32)
            i %= no_ts_tifs
            for n, t in enumerate(tiles):
                _assemble_tiles(i, n, t, tsincr_g, output_dir, 'tsincr')
            md[ifc.EPOCH_DATE] = epochlist.dates[i + 1]
            # sequence position; first time slice is #0
            md['SEQUENCE_POSITION'] = i + 1
            dest = os.path.join(
                params[cf.OUT_DIR],
                'tsincr' + "_" + str(epochlist.dates[i + 1]) + ".tif")
            md[ifc.DATA_TYPE] = ifc.INCR
            shared.write_output_geotiff(md, gt, wkt, tsincr_g, dest, np.nan)
    mpiops.comm.barrier()
    log.debug('Process {} finished writing {} timeseries tifs of '
              'total {}'.format(mpiops.rank, len(process_tifs),
                                no_ts_tifs * 2))
Code example #24
def _process_dem_error_per_tile(tile: Tile, params: dict) -> None:
    """
    Convenience function for processing DEM error in tiles
    :param tile: pyrate.core.shared.Tile Class object.
    :param params: Dictionary of PyRate configuration parameters.
    """
    ifg_paths = [
        ifg_path.tmp_sampled_path for ifg_path in params[C.INTERFEROGRAM_FILES]
    ]
    ifg0_path = ifg_paths[0]
    ifg0 = Ifg(ifg0_path)
    ifg0.open(readonly=True)
    # read lon and lat values of multi-looked ifg (first ifg only)
    lon, lat = geometry.get_lonlat_coords(ifg0)
    # read azimuth and range coords and DEM from tif files generated in prepifg
    geom_files = Configuration.geometry_files(params)
    rdc_az_file = geom_files['rdc_azimuth']
    geom_az = Geometry(rdc_az_file)
    rdc_rg_file = geom_files['rdc_range']
    geom_rg = Geometry(rdc_rg_file)
    dem_file = params[C.DEM_FILE_PATH].sampled_path
    dem = DEM(dem_file)
    preread_ifgs = params[C.PREREAD_IFGS]
    threshold = params[C.DE_PTHR]
    ifg_parts = [
        shared.IfgPart(p, tile, preread_ifgs, params) for p in ifg_paths
    ]
    lon_parts = lon(tile)
    lat_parts = lat(tile)
    az_parts = geom_az(tile)
    rg_parts = geom_rg(tile)
    dem_parts = dem(tile)
    log.debug(
        f"Calculating per-pixel baseline for tile {tile.index} during DEM error correction"
    )
    bperp, look_angle, range_dist = _calculate_bperp_wrapper(
        ifg_paths, az_parts, rg_parts, lat_parts, lon_parts, dem_parts)
    log.debug(
        f"Calculating DEM error for tile {tile.index} during DEM error correction"
    )

    # mst_tile = np.load(Configuration.mst_path(params, tile.index))
    # calculate the DEM error estimate and the correction values for each IFG
    # current implementation uses the look angle and range distance matrix of the primary SLC in the last IFG
    # todo: check the impact of using the same information from another SLC
    dem_error, dem_error_correction, _ = calc_dem_errors(
        ifg_parts, bperp, look_angle, range_dist, threshold)
    # dem_error contains the estimated DEM error for each pixel (i.e. the topographic change relative to the DEM)
    # size [row, col]
    # dem_error_correction contains the correction value for each interferogram
    # size [num_ifg, row, col]
    # save tiled data in tmpdir
    np.save(file=os.path.join(params[C.TMPDIR],
                              'dem_error_{}.npy'.format(tile.index)),
            arr=dem_error)
    # swap the axes of 3D array to fit the style used in function assemble_tiles
    tmp_array = np.moveaxis(dem_error_correction, 0, -1)
    # new dimension is [row, col, num_ifg]
    # save tiled data into tmpdir
    np.save(file=os.path.join(
        params[C.TMPDIR], 'dem_error_correction_{}.npy'.format(tile.index)),
            arr=tmp_array)

    # Calculate and save the average perpendicular baseline for the tile
    bperp_avg = np.nanmean(bperp, axis=(1, 2), dtype=np.float64)
    np.save(file=os.path.join(params[C.TMPDIR],
                              'bperp_avg_{}.npy'.format(tile.index)),
            arr=bperp_avg)
Code example #25
File: ref_phs_est.py Project: bopopescu/PyRate
def _update_phase_metadata(ifg):
    ifg.meta_data[ifc.PYRATE_REF_PHASE] = ifc.REF_PHASE_REMOVED
    ifg.write_modified_phase()
    log.debug(f"Reference phase corrected for {ifg.data_path}")
Code example #26
def create_png_and_kml_from_tif(output_folder_path: str,
                                output_type: str) -> None:
    """
    Function to create a preview PNG format image from a geotiff, and a KML file
    """
    log.info(f'Creating quicklook image for stack_{output_type}')
    # open raster and choose band to find min, max
    raster_path = join(output_folder_path, f"stack_{output_type}.tif")
    if not isfile(raster_path):
        raise Exception(f"stack_{output_type}.tif file not found at: " +
                        raster_path)
    gtif = gdal.Open(raster_path)
    # find bounds of image
    west, north, east, south = "", "", "", ""
    for line in gdal.Info(gtif).split('\n'):
        if "Upper Left" in line:
            west, north = line.split(")")[0].split("(")[1].split(",")
        if "Lower Right" in line:
            east, south = line.split(")")[0].split("(")[1].split(",")
    # write KML file
    kml_file_path = join(output_folder_path, f"stack_{output_type}.kml")
    kml_file_content = f"""<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://earth.google.com/kml/2.1">
  <Document>
    <name>stack_{output_type}.kml</name>
    <GroundOverlay>
      <name>stack_{output_type}.png</name>
      <Icon>
        <href>stack_{output_type}.png</href>
      </Icon>
      <LatLonBox>
        <north> """ + north + """ </north>
        <south> """ + south + """ </south>
        <east>  """ + east + """ </east>
        <west>  """ + west + """ </west>
      </LatLonBox>
    </GroundOverlay>
  </Document>
</kml>"""
    with open(kml_file_path, "w") as f:
        f.write(kml_file_content)

    # Get raster statistics
    srcband = gtif.GetRasterBand(1)
    minimum, maximum, _, _ = srcband.GetStatistics(True, True)
    del gtif  # close geotiff (used to calculate statistics)
    # steps used for the colourmap, must be even (currently hard-coded to 254 resulting in 255 values)
    no_of_steps = 254
    # slightly different code required for rate map and rate error map
    if output_type == 'rate':
        # minimum value might be negative
        maximum = max(abs(minimum), abs(maximum))
        minimum = -1 * maximum
        # colours: blue -> white -> red (white==0)
        # note that an extra value will be added for zero (i.e. white: 255 255 255)
        # generate a colourmap for odd number of values (currently hard-coded to 255)
        mid = int(no_of_steps * 0.5)
        # allocate RGB values to three numpy arrays r, g, b
        r = np.arange(0, mid) / mid
        g = r
        r = np.concatenate((r, np.ones(mid + 1)))
        g = np.concatenate((g, np.array([1]), np.flipud(g)))
        b = np.flipud(r)
        # change direction of colours (blue: positive, red: negative)
        r = np.flipud(r) * 255
        g = np.flipud(g) * 255
        b = np.flipud(b) * 255
    elif output_type == 'error':
        # colours: white -> red (minimum error -> maximum error)
        # allocate RGB values to three numpy arrays r, g, b
        r = np.ones(no_of_steps + 1) * 255
        g = np.arange(0, no_of_steps + 1) / (no_of_steps)
        g = np.flipud(g) * 255
        b = g
    # generate the colourmap file in the output folder
    color_map_path = join(output_folder_path, f"colourmap_{output_type}.txt")
    log.info(
        'Saving colour map to file {}; min/max values: {:.2f}/{:.2f}'.format(
            color_map_path, minimum, maximum))
    with open(color_map_path, "w") as f:
        f.write("nan 0 0 0 0\n")
        for i, value in enumerate(
                np.linspace(minimum, maximum, no_of_steps + 1)):
            f.write("%f %f %f %f 255\n" % (value, r[i], g[i], b[i]))
    input_tif_path = join(output_folder_path, f"stack_{output_type}.tif")
    output_png_path = join(output_folder_path, f"stack_{output_type}.png")
    subprocess.check_call([
        "gdaldem", "color-relief", "-of", "PNG", input_tif_path, "-alpha",
        color_map_path, output_png_path, "-nearest_color_entry"
    ])
    log.debug(f'Finished creating quicklook image for stack_{output_type}')
Code example #27
def sum_phase_closures(ifg_files: List[str], loops: List[WeightedLoop], params: dict) -> \
        Tuple[NDArray[(Any, Any, Any), Float32], NDArray[(Any, Any, Any), UInt16], NDArray[(Any,), UInt16]]:
    """
    Compute the closure sum for each pixel in each loop, and count the number of times a pixel
    contributes to a failed closure loop (where the summed closure is above/below the 
    CLOSURE_THR threshold).
    :param ifg_files: list of ifg files
    :param loops: list of loops
    :param params: params dict
    :return: Tuple of closure, ifgs_breach_count, num_occurrences_each_ifg
    closure: summed closure for each loop.
    ifgs_breach_count: shape=(ifg.shape, n_ifgs) number of times a pixel in an ifg fails the closure
    check (i.e., has unwrapping error) in all loops under investigation.
    num_occurrences_each_ifg: frequency of ifg appearance in all loops.
    """
    edge_to_indexed_ifgs = __create_ifg_edge_dict(ifg_files, params)
    ifgs = [v.IfgPhase for v in edge_to_indexed_ifgs.values()]
    n_ifgs = len(ifgs)

    if params[C.PARALLEL]:
        # rets = Parallel(n_jobs=params[cf.PROCESSES], verbose=joblib_log_level(cf.LOG_LEVEL))(
        #     delayed(__compute_ifgs_breach_count)(ifg0, n_ifgs, weighted_loop, edge_to_indexed_ifgs, params)
        #     for weighted_loop in loops
        # )
        # for k, r in enumerate(rets):
        #     closure_dict[k], ifgs_breach_count_dict[k] = r
        # TODO: enable multiprocessing - needs pickle error workaround
        closure = np.zeros(shape=(*ifgs[0].phase_data.shape, len(loops)), dtype=np.float32)
        ifgs_breach_count = np.zeros(shape=(ifgs[0].phase_data.shape + (n_ifgs,)), dtype=np.uint16)
        for k, weighted_loop in enumerate(loops):
            closure[:, :, k], ifgs_breach_count_l = __compute_ifgs_breach_count(weighted_loop, edge_to_indexed_ifgs,
                                                                                params)
            ifgs_breach_count += ifgs_breach_count_l
    else:
        process_loops = mpiops.array_split(loops)
        closure_process = np.zeros(shape=(*ifgs[0].phase_data.shape, len(process_loops)), dtype=np.float32)
        ifgs_breach_count_process = np.zeros(shape=(ifgs[0].phase_data.shape + (n_ifgs,)), dtype=np.uint16)
        for k, weighted_loop in enumerate(process_loops):
            closure_process[:, :, k], ifgs_breach_count_l = \
                __compute_ifgs_breach_count(weighted_loop, edge_to_indexed_ifgs, params)
            ifgs_breach_count_process += ifgs_breach_count_l  # process

        total_gb = mpiops.comm.allreduce(ifgs_breach_count_process.nbytes / 1e9, op=mpiops.MPI.SUM)
        log.debug(f"Memory usage to compute ifgs_breach_count_process was {total_gb} GB")
        log.debug(f"shape of ifgs_breach_count_process is {ifgs_breach_count_process.shape}")
        log.debug(f"dtype of ifgs_breach_count_process is {ifgs_breach_count_process.dtype}")

        total_gb = mpiops.comm.allreduce(closure_process.nbytes / 1e9, op=mpiops.MPI.SUM)
        log.debug(f"Memory usage to compute closure_process was {total_gb} GB")
        if mpiops.rank == 0:
            ifgs_breach_count = np.zeros(shape=(ifgs[0].phase_data.shape + (n_ifgs,)), dtype=np.uint16)

            # closure
            closure = np.zeros(shape=(*ifgs[0].phase_data.shape, len(loops)), dtype=np.float32)
            main_process_indices = mpiops.array_split(range(len(loops))).astype(np.uint16)
            closure[:, :, main_process_indices] = closure_process
            for rank in range(1, mpiops.size):
                rank_indices = mpiops.array_split(range(len(loops)), rank).astype(np.uint16)
                this_rank_closure = np.zeros(shape=(*ifgs[0].phase_data.shape, len(rank_indices)), dtype=np.float32)
                mpiops.comm.Recv(this_rank_closure, source=rank, tag=rank)
                closure[:, :, rank_indices] = this_rank_closure
        else:
            closure = None
            ifgs_breach_count = None
            mpiops.comm.Send(closure_process, dest=0, tag=mpiops.rank)

        if mpiops.MPI_INSTALLED:
            mpiops.comm.Reduce([ifgs_breach_count_process, mpiops.MPI.UINT16_T],
                               [ifgs_breach_count, mpiops.MPI.UINT16_T], op=mpiops.MPI.SUM, root=0)  # global
        else:
            ifgs_breach_count = mpiops.comm.reduce(ifgs_breach_count_process, op=mpiops.sum0_op, root=0)

        log.debug(f"successfully summed phase closure breach array")

    num_occurrences_each_ifg = None
    if mpiops.rank == 0:
        num_occurrences_each_ifg = _find_num_occurrences_each_ifg(loops, edge_to_indexed_ifgs, n_ifgs)

    return closure, ifgs_breach_count, num_occurrences_each_ifg
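Conceptually, a closure loop sums signed interferogram phases around a cycle, and for an error-free triplet the sum is zero; unwrapping errors leave a non-zero residual. A toy sketch with hypothetical per-epoch phases:

import numpy as np

epochs = np.random.rand(2, 2, 3)           # phase at 3 epochs for 2x2 pixels (hypothetical)
ifg_01 = epochs[:, :, 1] - epochs[:, :, 0]
ifg_12 = epochs[:, :, 2] - epochs[:, :, 1]
ifg_02 = epochs[:, :, 2] - epochs[:, :, 0]
closure = ifg_01 + ifg_12 - ifg_02         # ~0 everywhere without unwrapping errors
assert np.allclose(closure, 0)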
Code example #28
def ref_pixel_calc_wrapper(params: dict) -> Tuple[int, int]:
    """
    Wrapper for reference pixel calculation
    """
    __validate_supplied_lat_lon(params)
    ifg_paths = [ifg_path.tmp_sampled_path for ifg_path in params[C.INTERFEROGRAM_FILES]]
    lon = params[C.REFX]
    lat = params[C.REFY]

    ifg = Ifg(ifg_paths[0])
    ifg.open(readonly=True)
    # assume all interferograms have same projection and will share the same transform
    transform = ifg.dataset.GetGeoTransform()

    ref_pixel_file = Configuration.ref_pixel_path(params)

    def __reuse_ref_pixel_file_if_exists():
        if ref_pixel_file.exists():
            refx, refy = np.load(ref_pixel_file)
            log.info('Reusing pre-calculated ref-pixel values: ({}, {}) from file {}'.format(
                refx, refy, ref_pixel_file.as_posix()))
            log.warning("Reusing ref-pixel values from previous run!!!")
            params[C.REFX_FOUND], params[C.REFY_FOUND] = int(refx), int(refy)
            return int(refx), int(refy)
        else:
            return None, None

    # read and return
    refx, refy = mpiops.run_once(__reuse_ref_pixel_file_if_exists)
    if (refx is not None) and (refy is not None):
        update_refpix_metadata(ifg_paths, int(refx), int(refy), transform, params)
        return refx, refy

    if lon == -1 or lat == -1:
        log.info('Searching for best reference pixel location')

        half_patch_size, thresh, grid = ref_pixel_setup(ifg_paths, params)
        process_grid = mpiops.array_split(grid)
        save_ref_pixel_blocks(process_grid, half_patch_size, ifg_paths, params)
        mean_sds = _ref_pixel_mpi(process_grid, half_patch_size, ifg_paths, thresh, params)
        mean_sds = mpiops.comm.gather(mean_sds, root=0)
        if mpiops.rank == MAIN_PROCESS:
            mean_sds = np.hstack(mean_sds)

        refpixel_returned = mpiops.run_once(find_min_mean, mean_sds, grid)

        if isinstance(refpixel_returned, ValueError):
            raise RefPixelError(
                "Reference pixel calculation returned an all nan slice!\n"
                "Cannot continue downstream computation. Please change reference pixel algorithm used before "
                "continuing.")
        refy, refx = refpixel_returned   # row first means first value is latitude
        log.info('Selected reference pixel coordinate (x, y): ({}, {})'.format(refx, refy))
        lon, lat = convert_pixel_value_to_geographic_coordinate(refx, refy, transform)
        log.info('Selected reference pixel coordinate (lon, lat): ({}, {})'.format(lon, lat))
    else:
        log.info('Using reference pixel from config file (lon, lat): ({}, {})'.format(lon, lat))
        log.warning("Ensure user supplied reference pixel values are in lon/lat")
        refx, refy = convert_geographic_coordinate_to_pixel_value(lon, lat, transform)
        log.info('Converted reference pixel coordinate (x, y): ({}, {})'.format(refx, refy))

    np.save(file=ref_pixel_file, arr=[int(refx), int(refy)])
    update_refpix_metadata(ifg_paths, refx, refy, transform, params)

    log.debug("refpx, refpy: "+str(refx) + " " + str(refy))
    ifg.close()
    params[C.REFX_FOUND], params[C.REFY_FOUND] = int(refx), int(refy)
    return int(refx), int(refy)
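The pixel/geographic conversions rely on the standard GDAL affine geotransform. A simplified sketch ignoring the rotation terms (made-up transform; the real helpers are PyRate's convert_pixel_value_to_geographic_coordinate and convert_geographic_coordinate_to_pixel_value):

# GDAL geotransform: (x_origin, x_size, x_rotation, y_origin, y_rotation, y_size)
gt = (150.0, 0.001, 0.0, -34.0, 0.0, -0.001)  # hypothetical north-up transform

def pixel_to_geo(x, y, gt):
    return gt[0] + x * gt[1], gt[3] + y * gt[5]

def geo_to_pixel(lon, lat, gt):
    return round((lon - gt[0]) / gt[1]), round((lat - gt[3]) / gt[5])

print(pixel_to_geo(250, 300, gt))       # -> (150.25, -34.3)
print(geo_to_pixel(150.25, -34.3, gt))  # -> (250, 300)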
Code example #29
def ref_phase_est_wrapper(params):
    """
    Wrapper for reference phase estimation.
    """
    ifg_paths = [
        ifg_path.tmp_sampled_path for ifg_path in params[C.INTERFEROGRAM_FILES]
    ]
    refpx, refpy = params[C.REFX_FOUND], params[C.REFY_FOUND]
    if len(ifg_paths) < 2:
        raise ReferencePhaseError(
            "At least two interferograms required for reference phase correction ({len_ifg_paths} "
            "provided).".format(len_ifg_paths=len(ifg_paths)))

    # this is not going to be true as we now start with fresh multilooked ifg copies - remove?
    if mpiops.run_once(shared.check_correction_status, ifg_paths,
                       ifc.PYRATE_REF_PHASE):
        log.warning(
            'Reference phase correction already applied to ifgs; returning')
        return

    ifgs = [Ifg(ifg_path) for ifg_path in ifg_paths]
    # Save reference phase numpy arrays to disk.
    ref_phs_file = Configuration.ref_phs_file(params)

    # If ref phase file exists on disk, then reuse - subtract ref_phase from ifgs and return
    if ref_phs_file.exists():
        ref_phs = np.load(ref_phs_file)
        _update_phase_and_metadata(ifgs, ref_phs, params)
        shared.save_numpy_phase(ifg_paths, params)
        return ref_phs, ifgs

    # determine the reference phase for each ifg
    if params[C.REF_EST_METHOD] == 1:
        log.info("Calculating reference phase as median of interferogram")
        ref_phs = est_ref_phase_ifg_median(ifg_paths, params)
    elif params[C.REF_EST_METHOD] == 2:
        log.info(
            'Calculating reference phase in a patch surrounding pixel (x, y): ({}, {})'
            .format(refpx, refpy))
        ref_phs = est_ref_phase_patch_median(ifg_paths, params, refpx, refpy)
    else:
        raise ReferencePhaseError(
            "No such option, set parameter 'refest' to '1' or '2'.")

    # gather all reference phases from distributed processes and save to disk
    if mpiops.rank == MAIN_PROCESS:
        collected_ref_phs = np.zeros(len(ifg_paths), dtype=np.float64)
        process_indices = mpiops.array_split(range(len(ifg_paths))).astype(
            np.uint16)
        collected_ref_phs[process_indices] = ref_phs
        for r in range(1, mpiops.size):
            process_indices = mpiops.array_split(range(len(ifg_paths)),
                                                 r).astype(np.uint16)
            this_process_ref_phs = np.zeros(shape=len(process_indices),
                                            dtype=np.float64)
            mpiops.comm.Recv(this_process_ref_phs, source=r, tag=r)
            collected_ref_phs[process_indices] = this_process_ref_phs
        np.save(file=ref_phs_file, arr=collected_ref_phs)
    else:
        collected_ref_phs = np.empty(len(ifg_paths), dtype=np.float64)
        mpiops.comm.Send(ref_phs, dest=MAIN_PROCESS, tag=mpiops.rank)

    mpiops.comm.Bcast(collected_ref_phs, root=0)

    # subtract ref_phase from ifgs
    _update_phase_and_metadata(ifgs, collected_ref_phs, params)

    mpiops.comm.barrier()
    shared.save_numpy_phase(ifg_paths, params)

    log.debug("Finished reference phase correction")

    # Preserve old return value so tests don't break.
    return ref_phs, ifgs
Code example #30
File: covariance.py Project: woxin5295/PyRate
def cvd_from_phase(phase, ifg, r_dist, calc_alpha, save_acg=False, params=None):
    """
    A convenience function used to compute radial autocovariance from phase
    data

    :param ndarray phase: An array of interferogram phase data
    :param Ifg class ifg: A pyrate.shared.Ifg class instance
    :param ndarray r_dist: Array of distance values from the image centre
                (See Rdist class for more details)
    :param bool calc_alpha: If True calculate alpha
    :param bool save_acg: If True write autocorrelation and radial distance
                data to numpy array file on disk
    :param dict params: [optional] Dictionary of configuration parameters;
                Must be provided if save_acg=True

    :return: maxvar: The maximum variance (at zero lag)
    :rtype: float
    :return: alpha: the exponential length-scale of decay factor
    :rtype: float
    """
    # pylint: disable=invalid-name
    # pylint: disable=too-many-locals

    autocorr_grid = _get_autogrid(phase)
    acg = reshape(autocorr_grid, phase.size, order='F')
    # Symmetry in image; keep only unique points
    # tmp = _unique_points(zip(acg, r_dist))
    # Sudipta: Unlikely, as unique_point is a search/comparison,
    # whereas keeping 1st half is just numpy indexing.
    # If it is not faster, why was this done differently here?
    # r_dist = r_dist[:int(ceil(phase.size / 2.0)) + nrows]
    acg = acg[:len(r_dist)]
    # Alternative method to remove duplicate cells
    # r_dist = r_dist[:ceil(len(r_dist)/2)+nlines]
    #  Reason for '+nlines' term unknown
    # eg. array([x for x in set([(1,1), (2,2), (1,1)])])
    # the above shortens r_dist by some number of cells

    # pick the smallest axis to determine circle search radius
    if (ifg.x_centre * ifg.x_size) < (ifg.y_centre * ifg.y_size):
        maxdist = (ifg.x_centre+1) * ifg.x_size / DISTFACT
    else:
        maxdist = (ifg.y_centre+1) * ifg.y_size / DISTFACT

    # filter out data where the lag distance is greater than maxdist
    # r_dist = array([e for e in rorig if e <= maxdist]) #
    # MG: prefers to use all the data
    # acg = array([e for e in rorig if e <= maxdist])
    indices_to_keep = r_dist < maxdist
    acg = acg[indices_to_keep]

    # optionally save acg vs dist observations to disk
    if save_acg:
        _save_cvd_data(acg, r_dist[indices_to_keep],
                       ifg.data_path, params[cf.TMPDIR])

    if calc_alpha:
        # bin width for collecting data
        bin_width = max(ifg.x_size, ifg.y_size) * 2 / DISTFACT  # km
        r_dist = r_dist[indices_to_keep]  # km
        # classify values of r_dist according to bin number
        rbin = ceil(r_dist / bin_width).astype(int)
        maxbin = max(rbin) - 1  # consistent with Legacy data

        cvdav = zeros(shape=(2, maxbin + 1))

        # the following stays in numpy land
        # distance instead of bin number
        cvdav[0, :] = np.multiply(range(maxbin + 1), bin_width)
        # mean variance for the bins
        cvdav[1, :] = [mean(acg[rbin == b]) for b in range(maxbin + 1)]
        # calculate best fit function maxvar*exp(-alpha*r_dist)
        alphaguess = 2 / (maxbin * bin_width)
        alpha = fmin(_pendiffexp, x0=alphaguess, args=(cvdav,), disp=False,
                     xtol=1e-6, ftol=1e-6)
        log.debug("1st guess alpha {}, converged "
                 "alpha: {}".format(alphaguess, alpha))
        # maximum variance usually at the zero lag: max(acg[:len(r_dist)])
        return np.max(acg), alpha[0]  # alpha unit 1/km
    else:
        return np.max(acg), None