Code Example #1
File: run_pyrate.py  Project: smx83230743/PyRate
def _maxvar_vcm_calc(ifg_paths, params, preread_ifgs):
    """
    MPI wrapper for maxvar and vcmt computation
    """
    log.info('Calculating maxvar and vcm')
    process_indices = mpiops.array_split(range(len(ifg_paths)))

    def _get_r_dist(ifg_path):
        """
        Get the RDist object for the given ifg
        """
        ifg = Ifg(ifg_path)
        ifg.open()
        r_dist = vcm_module.RDist(ifg)()
        ifg.close()
        return r_dist

    r_dist = mpiops.run_once(_get_r_dist, ifg_paths[0])
    prcs_ifgs = mpiops.array_split(ifg_paths)
    process_maxvar = []
    for n, i in enumerate(prcs_ifgs):
        log.info('Calculating maxvar for ifg {} of {} in this process '
                 '({} total)'.format(n + 1, len(prcs_ifgs), len(ifg_paths)))
        process_maxvar.append(
            vcm_module.cvd(i,
                           params,
                           r_dist,
                           calc_alpha=True,
                           write_vals=True,
                           save_acg=True)[0])
    if mpiops.rank == MASTER_PROCESS:
        maxvar = np.empty(len(ifg_paths), dtype=np.float64)
        maxvar[process_indices] = process_maxvar
        for i in range(1, mpiops.size):  # pragma: no cover
            rank_indices = mpiops.array_split(range(len(ifg_paths)), i)
            # receive the maxvar values computed by rank i
            this_rank_maxvar = np.empty(len(rank_indices),
                                        dtype=np.float64)
            mpiops.comm.Recv(this_rank_maxvar, source=i, tag=i)
            maxvar[rank_indices] = this_rank_maxvar
    else:  # pragma: no cover
        maxvar = np.empty(len(ifg_paths), dtype=np.float64)
        mpiops.comm.Send(np.array(process_maxvar, dtype=np.float64),
                         dest=MASTER_PROCESS,
                         tag=mpiops.rank)

    maxvar = mpiops.comm.bcast(maxvar, root=0)
    vcmt = mpiops.run_once(vcm_module.get_vcmt, preread_ifgs, maxvar)
    return maxvar, vcmt
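The gather at the end of this wrapper is a recurring pattern on this page: each rank computes its slice of the work, the master reassembles the full array via point-to-point Recv calls, and a broadcast gives every rank the result. Below is a minimal, self-contained sketch of that pattern in plain mpi4py and numpy; the helper split_indices is a hypothetical stand-in for PyRate's mpiops.array_split, not its actual implementation.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

def split_indices(n, r):
    """Indices of the items assigned to rank r, mirroring np.array_split."""
    return np.array_split(np.arange(n), size)[r]

n_items = 10
# each rank computes values for its own slice of the work
my_vals = np.array([float(i) ** 2 for i in split_indices(n_items, rank)])

if rank == 0:
    full = np.empty(n_items, dtype=np.float64)
    full[split_indices(n_items, 0)] = my_vals
    for r in range(1, size):  # receive each worker's slice in turn
        buf = np.empty(len(split_indices(n_items, r)), dtype=np.float64)
        comm.Recv(buf, source=r, tag=r)
        full[split_indices(n_items, r)] = buf
else:
    full = None
    comm.Send(my_vals, dest=0, tag=rank)

full = comm.bcast(full, root=0)  # every rank now holds the complete array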
Code Example #2
File: run_pyrate.py  Project: wchch1010/PyRate
def _mst_calc(dest_tifs, params, tiles, preread_ifgs):
    """
    MPI wrapper function for MST calculation
    """
    process_tiles = mpiops.array_split(tiles)

    def _save_mst_tile(tile, i, preread_ifgs):
        """
        Compute and save the MST matrix for a single tile
        """
        if params[cf.NETWORKX_OR_MATLAB_FLAG] == 1:
            log.info('Calculating minimum spanning tree matrix '
                     'using NetworkX method')
            mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs)
        elif params[cf.NETWORKX_OR_MATLAB_FLAG] == 0:
            raise ConfigException('Matlab-style MST not supported')
        else:
            raise ConfigException('Only NetworkX MST is supported')
            # mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs)
        # locally save the mst_mat
        mst_file_process_n = join(
            params[cf.TMPDIR], 'mst_mat_{}.npy'.format(i))
        np.save(file=mst_file_process_n, arr=mst_tile)

    for t in process_tiles:
        _save_mst_tile(t, t.index, preread_ifgs)
    log.info('Finished MST calculation for process {}'.format(mpiops.rank))
    mpiops.comm.barrier()
Code Example #3
File: run_pyrate.py  Project: wchch1010/PyRate
def _timeseries_calc(ifg_paths, params, vcmt, tiles, preread_ifgs):
    """
    MPI wrapper for time series calculation.
    """
    if params[cf.TIME_SERIES_CAL] == 0:
        log.info('Time Series Calculation not required')
        return

    if params[cf.TIME_SERIES_METHOD] == 1:
        log.info('Calculating time series using Laplacian Smoothing method')
    elif params[cf.TIME_SERIES_METHOD] == 2:
        log.info('Calculating time series using SVD method')

    output_dir = params[cf.TMPDIR]
    process_tiles = mpiops.array_split(tiles)
    for t in process_tiles:
        log.info('Calculating time series for tile {}'.format(t.index))
        ifg_parts = [shared.IfgPart(p, t, preread_ifgs) for p in ifg_paths]
        mst_tile = np.load(os.path.join(output_dir,
                                        'mst_mat_{}.npy'.format(t.index)))
        res = timeseries.time_series(ifg_parts, params, vcmt, mst_tile)
        tsincr, tscum, _ = res
        np.save(file=os.path.join(output_dir, 'tsincr_{}.npy'.format(t.index)),
                arr=tsincr)
        np.save(file=os.path.join(output_dir, 'tscuml_{}.npy'.format(t.index)),
                arr=tscum)
    mpiops.comm.barrier()
Code Example #4
def timeseries_calc(ifg_paths, params, vcmt, tiles, preread_ifgs):
    """
    Time series calculation.

    :param ifg_paths: List of interferogram paths
    :param params: Parameters dictionary corresponding to config file
    :param vcmt: vcmt array
    :param tiles: List of all tiles used during MPI processes
    :param preread_ifgs: Dictionary containing interferogram characteristics for efficient computing

    :return: None; tsincr and tscuml arrays are saved to disk for each tile
    """
    process_tiles = mpiops.array_split(tiles)
    log.info('Calculating time series')
    output_dir = params[cf.TMPDIR]
    for t in process_tiles:
        log.info('Calculating time series for tile {}'.format(t.index))
        ifg_parts = [shared.IfgPart(p, t, preread_ifgs) for p in ifg_paths]
        mst_tile = np.load(
            os.path.join(output_dir, 'mst_mat_{}.npy'.format(t.index)))
        res = timeseries.time_series(ifg_parts, params, vcmt, mst_tile)
        tsincr, tscum, _ = res
        np.save(file=os.path.join(output_dir, 'tsincr_{}.npy'.format(t.index)),
                arr=tsincr)
        np.save(file=os.path.join(output_dir, 'tscuml_{}.npy'.format(t.index)),
                arr=tscum)
    mpiops.comm.barrier()
Code Example #5
File: run_pyrate.py  Project: wchch1010/PyRate
def _orb_fit_calc(ifg_paths, params, preread_ifgs=None):
    """
    MPI wrapper for orbital fit correction
    """
    log.info('Calculating orbfit correction')

    if not params[cf.ORBITAL_FIT]:
        log.info('Orbital correction not required')
        return

    if preread_ifgs:  # checks only run when preread_ifgs is supplied (mpi tests)
        # perform some general error/sanity checks
        log.info('Checking Orbital error correction status')
        if mpiops.run_once(shared.check_correction_status, preread_ifgs,
                           ifc.PYRATE_ORBITAL_ERROR):
            log.info('Finished Orbital error correction')
            return  # correction already applied; nothing to do

    if params[cf.ORBITAL_FIT_METHOD] == 1:
        prcs_ifgs = mpiops.array_split(ifg_paths)
        orbital.remove_orbital_error(prcs_ifgs, params, preread_ifgs)
    else:
        # Here we do all the multilooking in one process, in memory.
        # Multiple processes could be used if the data were written to
        # disc during the remove_orbital_error step. A performance
        # comparison of multilooked files on disc vs in-memory,
        # single-process multilooking should be made.
        if mpiops.rank == MASTER_PROCESS:
            orbital.remove_orbital_error(ifg_paths, params, preread_ifgs)
    mpiops.comm.barrier()
    log.info('Finished Orbital error correction')
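Several wrappers on this page rely on mpiops.run_once to execute a step on a single process and share the result with all ranks. The sketch below shows what such a helper can look like; it is an assumption inferred from how run_once is called here, not PyRate's actual implementation.

from mpi4py import MPI

comm = MPI.COMM_WORLD

def run_once(f, *args, **kwargs):
    """Run f on the master rank only and broadcast its result to all ranks."""
    result = f(*args, **kwargs) if comm.Get_rank() == 0 else None
    return comm.bcast(result, root=0)

# usage: every rank receives the same value, but f executed only on rank 0
value = run_once(lambda: 42)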
Code Example #6
def ref_phs_method2(ifg_paths, params, refpx, refpy):
    """
    Reference phase computation using method 2.

    :param ifg_paths: List of interferogram paths
    :param params: Parameters dictionary corresponding to config file
    :param refpx: Reference pixel x-coordinate
    :param refpy: Reference pixel y-coordinate

    :return ref_phs: Array of reference phase values, one per interferogram processed
    """
    half_chip_size = int(np.floor(params[cf.REF_CHIP_SIZE] / 2.0))
    chipsize = 2 * half_chip_size + 1
    thresh = chipsize * chipsize * params[cf.REF_MIN_FRAC]
    process_ifg_paths = mpiops.array_split(ifg_paths)

    def _inner(ifg_path):
        ifg = Ifg(ifg_path)
        ifg.open(readonly=False)
        phase_data = ifg.phase_data
        ref_ph = rpe.est_ref_phs_method2(phase_data, half_chip_size, refpx,
                                         refpy, thresh)
        phase_data -= ref_ph
        md = ifg.meta_data
        md[ifc.REF_PHASE] = ifc.REF_PHASE_REMOVED
        ifg.write_modified_phase(data=phase_data)
        ifg.close()
        return ref_ph

    ref_phs = np.array([_inner(p) for p in process_ifg_paths])
    log.info('Ref phase computed in process {}'.format(mpiops.rank))
    return ref_phs
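The chip-size arithmetic at the top of this function deserves a worked example: halving and then doubling-plus-one forces an odd window size, so the chip has a well-defined centre pixel. The values below are illustrative assumptions, not defaults from the config file.

import numpy as np

REF_CHIP_SIZE, REF_MIN_FRAC = 21, 0.5                  # assumed example values
half_chip_size = int(np.floor(REF_CHIP_SIZE / 2.0))    # 10
chipsize = 2 * half_chip_size + 1                      # 21, always odd
thresh = chipsize * chipsize * REF_MIN_FRAC            # 220.5 valid pixels needed
# an even input (e.g. 20) would also yield chipsize 21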
Code Example #7
def linrate_calc(ifg_paths, params, vcmt, tiles, preread_ifgs):
    """
    MPI capable linrate calculation.

    :param ifg_paths: List of interferogram paths
    :param params: Parameters dictionary corresponding to config file
    :param vcmt: vcmt array
    :param tiles: List of all tiles used during MPI processes
    :param preread_ifgs: Dictionary containing interferogram characteristics for efficient computing
    
    :return: None; rate, error and samples arrays are saved to disk for each tile
    """

    process_tiles = mpiops.array_split(tiles)
    log.info('Calculating linear rate')
    output_dir = params[cf.TMPDIR]
    for t in process_tiles:
        log.info('Calculating linear rate for tile {}'.format(t.index))
        ifg_parts = [shared.IfgPart(p, t, preread_ifgs) for p in ifg_paths]
        mst_grid_n = np.load(
            os.path.join(output_dir, 'mst_mat_{}.npy'.format(t.index)))
        rate, error, samples = linrate.linear_rate(ifg_parts, params, vcmt,
                                                   mst_grid_n)
        # save per-tile results to disk
        np.save(file=os.path.join(output_dir,
                                  'linrate_{}.npy'.format(t.index)),
                arr=rate)
        np.save(file=os.path.join(output_dir,
                                  'linerror_{}.npy'.format(t.index)),
                arr=error)
        np.save(file=os.path.join(output_dir,
                                  'linsamples_{}.npy'.format(t.index)),
                arr=samples)
    mpiops.comm.barrier()
Code Example #8
def mst_calc(dest_tifs, params, tiles, preread_ifgs):
    """
    MPI wrapper for minimum spanning tree (MST) calculation; each process
    handles its share of the tiles.

    :param dest_tifs: List of interferogram paths
    :param params: Parameters dictionary corresponding to config file
    :param tiles: List of all tiles used during MPI processes
    :param preread_ifgs: Dictionary containing interferogram characteristics for efficient computing

    :return: None; per-tile MST matrices are saved to disk
    """
    process_tiles = mpiops.array_split(tiles)

    def save_mst_tile(tile, i, preread_ifgs):
        """ Convenient inner loop for mst tile saving"""
        if params[cf.NETWORKX_OR_MATLAB_FLAG] == 1:
            log.info('Calculating minimum spanning tree matrix '
                     'using NetworkX method')
            mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs)
        elif params[cf.NETWORKX_OR_MATLAB_FLAG] == 0:
            raise ConfigException('Matlab mst not supported')
        else:
            raise ConfigException('Only NetworkX mst is supported')
            # mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs)
        # locally save the mst_mat
        mst_file_process_n = join(params[cf.TMPDIR],
                                  'mst_mat_{}.npy'.format(i))
        np.save(file=mst_file_process_n, arr=mst_tile)

    for t in process_tiles:
        save_mst_tile(t, t.index, preread_ifgs)
    log.info('Finished MST calculation for process {}'.format(mpiops.rank))
    mpiops.comm.barrier()
Code Example #9
def _calc_svd_time_series(ifg_paths, params, preread_ifgs, tiles):
    """
    Helper function to obtain time series for spatio-temporal filter
    using SVD method
    """
    # Are there other existing functions that can perform this same job?
    log.info('Calculating time series via SVD method for '
             'spatio-temporal filter')
    # copy params temporarily
    new_params = deepcopy(params)
    new_params[cf.TIME_SERIES_METHOD] = 2  # use SVD method

    process_tiles = mpiops.array_split(tiles)
    output_dir = params[cf.TMPDIR]

    nvels = None
    for t in process_tiles:
        log.info('Calculating time series for tile {} during aps '
                 'correction'.format(t.index))
        ifg_parts = [shared.IfgPart(p, t, preread_ifgs) for p in ifg_paths]
        mst_tile = np.load(
            os.path.join(output_dir, 'mst_mat_{}.npy'.format(t.index)))
        tsincr = time_series(ifg_parts, new_params, vcmt=None, mst=mst_tile)[0]
        np.save(file=os.path.join(output_dir,
                                  'tsincr_aps_{}.npy'.format(t.index)),
                arr=tsincr)
        nvels = tsincr.shape[2]

    nvels = mpiops.comm.bcast(nvels, root=0)
    # need to assemble tsincr from all processes
    tsincr_g = mpiops.run_once(_assemble_tsincr, ifg_paths, params,
                               preread_ifgs, tiles, nvels)
    log.info('Finished calculating time series for spatio-temporal filter')
    return tsincr_g
Code Example #10
File: run_pyrate.py  Project: wchch1010/PyRate
def _ref_phs_method2(ifg_paths, params, refpx, refpy):
    """
    MPI wrapper for reference phase computation using method 2.
    Refer to documentation for ref_est_phs.est_ref_phase_method2.
    """
    half_chip_size = int(np.floor(params[cf.REF_CHIP_SIZE] / 2.0))
    chipsize = 2 * half_chip_size + 1
    thresh = chipsize * chipsize * params[cf.REF_MIN_FRAC]
    process_ifg_paths = mpiops.array_split(ifg_paths)

    def _inner(ifg_path):
        """
        Compute and remove the reference phase for a single ifg
        """
        ifg = Ifg(ifg_path)
        ifg.open(readonly=False)
        phase_data = ifg.phase_data
        ref_ph = rpe._est_ref_phs_method2(phase_data,
                                          half_chip_size,
                                          refpx, refpy, thresh)
        phase_data -= ref_ph
        md = ifg.meta_data
        md[ifc.PYRATE_REF_PHASE] = ifc.REF_PHASE_REMOVED
        ifg.write_modified_phase(data=phase_data)
        ifg.close()
        return ref_ph

    ref_phs = np.array([_inner(p) for p in process_ifg_paths])
    log.info('Ref phase computed in process {}'.format(mpiops.rank))
    return ref_phs
Code Example #11
File: run_pyrate.py  Project: wchch1010/PyRate
def _linrate_calc(ifg_paths, params, vcmt, tiles, preread_ifgs):
    """
    MPI wrapper for linrate calculation
    """
    process_tiles = mpiops.array_split(tiles)
    log.info('Calculating linear rate map')
    output_dir = params[cf.TMPDIR]
    for t in process_tiles:
        log.info('Calculating linear rate of tile {}'.format(t.index))
        ifg_parts = [shared.IfgPart(p, t, preread_ifgs) for p in ifg_paths]
        mst_grid_n = np.load(os.path.join(output_dir,
                                          'mst_mat_{}.npy'.format(t.index)))
        rate, error, samples = linrate.linear_rate(ifg_parts, params,
                                                   vcmt, mst_grid_n)
        # save per-tile results to disk
        np.save(file=os.path.join(output_dir,
                                  'linrate_{}.npy'.format(t.index)),
                arr=rate)
        np.save(file=os.path.join(output_dir,
                                  'linerror_{}.npy'.format(t.index)),
                arr=error)
        np.save(file=os.path.join(output_dir,
                                  'linsamples_{}.npy'.format(t.index)),
                arr=samples)
    mpiops.comm.barrier()
Code Example #12
File: run_pyrate.py  Project: wchch1010/PyRate
def _phase_sum(ifg_paths, params):
    """
    Save phase data and phase sum used in the reference phase estimation
    """
    p_paths = mpiops.array_split(ifg_paths)
    ifg = Ifg(p_paths[0])
    ifg.open(readonly=True)
    shape = ifg.shape
    phs_sum = np.zeros(shape=shape, dtype=np.float64)
    ifg.close()

    for d in p_paths:
        ifg = Ifg(d)
        ifg.open()
        ifg.nodata_value = params[cf.NO_DATA_VALUE]
        phs_sum += ifg.phase_data
        ifg.close()

    if mpiops.rank == MASTER_PROCESS:
        phase_sum_all = phs_sum
        # receive rank-by-rank: only one extra buffer in memory at a time
        for i in range(1, mpiops.size):  # pragma: no cover
            phs_sum = np.zeros(shape=shape, dtype=np.float64)
            mpiops.comm.Recv(phs_sum, source=i, tag=i)
            phase_sum_all += phs_sum
        comp = np.isnan(phase_sum_all)  # NaN mask, matching the Matlab code
        comp = np.ravel(comp, order='F')  # Fortran order, matching Matlab
    else:  # pragma: no cover
        comp = None
        mpiops.comm.Send(phs_sum, dest=0, tag=mpiops.rank)

    comp = mpiops.comm.bcast(comp, root=0)
    return comp
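The explicit Recv loop above is a hand-rolled sum reduction. For comparison, here is a minimal sketch of the same step using MPI's built-in Reduce; the array shape is an arbitrary stand-in. The original loops rank-by-rank so the master holds only one extra buffer at a time, whereas a collective leaves the buffering strategy to the MPI implementation.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

local_sum = np.full((4, 5), float(rank), dtype=np.float64)  # per-rank partial sum
total = np.zeros_like(local_sum) if rank == 0 else None     # recv buffer on root only
comm.Reduce(local_sum, total, op=MPI.SUM, root=0)           # total valid on rank 0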
Code Example #13
def ref_phs_method1(ifg_paths, comp):
    """
    Reference phase computation using method 1.

    :param ifg_paths: List of interferogram paths
    :param comp: Flattened boolean mask of pixels that are NaN in the summed interferogram phase

    :return ref_phs: Array of reference phase values, one per interferogram processed
    """
    def _inner(ifg_path):
        """ convenient inner loop """
        ifg = Ifg(ifg_path)
        ifg.open(readonly=False)
        phase_data = ifg.phase_data
        ref_phase = rpe.est_ref_phs_method1(phase_data, comp)
        phase_data -= ref_phase
        md = ifg.meta_data
        md[ifc.REF_PHASE] = ifc.REF_PHASE_REMOVED
        ifg.write_modified_phase(data=phase_data)
        ifg.close()
        return ref_phase

    this_process_ifgs = mpiops.array_split(ifg_paths)
    ref_phs = np.array([_inner(ifg) for ifg in this_process_ifgs])
    log.info('Ref phase computed in process {}'.format(mpiops.rank))
    return ref_phs
Code Example #14
def save_numpy_phase(ifg_paths, tiles, params):
    """
    Save interferogram phase data as numpy array file on disk.

    :param list ifg_paths: List of strings for interferogram paths
    :param list tiles: List of pyrate.shared.Tile instances
    :param dict params: Dictionary of configuration parameters

    :return: None, file saved to disk
    """
    process_ifgs = mpiops.array_split(ifg_paths)
    outdir = params[cf.TMPDIR]
    if not os.path.exists(outdir):
        mkdir_p(outdir)
    for ifg_path in process_ifgs:
        ifg = Ifg(ifg_path)
        ifg.open()
        phase_data = ifg.phase_data
        bname = basename(ifg_path).split('.')[0]
        for t in tiles:
            p_data = phase_data[t.top_left_y:t.bottom_right_y,
                                t.top_left_x:t.bottom_right_x]
            phase_file = 'phase_data_{}_{}.npy'.format(bname, t.index)
            np.save(file=join(outdir, phase_file), arr=p_data)
        ifg.close()
    mpiops.comm.barrier()
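The tile slicing above depends on four Tile attributes. As a reading aid, here is a minimal stand-in with the same field names; the real class is pyrate.shared.Tile and its constructor may differ, so treat this as an assumption.

from collections import namedtuple
import numpy as np

# hypothetical stand-in mirroring only the attributes used in save_numpy_phase
Tile = namedtuple('Tile', 'index top_left_y top_left_x bottom_right_y bottom_right_x')

data = np.arange(36, dtype=np.float32).reshape(6, 6)
t = Tile(index=0, top_left_y=0, top_left_x=0, bottom_right_y=3, bottom_right_x=3)
block = data[t.top_left_y:t.bottom_right_y, t.top_left_x:t.bottom_right_x]
assert block.shape == (3, 3)  # rows 0-2, columns 0-2 of the full array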
Code Example #15
File: run_pyrate.py  Project: wchch1010/PyRate
def _ref_phase_estimation(ifg_paths, params, refpx, refpy, preread_ifgs=None):
    """
    Wrapper function for MPI-enabled reference phase estimation.
    Refer to documentation for ref_est_phs.estimate_ref_phase.
    """
    # perform some checks on existing ifgs
    # TODO: implement MPI capability into ref_phs_est module and remove here
    if preread_ifgs:
        log.info('Checking reference phase estimation status')
        if mpiops.run_once(shared.check_correction_status, preread_ifgs,
                           ifc.PYRATE_REF_PHASE):
            log.info('Finished reference phase estimation')
            return  # correction already applied; nothing to do

    if params[cf.REF_EST_METHOD] == 1:
        # calculate phase sum for later use in ref phase method 1
        comp = _phase_sum(ifg_paths, params)
        log.info('Computing reference phase via method 1')
        process_ref_phs = _ref_phs_method1(ifg_paths, comp)
    elif params[cf.REF_EST_METHOD] == 2:
        log.info('Computing reference phase via method 2')
        process_ref_phs = _ref_phs_method2(ifg_paths, params, refpx, refpy)
    else:
        raise ConfigException('Ref phase estimation method must be 1 or 2')

    # Save reference phase numpy arrays to disk
    ref_phs_file = join(params[cf.TMPDIR], 'ref_phs.npy')
    if mpiops.rank == MASTER_PROCESS:
        ref_phs = np.zeros(len(ifg_paths), dtype=np.float64)
        process_indices = mpiops.array_split(range(len(ifg_paths)))
        ref_phs[process_indices] = process_ref_phs
        for r in range(1, mpiops.size):  # pragma: no cover
            process_indices = mpiops.array_split(range(len(ifg_paths)), r)
            this_process_ref_phs = np.zeros(shape=len(process_indices),
                                            dtype=np.float64)
            mpiops.comm.Recv(this_process_ref_phs, source=r, tag=r)
            ref_phs[process_indices] = this_process_ref_phs
        np.save(file=ref_phs_file, arr=ref_phs)
    else:  # pragma: no cover
        # send reference phase data to master process
        mpiops.comm.Send(process_ref_phs, dest=MASTER_PROCESS,
                         tag=mpiops.rank)
    log.info('Finished reference phase estimation')
Code Example #16
def maxvar_vcm_calc(ifg_paths, params, preread_ifgs):
    """
    MPI capable maxvar and vcmt computation.

    :param ifg_paths: List of interferogram paths
    :param params: Parameters dictionary corresponding to config file
    :param preread_ifgs: Dictionary containing interferogram characteristics for efficient computing

    :return maxvar: Array of shape (nifgs,)
    :return vcmt: Array of shape (nifgs, nifgs)
    """
    log.info('Calculating maxvar and vcm')
    process_indices = mpiops.array_split(range(len(ifg_paths)))
    prcs_ifgs = mpiops.array_split(ifg_paths)
    process_maxvar = []
    for n, i in enumerate(prcs_ifgs):
        log.info('Calculating maxvar for ifg {} of {} in this process '
                 '({} total)'.format(n + 1, len(prcs_ifgs), len(ifg_paths)))
        # TODO: cvd calculation is still pretty slow - revisit
        process_maxvar.append(vcm_module.cvd(i, params)[0])
    if mpiops.rank == MASTER_PROCESS:
        maxvar = np.empty(len(ifg_paths), dtype=np.float64)
        maxvar[process_indices] = process_maxvar
        for i in range(1, mpiops.size):  # pragma: no cover
            rank_indices = mpiops.array_split(range(len(ifg_paths)), i)
            # receive the maxvar values computed by rank i
            this_rank_maxvar = np.empty(len(rank_indices),
                                        dtype=np.float64)
            mpiops.comm.Recv(this_rank_maxvar, source=i, tag=i)
            maxvar[rank_indices] = this_rank_maxvar
    else:  # pragma: no cover
        maxvar = np.empty(len(ifg_paths), dtype=np.float64)
        mpiops.comm.Send(np.array(process_maxvar, dtype=np.float64),
                         dest=MASTER_PROCESS,
                         tag=mpiops.rank)

    maxvar = mpiops.comm.bcast(maxvar, root=0)
    vcmt = mpiops.run_once(vcm_module.get_vcmt, preread_ifgs, maxvar)
    return maxvar, vcmt
Code Example #17
def ref_phase_estimation(ifg_paths, params, refpx, refpy):
    """
    Reference phase estimation.

    :param ifg_paths: List of interferogram paths
    :param params: Parameters dictionary corresponding to config file
    :param refpx: Reference pixel x-coordinate
    :param refpy: Reference pixel y-coordinate

    :return: None; the assembled reference phase array is saved to disk
    """

    log.info('Estimating and removing reference phase')
    if params[cf.REF_EST_METHOD] == 1:
        # calculate phase sum for later use in ref phase method 1
        comp = phase_sum(ifg_paths, params)
        process_ref_phs = ref_phs_method1(ifg_paths, comp)
    elif params[cf.REF_EST_METHOD] == 2:
        process_ref_phs = ref_phs_method2(ifg_paths, params, refpx, refpy)
    else:
        raise ConfigException('Ref phase estimation method must be 1 or 2')

    ref_phs_file = join(params[cf.TMPDIR], 'ref_phs.npy')
    if mpiops.rank == MASTER_PROCESS:
        ref_phs = np.zeros(len(ifg_paths), dtype=np.float64)
        process_indices = mpiops.array_split(range(len(ifg_paths)))
        ref_phs[process_indices] = process_ref_phs
        for r in range(1, mpiops.size):  # pragma: no cover
            process_indices = mpiops.array_split(range(len(ifg_paths)), r)
            this_process_ref_phs = np.zeros(shape=len(process_indices),
                                            dtype=np.float64)
            mpiops.comm.Recv(this_process_ref_phs, source=r, tag=r)
            ref_phs[process_indices] = this_process_ref_phs
        np.save(file=ref_phs_file, arr=ref_phs)
    else:  # pragma: no cover
        # send reference phase data to master process
        mpiops.comm.Send(process_ref_phs, dest=MASTER_PROCESS, tag=mpiops.rank)
Code Example #18
File: run_pyrate.py  Project: wchch1010/PyRate
def _create_ifg_dict(dest_tifs, params, tiles):
    """
    1. Convert ifg phase data into numpy binary files.
    2. Save the preread_ifgs dict with information about the ifgs that are
    later used for fast loading of Ifg files in IfgPart class

    :param list dest_tifs: List of destination tifs
    :param dict params: Config dictionary
    :param list tiles: List of all Tile instances

    :return: preread_ifgs: Dictionary containing information regarding
                interferograms that are used later in workflow
    :rtype: dict
    """
    ifgs_dict = {}
    process_tifs = mpiops.array_split(dest_tifs)
    shared.save_numpy_phase(dest_tifs, tiles, params)
    for d in process_tifs:
        ifg = shared._prep_ifg(d, params)
        ifgs_dict[d] = PrereadIfg(path=d,
                                  nan_fraction=ifg.nan_fraction,
                                  master=ifg.master,
                                  slave=ifg.slave,
                                  time_span=ifg.time_span,
                                  nrows=ifg.nrows,
                                  ncols=ifg.ncols,
                                  metadata=ifg.meta_data)
        ifg.close()
    ifgs_dict = _join_dicts(mpiops.comm.allgather(ifgs_dict))

    preread_ifgs_file = join(params[cf.TMPDIR], 'preread_ifgs.pk')

    if mpiops.rank == MASTER_PROCESS:

        # add some extra information that's also useful later
        gt, md, wkt = shared.get_geotiff_header_info(process_tifs[0])
        ifgs_dict['epochlist'] = algorithm.get_epochs(ifgs_dict)[0]
        ifgs_dict['gt'] = gt
        ifgs_dict['md'] = md
        ifgs_dict['wkt'] = wkt
        # dump ifgs_dict file for later use
        cp.dump(ifgs_dict, open(preread_ifgs_file, 'wb'))

    mpiops.comm.barrier()
    preread_ifgs = OrderedDict(sorted(cp.load(open(preread_ifgs_file,
                                                   'rb')).items()))
    log.info('Finished converting phase_data to numpy '
             'in process {}'.format(mpiops.rank))
    return preread_ifgs
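After allgather, every rank holds a list of per-rank dictionaries and _join_dicts merges them into one. That helper is private to PyRate, so the following sketch is an assumption about what it does, shown only to make the data flow concrete.

def join_dicts(dicts):
    """Merge a list of dicts into one; later entries win on key clashes."""
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged

# e.g. join_dicts([{'a.tif': 1}, {'b.tif': 2}]) == {'a.tif': 1, 'b.tif': 2}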
Code Example #19
def find_ref_pixel(ifg_paths, params):
    """
    Find reference pixel using MPI Parameters.

    :param ifg_paths: List of interferogram paths
    :param params: Parameters dictionary corresponding to config file

    :return Tuple of (refy, refx).
    """
    half_patch_size, thresh, grid = refpixel.ref_pixel_setup(ifg_paths, params)
    process_grid = mpiops.array_split(grid)
    save_ref_pixel_blocks(process_grid, half_patch_size, ifg_paths, params)
    mean_sds = refpixel.ref_pixel_mpi(process_grid, half_patch_size, ifg_paths,
                                      thresh, params)
    mean_sds = mpiops.comm.gather(mean_sds, root=0)
    if mpiops.rank == MASTER_PROCESS:
        mean_sds = np.hstack(mean_sds)
    return mpiops.run_once(refpixel.filter_means, mean_sds, grid)
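comm.gather returns a list of per-rank objects on the root and None on every other rank, which is why the hstack above is guarded by the rank check. A minimal sketch of that step:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

local = np.full(2, float(rank))        # each rank contributes two values
gathered = comm.gather(local, root=0)  # root: [array, ...]; other ranks: None
if rank == 0:
    flat = np.hstack(gathered)         # single 1-D array across all ranks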
Code Example #20
File: run_pyrate.py  Project: wchch1010/PyRate
def _ref_pixel_calc(ifg_paths, params):
    """
    Wrapper for reference pixel calculation
    """
    # unlikely, but possible the refpixel can be (0,0)
    # check if there is a pre-specified reference pixel coord
    refx = params[cf.REFX]
    ifg = Ifg(ifg_paths[0])
    ifg.open(readonly=True)
    if refx > ifg.ncols - 1:
        msg = ('Supplied reference pixel X coordinate is greater than '
               'the number of ifg columns: {}').format(refx)
        raise ValueError(msg)

    refy = params[cf.REFY]
    if refy > ifg.nrows - 1:
        msg = ('Supplied reference pixel Y coordinate is greater than '
               'the number of ifg rows: {}').format(refy)
        raise ValueError(msg)

    if refx <= 0 or refy <= 0:  # if either zero or negative
        log.info('Searching for best reference pixel location')

        half_patch_size, thresh, grid = refpixel.ref_pixel_setup(ifg_paths, 
                                                                 params)
        process_grid = mpiops.array_split(grid)
        refpixel.save_ref_pixel_blocks(process_grid, half_patch_size,
                                       ifg_paths, params)
        mean_sds = refpixel._ref_pixel_mpi(process_grid, half_patch_size,
                                           ifg_paths, thresh, params)
        mean_sds = mpiops.comm.gather(mean_sds, root=0)
        if mpiops.rank == MASTER_PROCESS:
            mean_sds = np.hstack(mean_sds)

        refy, refx = mpiops.run_once(refpixel.find_min_mean, mean_sds, grid)
        log.info('Selected reference pixel coordinate: '
                 '({}, {})'.format(refx, refy))
    else:  # pragma: no cover
        log.info('Reusing reference pixel from config file: '
                 '({}, {})'.format(refx, refy))
    ifg.close()
    return refx, refy
Code Example #21
File: run_pyrate.py  Project: wchch1010/PyRate
def _ref_phs_method1(ifg_paths, comp):
    """
    MPI wrapper for reference phase computation using method 1.
    Refer to documentation for ref_est_phs.est_ref_phase_method1.
    """
    def _inner(ifg_path):
        """
        Compute and remove the reference phase for a single ifg
        """
        ifg = Ifg(ifg_path)
        ifg.open(readonly=False)
        phase_data = ifg.phase_data
        ref_phase = rpe._est_ref_phs_method1(phase_data, comp)
        phase_data -= ref_phase
        md = ifg.meta_data
        md[ifc.PYRATE_REF_PHASE] = ifc.REF_PHASE_REMOVED
        ifg.write_modified_phase(data=phase_data)
        ifg.close()
        return ref_phase
    this_process_ifgs = mpiops.array_split(ifg_paths)
    ref_phs = np.array([_inner(ifg) for ifg in this_process_ifgs])
    log.info('Ref phase computed in process {}'.format(mpiops.rank))
    return ref_phs
Code Example #22
def orb_fit_calc(ifg_paths, params, preread_ifgs=None):
    """
    Orbital fit correction.

    :param ifg_paths: List of ifg paths
    :param params: Parameters dictionary corresponding to config file
    
    :param preread_ifgs: Optional dictionary of interferogram characteristics

    :return: None; orbital error is removed from the interferograms in place
    """
    log.info('Calculating orbfit correction')
    if params[cf.ORBITAL_FIT_METHOD] == 1:
        prcs_ifgs = mpiops.array_split(ifg_paths)
        orbital.remove_orbital_error(prcs_ifgs, params, preread_ifgs)
    else:
        # Here we do all the multilooking in one process, in memory.
        # Multiple processes could be used if the data were written to
        # disc during the remove_orbital_error step. A performance
        # comparison of multilooked files on disc vs in-memory,
        # single-process multilooking should be made.
        if mpiops.rank == MASTER_PROCESS:
            orbital.remove_orbital_error(ifg_paths, params, preread_ifgs)
    mpiops.comm.barrier()
    log.info('Finished orbfit calculation in process {}'.format(mpiops.rank))
Code Example #23
def postprocess_timeseries(rows, cols, params):
    """
    Postprocess time series output.
    
    :param rows: Number of tile rows used to split the full-size arrays
    :param cols: Number of tile columns used to split the full-size arrays
    :param params: Parameters dictionary corresponding to config file

    :return: None; time series geotiffs are written to disk
    """
    # pylint: disable=too-many-locals
    xlks, _, crop = cf.transform_params(params)
    base_unw_paths = cf.original_ifg_paths(params[cf.IFG_FILE_LIST])
    dest_tifs = cf.get_dest_paths(base_unw_paths, crop, params, xlks)
    output_dir = params[cf.TMPDIR]

    # load previously saved preread_ifgs dict
    preread_ifgs_file = join(output_dir, 'preread_ifgs.pk')
    ifgs = cp.load(open(preread_ifgs_file, 'rb'))

    # metadata and projections
    gt, md, wkt = ifgs['gt'], ifgs['md'], ifgs['wkt']
    epochlist = ifgs['epochlist']
    ifgs = [v for v in ifgs.values() if isinstance(v, PrereadIfg)]

    tiles = run_pyrate.get_tiles(dest_tifs[0], rows, cols)

    # load the first tsincr file to determine the number of time series tifs
    tsincr_file = os.path.join(output_dir, 'tsincr_0.npy')
    tsincr = np.load(file=tsincr_file)

    # pylint: disable=no-member
    no_ts_tifs = tsincr.shape[2]
    # we create 2 x no_ts_tifs as we are splitting tsincr and tscuml
    # to all processes.
    process_tifs = mpiops.array_split(range(2 * no_ts_tifs))

    # Depending on nvelpar, the full time series cube may not fit in memory:
    # e.g. nvelpar=100, nrows=10000, ncols=10000 in 32-bit floats needs
    # 32 * 100 * 10000 * 10000 / 8 bytes = 4e10 bytes = 40 GB.
    # The double for loop below keeps memory usage bounded.
    log.info('process {} will write {} ts (incr/cuml) tifs of '
             'total {}'.format(mpiops.rank, len(process_tifs), no_ts_tifs * 2))
    for i in process_tifs:
        if i < no_ts_tifs:
            # allocate the cumulative mosaic only in the branch that fills it
            tscum_g = np.empty(shape=ifgs[0].shape, dtype=np.float32)
            for n, t in enumerate(tiles):
                tscum_file = os.path.join(output_dir,
                                          'tscuml_{}.npy'.format(n))
                tscum = np.load(file=tscum_file)

                md[ifc.EPOCH_DATE] = epochlist.dates[i + 1]
                # sequence position; first time slice is #0
                md['SEQUENCE_POSITION'] = i + 1
                tscum_g[t.top_left_y:t.bottom_right_y,
                        t.top_left_x:t.bottom_right_x] = tscum[:, :, i]
                dest = os.path.join(
                    params[cf.OUT_DIR],
                    'tscuml' + "_" + str(epochlist.dates[i + 1]) + ".tif")
                md[ifc.DATA_TYPE] = ifc.CUML
                shared.write_output_geotiff(md, gt, wkt, tscum_g, dest, np.nan)
        else:
            tsincr_g = np.empty(shape=ifgs[0].shape, dtype=np.float32)
            i %= no_ts_tifs
            for n, t in enumerate(tiles):
                tsincr_file = os.path.join(output_dir,
                                           'tsincr_{}.npy'.format(n))
                tsincr = np.load(file=tsincr_file)

                md[ifc.EPOCH_DATE] = epochlist.dates[i + 1]
                # sequence position; first time slice is #0
                md['SEQUENCE_POSITION'] = i + 1
                tsincr_g[t.top_left_y:t.bottom_right_y,
                         t.top_left_x:t.bottom_right_x] = tsincr[:, :, i]
                dest = os.path.join(
                    params[cf.OUT_DIR],
                    'tsincr' + "_" + str(epochlist.dates[i + 1]) + ".tif")
                md[ifc.DATA_TYPE] = ifc.INCR
                shared.write_output_geotiff(md, gt, wkt, tsincr_g, dest,
                                            np.nan)
    log.info('process {} finished writing {} ts (incr/cuml) tifs of '
             'total {}'.format(mpiops.rank, len(process_tifs), no_ts_tifs * 2))
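As a quick check of the memory estimate in the comments above, this snippet reproduces the arithmetic; the values are the illustrative ones from the comment, not measured figures.

nrows, ncols, nvelpar = 10_000, 10_000, 100
bytes_needed = nrows * ncols * nvelpar * 4   # float32 = 4 bytes per value
print(bytes_needed / 1e9)                    # 40.0 -> about 40 GB, as stated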