Example 1
def set_initial_map():
    global d_v, dateList, inps, data_lim

    # initial displacement map
    d_v = readfile.read(inps.timeseries_file, datasetName=dateList[inps.epoch_num])[0] * inps.unit_fac
    if inps.ref_date:
        inps.ref_d_v = readfile.read(inps.timeseries_file, datasetName=inps.ref_date)[0]*inps.unit_fac
        d_v -= inps.ref_d_v

    if mask is not None:
        d_v = mask_matrix(d_v, mask)

    if inps.ref_yx:
        d_v -= d_v[inps.ref_yx[0], inps.ref_yx[1]]

    data_lim = [np.nanmin(d_v), np.nanmax(d_v)]

    if not inps.ylim_mat:
        inps.ylim_mat = data_lim

    print('Initial data range: ' + str(data_lim))
    print('Display data range: ' + str(inps.ylim_mat))
Example 2
def mask_file(fname, mask_file, out_file, inps=None):
    """ Mask input fname with mask_file
    Inputs:
        fname/mask_file - string, 
        inps_dict - dictionary including the following options:
                    subset_x/y - list of 2 ints, subset in x/y direction
                    threshold - float, threshold/minValue to generate mask
    Output:
        out_file - string
    """
    if not inps:
        inps = cmd_line_parse()

    # read mask_file
    mask = readfile.read(mask_file)[0]
    mask = update_mask_with_inps(mask, inps)

    # masking input file
    dsNames = readfile.get_dataset_list(fname)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = {}
    for dsName in dsNames:
        # read each dataset; skip masking for coherence
        data = readfile.read(fname, datasetName=dsName, print_msg=False)[0]
        if dsName not in ['coherence']:
            print('masking {d:<{w}} from {f} ...'.format(d=dsName, w=maxDigit, f=fname))
            data = mask_matrix(data, mask, fill_value=inps.fill_value)
        dsDict[dsName] = data

    # default output filename
    if not out_file:
        fbase, fext = os.path.splitext(fname)
        out_file = '{}_msk{}'.format(fbase, fext)

    writefile.write(dsDict, out_file=out_file, ref_file=fname)
    return out_file
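mask_matrix() itself is not shown in this example. A minimal, hypothetical stand-in (not MintPy's implementation) illustrating the masking idea:

import numpy as np

def mask_matrix_simple(data, mask, fill_value=np.nan):
    """Set pixels where mask == 0 to fill_value (toy stand-in for mask_matrix)."""
    out = np.array(data, dtype=np.float32)
    out[mask == 0] = fill_value
    return out

data = np.arange(6, dtype=np.float32).reshape(2, 3)
mask = np.array([[1, 0, 1], [0, 1, 1]])
print(mask_matrix_simple(data, mask))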
Example 3
def read_isce_bperp_file(fname, out_shape, box=None):
    '''Read ISCE coarse-grid perpendicular baseline file and project it to full size.
    Parameters: fname : str, bperp file name
                out_shape : tuple of 2 int, shape of file in full resolution
                box : tuple of 4 int, subset range in (x0, y0, x1, y1) with respect to full resolution
    Returns:    data : 2D array of float32
    Example:    fname = '$PROJECT_DIR/merged/baselines/20160418/bperp'
                data = read_isce_bperp_file(fname, (3600,2200), box=(200,400,1000,1000))
    '''
    # read original data
    data_c = readfile.read(fname)[0]

    # resize to full resolution
    data_min, data_max = np.nanmin(data_c), np.nanmax(data_c)
    if data_max != data_min:
        data_c = (data_c - data_min) / (data_max - data_min)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        data = resize(data_c, out_shape, order=1, mode='edge')
    if data_max != data_min:
        data = data * (data_max - data_min) + data_min

    # for debug: compare original vs. resized data side by side
    debug_mode = False
    if debug_mode:
        import matplotlib.pyplot as plt
        fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(8, 6))
        im = ax1.imshow(readfile.read(fname)[0])
        fig.colorbar(im, ax=ax1)
        im = ax2.imshow(data)
        fig.colorbar(im, ax=ax2)
        plt.show()

    if box is not None:
        data = data[box[1]:box[3], box[0]:box[2]]
    return data
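A standalone sketch of the normalize / resize / rescale pattern used above, on synthetic data (values arbitrary):

import numpy as np
from skimage.transform import resize

coarse = np.linspace(-120., 80., 12).reshape(3, 4)     # coarse-grid bperp-like data
dmin, dmax = np.nanmin(coarse), np.nanmax(coarse)
norm = (coarse - dmin) / (dmax - dmin)                 # scale to [0, 1] before resize
full = resize(norm, (300, 400), order=1, mode='edge')  # bilinear upsampling
full = full * (dmax - dmin) + dmin                     # restore original units
print(full.shape, full.min(), full.max())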
Example 4
    def prepare_regular_grid_interpolator(self):
        """Prepare aux data for RGI module"""
        # source points in regular grid
        src_length = int(self.src_metadata['LENGTH'])
        src_width = int(self.src_metadata['WIDTH'])
        self.src_pts = (np.arange(src_length), np.arange(src_width))

        # destination points
        dest_y = readfile.read(self.file, datasetName='azimuthCoord')[0]
        dest_x = readfile.read(self.file, datasetName='rangeCoord')[0]
        if 'SUBSET_XMIN' in self.src_metadata.keys():
            print('input data file was cropped before.')
            dest_y[dest_y != 0.] -= float(self.src_metadata['SUBSET_YMIN'])
            dest_x[dest_x != 0.] -= float(self.src_metadata['SUBSET_XMIN'])
        self.interp_mask = np.multiply(np.multiply(dest_y > 0, dest_y < src_length),
                                       np.multiply(dest_x > 0, dest_x < src_width))
        self.dest_pts = np.hstack((dest_y[self.interp_mask].reshape(-1, 1),
                                   dest_x[self.interp_mask].reshape(-1, 1)))

        # destination data size
        self.length = int(self.lut_metadata['LENGTH'])
        self.width = int(self.lut_metadata['WIDTH'])
        lat0 = float(self.lut_metadata['Y_FIRST'])
        lon0 = float(self.lut_metadata['X_FIRST'])
        lat_step = float(self.lut_metadata['Y_STEP'])
        lon_step = float(self.lut_metadata['X_STEP'])
        self.laloStep = (lat_step, lon_step)
        self.SNWE = (lat0 + lat_step * (self.length - 1),
                     lat0,
                     lon0,
                     lon0 + lon_step * (self.width - 1))
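The arrays prepared above are presumably meant to feed SciPy's RegularGridInterpolator (the "RGI module" in the docstring). A standalone toy sketch of that pairing, with synthetic data:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

src = np.arange(12, dtype=np.float32).reshape(3, 4)    # toy source grid
src_pts = (np.arange(3), np.arange(4))                 # regular (row, col) axes
rgi = RegularGridInterpolator(src_pts, src, method='linear',
                              bounds_error=False, fill_value=np.nan)
dest_pts = np.array([[0.50, 0.50],                     # (y, x) query points
                     [1.25, 2.75]])
print(rgi(dest_pts))                                   # bilinear samples: [2.5, 7.75]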
Example 5
def add_file(fnames, out_file=None):
    """Generate sum of all input files
    Parameters: fnames : list of str, path/name of input files to be added
                out_file : str, optional, path/name of output file
    Returns:    out_file : str, path/name of output file
    Example:    'mask_all.h5' = add_file(['mask_1.h5','mask_2.h5','mask_3.h5'], 'mask_all.h5')
    """
    # Default output file name
    ext = os.path.splitext(fnames[0])[1]
    if not out_file:
        out_file = os.path.splitext(fnames[0])[0]
        for i in range(1, len(fnames)):
            out_file += '_plus_' + os.path.splitext(os.path.basename(fnames[i]))[0]
        out_file += ext

    atr = readfile.read_attribute(fnames[0])
    dsNames = readfile.get_dataset_list(fnames[0])
    dsDict = {}
    for dsName in dsNames:
        print('adding {} ...'.format(dsName))
        data = readfile.read(fnames[0], datasetName=dsName)[0]
        for i in range(1, len(fnames)):
            d = readfile.read(fnames[i], datasetName=dsName)[0]
            data = add_matrix(data, d)
        dsDict[dsName] = data
    writefile.write(dsDict, out_file=out_file, metadata=atr, ref_file=fnames[0])
    return out_file
Example 6
def correct_local_oscilator_drift(fname, rg_dist_file=None, out_file=None):
    print('-'*50)
    print('correct Local Oscillator Drift for Envisat using an empirical model (Marinkovic and Larsen, 2013)')
    print('-'*50)
    atr = readfile.read_attribute(fname)

    # Check Sensor Type
    platform = atr['PLATFORM']
    print('platform: '+platform)
    if not platform.lower() in ['env', 'envisat']:
        print('No need to correct LOD for '+platform)
        return

    # output file name
    if not out_file:
        out_file = '{}_LODcor{}'.format(os.path.splitext(fname)[0], os.path.splitext(fname)[1])

    # Get LOD ramp rate from empirical model
    if not rg_dist_file:
        print('calculate range distance from file metadata')
        rg_dist = get_relative_range_distance(atr)
    else:
        print('read range distance from file: %s' % (rg_dist_file))
        rg_dist = readfile.read(rg_dist_file, datasetName='slantRangeDistance', print_msg=False)[0]
        rg_dist -= rg_dist[int(atr['REF_Y']), int(atr['REF_X'])]
    ramp_rate = np.array(rg_dist * 3.87e-7, np.float32)

    # Correct LOD Ramp for Input fname
    range2phase = -4*np.pi / float(atr['WAVELENGTH'])
    k = atr['FILE_TYPE']
    if k == 'timeseries':
        # read
        obj = timeseries(fname)
        obj.open()
        data = obj.read()

        # correct LOD
        diff_year = np.array(obj.yearList)
        diff_year -= diff_year[obj.refIndex]
        for i in range(data.shape[0]):
            data[i, :, :] -= ramp_rate * diff_year[i]

        # write
        obj_out = timeseries(out_file)
        obj_out.write2hdf5(data, refFile=fname)

    elif k in ['.unw']:
        data, atr = readfile.read(fname)

        dates = ptime.yyyymmdd2years(ptime.yyyymmdd(atr['DATE12'].split('-')))
        dt = dates[1] - dates[0]
        data -= ramp_rate * range2phase * dt

        writefile.write(data, out_file=out_file, metadata=atr)
    else:
        print('No need to correct LOD for %s file' % (k))
    return out_file
Example 7
def extract_geometry_metadata(geom_dir, metadata=None):
    """extract metadata from geometry files"""
    # avoid a mutable default argument
    if metadata is None:
        metadata = {}

    def get_nonzero_row_number(data, buffer=2):
        """Find the first and last row number of rows without zero value
        for multiple swaths data
        """
        if np.all(data):
            r0, r1 = 0 + buffer, -1 - buffer
        else:
            row_flag = np.sum(data != 0., axis=1) == data.shape[1]
            row_idx = np.where(row_flag)[0]
            r0, r1 = row_idx[0] + buffer, row_idx[-1] - buffer
        return r0, r1

    # grab existing files
    geom_files = [os.path.join(os.path.abspath(geom_dir), '{}.rdr'.format(i)) 
                  for i in ['hgt','lat','lon','los']]
    geom_files = [i for i in geom_files if os.path.isfile(i)]
    print('extract metadata from geometry files: {}'.format(
        [os.path.basename(i) for i in geom_files]))

    # get A/RLOOKS
    metadata = extract_multilook_number(geom_dir, metadata)

    # update pixel_size for multilooked data
    metadata['rangePixelSize'] *= metadata['RLOOKS']
    metadata['azimuthPixelSize'] *= metadata['ALOOKS']

    # get LAT/LON_REF1/2/3/4 and HEADING into metadata
    for geom_file in geom_files:
        if 'lat' in os.path.basename(geom_file):
            data = readfile.read(geom_file)[0]
            r0, r1 = get_nonzero_row_number(data)
            metadata['LAT_REF1'] = str(data[r0, 0])
            metadata['LAT_REF2'] = str(data[r0, -1])
            metadata['LAT_REF3'] = str(data[r1, 0])
            metadata['LAT_REF4'] = str(data[r1, -1])

        if 'lon' in os.path.basename(geom_file):
            data = readfile.read(geom_file)[0]
            r0, r1 = get_nonzero_row_number(data)
            metadata['LON_REF1'] = str(data[r0, 0])
            metadata['LON_REF2'] = str(data[r0, -1])
            metadata['LON_REF3'] = str(data[r1, 0])
            metadata['LON_REF4'] = str(data[r1, -1])

        if 'los' in os.path.basename(geom_file):
            data = readfile.read(geom_file, datasetName='az')[0]
            data[data == 0.] = np.nan
            az_angle = np.nanmean(data)
            # convert isce azimuth angle to roipac orbit heading angle
            head_angle = -1 * (270 + az_angle)
            head_angle -= np.round(head_angle / 360.) * 360.
            metadata['HEADING'] = str(head_angle)
    return metadata
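A standalone check of the ISCE-azimuth to ROI_PAC-heading conversion at the end of this function (the sample azimuth angle is arbitrary):

import numpy as np

az_angle = -102.5                                 # ISCE azimuth angle [deg], example value
head_angle = -1 * (270 + az_angle)                # convert to orbit heading angle
head_angle -= np.round(head_angle / 360.) * 360.  # wrap towards (-180, 180]
print('heading: {:.1f} deg'.format(head_angle))   # -167.5 for this input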
Example 8
def get_boxes4deforming_area(vel_file, mask_file, win_size=30, min_percentage=0.2, ramp_type='quadratic', display=False):
    """Get list of boxes to cover the deforming areas.
    A pixel is identified as deforming if its velocity exceeds the MAD of the whole image.
    Parameters: vel_file : str, path of velocity file
                mask_file : str, path of mask file
                win_size  : int, length and width of the output box
                min_percentage : float between 0 and 1, minimum percentage of deforming points in the box
                ramp_type : str, type of phase ramps to be removed while evaluating the deformation
                display   : bool, plot the identification result or not
    Returns:    box_list  : list of 4-tuple of int, each indicating (col0, row0, col1, row1)
    """
    print('-'*30)
    print('get boxes on deforming areas')
    mask = readfile.read(mask_file)[0]
    vel, atr = readfile.read(vel_file)
    print('removing a {} phase ramp from input velocity before the evaluation'.format(ramp_type))
    vel = deramp(vel, mask, ramp_type=ramp_type, metadata=atr)[0]               #remove ramp before the evaluation

    # get deforming pixels
    mad = ut.median_abs_deviation_threshold(vel[mask], center=0., cutoff=3)     #deformation threshold
    print('velocity threshold / median abs dev: {:.3f} cm/yr'.format(mad))
    vel[mask == 0] = 0
    mask_aoi = (vel >= mad) + (vel <= -1. * mad)
    print('number of points: {}'.format(np.sum(mask_aoi)))

    # get deforming boxes
    box_list = []
    min_num = min_percentage * (win_size ** 2)
    length, width = vel.shape
    num_row = np.ceil(length / win_size).astype(int)
    num_col = np.ceil(width / win_size).astype(int)
    for i in range(num_row):
        r0 = i * win_size
        r1 = min([length, r0 + win_size])
        for j in range(num_col):
            c0 = j * win_size
            c1 = min([width, c0 + win_size])
            box = (c0, r0, c1, r1)
            if np.sum(mask_aoi[r0:r1, c0:c1]) >= min_num:
                box_list.append(box)
    print('number of boxes : {}'.format(len(box_list)))

    if display:
        fig, axs = plt.subplots(nrows=1, ncols=2, figsize=[12, 8], sharey=True)
        vel[mask == 0] = np.nan
        axs[0].imshow(vel, cmap='jet')
        axs[1].imshow(mask_aoi, cmap='gray')
        for box in box_list:
            for ax in axs:
                rect = Rectangle((box[0],box[1]), (box[2]-box[0]), (box[3]-box[1]), linewidth=2, edgecolor='r', fill=False)
                ax.add_patch(rect)
        plt.show()
    return box_list
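A standalone sketch of the window-tiling logic above, run on a toy boolean mask instead of a velocity file:

import numpy as np

mask_aoi = np.zeros((100, 90), dtype=bool)
mask_aoi[20:50, 10:40] = True                     # synthetic "deforming" area
win_size = 30
min_num = 0.2 * win_size**2
length, width = mask_aoi.shape
box_list = []
for r0 in range(0, length, win_size):
    r1 = min(length, r0 + win_size)
    for c0 in range(0, width, win_size):
        c1 = min(width, c0 + win_size)
        if np.sum(mask_aoi[r0:r1, c0:c1]) >= min_num:
            box_list.append((c0, r0, c1, r1))
print(box_list)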
Example 9
def multilook_file(infile, lks_y, lks_x, outfile=None):
    lks_y = int(lks_y)
    lks_x = int(lks_x)

    # input file info
    atr = readfile.read_attribute(infile)
    k = atr['FILE_TYPE']
    print('multilooking {} {} file: {}'.format(atr['PROCESSOR'], k, infile))
    print('number of looks in y / azimuth direction: %d' % lks_y)
    print('number of looks in x / range   direction: %d' % lks_x)

    # output file name
    if not outfile:
        if os.getcwd() == os.path.dirname(os.path.abspath(infile)):
            ext = os.path.splitext(infile)[1]
            outfile = os.path.splitext(infile)[0]+'_'+str(lks_y)+'alks_'+str(lks_x)+'rlks'+ext
        else:
            outfile = os.path.basename(infile)
    #print('writing >>> '+outfile)

    # read source data and multilooking
    dsNames = readfile.get_dataset_list(infile)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = dict()
    for dsName in dsNames:
        print('multilooking {d:<{w}} from {f} ...'.format(
            d=dsName, w=maxDigit, f=os.path.basename(infile)))
        data = readfile.read(infile, datasetName=dsName, print_msg=False)[0]
        data = multilook_data(data, lks_y, lks_x)
        dsDict[dsName] = data
    atr = multilook_attribute(atr, lks_y, lks_x)
    writefile.write(dsDict, out_file=outfile, metadata=atr, ref_file=infile)
    return outfile
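multilook_data() itself is not shown here. A minimal stand-in illustrating the expected behavior, spatial averaging over lks_y x lks_x windows (assuming the array dimensions are exact multiples of the look numbers):

import numpy as np

def multilook_simple(data, lks_y, lks_x):
    """Average data over non-overlapping lks_y x lks_x windows (toy stand-in)."""
    length, width = data.shape
    out = data.reshape(length // lks_y, lks_y, width // lks_x, lks_x)
    return out.mean(axis=(1, 3))

data = np.arange(24, dtype=np.float32).reshape(4, 6)
print(multilook_simple(data, 2, 3))               # 2x2 output of window means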
Example 10
    def plot_coherence_matrix4pixel(self, yx):
        """Plot coherence matrix for one pixel
        Parameters: yx : list of 2 int
        """
        # read coherence
        box = (yx[1], yx[0], yx[1]+1, yx[0]+1)
        coh = readfile.read(self.ifgram_file, datasetName='coherence', box=box)[0]
        # prep metadata
        plotDict = {}
        plotDict['fig_title'] = 'Y = {}, X = {}'.format(yx[0], yx[1])
        plotDict['colormap'] = self.colormap
        plotDict['disp_legend'] = False
        # plot
        coh_mat = pp.plot_coherence_matrix(self.ax_mat,
                                           date12List=self.date12_list,
                                           cohList=coh.tolist(),
                                           date12List_drop=self.ex_date12_list,
                                           plot_dict=plotDict)[1]
        self.fig.canvas.draw()

        # status bar
        def format_coord(x, y):
            row, col = int(y+0.5), int(x+0.5)
            date12 = sorted([self.date_list[row], self.date_list[col]])
            date12 = ['{}-{}-{}'.format(i[0:4], i[4:6], i[6:8]) for i in date12]
            return 'x={}, y={}, v={:.3f}'.format(date12[0], date12[1], coh_mat[row, col])
        self.ax_mat.format_coord = format_coord
        # info
        vprint('-'*30)
        vprint('pixel: yx = {}'.format(yx))
        vprint('min/max coherence: {:.2f} / {:.2f}'.format(np.min(coh), np.max(coh)))
        return
Example 11
    def read_lookup_table(self, print_msg=True):
        if 'Y_FIRST' in self.lut_metadata.keys():
            self.lut_y = readfile.read(self.lookup_file[0],
                                       datasetName='azimuthCoord',
                                       print_msg=print_msg)[0]
            self.lut_x = readfile.read(self.lookup_file[1],
                                       datasetName='rangeCoord',
                                       print_msg=print_msg)[0]
        else:
            self.lut_y = readfile.read(self.lookup_file[0],
                                       datasetName='latitude',
                                       print_msg=print_msg)[0]
            self.lut_x = readfile.read(self.lookup_file[1],
                                       datasetName='longitude',
                                       print_msg=print_msg)[0]
        return self.lut_y, self.lut_x
Example 12
def timeseries2ifgram(ts_file, ifgram_file, out_file='reconUnwrapIfgram.h5'):
    # read time-series
    atr = readfile.read_attribute(ts_file)
    range2phase = -4.*np.pi / float(atr['WAVELENGTH'])
    print('reading timeseries data from file {} ...'.format(ts_file))
    ts_data = readfile.read(ts_file)[0] * range2phase
    num_date, length, width = ts_data.shape
    ts_data = ts_data.reshape(num_date, -1)

    # reconstruct unwrapPhase
    print('reconstructing the interferograms from timeseries')
    stack_obj = ifgramStack(ifgram_file)
    stack_obj.open(print_msg=False)
    A1 = stack_obj.get_design_matrix4timeseries(stack_obj.get_date12_list(dropIfgram=False))[0]
    num_ifgram = A1.shape[0]
    A0 = -1.*np.ones((num_ifgram, 1))
    A = np.hstack((A0, A1))
    ifgram_est = np.dot(A, ts_data).reshape(num_ifgram, length, width)
    ifgram_est = np.array(ifgram_est, dtype=ts_data.dtype)
    del ts_data

    # write to ifgram file
    dsDict = {}
    dsDict['unwrapPhase'] = ifgram_est
    writefile.write(dsDict, out_file=out_file, ref_file=ifgram_file)
    return out_file
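A toy version of the date-pair design matrix idea used above, for 3 dates and 3 interferograms (not the exact output of get_design_matrix4timeseries, which also handles the reference date via the extra A0 column):

import numpy as np

ts = np.array([0., 2., 5.])                  # phase at dates d0 (reference), d1, d2 [rad]
date12 = [(0, 1), (1, 2), (0, 2)]            # interferogram pairs as date indices
A = np.zeros((len(date12), ts.size))
for k, (i, j) in enumerate(date12):
    A[k, i] -= 1.                            # earlier date
    A[k, j] += 1.                            # later date
print(np.dot(A, ts))                         # reconstructed ifgram phases: [2. 3. 5.]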
Example 13
def file_operation(fname, operator, operand, out_file=None):
    """Mathematical operation on a file"""

    # Basic Info
    atr = readfile.read_attribute(fname)
    k = atr['FILE_TYPE']
    print('input is '+k+' file: '+fname)
    print('operation: file %s %f' % (operator, operand))

    # default output filename
    if not out_file:
        if operator in ['+', 'plus',  'add',      'addition']:
            suffix = 'plus'
        elif operator in ['-', 'minus', 'substract', 'substraction']:
            suffix = 'minus'
        elif operator in ['*', 'times', 'multiply', 'multiplication']:
            suffix = 'multiply'
        elif operator in ['/', 'obelus', 'divide',   'division']:
            suffix = 'divide'
        elif operator in ['^', 'pow', 'power']:
            suffix = 'pow'
        else:
            raise ValueError('unrecognized operator: {}'.format(operator))
        out_file = '{}_{}{}{}'.format(os.path.splitext(fname)[0], suffix,
                                      str(operand), os.path.splitext(fname)[1])

    dsNames = readfile.get_dataset_list(fname)
    dsDict = {}
    for dsName in dsNames:
        data = readfile.read(fname, datasetName=dsName)[0]
        data = data_operation(data, operator, operand)
        dsDict[dsName] = data
    writefile.write(dsDict, out_file=out_file, metadata=atr, ref_file=fname)
    return out_file
Example 14
def read_aux_subset2inps(inps):
    # Convert All Inputs into subset_y/x/lat/lon
    # Input Priority: subset_y/x/lat/lon > reference > template > tight
    if all(not i for i in [inps.subset_x,
                           inps.subset_y,
                           inps.subset_lat,
                           inps.subset_lon]):
        # 1. Read subset info from Reference File
        if inps.reference:
            ref_atr = readfile.read_attribute(inps.reference)
            pix_box, geo_box = get_coverage_box(ref_atr)
            print('using subset info from '+inps.reference)

        # 2. Read subset info from template options
        elif inps.template_file:
            pix_box, geo_box = read_subset_template2box(inps.template_file)
            print('using subset info from '+inps.template_file)

        # 3. Use subset from tight info
        elif inps.tight:
            inps.lookup_file = ut.get_lookup_file(inps.lookup_file)
            if not inps.lookup_file:
                raise Exception('No lookup file found! Can not use --tight option without it.')

            atr_lut = readfile.read_attribute(inps.lookup_file)
            coord = ut.coordinate(atr_lut)
            if 'Y_FIRST' in atr_lut.keys():
                rg_lut = readfile.read(inps.lookup_file, datasetName='range')[0]
                rg_unique, rg_pos = np.unique(rg_lut, return_inverse=True)
                idx_row, idx_col = np.where(rg_lut != rg_unique[np.bincount(rg_pos).argmax()])
                pix_box = (np.min(idx_col) - 10, np.min(idx_row) - 10,
                           np.max(idx_col) + 10, np.max(idx_row) + 10)
                geo_box = coord.box_pixel2geo(pix_box)
                del rg_lut
            else:
                lat = readfile.read(inps.lookup_file, datasetName='latitude')[0]
                lon = readfile.read(inps.lookup_file, datasetName='longitude')[0]
                geo_box = (np.nanmin(lon), np.nanmax(lat),
                           np.nanmax(lon), np.nanmin(lat))
                pix_box = None
                del lat, lon
        else:
            raise Exception('No subset inputs found!')

        # Update subset_y/x/lat/lon
        inps = subset_box2inps(inps, pix_box, geo_box)
    return inps
Example 15
def set_dem_file():
    global ax_v, inps, img

    if inps.dem_file:
        dem = readfile.read(inps.dem_file, datasetName='height')[0]
        ax_v = pp.plot_dem_yx(ax_v, dem)

    img = ax_v.imshow(d_v, cmap=inps.colormap, clim=inps.ylim_mat, interpolation='nearest')
Example 16
    def run_load_data(self, step_name):
        """Load InSAR stacks into HDF5 files in ./inputs folder.
        It 1) copy auxiliary files into work directory (for Unvi of Miami only)
           2) load all interferograms stack files into mintpy/inputs directory.
           3) check loading result
           4) add custom metadata (optional, for HDF-EOS5 format only)
        """
        # 1) copy aux files (optional)
        self._copy_aux_file()

        # 2) loading data
        scp_args = '--template {}'.format(self.templateFile)
        if self.customTemplateFile:
            scp_args += ' {}'.format(self.customTemplateFile)
        if self.projectName:
            scp_args += ' --project {}'.format(self.projectName)
        # run
        print("load_data.py", scp_args)
        mintpy.load_data.main(scp_args.split())
        os.chdir(self.workDir)

        # 3) check loading result
        load_complete, stack_file, geom_file = ut.check_loaded_dataset(self.workDir, print_msg=True)[0:3]

        # 3.1) output waterMask.h5
        water_mask_file = 'waterMask.h5'
        if 'waterMask' in readfile.get_dataset_list(geom_file):
            print('generate {} from {} for convenience'.format(water_mask_file, geom_file))
            if ut.run_or_skip(out_file=water_mask_file, in_file=geom_file) == 'run':
                water_mask, atr = readfile.read(geom_file, datasetName='waterMask')
                atr['FILE_TYPE'] = 'waterMask'
                writefile.write(water_mask, out_file=water_mask_file, metadata=atr)

        # 4) add custom metadata (optional)
        if self.customTemplateFile:
            print('updating {}, {} metadata based on custom template file: {}'.format(
                os.path.basename(stack_file),
                os.path.basename(geom_file),
                os.path.basename(self.customTemplateFile)))
            # use ut.add_attribute() instead of add_attribute.py because of
            # better control of special metadata, such as SUBSET_X/YMIN
            ut.add_attribute(stack_file, self.customTemplate)
            ut.add_attribute(geom_file, self.customTemplate)

        # 5) if not load_complete, plot and raise exception
        if not load_complete:
            # plot result if an error occurred
            self.plot_result(print_aux=False, plot=plot)

            # go back to original directory
            print('Go back to directory:', self.cwd)
            os.chdir(self.cwd)

            # raise error
            msg = 'step {}: NOT all required dataset found, exit.'.format(step_name)
            raise RuntimeError(msg)
        return
Example 17
def read_topographic_data(geom_file, metadata):
    print('read DEM from file: '+geom_file)
    dem = readfile.read(geom_file,
                        datasetName='height',
                        print_msg=False)[0]

    print('considering the incidence angle of each pixel ...')
    inc_angle = readfile.read(geom_file,
                              datasetName='incidenceAngle',
                              print_msg=False)[0]
    dem *= 1.0/np.cos(inc_angle*np.pi/180.0)

    ref_y = int(metadata['REF_Y'])
    ref_x = int(metadata['REF_X'])
    dem -= dem[ref_y, ref_x]

    # Design matrix for elevation v.s. phase
    # dem = dem.flatten()
    return dem
Example 18
def main(argv):
    try:
        dem_file = argv[1]
        dem_error_file = argv[2]
    except:
        usage()
        sys.exit(1)

    print('Correcting the DEM')

    dem, demrsc = readfile.read(dem_file)
    dem_error = readfile.read(dem_error_file)[0]

    dem_out = dem + dem_error
    writefile.write(dem_out, out_file='DEM_w_error.dem', metadata=demrsc)

    date12_file = open('111111-222222_baseline.rsc', 'w')
    date12_file.write('P_BASELINE_TOP_ODR'+'     '+'000')
    date12_file.close()
    return
Example 19
def read_file_data(epoch=None):
    global atr, attributes, ref_dates_list

    atr = readfile.read_attribute(h5_file.get())

    file_type = atr['FILE_TYPE']

    ref_dates_list = ["All"]

    h5file = h5py.File(h5_file.get(), 'r')
    if file_type in ['HDFEOS']:
        ref_dates_list += h5file.attrs['DATE_TIMESERIES'].split()
    else:
        ref_dates_list = timeseries(h5_file.get()).get_date_list()

    if epoch and epoch != "All":
        data, attributes = readfile.read(h5_file.get(), datasetName=ref_dates_list[epoch])
    else:
        data, attributes = readfile.read(h5_file.get(), datasetName=ref_dates_list[len(ref_dates_list) - 1])

    return data
Example 20
    def get_incidence_angle(self, box=None):
        if not self.extraMetadata or 'Y_FIRST' in self.extraMetadata.keys():
            return None
        if 'height' in self.dsNames:
            dem = readfile.read(self.datasetDict['height'], datasetName='height')[0]
        else:
            dem = None
        data = ut.incidence_angle(self.extraMetadata,
                                  dem=dem,
                                  dimension=2,
                                  print_msg=False)
        if box is not None:
            data = data[box[1]:box[3], box[0]:box[2]]
        return data
Example 21
    def get_los_geometry(self, insar_obj, print_msg=False):
        lat, lon = self.get_stat_lat_lon(print_msg=print_msg)

        # get LOS geometry
        if isinstance(insar_obj, str):
            # geometry file
            atr = readfile.read_attribute(insar_obj)
            coord = ut.coordinate(atr, lookup_file=insar_obj)
            y, x = coord.geo2radar(lat, lon, print_msg=print_msg)[0:2]
            box = (x, y, x+1, y+1)
            inc_angle = readfile.read(insar_obj, datasetName='incidenceAngle', box=box, print_msg=print_msg)[0][0,0]
            az_angle  = readfile.read(insar_obj, datasetName='azimuthAngle', box=box, print_msg=print_msg)[0][0,0]
            head_angle = ut.azimuth2heading_angle(az_angle)
        elif isinstance(insar_obj, dict):
            # use mean inc/head_angle from metadata
            inc_angle = ut.incidence_angle(insar_obj, dimension=0, print_msg=print_msg)
            head_angle = float(insar_obj['HEADING'])
            # for old reading of los.rdr band2 data into headingAngle directly
            if (head_angle + 180.) > 45.:
                head_angle = ut.azimuth2heading_angle(head_angle)
        else:
            raise ValueError('input insar_obj is neither str nor dict: {}'.format(insar_obj))
        return inc_angle, head_angle
Example 22
def get_nonzero_phase_closure(ifgram_file, out_file=None, thres=0.1, unwDatasetName='unwrapPhase'):
    """Calculate/Read number of non-zero phase closure
    Parameters: ifgram_file : string, path of ifgram stack file
                out_file    : string, path of num non-zero phase closure file
    Returns:    num_nonzero_closure : 2D np.array in size of (length, width)
    """
    if not out_file:
        out_file = 'numNonzeroPhaseClosure_{}.h5'.format(unwDatasetName)
    if os.path.isfile(out_file) and readfile.read_attribute(out_file):
        print('1. read number of nonzero phase closure from file: {}'.format(out_file))
        num_nonzero_closure = readfile.read(out_file)[0]
    else:
        obj = ifgramStack(ifgram_file)
        obj.open(print_msg=False)
        length, width = obj.length, obj.width

        ref_phase = obj.get_reference_phase(unwDatasetName=unwDatasetName, dropIfgram=False)
        C = obj.get_design_matrix4triplet(obj.get_date12_list(dropIfgram=False))

        # calculate phase closure line by line to save memory usage
        num_nonzero_closure = np.zeros((length, width), np.float32)
        print('1. calculating phase closure of all pixels from dataset - {} ...'.format(unwDatasetName))
        line_step = 10
        num_loop = int(np.ceil(length / line_step))
        prog_bar = ptime.progressBar(maxValue=num_loop)
        for i in range(num_loop):
            # read phase
            i0, i1 = i*line_step, min(length, (i+1)*line_step)
            box = (0, i0, width, i1)
            pha_data = ifginv.read_unwrap_phase(obj,
                                                box,
                                                ref_phase,
                                                unwDatasetName=unwDatasetName,
                                                dropIfgram=False,
                                                print_msg=False)
            # calculate phase closure
            pha_closure = np.dot(C, pha_data)
            pha_closure = np.abs(pha_closure - ut.wrap(pha_closure))
            # get number of non-zero phase closure
            num_nonzero = np.sum(pha_closure >= thres, axis=0)
            num_nonzero_closure[i0:i1, :] = num_nonzero.reshape(i1-i0, width)
            prog_bar.update(i+1, every=1, suffix='{}/{} lines'.format((i+1)*line_step, length))
        prog_bar.close()

        atr = dict(obj.metadata)
        atr['FILE_TYPE'] = 'mask'
        atr['UNIT'] = '1'
        writefile.write(num_nonzero_closure, out_file=out_file, metadata=atr)
    return num_nonzero_closure
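A standalone sketch of the per-triplet closure test applied line by line above; a local wrap() stands in for ut.wrap:

import numpy as np

def wrap(pha):
    """Wrap phase into [-pi, pi) (stand-in for ut.wrap)."""
    return (pha + np.pi) % (2 * np.pi) - np.pi

phi = np.array([2.1, 3.0, 5.1 + 2 * np.pi])  # unwrapped phases; one carries a 2*pi jump
C = np.array([[1., 1., -1.]])                # triplet row: phi_ij + phi_jk - phi_ik
closure = np.dot(C, phi)                     # about -2*pi
print(np.abs(closure - wrap(closure)))       # about 6.28 >= thres -> non-zero closure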
Example 23
    def configure(self):
        inps = cmd_line_parse(self.iargs)
        # read network info
        inps = read_network_info(inps)
        # copy inps to self object
        for key, value in inps.__dict__.items():
            setattr(self, key, value)

        # auto figure size
        if not self.fig_size:
            ds_shape = readfile.read(self.img_file)[0].shape
            fig_size = pp.auto_figure_size(ds_shape, disp_cbar=True, ratio=0.7)
            self.fig_size = [fig_size[0]+fig_size[1], fig_size[1]]
            vprint('create figure in size of {} inches'.format(self.fig_size))
        return
Example 24
def calculate_temporal_coherence_patch(ifgram_file, timeseries_file, box=None, ifg_num_file=None):
    atr = readfile.read_attribute(timeseries_file)
    if not box:
        box = (0, 0, int(atr['WIDTH']), int(atr['LENGTH']))

    # Read timeseries data
    ts_obj = timeseries(timeseries_file)
    ts_obj.open(print_msg=False)
    print('reading timeseries data from file: {}'.format(timeseries_file))
    ts_data = ts_obj.read(box=box, print_msg=False).reshape(ts_obj.numDate, -1)
    ts_data = ts_data[1:, :]
    ts_data *= -4*np.pi/float(atr['WAVELENGTH'])

    # Read ifgram data
    stack_obj = ifgramStack(ifgram_file)
    stack_obj.open(print_msg=False)
    A = stack_obj.get_design_matrix4timeseries(stack_obj.get_date12_list(dropIfgram=True))[0]
    print('reading unwrapPhase data from file: {}'.format(ifgram_file))
    ifgram_data = stack_obj.read(datasetName='unwrapPhase', box=box).reshape(A.shape[0], -1)
    ref_value = stack_obj.get_reference_phase(dropIfgram=True).reshape((-1, 1))
    ifgram_data -= np.tile(ref_value, (1, ifgram_data.shape[1]))

    ifgram_diff = ifgram_data - np.dot(A, ts_data)
    del ts_data

    pixel_num = ifgram_data.shape[1]
    temp_coh = np.zeros((pixel_num), np.float32)
    # (fast) nasty solution, which uses all phase values including the invalid zero phases
    if not ifg_num_file:
        temp_coh = np.abs(np.sum(np.exp(1j*ifgram_diff), axis=0)) / ifgram_diff.shape[0]

    # (slow) same solution as ifgram_inversion.py, considering:
    #   1) invalid zero phase in ifgram
    #   2) design matrix rank deficiency.
    else:
        print('considering different number of interferograms used in network inversion for each pixel')
        ifg_num_map = readfile.read(ifg_num_file, box=box)[0].flatten()
        prog_bar = ptime.progressBar(maxValue=pixel_num)
        for i in range(pixel_num):
            if ifg_num_map[i] > 0:
                idx = ifgram_data[:, i] != 0.
                temp_diff = ifgram_diff[idx, i]
                temp_coh[i] = np.abs(np.sum(np.exp(1j*temp_diff), axis=0)) / temp_diff.shape[0]
            prog_bar.update(i+1, every=1000, suffix='{}/{}'.format(i+1, pixel_num))
        prog_bar.close()

    temp_coh = np.reshape(temp_coh, (box[3]-box[1], box[2]-box[0]))
    return temp_coh
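A standalone numeric check of the temporal coherence measure used in the fast branch above, coh = |sum(exp(1j * residual))| / N, which equals 1 for zero residual phases:

import numpy as np

residual = np.array([0.0, 0.1, -0.2, 0.15])  # ifgram minus reconstructed phase [rad]
temp_coh = np.abs(np.sum(np.exp(1j * residual))) / residual.size
print('{:.3f}'.format(temp_coh))             # close to 1 -> reliable pixel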
Example 25
    def generate_temporal_coherence_mask(self):
        """Generate reliable pixel mask from temporal coherence"""
        geom_file = ut.check_loaded_dataset(self.workDir, print_msg=False)[2]
        tcoh_file = 'temporalCoherence.h5'
        mask_file = 'maskTempCoh.h5'
        tcoh_min = self.template['mintpy.networkInversion.minTempCoh']

        scp_args = '{} -m {} -o {} --shadow {}'.format(tcoh_file, tcoh_min, mask_file, geom_file)
        print('generate_mask.py', scp_args)

        # update mode: run only if:
        # 1) output file exists and newer than input file, AND
        # 2) all config keys are the same
        config_keys = ['mintpy.networkInversion.minTempCoh']
        print('update mode: ON')
        flag = 'skip'
        if ut.run_or_skip(out_file=mask_file, in_file=tcoh_file, print_msg=False) == 'run':
            flag = 'run'
        else:
            print('1) output file: {} already exists and newer than input file: {}'.format(mask_file, tcoh_file))
            atr = readfile.read_attribute(mask_file)
            if any(str(self.template[i]) != atr.get(i, 'False') for i in config_keys):
                flag = 'run'
                print('2) NOT all key configuration parameters are the same: {}'.format(config_keys))
            else:
                print('2) all key configuration parameters are the same: {}'.format(config_keys))
        print('run or skip: {}'.format(flag))

        if flag == 'run':
            mintpy.generate_mask.main(scp_args.split())
            # update configKeys
            atr = {}
            for key in config_keys:
                atr[key] = self.template[key]
            ut.add_attribute(mask_file, atr)

        # check number of pixels selected in mask file for following analysis
        num_pixel = np.sum(readfile.read(mask_file)[0] != 0.)
        print('number of reliable pixels: {}'.format(num_pixel))

        min_num_pixel = float(self.template['mintpy.networkInversion.minNumPixel'])
        if num_pixel < min_num_pixel:
            msg = "Not enough reliable pixels (minimum of {}). ".format(int(min_num_pixel))
            msg += "Try the following:\n"
            msg += "1) Check the reference pixel and make sure it's not in areas with unwrapping errors\n"
            msg += "2) Check the network and make sure it's fully connected without subsets"
            raise RuntimeError(msg)
        return
Example 26
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    # 1. read data
    data, atr = readfile.read(inps.file, datasetName=inps.dset)

    # 2. write GMT .grd file
    if not inps.outfile:
        outbase = pp.auto_figure_title(inps.file,
                                       datasetNames=inps.dset,
                                       inps_dict=vars(inps))
        inps.outfile = '{}.grd'.format(outbase)

    inps.outfile = write_grd_file(data, atr, inps.outfile)
    print('Done.')
    return inps.outfile
Example 27
    def run_network_modification(self, step_name):
        """Modify network of interferograms before the network inversion."""
        # check the existence of ifgramStack.h5
        stack_file, geom_file = ut.check_loaded_dataset(self.workDir,
                                                        print_msg=False)[1:3]
        coh_txt = '{}_coherence_spatialAvg.txt'.format(
            os.path.splitext(os.path.basename(stack_file))[0])
        try:
            net_fig = [
                i for i in ['Network.pdf', 'pic/Network.pdf']
                if os.path.isfile(i)
            ][0]
        except:
            net_fig = None

        # 1) output waterMask.h5 to simplify the detection/use of waterMask
        water_mask_file = 'waterMask.h5'
        if 'waterMask' in readfile.get_dataset_list(geom_file):
            print('generate {} from {} for convenience'.format(
                water_mask_file, geom_file))
            if ut.run_or_skip(out_file=water_mask_file,
                              in_file=geom_file) == 'run':
                water_mask, atr = readfile.read(geom_file,
                                                datasetName='waterMask')
                atr['FILE_TYPE'] = 'waterMask'
                writefile.write(water_mask,
                                out_file=water_mask_file,
                                metadata=atr)

        # 2) modify network
        scp_args = '{} -t {}'.format(stack_file, self.templateFile)
        print('modify_network.py', scp_args)
        mintpy.modify_network.main(scp_args.split())

        # 3) plot network
        scp_args = '{} -t {} --nodisplay'.format(stack_file, self.templateFile)
        print('\nplot_network.py', scp_args)
        if ut.run_or_skip(out_file=net_fig,
                          in_file=[stack_file, coh_txt, self.templateFile],
                          check_readable=False) == 'run':
            mintpy.plot_network.main(scp_args.split())

        # 4) aux files: maskConnComp and avgSpatialCoh
        self.generate_ifgram_aux_file()
        return
Example 28
def main(iargs=None):
    inps = cmd_line_parse(iargs)

    atr = readfile.read_attribute(inps.file[0])
    box = read_inps2box(inps, atr)
    num_file = len(inps.file)

    # initiate output
    dsDict = {}
    if num_file == 1:
        dsDict[inps.dset_name] = np.zeros((box[3]-box[1], box[2]-box[0]), dtype=inps.data_type)
    else:
        dsDict[inps.dset_name] = np.zeros((num_file, box[3]-box[1], box[2]-box[0]), dtype=inps.data_type)

        # add "date" dataset for timeseries
        if inps.dset_name == 'timeseries':
            date_list = [os.path.basename(os.path.dirname(x)) for x in inps.file]
            date_str_format = ptime.get_date_str_format(date_list[0])
            dsDict['date'] = np.array(date_list, dtype=np.string_)

    # metadata
    if inps.metadata:
        print('add/update the following metadata:')
        for meta_str in inps.metadata:
            key, value = meta_str.split('=')
            atr[key] = value
            print(f'{key} : {value}')

    # read
    for i, fname in enumerate(inps.file):
        print(f'reading file {i+1} / {num_file}: {fname}')
        ds_names = readfile.get_dataset_list(fname)
        ds_name = inps.dset_name if inps.dset_name in ds_names else None
        data = readfile.read(fname, datasetName=ds_name, box=box)[0]
        if num_file == 1:
            dsDict[inps.dset_name][:] = data
        else:
            dsDict[inps.dset_name][i, :, :] = data

    # write
    atr['LENGTH'] = box[3] - box[1]
    atr['WIDTH'] = box[2] - box[0]
    writefile.write(dsDict, out_file=inps.outfile, metadata=atr)

    return inps.outfile
Example 29
def get_lookup_file(filePattern=None, abspath=False, print_msg=True):
    """Find lookup table file with/without input file pattern"""
    # Search Existing Files
    if not filePattern:
        filePattern = [
            'geometryRadar.h5', 'geometryGeo_tight.h5', 'geometryGeo.h5',
            'geomap*lks_tight.trans', 'geomap*lks.trans',
            'sim*_tight.UTM_TO_RDC', 'sim*.UTM_TO_RDC'
        ]
        filePattern = [os.path.join('inputs', i)
                       for i in filePattern] + filePattern
    existFiles = []
    try:
        existFiles = get_file_list(filePattern)
    except:
        if print_msg:
            print('ERROR: No geometry / lookup table file found!')
            print('It should be like:')
            print(filePattern)
        return None

    # Check Files Info
    outFile = None
    for fname in existFiles:
        atr = readfile.read_attribute(fname)
        for dsName in ['rangeCoord', 'longitude']:
            try:
                dset = readfile.read(fname,
                                     datasetName=dsName,
                                     print_msg=False)[0]
                outFile = fname
                break
            except:
                pass

    if not outFile:
        if print_msg:
            print('No lookup table info range/lat found in files.')
        return None

    # Path Format
    if abspath:
        outFile = os.path.abspath(outFile)
    return outFile
Example 30
def dem_jpeg(dem_file):
    """generate dem.jepg file based on Yunjun's code"""
    out_file = dem_file + '.jpeg'
    rsc_file = out_file + '.rsc'
    shutil.copy2(dem_file + '.rsc', rsc_file)
    # read data
    dem = readfile.read(dem_file)[0]
    print('dem.shape:', dem.shape)
    # figure size
    ds_shape = tuple(reversed(dem.shape))
    fig_dpi = 300
    fig_size = [i / fig_dpi for i in ds_shape]
    print('fig_size:', fig_size)
    # color range
    disp_min = np.nanmin(dem) - 4000
    disp_max = np.nanmax(dem) + 2000
    # prepare shaded relief
    ls = LightSource(azdeg=315, altdeg=45)
    dem_shade = ls.shade(dem,
                         vert_exag=0.3,
                         cmap=plt.get_cmap('gray'),
                         vmin=disp_min,
                         vmax=disp_max)
    dem_shade[np.isnan(dem_shade[:, :, 0])] = np.nan
    print('dem_shade.shape:', dem_shade.shape)
    # plot
    fig, ax = plt.subplots(figsize=fig_size)
    ax.imshow(dem_shade, interpolation='spline16', origin='upper')
    # get rid of whitespace on the side
    ax.axis('off')
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
    # output
    print('save figure to file {}'.format(out_file))
    plt.savefig(out_file, transparent=True, dpi=300, pad_inches=0.0)

    # resize to desired size (FA 8/19, unclear why size is wrong)
    # note: PIL expects (width, height), i.e. the reversed numpy shape
    im = Image.open(out_file)
    im_out = im.resize(ds_shape, Image.NEAREST)
    im_out.save(out_file)

    plt.show()
Example 31
def write_json(inps, point_disp, point_angle):
    """write points_disp.json and points_angle.json"""
    atr = readfile.read(inps.file[0])[1]
    lat0 = float(atr['Y_FIRST'])
    lon0 = float(atr['X_FIRST'])
    lat_step = float(atr['Y_STEP'])
    lon_step = float(atr['X_STEP'])
    lat_ref = float(atr['REF_LAT'])
    lon_ref = float(atr['REF_LON'])
    
    # write points_disp.json
    displacement = {"Ref_lat": lat_ref, "Ref_lon": lon_ref, "Y_FIRST": lat0, "X_FIRST": lon0,
                    "Y_STEP": lat_step, "X_STEP": lon_step, "lalo_value": point_disp.tolist()}
    with open('points_disp.json', 'w') as f:
        f.write(json.dumps(displacement))

    # write points_angle.json
    angles = {"Y_FIRST": lat0, "X_FIRST": lon0, "Y_STEP": lat_step, "X_STEP": lon_step,
              "lalo_value": point_angle.tolist()}
    with open('points_angle.json', 'w') as f:
        f.write(json.dumps(angles))

    return
Example 32
def ref_date_file(ts_file, ref_date, outfile=None):
    """Change input file reference date to a different one.
    Parameters: ts_file : str, timeseries file to be changed
                ref_date : str, date in YYYYMMDD format
                outfile  : if str, save to a different file
                           if None, modify the data value in the existing input file
    """
    print('-' * 50)
    print('change reference date for file: {}'.format(ts_file))
    atr = readfile.read_attribute(ts_file)
    if ref_date == atr['REF_DATE']:
        print('same reference date chosen as existing reference date.')
        if not outfile:
            print('Nothing to be done.')
            return ts_file
        else:
            print('Copy {} to {}'.format(ts_file, outfile))
            shutil.copy2(ts_file, outfile)
            return outfile
    else:
        obj = timeseries(ts_file)
        obj.open(print_msg=False)
        ref_idx = obj.dateList.index(ref_date)
        print('reading data ...')
        ts_data = readfile.read(ts_file)[0]

        ts_data -= np.tile(
            ts_data[ref_idx, :, :].reshape(1, obj.length, obj.width),
            (obj.numDate, 1, 1))

        if not outfile:
            print('open {} with r+ mode'.format(ts_file))
            with h5py.File(ts_file, 'r+') as f:
                print(
                    "update /timeseries dataset and 'REF_DATE' attribute value"
                )
                f['timeseries'][:] = ts_data
                f.attrs['REF_DATE'] = ref_date
            print('close {}'.format(ts_file))
        else:
            atr['REF_DATE'] = ref_date
            writefile.write(ts_data, outfile, metadata=atr, ref_file=ts_file)
    return outfile
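A standalone sketch of the reference-date shift above: subtracting the chosen epoch from every acquisition makes the time series read zero at the new reference date:

import numpy as np

ts_data = np.arange(24, dtype=np.float32).reshape(4, 2, 3)  # (num_date, length, width)
ref_idx = 2
ts_data -= np.tile(ts_data[ref_idx, :, :].reshape(1, 2, 3), (4, 1, 1))
print(ts_data[ref_idx])                                     # all zeros at the new reference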
Example 33
    def plot(self):
        # read 3D time-series
        self.ts_data, self.mask = read_timeseries_data(self)[0:2]

        # Figure 1 - Cumulative Displacement Map
        self.fig_img = plt.figure(self.figname_img, figsize=self.figsize_img)

        # Figure 1 - Axes 1 - Displacement Map
        self.ax_img = self.fig_img.add_axes([0.125, 0.25, 0.75, 0.65])
        img_data = np.array(self.ts_data[0][self.init_idx, :, :])

        if self.velocity:
            img_data, attr00 = readfile.read(self.velocity,
                                             datasetName='velocity')
            img_data = img_data * 100

        img_data[self.mask == 0] = np.nan
        self.plot_init_image(img_data)

        # Figure 1 - Axes 2 - Time Slider
        self.ax_tslider = self.fig_img.add_axes([0.2, 0.1, 0.6, 0.07])
        self.plot_init_time_slider(init_idx=self.init_idx,
                                   ref_idx=self.ref_idx)
        self.tslider.on_changed(self.update_time_slider)

        # Figure 2 - Time Series Displacement - Point
        self.fig_pts, self.ax_pts = plt.subplots(num=self.figname_pts,
                                                 figsize=self.figsize_pts)
        d_ts = None
        if self.yx:
            d_ts = self.plot_point_timeseries(self.yx)

        # Output
        if self.save_fig:
            save_ts_plot(self.yx, self.fig_img, self.fig_pts, d_ts, self)

        # Final linking of the canvas to the plots.
        self.cid = self.fig_img.canvas.mpl_connect('button_press_event',
                                                   self.update_plot_timeseries)
        if self.disp_fig:
            vprint('showing ...')
            plt.show()
        return
Example 34
def select_max_coherence_yx(coh_file, mask=None, min_coh=0.85):
    """Select pixel with coherence > min_coh in random"""
    print('random select pixel with coherence > {}'.format(min_coh))
    print('\tbased on coherence file: '+coh_file)
    coh, coh_atr = readfile.read(coh_file)
    if mask is not None:
        coh[mask == 0] = 0.0
    coh_mask = coh >= min_coh
    if np.all(coh_mask == 0.):
        msg = ('No pixel with average spatial coherence > {} '
               'found for automatic reference point selection!').format(min_coh)
        msg += '\nTry the following:'
        msg += '\n  1) manually specify the reference point using mintpy.reference.yx/lalo option.'
        msg += '\n  2) change mintpy.reference.minCoherence to a lower value.'
        raise RuntimeError(msg)

    y, x = random_select_reference_yx(coh_mask, print_msg=False)
    #y, x = np.unravel_index(np.argmax(coh), coh.shape)
    print('y/x: {}'.format((y, x)))
    return y, x
Example 35
def estimate_linear_velocity(inps):
    # read time-series data
    print('reading data from file {} ...'.format(inps.timeseries_file))
    ts_data, atr = readfile.read(inps.timeseries_file)
    ts_data = ts_data[inps.dropDate, :, :].reshape(inps.numDate, -1)
    if atr['UNIT'] == 'mm':
        ts_data *= 1. / 1000.
    length, width = int(atr['LENGTH']), int(atr['WIDTH'])

    # The following is equivalent
    # X = scipy.linalg.lstsq(A, ts_data, cond=1e-15)[0]
    # It is not used because it cannot handle NaN values in ts_data
    A = timeseries.get_design_matrix4average_velocity(inps.dateList)
    X = np.dot(np.linalg.pinv(A), ts_data)
    vel = np.array(X[0, :].reshape(length, width), dtype=dataType)

    # velocity STD (Eq. (10), Fattahi and Amelung, 2015)
    ts_diff = ts_data - np.dot(A, X)
    t_diff = A[:, 0] - np.mean(A[:, 0])
    vel_std = np.sqrt(
        np.sum(ts_diff**2, axis=0) / np.sum(t_diff**2) / (inps.numDate - 2))
    vel_std = np.array(vel_std.reshape(length, width), dtype=dataType)

    # prepare attributes
    atr['FILE_TYPE'] = 'velocity'
    atr['UNIT'] = 'm/year'
    atr['START_DATE'] = inps.dateList[0]
    atr['END_DATE'] = inps.dateList[-1]
    atr['DATE12'] = '{}_{}'.format(inps.dateList[0], inps.dateList[-1])
    # config parameter
    print('add/update the following configuration metadata:\n{}'.format(
        configKeys))
    for key in configKeys:
        atr[key_prefix + key] = str(vars(inps)[key])

    # write to HDF5 file
    dsDict = dict()
    dsDict['velocity'] = vel
    dsDict['velocityStd'] = vel_std
    writefile.write(dsDict, out_file=inps.outfile, metadata=atr)
    return inps.outfile
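A standalone numeric check of the pinv-based fit above on a synthetic linear history; this builds its own [rate, offset] design matrix rather than calling get_design_matrix4average_velocity:

import numpy as np

t = np.array([2016.0, 2016.5, 2017.0, 2017.5])  # decimal years
ts = 0.03 * (t - t[0])                          # exact 3 cm/yr displacement [m]
A = np.vstack((t - t[0], np.ones_like(t))).T    # [rate, offset] design matrix
X = np.dot(np.linalg.pinv(A), ts.reshape(-1, 1))
print('velocity: {:.4f} m/yr'.format(X[0, 0]))  # -> 0.0300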
Example 36
def hdf_to_geotif(inps):
    """convert an HDF-EOS5 dataset to a GeoTIFF file"""
    fname = inps.input_HDFEOS[0] # for asc
    output_path = inps.outdir[0]
    output_tif = output_path + inps.output[0]
    print('output tiff file name : %s' % output_tif)
    atr = readfile.read_attribute(fname)
    data = readfile.read(fname)[0]

    if inps.unit == 'None':
        print('using the original unit of the input')
        disp = data
    elif inps.unit[0] == 'm':
        # the unit of velocity is m/year
        disp = data * 100 # change unit to cm/year
    elif inps.unit[0] == 'radian':
        # the unit of data is radian
        wavelength = float(atr['WAVELENGTH'])
        disp = (((-1) * (data * wavelength)) / (4 * np.pi)) * 100 # change unit to cm

    xmin = float(atr['X_FIRST'])  # corresponding to X_FIRST
    ymax = float(atr['Y_FIRST'])  # corresponding to Y_FIRST
    nrows, ncols = np.shape(disp)
    
    xres = float(atr['X_STEP']) # corresponding to X_STEP
    yres = (-1) * float(atr['Y_STEP'])  # corresponding to Y_STEP

    # xmin, ymin, xmax, ymax = [np.nanmin(lon), np.nanmin(lat), np.nanmax(lon), np.nanmax(lat)]
    # nrows, ncols = np.shape(disp)
    # xres = (xmax - xmin) / float(ncols)
    # yres = (ymax - ymin) / float(nrows)
    geotransform = [xmin, xres, 0, ymax, 0, -yres]
    raster = gdal.GetDriverByName('GTiff').Create(output_tif, ncols, nrows, 1, gdal.GDT_Float32)
    raster.SetGeoTransform(geotransform)
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)  # WGS 84 - WGS84 - World Geodetic System 1984, used in GPS
    raster.SetProjection(srs.ExportToWkt())
    raster.GetRasterBand(1).WriteArray(disp)
    raster.FlushCache()

    print('finish conversion!')
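For reference, the geotransform list built above follows GDAL's affine convention: [top-left x, pixel width, row rotation, top-left y, column rotation, pixel height], where the pixel height must be negative for north-up rasters. A standalone sketch with arbitrary example values:

xmin, ymax = -118.0, 35.0            # example upper-left corner [deg]
xres, yres = 0.001, 0.001            # example pixel sizes [deg]
geotransform = [xmin, xres, 0, ymax, 0, -yres]
# pixel (row, col) maps to: x = xmin + col * xres, y = ymax - row * yres
print(geotransform)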
Example 37
def set_mask():
    global mask, inps, atr

    if not inps.mask_file:
        if os.path.basename(inps.timeseries_file).startswith('geo_'):
            file_list = ['geo_maskTempCoh.h5']
        else:
            file_list = ['maskTempCoh.h5', 'maskConnComp.h5']

        try:
            inps.mask_file = ut.get_file_list(file_list)[0]
        except:
            inps.mask_file = None

    try:
        mask = readfile.read(inps.mask_file, datasetName='mask')[0]
        mask[mask != 0] = 1
        print('load mask from file: ' + inps.mask_file)
    except:
        mask = None
        print('No mask used.')
Example 38
def generate_polygon_cbox(inps):
    """generate covarage box
    lat0,lon0- starting latitude/longitude (first row/column)
    lat1,lon1- ending latitude/longitude (last row/column)
    """
    file_name = inps.file[0]
    atr = readfile.read(file_name)[1]
    length, width = int(atr['LENGTH']), int(atr['WIDTH'])

    if 'Y_FIRST' in atr.keys():
        # geo coordinates
        lat0 = float(atr['Y_FIRST'])
        lon0 = float(atr['X_FIRST'])
        lat_step = float(atr['Y_STEP'])
        lon_step = float(atr['X_STEP'])
        lat1 = lat0 + (length - 1) * lat_step
        lon1 = lon0 + (width - 1) * lon_step
    else:
        # radar coordinates
        lats = [float(atr['LAT_REF{}'.format(i)]) for i in [1, 2, 3, 4]]
        lons = [float(atr['LON_REF{}'.format(i)]) for i in [1, 2, 3, 4]]
        lat0 = np.mean(lats[0:2])
        lat1 = np.mean(lats[2:4])
        lon0 = np.mean(lons[0:3:2])
        lon1 = np.mean(lons[1:4:2])

    # lon/lat of four points: upperright; lowerright; lowerleft; upperleft
    lon_ur = lon1
    lat_ur = lat0

    lon_lr = lon1
    lat_lr = lat1

    lon_ll = lon0
    lat_ll = lat1

    lon_ul = lon0
    lat_ul = lat0

    return lon_ur, lat_ur, lon_lr, lat_lr, lon_ll, lat_ll, lon_ul, lat_ul
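For the geo-coordinate branch, the corner derivation can be checked standalone with a hypothetical metadata dictionary (Y_STEP is negative because latitude decreases with row number):

atr = {'LENGTH': '450', 'WIDTH': '600',
       'Y_FIRST': '34.0', 'X_FIRST': '-118.0',
       'Y_STEP': '-0.001', 'X_STEP': '0.001'}  # hypothetical geocoded metadata

length, width = int(atr['LENGTH']), int(atr['WIDTH'])
lat0, lon0 = float(atr['Y_FIRST']), float(atr['X_FIRST'])
lat1 = lat0 + (length - 1) * float(atr['Y_STEP'])  # last row
lon1 = lon0 + (width - 1) * float(atr['X_STEP'])   # last column
# corners in UR, LR, LL, UL order, matching the return value above
print((lon1, lat0), (lon1, lat1), (lon0, lat1), (lon0, lat0))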
Example n. 42
def multilook_file(infile, lks_y, lks_x, outfile=None):
    lks_y = int(lks_y)
    lks_x = int(lks_x)

    # input file info
    atr = readfile.read_attribute(infile)
    k = atr['FILE_TYPE']
    print('multilooking {} {} file: {}'.format(atr['PROCESSOR'], k, infile))
    print('number of looks in y / azimuth direction: %d' % lks_y)
    print('number of looks in x / range   direction: %d' % lks_x)

    # output file name
    if not outfile:
        if os.getcwd() == os.path.dirname(os.path.abspath(infile)):
            ext = os.path.splitext(infile)[1]
            outfile = os.path.splitext(infile)[0] + '_' + str(
                lks_y) + 'alks_' + str(lks_x) + 'rlks' + ext
        else:
            outfile = os.path.basename(infile)
    #print('writing >>> '+outfile)

    # read source data and multilooking
    dsNames = readfile.get_dataset_list(infile)
    maxDigit = max([len(i) for i in dsNames])
    dsDict = dict()
    for dsName in dsNames:
        print('multilooking {d:<{w}} from {f} ...'.format(
            d=dsName, w=maxDigit, f=os.path.basename(infile)))
        data = readfile.read(infile, datasetName=dsName, print_msg=False)[0]

        # keep timeseries data as 3D matrix when there is only one acquisition
        # because readfile.read() will squeeze it to 2D
        if atr['FILE_TYPE'] == 'timeseries' and len(data.shape) == 2:
            data = np.reshape(data, (1, data.shape[0], data.shape[1]))

        data = multilook_data(data, lks_y, lks_x)
        dsDict[dsName] = data
    atr = multilook_attribute(atr, lks_y, lks_x)
    writefile.write(dsDict, out_file=outfile, metadata=atr, ref_file=infile)
    return outfile
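multilook_data() itself is not shown here; a plausible numpy-only stand-in is a block average over lks_y x lks_x windows, assuming the array is first trimmed to full multiples of the look numbers:

import numpy as np

def multilook_data_sketch(data, lks_y, lks_x):
    """Box-average multilook of a 2D array (a sketch only; the real
    multilook_data also supports 3D stacks and other methods)."""
    rows = (data.shape[0] // lks_y) * lks_y
    cols = (data.shape[1] // lks_x) * lks_x
    data = data[:rows, :cols]
    return data.reshape(rows // lks_y, lks_y,
                        cols // lks_x, lks_x).mean(axis=(1, 3))

print(multilook_data_sketch(np.arange(24.).reshape(4, 6), 2, 3))  # 2x2 output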
Example n. 43
def bootstrap(timeseriesFile, bootCount):
    ts_data, atr = readfile.read(timeseriesFile)
    tsData = timeseries(timeseriesFile)
    if atr['UNIT'] == 'mm':
        ts_data *= 1. / 1000.

    length, width = int(atr['LENGTH']), int(atr['WIDTH'])
    dateList = tsData.get_date_list()
    sampleNo = len(dateList)
    vel = np.zeros((bootCount, (length * width)))
    prog_bar = ptime.progressBar(maxValue=bootCount, prefix='Calculating ')
    for i in range(bootCount):
        bootSamples = list(
            np.sort(resample(dateList, replace=True, n_samples=sampleNo)))
        # dropList = [x for x in dateList if x not in bootSamples]

        prog_bar.update(i + 1, suffix='Running boot number: ' + str(i + 1))
        bootList = []
        for k in bootSamples:
            bootList.append(dateList.index(k))
        numDate = len(bootList)
        ts_data_sub = ts_data[bootList, :, :].reshape(numDate, -1)

        A = tsData.get_design_matrix4average_velocity(bootSamples)
        X = np.dot(np.linalg.pinv(A), ts_data_sub)
        vel[i] = np.array(X[0, :], dtype='float32')

    prog_bar.close()
    print('Finished resampling and velocity calculation')
    velMean = vel.mean(axis=0).reshape(length, width)
    velStd = vel.std(axis=0).reshape(length, width)
    print('Calculated mean and standard deviation of bootstrap estimations')

    atr['FILE_TYPE'] = 'velocity'
    atr['UNIT'] = 'm/year'
    atr['START_DATE'] = dateList[0]
    atr['END_DATE'] = dateList[-1]
    atr['DATE12'] = '{}_{}'.format(dateList[0], dateList[-1])

    return velMean, velStd, atr
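Each bootstrap iteration is an ordinary least-squares velocity fit on a resampled set of acquisition dates. The idea in miniature, with synthetic data and a simple [time, 1] design matrix standing in for get_design_matrix4average_velocity():

import numpy as np

rng = np.random.default_rng(0)
t = np.arange(10, dtype=float)              # acquisition times in years
ts = 0.005 * t + rng.normal(0, 1e-4, 10)    # 5 mm/yr motion plus noise

boot_vel = []
for _ in range(200):
    idx = np.sort(rng.choice(10, size=10, replace=True))  # resample epochs
    A = np.column_stack([t[idx], np.ones(idx.size)])      # stand-in design matrix
    boot_vel.append((np.linalg.pinv(A) @ ts[idx])[0])
print(np.mean(boot_vel), np.std(boot_vel))  # velocity estimate and its uncertainty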
Example n. 44
def read_HDFEOS(inps):
    """read displacement from HDFEOS"""
    print('read displacement, incidence and azimuth information')
    # read metadata
    HDFEOS_file = inps.input_HDFEOS[0]
    atr = readfile.read_attribute(HDFEOS_file)

    if inps.date is None:
        date1 = atr['START_DATE']
        date2 = atr['END_DATE']
    else:
        # date1 and date2
        if '_' in "".join(inps.date):
            date1, date2 = ptime.yyyymmdd("".join(inps.date).split('_'))
        else:
            date1 = atr['START_DATE']
            date2 = ptime.yyyymmdd("".join(inps.date))
    # read angle information
    azimuth = readfile.read(HDFEOS_file, datasetName='/HDFEOS/GRIDS/timeseries/geometry/azimuthAngle')[0]
    incidence = readfile.read(HDFEOS_file, datasetName='/HDFEOS/GRIDS/timeseries/geometry/incidenceAngle')[0]
    
    if inps.velocity:
        vel_file = 'velocity.h5'

        iargs = [HDFEOS_file, '--start-date', date1, '--end-date', date2, '-o', vel_file, '--update']
        print('\ntimeseries2velocity.py', ' '.join(iargs))
        mintpy.timeseries2velocity.main(iargs)

        data = readfile.read(vel_file, datasetName='velocity')[0]
        os.remove(vel_file) 
    else:
        # read / prepare data
        slice_list = readfile.get_slice_list(HDFEOS_file)
        dname = 'displacement'
        slice_name1 = view.search_dataset_input(slice_list, '{}-{}'.format(dname, date1))[0][0]
        slice_name2 = view.search_dataset_input(slice_list, '{}-{}'.format(dname, date2))[0][1]
        data = readfile.read("".join(inps.input_HDFEOS), datasetName=slice_name2)[0]
        data -= readfile.read("".join(inps.input_HDFEOS), datasetName=slice_name1)[0]
    
    print("mask file")
    maskfile = readfile.read(HDFEOS_file, datasetName='/HDFEOS/GRIDS/timeseries/quality/mask')[0]
    data[maskfile == 0] = np.nan
    azimuth[maskfile == 0] = np.nan
    incidence[maskfile == 0] = np.nan
    
    return date1, date2, data, atr, incidence, azimuth
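The masking at the end is plain boolean indexing with NaN fill; standalone:

import numpy as np

data = np.arange(6, dtype=float).reshape(2, 3)
mask = np.array([[1, 0, 1],
                 [1, 1, 0]])
data[mask == 0] = np.nan  # same masking applied to displacement and both angle layers
print(data)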
Example n. 45
def rewrite_slave(inps, offset):
    s_atr = readfile.read_attribute("".join(inps.slave))
    s_data = readfile.read("".join(inps.slave))[0]

    s_data_offset = s_data + offset

    # default output filename: <input_base>_offset.<ext>
    fbase, fext = os.path.splitext(os.path.basename("".join(inps.slave)))
    out_file = '{}_offset{}'.format(fbase, fext)

    out_dir = inps.outdir[0] if inps.outdir is not None else '.'
    out_dir_file = os.path.join(out_dir, out_file)

    if inps.rewrite_slave:
        print('Writing slave data %s' % "".join(inps.slave))
        writefile.write(s_data_offset, out_file=out_dir_file, metadata=s_atr)

    return s_data_offset
Example n. 46
def ref_date_file(ts_file, ref_date, outfile=None):
    """Change input file reference date to a different one.
    Parameters: ts_file : str, timeseries file to be changed
                ref_date : str, date in YYYYMMDD format
                outfile  : if str, save to a different file
                           if None, modify the data value in the existing input file
    """
    print('-'*50)
    print('change reference date for file: {}'.format(ts_file))
    atr = readfile.read_attribute(ts_file)
    if ref_date == atr['REF_DATE']:
        print('same reference date chosen as existing reference date.')
        if not outfile:
            print('Nothing to be done.')
            return ts_file
        else:
            print('Copy {} to {}'.format(ts_file, outfile))
            shutil.copy2(ts_file, outfile)
            return outfile
    else:
        obj = timeseries(ts_file)
        obj.open(print_msg=False)
        ref_idx = obj.dateList.index(ref_date)
        print('reading data ...')
        ts_data = readfile.read(ts_file)[0]

        ts_data -= np.tile(ts_data[ref_idx, :, :].reshape(1, obj.length, obj.width), (obj.numDate, 1, 1))

        if not outfile:
            print('open {} with r+ mode'.format(ts_file))
            with h5py.File(ts_file, 'r+') as f:
                print("update /timeseries dataset and 'REF_DATE' attribute value")
                f['timeseries'][:] = ts_data
                f.attrs['REF_DATE'] = ref_date
            print('close {}'.format(ts_file))
        else:
            atr['REF_DATE'] = ref_date
            writefile.write(ts_data, outfile, metadata=atr, ref_file=ts_file)
    return outfile
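The re-referencing step subtracts the new reference acquisition from every epoch. The np.tile() above makes the shapes explicit; with modern numpy, plain broadcasting gives the same result:

import numpy as np

ts_data = np.random.rand(5, 3, 4).astype(np.float32)  # (date, row, col)
ref_idx = 2
ts_data = ts_data - ts_data[ref_idx, :, :]  # broadcast over the date axis
print(ts_data[ref_idx].max())               # reference epoch is now exactly zero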
Example n. 47
    def plot(self):
        # Read data for transection
        self.data_list = []
        self.atr_list = []
        for fname in self.file:
            data, atr = readfile.read(fname, datasetName=self.dset)
            data = pp.scale_data2disp_unit(data, metadata=atr, disp_unit=self.disp_unit)[0]
            self.data_list.append(data)
            self.atr_list.append(atr)

        # Figure
        self.fig, (self.ax_img, self.ax_txn) = plt.subplots(1, 2, num=self.figname, figsize=self.fig_size)

        # Axes 1 - map with view.prep/plot_slice()
        self.ax_img = view.plot_slice(self.ax_img, self.data_img, self.atr, self)[0]

        # Axes 2 - transection
        self.ax_txn.yaxis.tick_right()
        self.ax_txn.yaxis.set_label_position("right")

        # plot initial input transect
        if self.start_yx and self.end_yx:
            self.draw_line(self.start_yx, self.end_yx)
            self.draw_transection(self.start_yx, self.end_yx, self.start_lalo, self.end_lalo)

        self.fig.subplots_adjust(left=0.05, wspace=0.25)

        # save
        if self.save_fig:
            outfile = '{}.pdf'.format(self.outfile_base)
            self.fig.savefig(outfile, bbox_inches='tight', transparent=True, dpi=self.fig_dpi)
            vprint('saved transect to', outfile)

        self.cid = self.fig.canvas.mpl_connect('button_release_event', self.select_point)

        if self.disp_fig:
            vprint('showing ...')
            plt.show()
        return
Example n. 48
def manual_select_start_end_point(File, dset=None):
    """Manual Select Start/End Point in display figure."""
    print('reading '+File+' ...')
    data, atr = readfile.read(File, datasetName=dset)
    print('displaying '+File+' ...')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.imshow(data)

    lines = []
    def draw_line():
        xy = plt.ginput(2)
        x = [p[0] for p in xy]
        y = [p[1] for p in xy]
        line = plt.plot(x,y)
        ax.figure.canvas.draw()
        return line

    line = draw_line()
    import pdb; pdb.set_trace()
    #xc = []
    #yc = []
    #print('1) Click on start and end point of the desired profile')
    #print('2) Close the figure to continue the profile plotting')
#
    #def onclick(event):
    #    if event.button == 1:
    #        xcc, ycc = int(event.xdata), int(event.ydata)
    #        xc.append(xcc)
    #        yc.append(ycc)
    #        print('({}, {})'.format(xcc, ycc))
    #        ax.plot(xcc, ycc, 'ro')
    #cid = fig.canvas.mpl_connect('button_release_event', onclick)
    plt.show()

    #start_yx = [yc[0], xc[0]]
    #end_yx = [yc[1], xc[1]]
    #return start_yx, end_yx
    return [0,0], [10,10]
Example n. 49
def subset_dataset(fname,
                   dsName,
                   pix_box,
                   pix_box4data,
                   pix_box4subset,
                   fill_value=np.nan):

    # read data
    print('reading {d} in {b} from {f} ...'.format(d=dsName,
                                                   b=pix_box4data,
                                                   f=os.path.basename(fname)))
    data, atr = readfile.read(fname,
                              datasetName=dsName,
                              box=pix_box4data,
                              print_msg=False)
    ds_shape = data.shape
    ds_ndim = len(ds_shape)

    # keep timeseries data as 3D matrix when there is only one acquisition
    # because readfile.read() will squeeze it to 2D
    if atr['FILE_TYPE'] == 'timeseries' and ds_ndim == 2:
        data = np.reshape(data, (1, ds_shape[0], ds_shape[1]))
        ds_shape = data.shape
        ds_ndim = 3

    # subset 2D data
    if ds_ndim == 2:
        data_out = np.ones((pix_box[3] - pix_box[1], pix_box[2] - pix_box[0]),
                           data.dtype) * fill_value
        data_out[pix_box4subset[1]:pix_box4subset[3],
                 pix_box4subset[0]:pix_box4subset[2]] = data

    # subset 3D data
    elif ds_ndim == 3:
        data_out = np.ones(
            (ds_shape[0], pix_box[3] - pix_box[1], pix_box[2] - pix_box[0]),
            data.dtype) * fill_value
        data_out[:, pix_box4subset[1]:pix_box4subset[3],
                 pix_box4subset[0]:pix_box4subset[2]] = data

    return data_out
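The 2D branch amounts to pasting the data that could actually be read into a fill-value canvas of the requested size; a runnable sketch with hypothetical boxes:

import numpy as np

pix_box = (0, 0, 8, 6)         # requested subset (x0, y0, x1, y1)
pix_box4subset = (2, 1, 7, 5)  # where the available data lands inside it
data = np.ones((4, 5))         # data actually read from the file

canvas = np.full((pix_box[3] - pix_box[1], pix_box[2] - pix_box[0]), np.nan)
canvas[pix_box4subset[1]:pix_box4subset[3],
       pix_box4subset[0]:pix_box4subset[2]] = data
print(canvas)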
Example n. 50
def main(iargs=None):
    inps = cmd_line_parse(iargs)
    plt.switch_backend('Agg')  # Backend setting

    # Read data
    data, atr = readfile.read(inps.file, datasetName=inps.dset)

    # mask
    mask = pp.read_mask(inps.file, mask_file=inps.mask_file, datasetName=inps.dset, print_msg=True)[0]
    if mask is not None:
        data = np.ma.masked_where(mask == 0., data)

    # Data Operation - Display Unit & Rewrapping
    (data,
     inps.disp_unit,
     inps.disp_scale,
     inps.wrap) = pp.scale_data4disp_unit_and_rewrap(data,
                                                     metadata=atr,
                                                     disp_unit=inps.disp_unit,
                                                     wrap=inps.wrap,
                                                     wrap_range=inps.wrap_range)
    if inps.wrap:
        inps.vlim = inps.wrap_range

    # Output filename
    inps.fig_title = pp.auto_figure_title(inps.file,
                                          datasetNames=inps.dset,
                                          inps_dict=vars(inps))
    if not inps.outfile:
        inps.outfile = '{}.kmz'.format(inps.fig_title)
    inps.outfile = os.path.abspath(inps.outfile)

    # Generate Google Earth KMZ
    write_kmz_file(data,
                   metadata=atr,
                   out_file=inps.outfile,
                   inps=inps)
    return inps.outfile
Example n. 51
def plot_transect_location(ax, inps):
    print('plot profile line in the 1st input file')
    data0, atr0 = readfile.read(inps.file[0], datasetName=inps.dset)
    ax.imshow(data0)

    coord = ut.coordinate(atr0)
    if inps.start_lalo and inps.end_lalo:
        [y0, y1] = coord.lalo2yx([inps.start_lalo[0], inps.end_lalo[0]],
                                 coord_type='lat')
        [x0, x1] = coord.lalo2yx([inps.start_lalo[1], inps.end_lalo[1]],
                                 coord_type='lon')
        inps.start_yx = [y0, x0]
        inps.end_yx = [y1, x1]

    ax.plot([inps.start_yx[1], inps.end_yx[1]],
            [inps.start_yx[0], inps.end_yx[0]], 'ro-')
    ax.set_xlim(0, np.shape(data0)[1])
    ax.set_ylim(np.shape(data0)[0], 0)
    ax.set_title('Transect Line in ' + inps.file[0])

    # Status bar
    def format_coord(x, y):
        col = int(x)
        row = int(y)
        if 0 <= col < data0.shape[1] and 0 <= row < data0.shape[0]:
            z = data0[row, col]
            if 'X_FIRST' in atr0.keys():
                lat = coord.yx2lalo(row, coord_type='row')
                lon = coord.yx2lalo(col, coord_type='col')
                return 'lon=%.4f, lat=%.4f, x=%.0f,  y=%.0f,  value=%.4f' % (
                    lon, lat, x, y, z)
            else:
                return 'x=%.0f,  y=%.0f,  value=%.4f' % (x, y, z)
        else:
            return 'x=%.0f,  y=%.0f' % (x, y)

    ax.format_coord = format_coord
    return
Example n. 52
    def get_incidence_angle(self, box=None):
        """Generate 2D slant range distance if missing from input template file"""
        if not self.extraMetadata:
            return None

        if 'Y_FIRST' in self.extraMetadata.keys():
            # for dataset in geo-coordinates, use constant value from INCIDENCE_ANGLE.
            key = 'INCIDENCE_ANGLE'
            print('geocoded input, use constant value from metadata {}'.format(key))
            if key in self.extraMetadata.keys():
                length = int(self.extraMetadata['LENGTH'])
                width = int(self.extraMetadata['WIDTH'])
                inc_angle = float(self.extraMetadata[key])
                data = np.ones((length, width), dtype=np.float32) * inc_angle

            else:
                return None

        else:
            # read DEM if available for more precise calculation
            if 'height' in self.dsNames:
                dem = readfile.read(self.datasetDict['height'],
                                    datasetName='height')[0]
            else:
                dem = None

            # for dataset in radar-coordinates, calculate 2D pixel-wise value from geometry
            data = ut.incidence_angle(self.extraMetadata,
                                      dem=dem,
                                      dimension=2,
                                      print_msg=False)

        # subset
        if box is not None:
            data = data[box[1]:box[3], box[0]:box[2]]
        return data
Example n. 53
def ll2xy(inps):
    """transfer lat/lon to local coordination"""
    inps.metadata = readfile.read_attribute(inps.file[0])

    # read geometry
    inps.lat, inps.lon = ut.get_lat_lon(inps.metadata)
    #inps.inc_angle = readfile.read(inps.geometry[0], datasetName='incidenceAngle')[0]
    #inps.head_angle = np.ones(inps.inc_angle.shape, dtype=np.float32) * float(inps.metadata['HEADING'])
    #inps.height = readfile.read(inps.geometry[0], datasetName='height')[0]

    # read mask file
    inps.mask = readfile.read(inps.file[0])[0]
    # mask data
    #inps.lat[inps.mask==0] = np.nan
    #inps.lon[inps.mask==0] = np.nan
    #inps.inc_angle[inps.mask==0] = np.nan
    #inps.head_angle[inps.mask==0] = np.nan
    #inps.height[inps.mask==0] = np.nan

    # convert lat/lon to x/y
    # origin point
    origin_lat = (inps.lat[0, 0] + inps.lat[-1, 0]) / 2
    origin_lon = (inps.lon[0, 0] + inps.lon[0, -1]) / 2

    lat = np.transpose(inps.lat.reshape(-1, 1))
    lon = np.transpose(inps.lon.reshape(-1, 1))

    llh = np.vstack((lon, lat))
    origin = np.array([origin_lon, origin_lat], dtype=float)
    XY = np.transpose(
        mut.llh2xy(llh, origin)
    ) * 1000  # unit of X/Y is meter and is a [N,2] matrix with [N,0] is X; [N,1] is Y
    X = XY[:, 0]
    Y = XY[:, 1]

    return X, Y, origin
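mut.llh2xy() comes from an external utility module not shown here. For intuition only, a crude equirectangular stand-in (kilometers relative to the origin; not the projection the real helper uses):

import numpy as np

def llh2xy_approx(llh, origin):
    """Rough lon/lat -> local x/y in km (equirectangular approximation);
    a stand-in for mut.llh2xy(), for illustration only."""
    lon, lat = llh[0], llh[1]
    lon0, lat0 = origin
    x = (lon - lon0) * 111.32 * np.cos(np.radians(lat0))  # km east
    y = (lat - lat0) * 111.32                             # km north
    return np.vstack((x, y))

llh = np.array([[-118.0, -117.9],   # lon
                [34.0, 34.1]])      # lat
print(np.transpose(llh2xy_approx(llh, np.array([-118.0, 34.0]))) * 1000)  # meters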
Example n. 54
def timeseries2ifgram(ts_file, ifgram_file, out_file='reconUnwrapIfgram.h5'):
    # read time-series
    atr = readfile.read_attribute(ts_file)
    range2phase = -4. * np.pi / float(atr['WAVELENGTH'])
    print('reading timeseries data from file {} ...'.format(ts_file))
    ts_data = readfile.read(ts_file)[0] * range2phase
    num_date, length, width = ts_data.shape
    ts_data = ts_data.reshape(num_date, -1)

    # reconstruct unwrapPhase
    print('reconstructing the interferograms from timeseries')
    stack_obj = ifgramStack(ifgram_file)
    stack_obj.open(print_msg=False)
    date12_list = stack_obj.get_date12_list(dropIfgram=False)
    A = stack_obj.get_design_matrix4timeseries(date12_list, refDate='no')[0]
    ifgram_est = np.dot(A, ts_data).reshape(A.shape[0], length, width)
    ifgram_est = np.array(ifgram_est, dtype=ts_data.dtype)
    del ts_data

    # write to ifgram file
    dsDict = {}
    dsDict['unwrapPhase'] = ifgram_est
    writefile.write(dsDict, out_file=out_file, ref_file=ifgram_file)
    return out_file
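The reconstruction in miniature: scale displacement to phase with -4*pi/wavelength, then difference the epochs through a pair design matrix (hand-written here as a stand-in for get_design_matrix4timeseries):

import numpy as np

wavelength = 0.0555               # C-band wavelength in meters (hypothetical value)
range2phase = -4. * np.pi / wavelength

ts = np.array([0.0, 0.01, 0.03])  # displacement at 3 dates, in meters
pha = ts * range2phase

A = np.array([[-1, 1, 0],         # pair (date1, date2)
              [0, -1, 1],         # pair (date2, date3)
              [-1, 0, 1]],        # pair (date1, date3)
             dtype=float)
print(A @ pha)                    # reconstructed unwrapped phase per pair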
Example n. 55
def generate_polygon_sfoot(inps):
    """generate polygon for track using gmt formate"""
    file_name = inps.file[0]

    atr = readfile.read_attribute(file_name)

    # extract lon/lat of four points
    polygon = atr['scene_footprint']
    lonlats = re.findall(r'([\d.]+)', polygon)

    # lon/lat of four points: upperright; lowerright; lowerleft; upperleft
    lon_ur = float(lonlats[0])
    lat_ur = float(lonlats[1])

    lon_lr = float(lonlats[2])
    lat_lr = float(lonlats[3])

    lon_ll = float(lonlats[4])
    lat_ll = float(lonlats[5])

    lon_ul = float(lonlats[6])
    lat_ul = float(lonlats[7])

    return lon_ur, lat_ur, lon_lr, lat_lr, lon_ll, lat_ll, lon_ul, lat_ul
Example n. 56
def prep_geometry_iono(geom_file, box=None, iono_height=450e3, print_msg=True):
    """Prepare geometry info of LOS vector at thin-shell ionosphere.

    Equation (11-12) in Yunjun et al. (2022, TGRS)

    Parameters: geom_file      - str, path to the geometry file in HDF5/MintPy format
                box            - tuple of 4 int, box of interest in (x0, y0, x1, y1)
                iono_height    - float, height of the assumed effective thin-shell ionosphere in m
    Returns:    iono_inc_angle - 2D np.ndarray / float, incidence angle in degree
                iono_lat/lon   - float, latitude/longitude of LOS vector at the thin-shell in deg
                iono_height    - float, height of the assumed effective thin-shell ionosphere in m
    """
    def get_center_lat_lon(geom_file, box=None):
        """Get the lat/lon of the scene center"""
        meta = readfile.read_attribute(geom_file)
        if box is None:
            box = (0, 0, int(meta['WIDTH']), int(meta['LENGTH']))

        col_c = int((box[0] + box[2]) / 2)
        row_c = int((box[1] + box[3]) / 2)
        if 'Y_FIRST' in meta.keys():
            lat0 = float(meta['Y_FIRST'])
            lon0 = float(meta['X_FIRST'])
            lat_step = float(meta['Y_STEP'])
            lon_step = float(meta['X_STEP'])
            lat_c = lat0 + lat_step * row_c
            lon_c = lon0 + lon_step * col_c
        else:
            box_c = (col_c, row_c, col_c + 1, row_c + 1)
            lat_c = float(
                readfile.read(geom_file, datasetName='latitude', box=box_c)[0])
            lon_c = float(
                readfile.read(geom_file, datasetName='longitude',
                              box=box_c)[0])
        return lat_c, lon_c

    # inc_angle on the ground
    inc_angle = readfile.read(geom_file, datasetName='incidenceAngle',
                              box=box)[0]
    inc_angle = np.squeeze(inc_angle)
    inc_angle[inc_angle == 0] = np.nan
    inc_angle_center = np.nanmean(inc_angle)
    if print_msg:
        print('incidence angle on the ground     min/max: {:.1f}/{:.1f} deg'.
              format(np.nanmin(inc_angle), np.nanmax(inc_angle)))

    # inc_angle on the thin-shell ionosphere - equation (11)
    iono_inc_angle = incidence_angle_ground2iono(inc_angle,
                                                 iono_height=iono_height)
    if print_msg:
        print('incidence angle on the ionosphere min/max: {:.1f}/{:.1f} deg'.
              format(np.nanmin(iono_inc_angle), np.nanmax(iono_inc_angle)))

    # center lat/lon on the ground & thin-shell ionosphere - equation (12)
    lat, lon = get_center_lat_lon(geom_file, box=box)
    az_angle = readfile.read(geom_file, datasetName='azimuthAngle', box=box)[0]
    az_angle[az_angle == 0] = np.nan
    az_angle_center = np.nanmean(az_angle)
    iono_lat, iono_lon = lalo_ground2iono(lat,
                                          lon,
                                          inc_angle=inc_angle_center,
                                          az_angle=az_angle_center)

    if print_msg:
        print('center lat/lon  on the ground    : {:.4f}/{:.4f} deg'.format(
            lat, lon))
        print('center lat/lon  on the ionosphere: {:.4f}/{:.4f} deg'.format(
            iono_lat, iono_lon))

    return iono_inc_angle, iono_lat, iono_lon, iono_height
Example n. 57
def run_geocode(inps):
    """geocode all input files"""
    start_time = time.time()

    # Prepare geometry for geocoding
    res_obj = resample(lookupFile=inps.lookupFile,
                       dataFile=inps.file[0],
                       SNWE=inps.SNWE,
                       laloStep=inps.laloStep,
                       processor=inps.processor)
    res_obj.open()

    # resample input files one by one
    for infile in inps.file:
        print('-' * 50+'\nresampling file: {}'.format(infile))
        ext = os.path.splitext(infile)[1]
        atr = readfile.read_attribute(infile, datasetName=inps.dset)
        outfile = auto_output_filename(infile, inps)
        if inps.updateMode and ut.run_or_skip(outfile, in_file=[infile, inps.lookupFile]) == 'skip':
            print('update mode is ON, skip geocoding.')
            continue

        # read source data and resample
        dsNames = readfile.get_dataset_list(infile, datasetName=inps.dset)
        maxDigit = max([len(i) for i in dsNames])
        dsResDict = dict()
        for dsName in dsNames:
            print('reading {d:<{w}} from {f} ...'.format(d=dsName,
                                                         w=maxDigit,
                                                         f=os.path.basename(infile)))
            if ext in ['.h5','.he5']:
                data = readfile.read(infile, datasetName=dsName, print_msg=False)[0]
            else:
                data, atr = readfile.read(infile, datasetName=dsName, print_msg=False)

            # keep timeseries data as 3D matrix when there is only one acquisition
            # because readfile.read() will squeeze it to 2D
            if atr['FILE_TYPE'] == 'timeseries' and len(data.shape) == 2:
                data = np.reshape(data, (1, data.shape[0], data.shape[1]))

            res_data = res_obj.run_resample(src_data=data,
                                            interp_method=inps.interpMethod,
                                            fill_value=inps.fillValue,
                                            nprocs=inps.nprocs,
                                            print_msg=True)
            dsResDict[dsName] = res_data

        # update metadata
        if inps.radar2geo:
            atr = metadata_radar2geo(atr, res_obj)
        else:
            atr = metadata_geo2radar(atr, res_obj)
        #if len(dsNames) == 1 and dsName not in ['timeseries']:
        #    atr['FILE_TYPE'] = dsNames[0]
        #    infile = None

        writefile.write(dsResDict, out_file=outfile, metadata=atr, ref_file=infile)

    m, s = divmod(time.time()-start_time, 60)
    print('time used: {:02.0f} mins {:02.1f} secs.\n'.format(m, s))
    return outfile
Example n. 58
def read_timeseries_yx(y,
                       x,
                       ts_file,
                       ref_y=None,
                       ref_x=None,
                       zero_first=True,
                       win_size=1,
                       unit='m',
                       method='mean',
                       print_msg=True):
    """ Read time-series of one pixel with input y/x
    Parameters: y/x        - int, row/column number of interest
                ts_file    - string, filename of time-series HDF5 file
                ref_y/x    - int, row/column number of reference pixel
                zero_first - bool, shift the time-series so that it starts from zero
                win_size   - int, windows size centered at point of interest
                unit       - str, output displacement unit
                method     - str, method to calculate the output displacement and its dispersion
    Returns:    dates      - 1D np.ndarray of datetime.datetime objects, e.g. datetime.datetime(2010, 10, 20, 0, 0)
                dis        - 1D np.ndarray of float32, displacement
                dis_std    - 1D np.ndarray of float32, displacement dispersion
    """
    # read date
    obj = timeseries(ts_file)
    obj.open(print_msg=False)
    dates = ptime.date_list2vector(obj.dateList)[0]
    dates = np.array(dates)

    # read displacement
    if print_msg:
        print('input y / x: {} / {}'.format(y, x))
    box = (x, y, x + 1, y + 1)
    dis = readfile.read(ts_file, box=box)[0]
    dis_std = None

    if win_size != 1:
        buf = int(win_size / 2)
        box_win = (x - buf, y - buf, x + buf + 1, y + buf + 1)
        dis_win = readfile.read(ts_file,
                                box=box_win)[0].reshape(obj.numDate, -1)

        if method == 'mean':
            dis = np.nanmean(dis_win, axis=1)
            dis_std = np.nanstd(dis_win, axis=1)

        elif method == 'median':
            dis = np.nanmedian(dis_win, axis=1)
            dis_std = median_abs_deviation(dis_win)

        else:
            raise ValueError('un-recognized method: {}'.format(method))

    # reference pixel
    if ref_y is not None:
        ref_box = (ref_x, ref_y, ref_x + 1, ref_y + 1)
        dis -= readfile.read(ts_file, box=ref_box)[0]

    #start at zero
    if zero_first:
        dis -= dis[0]

    # custom output unit; dis_std is None when win_size == 1, so guard its scaling
    if unit == 'm':
        pass
    elif unit == 'cm':
        dis *= 100.
        if dis_std is not None:
            dis_std *= 100.
    elif unit == 'mm':
        dis *= 1000.
        if dis_std is not None:
            dis_std *= 1000.
    else:
        raise ValueError('un-supported output unit: {}'.format(unit))

    return dates, dis, dis_std
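The windowed read reduces each acquisition to one value plus a dispersion estimate; in the real code, median_abs_deviation comes from scipy.stats. A standalone sketch of the 'mean' branch:

import numpy as np

num_date, win = 4, 3
dis_win = np.random.rand(num_date, win * win)  # 3x3 window flattened per epoch

dis = np.nanmean(dis_win, axis=1)      # one displacement value per acquisition
dis_std = np.nanstd(dis_win, axis=1)   # its spread within the window
print(dis.shape, dis_std.shape)        # (4,) (4,)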
Example n. 59
def run_unwrap_error_phase_closure(ifgram_file,
                                   common_regions,
                                   water_mask_file=None,
                                   ccName='connectComponent',
                                   dsNameIn='unwrapPhase',
                                   dsNameOut='unwrapPhase_phaseClosure'):
    print('-' * 50)
    print('correct unwrapping error in {} with phase closure ...'.format(
        ifgram_file))
    stack_obj = ifgramStack(ifgram_file)
    stack_obj.open()
    length, width = stack_obj.length, stack_obj.width
    ref_y, ref_x = stack_obj.refY, stack_obj.refX
    date12_list = stack_obj.get_date12_list(dropIfgram=False)
    num_ifgram = len(date12_list)
    shape_out = (num_ifgram, length, width)

    # read water mask
    if water_mask_file and os.path.isfile(water_mask_file):
        print('read water mask from file:', water_mask_file)
        water_mask = readfile.read(water_mask_file)[0]
    else:
        water_mask = None

    # prepare output data writing
    print('open {} with r+ mode'.format(ifgram_file))
    f = h5py.File(ifgram_file, 'r+')
    print('input  dataset:', dsNameIn)
    print('output dataset:', dsNameOut)
    if dsNameOut in f.keys():
        ds = f[dsNameOut]
        print('access /{d} of np.float32 in size of {s}'.format(d=dsNameOut,
                                                                s=shape_out))
    else:
        ds = f.create_dataset(dsNameOut,
                              shape_out,
                              maxshape=(None, None, None),
                              chunks=True,
                              compression=None)
        print('create /{d} of np.float32 in size of {s}'.format(d=dsNameOut,
                                                                s=shape_out))

    # correct unwrap error ifgram by ifgram
    prog_bar = ptime.progressBar(maxValue=num_ifgram)
    for i in range(num_ifgram):
        date12 = date12_list[i]

        # read unwrap phase to be updated
        unw_cor = np.squeeze(f[dsNameIn][i, :, :]).astype(np.float32)
        unw_cor -= unw_cor[ref_y, ref_x]

        # update kept interferograms only
        if stack_obj.dropIfgram[i]:
            # get local region info from connectComponent
            cc = np.squeeze(f[ccName][i, :, :])
            if water_mask is not None:
                cc[water_mask == 0] = 0
            cc_obj = connectComponent(conncomp=cc, metadata=stack_obj.metadata)
            cc_obj.label()
            local_regions = measure.regionprops(cc_obj.labelImg)

            # matching regions and correct unwrap error
            idx_common = common_regions[0].date12_list.index(date12)
            for local_reg in local_regions:
                local_mask = cc_obj.labelImg == local_reg.label
                U = 0
                for common_reg in common_regions:
                    y = common_reg.sample_coords[:, 0]
                    x = common_reg.sample_coords[:, 1]
                    if all(local_mask[y, x]):
                        U = common_reg.int_ambiguity[idx_common]
                        break
                unw_cor[local_mask] += 2. * np.pi * U

        # write to hdf5 file
        ds[i, :, :] = unw_cor
        prog_bar.update(i + 1, suffix=date12)
    prog_bar.close()
    ds.attrs['MODIFICATION_TIME'] = str(time.time())
    f.close()
    print('close {} file.'.format(ifgram_file))
    return ifgram_file
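The correction itself is just adding an integer number of 2*pi cycles to every pixel of a connected region:

import numpy as np

unw = np.zeros((3, 3))                  # unwrapped phase of one interferogram
local_mask = np.array([[0, 1, 1],
                       [0, 1, 1],
                       [0, 0, 0]], dtype=bool)
U = -1                                  # integer ambiguity found for this region
unw[local_mask] += 2. * np.pi * U
print(unw)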
Example n. 60
def get_common_region_int_ambiguity(ifgram_file,
                                    cc_mask_file,
                                    water_mask_file=None,
                                    num_sample=100,
                                    dsNameIn='unwrapPhase'):
    """Solve the phase unwrapping integer ambiguity for the common regions among all interferograms
    Parameters: ifgram_file     : str, path of interferogram stack file
                cc_mask_file    : str, path of common connected components file
                water_mask_file : str, path of water mask file
                num_sample      : int, number of pixel sampled for each region
                dsNameIn        : str, dataset name of the unwrap phase to be corrected
    Returns:    common_regions  : list of skimage.measure._regionprops._RegionProperties object
                    modified by adding two more variables:
                    sample_coords : 2D np.ndarray in size of (num_sample, 2) in int64 format
                    int_ambiguity : 1D np.ndarray in size of (num_ifgram,) in int format
    """
    print('-' * 50)
    print(
        'calculating the integer ambiguity for the common regions defined in',
        cc_mask_file)
    # stack info
    stack_obj = ifgramStack(ifgram_file)
    stack_obj.open()
    date12_list = stack_obj.get_date12_list(dropIfgram=True)
    num_ifgram = len(date12_list)
    C = matrix(
        ifgramStack.get_design_matrix4triplet(date12_list).astype(float))
    ref_phase = stack_obj.get_reference_phase(unwDatasetName=dsNameIn,
                                              dropIfgram=True).reshape(
                                                  num_ifgram, -1)

    # prepare common label
    print('read common mask from', cc_mask_file)
    cc_mask = readfile.read(cc_mask_file)[0]
    if water_mask_file is not None and os.path.isfile(water_mask_file):
        water_mask = readfile.read(water_mask_file)[0]
        print('refine common mask based on water mask file', water_mask_file)
        cc_mask[water_mask == 0] = 0

    label_img, num_label = connectComponent.get_large_label(cc_mask,
                                                            min_area=2.5e3,
                                                            print_msg=True)
    common_regions = measure.regionprops(label_img)
    print('number of common regions:', num_label)

    # add sample_coords / int_ambiguity
    print('number of samples per region:', num_sample)
    print('solving the phase-unwrapping integer ambiguity for {}'.format(dsNameIn))
    print('\tbased on the closure phase of interferogram triplets (Yunjun et al., 2019)')
    print('\tusing the L1-norm regularized least squares approximation (LASSO) ...')
    for i in range(num_label):
        common_reg = common_regions[i]
        # sample_coords
        idx = sorted(
            np.random.choice(common_reg.area, num_sample, replace=False))
        common_reg.sample_coords = common_reg.coords[idx, :].astype(int)

        # solve for int_ambiguity
        U = np.zeros((num_ifgram, num_sample))
        if common_reg.label == label_img[stack_obj.refY, stack_obj.refX]:
            print('{}/{} skip calculation for the reference region'.format(
                i + 1, num_label))
        else:
            prog_bar = ptime.progressBar(maxValue=num_sample,
                                         prefix='{}/{}'.format(
                                             i + 1, num_label))
            for j in range(num_sample):
                # read unwrap phase
                y, x = common_reg.sample_coords[j, :]
                unw = ifginv.read_unwrap_phase(stack_obj,
                                               box=(x, y, x + 1, y + 1),
                                               ref_phase=ref_phase,
                                               unwDatasetName=dsNameIn,
                                               dropIfgram=True,
                                               print_msg=False).reshape(
                                                   num_ifgram, -1)

                # calculate closure_int
                closure_pha = np.dot(C, unw)
                closure_int = matrix(
                    np.round(
                        (closure_pha - ut.wrap(closure_pha)) / (2. * np.pi)))

                # solve for U
                U[:, j] = np.round(
                    l1regls(-C, closure_int, alpha=1e-2,
                            show_progress=0)).flatten()
                prog_bar.update(j + 1, every=5)
            prog_bar.close()
        # add int_ambiguity
        common_reg.int_ambiguity = np.median(U, axis=1)
        common_reg.date12_list = date12_list

    #sort regions by size to facilitate the region matching later
    common_regions.sort(key=lambda x: x.area, reverse=True)

    # plot sample result
    fig_size = pp.auto_figure_size(label_img.shape, disp_cbar=False)
    fig, ax = plt.subplots(figsize=fig_size)
    ax.imshow(label_img, cmap='jet')
    for common_reg in common_regions:
        ax.plot(common_reg.sample_coords[:, 1],
                common_reg.sample_coords[:, 0],
                'k.',
                ms=2)
    pp.auto_flip_direction(stack_obj.metadata, ax, print_msg=False)
    out_img = 'common_region_sample.png'
    fig.savefig(out_img, bbox_inches='tight', transparent=True, dpi=300)
    print('saved common regions and sample pixels to file', out_img)

    return common_regions
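How a closure phase yields an integer ambiguity: wrap the triplet sum back toward zero and count the cycles removed. A standalone sketch, with a simple wrap() standing in for ut.wrap():

import numpy as np

def wrap(pha):
    """Wrap phase to the principal interval around zero; a stand-in for ut.wrap()."""
    return pha - np.round(pha / (2. * np.pi)) * 2. * np.pi

# closure phase of one triplet: unw12 + unw23 - unw13
closure_pha = np.array([0.1, 0.1 + 2 * np.pi, 0.1 - 4 * np.pi])
closure_int = np.round((closure_pha - wrap(closure_pha)) / (2. * np.pi))
print(closure_int)  # [ 0.  1. -2.]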