Code example #1
def inputs_enu_grids(e_grdfile, n_grdfile, u_grdfile, flight_angle,
                     incidence_angle):
    """For synthetic models with three deformation components calculated.
    Uses a single incidence angle and flight angle right now."""
    [lon, lat, e] = netcdf_read_write.read_any_grd(e_grdfile)
    [_, _, n] = netcdf_read_write.read_any_grd(n_grdfile)
    [_, _, u] = netcdf_read_write.read_any_grd(u_grdfile)
    look_vector = insar_vector_functions.flight_incidence_angles2look_vector(
        flight_angle, incidence_angle)
    los = np.zeros(np.shape(e))
    [numrows, numcols] = np.shape(e)
    for i in range(numrows):
        for j in range(numcols):
            los[i][j] = 1000 * insar_vector_functions.def3D_into_LOS(
                e[i][j], n[i][j], u[i][j], flight_angle, incidence_angle)  # in mm
    InSAR_Obj = InSAR_2D_Object(lon=lon,
                                lat=lat,
                                LOS=los,
                                LOS_unc=np.zeros(np.shape(los)),
                                lkv_E=look_vector[0],
                                lkv_N=look_vector[1],
                                lkv_U=look_vector[2],
                                starttime=None,
                                endtime=None)
    return InSAR_Obj
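A minimal usage sketch, assuming the three synthetic ENU grids exist on disk (the file names and angles below are placeholders, not from the original project):

# Hypothetical usage sketch; file names and angles are placeholders.
insar_obj = inputs_enu_grids('east.grd', 'north.grd', 'up.grd',
                             flight_angle=190, incidence_angle=37.5)
print(np.shape(insar_obj.LOS))  # LOS deformation in mm, same shape as the input grids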
Code example #2
def write_gmtsar2roipac_phase(phasefile, phasefilt_file, ampfile, outfilename,
                              outfile_filt):
    """
    A function that reads phase and amplitude grids in GMT format and writes binary files
    containing the real and imaginary components of the complex values.
    """

    # INPUTS
    [_, _, phase] = read_any_grd(phasefile)
    [_, _, phasefilt] = read_any_grd(phasefilt_file)
    [_, _, amp] = read_any_grd(ampfile)

    # Reformat the phase and amplitude data into 1D arrays.
    phase_1d = np.reshape(phase, (np.size(phase), ))
    phasefilt_1d = np.reshape(phasefilt, (np.size(phasefilt), ))
    amp_1d = np.reshape(amp, (np.size(amp), ))

    # Fix the different GMTSAR definition of amplitude.
    amp_1d = [x * 1e12 for x in amp_1d]

    # # WRITE THEM OUT IN BINARY FORMAT.
    print("converting phase_1d to real,imag.")
    [real, imag] = phase_math.phase_amp2real_imag(phase_1d, amp_1d)
    write_binary_roipac_real_imag(outfilename, real, imag)

    print("converting phase_1d_filt to real,imag.")
    [real, imag] = phase_math.phase_amp2real_imag(phasefilt_1d, amp_1d)
    write_binary_roipac_real_imag(outfile_filt, real, imag)
    return
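For reference, `phase_math.phase_amp2real_imag` presumably applies the standard polar-to-Cartesian identity real + i*imag = amp * exp(i*phase); a self-contained numpy sketch of that assumed behavior (an assumption about the helper's internals, not its verified source):

import numpy as np

def phase_amp2real_imag_sketch(phase, amp):
    # Polar-to-Cartesian: real + i*imag = amp * exp(i*phase)
    phase, amp = np.asarray(phase), np.asarray(amp)
    return amp * np.cos(phase), amp * np.sin(phase)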
Code example #3
def inputs_TS_grd(filename, lonfile, latfile, day0=dt.datetime.strptime("2009-04-24", "%Y-%m-%d")):
    """
    Reads a TS file with associated lat/lon files
    The files generally are not orthorectified grids
    GRDnetcdf has tdata (days since day0), x, y, and zdata (3D cube)
    lon and lat files are 2D arrays with corresponding lon and lat for each point
    day0 is the day of the first acquisition in the time series (hard coded for a UAVSAR track default)
    """
    print("Reading TS Grid file  %s" % filename);
    [tdata, _, _, zdata] = netcdf_read_write.read_3D_netcdf(filename);
    print("tdata:", tdata);
    print("   where Day0 of this time series is %s " % dt.datetime.strftime(day0, "%Y-%m-%d"));
    [_, _, lon] = netcdf_read_write.read_any_grd(lonfile);
    [_, _, lat] = netcdf_read_write.read_any_grd(latfile);
    print("lon and lat:", np.shape(lon));
    print("zdata:", np.shape(zdata));
    zdata_correct_size = [];
    if np.shape(zdata[0])[0] == np.shape(lon)[0]+1 and np.shape(zdata[0])[1] == np.shape(lat)[1]+1:
        for i in range(len(zdata)):
            zdata_correct_size.append(zdata[i][0:-1, 0:-1]);  # cutting off one pixel on each end for pixel node problem
    else:
        zdata_correct_size = zdata;
    dtarray = [];
    for i in range(len(tdata)):
        dtarray.append(day0 + dt.timedelta(days=int(tdata[i])));

    assert(np.shape(lon) == np.shape(zdata_correct_size[0])), ValueError("Lon and Data size don't match");
    assert(np.shape(lat) == np.shape(zdata_correct_size[0])), ValueError("Lat and Data size don't match");
    assert(np.shape(zdata_correct_size)[0] == len(dtarray)), ValueError("dtarray and zdata size don't match");
    myGridTS = GrdTSData(dtarray=dtarray, lon=lon, lat=lat, TS=zdata_correct_size);
    return myGridTS;
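A hedged usage sketch (the file names and day0 date are placeholders, not from the original project):

# Hypothetical usage sketch; file names and the date are placeholders.
ts = inputs_TS_grd('TS.nc', 'lon.grd', 'lat.grd',
                   day0=dt.datetime.strptime('2010-01-15', '%Y-%m-%d'))
print(ts.dtarray[0], np.shape(ts.TS))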
Code example #4
def generate_reflon_reflat(velfile, veldir, rowref, colref):
    # In this part, I sometimes need to flip the x-axis of the input array to make sense with geographic coordinates.
    # I suspect that for ascending orbits, this may not be necessary.
    # Worth checking if it introduces bugs.

    # Here we will use GMTSAR to geocode a small region including the reference pixel.
    # We extract the latitude and longitude of the reference pixel.
    # Temporary file names only; the directory gets added later.
    refpoint_file = 'reference_point.grd'
    ref_ll_name = 'ref_ll'
    ref_ll = ref_ll_name + '.grd'

    [xdata, ydata, zdata] = netcdf_read_write.read_any_grd(velfile)

    # Flipping the x-axis direction and the data itself. Required for descending data, unsure about ascending.
    # All of this will change with better grid referencing in the future.
    colref = len(xdata) - 1 - colref
    # rowref = len(ydata)-1-rowref;
    zdata = np.fliplr(zdata)
    # In general we can figure this out from the flight_angle.

    print("\nHello! Your reference pixel is (row,col) = (%d, %d)" %
          (rowref, colref))
    print("Its velocity is %.2f mm/yr\n" % zdata[rowref][colref])
    print("Its azimuth is %.2f " % ydata[rowref])
    print("Its range is %.2f \n\n" % xdata[colref])

    rowarray = np.array([ydata[rowref], ydata[rowref + 1]])
    colarray = np.array([xdata[colref], xdata[colref + 1]])

    plt.figure()
    plt.imshow(zdata, vmin=-20, vmax=20, cmap='jet')
    plt.plot(colref, rowref, '.', markersize=10, color='k')
    plt.savefig('refpoint.eps')

    zarray = np.array([[0.0, 0.01], [0.01, 0.01]])

    netcdf_read_write.produce_output_netcdf(colarray, rowarray, zarray,
                                            'mm/yr',
                                            veldir + '/' + refpoint_file)
    netcdf_read_write.flip_if_necessary(veldir + '/' + refpoint_file)
    subprocess.call(
        ['geocode_mod.csh', refpoint_file, ref_ll, ref_ll_name, veldir],
        shell=False)

    [xll, yll, _] = netcdf_read_write.read_any_grd(veldir + '/' + ref_ll)
    latref = yll[0]
    lonref = xll[0]
    print("\nReference Location is: ", lonref, latref)

    subprocess.call(['rm', veldir + '/' + ref_ll_name + '.png'], shell=False)
    subprocess.call(['rm', veldir + '/' + ref_ll_name + '.kml'], shell=False)

    return [lonref, latref]
Code example #5
def compare_grid_means(MyParams, filename, statistics_function, mask=None):
    """
    A driver for taking the mean of several grid quantities
    The function for taking the mean/std is passed in
    `mask` has format [filename, cutoff_value] if you want to mask based on a particular computation result.
    """
    strain_values_dict = velocity_io.read_multiple_strain_files(
        MyParams, filename)
    lons, lats, my_means, my_stds = compute_grid_statistics(
        strain_values_dict, statistics_function)
    if mask:
        [_, _, masking_values] = netcdf_read_write.read_any_grd(mask[0])
        my_means = utilities.mask_by_value(my_means, masking_values, mask[1])
    netcdf_read_write.produce_output_netcdf(
        lons, lats, my_means, 'per year',
        MyParams.outdir + "/means_" + filename)
    netcdf_read_write.produce_output_netcdf(
        lons, lats, my_stds, 'per year',
        MyParams.outdir + "/deviations_" + filename)
    if "dila" in filename or "max_shear" in filename:
        pygmt_plots.plot_method_differences(
            strain_values_dict, my_means, MyParams.range_strain,
            MyParams.outdir, MyParams.outdir + "/separate_plots_" +
            filename.split('.')[0] + '.png')
    return
Code example #6
def get_100p_pixels_manually_choose(signalspread_filename):
    """
    This iterative function helps you manually choose a reference pixel based on signalspread.nc.
    You might have to run through this function a number of times
    to select your boxes and your eventual reference pixel (row, col).
    I pick one in a stable area, outside of the deformation, ideally in a desert.
    """
    print("Finding the pixels that are 100 percent coherent from signalspread")
    [x, y, ss_data] = netcdf_read_write.read_any_grd(signalspread_filename)

    count = 0
    candidate_options, great_options = [], []
    for i in range(len(y)):
        for j in range(len(x)):
            if ss_data[i][j] == 100:
                count = count + 1
                candidate_options.append((i, j))
    total_pixels = np.shape(ss_data)[0] * np.shape(ss_data)[1]
    print("%d of %d (%f percent) are totally coherent. " %
          (count, total_pixels, 100 * (count / total_pixels)))
    print("%d pixels are good options for the reference pixel. " %
          (len(candidate_options)))

    # Manual select: great options
    xrange_great = (50, 250)
    # manual select here
    yrange_great = (900, 1000)
    # manual select here
    for item in candidate_options:
        if yrange_great[0] < item[0] < yrange_great[1]:
            if xrange_great[0] < item[1] < xrange_great[1]:
                great_options.append(item)
    print("%d pixels are great options for the reference pixel. " %
          (len(great_options)))

    # Manual select: Get a single reference pixel from the great options
    refpix = great_options[40]
    # manual select here

    # Make a plot that shows where those pixels are
    plt.figure()
    plt.imshow(ss_data, aspect=1, cmap='rainbow')
    plt.gca().invert_yaxis()
    plt.plot([item[1] for item in candidate_options],
             [item[0] for item in candidate_options],
             '.',
             color='k')
    plt.plot([item[1] for item in great_options],
             [item[0] for item in great_options],
             '.',
             color='g')
    plt.plot(refpix[1], refpix[0], '.', color='r')
    plt.savefig('best_pixels.png')
    plt.close()
    print("Based on 100p pixels, selecting reference pixel at row/col %d/%d " %
          (refpix[0], refpix[1]))
    print(
        "STOPPING ON PURPOSE: Please write your reference pixel in your config file."
    )
    return refpix[0], refpix[1]
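The nested candidate-pixel loop above can be collapsed into a single vectorized search; a minimal standalone sketch of the equivalent operation (with stand-in data):

import numpy as np

ss_data = np.round(np.random.uniform(0, 100, size=(1200, 300)))  # stand-in for signalspread data
candidates = np.argwhere(ss_data == 100)  # (row, col) pairs of fully coherent pixels
print('%d of %d pixels are totally coherent' % (len(candidates), ss_data.size))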
Code example #7
def produce_output_plot(netcdfname, plottitle, plotname, cblabel, aspect=1.0, invert_yaxis=True, dot_points=None,
                        vmin=None, vmax=None, cmap='rainbow'):
    # Read in the dataset
    [_, _, zread] = read_any_grd(netcdfname)

    # Make a plot
    fig = plt.figure(figsize=(7, 10))
    _ax1 = fig.add_axes([0.0, 0.1, 0.9, 0.8])
    if vmin is not None:
        plt.imshow(zread, aspect=aspect, cmap=cmap, vmin=vmin, vmax=vmax)
    else:
        plt.imshow(zread, aspect=aspect, cmap=cmap)
    if invert_yaxis:
        plt.gca().invert_yaxis()  # for imshow, rows get labeled in the downward direction
    # plt.gca().get_xaxis().set_ticks([]);
    # plt.gca().get_yaxis().set_ticks([]);
    if dot_points is not None:
        plt.plot(dot_points[0], dot_points[1], color='black', marker='*', markersize=10)
    plt.title(plottitle)
    plt.gca().set_xlabel("Range", fontsize=16)
    plt.gca().set_ylabel("Azimuth", fontsize=16)
    cb = plt.colorbar()
    cb.set_label(cblabel, size=16)
    plt.savefig(plotname)
    plt.close()
    return
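Code example #18 below shows this helper in use on an NSBAS velocity grid; condensed, that call looks like:

produce_output_plot('velo_nsbas.grd', 'LOS Velocity', 'velo_nsbas.png', 'velocity (mm/yr)')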
Code example #8
def produce_min_max(filename):
    x, y, z = netcdf_read_write.read_any_grd(filename)
    print("File:", filename)
    print("Max: ", np.nanmax(z))
    print("Min: ", np.nanmin(z))
    print("Shape: ", np.shape(z))
    return
Code example #9
def how_many_nans(filename):
    [_, _, zdata] = netcdf_read_write.read_any_grd(filename)
    nan_pixels = np.count_nonzero(np.isnan(zdata))
    total_pixels = np.shape(zdata)[0] * np.shape(zdata)[1]
    print("For file %s: %d pixels of %d are NaNs (%f percent)." %
          (filename, nan_pixels, total_pixels,
           100 * float(nan_pixels / float(total_pixels))))
    return [nan_pixels, total_pixels]
Code example #10
def mask_by_value(outdir, grid1, grid2, cutoff_value):
    # grid1 = usually azimuth deviations
    # grid2 = usually I2nd
    lon1, lat1, val1 = netcdf_read_write.read_any_grd(outdir + "/deviations_" +
                                                      grid1 + ".nc")
    lon2, lat2, val2 = netcdf_read_write.read_any_grd(outdir + "/means_" +
                                                      grid2 + ".nc")
    masked_vals = np.zeros(np.shape(val2))
    for i in range(len(lon1)):
        for j in range(len(lat1)):
            if abs(val2[j][i]) > cutoff_value:
                masked_vals[j][i] = val1[j][i]
            else:
                masked_vals[j][i] = np.nan
    netcdf_read_write.produce_output_netcdf(
        lon1, lat1, masked_vals, 'per yr',
        outdir + "/deviations_" + grid1 + ".nc")
    return
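The double loop is equivalent to a single vectorized `np.where`; a minimal sketch of the core masking step with stand-in arrays:

import numpy as np

val1 = np.random.randn(10, 12)  # stand-in for deviations
val2 = np.random.randn(10, 12)  # stand-in for means
cutoff_value = 0.5
masked_vals = np.where(np.abs(val2) > cutoff_value, val1, np.nan)  # keep val1 where |val2| > cutoff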
Code example #11
def make_outlier_mask_for_stack(filelist, maskfile, outlier_cutoff=1e4):
    """
    Make a mask of ones and NaNs from a co-registered stack:
    if a pixel is above the outlier cutoff in any image of the stack, that pixel becomes NaN in the mask.
    """
    filename = filelist[0]  # any file from the stack works; we just need the shape of the outputs
    x, y, z = netcdf_read_write.read_any_grd(filename)
    crazy_mask = np.ones(np.shape(z))
    for ifile in filelist:
        print(ifile)
        x, y, ztemp = netcdf_read_write.read_any_grd(ifile)
        for i in range(len(y)):
            for j in range(len(x)):
                if abs(ztemp[i][j]) > outlier_cutoff:
                    crazy_mask[i][j] = np.nan
    # Put all the crazy pixels into a mask (across all images in the stack).
    netcdf_read_write.produce_output_netcdf(x, y, crazy_mask, "", maskfile)
    return
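If the whole co-registered stack fits in memory, the per-pixel loop collapses to one reduction along the stack axis; a sketch under that assumption (with stand-in data):

import numpy as np

stack = np.random.randn(5, 100, 120) * 10  # stand-in stack: (n_images, rows, cols)
outlier_cutoff = 25
crazy_mask = np.where(np.any(np.abs(stack) > outlier_cutoff, axis=0), np.nan, 1.0)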
Code example #12
def number_below_value(filename, value):
    [xdata, ydata, zdata] = netcdf_read_write.read_any_grd(filename)
    count = 0
    for i in range(len(ydata)):
        for j in range(len(xdata)):
            if zdata[i][j] < value:
                count = count + 1
    total_pixels = np.shape(zdata)[0] * np.shape(zdata)[1]
    print("For file %s: %d pixels of %d are below %f (%f percent)." %
          (filename, count, total_pixels, value,
           100 * float(count / float(total_pixels))))
    return
Code example #13
def inputs_grd(los_grdfile):
    """Input function for netcdf file"""
    [lon, lat, LOS] = netcdf_read_write.read_any_grd(los_grdfile)
    InSAR_Obj = InSAR_2D_Object(lon=lon,
                                lat=lat,
                                LOS=LOS,
                                LOS_unc=np.zeros(np.shape(LOS)),
                                lkv_E=None,
                                lkv_N=None,
                                lkv_U=None,
                                starttime=None,
                                endtime=None)
    return InSAR_Obj
Code example #14
def write_gmtsar2roipac_topo(infile, out_topo):
    """
    A function that reads a GMT topographic grid and writes the corresponding .hgt format
    for use with roipac functions.
    """
    [xdata, _, topo] = read_any_grd(infile)
    width = len(xdata)
    # topo = np.flipud(topo);  # early formatting needed flip up/down.  Processing as of 5/3/21 does NOT.

    topo_1d = np.reshape(topo, (np.size(topo), ))

    # WRITE THE TOPO OUT IN BINARY FORMAT
    write_binary_topo(out_topo, topo_1d, topo_1d, width)
    # output_plots(topo_1d, topo_1d, width, length, 'mendocino_topo_orig.eps');
    return
Code example #15
def drive_full_TS(param_dict, intf_files, coh_files):
    param_dict["start_index"] = 0
    param_dict["end_index"] = 11000000
    intf_tuple, coh_tuple, baseline_tuple = param_dict["reader"](
        intf_files, coh_files, param_dict["baseline_file"],
        param_dict["ts_type"], param_dict["dem_error"])
    [_, _, signal_spread_tuple
     ] = rwr.read_any_grd(param_dict["signal_spread_filename"])
    TS, metrics = nsbas.Full_TS(param_dict, intf_tuple, signal_spread_tuple,
                                baseline_tuple, coh_tuple)
    rwr.produce_output_TS_grids(intf_tuple.xvalues, intf_tuple.yvalues, TS,
                                intf_tuple.ts_dates, 'mm',
                                param_dict["ts_output_dir"])
    write_output_metrics(param_dict, intf_tuple, metrics)
    return
Code example #16
def read_strain_files(MyParams, filename):
    # Read strain quantities and guarantee their co-registration
    strain_values_dict = {}
    for method in MyParams.strain_dict.keys():
        specific_filename = MyParams.strain_dict[method] + "/" + filename
        if os.path.isfile(specific_filename):
            [lon, lat, val] = netcdf_read_write.read_any_grd(specific_filename)
            strain_values_dict[method] = [lon, lat, val]
        else:
            raise ("Error! Can't find file %s " % specific_filename)
    comp.defensive_programming(MyParams, strain_values_dict)
    method1 = list(strain_values_dict.keys())[0]
    lons = strain_values_dict[method1][0]
    lats = strain_values_dict[method1][1]
    return strain_values_dict, lons, lats
Code example #17
def drive_velocity_simple_stack(config_params, intf_files):
    param_dict = get_simple_stack_params(config_params)
    [_, _, signal_spread_data
     ] = rwr.read_any_grd(param_dict["signal_spread_filename"])
    intf_tuple = param_dict["reader"](intf_files)
    velocities, x, y = velocity_simple_stack(intf_tuple,
                                             param_dict["wavelength"],
                                             param_dict["rowref"],
                                             param_dict["colref"],
                                             signal_spread_data, 25)
    # The last argument is the signal threshold (< 100%); a lower threshold allows more data into the stack.
    output_manager_simple_stack(x, y, velocities, param_dict["rowref"],
                                param_dict["colref"], signal_spread_data,
                                param_dict["outdir"])
    return
Code example #18
def drive_velocity(param_dict, intf_files, coh_files):
    intf_tuple, coh_tuple, baseline_tuple = param_dict["reader"](
        intf_files, coh_files, param_dict["baseline_file"],
        param_dict["ts_type"], param_dict["dem_error"])
    [_, _, signal_spread_tuple
     ] = rwr.read_any_grd(param_dict["signal_spread_filename"])
    velocities, metrics = nsbas.Velocities(param_dict, intf_tuple,
                                           signal_spread_tuple, baseline_tuple,
                                           coh_tuple)
    rwr.produce_output_netcdf(intf_tuple.xvalues, intf_tuple.yvalues,
                              velocities, 'mm/yr',
                              param_dict["ts_output_dir"] + '/velo_nsbas.grd')
    netcdf_plots.produce_output_plot(
        param_dict["ts_output_dir"] + '/velo_nsbas.grd', 'LOS Velocity',
        param_dict["ts_output_dir"] + '/velo_nsbas.png', 'velocity (mm/yr)')
    return
Code example #19
def histogram_of_grd_file_values(filename,
                                 varname='Deviation',
                                 plotname='histogram_values.png'):
    """
    Make a simple histogram of the values in a grid file.
    """
    z = netcdf_read_write.read_any_grd(filename)[2]
    z_vector = np.reshape(z, (np.shape(z)[0] * np.shape(z)[1], ))
    plt.figure(dpi=250, figsize=(8, 7))
    plt.hist(z_vector, bins=50, color='orange')
    plt.yscale('log')
    plt.ylabel('Number of Pixels', fontsize=20)
    plt.xlabel(varname, fontsize=20)
    plt.gca().tick_params(axis='both', which='major', labelsize=16)
    plt.savefig(plotname)
    plt.close()
    return
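Grid files often contain NaNs, which can make `plt.hist` misbehave; dropping them first is a safe refinement (a suggested tweak, not in the original):

z_vector = z_vector[~np.isnan(z_vector)]  # suggested tweak, not in the original: drop NaNs before histogramming
plt.hist(z_vector, bins=50, color='orange')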
Code example #20
File: velocity_io.py, project: kmaterna/Strain_2D
def read_multiple_strain_files(MyParams, filename):
    """
    Read strain quantities of `filename` into a dictionary
    Each dictionary key is a strain method
    Each dictionary value is a data structure: [lon, lat, value]
    lon : list of floats
    lat : list of floats
    value : 2D array of floats
    We also guarantee the mutual co-registration of the dictionary elements
    """
    strain_values_dict = {}
    for method in MyParams.strain_dict.keys():
        specific_filename = MyParams.strain_dict[method] + "/" + filename
        assert os.path.isfile(specific_filename), FileNotFoundError("Cannot find file " + specific_filename)
        [lon, lat, val] = netcdf_read_write.read_any_grd(specific_filename)
        strain_values_dict[method] = [lon, lat, val]
    utilities.check_coregistered_grids(MyParams.range_strain, MyParams.inc,
                                       strain_values_dict)
    utilities.check_coregistered_shapes(strain_values_dict)
    return strain_values_dict
Code example #21
def get_file_shape(infile):
    [xdata, ydata, _] = read_any_grd(infile)
    width = len(xdata)
    length = len(ydata)
    print("shape of %s: width %d, length %d" % (infile, width, length))
    return [width, length]
Code example #22
def compute_gpsgridder(myVelfield, range_strain, inc, poisson, fd, eigenvalue,
                       tempoutdir):
    print(
        "------------------------------\nComputing strain via gpsgridder method."
    )
    velocity_io.write_simple_gmt_format(myVelfield, "tempgps.txt")
    command = "gmt gpsgridder tempgps.txt" + \
              " -R"+configure_functions.get_string_range(range_strain, x_buffer=0.02, y_buffer=0.02) + \
              " -I"+configure_functions.get_string_inc(inc) + \
              " -S"+poisson + \
              " -Fd"+fd + \
              " -C"+eigenvalue + \
              " -Emisfitfile.txt -fg -r -Gnc_%s.nc"
    print(command)
    subprocess.call(command, shell=True)
    # makes a netcdf grid file
    # -R = range. -I = interval. -E prints the model and data fits at the input stations (very useful).
    # -S = poisson's ratio. -Fd = fudge factor. -C = eigenvalues below this value will be ignored.
    # -fg = flat earth approximation. -G = output netcdf files (x and y displacements).
    # You should experiment with Fd and C values to find something that you like (good fit without overfitting).
    # For Northern California, I like -Fd0.01 -C0.005. -R-125/-121/38/42.2

    subprocess.call(['rm', 'tempgps.txt'], shell=False)
    subprocess.call(['rm', 'gmt.history'], shell=False)
    subprocess.call(['mv', 'misfitfile.txt', tempoutdir], shell=False)
    subprocess.call(['mv', 'nc_u.nc', tempoutdir], shell=False)
    subprocess.call(['mv', 'nc_v.nc', tempoutdir], shell=False)

    # Get ready to do strain calculation.
    file1 = tempoutdir + "nc_u.nc"
    file2 = tempoutdir + "nc_v.nc"
    [xdata, ydata, udata] = netcdf_read_write.read_any_grd(file1)
    [_, _, vdata] = netcdf_read_write.read_any_grd(file2)
    xinc = float(
        subprocess.check_output('gmt grdinfo -M -C ' + file1 +
                                ' | awk \'{print $8}\'',
                                shell=True))  # x-inc
    yinc = float(
        subprocess.check_output('gmt grdinfo -M -C ' + file1 +
                                ' | awk \'{print $9}\'',
                                shell=True))  # y-inc
    xinc = xinc * 111.000 * np.cos(np.deg2rad(range_strain[2]))  # in km (not degrees)
    yinc = yinc * 111.000  # in km (not degrees)
    [ydim, xdim] = np.shape(udata)
    exx = np.zeros(np.shape(vdata))
    exy = np.zeros(np.shape(vdata))
    eyy = np.zeros(np.shape(vdata))
    rot = np.zeros(np.shape(vdata))  # rotation rate (absolute value is stored below)

    # the strain calculation
    for j in range(ydim - 1):
        for i in range(xdim - 1):
            up = udata[j][i]
            vp = vdata[j][i]
            uq = udata[j][i + 1]
            vq = vdata[j][i + 1]
            ur = udata[j + 1][i]
            vr = vdata[j + 1][i]

            [dudx, dvdx, dudy,
             dvdy] = strain_tensor_toolbox.compute_displacement_gradients(
                 up, vp, ur, vr, uq, vq, xinc, yinc)

            # The basic strain tensor components (units: nanostrain per year)
            [exx1, exy1, eyy1,
             rot1] = strain_tensor_toolbox.compute_strain_components_from_dx(
                 dudx, dvdx, dudy, dvdy)
            rot[j][i] = abs(rot1)
            exx[j][i] = exx1
            exy[j][i] = exy1
            eyy[j][i] = eyy1

    print("Success computing strain via gpsgridder method.\n")

    return [xdata, ydata, rot, exx, exy, eyy]
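The strain components computed inside the loop follow the standard 2D small-strain relations; a self-contained sketch of what `compute_strain_components_from_dx` presumably evaluates (an assumption about the helper, stated for reference):

def strain_components_sketch(dudx, dvdx, dudy, dvdy):
    # Standard 2D small-strain definitions (assumed to match the toolbox helper).
    exx = dudx
    eyy = dvdy
    exy = 0.5 * (dudy + dvdx)
    rot = 0.5 * (dvdx - dudy)  # rigid-body rotation rate
    return exx, exy, eyy, rot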
Code example #23
def compute_gpsgridder(myVelfield, range_strain, inc, poisson, fd, eigenvalue,
                       tempoutdir):
    print(
        "------------------------------\nComputing strain via gpsgridder method."
    )
    velocity_io.write_gmt_format(myVelfield, "tempgps.txt")
    command = "gmt gpsgridder tempgps.txt" + \
              " -R" + utilities.get_string_range(range_strain, x_buffer=0.02, y_buffer=0.02) + \
              " -I" + utilities.get_string_inc(inc) + \
              " -S" + poisson + \
              " -Fd" + fd + \
              " -C" + eigenvalue + \
              " -Emisfitfile.txt -fg -r -Gnc_%s.nc"
    print(command)
    subprocess.call(command, shell=True)
    # makes a netcdf grid file
    # -R = range. -I = interval. -E prints the model and data fits at the input stations (very useful).
    # -S = poisson's ratio. -Fd = fudge factor. -C = eigenvalues below this value will be ignored.
    # -fg = flat earth approximation. -G = output netcdf files (x and y displacements).
    # You should experiment with Fd and C values to find something that you like (good fit without overfitting).
    # For Northern California, I like -Fd0.01 -C0.005. -R-125/-121/38/42.2

    subprocess.call(['rm', 'tempgps.txt'], shell=False)
    subprocess.call(['rm', 'gmt.history'], shell=False)
    subprocess.call(['mv', 'misfitfile.txt', tempoutdir], shell=False)
    subprocess.call(['mv', 'nc_u.nc', tempoutdir], shell=False)
    subprocess.call(['mv', 'nc_v.nc', tempoutdir], shell=False)

    # Get ready to do strain calculation.
    file1 = tempoutdir + "nc_u.nc"
    file2 = tempoutdir + "nc_v.nc"
    [xdata, ydata, udata] = netcdf_read_write.read_any_grd(file1)
    [_, _, vdata] = netcdf_read_write.read_any_grd(file2)
    udata = udata.T
    vdata = vdata.T

    xinc = float(
        subprocess.check_output('gmt grdinfo -M -C ' + file1 +
                                ' | awk \'{print $8}\'',
                                shell=True))  # x-inc
    yinc = float(
        subprocess.check_output('gmt grdinfo -M -C ' + file1 +
                                ' | awk \'{print $9}\'',
                                shell=True))  # y-inc
    xinc = xinc * 111.000 * np.cos(np.deg2rad(range_strain[2]))  # in km (not degrees)
    yinc = yinc * 111.000  # in km (not degrees)

    [exx, eyy, exy, rot
     ] = strain_tensor_toolbox.strain_on_regular_grid(xinc, yinc, udata, vdata)
    # Lastly we multiply by 1000 for units
    exx = exx.T
    eyy = eyy.T
    exy = exy.T
    rot = rot.T
    exx = np.multiply(exx, 1000)
    exy = np.multiply(exy, 1000)
    eyy = np.multiply(eyy, 1000)
    rot = abs(np.multiply(rot, 1000))

    print("Success computing strain via gpsgridder method.\n")
    return [xdata, ydata, rot, exx, exy, eyy]
Code example #24
def compute_loops(all_loops, loops_dir, loops_guide, rowref, colref):
    subprocess.call(['mkdir', '-p', loops_dir], shell=False)
    ofile = open(loops_dir + loops_guide, 'w')
    for i in range(len(all_loops)):
        ofile.write("Loop %d: %s %s %s\n" %
                    (i, all_loops[i][0], all_loops[i][1], all_loops[i][2]))
    ofile.close()

    unwrapped = 'unwrap.grd'
    wrapped = 'phasefilt.grd'
    filename = 'intf_all/' + all_loops[0][0] + '_' + all_loops[0][
        1] + '/' + unwrapped
    z1_sample = read_netcdf3(filename)[2]
    number_of_errors = np.zeros(np.shape(z1_sample))

    for i in range(0, len(all_loops)):
        edge1 = all_loops[i][0] + '_' + all_loops[i][1]
        edge2 = all_loops[i][1] + '_' + all_loops[i][2]
        edge3 = all_loops[i][0] + '_' + all_loops[i][2]
        [xdata, ydata,
         z1] = netcdf_read_write.read_any_grd('intf_all/' + edge1 + '/' +
                                              unwrapped)
        [_, _, z2] = netcdf_read_write.read_any_grd('intf_all/' + edge2 + '/' +
                                                    unwrapped)
        [_, _, z3] = netcdf_read_write.read_any_grd('intf_all/' + edge3 + '/' +
                                                    unwrapped)

        [xdata, ydata,
         wr_z1] = netcdf_read_write.read_any_grd('intf_all/' + edge1 + '/' +
                                                 wrapped)
        [_, _, wr_z2] = netcdf_read_write.read_any_grd('intf_all/' + edge2 +
                                                       '/' + wrapped)
        [_, _, wr_z3] = netcdf_read_write.read_any_grd('intf_all/' + edge3 +
                                                       '/' + wrapped)

        print("Loop " + str(i) + ":")

        rowdim, coldim = np.shape(z1)

        histdata_raw = []
        histdata_fix = []
        znew_raw = np.zeros(np.shape(z1))
        znew_fix = np.zeros(np.shape(z1))
        errorcount = 0

        for j in range(rowdim):
            for k in range(coldim):

                wr1 = wr_z1[j][k] - wr_z1[rowref, colref]
                z1_adj = z1[j][k] - z1[rowref, colref]
                wr2 = wr_z2[j][k] - wr_z2[rowref, colref]
                z2_adj = z2[j][k] - z2[rowref, colref]
                wr3 = wr_z3[j][k] - wr_z3[rowref, colref]
                z3_adj = z3[j][k] - z3[rowref, colref]

                # Using equation from Heresh Fattahi's PhD thesis to isolate unwrapping errors.
                wrapped_closure_raw = wr_z1[j][k] + wr_z2[j][k] - wr_z3[j][k]
                wrapped_closure_fix = wr1 + wr2 - wr3
                offset_before_unwrapping = np.mod(wrapped_closure_fix,
                                                  2 * np.pi)
                if offset_before_unwrapping > np.pi:
                    offset_before_unwrapping = offset_before_unwrapping - 2 * np.pi
                    # send it to the -pi to pi realm.

                unwrapped_closure_raw = z1[j][k] + z2[j][k] - z3[j][k]
                unwrapped_closure_fix = z1_adj + z2_adj - z3_adj

                znew_raw[j][
                    k] = unwrapped_closure_raw - offset_before_unwrapping
                znew_fix[j][
                    k] = unwrapped_closure_fix - offset_before_unwrapping

                if ~np.isnan(znew_raw[j][k]):
                    histdata_raw.append(znew_raw[j][k] / np.pi)
                if ~np.isnan(znew_fix[j][k]):
                    histdata_fix.append(znew_fix[j][k] / np.pi)
                if abs(znew_fix[j][k]) > 0.5:  # if this pixel has an unwrapping error
                    errorcount = errorcount + 1
                    number_of_errors[j][k] = number_of_errors[j][k] + 1

        errorpixels = round(100 * float(errorcount) / len(histdata_fix), 2)
        print("Most common raw loop sum: ")
        print(np.median(histdata_raw))
        print("Most common fix loop sum: ")
        print(np.median(histdata_fix))
        print("\n")

        make_plot(xdata, ydata, znew_fix,
                  loops_dir + 'phase_closure_' + str(i) + '.eps', errorpixels)
        make_histogram(histdata_fix,
                       loops_dir + 'histogram_' + str(i) + '.eps')

    return [xdata, ydata, number_of_errors]
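The mod-2pi-then-shift step that centers the closure offset is a standard wrap into (-pi, pi]; a minimal standalone sketch:

import numpy as np

def wrap_to_pi(x):
    # Map any angle into (-pi, pi], matching the mod/shift logic in the loop above.
    wrapped = np.mod(x, 2 * np.pi)
    return np.where(wrapped > np.pi, wrapped - 2 * np.pi, wrapped)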
Code example #25
def make_residual_plot(file1,
                       file2,
                       plotname,
                       histname,
                       vmin=-20,
                       vmax=5,
                       title1='',
                       title2='',
                       scalelabel='LOS Velocity',
                       units='mm/yr',
                       flip_sign1=False,
                       flip_sign2=False):
    """
    A basic function that takes two co-registered grids and subtracts them, showing residuals in the third panel
    and a histogram of residuals in a separate plot.
    """
    data1 = netcdf_read_write.read_any_grd(file1)[2]
    data2 = netcdf_read_write.read_any_grd(file2)[2]
    if flip_sign1:
        data1 = -1 * data1
    if flip_sign2:
        data2 = -1 * data2
    residuals = np.subtract(data1, data2)
    residuals_vector = np.reshape(
        residuals, (np.shape(residuals)[0] * np.shape(residuals)[1], ))

    fig, axarr = plt.subplots(1, 3, sharey='all', figsize=(20, 8), dpi=300)
    axarr[0].imshow(data1, vmin=vmin, vmax=vmax, cmap='rainbow')
    axarr[0].tick_params(labelsize=16)
    axarr[0].set_title(title1, fontsize=20)
    axarr[0].invert_yaxis()

    axarr[1].imshow(data2, vmin=vmin, vmax=vmax, cmap='rainbow')
    axarr[1].tick_params(labelsize=16)
    axarr[1].set_title(title2, fontsize=20)
    axarr[1].invert_yaxis()

    axarr[2].imshow(residuals, vmin=-10, vmax=10, cmap='rainbow')
    axarr[2].tick_params(labelsize=16)
    axarr[2].set_title('Residuals', fontsize=20)
    axarr[2].invert_yaxis()

    # Fancy color bar #1
    _cbarax = fig.add_axes([0.85, 0.08, 0.1, 0.9], visible=False)
    color_boundary_object = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    custom_cmap = cm.ScalarMappable(norm=color_boundary_object, cmap='rainbow')
    custom_cmap.set_array(np.arange(vmin, vmax, 0.1))
    cb = plt.colorbar(custom_cmap,
                      aspect=12,
                      fraction=0.2,
                      orientation='vertical')
    cb.set_label(scalelabel + ' (' + units + ')', fontsize=18)
    cb.ax.tick_params(labelsize=16)

    # Fancy color bar for residuals
    _cbarax = fig.add_axes([0.68, 0.05, 0.1, 0.9], visible=False)
    color_boundary_object = matplotlib.colors.Normalize(vmin=-10, vmax=10)
    custom_cmap = cm.ScalarMappable(norm=color_boundary_object, cmap='rainbow')
    custom_cmap.set_array(np.arange(vmin, vmax, 0.1))
    cb = plt.colorbar(custom_cmap,
                      aspect=12,
                      fraction=0.2,
                      orientation='horizontal')
    cb.set_label('Residual (' + units + ')', fontsize=16)
    cb.ax.tick_params(labelsize=14)
    plt.savefig(plotname)
    plt.close()

    plt.figure(dpi=300, figsize=(8, 6))
    plt.hist(residuals_vector, bins=50, color='orange')
    plt.yscale('log')
    plt.ylabel('Number of Pixels', fontsize=20)
    plt.xlabel('Residuals (' + units + ')', fontsize=20)
    plt.gca().tick_params(axis='both', which='major', labelsize=16)
    plt.savefig(histname)
    plt.close()
    return
Code example #26
def main_function(intf_directory, flattentopo_directory, topo_ra_file,
                  example_rsc):
    """
    :param intf_directory: location where all your interferograms are stored
    :param flattentopo_directory: location where all your output corrected igrams are stored
    :param topo_ra_file: name of topo_ra.grd, registered with same pixels as intf files
    :param example_rsc: name of rsc file with proper length and width set
    """

    # GLOBAL PARAMETERS
    nfit = 0
    ivar = 1
    alt_ref = 100  # changing this during experiments
    thresh_amp = 0.2  # changing this during experiments
    bin_demfile = flattentopo_directory + "/topo_radar.hgt"  # binary topo file

    # INPUTS
    intf_list = glob.glob(intf_directory + "/???????_???????")
    print(len(intf_list), " interferograms found for flatten_topo")

    # Turning dem into roipac format
    readbin.write_gmtsar2roipac_topo(topo_ra_file, bin_demfile)
    [width, length] = readbin.get_file_shape(topo_ra_file)

    # COMPUTE
    for data_dir in intf_list:
        print(data_dir)
        intf_name = data_dir.split('/')[1]
        # is this general or specific to one-level-deep directories?
        outdir = flattentopo_directory + intf_name + '/'
        if os.path.isfile(outdir + '/phase.grd'):
            print("skipping %s " % outdir)
            continue
        subprocess.call(["mkdir", "-p", outdir], shell=False)

        infile = outdir + "/intf_sd.int"
        infile_filtered = outdir + "/intf_filt.int"
        stratfile = outdir + "/strat.unw"
        outfile = outdir + "/out.int"
        outfile_filtered = outdir + "/out_filtered.int"

        # GMTSAR files.
        orig_phasefile = data_dir + "/phase.grd"
        orig_phasefilt_file = data_dir + "/phasefilt.grd"
        orig_ampfile = data_dir + "/amp.grd"
        orig_corrfile = data_dir + "/corr.grd"
        orig_maskfile = data_dir + "/mask.grd"
        out_phasefile = outdir + "/phase.grd"
        out_phasefilt_file = outdir + "/phasefilt.grd"
        out_ampfile = outdir + "/amp.grd"
        out_corrfile = outdir + "/corr.grd"
        out_maskfile = outdir + "/mask.grd"

        # MAKE BINARY INTERFEROGRAMS
        readbin.write_gmtsar2roipac_phase(orig_phasefile, orig_phasefilt_file,
                                          orig_ampfile, infile,
                                          infile_filtered)

        # # # RUN THE FORTRAN
        print(
            "\nRunning the fortran code to remove atmospheric artifacts from interferogram."
        )
        subprocess.call(['cp', example_rsc, outdir + '/intf_sd.int.rsc'],
                        shell=False)
        print("flattentopo " + infile + " " + infile_filtered + " " +
              bin_demfile + " " + outfile + " " + outfile_filtered + " " +
              str(nfit) + " " + str(ivar) + " " + str(alt_ref) + " " +
              str(thresh_amp) + " " + stratfile + "\n")
        subprocess.call([
            "flattentopo", infile, infile_filtered, bin_demfile, outfile,
            outfile_filtered,
            str(nfit),
            str(ivar),
            str(alt_ref),
            str(thresh_amp), stratfile
        ],
                        shell=False)
        subprocess.call(['mv', 'ncycle_topo', outdir + '/ncycle_topo'],
                        shell=False)
        subprocess.call(['mv', 'ncycle_topo_az', outdir + '/ncycle_topo_az'],
                        shell=False)
        subprocess.call(['cp', orig_ampfile, out_ampfile], shell=False)
        subprocess.call(['cp', orig_corrfile, out_corrfile], shell=False)
        subprocess.call(['cp', orig_maskfile, out_maskfile], shell=False)

        # Output handling. First reading 1D arrays
        [real, imag] = readbin.read_binary_roipac_real_imag(outfile)
        [phase_out, _] = phase_math.real_imag2phase_amp(real, imag)
        [real, imag] = readbin.read_binary_roipac_real_imag(outfile_filtered)
        [phasefilt_out, _] = phase_math.real_imag2phase_amp(real, imag)
        # 1d arrays

        # Reshape grids into two-dimensional arrays
        phase_out_grd = np.reshape(phase_out, (length, width))
        phasefilt_out_grd = np.reshape(phasefilt_out, (length, width))

        # Write GRD files of output quantities
        [xdata_p, ydata_p] = read_any_grd(orig_phasefile)[0:2]
        [xdata_pf, ydata_pf,
         phasefilt_early] = read_any_grd(orig_phasefilt_file)
        netcdf_read_write.produce_output_netcdf(xdata_p, ydata_p,
                                                phase_out_grd, 'radians',
                                                out_phasefile)
        netcdf_read_write.produce_output_netcdf(xdata_pf, ydata_pf,
                                                phasefilt_out_grd, 'radians',
                                                out_phasefilt_file)

        # Making plot
        readbin.output_plots(phasefilt_early, phasefilt_out, width, length,
                             outdir + "/" + intf_name + "_corrected.eps")

    return
Code example #27
def inputs_grdfile(geocoded_insar_file):
    [xarray, yarray, LOS_array] = read_any_grd(geocoded_insar_file)
    LOS_array[np.where(LOS_array > 1e20)] = np.nan  # filter spurious values from the InSAR array
    if np.nanmean(xarray) > 180:
        xarray = np.subtract(xarray, 360)  # some files come in with lon=244 instead of -115; fixing that
    return [xarray, yarray, LOS_array]
Code example #28
def plot_two_general_grids(file1,
                           file2,
                           plotname,
                           vmin1=-20,
                           vmax1=5,
                           flip_sign1=False,
                           title1='',
                           scalelabel1='Velocity (mm/yr)',
                           vmin2=None,
                           vmax2=None,
                           flip_sign2=False,
                           title2='',
                           scalelabel2='Velocity (mm/yr)',
                           readfile=True,
                           invert_yaxis=True,
                           cmap='rainbow'):
    """
    A little function that plots two grid files side by side in subplots
    (they don't have to have the same registration, so no residuals are computed).
    If readfile=True, file1 and file2 are read from disk; otherwise they are treated as data arrays.
    """
    if readfile:
        data1 = netcdf_read_write.read_any_grd(file1)[2]
        data2 = netcdf_read_write.read_any_grd(file2)[2]
    else:
        data1 = file1
        data2 = file2
    if flip_sign1:
        data1 = -1 * data1
    if flip_sign2:
        data2 = -1 * data2
    if vmin2 is None:
        vmin2 = vmin1
    if vmax2 is None:
        vmax2 = vmax1

    # First figure
    fig, axarr = plt.subplots(1, 2, sharey='none', figsize=(15, 8), dpi=300)
    axarr[0].imshow(data1, vmin=vmin1, vmax=vmax1, cmap=cmap)
    axarr[0].tick_params(labelsize=16)
    axarr[0].set_title(title1, fontsize=20)
    if invert_yaxis:
        axarr[0].invert_yaxis()

    # Second figure
    axarr[1].imshow(data2, vmin=vmin2, vmax=vmax2, cmap=cmap)
    axarr[1].tick_params(labelsize=16)
    axarr[1].set_title(title2, fontsize=20)
    if invert_yaxis:
        axarr[1].invert_yaxis()

    # Colorbar #1
    _cbarax = fig.add_axes([0.15, 0.06, 0.1, 0.9], visible=False)
    color_boundary_object = matplotlib.colors.Normalize(vmin=vmin1, vmax=vmax1)
    custom_cmap = cm.ScalarMappable(norm=color_boundary_object, cmap=cmap)
    custom_cmap.set_array(np.arange(vmin1, vmax1, 0.1))
    cb = plt.colorbar(custom_cmap,
                      aspect=12,
                      fraction=0.2,
                      orientation='horizontal')
    cb.set_label(scalelabel1, fontsize=18)
    cb.ax.tick_params(labelsize=16)

    # Colorbar #2
    _cbarax = fig.add_axes([0.58, 0.06, 0.1, 0.9], visible=False)
    color_boundary_object = matplotlib.colors.Normalize(vmin=vmin2, vmax=vmax2)
    custom_cmap = cm.ScalarMappable(norm=color_boundary_object, cmap=cmap)
    custom_cmap.set_array(np.arange(vmin2, vmax2, 0.1))
    cb = plt.colorbar(custom_cmap,
                      aspect=12,
                      fraction=0.2,
                      orientation='horizontal')
    cb.set_label(scalelabel2, fontsize=18)
    cb.ax.tick_params(labelsize=16)

    plt.savefig(plotname)
    plt.close()
    return