def compute(myVelfield, MyParams):
    print("Computing strain via delaunay method.")
    z = np.array([myVelfield.elon, myVelfield.nlat])
    z = z.T
    tri = Delaunay(z)

    triangle_vertices = z[tri.simplices]
    trishape = np.shape(triangle_vertices)  # 516 x 3 x 2, for example
    print("Number of triangle elements: %d" % (trishape[0]))

    # We are going to solve for the velocity gradient tensor at the centroid of each triangle.
    centroids = []
    for i in range(trishape[0]):
        xcor_mean = np.mean([
            triangle_vertices[i, 0, 0], triangle_vertices[i, 1, 0],
            triangle_vertices[i, 2, 0]
        ])
        ycor_mean = np.mean([
            triangle_vertices[i, 0, 1], triangle_vertices[i, 1, 1],
            triangle_vertices[i, 2, 1]
        ])
        centroids.append([xcor_mean, ycor_mean])
    xcentroid = [x[0] for x in centroids]
    ycentroid = [x[1] for x in centroids]

    # Initialize arrays.
    I2nd = []
    rot = []
    max_shear = []
    e1 = []  # eigenvalues
    e2 = []
    v00 = []  # eigenvectors
    v01 = []
    v10 = []
    v11 = []
    dilatation = []

    # for each triangle:
    for i in range(trishape[0]):

        # Get the velocities of each vertex (VE1, VN1, VE2, VN2, VE3, VN3)
        # Get velocities for Vertex 1 (triangle_vertices[i,0,0] and triangle_vertices[i,0,1])
        xindex1 = np.where(myVelfield.elon == triangle_vertices[i, 0, 0])
        yindex1 = np.where(myVelfield.nlat == triangle_vertices[i, 0, 1])
        index1 = np.intersect1d(xindex1, yindex1)
        xindex2 = np.where(myVelfield.elon == triangle_vertices[i, 1, 0])
        yindex2 = np.where(myVelfield.nlat == triangle_vertices[i, 1, 1])
        index2 = np.intersect1d(xindex2, yindex2)
        xindex3 = np.where(myVelfield.elon == triangle_vertices[i, 2, 0])
        yindex3 = np.where(myVelfield.nlat == triangle_vertices[i, 2, 1])
        index3 = np.intersect1d(xindex3, yindex3)

        VE1 = myVelfield.e[index1[0]]
        VN1 = myVelfield.n[index1[0]]
        VE2 = myVelfield.e[index2[0]]
        VN2 = myVelfield.n[index2[0]]
        VE3 = myVelfield.e[index3[0]]
        VN3 = myVelfield.n[index3[0]]
        obs_vel = np.array([[VE1], [VN1], [VE2], [VN2], [VE3], [VN3]])

        # Get the distance between centroid and vertex (in km)
        dE1 = (triangle_vertices[i, 0, 0] - xcentroid[i]) * 111.0 * np.cos(
            np.deg2rad(ycentroid[i]))
        dE2 = (triangle_vertices[i, 1, 0] - xcentroid[i]) * 111.0 * np.cos(
            np.deg2rad(ycentroid[i]))
        dE3 = (triangle_vertices[i, 2, 0] - xcentroid[i]) * 111.0 * np.cos(
            np.deg2rad(ycentroid[i]))
        dN1 = (triangle_vertices[i, 0, 1] - ycentroid[i]) * 111.0
        dN2 = (triangle_vertices[i, 1, 1] - ycentroid[i]) * 111.0
        dN3 = (triangle_vertices[i, 2, 1] - ycentroid[i]) * 111.0

        Design_Matrix = np.array([[1, 0, dE1, dN1, 0,
                                   0], [0, 1, 0, 0, dE1, dN1],
                                  [1, 0, dE2, dN2, 0,
                                   0], [0, 1, 0, 0, dE2, dN2],
                                  [1, 0, dE3, dN3, 0, 0],
                                  [0, 1, 0, 0, dE3, dN3]])

        # Invert to get the components of the velocity gradient tensor.
        DMinv = inv(Design_Matrix)
        vel_grad = np.dot(DMinv, obs_vel)  # this is the money step.
        VE_centroid = vel_grad[0][0]
        VN_centroid = vel_grad[1][0]
        dVEdE = vel_grad[2][0]
        dVEdN = vel_grad[3][0]
        dVNdE = vel_grad[4][0]
        dVNdN = vel_grad[5][0]

        # The components that are easily computed
        [exx, exy, eyy,
         rotation] = strain_tensor_toolbox.compute_strain_components_from_dx(
             dVEdE, dVNdE, dVEdN, dVNdN)

        # Compute a number of values based on tensor properties.
        I2nd_tri = np.log10(
            np.abs(strain_tensor_toolbox.second_invariant(exx, exy, eyy)))
        I2nd.append(I2nd_tri)
        [e11, e22,
         v] = strain_tensor_toolbox.eigenvector_eigenvalue(exx, exy, eyy)

        e1.append(e11)
        e2.append(e22)
        rot.append(abs(rotation))
        max_shear.append((e11 - e22) / 2)
        v00.append(v[0][0])
        v10.append(v[1][0])
        v01.append(v[0][1])
        v11.append(v[1][1])
        dilatation.append(e11 + e22)

    return [
        xcentroid, ycentroid, triangle_vertices, I2nd, max_shear, rot, e1, e2,
        v00, v01, v10, v11, dilatation
    ]
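
# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): a minimal, self-contained
# illustration of the "money step" above -- recovering the velocity gradient
# tensor at a triangle centroid by solving the 6x6 design-matrix system.
# The vertex offsets (dE, dN) and velocities below are made-up numbers for
# demonstration only; in the real code they come from the Delaunay triangles.
# ---------------------------------------------------------------------------
import numpy as np

def velocity_gradient_sketch(dE, dN, VE, VN):
    """dE, dN: vertex offsets from the centroid (km); VE, VN: vertex velocities."""
    G = np.array([[1, 0, dE[0], dN[0], 0, 0],
                  [0, 1, 0, 0, dE[0], dN[0]],
                  [1, 0, dE[1], dN[1], 0, 0],
                  [0, 1, 0, 0, dE[1], dN[1]],
                  [1, 0, dE[2], dN[2], 0, 0],
                  [0, 1, 0, 0, dE[2], dN[2]]])
    d = np.array([VE[0], VN[0], VE[1], VN[1], VE[2], VN[2]])
    # Solution ordering follows the code above: [VE_centroid, VN_centroid, dVEdE, dVEdN, dVNdE, dVNdN]
    return np.linalg.solve(G, d)

print(velocity_gradient_sketch(dE=[-5.0, 3.0, 2.0], dN=[-4.0, -1.0, 5.0],
                               VE=[10.0, 10.3, 10.1], VN=[2.0, 2.2, 2.4]))
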
def compute(myVelfield, MyParams):
    print("Computing strain via Numpy Spline method.")

    interp_kind = 'linear'

    # Scipy returns a function that you can use on a new set of x,y pairs.
    f_east = interpolate.interp2d(myVelfield.elon, myVelfield.nlat, myVelfield.e, kind=interp_kind)
    f_north = interpolate.interp2d(myVelfield.elon, myVelfield.nlat, myVelfield.n, kind=interp_kind)

    # The new interpolation grid: a new set of points with some chosen spacing
    xarray = np.arange(MyParams.coord_box[0], MyParams.coord_box[1], MyParams.grid_inc)
    yarray = np.arange(MyParams.coord_box[2], MyParams.coord_box[3], MyParams.grid_inc)
    [X, Y] = np.meshgrid(xarray, yarray)

    # Evaluate the linear or cubic interpolation function at new points
    new_east = np.zeros(np.shape(X))
    new_north = np.zeros(np.shape(X))
    for i in range(len(yarray)):
        for j in range(len(xarray)):
            new_east[i][j] = f_east(xarray[j], yarray[i])  # only want to give the functions one point at a time.
            new_north[i][j] = f_north(xarray[j], yarray[i])

    # Grid increments
    typical_lat = float(MyParams.map_range[2])
    xinc = MyParams.grid_inc * 111.000 * np.cos(np.deg2rad(typical_lat))  # in km (not degrees)
    yinc = MyParams.grid_inc * 111.000  # in km (not degrees)

    # Computing the elements of the strain tensor from the interpolated velocity field.
    rot = np.zeros(np.shape(X))  # 2nd invariant of rotation rate tensor
    I2nd = np.zeros(np.shape(X))  # 2nd invariant of strain rate tensor
    max_shear = np.zeros(np.shape(X))  # max shear of strain rate tensor
    e1 = np.zeros(np.shape(X))  # maximum principal strain (array of float)
    e2 = np.zeros(np.shape(X))  # minimum principal strain (array of float)
    v00 = np.zeros(np.shape(X))  # eigenvectors (array of 2x2 matrices)
    v01 = np.zeros(np.shape(X))
    v10 = np.zeros(np.shape(X))
    v11 = np.zeros(np.shape(X))
    dilatation = np.zeros(np.shape(X))

    # the strain calculation
    for j in range(len(yarray) - 1):
        for i in range(len(xarray) - 1):
            up = new_east[j][i]
            vp = new_north[j][i]
            uq = new_east[j][i + 1]
            vq = new_north[j][i + 1]
            ur = new_east[j + 1][i]
            vr = new_north[j + 1][i]

            [dudx, dvdx, dudy, dvdy] = strain_tensor_toolbox.compute_displacement_gradients(up, vp, ur, vr, uq, vq, xinc, yinc)

            # The components that are easily computed
            # Units: nanostrain per year.
            [exx, exy, eyy, rotation] = strain_tensor_toolbox.compute_strain_components_from_dx(dudx, dvdx, dudy, dvdy)
            rot[j][i] = abs(rotation)

            # Compute a number of values based on tensor properties.
            I2nd[j][i] = np.log10(np.abs(strain_tensor_toolbox.second_invariant(exx, exy, eyy)))
            [e11, e22, v1] = strain_tensor_toolbox.eigenvector_eigenvalue(exx, exy, eyy)
            e1[j][i] = e11
            e2[j][i] = e22
            v00[j][i] = v1[0][0]
            v10[j][i] = v1[1][0]
            v01[j][i] = v1[0][1]
            v11[j][i] = v1[1][1]
            max_shear[j][i] = (e11 - e22) / 2
            dilatation[j][i] = e11 + e22

    print("Success computing strain via Numpy Spline method.\n")

    return [xarray, yarray, I2nd, max_shear, rot, e1, e2, v00, v01, v10, v11, dilatation]
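
# ---------------------------------------------------------------------------
# Hedged sketch (assumption, not the toolbox's actual code): the three-point
# stencil above passes a grid point p, its east neighbor q, and its north
# neighbor r.  A minimal forward-difference version of what
# strain_tensor_toolbox.compute_displacement_gradients could look like is
# shown below; the real toolbox function may differ in details and units.
# ---------------------------------------------------------------------------
def forward_difference_gradients_sketch(up, vp, ur, vr, uq, vq, xinc, yinc):
    """u = east velocity, v = north velocity; xinc, yinc are grid spacings in km."""
    dudx = (uq - up) / xinc  # east derivative from the east neighbor q
    dvdx = (vq - vp) / xinc
    dudy = (ur - up) / yinc  # north derivative from the north neighbor r
    dvdy = (vr - vp) / yinc
    return [dudx, dvdx, dudy, dvdy]
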
def compute(myVelfield, MyParams):
    print("Computing strain via gpsgridder method.")
    outfile = open("tempgps.txt", 'w')
    for i in range(len(myVelfield.n)):
        outfile.write("%f %f %f %f %f %f 0.0\n" %
                      (myVelfield.elon[i], myVelfield.nlat[i], myVelfield.e[i],
                       myVelfield.n[i], myVelfield.se[i], myVelfield.sn[i]))
    outfile.close()
    subprocess.call(
        "gmt gpsgridder tempgps.txt -R" + MyParams.map_range + " -I" +
        str(MyParams.grid_inc) +
        " -S0.5 -Fd0.01 -C0.0005 -Emisfitfile.txt -fg -r -Gnc4_%s.nc",
        shell=True)
    # makes a netcdf grid file
    # -R = range. -I = interval. -E prints the model and data fits at the input stations (very useful).
    # -S = poisson's ratio. -Fd = fudge factor. -C = eigenvalues below this value will be ignored.
    # -fg = flat earth approximation. -G = output netcdf files (x and y displacements).
    # You should experiment with Fd and C values to find something that you like (good fit without overfitting).
    # For Northern California, I like -Fd0.01 -C0.005, with -R-125/-121/38/42.2
    # (earlier I had -Ff0.1 -C0.005 -fg).

    # For large domains, GMT writes netcdf4 files instead of netcdf3. We must convert them all to netcdf3 for Python to read them.
    subprocess.call('nccopy -k classic nc4_u.nc gps_u.nc', shell=True)
    subprocess.call('nccopy -k classic nc4_v.nc gps_v.nc', shell=True)
    subprocess.call(['rm', 'tempgps.txt'], shell=False)
    subprocess.call(['rm', 'gmt.history'], shell=False)
    subprocess.call(['mv', 'misfitfile.txt', MyParams.outdir], shell=False)
    subprocess.call(['mv', 'nc4_u.nc', MyParams.outdir], shell=False)
    subprocess.call(['mv', 'nc4_v.nc', MyParams.outdir], shell=False)
    subprocess.call(['mv', 'gps_u.nc', MyParams.outdir], shell=False)
    subprocess.call(['mv', 'gps_v.nc', MyParams.outdir], shell=False)

    # Get ready to do strain calculation.
    file1 = MyParams.outdir + "gps_u.nc"
    file2 = MyParams.outdir + "gps_v.nc"
    [xdata, ydata,
     udata] = netcdf_io_functions.read_grd_xyz(file1, 'lon', 'lat', 'z')
    [vdata] = netcdf_io_functions.read_grd_z(file2, 'z')
    xinc = float(subprocess.check_output(
        'gmt grdinfo -M -C ' + file1 + ' | awk \'{print $8}\'', shell=True))  # the x-increment
    yinc = float(subprocess.check_output(
        'gmt grdinfo -M -C ' + file1 + ' | awk \'{print $9}\'', shell=True))  # the y-increment
    typical_lat = float(MyParams.map_range[2])
    xinc = xinc * 111.000 * np.cos(np.deg2rad(typical_lat))  # in km (not degrees)
    yinc = yinc * 111.000  # in km (not degrees)
    xdata = np.flipud(xdata)
    ydata = np.flipud(ydata)
    udata = np.flipud(udata)
    vdata = np.flipud(vdata)
    [ydim, xdim] = np.shape(udata)
    rot = np.zeros(np.shape(vdata))  # 2nd invariant of rotation rate tensor
    I2nd = np.zeros(np.shape(vdata))  # 2nd invariant of strain rate tensor
    max_shear = np.zeros(np.shape(vdata))  # max shear of strain rate tensor
    e1 = np.zeros(np.shape(vdata))  # maximum principal strain (array of float)
    e2 = np.zeros(np.shape(vdata))  # minimum principal strain (array of float)
    v00 = np.zeros(np.shape(vdata))  # eigenvectors (array of 2x2 matrices)
    v01 = np.zeros(np.shape(vdata))
    v10 = np.zeros(np.shape(vdata))
    v11 = np.zeros(np.shape(vdata))
    dilatation = np.zeros(np.shape(vdata))  # dilatation

    # the strain calculation
    for j in range(ydim - 1):
        for i in range(xdim - 1):
            up = udata[j][i]
            vp = vdata[j][i]
            uq = udata[j][i + 1]
            vq = vdata[j][i + 1]
            ur = udata[j + 1][i]
            vr = vdata[j + 1][i]

            [dudx, dvdx, dudy,
             dvdy] = strain_tensor_toolbox.compute_displacement_gradients(
                 up, vp, ur, vr, uq, vq, xinc, yinc)

            # The components that are easily computed
            # Units: nanostrain per year.
            [exx, exy, eyy, rotation
             ] = strain_tensor_toolbox.compute_strain_components_from_dx(
                 dudx, dvdx, dudy, dvdy)
            rot[j][i] = abs(rotation)

            # Compute a number of values based on tensor properties.
            I2nd[j][i] = np.log10(
                np.abs(strain_tensor_toolbox.second_invariant(exx, exy, eyy)))
            [e11, e22,
             v1] = strain_tensor_toolbox.eigenvector_eigenvalue(exx, exy, eyy)
            e1[j][i] = e11
            e2[j][i] = e22
            v00[j][i] = v1[0][0]
            v10[j][i] = v1[1][0]
            v01[j][i] = v1[0][1]
            v11[j][i] = v1[1][1]
            max_shear[j][i] = (e11 - e22) / 2
            dilatation[j][i] = e11 + e22

    return [
        xdata, ydata, I2nd, max_shear, rot, e1, e2, v00, v01, v10, v11,
        dilatation
    ]
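
# ---------------------------------------------------------------------------
# Hedged sketch (assumption): plain-numpy stand-ins for the two
# strain_tensor_toolbox helpers used in the loops above, to show what is being
# derived from (exx, exy, eyy).  The real toolbox may use different sign or
# ordering conventions, so treat this only as an illustration.
# ---------------------------------------------------------------------------
import numpy as np

def second_invariant_sketch(exx, exy, eyy):
    # One common 2D second invariant of the strain-rate tensor: I2 = exx*eyy - exy^2
    return exx * eyy - exy * exy

def eigenvector_eigenvalue_sketch(exx, exy, eyy):
    # Eigen-decomposition of the symmetric 2x2 strain-rate tensor.
    w, v = np.linalg.eigh(np.array([[exx, exy], [exy, eyy]]))
    e11, e22 = w[1], w[0]  # larger eigenvalue first (assumed convention)
    return e11, e22, v

# Derived quantities, as computed in the loops above:
#   max_shear  = (e11 - e22) / 2
#   dilatation = e11 + e22
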
def make_output_grids_from_strain_out(infile):
    ifile = open(infile, 'r')
    x = []
    y = []
    rotation = []
    exx = []
    exy = []
    eyy = []
    for line in ifile:
        temp = line.split()
        if 'index' in temp or 'longitude' in temp or 'deg' in temp:
            continue
        else:
            x.append(float(temp[0]))
            y.append(float(temp[1]))
            rotation.append(float(temp[7]))
            exx.append(float(temp[9]))
            exy.append(float(temp[11]))
            eyy.append(float(temp[13]))
    ifile.close()
    ax1 = set(x)
    ax2 = set(y)
    xlen = len(ax1)
    ylen = len(ax2)

    xaxis = sorted(ax1)
    yaxis = sorted(ax2)

    if xlen == 0 and ylen == 0:
        print("ERROR! No valid strains have been computed. Try again.")
        sys.exit(0)

    # Loop through x and y lists, find the index of the coordinates in the xaxis and yaxis sets,
    # Then place them into the 2d arrays.
    # Then go compute I2nd, eigenvectors and eigenvalues.

    I2nd = np.zeros((ylen, xlen))
    max_shear = np.zeros((ylen, xlen))
    rot = np.zeros((ylen, xlen))
    e1 = np.zeros((ylen, xlen))
    e2 = np.zeros((ylen, xlen))
    dilatation = np.zeros((ylen, xlen))
    v00 = np.zeros((ylen, xlen))
    v01 = np.zeros((ylen, xlen))
    v10 = np.zeros((ylen, xlen))
    v11 = np.zeros((ylen, xlen))
    print(np.shape(xaxis))
    print(np.shape(I2nd))

    for i in range(len(x)):
        xindex = xaxis.index(x[i])
        yindex = yaxis.index(y[i])
        rot[yindex][xindex] = rotation[i]
        I2nd[yindex][xindex] = np.log10(
            np.abs(
                strain_tensor_toolbox.second_invariant(exx[i], exy[i],
                                                       eyy[i])))
        [e11, e22, v1] = strain_tensor_toolbox.eigenvector_eigenvalue(
            exx[i], exy[i], eyy[i])
        e1[yindex][xindex] = e11
        e2[yindex][xindex] = e22
        v00[yindex][xindex] = v1[0][0]
        v10[yindex][xindex] = v1[1][0]
        v01[yindex][xindex] = v1[0][1]
        v11[yindex][xindex] = v1[1][1]
        dilatation[yindex][xindex] = e11 + e22
        max_shear[yindex][xindex] = (e11 - e22) / 2

    return [
        xaxis, yaxis, I2nd, max_shear, rot, e1, e2, v00, v01, v10, v11,
        dilatation
    ]
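
# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, with made-up points): the gridding pattern
# used above -- build sorted unique axes from the flat x/y lists, then place
# each value into a 2D array by looking up its axis indices.
# ---------------------------------------------------------------------------
import numpy as np

x_pts = [0.0, 0.1, 0.0, 0.1]
y_pts = [40.0, 40.0, 40.1, 40.1]
values = [1.0, 2.0, 3.0, 4.0]

xaxis_demo = sorted(set(x_pts))
yaxis_demo = sorted(set(y_pts))
grid = np.zeros((len(yaxis_demo), len(xaxis_demo)))
for k in range(len(x_pts)):
    grid[yaxis_demo.index(y_pts[k])][xaxis_demo.index(x_pts[k])] = values[k]
print(grid)  # rows follow yaxis_demo, columns follow xaxis_demo
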
def compute(myVelfield, MyParams):
    print("Computing strain via Hammond method.")
    z = np.array([myVelfield.elon, myVelfield.nlat])
    z = z.T
    tri = Delaunay(z)

    triangle_vertices = z[tri.simplices]
    trishape = np.shape(triangle_vertices)  # 516 x 3 x 2, for example
    print("Number of triangle elements: %d" % (trishape[0]))

    # We are going to solve for the velocity gradient tensor at the centroid of each triangle.
    centroids = []
    for i in range(trishape[0]):
        xcor_mean = np.mean([
            triangle_vertices[i, 0, 0], triangle_vertices[i, 1, 0],
            triangle_vertices[i, 2, 0]
        ])
        ycor_mean = np.mean([
            triangle_vertices[i, 0, 1], triangle_vertices[i, 1, 1],
            triangle_vertices[i, 2, 1]
        ])
        centroids.append([xcor_mean, ycor_mean])
    xcentroid = [x[0] for x in centroids]
    ycentroid = [x[1] for x in centroids]

    # Initialize arrays.
    I2nd = []
    rot = []
    max_shear = []
    e1 = []  # eigenvalues
    e2 = []
    v00 = []  # eigenvectors
    v01 = []
    v10 = []
    v11 = []
    dilatation = []  # dilatation = e1 + e2

    # for each triangle:
    for i in range(trishape[0]):

        # Get the velocities of each vertex (VE1, VN1, VE2, VN2, VE3, VN3)
        # Get velocities for Vertex 1 (triangle_vertices[i,0,0] and triangle_vertices[i,0,1])
        xindex1 = np.where(myVelfield.elon == triangle_vertices[i, 0, 0])
        yindex1 = np.where(myVelfield.nlat == triangle_vertices[i, 0, 1])
        index1 = int(np.intersect1d(xindex1, yindex1)[0])
        xindex2 = np.where(myVelfield.elon == triangle_vertices[i, 1, 0])
        yindex2 = np.where(myVelfield.nlat == triangle_vertices[i, 1, 1])
        index2 = int(np.intersect1d(xindex2, yindex2)[0])
        xindex3 = np.where(myVelfield.elon == triangle_vertices[i, 2, 0])
        yindex3 = np.where(myVelfield.nlat == triangle_vertices[i, 2, 1])
        index3 = int(np.intersect1d(xindex3, yindex3)[0])

        phi = np.array([
            triangle_vertices[i, 0, 0], triangle_vertices[i, 1, 0],
            triangle_vertices[i, 2, 0]
        ])
        theta = np.array([
            triangle_vertices[i, 0, 1], triangle_vertices[i, 1, 1],
            triangle_vertices[i, 2, 1]
        ])
        theta = [t - 90 for t in theta]
        u_phi = np.array(
            [myVelfield.e[index1], myVelfield.e[index2], myVelfield.e[index3]])
        u_theta = np.array(
            [myVelfield.n[index1], myVelfield.n[index2], myVelfield.n[index3]])
        u_theta = np.array([-u for u in u_theta])  # colatitude needs negative theta values.
        s_phi = np.array([
            myVelfield.se[index1], myVelfield.se[index2], myVelfield.se[index3]
        ])
        s_theta = np.array([
            myVelfield.sn[index1], myVelfield.sn[index2], myVelfield.sn[index3]
        ])

        # HERE WE PLUG IN BILL'S CODE!
        weight = 1
        paramsel = 0
        [
            e_phiphi, e_thetaphi, e_thetatheta, omega_r, U_theta, U_phi,
            s_omega_r, s_e_phiphi, s_e_thetaphi, s_e_thetatheta, s_U_theta,
            s_U_phi, chi2, OMEGA, THETA_p, PHI_p, s_OMEGA, s_THETA_p, s_PHI_p,
            r_PHITHETA, u_phi_p, u_theta_p
        ] = strain_sphere(phi, theta, u_phi, u_theta, s_phi, s_theta, weight,
                          paramsel)

        # print_all_values(e_phiphi,e_thetaphi,e_thetatheta,omega_r,U_theta,U_phi,s_omega_r,s_e_phiphi,s_e_thetaphi,s_e_thetatheta,s_U_theta,s_U_phi,chi2,OMEGA,THETA_p,PHI_p,s_OMEGA,s_THETA_p,s_PHI_p,r_PHITHETA,u_phi_p,u_theta_p);

        # The components that are easily computed
        # Units: nanostrain per year.
        exx = e_phiphi * 1e6
        exy = -e_thetaphi * 1e6
        eyy = e_thetatheta * 1e6

        # Compute a number of values based on tensor properties.
        I2nd_tri = np.log10(
            np.abs(strain_tensor_toolbox.second_invariant(exx, exy, eyy)))
        I2nd.append(I2nd_tri)
        rot.append(OMEGA * 1000 * 1000)
        [e11, e22,
         v] = strain_tensor_toolbox.eigenvector_eigenvalue(exx, exy, eyy)

        # The convention of this code returns negative eigenvalues compared to my other codes.
        e1.append(-e11)
        e2.append(-e22)
        max_shear.append((e11 - e22) / 2)
        v00.append(v[0][0])
        v10.append(v[1][0])
        v01.append(v[0][1])
        v11.append(v[1][1])
        dilatation.append(-e11 + -e22)

    print("Success computing strain via Hammond method.\n")

    return [
        xcentroid, ycentroid, triangle_vertices, I2nd, max_shear, rot, e1, e2,
        v00, v01, v10, v11, dilatation
    ]