def get_GridFSim(x1, y1, x2, y2, img1):
    '''Calculate estimated ice drift on the first image based on feature tracking vectors'''
    # initial drift inter-/extrapolation
    # linear triangulation
    x1Grid, y1Grid = np.meshgrid(range(img1.shape[1]), range(img1.shape[0]))
    x2GridFSim = griddata(np.array([y1, x1]).T, x2,
                          np.array([y1Grid, x1Grid]).T, method='linear').T
    y2GridFSim = griddata(np.array([y1, x1]).T, y2,
                          np.array([y1Grid, x1Grid]).T, method='linear').T

    # linear fit for the entire grid
    A = np.vstack([np.ones(len(x1)), x1, y1]).T
    # find B in x2 = B * [x1, y1]
    Bx = np.linalg.lstsq(A, x2)[0]
    By = np.linalg.lstsq(A, y2)[0]
    # calculate simulated x2sim = B * [x1, y1]
    x1GridF = x1Grid.flatten()
    y1GridF = y1Grid.flatten()
    A = np.vstack([np.ones(len(x1GridF)), x1GridF, y1GridF]).T
    x2GridFSim_lf = np.dot(A, Bx).reshape(img1.shape)
    y2GridFSim_lf = np.dot(A, By).reshape(img1.shape)

    # fill NaNs from the triangulation with the linear-fit values
    gpi = np.isnan(x2GridFSim)
    x2GridFSim[gpi] = x2GridFSim_lf[gpi]
    y2GridFSim[gpi] = y2GridFSim_lf[gpi]

    return x2GridFSim, y2GridFSim

def bin_confint_lookup(pc, nsamp, ci=.05):
    """Return the confidence interval from the lookup table.

    Inputs:
        pc - array (get back several cis) or single value (get back one ci) of percent corrects
        nsamp - number of trials used to obtain each pc
        ci - confidence level (e.g. 0.01, 0.05)

    Output:
        3xN array - first row is pc, last two rows are lower and upper ci
        as expected by pylab.errorbar
    """
    points = ci_table['points']
    values_lo = ci_table['values_lo']
    values_high = ci_table['values_high']

    from scipy.interpolate import griddata
    if pylab.isscalar(pc):
        pc = pylab.array([pc])
        nsamp = pylab.array([nsamp])
    ci_a = pylab.ones(pc.size) * ci
    xi = pylab.array((pc, nsamp, ci_a)).T

    low_ci = griddata(points, values_lo, xi, method='linear')
    high_ci = griddata(points, values_high, xi, method='linear')

    return pylab.array((pc, low_ci, high_ci))

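# A self-contained sketch of the kind of N-D table lookup bin_confint_lookup
# performs. The eight-corner table below is fabricated purely for illustration;
# a real ci_table would hold precomputed bootstrap results on a dense
# (pc, nsamp, ci) grid.
import numpy as np
from scipy.interpolate import griddata

demo_points = np.array([[pc, n, c] for pc in (0.5, 0.9)
                        for n in (20, 100) for c in (0.01, 0.1)])
demo_lo = np.linspace(0.3, 0.8, len(demo_points))  # placeholder lower bounds
demo_xi = np.array([[0.7, 60, 0.05]])  # query strictly inside the table's hull
print(griddata(demo_points, demo_lo, demo_xi, method='linear'))
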
def interpolateData(binaryDataFile):
    file = open(binaryDataFile, 'rb')
    if os.name == 'nt':
        rawTimeHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
        rawStressHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
        rawStrainHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
    elif os.name == 'posix':
        rawTimeHistory = numpy.array(pickle.load(file)).transpose()
        rawStressHistory = numpy.array(pickle.load(file)).transpose()
        rawStrainHistory = numpy.array(pickle.load(file)).transpose()

    timeHistory = numpy.linspace(0, simulationTime, numberOfSteps + 1)
    stressHistory = numpy.empty([3, numberOfSteps + 1])
    strainHistory = numpy.empty([3, numberOfSteps + 1])
    for i in range(3):
        stressHistory[i, :] = griddata(rawTimeHistory, rawStressHistory[i], timeHistory)
        strainHistory[i, :] = griddata(rawTimeHistory, rawStrainHistory[i], timeHistory)
    stressHistory = stressHistory.transpose()
    strainHistory = strainHistory.transpose()

    with open('output.dat', 'w') as f:
        f.write('time S11 S22 S12 LE11 LE22 LE12\n')
        for i in range(len(timeHistory)):
            f.write(str(timeHistory[i]) + ' ')
            for j in range(len(stressHistory[i])):
                f.write(str(stressHistory[i][j]) + ' ')
            for j in range(len(strainHistory[i])):
                f.write(str(strainHistory[i][j]) + ' ')
            f.write('\n')

def rasterize(geometry, points):
    """ Create array. """
    envelope = geometry.GetEnvelope()
    # px, py, pz = points.transpose()
    x1 = 4 * math.floor(envelope[0] / 4)
    y1 = 4 * math.floor(envelope[2] / 4)
    x2 = 4 * math.ceil(envelope[1] / 4)
    y2 = 4 * math.ceil(envelope[3] / 4)

    geo_transform = x1, A, 0, y2, 0, D
    array = np.full((4 * (y2 - y1), 4 * (x2 - x1)), NO_DATA_VALUE, 'f4')
    grid = tuple(np.mgrid[y2 + D / 2:y1 + D / 2:D,
                          x1 + A / 2:x2 + A / 2:A][::-1])

    # interpolate
    args = points[:, :2], points[:, 2], grid
    linear = interpolate.griddata(*args, method='linear')
    nearest = interpolate.griddata(*args, method='nearest')
    array = np.where(np.isnan(linear), nearest, linear).astype('f4')

    # clip and return
    kwargs = {
        'array': array[np.newaxis],
        'projection': PROJECTION,
        'no_data_value': NO_DATA_VALUE,
        'geo_transform': geo_transform,
    }
    clip(kwargs=kwargs, geometry=geometry)
    return kwargs

def get_apriori(self, latres=0.25, lonres=0.3125):
    '''
    Read GC HCHO sigma shape factor and regrid to lat/lon res.
    Temporal resolution is one month.
    Inputs:
        latres, lonres for the resolution of the GC 2x2.5 HCHO columns
        to be regridded onto
    '''
    assert False, "Method is old and wrong currently"

    # new latitudes/longitudes we interpolate to
    newlats = np.arange(-90, 90, latres) + latres / 2.0
    newlons = np.arange(-180, 180, lonres) + lonres / 2.0

    # Mesh[lat,lon]
    mlons, mlats = np.meshgrid(self.lons, self.lats)
    mnewlons, mnewlats = np.meshgrid(newlons, newlats)

    ## Get sigma apriori and regrid it
    newS_s = np.zeros([72, len(newlats), len(newlons)])
    newSigma = np.zeros([72, len(newlats), len(newlons)])

    # interpolate at each pressure level...
    for ii in range(72):
        newS_s[ii, :, :] = griddata((mlats.ravel(), mlons.ravel()),
                                    self.Shape_s[ii, :, :].ravel(),
                                    (mnewlats, mnewlons), method='nearest')
        newSigma[ii, :, :] = griddata((mlats.ravel(), mlons.ravel()),
                                      self.sigmas[ii, :, :].ravel(),
                                      (mnewlats, mnewlons), method='nearest')

    # return the normalised sigma apriori used to recalculate AMF
    return newS_s, newlats, newlons, newSigma

def __init__(self, vmec_file, ntheta=None, nzeta=None, nr=32, nz=32):
    # Only needed here
    from scipy.interpolate import griddata, RegularGridInterpolator

    self.read_vmec_file(vmec_file, ntheta, nzeta)

    self.nr = nr
    self.nz = nz

    # Make a new rectangular grid in (R,Z)
    self.r_1D = np.linspace(self.r_stz.min(), self.r_stz.max(), nr)
    self.z_1D = np.linspace(self.z_stz.min(), self.z_stz.max(), nz)
    self.R_2D, self.Z_2D = np.meshgrid(self.r_1D, self.z_1D, indexing='ij')

    # First, interpolate the magnetic field components onto (R,Z)
    self.br_rz = np.zeros((nr, nz, self.nzeta))
    self.bz_rz = np.zeros((nr, nz, self.nzeta))
    self.bphi_rz = np.zeros((nr, nz, self.nzeta))
    # No need to interpolate in zeta, so do this one slice at a time
    for k, (br, bz, bphi, r, z) in enumerate(zip(self.br.T, self.bz.T, self.bphi.T,
                                                 self.r_stz.T, self.z_stz.T)):
        points = np.column_stack((r.flatten(), z.flatten()))
        self.br_rz[..., k] = griddata(points, br.flatten(), (self.R_2D, self.Z_2D),
                                      method='linear', fill_value=0.0)
        self.bz_rz[..., k] = griddata(points, bz.flatten(), (self.R_2D, self.Z_2D),
                                      method='linear', fill_value=0.0)
        self.bphi_rz[..., k] = griddata(points, bphi.flatten(), (self.R_2D, self.Z_2D),
                                        method='linear', fill_value=1.0)

    # Now we have a regular grid in (R,Z,phi) (as zeta==phi), so
    # we can get an interpolation function in 3D
    points = (self.r_1D, self.z_1D, self.zeta)
    self.br_interp = RegularGridInterpolator(points, self.br_rz,
                                             bounds_error=False, fill_value=0.0)
    self.bz_interp = RegularGridInterpolator(points, self.bz_rz,
                                             bounds_error=False, fill_value=0.0)
    self.bphi_interp = RegularGridInterpolator(points, self.bphi_rz,
                                               bounds_error=False, fill_value=1.0)

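# A minimal sketch (toy data, made-up field) of the two-stage pattern used in
# __init__ above: scattered (r, z) samples go to a regular grid per zeta slice
# via griddata, then one fast RegularGridInterpolator covers the regular 3D grid.
import numpy as np
from scipy.interpolate import griddata, RegularGridInterpolator

rng = np.random.default_rng(0)
pts = rng.uniform(0., 1., size=(200, 2))  # scattered (r, z) points
r_1d = np.linspace(0., 1., 16)
z_1d = np.linspace(0., 1., 16)
R, Z = np.meshgrid(r_1d, z_1d, indexing='ij')
zeta = np.linspace(0., 2. * np.pi, 8, endpoint=False)

field = np.zeros((16, 16, zeta.size))
for k, phase in enumerate(zeta):
    vals = np.sin(3. * pts[:, 0] + phase) * pts[:, 1]  # toy field per slice
    field[..., k] = griddata(pts, vals, (R, Z), method='linear', fill_value=0.0)

interp = RegularGridInterpolator((r_1d, z_1d, zeta), field,
                                 bounds_error=False, fill_value=0.0)
print(interp([[0.5, 0.5, zeta[3]]]))
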
def undistort_image(self, img, Kundistortion=None):
    """
    Transform grayscale image such that radial distortion is removed.

    :param img: input image
    :type img: np.ndarray, shape=(n, m) or (n, m, 3)
    :param Kundistortion: camera matrix for undistorted view, None for self.K
    :type Kundistortion: array-like, shape=(3, 3)
    :return: transformed image
    :rtype: np.ndarray, shape=(n, m) or (n, m, 3)
    """
    if Kundistortion is None:
        Kundistortion = self.K
    if self.calibration_type == 'opencv':
        return cv2.undistort(img, self.K, self.opencv_dist_coeff,
                             newCameraMatrix=Kundistortion)
    elif self.calibration_type == 'opencv_fisheye':
        return cv2.fisheye.undistortImage(img, self.K, self.opencv_dist_coeff,
                                          Knew=Kundistortion)
    else:
        xx, yy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))
        img_coords = np.array([xx.ravel(), yy.ravel()])
        y_l = self.undistort(img_coords, Kundistortion)
        if img.ndim == 2:
            return griddata(y_l.T, img.ravel(), (xx, yy), fill_value=0, method='linear')
        else:
            channels = [griddata(y_l.T, img[:, :, i].ravel(), (xx, yy),
                                 fill_value=0, method='linear')
                        for i in range(img.shape[2])]
            return np.dstack(channels)

def read_movie_data(filename):
    filename = "OUTPUT_FILES/" + filename
    x, y, vx = numpy.loadtxt(filename + '.E.xyz', usecols=(0, 1, 2), unpack=True)
    z = numpy.zeros(len(x))
    x, y, vy = numpy.loadtxt(filename + '.N.xyz', usecols=(0, 1, 2), unpack=True)
    x, y, vz = numpy.loadtxt(filename + '.Z.xyz', usecols=(0, 1, 2), unpack=True)

    max_x = numpy.amax(x)
    min_x = numpy.amin(x)
    num_pixels = 1000
    step = (max_x - min_x) / num_pixels
    xs = numpy.arange(min(x), max(x), step)
    ys = numpy.arange(min(y), max(y), step)
    X, Y = numpy.meshgrid(xs, ys)
    vxs = griddata((x, y), vx, (X, Y), method='linear')
    vys = griddata((x, y), vy, (X, Y), method='linear')
    vzs = griddata((x, y), vz, (X, Y), method='linear')
    zs = griddata((x, y), z, (X, Y), method='linear')
    pgv = numpy.maximum(numpy.abs(vxs), numpy.abs(vys))
    pgv = numpy.maximum(pgv, numpy.abs(vzs))
    pgv.shape = vxs.shape
    ext = compute_extreme_val(vx, vy, vz)
    gc.collect()
    return X, Y, zs, vxs, vys, vzs, pgv, step, ext

def get_reference_bim(a, t0=0, x_c=0, x0=15, verbose=True):
    if type(t0) == list:
        # recurse over the list of times (the call must match this function's name)
        return np.array([r for r in map(get_reference_bim, repeat(a), t0, repeat(x_c))])
    if verbose:
        print('Getting a reference solution for a={} from BIM data'.format(a))
    numRefDir = os.path.join(os.environ['HOME'], 'work/soliton/fullPotentialSolution')
    if not os.path.exists(numRefDir):
        sys.exit('Numerical reference directory does not exist: ' + numRefDir)
    x_c = x_c - solitonVelBIM[a] * t0 - x0
    N = 200
    line = (np.ones(N) * x_c, np.linspace(-1, a, N))
    u, ext = postprocess.readGphov(os.path.join(numRefDir, str(a), 'u'))
    v, ext = postprocess.readGphov(os.path.join(numRefDir, str(a), 'v'))
    grid_x, grid_y = np.mgrid[ext[0]:ext[1]:u.shape[1] * 1j,
                              ext[2]:ext[3]:u.shape[0] * 1j]
    u = u.transpose()
    v = v.transpose()
    ux_sampled = griddata((grid_x.flatten(), grid_y.flatten()), u.flatten(),
                          line, method='linear', fill_value=0)
    uy_sampled = griddata((grid_x.flatten(), grid_y.flatten()), v.flatten(),
                          line, method='linear', fill_value=0)
    return np.array(line).transpose(), np.array([ux_sampled, uy_sampled]).transpose()

def plot(x, y, field, filename, c=200):
    plt.figure()
    # define grid
    xi = np.linspace(min(x), max(x), 100)
    yi = np.linspace(min(y), max(y), 100)
    # grid the data (note: the second call now actually uses 'cubic',
    # matching the variable name and the plot title)
    si_lin = griddata((x, y), field, (xi[None, :], yi[:, None]), method='linear')
    si_cub = griddata((x, y), field, (xi[None, :], yi[:, None]), method='cubic')
    print(np.min(field))
    print(np.max(field))

    plt.subplot(211)
    # contour the gridded data, plotting dots at the randomly spaced data points
    CS = plt.contour(xi, yi, si_lin, c, linewidths=0.5, colors='k')
    CS = plt.contourf(xi, yi, si_lin, c, cmap=plt.cm.jet)
    plt.colorbar()  # draw colorbar
    # plot data points
    # plt.scatter(x, y, marker='o', c='b', s=5)
    plt.xlim(min(x), max(x))
    plt.ylim(min(y), max(y))
    plt.title('Lineaarinen interpolointi')  # Finnish: 'Linear interpolation'
    #plt.tight_layout()

    plt.subplot(212)
    CS = plt.contour(xi, yi, si_cub, c, linewidths=0.5, colors='k')
    CS = plt.contourf(xi, yi, si_cub, c, cmap=plt.cm.jet)
    plt.colorbar()  # draw colorbar
    # plt.scatter(x, y, marker='o', c='b', s=5)
    plt.xlim(min(x), max(x))
    plt.ylim(min(y), max(y))
    plt.title('Kuubinen interpolointi')  # Finnish: 'Cubic interpolation'
    plt.savefig(filename)

def make_grid(points, values, grid, method=None):
    """Abstraction of two different versions of griddata

    points: Nx2 array of points where data is known
    values: corresponding values
    grid: Tuple of X, Y - Regular grid (e.g. obtained from meshgrid)
    """
    if griddata_version == 'scipy':
        if method is None:
            m = 'cubic'
        else:
            m = method
        return griddata(points, values, grid, method=m)
    elif griddata_version == 'pylab':
        if method is None:
            m = 'nn'
        else:
            m = method
        x = points[:, 0]
        y = points[:, 1]  # fixed: was points[:, 0], which duplicated x
        z = values
        X, Y = grid
        return griddata(x, y, z, X, Y, interp=m)

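# Hedged usage sketch for make_grid, assuming the module-level global
# griddata_version is set to 'scipy' elsewhere in this module:
#
#   import numpy as np
#   pts = np.random.rand(100, 2)
#   vals = np.sin(pts[:, 0]) * pts[:, 1]
#   X, Y = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
#   Z_cubic = make_grid(pts, vals, (X, Y))                   # 'cubic' by default
#   Z_near = make_grid(pts, vals, (X, Y), method='nearest')
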
def plot_QU_gd(x, y, Q, U, irad, Req):
    """ using griddata """
    fig = _plt.figure()
    lins, cols = (1, 2)
    gs = _gridspec.GridSpec(lins, cols)
    axq = _plt.subplot(gs[0, 0])
    axu = _plt.subplot(gs[0, 1])

    xmin = _np.min(x) / Req
    xmax = _np.max(x) / Req
    ymin = _np.min(y) / Req
    ymax = _np.max(y) / Req
    xx, yy = _np.meshgrid(_np.linspace(xmin, xmax, 32),
                          _np.linspace(ymin, ymax, 32)[::-1])
    yo = y * _np.cos(irad)
    q = _interpolate.griddata(_np.array([x, yo]).T / Req, Q,
                              _np.array([xx.flatten(), yy.flatten()]).T)
    u = _interpolate.griddata(_np.array([x, yo]).T / Req, U,
                              _np.array([xx.flatten(), yy.flatten()]).T)
    axq.imshow(q.reshape(32, 32), origin='lower', extent=[xmin, xmax, ymin, ymax])
    axu.imshow(u.reshape(32, 32), origin='lower', extent=[xmin, xmax, ymin, ymax])
    return fig, [axq, axu]

def scipy_stuff():
    from scipy.interpolate import griddata
    from matplotlib import pylab
    import pickle

    print("loading points")
    points, x_diff, y_diff = pickle.load(open("temp_data.pickle", "rb"))
    y_pts, x_pts = zip(*points)

    print("Creating grid points")
    grid_points = []
    for j in range(2500):
        for i in range(2500):
            grid_points.append((j, i))

    print("Gridding data")
    x_grid = griddata(points, x_diff, grid_points)
    y_grid = griddata(points, y_diff, grid_points)
    x_grid.shape = (2500, 2500)
    y_grid.shape = (2500, 2500)

    print("Plotting")
    pylab.subplot(3, 1, 1)
    pylab.imshow(x_grid)
    pylab.subplot(3, 1, 2)
    pylab.imshow(y_grid)
    pylab.subplot(3, 1, 3)
    pylab.scatter(x_pts, y_pts)
    pylab.show()

def interpolateData(binaryDataFile, sName):
    file = open(binaryDataFile, 'rb')
    if os.name == 'nt':
        rawTimeHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
        rawStressHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
        rawStrainHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
    elif os.name == 'posix':
        rawTimeHistory = numpy.array(pickle.load(file)).transpose()
        rawStressHistory = numpy.array(pickle.load(file)).transpose()
        rawStrainHistory = numpy.array(pickle.load(file)).transpose()

    timeHistory = numpy.linspace(0, simulationTime, numberOfSteps + 1)
    stressHistory = numpy.empty([3, numberOfSteps + 1])
    strainHistory = numpy.empty([3, numberOfSteps + 1])
    for i in range(3):
        stressHistory[i, :] = griddata(rawTimeHistory, rawStressHistory[i], timeHistory)
        strainHistory[i, :] = griddata(rawTimeHistory, rawStrainHistory[i], timeHistory)
    stressHistory = stressHistory.transpose()
    strainHistory = strainHistory.transpose()

    bundle = [timeHistory, stressHistory, strainHistory]
    bundleFileName = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  os.pardir, 'fittedHistory',
                                  sName + '_' + abaqusMaterial + '_fittedHistory.pkl')
    with open(bundleFileName, 'ab') as fittedFile:
        pickle.dump(bundle, fittedFile)
    return bundle

def contourf_interpolate_data(all_points, data, xlabel='', ylabel='', title='',
                              interpolation_numpoints=200, interpolation_method='linear',
                              mask_when_nearest=True, contour_numlevels=20,
                              show_scatter=True, show_colorbar=True, fignum=None,
                              ax_handle=None, mask_x_condition=None,
                              mask_y_condition=None, log_scale=False):
    '''Take (x,y) and z tuples, construct an interpolation with them and plot them nicely.

    all_points: Nx2
    data: Nx1
    mask_when_nearest: trick to hide points outside the convex hull of points,
                       even when using the 'nearest' method
    '''
    assert all_points.shape[1] == 2, "Give a Nx2 matrix for all_points"

    # Construct the interpolation
    param1_space_int = np.linspace(all_points[:, 0].min(), all_points[:, 0].max(),
                                   interpolation_numpoints)
    param2_space_int = np.linspace(all_points[:, 1].min(), all_points[:, 1].max(),
                                   interpolation_numpoints)

    data_interpol = spint.griddata(all_points, data,
                                   (param1_space_int[None, :], param2_space_int[:, None]),
                                   method=interpolation_method)

    if interpolation_method == 'nearest' and mask_when_nearest:
        # Mask the points outside of the convex hull: the linear interpolation
        # has NaNs on points outside of the convex hull of all_points
        data_interpol_lin = spint.griddata(all_points, data,
                                           (param1_space_int[None, :],
                                            param2_space_int[:, None]),
                                           method='linear')
        data_interpol[np.isnan(data_interpol_lin)] = np.nan

    # Mask it based on some conditions
    if mask_x_condition is not None:
        data_interpol[mask_x_condition(param1_space_int), :] = 0.0
    if mask_y_condition is not None:
        data_interpol[:, mask_y_condition(param2_space_int)] = 0.0

    # Plot it
    if ax_handle is None:
        f = plt.figure(fignum)
        ax_handle = f.add_subplot(111)
    else:
        f = ax_handle.get_figure()
        f.clf()
        ax_handle = f.add_subplot(111)

    if log_scale:
        cs = ax_handle.contourf(param1_space_int, param2_space_int, data_interpol,
                                contour_numlevels, locator=plttic.LogLocator())
    else:
        cs = ax_handle.contourf(param1_space_int, param2_space_int, data_interpol,
                                contour_numlevels)

    ax_handle.set_xlabel(xlabel)
    ax_handle.set_ylabel(ylabel)
    ax_handle.set_title(title)

    if show_scatter:
        ax_handle.scatter(all_points[:, 0], all_points[:, 1], marker='o', c='b', s=5)

    ax_handle.set_xlim(param1_space_int.min(), param1_space_int.max())
    ax_handle.set_ylim(param2_space_int.min(), param2_space_int.max())

    if show_colorbar:
        f.colorbar(cs)

    return ax_handle

def test_imshow_heatmap():
    from scipy.interpolate import griddata
    from matplotlib import pyplot as plt

    mesh3D = mesh(200)
    mesh2D = proj_to_2D(mesh3D)

    data = np.zeros((3, 3))
    data[0, 1] += 2

    vals = np.exp(log_dirichlet_density(mesh3D, 2., data=data.sum(0)))
    temp = log_censored_dirichlet_density(mesh3D, 2., data=data)
    censored_vals = np.exp(temp - temp.max())

    xi = np.linspace(-1, 1, 1000)
    yi = np.linspace(-0.5, 1, 1000)

    plt.figure()
    plt.imshow(griddata((mesh2D[:, 0], mesh2D[:, 1]), vals,
                        (xi[None, :], yi[:, None]), method='cubic'))
    plt.axis('off')
    plt.title('uncensored likelihood')

    plt.figure()
    plt.imshow(griddata((mesh2D[:, 0], mesh2D[:, 1]), censored_vals,
                        (xi[None, :], yi[:, None]), method='cubic'))
    plt.axis('off')
    plt.title('censored likelihood')

def interpolate_data_2d(all_points, data, param1_space_int=None, param2_space_int=None,
                        interpolation_numpoints=200, interpolation_method='linear',
                        mask_when_nearest=True, show_scatter=True, show_colorbar=True,
                        mask_x_condition=None, mask_y_condition=None):
    # Construct the interpolation
    if param1_space_int is None:
        param1_space_int = np.linspace(all_points[:, 0].min(), all_points[:, 0].max(),
                                       interpolation_numpoints)
    if param2_space_int is None:
        param2_space_int = np.linspace(all_points[:, 1].min(), all_points[:, 1].max(),
                                       interpolation_numpoints)

    data_interpol = spint.griddata(all_points, data,
                                   (param1_space_int[None, :], param2_space_int[:, None]),
                                   method=interpolation_method)

    if interpolation_method == 'nearest' and mask_when_nearest:
        # Mask the points outside of the convex hull: the linear interpolation
        # has NaNs on points outside of the convex hull of all_points
        data_interpol_lin = spint.griddata(all_points, data,
                                           (param1_space_int[None, :],
                                            param2_space_int[:, None]),
                                           method='linear')
        data_interpol[np.isnan(data_interpol_lin)] = np.nan

    # Mask it based on some conditions
    if mask_x_condition is not None:
        data_interpol[mask_x_condition(param1_space_int), :] = 0.0
    if mask_y_condition is not None:
        data_interpol[:, mask_y_condition(param2_space_int)] = 0.0

    return data_interpol

def velovect(u1, u2, d, minvel=1e-40, nvect=None, scalevar=None, scale=100,
             color='k', fig=None):
    '''Plots normalized velocity vectors'''
    if fig is None:
        ax = plt.gca()
    else:
        ax = fig.ax

    CC = d.getCenterPoints()
    n = np.sqrt(u1**2 + u2**2)
    # remove zero velocity:
    m = n < minvel
    vr = np.ma.filled(np.ma.masked_array(u1 / n, m), 0.)
    vz = np.ma.filled(np.ma.masked_array(u2 / n, m), 0.)
    if scalevar is not None:
        vr = vr * scalevar
        vz = vz * scalevar

    if nvect is None:
        Q = ax.quiver(CC[:, 0], CC[:, 1], vr, vz, pivot='middle', width=1e-3,
                      minlength=0., scale=scale, headwidth=6)
    else:
        # regrid the data (complex step counts give mgrid the number of samples):
        tmp0 = complex(0, nvect[0])
        tmp1 = complex(0, nvect[1])
        grid_r, grid_z = np.mgrid[ax.get_xlim()[0]:ax.get_xlim()[1]:tmp0,
                                  ax.get_ylim()[0]:ax.get_ylim()[1]:tmp1]
        grid_vr = griddata(CC, vr, (grid_r, grid_z), method='nearest')
        grid_vz = griddata(CC, vz, (grid_r, grid_z), method='nearest')
        Q = ax.quiver(grid_r, grid_z, grid_vr, grid_vz, pivot='middle', width=2e-3,
                      minlength=minvel, scale=scale, headwidth=10, headlength=10,
                      color=color, edgecolor=color, rasterized=True)
    plt.draw()
    return Q

def mesh2grid(v, mesh):
    """ Interpolates from unstructured coordinates (mesh) to
        structured coordinates (grid)
    """
    x = mesh[:, 0]
    z = mesh[:, 1]
    lx = x.max() - x.min()
    lz = z.max() - z.min()
    nn = v.size

    nx = int(np.around(np.sqrt(nn * lx / lz)))  # cast to int for linspace/reshape
    nz = int(np.around(np.sqrt(nn * lz / lx)))
    dx = lx / nx
    dz = lz / nz

    # construct structured grid
    x = np.linspace(x.min(), x.max(), nx)
    z = np.linspace(z.min(), z.max(), nz)
    X, Z = np.meshgrid(x, z)
    grid = stack(X.flatten(), Z.flatten())

    # interpolate to structured grid
    V = _interp.griddata(mesh, v, grid, 'linear')

    # workaround edge issues: fill NaNs (outside the convex hull)
    # with nearest-neighbour values
    if np.any(np.isnan(V)):
        W = _interp.griddata(mesh, v, grid, 'nearest')
        for i in np.where(np.isnan(V)):
            V[i] = W[i]

    V = np.reshape(V, (nz, nx))
    return V, grid

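# Self-contained sketch of the edge workaround used in mesh2grid: 'linear'
# griddata leaves NaNs outside the convex hull of the mesh, which are then
# replaced with 'nearest' values. Toy data only.
import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(1)
mesh_demo = rng.uniform(0.1, 0.9, size=(50, 2))
v_demo = mesh_demo[:, 0] + mesh_demo[:, 1]
X, Z = np.meshgrid(np.linspace(0., 1., 40), np.linspace(0., 1., 30))
V = griddata(mesh_demo, v_demo, (X, Z), method='linear')  # NaN outside hull
nan_mask = np.isnan(V)
V[nan_mask] = griddata(mesh_demo, v_demo, (X, Z), method='nearest')[nan_mask]
assert not np.any(np.isnan(V))
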
def autocorr(A, B, pointsA, pointsB, nregrid, rrange=[0.0, 1.5e18],
             phirange=[0.0, 6.283185307179586], zrange=[-1.5e18, 1.5e18]):
    """Calculates the angular average of <a(t)b(t+s)>"""
    print("=== Obtaining correlation ===")
    # create r_i, phi_j and z_k arrays:
    ri = np.linspace(rrange[0], rrange[1], nregrid[0])
    phij = np.linspace(phirange[0], phirange[1], nregrid[1])
    zk = np.linspace(zrange[0], zrange[1], nregrid[2])

    (xijk, yijk, zijk) = cylKernel(ri, phij, zk, np.array(nregrid, dtype=np.int32))

    # griddata to points:
    dataA = griddata((pointsA[:, 0], pointsA[:, 1], pointsA[:, 2]),
                     np.array(A, dtype=np.float64), (xijk, yijk, zijk),
                     method="nearest")
    dataB = griddata((pointsB[:, 0], pointsB[:, 1], pointsB[:, 2]),
                     np.array(B, dtype=np.float64), (xijk, yijk, zijk),
                     method="nearest")

    correlation = autocorrKernel(dataA, dataB, np.array(nregrid, dtype=np.int32))
    print("=== Done with correlation ===")
    return np.ma.masked_array(correlation, np.isnan(correlation))

def match_planting_harvest(self, planting_filename, harvest_filename):
    # Load both planting and harvest files
    self.planting_dataframe = pandas.read_csv(planting_filename, delimiter=',')
    self.harvest_dataframe = pandas.read_csv(harvest_filename, delimiter=',')

    # Interpolate planting data for the harvest lat/longs.
    # Since we have a 2D grid and continuous values, perform bilinear
    # interpolation, which will look smoother than nearest neighbor
    # interpolation. However, "variety" is categorical and thus can't be
    # bilinearly interpolated, so for it we use nearest neighbor instead.
    # Interpolation turns out to be a common enough function that scipy provides it.
    gd_linear = interpolate.griddata(self.planting_dataframe.values[:, :2],
                                     self.planting_dataframe.values[:, 3:],
                                     self.harvest_dataframe.values[:, :2])
    gd_nearest = interpolate.griddata(self.planting_dataframe.values[:, :2],
                                      self.planting_dataframe.values[:, 2:3],
                                      self.harvest_dataframe.values[:, :2],
                                      method='nearest')
    interpolated_columns = self.harvest_dataframe.columns.append(
        self.planting_dataframe.columns[2:])
    interpolated_array = numpy.hstack((self.harvest_dataframe.values,
                                       gd_nearest, gd_linear))
    self.interpolated_dataframe = pandas.DataFrame(
        interpolated_array, columns=interpolated_columns).dropna(how='any')

    # If we just want to interpolate all columns as nearest neighbor, uncomment:
    # gd = interpolate.griddata(self.planting_dataframe.values[:, :2],
    #                           self.planting_dataframe.values[:, 2:],
    #                           self.harvest_dataframe.values[:, :2], method='nearest')
    # interpolated_array = numpy.hstack((self.harvest_dataframe.values, gd))
    # self.interpolated_dataframe = pandas.DataFrame(interpolated_array,
    #                                                columns=interpolated_columns)

    # Create test and validation sets
    self.train_ylabel, self.test_ylabel, self.train_Xdata, self.test_Xdata = \
        cross_validation.train_test_split(self.interpolated_dataframe.values[:, 2:3],
                                          self.interpolated_dataframe.values[:, 4:-1])
    return self.interpolated_dataframe

def interp_exp_f(fname, out_dir):
    """ Used to interpolate data from experiment F. """
    print(" Beginning interpolation of " + fname)

    # The variables from the data
    print(" Reading data....")
    x, y, z_s, v_x, v_y, v_z = np.loadtxt(fname, unpack=True)
    #v_norm = np.sqrt(v_x**2 + v_y**2)
    res = 40  #int(fname.split(os.sep)[-1][5:8])

    # The given points
    x_pts = np.asarray(sorted(set(x)))
    y_pts = np.asarray(sorted(set(y)))
    points = (x, y)

    # The points we want
    x_out = np.arange(-50, 50.0001, 100.0 / res)
    y_out = np.arange(-50, 50.0001, 100.0 / res)
    out_points = [[i, j] for i in x_out for j in y_out]
    x_out = np.transpose(out_points)[0]
    y_out = np.transpose(out_points)[1]

    # Interpolate each list separately
    print(" Interpolating data....")
    z_s_i = interpolate.griddata(points, z_s, out_points)
    v_x_i = interpolate.griddata(points, v_x, out_points)
    v_y_i = interpolate.griddata(points, v_y, out_points)
    v_z_i = interpolate.griddata(points, v_z, out_points)

    out_file = os.path.join(out_dir,
                            os.path.basename(fname).replace('.txt', '_interp.txt'))
    print(" Writing data....")
    np.savetxt(out_file, np.transpose([x_out, y_out, z_s_i, v_x_i, v_y_i, v_z_i]))

def question6b():
    fname = 'report/Figures/q6.pdf'
    # fname = tempf
    pp = PdfPages(fname)
    plt.figure(figsize=(8, 6))
    w = np.sqrt(U[0, :, :]**2 + U[1, :, :]**2)

    xlocs = [1.25, 1.5, 2.0, 3.0, 5.0]
    dx_init = 5.0e-6
    nbp = 150
    yend = 1
    xloc = -10.0
    ystart = 0.0
    xn = xloc * np.ones([nbp * 2 + 1])
    yn = np.empty([nbp * 2 + 1])
    base = ((yend - ystart) / dx_init) ** (1.0 / (nbp - 1))
    yn[nbp] = 0.
    for j in range(nbp):
        yn[nbp - j - 1] = -(ystart + dx_init * base**j)
        yn[nbp + j + 1] = ystart + dx_init * base**j
    wslice = inter.griddata((x.flat, y.flat), w.flat, (xn, yn), method='nearest')

    for (xi, xloc) in enumerate(xlocs):
        ystart = 0.0
        xn = xloc * np.ones([nbp * 2])
        yn = np.empty([nbp * 2])
        base = ((yend - ystart) / dx_init) ** (1.0 / (nbp - 1))
        for j in range(nbp):
            yn[j] = -(ystart + dx_init * base**j)
            yn[nbp + j] = ystart + dx_init * base**j
        wslice = inter.griddata((x.flat, y.flat), w.flat, (xn, yn), method='linear')
        plt.plot(wslice[0:2 * nbp], yn[0:2 * nbp], '-', ms=2, label='x = %3.2f' % xloc)

    plt.legend(loc=4, prop={'size': 6})
    plt.xlabel(r'Velocity')
    plt.ylabel(r'$y$')
    plt.ylim([-1, 1])
    plt.title('Momentum Deficit')
    plt.tight_layout()
    pp.savefig(bbox_inches='tight')  # fixed typo: was bbx_inches
    pp.close()
    return

def project_bitmap(m, f, args=None, kwargs=None, n_img_pix=(800, 400),
                   for_contour=False, healpy=False):
    """ """
    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    if type(n_img_pix) == int:
        n_img_pix = (n_img_pix, n_img_pix)

    l, b = np.meshgrid(np.linspace(-180, 180, 1000), np.linspace(-90, 90, 1000))
    x, y = m(l, b)
    xmin, xmax = np.min(x[x < 1e30]), np.max(x[x < 1e30])
    ymin, ymax = np.min(y[y < 1e30]), np.max(y[y < 1e30])
    xran = xmax - xmin
    yran = ymax - ymin
    dx = xran / n_img_pix[0]
    dy = yran / n_img_pix[1]

    x0, y0 = np.meshgrid(np.linspace(xmin - 0.05 * xran, xmax + 0.05 * xran, n_img_pix[0]),
                         np.linspace(ymin - 0.05 * yran, ymax + 0.05 * yran, n_img_pix[1]))
    l0, b0 = m(x0, y0, inverse=True)
    x1, y1 = m(l0, b0)
    mask = (((x0 - x1) ** 2 + (y0 - y1) ** 2) < 1).flatten()
    #mask = (((x0 - x1) ** 2 + (y0 - y1) ** 2) < 1e30).flatten()

    if not healpy:
        xg, yg = np.meshgrid(np.linspace(x0[0, 0] - dx / 2, x0[-1, -1] + dx / 2,
                                         n_img_pix[0] + 1),
                             np.linspace(y0[0, 0] - dy / 2, y0[-1, -1] + dy / 2,
                                         n_img_pix[1] + 1))
        z = np.zeros(l0.shape).flatten()
        z[mask] = f(l0.flatten()[mask], b0.flatten()[mask], *args, **kwargs)
        z[~mask] = np.NaN

    if not for_contour:
        zg = z.reshape((n_img_pix[1], n_img_pix[0]))
        zgm = np.ma.array(zg, mask=np.isnan(zg))
        return xg, yg, zgm
    else:
        if healpy:
            xg, yg = m(*healpy_grid(hp.npix2nside(len(args[0])), nest=kwargs))
            zg = griddata((xg, yg), args[0], (x0, y0), method='linear')
            zgm = np.ma.array(zg, mask=~mask.reshape((n_img_pix[1], n_img_pix[0])))
            return x0, y0, zgm
        else:
            zg = griddata((x0.flatten()[mask], y0.flatten()[mask]), z[mask],
                          (x0, y0), method='cubic')
            zgm = np.ma.array(zg, mask=~mask.reshape((n_img_pix[1], n_img_pix[0])))
            return x0, y0, zgm

def test_fill_value(self):
    x = [(0, 0), (0, 1), (1, 0)]
    y = [1, 2, 3]

    yi = griddata(x, y, [(1, 1), (1, 2), (0, 0)], fill_value=-1)
    assert_array_equal(yi, [-1., -1, 1])

    yi = griddata(x, y, [(1, 1), (1, 2), (0, 0)])
    assert_array_equal(yi, [np.nan, np.nan, 1])

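# The same behaviour outside the test harness: with method='linear' (the
# default), query points outside the convex hull of the three input points
# receive fill_value, while interior points are linearly interpolated.
import numpy as np
from scipy.interpolate import griddata

pts = [(0, 0), (0, 1), (1, 0)]
vals = [1, 2, 3]  # the interpolating plane is f(x, y) = 1 + 2*x + y
print(griddata(pts, vals, [(1, 1), (0.25, 0.25)], fill_value=-1))
# -> [-1.    1.75]  ((1, 1) lies outside the triangle)
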
def eval_points(self, *points, **kwds):
    '''
    Interpolate data at points

    Parameters
    ----------
    points : ndarray of float, shape (..., ndim)
        Points where to interpolate data at.
    method : {'linear', 'nearest', 'cubic'}
        Method of interpolation. One of
        - ``nearest``: return the value at the data point closest to
          the point of interpolation.
        - ``linear``: tesselate the input point set to n-dimensional
          simplices, and interpolate linearly on each simplex.
        - ``cubic`` (1-D): return the value determined from a cubic spline.
        - ``cubic`` (2-D): return the value determined from a piecewise cubic,
          continuously differentiable (C1), and approximately
          curvature-minimizing polynomial surface.
    fill_value : float, optional
        Value used to fill in for requested points outside of the convex
        hull of the input points. If not provided, then the default is
        ``nan``. This option has no effect for the 'nearest' method.

    Examples
    --------
    >>> import numpy as np
    >>> x = np.arange(-2, 2, 0.4)
    >>> xi = np.arange(-2, 2, 0.1)
    >>> d = PlotData(np.sin(x), x, xlab='x', ylab='sin', title='sinus', plot_args=['r.'])
    >>> di = PlotData(d.eval_points(xi), xi)
    >>> hi = di.plot()
    >>> h = d.plot()

    See also
    --------
    scipy.interpolate.griddata
    '''
    options = dict(method='linear')
    options.update(**kwds)
    if isinstance(self.args, (list, tuple)):  # Multidimensional data
        ndim = len(self.args)
        if ndim < 2:
            msg = '''Unable to determine plotter-type, because len(self.args)<2.
            If the data is 1D, then self.args should be a vector!
            If the data is 2D, then length(self.args) should be 2.
            If the data is 3D, then length(self.args) should be 3.
            Unless you fix this, the interpolation will not work!'''
            warnings.warn(msg)
        else:
            xi = np.meshgrid(*self.args)
            return interpolate.griddata(xi, self.data.ravel(), points, **options)
    else:  # One dimensional data
        return interpolate.griddata(self.args, self.data, points, **options)

def plotdis(IBC, UG, nodes, nn, xmin, xmax, ymin, ymax, savefigs=False):
    """Plot the nodal displacement solution using `griddata()`

    Parameters
    ----------
    IBC : ndarray (int)
        IBC (Indicator of Boundary Conditions) indicates if the node
        has any type of boundary condition applied to it.
    UG : ndarray (float)
        Array with the computed displacements.
    nodes : ndarray (float)
        Array with number and nodes coordinates:
        `number coordX coordY BCX BCY`
    nn : int
        Number of nodes.
    xmin : float
        Minimum x value for the grid.
    xmax : float
        Maximum x value for the grid.
    ymin : float
        Minimum y value for the grid.
    ymax : float
        Maximum y value for the grid.
    """
    points = nodes[:, 1:3]
    grid_x, grid_y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    UC = np.zeros([nn, 2], dtype=float)  # plain float: np.float is deprecated
    for i in range(nn):
        for j in range(2):
            kk = IBC[i, j]
            if kk == -1:
                UC[i, j] = 0.0
            else:
                UC[i, j] = UG[kk]

    grid_z0 = griddata(points, UC[:, 0], (grid_x, grid_y), method='linear')
    grid_z1 = griddata(points, UC[:, 1], (grid_x, grid_y), method='linear')

    plt.figure("Solution: Horizontal displacement")
    plt.imshow(grid_z0.T, aspect='equal', extent=(xmin, xmax, ymin, ymax),
               origin='lower')
    plt.title(r'$u_x$')
    plt.colorbar(orientation='vertical')
    plt.grid()
    if savefigs:
        plt.savefig('numhorizo.pdf')

    plt.figure("Solution: Vertical displacement")
    plt.imshow(grid_z1.T, aspect='equal', extent=(xmin, xmax, ymin, ymax),
               origin='lower')
    plt.title(r'$u_y$')
    plt.colorbar(orientation='vertical')
    plt.grid()
    if savefigs:
        plt.savefig('numvertic.pdf')

def interpolation(self, x0, x1, n=100):
    """Interpolate eta and phi along a line from x0 to x1"""
    X = linspace(x0[0], x1[0], n)  # x points along the line
    Y = linspace(x0[1], x1[1], n)  # y points along the line
    points = list(zip(self.x, self.y))  # materialize zip for griddata (Python 3)
    interpolatedeta = griddata(points, self.eta, (X, Y), method='linear')  # Interpolate eta
    interpolatedphi = griddata(points, self.phi, (X, Y), method='linear')  # Interpolate phi
    return [X, Y, interpolatedeta, interpolatedphi]  # return X, Y and interpolated data

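# Self-contained sketch of the same pattern: sampling a scattered 2-D field
# along a straight line by passing 1-D query arrays to griddata (the field
# and names here are made up).
import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(2)
px = rng.uniform(0., 1., 300)
py = rng.uniform(0., 1., 300)
eta_demo = np.cos(2. * np.pi * px) * py
X_line = np.linspace(0.1, 0.9, 50)
Y_line = np.linspace(0.1, 0.9, 50)
eta_line = griddata((px, py), eta_demo, (X_line, Y_line), method='linear')
print(eta_line.shape)  # (50,): one value per point along the line
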
def plotstrain(EG, XS, xmin, xmax, ymin, ymax, savefigs=False):
    """Plot the strain solution over the full domain

    Using griddata plots the strain solution over the full domain defined by
    the integration points. The integration points physical coordinates are
    stored in XS[] while the strain solution is stored in EG[].

    Parameters
    ----------
    EG : ndarray (float)
        Array that contains the strain solution for each integration
        point in physical coordinates.
    XS : ndarray (float)
        Array with the coordinates of the integration points.
    xmin : float
        Minimum x value for the grid.
    xmax : float
        Maximum x value for the grid.
    ymin : float
        Minimum y value for the grid.
    ymax : float
        Maximum y value for the grid.
    """
    grid_x, grid_y = np.mgrid[xmin:xmax:20j, ymin:ymax:20j]
    grid_z0 = griddata(XS, EG[:, 0], (grid_x, grid_y), method='linear')
    grid_z1 = griddata(XS, EG[:, 1], (grid_x, grid_y), method='linear')
    grid_z2 = griddata(XS, EG[:, 2], (grid_x, grid_y), method='linear')

    plt.figure("Solution: epsilon-xx strain")
    plt.imshow(grid_z0.T, aspect='equal', extent=(xmin, xmax, ymin, ymax),
               origin='lower')
    plt.title(r'$\epsilon_{xx}$')
    plt.colorbar(orientation='vertical')
    plt.grid()
    if savefigs:
        plt.savefig('numepsixx.pdf')

    plt.figure("Solution: epsilon-yy strain")
    plt.imshow(grid_z1.T, aspect='equal', extent=(xmin, xmax, ymin, ymax),
               origin='lower')
    plt.title(r'$\epsilon_{yy}$')
    plt.colorbar(orientation='vertical')
    plt.grid()
    if savefigs:
        plt.savefig('numepsiyy.pdf')

    plt.figure("Solution: gamma-xy strain")
    plt.imshow(grid_z2.T, aspect='equal', extent=(xmin, xmax, ymin, ymax),
               origin='lower')
    plt.title(r'$\gamma_{xy}$')
    plt.colorbar(orientation='vertical')
    plt.grid()
    if savefigs:
        plt.savefig('numgamaxy.pdf')

def plot_experiment_data(file_name, axes_list, coil1_abs_array, coil1_angle_array,
                         plot_figures=None):
    expt_data_data = num.loadtxt(file_name)
    expt_data_q95 = expt_data_data[:, 5]
    expt_data_betan = expt_data_data[:, 3]

    interp_points = num.ones((num.max(expt_data_q95.shape), 2), dtype=float)
    interp_points[:, 0] = expt_data_q95
    interp_points[:, 1] = expt_data_betan
    existing_points = num.ones((num.max(q95_array.shape), 2), dtype=float)
    existing_points[:, 0] = q95_array
    existing_points[:, 1] = Bn_array

    expt_data2_points_abs = griddata(existing_points, coil1_abs_array,
                                     interp_points, method='linear')
    expt_data2_points_angle = griddata(existing_points, coil1_angle_array,
                                       interp_points, method='linear')

    tmp1, tmp2 = expt_data_data.shape
    output_data = num.ones((tmp1, tmp2 + 2), dtype=float)
    output_data[:, 0:tmp2] = expt_data_data
    output_data[:, tmp2] = expt_data2_points_abs
    output_data[:, tmp2 + 1] = expt_data2_points_angle
    num.savetxt('expt_data_output.txt', output_data, fmt='%.4f', delimiter=' ')

    for ax in axes_list:
        ax.plot(expt_data_q95, expt_data_betan, 'kx')

    if plot_figures is None:
        pass
    else:
        fig_expt_data = pt.figure()
        ax1_expt_data = fig_expt_data.add_subplot(211)
        ax2_expt_data = fig_expt_data.add_subplot(212)
        ax1_expt_data.plot(expt_data_betan, expt_data2_points_abs, 'o')
        ax2_expt_data.plot(expt_data_betan, expt_data2_points_angle, 'o')
        ax1_expt_data.set_ylim(clim_list[iii])
        ax2_expt_data.set_ylim([-200, 200])
        ax1_expt_data.set_title(start_title + 'Magnitude' + extra_title)
        ax2_expt_data.set_title(start_title + 'Phase' + extra_title)
        ax2_expt_data.set_xlabel(r'$\beta_N$')
        ax1_expt_data.set_ylabel('G/kA')
        ax2_expt_data.set_ylabel('deg')
        fig_expt_data.canvas.draw()
        fig_expt_data.show()

        fig_expt_data = pt.figure()
        ax1_expt_data = fig_expt_data.add_subplot(211)
        ax2_expt_data = fig_expt_data.add_subplot(212)
        ax1_expt_data.plot(expt_data_q95, expt_data2_points_abs, 'o')
        ax2_expt_data.plot(expt_data_q95, expt_data2_points_angle, 'o')
        ax1_expt_data.set_ylim(clim_list[iii])
        ax2_expt_data.set_ylim([-200, 200])
        ax1_expt_data.set_title(start_title + 'Magnitude' + extra_title)
        ax2_expt_data.set_title(start_title + 'Phase' + extra_title)
        ax2_expt_data.set_xlabel('q95')
        ax1_expt_data.set_ylabel('G/kA')
        ax2_expt_data.set_ylabel('deg')
        fig_expt_data.canvas.draw()
        fig_expt_data.show()

def elastic_deform_helper(image, x_coord, y_coord, dx, dy):
    """ Applies random elastic deformation to the input image with given
        coordinates and displacement values of deformation points.
        Keeps the edge of the image steady by adding a few frame points
        that get displacement value zero.

    Input: image: array of shape (N, M, C) (Haven't tried it out for N != M),
                  C number of channels
           x_coord: array of shape (L,), the x coordinates of the deformation points
           y_coord: array of shape (L,), the y coordinates of the deformation points
           dx: array of shape (L,), the displacement values in x direction
           dy: array of shape (L,), the displacement values in y direction
    Output: the deformed image (shape (N, M, C))
    """
    # Preliminaries
    # dimensions of the input image
    shape = image.shape
    # centers of x and y axis
    x_center = shape[1] / 2
    y_center = shape[0] / 2

    ## Construction of the coarse grid
    # anker points: coordinates
    x_coord_anker_points = np.array([0, x_center, shape[1] - 1, 0, shape[1] - 1,
                                     0, x_center, shape[1] - 1])
    y_coord_anker_points = np.array([0, 0, 0, y_center, y_center,
                                     shape[0] - 1, shape[0] - 1, shape[0] - 1])
    # anker points: values
    dx_anker_points = np.zeros(8)
    dy_anker_points = np.zeros(8)

    # combine deformation and anker points to coarse grid
    x_coord_coarse = np.append(x_coord, x_coord_anker_points)
    y_coord_coarse = np.append(y_coord, y_coord_anker_points)
    coord_coarse = np.array(list(zip(y_coord_coarse, x_coord_coarse)))

    dx_coarse = np.append(dx, dx_anker_points)
    dy_coarse = np.append(dy, dy_anker_points)

    ## Interpolation onto fine grid
    # coordinates of fine grid
    coord_fine = [[y, x] for y in range(shape[0]) for x in range(shape[1])]
    # interpolate displacement in both x and y direction
    dx_fine = ipol.griddata(coord_coarse, dx_coarse, coord_fine,
                            method='cubic')  # cubic works better but takes longer (?)
    dy_fine = ipol.griddata(coord_coarse, dy_coarse, coord_fine,
                            method='cubic')  # other options: 'linear'

    # get the displacements into the shape of the input image
    # (the same values in each channel)
    if len(shape) == 3:
        dx_fine = dx_fine.reshape(shape[0:2])
        dx_fine = np.stack([dx_fine] * shape[2], axis=-1)
        dy_fine = dy_fine.reshape(shape[0:2])
        dy_fine = np.stack([dy_fine] * shape[2], axis=-1)

        ## Deforming the image: apply the displacement grid
        # base grid
        x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]),
                              np.arange(shape[2]))
        # add displacement to base grid (-> new coordinates)
        indices = (np.reshape(y + dy_fine, (-1, 1)),
                   np.reshape(x + dx_fine, (-1, 1)),
                   np.reshape(z, (-1, 1)))
    else:
        dx_fine = dx_fine.reshape(shape)
        dy_fine = dy_fine.reshape(shape)

        ## Deforming the image: apply the displacement grid
        # base grid
        x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
        # add displacement to base grid (-> new coordinates)
        indices = (np.reshape(y + dy_fine, (-1, 1)),
                   np.reshape(x + dx_fine, (-1, 1)))

    # evaluate the image at the new coordinates
    deformed_image = map_coordinates(image, indices, order=2, mode='nearest')
    deformed_image = deformed_image.reshape(image.shape)
    return deformed_image

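# Sketch of the coarse-to-fine step at the heart of elastic_deform_helper:
# a handful of control-point displacements are spread over every pixel with
# cubic griddata. Shapes and values here are illustrative only.
import numpy as np
from scipy import interpolate as ipol

demo_shape = (64, 64)
coord_coarse = np.array([[0, 0], [0, 63], [63, 0], [63, 63], [32, 32]])  # (y, x)
dx_coarse = np.array([0., 0., 0., 0., 5.])  # only the centre point moves
coord_fine = [[y, x] for y in range(demo_shape[0]) for x in range(demo_shape[1])]
dx_fine = ipol.griddata(coord_coarse, dx_coarse, coord_fine,
                        method='cubic').reshape(demo_shape)
print(dx_fine[32, 32], dx_fine[0, 0])  # ~5 at the centre, 0 at the corner
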
def plot_activity(opts, points, activity, labels, plot_state=False):
    """ Plot the activity of a neuron using data from all processed batches. """
    sort_ix = sort_weights(opts)
    activity[:, opts.state_size:] = activity[:, opts.state_size + sort_ix]

    x = np.arange(0, opts.state_size)
    # x = np.linspace(np.amin(points[:, 0]), np.amax(points[:, 0]))
    scale = 2 * np.pi / opts.state_size
    x_rad = x * scale
    cos, sin = np.cos(x_rad), np.sin(x_rad)
    if opts.velocity:
        y = np.linspace(np.amin(points[:, 1]), np.amax(points[:, 1]))
    else:
        y = np.zeros(1)

    x_mesh, y_mesh = np.meshgrid(x, y)
    cos, _ = np.meshgrid(cos, y)
    sin, _ = np.meshgrid(sin, y)

    if plot_state:
        nc, nr = 5, 4
        neurons = np.arange(opts.state_size)  # state neurons
    else:
        nc, nr = 5, 8
        neurons = np.arange(opts.state_size, opts.rnn_size)  # extra neurons

    f_linear, ax_linear = plt.subplots(ncols=nc, nrows=nr)
    # plt.suptitle('Linear Interpolated Data')
    c, r = 0, 0
    for i, n in enumerate(neurons):
        z_lin = griddata(points[:, :2], activity[:, n], (x_mesh, y_mesh),
                         method='linear')
        plt.sca(ax_linear[r, c])
        # plt.title('Neuron {}'.format(n))
        plt.contourf(x, y, z_lin, cmap='RdBu_r')
        plt.axis('off')

        # find the global centroid
        if np.nanmax(z_lin) <= 0:
            z_lin -= np.nanmean(z_lin)  # center activations at the median
        z_lin[np.isnan(z_lin)] = 0
        z_lin[z_lin < 0] = 0
        norm = np.sum(z_lin)
        cos_mean = np.sum(cos * z_lin) / norm
        sin_mean = np.sum(sin * z_lin) / norm
        com_rad = np.arctan2(sin_mean, cos_mean)
        com_x = (com_rad / scale) % 20
        com_y = np.sum(y_mesh * z_lin) / norm
        # plt.scatter(com_x, com_y, c='k')

        c += 1
        if c == nc:
            c = 0
            r += 1
            if r == nr:
                break

    # plt.tight_layout()
    plt.show()

xvect = xgrid.ravel()
yvect = ygrid.ravel()
zvect = zgrid.ravel()
xyzvect = NP.hstack((xvect.reshape(-1, 1), yvect.reshape(-1, 1),
                     zvect.reshape(-1, 1)))

if use_DSM or use_GSM:
    backdrop = HP.cartview(fluxes_DSM.ravel(), coord=['G', 'E'],
                           xsize=backdrop_xsize, return_projected_map=True)
elif use_GLEAM or use_SUMSS:
    if backdrop_coords == 'radec':
        backdrop = griddata(NP.hstack((ra_deg.reshape(-1, 1),
                                       dec_deg.reshape(-1, 1))), fpeak,
                            NP.hstack((xvect.reshape(-1, 1),
                                       yvect.reshape(-1, 1))), method='cubic')
        backdrop = backdrop.reshape(backdrop_xsize // 2, backdrop_xsize)
    elif backdrop_coords == 'dircos':
        if (telescope == 'mwa_dipole') or (obs_mode == 'drift'):
            backdrop = PB.primary_beam_generator(xyzvect, freq, telescope=telescope,
                                                 freq_scale='Hz', skyunits='dircos',
                                                 phase_center=[0.0, 0.0, 1.0])
            backdrop = backdrop.reshape(backdrop_xsize, backdrop_xsize)
else:
    if backdrop_coords == 'radec':
        backdrop = griddata(NP.hstack(

    0.025, 0.05, 0.005, 0.025, 0.05]  # Wave conditions used in RBF

#print(np.shape(f_tot))
#print(target_wavecon.shape)
to_z = np.array(f_tot)
#print(to_z.shape)
RU = []
for j in range(0, len(target)):
    x = hs
    y = hs_lo
    z = to_z[:, j]
    hs_e = target_wavecon[j, 0]
    #tp_e = target_wavecon[j, 1]
    hs_lo_e = target_wavecon[j, 2]
    vq = griddata((x, y), z, (hs_e, hs_lo_e), method='linear')
    RU.append(vq)
print(RU)
print()
#print(i_sim)
runup_x = np.array(RU)
runup_xds = xr.Dataset({'runup': ('time', xr.DataArray(runup_x))},
                       coords={'time': data_storm.time})
runup_all_xds = xr.concat([runup_all_xds, runup_xds], dim='n_sim')

# %%

        X = int(j * COS)
        Y = int(LenLinesC / 2 - j * SIN)
        SC[X][Y] = RawImgData[i][j]
        points.append([X, Y])
        values.append(RawImgData[i][j])
    values = np.array(values, dtype=int)  # plain int: np.int is deprecated
    return SC, values, points, LenLinesC

# In[55]:
SCH, valuesH, pointsH, LenLinesCH = CreateSC(SmallImg)
grid_xH, grid_yH = np.mgrid[0:LenLinesCH:1, 0:LenLinesCH:1]
grid_z1H = griddata(pointsH, valuesH, (grid_xH, grid_yH), method='linear')

# In[56]:
plt.figure(figsize=(10, 10))
plt.imshow((grid_z1H**0.7), cmap=plt.get_cmap('gray'))
plt.title("Getting the image out of the data file: " + RawData.split("/")[-1] + " .")
plt.savefig('Imgs/pic_' + RawData.split("/")[-1] + ".png", bbox_inches='tight')
plt.show()

if Debug:
    # In[57]:
    f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))

def update(self):
    #print(data)
    self.timer.stop()
    self.liveA = []
    self.liveT = []
    dataA = []
    dataB = []
    print('update')
    try:
        N = 5
        X = [0, 100, 100, 0] * N
        Y = np.repeat(np.linspace(0, 100, N * 2), 2)
        Z = [50]
        self.posTable = tableXYZ(X, Y, Z)
        t = [time.time() - t_start]
        x = [0]
        y = [0]
        for zz in range(len(X) * len(Y)):
            try:
                target = next(self.posTable)
                #print(target)
                self.piStage.MOV(dPos=target, axis=b"1 2 3", waitUntilReady=True)
                t.append(time.time() - t_start)
                real_position = self.piStage.qPOS()
                x.append(real_position[0])
                y.append(real_position[1])
                print(">>", time.time(), self.q.qsize())
                data_q = []
                while self.q.qsize() > 3:
                    data_q.append(self.q.get())
                    #print(self.q.qsize())
                if len(data_q) > 0:
                    for data in data_q:
                        self.liveA += data[0].tolist()
                        self.liveB += data[1].tolist()
                        self.liveT += np.linspace(data[4], data[5],
                                                  len(data[0])).tolist()
            except StopIteration:
                break
            app.processEvents()
        #time.sleep(5)
        x_ = x
        t_ = t
        #while self.q.empty():
        #    time.sleep(0.5)
        #self.pico.terminate()
        #if not self.q.empty():
        data_q = []
        #self.q.join()
        while self.q.qsize() > 0:
            data_q.append(self.q.get_nowait())
            print(self.q.qsize())
        if len(data_q) > 0:
            for data in data_q:
                self.liveA += data[0].tolist()
                self.liveB += data[1].tolist()
                self.liveT += np.linspace(data[4], data[5], len(data[0])).tolist()
                dataA = data[2]
                dataB = data[3]

        T_interp = interp1d(self.liveT, self.liveA, bounds_error=False, fill_value=0)
        pmt = T_interp(t)
        xi = np.linspace(min(x), max(x), N * 2)
        yi = np.linspace(min(y), max(y), N * 2)
        xi, yi = np.meshgrid(xi, yi)
        self.PMT = griddata((x, y), pmt, (xi, yi))
        self.x = x
        self.out = [x, y, t, pmt, T_interp]
        print(min(x), max(x), xi.min(), xi.max())
        print(min(y), max(y), yi.min(), yi.max())
        print(self.PMT, xi.shape, yi.shape)
        #self.image = np.array(pmt[1:]).reshape((N, N))
        self.img.setImage(self.PMT)
        #if len(self.liveA) > 500:
        #    self.liveA = self.liveA[L:]
        #    self.liveB = self.liveB[L:]
        #    self.liveT = self.liveT[L:]
        self.curveA.setData(pmt)
        #self.curveB.setData(dataB)
        self.curveA1.setData(x=self.liveT, y=self.liveA)
        self.curveB1.setData(x=self.liveT, y=self.liveB)
        self.curveX1.setData(x=t_, y=x_)
        app.processEvents()
        #self.timer.start(0.5)
    except Exception:
        traceback.print_exc()

def fZ(self, Obs, TL, sInds, currentTimeAbs, mode):
    """Returns surface brightness of local zodiacal light

    Args:
        Obs (Observatory module):
            Observatory class object
        TL (TargetList module):
            TargetList class object
        sInds (integer ndarray):
            Integer indices of the stars of interest
        currentTimeAbs (astropy Time array):
            Current absolute mission time in MJD
        mode (dict):
            Selected observing mode

    Returns:
        fZ (astropy Quantity array):
            Surface brightness of zodiacal light in units of 1/arcsec2
    """
    # observatory positions vector in heliocentric ecliptic frame
    r_obs = Obs.orbit(currentTimeAbs, eclip=True)
    # observatory distance from heliocentric ecliptic frame center
    # (projected in ecliptic plane)
    try:
        r_obs_norm = np.linalg.norm(r_obs[:, 0:2], axis=1)
        # observatory ecliptic longitudes; ensures the longitude is +/-180deg
        r_obs_lon = np.sign(r_obs[:, 1]) * np.arccos(r_obs[:, 0] / r_obs_norm).to('deg').value
    except:
        r_obs_norm = np.linalg.norm(r_obs[:, 0:2], axis=1) * r_obs.unit
        # observatory ecliptic longitudes; ensures the longitude is +/-180deg
        r_obs_lon = np.sign(r_obs[:, 1]) * np.arccos(r_obs[:, 0] / r_obs_norm).to('deg').value

    # longitude of the sun: turn into 0-360 deg heliocentric ecliptic
    # longitude of spacecraft
    lon0 = (r_obs_lon + 180.) % 360.

    # target star positions vector in heliocentric true ecliptic frame
    r_targ = TL.starprop(sInds, currentTimeAbs, eclip=True)
    # target star positions vector wrt observatory in ecliptic frame
    r_targ_obs = (r_targ - r_obs).to('pc').value
    # transform to astropy SkyCoordinates
    if sys.version_info[0] > 2:
        coord = SkyCoord(r_targ_obs[:, 0], r_targ_obs[:, 1], r_targ_obs[:, 2],
                         representation_type='cartesian').represent_as('spherical')
    else:
        coord = SkyCoord(r_targ_obs[:, 0], r_targ_obs[:, 1], r_targ_obs[:, 2],
                         representation='cartesian').represent_as('spherical')

    # longitude and latitude absolute values for Leinert tables
    lon = coord.lon.to('deg').value - lon0  # longitude relative to spacecraft
    lat = coord.lat.to('deg').value  # latitude relative to spacecraft
    lon = abs((lon + 180.) % 360. - 180.)  # converts to 0-180 deg
    lat = abs(lat)
    # technically, latitude is physically capable of being > 90 deg

    # Interpolates 2D
    fbeta = griddata(self.points, self.values, list(zip(lon, lat)))

    lam = mode['lam']  # extract wavelength
    f = 10.**(self.logf(np.log10(lam.to('um').value))) * u.W / u.m**2 / u.sr / u.um
    h = const.h  # Planck constant
    c = const.c  # speed of light in vacuum
    ephoton = h * c / lam / u.ph  # energy of a photon
    F0 = TL.OpticalSystem.F0(lam)  # zero-magnitude star (in ph/s/m2/nm)
    f_corr = f / ephoton / F0  # color correction factor

    fZ = fbeta * f_corr.to('1/arcsec2')

    return fZ

def glacHeights(self, glacmask, glacdem, glacSlope, innercells, hmin, tau):
    p = np.where(innercells == True)
    nrInnerCells = np.size(p, 1)
    nrRandCells = int(math.ceil(self.r * nrInnerCells))
    #-mask where glacier is true
    glacTrue = np.where(glacmask == True)
    #-define the boundary rows and columns for sub-setting when the
    # interpolation is done later on
    rmin = min(glacTrue[0])
    rmax = max(glacTrue[0])
    cmin = min(glacTrue[1])
    cmax = max(glacTrue[1])
    #-mask for innercells. Random points are selected from these indices
    innerTrue = np.argwhere(innercells == True)
    #-Create an empty array for the final heights of the particular glacier
    finalHeights = np.ones(glacmask.shape) * 0.  # hga
    #-do the interpolation n times
    for N in range(self.n):
        print('\tInterpolation run %d' % (N + 1))
        #-indices for innercells. Random points are selected from these indices
        indices = np.arange(nrInnerCells)
        #-Create array with missing values and fill with h calculated at
        # randomly chosen locations
        glacHeightPoints = np.ones(glacmask.shape) * self.demMV
        for i in range(nrRandCells):
            #-sample a random index
            randPointIndex = int(np.random.choice(indices))
            #-row and column in matrix
            r = innerTrue[randPointIndex][0]
            c = innerTrue[randPointIndex][1]
            #-start increasing window size until hmin is reached
            w = 0
            flag = True
            while flag:
                r_min = r - 1 - w
                r_max = r + 1 + w
                c_min = c - 1 - w
                c_max = c + 1 + w
                tempDem = glacdem[r_min:r_max, c_min:c_max].flatten()
                dH = np.nanmax(tempDem) - np.nanmin(tempDem)
                if dH >= hmin:
                    flag = False
                else:
                    w += 1
            tempSlope = glacSlope[r_min:r_max, c_min:c_max].flatten()
            meanSlope = np.nanmean(tempSlope)
            #-calculate the ice thickness for the random point
            h = self.iceThickness(tau, meanSlope)
            #-assign the calculated glacier height to the gridcell
            glacHeightPoints[r, c] = h
            #-convert to list (for strange reasons np.delete doesn't work, so....)
            indices = indices.tolist()
            #-remove the sampled point to make sure it isn't sampled another time
            indices.remove(randPointIndex)
            #-convert back to np array
            indices = np.asarray(indices)
        #-sub-set of matrix to make interpolation quicker
        gh = glacHeightPoints[rmin:rmax + 1, cmin:cmax + 1]
        glacHeightPoints = None
        del glacHeightPoints
        #-shape and rows and columns
        shp = gh.shape
        rows = shp[0]
        cols = shp[1]
        #-create an array with 4 additional rows and 4 additional columns and
        # fill those rows and columns with values of hga (elevation of adjacent cells)
        tempArray = np.ones((rows + 4, cols + 4)) * self.hga
        tempArray[2:2 + rows, 2:2 + cols] = gh
        points = np.where(tempArray != self.demMV)
        xi = np.ones(tempArray.shape)
        xi = np.where(xi == 1)
        h = griddata(points, tempArray[points], xi, method='cubic').reshape(tempArray.shape)
        h = np.maximum(0., h)
        tempArray = None
        gh = None
        points = None
        xi = None
        del tempArray, gh, points, xi
        glacIntHeightPoints = np.ones(glacmask.shape) * 0.
        glacIntHeightPoints[rmin:rmax + 1, cmin:cmax + 1] = h[2:2 + rows, 2:2 + cols]
        h = None
        del h
        finalHeights = finalHeights + glacIntHeightPoints
        glacIntHeightPoints = None
        del glacIntHeightPoints
    #-calculate average height over the n interpolation runs
    finalHeights = finalHeights / self.n
    finalHeights[np.where(glacmask == False)] = self.demMV
    return finalHeights

def parallax_corr(self, cth=None, time_slot=None, orbital=None, azi=None,
                  ele=None, fill="False"):
    '''Perform the parallax correction for the channel at *time_slot*
    (datetime.datetime() object), assuming the cloud top height cth and the
    viewing geometry given by the satellite orbital "orbital", and return
    the corrected channel.

    Authors: Ulrich Hamann (MeteoSwiss), Thomas Leppelt (DWD)

    Example calls:
      * calling this function (using orbital and time_slot)
            orbital = data.get_oribtal()
            data["VIS006"].parallax_corr(cth=data["CTTH"].height,
                                         time_slot=data.time_slot,
                                         orbital=orbital)
      * calling this function (using viewing geometry)
            orbital = data.get_oribtal()
            (azi, ele) = get_viewing_geometry(self, orbital, time_slot)
            data["VIS006"].parallax_corr(cth=data["CTTH"].height, azi=azi, ele=ele)

    Optional input:
      cth      The parameter cth is the cloud top height
               (or the altitude of the object that should be shifted).
               cth must have the same size and projection as the channel.
      orbital  an orbital object defined by the TLE file
               (see pyorbital.orbital import Orbital or mpop/scene.py get_oribtal)
      azi      azimuth viewing angle in degree (south is 0, counting clockwise),
               e.g. as given by self.get_viewing_geometry
      ele      elevation viewing angle in degree (zenith is 90, horizon is 0),
               e.g. as given by self.get_viewing_geometry
      fill     specifies the interpolation method to fill the gaps
               (basically areas behind the cloud that can't be observed by the
               satellite instrument)
               "False" (default): no interpolation, gaps are np.nan values and
                                  the mask is set accordingly
               "nearest": fill gaps with nearest neighbour
               "bilinear": use scipy.interpolate.griddata with linear
                           interpolation to fill the gaps

    output:
      parallax corrected channel
               the content of the channel will be parallax corrected.
               The name of the new channel will be *original_chan.name+'_PC'*,
               e.g. "IR_108_PC". This name is also stored in the info
               dictionary of the originating channel.
    '''
    # get time_slot from info, if present
    if time_slot is None:
        if "time" in self.info.keys():
            time_slot = self.info["time"]

    if azi is None or ele is None:
        if time_slot is None or orbital is None:
            print("*** Error in parallax_corr (mpop/channel.py)")
            print("    parallax_corr needs either time_slot and orbital")
            print("    data[\"IR_108\"].parallax_corr(data[\"CTTH\"].height, time_slot=data.time_slot, orbital=orbital)")
            print("    or the azimuth and elevation angle")
            print("    data[\"IR_108\"].parallax_corr(data[\"CTTH\"].height, azi=azi, ele=ele)")
            quit()
        else:
            print("... calculate viewing geometry (orbit and time are given)")
            (azi, ele) = self.get_viewing_geometry(orbital, time_slot)
    else:
        print("... azimuth and elevation angle given")

    # mask the cloud top height
    cth_ = np.ma.masked_where(cth < 0, cth, copy=False)

    # Elevation displacement
    dz = cth_ / np.tan(np.deg2rad(ele))

    # Create the new channel (by copying) and initialize the data with NaN values
    new_ch = copy.deepcopy(self)
    new_ch.data[:, :] = np.nan

    # Set the name
    new_ch.name += '_PC'
    # Add information about the corrected version to the original channel
    self.info["parallax_corrected"] = self.name + '_PC'

    # get projection coordinates in meter
    (proj_x, proj_y) = self.area.get_proj_coords()

    print("... calculate parallax shift")
    # shifting pixels according to parallax correction
    proj_x_pc = proj_x - np.sin(np.deg2rad(azi)) * dz  # shift West-East in m   # ??? sign correct ???
    proj_y_pc = proj_y + np.cos(np.deg2rad(azi)) * dz  # shift North-South in m

    # get indices for the pixels at the original position
    (y, x) = self.area.get_xy_from_proj_coords(proj_x, proj_y)
    # comment: might be done more efficiently with meshgrid
    # >>> x = np.arange(-5.01, 5.01, 0.25)
    # >>> y = np.arange(-5.01, 5.01, 0.25)
    # >>> xx, yy = np.meshgrid(x, y)

    # get indices for the pixels at the parallax corrected position
    (y_pc, x_pc) = self.area.get_xy_from_proj_coords(proj_x_pc, proj_y_pc)

    # copy cloud-free satellite pixels (surface observations)
    ind = np.where(cth_.mask == True)
    new_ch.data[x[ind], y[ind]] = self.data[x[ind], y[ind]]

    print("... copy data to parallax corrected position")
    # copy cloudy pixels with the new position modified by the parallax shift
    ind = np.where(x_pc.mask == False)
    new_ch.data[x_pc[ind], y_pc[ind]] = self.data[x[ind], y[ind]]

    # Mask out data gaps (areas behind the clouds)
    new_ch.data = np.ma.masked_where(np.isnan(new_ch.data), new_ch.data, copy=False)

    if fill.lower() == "false":
        return new_ch
    elif fill == "nearest":
        print("*** fill missing values with nearest neighbour")
        from scipy.ndimage import distance_transform_edt
        invalid = np.isnan(new_ch.data)
        ind = distance_transform_edt(invalid, return_distances=False,
                                     return_indices=True)
        new_ch.data = new_ch.data[tuple(ind)]
    elif fill == "bilinear":
        # this function does not interpolate at the outer boundaries
        from scipy.interpolate import griddata
        ind = np.where(new_ch.data.mask == False)
        points = np.transpose(np.append([y[ind]], [x[ind]], axis=0))
        values = new_ch.data[ind]
        new_ch.data = griddata(points, values, (y, x), method='linear')

        # fill the remaining pixels with nearest neighbour
        from scipy.ndimage import distance_transform_edt
        invalid = np.isnan(new_ch.data)
        ind = distance_transform_edt(invalid, return_distances=False,
                                     return_indices=True)
        new_ch.data = new_ch.data[tuple(ind)]
    else:
        print("*** Error in parallax_corr (channel.py)")
        print("    unknown gap fill method ", fill)
        quit()

    return new_ch

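# Standalone sketch of the "nearest" gap-filling trick used in parallax_corr:
# distance_transform_edt, called on the invalid-pixel mask with
# return_indices=True, yields for every pixel the indices of the closest
# valid pixel, which are then used for fancy indexing.
import numpy as np
from scipy.ndimage import distance_transform_edt

data = np.array([[1.0, np.nan, 3.0],
                 [np.nan, np.nan, 6.0]])
invalid = np.isnan(data)
ind = distance_transform_edt(invalid, return_distances=False, return_indices=True)
print(data[tuple(ind)])  # every NaN replaced by its nearest valid neighbour
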
im_x = x[cube]
im_y = y[cube]
xmin = -0.5 * dx_im
xmax = 0.5 * dx_im
ymin = -0.5 * dx_im
ymax = 0.5 * dx_im
nx = 128
dpx = (xmax - xmin) / float(nx)
dpy = (ymax - ymin) / float(nx)
xpx = np.linspace(xmin + 0.5 * dpx, xmax - 0.5 * dpx, nx)
ypx = np.linspace(ymin + 0.5 * dpy, ymax - 0.5 * dpy, nx)
grid_x, grid_y = np.meshgrid(xpx, ypx)
points = np.transpose([im_x, im_y])
z1 = griddata(points, rho[cube], (grid_x, grid_y), method='nearest')
z2 = griddata(points, T[cube], (grid_x, grid_y), method='nearest')
z3 = griddata(points, ux[cube], (grid_x, grid_y), method='nearest')
z4 = griddata(points, uy[cube], (grid_x, grid_y), method='nearest')
z5 = np.around(griddata(points, lev[cube], (grid_x, grid_y), method='nearest'))

nc = 21
im1 = ax2.contourf(xpx, ypx, z1, nc, cmap='jet')
im2 = ax5.contourf(xpx, ypx, z2, nc, cmap='hot')
ctr = ax2.contour(xpx, ypx, z5, colors='w', levels=range(0, 20))
ax2.clabel(ctr, inline=1, fmt="%i")
vskip = 6
vec = ax5.quiver(xpx[::vskip], ypx[::vskip], z3[::vskip, ::vskip],

def points_to_raster(points_shapefiles, nodata_value=-99, data_col='values', output_resolution=250, outfile='surface.tif', dest_crs=None):
    """Interpolate point data to a regular grid using scipy.interpolate.griddata;
    write results to a GeoTiff.

    Parameters
    ----------
    points_shapefiles : shapefile or list of shapefiles
        Point shapefiles with estimated data, assumed to be on a regular grid.
    nodata_value : numeric
        Value in `points_shapefiles` indicating no data
    data_col : str
        Field in `points_shapefiles` with estimated data.
    output_resolution : numeric
        Cell spacing of the output raster
    outfile : str
        Output GeoTiff
    dest_crs : obj
        A Python int, dict, str, or pyproj.crs.CRS instance passed to
        pyproj.crs.CRS.from_user_input.
        See http://pyproj4.github.io/pyproj/stable/api/crs/crs.html#pyproj.crs.CRS.from_user_input.
        Can be any of:
          - PROJ string
          - Dictionary of PROJ parameters
          - PROJ keyword arguments for parameters
          - JSON string with PROJ parameters
          - CRS WKT string
          - An authority string [i.e. 'epsg:4326']
          - An EPSG integer code [i.e. 4326]
          - A tuple of ("auth_name", "auth_code") [i.e. ('epsg', '4326')]
          - An object with a `to_wkt` method.
          - A :class:`pyproj.crs.CRS` class
    """
    df = shp2df(points_shapefiles, dest_crs=dest_crs)
    if dest_crs is None:
        dest_crs = get_shapefile_crs(points_shapefiles)

    # reshape the values column to a nrow x ncol array; convert invalid values to nans
    data = df[data_col].values
    data[data == nodata_value] = np.nan

    # coordinates for the original grid (aligned with NHG cell corners)
    x_points = np.array([g.x for g in df.geometry])
    y_points = np.array([g.y for g in df.geometry])

    # specifications for a new output_resolution grid aligned with NHG
    # xul, yul is the cell center of the first cell (in the upper left corner)
    xul = x_points.min()
    yul = y_points.max()
    dxy = output_resolution

    # 1D arrays of x and y coordinates for each column, row
    x = np.arange(np.min(x_points), np.max(x_points) + dxy, dxy)
    y = np.arange(np.min(y_points), np.max(y_points) + dxy, dxy)
    # 2D arrays of x and y coordinates for each point
    X, Y = np.meshgrid(x, y)

    # interpolate the values onto the new grid using piecewise-linear interpolation;
    # points outside the convex hull of the input data are filled with nans
    results = interpolate.griddata((x_points, y_points), data, (X, Y), method='linear')
    results = np.flipud(results)
    results = np.ma.masked_array(results, mask=np.isnan(results))
    write_raster(outfile, results, xul=xul, yul=yul, dx=dxy, dy=dxy, rotation=0., crs=dest_crs, nodata=-9999)
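# Illustrative sketch of the masking step above: with method='linear', griddata
# fills points outside the convex hull of the inputs with NaN rather than
# extrapolating, so the flipped result can simply be masked. Synthetic data only.
import numpy as np
from scipy import interpolate

rng = np.random.default_rng(0)
x_pts = rng.uniform(0, 100, 50)
y_pts = rng.uniform(0, 100, 50)
vals = np.hypot(x_pts, y_pts)

# target grid deliberately larger than the data extent
X, Y = np.meshgrid(np.arange(-10, 120, 10), np.arange(-10, 120, 10))
grid = interpolate.griddata((x_pts, y_pts), vals, (X, Y), method='linear')
masked = np.ma.masked_array(grid, mask=np.isnan(grid))
print(masked.count(), 'of', masked.size, 'cells fall inside the convex hull')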
def pol_cart_trans(d, k, t, x, y, name='re', interpmethod='cubic'):
    """
    % original matlab code: Justin Stopa 09/06/2016
    %
    % Purpose:
    %   convert k,t spectrum into cartesian
    %
    % Input:
    %   d - spectra in polar coordinates of (k,t)
    %   k - wave number in log space
    %   t - theta direction in radians
    %   x - transform spc into cartesian with these x wavenumbers (output grid)
    %   y - transform spc into cartesian with these y wavenumbers (output grid)
    Outputs:
        D: 71*85 nd array matrix: cartesian cross spectra
        Dbefore: 71*85 nd array matrix: cartesian cross spectra without energy
                 normalization (energy conservation)
    """
    d = d.astype(np.float64)
    logging.debug('pol_cart_trans | d=%s', d.shape)
    logging.debug('pol_cart_trans | t=%s', t.shape)
    kmax = np.amax(k)  # % maximum wavenumber
    kmin = np.amin(k)  # % minimum wavenumber
    kmin = np.double(kmin)
    kmax = np.double(kmax)
    first_term = np.power((kmax / kmin), (1. / (len(k) - 1)))
    second_term = -1. / np.power((kmax / kmin), (1. / (len(k) - 1)))
    term_multi = first_term + second_term
    term_multi = np.double(term_multi)
    logging.debug('mode(np.diff(t)) = %s', mode(np.diff(t)))
    modal_value, count_value = mode(np.diff(t))
    modal_value = modal_value[0]
    modal_value = np.float64(modal_value)
    a = np.float64(0.5) * modal_value * term_multi * k**2
    a = a.astype(np.float64)
    k = k.astype(np.float64)
    # % Make matrix of output cartesian points
    X = np.tile(x, [len(y), 1]).squeeze().T
    Y = np.tile(y, [len(x), 1]).squeeze()  # added by agrouaze
    # % dx and dy of output cartesian grid
    dx, _ = mode(np.diff(x))
    dy, _ = mode(np.diff(y))
    # % convert polar grid to cartesian grid
    kx = (np.tile(k, [len(t), 1]).T * np.tile(np.cos(t), [len(k), 1]))
    ky = (np.tile(k, [len(t), 1]).T * np.tile(np.sin(t), [len(k), 1]))
    pts = []
    for xx in range(kx.shape[0]):
        for yy in range(kx.shape[1]):
            pts.append((kx[xx, yy], ky[xx, yy]))
    pts = np.array(pts)
    new_pts = []
    for xx in range(X.shape[0]):
        for yy in range(X.shape[1]):
            new_pts.append((X[xx, yy], Y[xx, yy]))
    new_pts = np.array(new_pts)
    logging.debug('v2 pts = %s %s', len(pts), pts[0])
    logging.debug('identic kx and ky = %s', np.array_equal(kx, ky))
    logging.debug('pts = %s %s values = %s', kx.shape, ky.shape, d.ravel().shape)
    logging.debug('X %s Y %s', X.shape, Y.shape)
    # scipy interpolation is used here instead of the matlab v4 option;
    # observed max differences on Re: nearest 25, linear 38, cubic 28
    # (cubic gives the best result for D)
    D = griddata(points=pts, values=d.ravel(), xi=new_pts, method=interpmethod, rescale=False)
    D = np.reshape(D, X.shape, order='A')
    logging.debug('D = %s', D.shape)
    D = D.astype(np.float64)
    dx = dx.astype(np.float64)
    dy = dy.astype(np.float64)
    eng_car = 4.0 * np.sqrt(np.sum(np.sum(abs(D) * dx * dy)))
    eng_pol = 4.0 * np.sqrt(np.sum(np.sum(abs(d) * np.tile(a, [len(t), 1]).T)))
    # % Conserve energy
    Dbefore = copy.copy(D)
    D = D * (eng_pol / eng_car)**2
    return D, Dbefore
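# Minimal sketch of the polar-to-Cartesian regridding idea behind
# pol_cart_trans, on a toy spectrum; all shapes and names here are
# illustrative, not the Sentinel-1 processing above.
import numpy as np
from scipy.interpolate import griddata

k = np.geomspace(0.01, 0.2, 30)                 # log-spaced wavenumbers
t = np.linspace(0, 2 * np.pi, 72, endpoint=False)
d = np.exp(-((k[:, None] - 0.05) ** 2) / 1e-4) * np.ones_like(t)[None, :]

kx = k[:, None] * np.cos(t)[None, :]            # polar nodes in Cartesian coordinates
ky = k[:, None] * np.sin(t)[None, :]
pts = np.column_stack([kx.ravel(), ky.ravel()])

x = np.linspace(-0.2, 0.2, 71)
y = np.linspace(-0.2, 0.2, 85)
X, Y = np.meshgrid(x, y)
D = griddata(pts, d.ravel(), (X, Y), method='cubic')
print(D.shape)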
dat = 15 dat2 = 9 y1 = np.array([-1-epsilon, -1-epsilon+0.03, -1-epsilon+0.06, -1-epsilon+0.09]) dat = len(y1) stream_points = np.array(list(zip( 0.33*np.ones(dat), y1))) stream_points2 = np.array(list(zip( 0.33*np.ones(dat), -y1))) saves = [6, 16, 43, 57, 58, 59] fig = plt.figure(4) for i in range(frames): plt.clf() u = np.array(list(h5py.File(name, 'r')["VisualisationVector"][str(int(N-skip*i))])) # Interpolate uneven grid onto an even grid ux_grid = np.roll(griddata((geometry[::s,0], geometry[::s,1]), u[::s,0], (X, Y), method='cubic'), 0, axis=1) uy_grid = np.roll(griddata((geometry[::s,0], geometry[::s,1]), u[::s,1], (X, Y), method='cubic'), 0, axis=1) x_, y_ = np.meshgrid(kappa, epsilon) ax1 = plt.contourf(X,Y, np.sqrt(np.square(ux_grid) + np.square(uy_grid)), cmap=Map, levels=15) cbar = fig.colorbar(ax1, format='%1.0f') #plt.quiver(x[::s], y[::s], ux_grid[::s, ::s], uy_grid[::s, ::s]) plt.streamplot(X, Y, ux_grid, uy_grid, color='k', density=3.25, start_points=stream_points ) plt.streamplot(X, Y, ux_grid, uy_grid, color='k', density=3.25, start_points=stream_points2 ) plt.streamplot(X, Y, ux_grid, uy_grid, color='k', density=0.75) plt.fill_between(x, 1+epsilon*np.ones(len(x)), +1+epsilon*np.cos(kappa*x), color="k") plt.fill_between(x, -1-epsilon*np.ones(len(x)), -1-epsilon*np.cos(kappa*x), color="k") cbar.ax.set_ylabel(r'Velocity $u$', fontsize=8) plt.ylabel(r"Vertical position $y$ [$a$]", fontsize=8) plt.xlabel(r"Horizontal position $x$ [$a$]", fontsize=8)
import imageio
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate

input_image = imageio.imread('face.png')
print(input_image.shape)  # e.g. (512, 512, 3)

plt.figure()
plt.imshow(input_image, cmap=plt.cm.gray)
plt.show()

# work on a single channel of a 200x200 region; flag bright pixels (> 150) as invalid
array = input_image[:200, :200, 0].astype(float)
array[array > 150] = np.nan

# mask invalid values
array = np.ma.masked_invalid(array)
xx, yy = np.meshgrid(np.arange(array.shape[1]), np.arange(array.shape[0]))

# get only the valid values
x1 = xx[~array.mask]
y1 = yy[~array.mask]
newarr = array[~array.mask]

GD1 = interpolate.griddata((x1, y1), newarr.ravel(), (xx, yy), method='cubic')
def pretty_print(X, embedding, ivs, tax, usercolors=None, with_diversity_background=True, bgcolor='white'):
    """Make a scatter plot of taxumap-embedded microbiota data.

    Samples are colored by their dominant Genus. The top 15 most abundant
    genera have a unique color, all other taxa are grey. Optionally,
    interpolate the diversity of samples in the local region of the embedded
    space and color the background accordingly, with darker shades indicating
    higher diversity."""
    import seaborn as sns
    import matplotlib.pyplot as plt
    from sklearn.preprocessing import LabelEncoder

    dominant_taxon_name = X.idxmax(axis=1)
    dominant_taxon_name = dominant_taxon_name.apply(lambda v: tax.loc[v]['Genus'])
    dominant_taxon = dominant_taxon_name.copy()

    top_15_taxa = dominant_taxon.value_counts().sort_values(ascending=False).head(15)
    top_15_taxa_labels = top_15_taxa.index
    dominant_taxon = dominant_taxon.apply(lambda v: v if v in top_15_taxa else '-1')

    lenc = LabelEncoder().fit(dominant_taxon)
    _t = lenc.transform(dominant_taxon)
    dominant_taxon = pd.Series(_t, index=X.index)

    from matplotlib import cm
    _ncolors = len(top_15_taxa)
    _ncolors = _ncolors if _ncolors <= 15 else 16
    cmap = cm.get_cmap('tab20c', _ncolors)
    embedding_colors = [cmap(x) if x != 0 else 'whitesmoke' for x in dominant_taxon]
    embedding_labels = [
        lenc.inverse_transform([x])[0]
        if lenc.inverse_transform([x])[0] != '-1' else 'other'
        for x in dominant_taxon
    ]

    # set up figure
    plt.close('all')
    fig, ax = plt.subplots(figsize=(5, 5))

    if with_diversity_background:
        # heatmap as background indicating interpolated diversity in that region
        cmap = sns.dark_palette(color='white', as_cmap=True, reverse=True)
        from scipy.interpolate import griddata
        xmin, xmax = np.floor(min(embedding[:, 0])), np.ceil(max(embedding[:, 0]))
        ymin, ymax = np.floor(min(embedding[:, 1])), np.ceil(max(embedding[:, 1]))
        grid_x, grid_y = np.mgrid[xmin:xmax:15j, ymin:ymax:15j]
        grid_z1 = griddata(embedding, ivs, (grid_x, grid_y), method='linear', fill_value=np.nan)
        # plot heatmap
        ax.imshow(np.flipud(grid_z1.T), extent=(xmin, xmax, ymin, ymax), cmap=cmap, vmin=1, vmax=15, alpha=0.25)

    ax.set_facecolor(bgcolor)
    # ax.set_aspect('equal', adjustable='box')

    # taxumap scatter
    if usercolors is None:
        noncolored_idx = list(map(lambda x: x == 'whitesmoke', embedding_colors))
        ax.scatter(embedding[noncolored_idx, 0], embedding[noncolored_idx, 1],
                   c=np.array(embedding_colors)[noncolored_idx],
                   s=3, alpha=1, marker='o', rasterized=True)
        colored_idx = list(map(lambda x: x != 'whitesmoke', embedding_colors))
        ax.scatter(embedding[colored_idx, 0], embedding[colored_idx, 1],
                   c=np.array(embedding_colors)[colored_idx],
                   s=3, alpha=1, marker='o', rasterized=True)
        ax.scatter(embedding[:, 0], embedding[:, 1], facecolor='none', edgecolor='k',
                   linewidth=.1, s=3, alpha=1, marker='o', rasterized=True)
        from matplotlib.lines import Line2D
        legend_elements = [
            Line2D([0], [0], marker='o', linestyle='', alpha=1, color=c, label=n)
            for (n, c) in set(zip(embedding_labels, embedding_colors))
        ]
        ax.legend(handles=legend_elements, loc=(1.1, .01))
    else:
        dominant_asv = X.idxmax(axis=1)
        dominant_asv_rel = X.max(axis=1)
        embedding_colors = [
            'whitesmoke' if dominant_asv_rel[i] < 0.3 else usercolors.loc[x].values[0]
            for i, x in dominant_asv.items()
        ]
        noncolored_idx = list(map(lambda x: x == 'whitesmoke', embedding_colors))
        ax.scatter(embedding[noncolored_idx, 0], embedding[noncolored_idx, 1],
                   c=np.array(embedding_colors)[noncolored_idx],
                   s=3, alpha=1, linewidth=0.1, marker='o', rasterized=True)
        colored_idx = list(map(lambda x: x != 'whitesmoke', embedding_colors))
        ax.scatter(embedding[colored_idx, 0], embedding[colored_idx, 1],
                   c=np.array(embedding_colors)[colored_idx],
                   s=3, alpha=1, linewidth=0.1, marker='o', rasterized=True)
        from matplotlib.lines import Line2D
        most_dominating = dominant_asv.loc[dominant_asv_rel >= 0.3].apply(
            lambda v: tax.loc[v]['Genus']).value_counts().sort_values(ascending=False).head(30)
        most_dominating_color = dominant_asv.loc[dominant_asv_rel >= 0.3].apply(
            lambda v: usercolors.loc[v].values[0]).value_counts().sort_values(ascending=False).head(30).index
        legend_names = np.array(list(map(
            lambda v: tax.loc[v].Genus.values,
            [dominant_asv[dominant_asv_rel > 0.3].value_counts().head(30).index.to_list()]))).reshape(-1)
        legend_colors = np.array(list(map(
            lambda v: usercolors.loc[v].values,
            [dominant_asv[dominant_asv_rel > 0.3].value_counts().head(30).index.to_list()]))).reshape(-1)
        legend_elements = [
            Line2D([0], [0], marker='o', linestyle='', alpha=1, color=c, label=n)
            for (n, c) in set(zip(legend_names, legend_colors))
        ]
        ax.legend(handles=legend_elements, loc=(1.1, .01))

    ax.set_yticks([])
    ax.set_xticks([])
    ax.set_ylabel('phyUMAP-2')
    ax.set_xlabel('phyUMAP-1')
    sns.despine()
    plt.gcf().savefig('results/projection.pdf', dpi=250, bbox_inches='tight')
    plt.axis('off')
    ax.legend().remove()
    plt.gcf().savefig('results/no_axes_projection.png', dpi=250)
def warping(V1, V2): ################# parameter setting ################### display_flag = True affine_start_flag = True polarity_flag = True nsamp = 100 eps_dum = 0.25 ndum_frac = 0.25 mean_dist_global = [] ori_weight = 0.1 nbins_theta = 12 nbins_r = 5 r_inner = 0.125 r_outer = 2 tan_eps = 1.0 n_iter = 6 beta_init = 1 r = 1 w = 4 ################## image loading ####################### #V1_orig = plt.imread('/Users/liujin/Desktop/mask_0.jpeg') #print(V1_orig.shape) = (128, 128) #V2_orig = plt.imread('/Users/liujin/Desktop/mask_4.jpeg') #print(V1_orig.dtype) = unit8 V1 = V1.squeeze() #print(V1.shape) = (128, 128) V2 = V2.squeeze() #print(v1.dtype) = unit8 print(V1.shape) binarizer1 = Binarizer(threshold=0.5).fit(V1) V1 = binarizer1.transform( V1) #print(V1.shape) = (128, 128) #print(v1.dtype) = unit8 binarizer2 = Binarizer(threshold=0.5).fit(V2) V2 = binarizer2.transform(V2) V1 = imfill(V1) V2 = imfill(V2) V1 = expand_dims( asarray(V1), axis=2) #print(V1.shape) = (128, 128, 1) #print(v1.dtype) = unit8 V2 = expand_dims(asarray(V2), axis=2) V1 = V1.astype( float) #print(V1.shape) = (128, 128, 1) #print(v1.dtype) = float64 V2 = V2.astype(float) N1, N2, _ = V1.shape print("N1 is {}".format(N1)) ################# edge detection ######################## x2, y2, t2 = bdry_extract_3(V2) nsamp2 = len(x2) if nsamp2 >= nsamp: x2, y2, t2 = get_samples_1(x2, y2, t2, nsamp) else: print("error: shape #2 does not have enough samples") Y = np.concatenate((x2, y2), axis=1) x1, y1, t1 = bdry_extract_3(V1) nsamp1 = len(x1) if nsamp1 >= nsamp: x1, y1, t1 = get_samples_1(x1, y1, t1, nsamp) else: print("error: shape #1 does not have enough samples") X = np.concatenate((x1, y1), axis=1) # plt.plot(x2, y2,'r+') # axes = plt.gca() # axes.set_xlim(0,100) # axes.set_ylim(128,0) # plt.show() # plt.plot(x1, y1,'r+') # axes = plt.gca() # axes.set_xlim(0,100) # axes.set_ylim(128,0) # plt.show() ##################### up to here, x1 is horizontal, y1 is vertical ##################### ################ compute correspondence ################## Xk = X tk = t1 k = True signal = True ndum = np.round(ndum_frac * nsamp).astype(int) #print(ndum) # = 25 out_vec_1 = np.zeros((1, nsamp)) out_vec_2 = np.zeros((1, nsamp)) while signal: BH1, mean_dist_1 = sc_compute(Xk.T, zeros( (1, nsamp)), mean_dist_global, nbins_theta, nbins_r, r_inner, r_outer, out_vec_1) BH2, mean_dist_2 = sc_compute(Y.T, zeros( (1, nsamp)), mean_dist_global, nbins_theta, nbins_r, r_inner, r_outer, out_vec_2) # from_mat=sio.loadmat("/Users/liujin/Desktop/hist_cost.mat") # BH1 = from_mat['BH1'] # BH2 = from_mat['BH2'] # mean_dist_1 = from_mat['mean_dist_1'] # mean_dist_2 = from_mat['mean_dist_2'] # t1 = from_mat["t1"] # t2 = from_mat["t2"] # tk = from_mat["tk"] if affine_start_flag: if k == True: lambda_o = 1000 else: lambda_o = beta_init * r**(k - 2) else: lambda_o = beta_init * r**(k - 1) beta_k = (mean_dist_2**2) * lambda_o #print("beta_k is {}".format(beta_k)) costmat_shape = hist_cost_2(BH1, BH2) #print("costmat_shape is {}".format(costmat_shape)) ###################################################################### theta_diff = np.tile(tk, (1, nsamp)) - np.tile(t2.T, (nsamp, 1)) #print("theta_diff is {}".format(theta_diff)) if polarity_flag: costmat_theta = 0.5 * (1 - np.cos(theta_diff)) else: costmat_theta = 0.5 * (1 - np.cos(2 * theta_diff)) costmat = (1 - ori_weight) * costmat_shape + ori_weight * costmat_theta #print("costmat is {}".format(costmat)) ####################################################################### nptsd = nsamp + ndum costmat2 = 
eps_dum * np.ones((nptsd, nptsd)) costmat2[:nsamp, :nsamp] = costmat #print("costmat2 is {}".format(costmat2)) ####################################################################### # m = Munkres() # cvec=m.compute(costmat2) # ## my processing to take out index # cvec = np.asarray(cvec) # print("cvec is {}".format(cvec)) # cvec = cvec[np.newaxis, :, 1] #m = munkres.Munkres() #indexes = m.compute(costmat2.tolist()) # from_mat=sio.loadmat("/Users/liujin/Desktop/costmat2.mat") # costmat2 = from_mat['costmat2'] indexes = hungarian.lap(costmat2) indexes = np.asarray(indexes) #print(indexes.shape) cvec = indexes[np.newaxis, 1, :] #print("cvec is {}".format(cvec)) #print("cvec shape is {}".format(cvec.shape)) # from_mat=sio.loadmat("/Users/liujin/Desktop/cvec.mat") # cvec = from_mat['cvec'] -1 # #print("cvec is {}".format(cvec)) # nptsd = from_mat["nptsd"] # nptsd = int(nptsd) # #print("nptsd is {}".format(nptsd)) # Xk = from_mat["Xk"] # #print("Xk is {}".format(Xk)) # X = from_mat["X"] # #print("X is {}".format(X)) # Y = from_mat["Y"] a = np.sort(cvec) cvec2 = np.argsort(cvec) #print("cvec2 is {}".format(cvec2)) out_vec_1 = cvec2[0, :nsamp] > nsamp #print("out_cvec_1 is {}".format(out_vec_1)) out_vec_2 = cvec[0, :nsamp] > nsamp #print("out_cvec_2 is {}".format(out_vec_2)) X2 = np.nan * np.ones((nptsd, 2)) X2[:nsamp, :] = Xk X2 = X2[cvec[:].squeeze(), :] #print("X2 is {}".format(X2)) X2b = np.nan * np.ones((nptsd, 2)) X2b[:nsamp, :] = X X2b = X2b[cvec[:].squeeze(), :] #print("X2b is {}".format(X2b)) ## attention Y2 = np.nan * np.ones((nptsd, 2)) Y2[:nsamp, :] = Y #print("Y2 is {}".format(Y2)) #print("X2b is {}".format(X2b)) #print("Y is {}".format(Y)) ind_good = np.nonzero(np.logical_not(np.isnan(X2b[:nsamp, 1]))) n_good = size(np.asarray(ind_good)) #print("n_good is {}".format(n_good)) X3b = X2b[ind_good, :].squeeze() Y3 = Y2[ind_good, :].squeeze() #print("X3b is {}".format(X3b)) #print("Y3 is {}".format(Y3)) # ########## ################################################## # # plt.plot(X2[:,0], X2[:,1],'r+') # # axes = plt.gca() # # axes.set_xlim(0,100) # # axes.set_ylim(128,0) # # plt.show() # # plt.plot(Y2[:,0], Y2[:,1],'r+') # # axes = plt.gca() # # axes.set_xlim(0,100) # # axes.set_ylim(128,0) # # plt.show() # plt.plot(X3b[:,0], X3b[:,1],'r+') # axes = plt.gca() # axes.set_xlim(0,100) # axes.set_ylim(128,0) # plt.show() # plt.plot(Y3[:,0], Y3[:,1],'r+') # axes = plt.gca() # axes.set_xlim(0,100) # axes.set_ylim(128,0) # plt.show() # from_mat=sio.loadmat("/Users/liujin/Desktop/book.mat") # X3b = from_mat['X3b'] # Y3 = from_mat['Y3'] # beta_k = from_mat['beta_k'] cx, cy, E = bookenstain(X3b, Y3, beta_k) #print("cx is {}".format(cx)) #print("cy is {}".format(cy)) #print("E is {}".format(E)) ########################### bookenstain is the same #################### # calculate affine cost A = np.concatenate( (cx[n_good + 1:n_good + 3, :], cy[n_good + 1:n_good + 3, :]), axis=1) #print("A is {}".format(A)) _, s, _ = np.linalg.svd(A) #print("s is {}".format(s)) aff_cost = log(s[0] / s[1]) #print(aff_cost) # calculate shape context cost a1 = np.min(costmat, axis=0, keepdims=True) a2 = np.min(costmat, axis=1, keepdims=True) input_lj = np.asarray([np.nanmean(a1), np.nanmean(a2)]) sc_cost = np.max(input_lj) # warp each coordinate fx_aff = np.dot(cx[n_good:n_good + 3].T, np.concatenate((np.ones((1, nsamp)), X.T), axis=0)) d2 = dist2(X3b, X) d2[d2 <= 0] = 0 U = np.multiply(d2, np.log(d2 + np.finfo(float).eps)) fx_wrp = np.dot(cx[:n_good].T, U) fx = fx_aff + fx_wrp fy_aff = np.dot(cy[n_good:n_good + 3].T, 
np.concatenate((np.ones((1, nsamp)), X.T), axis=0)) fy_wrp = np.dot(cy[:n_good].T, U) fy = fy_aff + fy_wrp Z = np.concatenate((fx, fy), axis=0) Z = Z.T # apply to tangent Xtan = X + np.dot(tan_eps, np.concatenate((np.cos(t1), np.sin(t1)), axis=1)) fx_aff = np.dot(cx[n_good:n_good + 3].T, np.concatenate((np.ones((1, nsamp)), Xtan.T), axis=0)) d2 = dist2(X3b, Xtan) d2[d2 <= 0] = 0 U = np.multiply(d2, np.log(d2 + np.finfo(float).eps)) fx_wrp = np.dot(cx[:n_good].T, U) fx = fx_aff + fx_wrp fy_aff = np.dot(cx[n_good:n_good + 3].T, np.concatenate((np.ones((1, nsamp)), Xtan.T), axis=0)) fy_wrp = np.dot(cy[:n_good].T, U) Ztan = np.concatenate((fx, fy), axis=0) Ztan = Ztan.T len_lj = Ztan.shape[0] tk = np.zeros((len_lj, 1)) for i in range(len_lj): tk[i] = atan2(Ztan[i, 1] - Z[i, 1], Ztan[i, 0] - Z[i, 0]) Xk = Z if k == n_iter: signal = False else: k = k + 1 # ######################## image warp ###################################### x, y = np.mgrid[0:N2, 0:N1] x = x.reshape(-1, 1) #print("x is {}".format(x)) y = y.reshape(-1, 1) #print("y is {}".format(y)) M = np.size(x) fx_aff = np.dot(cx[n_good:n_good + 3].T, np.concatenate((np.ones((1, M)), x.T, y.T), axis=0)) d2 = dist2(X3b, np.concatenate((x, y), axis=1)) fx_wrp = np.dot(cx[:n_good].T, np.multiply(d2, np.log(d2 + np.finfo(float).eps))) fx = fx_aff + fx_wrp #print("fx is {}".format(fx)) fy_aff = np.dot(cy[n_good:n_good + 3].T, np.concatenate((np.ones((1, M)), x.T, y.T), axis=0)) fy_wrp = np.dot(cy[:n_good].T, np.multiply(d2, np.log(d2 + np.finfo(float).eps))) fy = fy_aff + fy_wrp grid_x, grid_y = np.meshgrid(np.arange(0, N2, 1), np.arange(0, N1, 1)) fx = np.asarray(fx) fy = np.asarray(fy) V1m = griddata((fx.T.squeeze(), fy.T.squeeze()), V1[y, x], (grid_x, grid_y), method='nearest') V1m = V1m.squeeze() V1m[np.isnan(V1m)] = 0 binarizer = Binarizer(threshold=0.5).fit(V1m) V1m = binarizer.transform(V1m) plt.imshow(V1m.squeeze()) plt.show() # fz=find(isnan(V1w)); V1w(fz)=0; return V1m
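# Sketch of the final image-warp step above in isolation: given target
# coordinates (fx, fy) for every source pixel, resample onto the regular pixel
# grid with nearest-neighbour griddata. Toy image and a hypothetical smooth
# deformation, not the shape-context pipeline itself.
import numpy as np
from scipy.interpolate import griddata

N = 64
src = np.zeros((N, N))
src[16:48, 16:48] = 1.0                       # a filled square as test image
yy, xx = np.mgrid[0:N, 0:N]
# hypothetical smooth deformation of the pixel grid
fx = xx + 3.0 * np.sin(2 * np.pi * yy / N)
fy = yy + 3.0 * np.cos(2 * np.pi * xx / N)
gx, gy = np.meshgrid(np.arange(N), np.arange(N))
warped = griddata((fx.ravel(), fy.ravel()), src.ravel(), (gx, gy), method='nearest')
print(warped.shape)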
def stitch(x1, y1, u1, v1, x2, y2, u2, v2, blend):
    '''
    Stitch two vector fields with overlapping regions.

    Inputs:
        x1, y1 - coordinates of the first field in meshgrid form
        u1, v1 - scalar values of the first field
        x2, y2 - coordinates of the second field in meshgrid form
        u2, v2 - scalar values of the second field
        blend  - stitching method in the overlapping region:
                 'none', 'average', 'cubic' or 'cosine'
    Outputs:
        x, y - coordinates of the stitched field in meshgrid form
        u, v - scalar values of the stitched field
    Notes:
        - The PIV mask region must take the value of zero
    Disclaimer:
        This Py-code is translated from the Matlab-code shared on FMRL
        SharePoint, written by J. McClure
    '''
    # -------- Create 2D field for stitched image ------------------
    dx = np.max((np.abs(x1[0, 0] - x1[0, 1]), np.abs(x2[0, 0] - x2[0, 1])))
    dy = np.max((np.abs(y1[0, 0] - y1[1, 0]), np.abs(y2[0, 0] - y2[1, 0])))
    xmin = np.min((x1.min(), x2.min()))
    xmax = np.max((x1.max(), x2.max()))
    xmax = xmax + dx * ((xmax - xmin) / dx - np.floor((xmax - xmin) / dx))
    ymin = np.min((y1.min(), y2.min()))
    ymax = np.max((y1.max(), y2.max()))
    ymax = ymax + dy * ((ymax - ymin) / dy - np.floor((ymax - ymin) / dy))
    [x, y] = np.meshgrid(np.linspace(xmin, xmax, num=int(np.round(np.abs(xmax - xmin) / dx + 1))),
                         np.linspace(ymin, ymax, num=int(np.round(np.abs(ymax - ymin) / dy + 1))))

    # -------- Region of overlap -----------------------------------
    # Right boundary
    Ab = x[0, :] - np.min((x1.max(), x2.max()))
    Ab[Ab < 0] = 10e8
    xov2 = np.argmin(np.abs(Ab)) - 1
    # Left boundary
    Ac = x[0, :] - np.max((x1.min(), x2.min()))
    Ac[Ac > 0] = 10e8
    xov1 = np.argmin(np.abs(Ac)) + 1
    # Bottom boundary
    Ad = y[:, 0] - np.max((y1.min(), y2.min()))
    Ad[Ad > 0] = 10e8
    yov1 = np.argmin(np.abs(Ad))
    # Top boundary
    Ae = y[:, 0] - np.min((y1.max(), y2.max()))
    Ae[Ae < 0] = 10e8
    yov2 = np.argmin(np.abs(Ae))

    # -------- Interpolated region in both FOV -----------------------
    x11 = np.argmin(np.abs(x[0, :] - x1.min()))
    x12 = np.argmin(np.abs(x[0, :] - x1.max()))
    y11 = np.argmin(np.abs(y[:, 0] - y1.min()))
    y12 = np.argmin(np.abs(y[:, 0] - y1.max()))
    Af = x[0, :] - x2.min()
    Af[Af < 0] = 10e8
    x21 = np.argmin(np.abs(Af))
    x22 = np.argmin(np.abs(x[0, :] - x2.max()))
    y21 = np.argmin(np.abs(y[:, 0] - y2.min()))
    y22 = np.argmin(np.abs(y[:, 0] - y2.max()))

    # -------- Fill the interpolated velocity space ------------------
    u = np.zeros(np.shape(x))
    v = np.zeros(np.shape(x))
    # Reshape x1, y1, u1, v1, x2, y2, u2, v2 for interpolation
    x1_intp = np.reshape(x1, x1.size); y1_intp = np.reshape(y1, y1.size)
    u1_intp = np.reshape(u1, u1.size); v1_intp = np.reshape(v1, v1.size)
    x2_intp = np.reshape(x2, x2.size); y2_intp = np.reshape(y2, y2.size)
    u2_intp = np.reshape(u2, u2.size); v2_intp = np.reshape(v2, v2.size)
    # Interpolate velocity measurements of each FOV onto the new mesh
    # The new mesh extends each field of view to the outmost x and y
    u[y11:y12, x11:x12] = griddata((x1_intp, y1_intp), u1_intp, (x[y11:y12, x11:x12], y[y11:y12, x11:x12]), method='cubic')
    v[y11:y12, x11:x12] = griddata((x1_intp, y1_intp), v1_intp, (x[y11:y12, x11:x12], y[y11:y12, x11:x12]), method='cubic')
    u[y21:y22, x21:x22] = griddata((x2_intp, y2_intp), u2_intp, (x[y21:y22, x21:x22], y[y21:y22, x21:x22]), method='cubic')
    v[y21:y22, x21:x22] = griddata((x2_intp, y2_intp), v2_intp, (x[y21:y22, x21:x22], y[y21:y22, x21:x22]), method='cubic')
    # Interpolate velocity measurements of each FOV onto the overlapping region on the new mesh
    uov1 = griddata((x1_intp, y1_intp), u1_intp, (x[yov1:yov2, xov1:xov2], y[yov1:yov2, xov1:xov2]), method='cubic')
    vov1 = griddata((x1_intp, y1_intp), v1_intp, (x[yov1:yov2, xov1:xov2], y[yov1:yov2, xov1:xov2]), method='cubic')
    uov2 = griddata((x2_intp, y2_intp), u2_intp, (x[yov1:yov2, xov1:xov2], y[yov1:yov2, xov1:xov2]), method='cubic')
    vov2 = griddata((x2_intp, y2_intp), v2_intp, (x[yov1:yov2, xov1:xov2], y[yov1:yov2, xov1:xov2]), method='cubic')

    # ------- Select blending function for overlapping region ------------------
    ## Generate weighting functions
    # No blending
    if blend == 'none':
        wgt1b = np.ones((1, np.abs(xov2 - xov1)))
        wgt2b = np.zeros((1, np.abs(xov2 - xov1)))
    # Simple average
    elif blend == 'average':
        wgt1b = 0.5 * np.ones((1, np.abs(xov2 - xov1)))
        wgt2b = 0.5 * np.ones((1, np.abs(xov2 - xov1)))
    # Linear weight (despite the name, 'cubic' applies a linear ramp)
    elif blend == 'cubic':
        wgt1b = np.linspace(1, 0, num=(np.abs(xov2 - xov1)))
        wgt2b = np.linspace(0, 1, num=(np.abs(xov2 - xov1)))
    # Cosine weight
    elif blend == 'cosine':
        wgt1b = -0.5 * np.cos(np.linspace(0, np.pi, num=(np.abs(xov2 - xov1)))) + 0.5
        wgt2b = 0.5 * np.cos(np.linspace(0, np.pi, num=(np.abs(xov2 - xov1)))) + 0.5
    # Invalid input
    else:
        wgt1b = np.zeros((1, np.abs(xov2 - xov1)))
        wgt2b = np.zeros((1, np.abs(xov2 - xov1)))
        print('Invalid blending method!')
    wgt1 = np.matlib.repmat(wgt1b, (np.abs(yov2 - yov1)), 1)
    wgt2 = np.matlib.repmat(wgt2b, (np.abs(yov2 - yov1)), 1)
    # Blending
    uov1c = uov1 * wgt2
    uov2c = uov2 * wgt1
    vov1c = vov1 * wgt2
    vov2c = vov2 * wgt1
    u[yov1:yov2, xov1:xov2] = uov1c + uov2c
    v[yov1:yov2, xov1:xov2] = vov1c + vov2c
    return x, y, u, v
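# Quick look at the overlap weighting used in stitch(): each blend mode is just
# a pair of 1-D weight profiles across the overlap columns that sum to one
# (note the mode named 'cubic' above is actually a linear ramp). The profiles
# below are illustrative stand-ins, not extracted from the function.
import numpy as np

n_overlap = 50
ramp = np.linspace(0.0, 1.0, n_overlap)
profiles = {
    'none':    (np.ones(n_overlap), np.zeros(n_overlap)),
    'average': (0.5 * np.ones(n_overlap), 0.5 * np.ones(n_overlap)),
    'linear':  (1.0 - ramp, ramp),
    'cosine':  (0.5 + 0.5 * np.cos(np.pi * ramp), 0.5 - 0.5 * np.cos(np.pi * ramp)),
}
for name, (w1, w2) in profiles.items():
    assert np.allclose(w1 + w2, 1.0)  # blended field is a convex combination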
# excerpt from a loop over images: crop, FFT, and azimuthally average
img = img[h:-h, w:-w]
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20 * np.log(np.abs(fshift))

# Calculate the azimuthally averaged 1D power spectrum
psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)

points = np.linspace(0, N, num=psd1D.size)  # coordinates of the available PSD samples
xi = np.linspace(0, N, num=N)               # coordinates to interpolate onto
interpolated = griddata(points, psd1D, xi, method='cubic')
interpolated /= interpolated[0]

psd1D_total[cont, :] = interpolated
label_total[cont] = 0
cont += 1
if cont == number_iter:
    break  # leave the inner loop
if cont == number_iter:
    break  # leave the outer loop

for x in range(N):
    psd1D_org_mean[x] = np.mean(psd1D_total[:, x])
    psd1D_org_std[x] = np.std(psd1D_total[:, x])
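# The block above uses griddata in 1-D to resample the radial power spectrum
# onto N evenly spaced bins; for 1-D data, np.interp (linear) is a common
# lighter-weight alternative. Sketch with a fake profile for comparison only.
import numpy as np
from scipy.interpolate import griddata

psd1D = np.exp(-np.linspace(0, 5, 88))     # fake radial profile
N = 128
points = np.linspace(0, N, num=psd1D.size)
xi = np.linspace(0, N, num=N)
cubic = griddata(points, psd1D, xi, method='cubic')
linear = np.interp(xi, points, psd1D)
print(np.max(np.abs(cubic - linear)))      # difference between the two schemes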
import sys

import numpy as np
from scipy.interpolate import griddata
from scipy.interpolate import interp2d

mu = float(sys.argv[1])
e_bin = float(sys.argv[2])

data = np.genfromtxt("a_crit.txt", delimiter=',', comments='#')
X = data[:, 0]
Y = data[:, 1]
Z = data[:, 2]

xi = np.linspace(0, 0.5, 51)
yi = np.linspace(0, 0.8, 81)
zi = griddata((X, Y), Z, (xi[None, :], yi[:, None]), method='linear', fill_value=0)
f = interp2d(xi, yi, zi, kind='linear')

print("a_c = ", f(mu, e_bin)[0])
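# Note: scipy.interpolate.interp2d is deprecated and was removed in SciPy 1.14.
# A hedged sketch of the same gridded lookup using RegularGridInterpolator
# instead; zi here is a synthetic stand-in for the gridded a_crit surface.
import numpy as np
from scipy.interpolate import RegularGridInterpolator

xi = np.linspace(0, 0.5, 51)
yi = np.linspace(0, 0.8, 81)
zi = np.add.outer(yi, xi)                   # shape (len(yi), len(xi))
f = RegularGridInterpolator((yi, xi), zi, method='linear')
mu, e_bin = 0.3, 0.4
print("a_c = ", f((e_bin, mu)))             # note (y, x) point ordering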
# Example 1 # Plot concentration and velocity using Triangulation xlim = [400000, 800000] ylim = [-800000, -400000] plt.tripcolor(n['x'], n['y'], n['Concentration'], triangles=n['i'], vmin=0, vmax=1) q = plt.quiver(n['x'], n['y'], n['u'], n['v'], angles='xy', scale=10) plt.quiverkey(q, X=0.3, Y=1.1, U=1, label='Drift: 1 m/s', labelpos='E') plt.xlim(xlim) plt.ylim(ylim) plt.show() # Example 2 # Compute average speed for each element u_elems = n['u'][n['i']] v_elems = n['v'][n['i']] u_avg = u_elems.mean(axis=1) v_avg = v_elems.mean(axis=1) spd = np.hypot(u_avg, v_avg) # Example 3 # Rasterize concentration (convert from triangle elements to 2D grid) # 1. Get x,y coordinates of each element xe, ye = [n[i][n['i']].mean(axis=1) for i in ['x', 'y']] # 2. Create x,y destination grids xg, yg = np.meshgrid(np.linspace(*xlim, 100), np.linspace(*ylim[::-1], 100)) # 3. Interpolate from elements onto grid cg = griddata(np.array([xe, ye]).T, n['Concentration'], np.array([xg, yg]).T).T plt.imshow(cg, extent=[xlim[0], xlim[1], ylim[0], ylim[1]]) plt.show()
def identify_spectra_gauss_fit(spectra, prlltc=None, lmin=400., lmax=900., airmass=1.0, sigfac=3.0, plotobj=False):
    """Returns index of spectra picked by Gaussian fit.

    NOTE: Index is counted against the array, not seg_id
    """
    status = 0
    pl.ioff()
    kt = SedSpec.Spectra(spectra)
    # Get X,Y positions (arcsec) and summed values between lmin and lmax
    xs, ys, vs = kt.to_xyv(lmin=lmin, lmax=lmax)
    xi = np.linspace(np.nanmin(xs), np.nanmax(xs), 200)
    yi = np.linspace(np.nanmin(ys), np.nanmax(ys), 200)
    x, y = np.mgrid[np.nanmin(xs):np.nanmax(xs):200j, np.nanmin(ys):np.nanmax(ys):200j]
    points = list(zip(xs, ys))  # list() needed: griddata cannot consume a zip iterator
    values = vs
    gscl = (np.nanmax(xs) - np.nanmin(xs)) / 200.
    # Create image, print stats
    grid_vs = griddata(points, values, (x, y), method='linear')
    grid_vs[np.isnan(grid_vs)] = np.nanmean(grid_vs)
    grid_med = np.nanmedian(grid_vs)
    print("grid_vs min, max, mean, median: %f, %f, %f, %f\n" %
          (float(np.nanmin(grid_vs)), float(np.nanmax(grid_vs)), float(np.nanmean(grid_vs)), float(grid_med)))
    # Find features in image
    blobs = feature.blob_log(grid_vs - grid_med, min_sigma=10, max_sigma=20, threshold=100.0)
    print("Found %d blobs" % len(blobs))
    goodblob = 0
    # Loop over found blobs
    objs = []
    for blob in blobs:
        # Extract blob properties
        bx, by, br = blob
        br *= gscl
        bx = int(bx)
        by = int(by)
        # How bright is this blob?
        gv = grid_vs[bx, by] - grid_med
        # Exclude edge blobs and faint blobs
        if 0 < bx < 199 and 0 < by < 199 and gv > 100.:
            goodblob += 1
            print("%3d, z, x, y, dra, ddec: %8.1f, %5d, %5d, %6.2f, %6.2f" %
                  (goodblob, float(gv), bx, by, xi[bx], yi[by]))
            objs.append((gv, xi[bx], yi[by], br, goodblob))
    print("Found %d good objects" % len(objs))
    if len(objs) <= 0:
        objs = [(1000., 0., 0., 2., goodblob)]
    # Make sure the brightest object is last
    objs.sort()
    # Perform 2-D Gaussian fit of good (real) objects
    for obj in objs:
        # Fill initial fit params
        amplitude = obj[0]
        xo = obj[1]
        yo = obj[2]
        ro = obj[3]
        objno = obj[4]
        print("\nFitting object %d" % objno)
        print("initial guess : z,a,b,x,y,theta:"
              " %9.1f, %6.2f, %6.2f, %6.2f, %6.2f, %7.2f" % (amplitude, ro, ro, xo, yo, 0.))
        # create initial data
        initial_guess = (amplitude, xo, yo, ro, ro, 0, grid_med)
        try:
            popt, pcov = opt.curve_fit(gaussian_2d, (x, y), grid_vs.flatten(), p0=initial_guess)
        except RuntimeError:
            print("ERROR: unable to fit Gaussian")
            print("Using initial guess")
            status = 3
            popt = initial_guess
        # Fitted position
        xc = popt[1]
        yc = popt[2]
        a = popt[3]
        b = popt[4]
        if xc < -30. or xc > 30. or yc < -30. or yc > 30.:
            print("ERROR: X,Y out of bounds: %f, %f" % (xc, yc))
            print("Using initial guess")
            popt = initial_guess
            status = 1
        # Fitted 3-sigma extent
        if a > 14. or b > 14. or a <= 0. or b <= 0.:
            print("ERROR: A,B out of bounds: %f, %f" % (a, b))
            print("Using initial guess")
            popt = initial_guess
            status = 2
        # Extract values to use
        xc = popt[1]
        yc = popt[2]
        if status == 0:
            a = popt[3] * sigfac
            b = popt[4] * sigfac
        else:
            a = popt[3] * 2.0
            b = popt[4] * 2.0
        pos = (xc, yc)
        theta = popt[5]
        z = popt[0]
        # report position and shape
        ellipse = (a, b, xc, yc, theta * (180. / np.pi))
        print("PSF FIT on IFU: z,a,b,x,y,theta:"
              " %9.1f, %6.2f, %6.2f, %6.2f, %6.2f, %7.2f\n" % (z, a, b, xc, yc, theta * 180. / np.pi))
    positions = [pos]
    # Gather spaxels
    all_kix = []
    for the_pos in positions:
        all_kix.append(list(find_positions_ellipse(kt.KT.data, the_pos[0], the_pos[1], a, b, -theta)))
    all_kix = list(itertools.chain(*all_kix))
    kix = list(set(all_kix))
    print("found this many spaxels: %d" % len(kix))
    if status == 0 and goodblob == 0:
        print("ERROR: no good objects found in image")
        status = 4
    return kt.good_positions[kix], pos, positions, ellipse, status
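# The fit above calls gaussian_2d, which is not defined in this excerpt. A
# plausible (hypothetical) definition compatible with opt.curve_fit and the
# seven-element initial guess (amplitude, xo, yo, sigma_x, sigma_y, theta,
# offset): a rotated elliptical Gaussian plus a constant background, returned
# flattened to match grid_vs.flatten().
import numpy as np

def gaussian_2d(xy, amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
    """Rotated elliptical 2-D Gaussian evaluated on the meshgrid pair xy = (x, y)."""
    x, y = xy
    a = (np.cos(theta) ** 2) / (2 * sigma_x ** 2) + (np.sin(theta) ** 2) / (2 * sigma_y ** 2)
    b = -np.sin(2 * theta) / (4 * sigma_x ** 2) + np.sin(2 * theta) / (4 * sigma_y ** 2)
    c = (np.sin(theta) ** 2) / (2 * sigma_x ** 2) + (np.cos(theta) ** 2) / (2 * sigma_y ** 2)
    g = offset + amplitude * np.exp(-(a * (x - xo) ** 2
                                      + 2 * b * (x - xo) * (y - yo)
                                      + c * (y - yo) ** 2))
    return g.ravel()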
def grid(): """ First 2 columns must be x and y """ filename = r'C:\Work\Programming\pygmi\data\sue\filt_magdata.csv' ofile = r'C:\Work\Programming\pygmi\data\magdata.tif' srows = 0 dlim = None xcol = 0 ycol = 1 zcol = 2 dxy = 15 # This bit reads in the first line to see if it is a header pntfile = open(filename) ltmp = pntfile.readline() pntfile.close() ltmp = ltmp.lower() isheader = any(c.isalpha() for c in ltmp) # Check for comma delimiting if ',' in ltmp: dlim = ',' # Set skip rows if isheader: srows = 1 # Now read in data datatmp = np.genfromtxt(filename, unpack=True, delimiter=dlim, skip_header=srows, usemask=False) # Now we interpolate xdata = datatmp[xcol] ydata = datatmp[ycol] zdata = datatmp[zcol] points = datatmp[:2].T newxdata = np.arange(xdata.min(), xdata.max(), dxy) newydata = np.arange(ydata.min(), ydata.max(), dxy) newpoints = np.meshgrid(newxdata, newydata) newpoints = (newpoints[0].flatten(), newpoints[1].flatten()) grid = si.griddata(points, zdata, newpoints, method='cubic') grid.shape = (newydata.shape[0], newxdata.shape[0]) grid = grid[::-1] # export data odat = Data() odat.dataid = '' odat.tlx = newxdata.min() odat.tly = newydata.max() odat.xdim = dxy odat.ydim = dxy odat.nrofbands = 1 odat.nullvalue = 1e+20 odat.rows, odat.cols = grid.shape odat.data = np.ma.masked_invalid(grid) tmp = pio.ExportData(None) tmp.ifile = ofile # tmp.export_ascii_xyz([odat]) # tmp.export_gdal([odat], 'ENVI') tmp.export_gdal([odat], 'GTiff') # Plotting section # dataex = (newxdata.min(), newxdata.max(), newydata.min(), newydata.max()) # plt.imshow(grid, cmap = plt.cm.jet, extent=dataex, origin='upper') plt.tricontourf(xdata, ydata, zdata, 40, cmap=plt.cm.jet) # plt.plot(xdata, ydata, '.') plt.colorbar() plt.show() pdb.set_trace()
def make_plot(Zscore, z, tis, me, cut_min, cut_max, fw, contour_levels): fig = pl.figure(figsize=(9, 9)) #ax = fig.add_subplot(111) ax = fig.add_axes([0.02, 0.02, 0.9, 0.9]) xsep = 1. ysep = 1. mlx = MultipleLocator(10) mly = MultipleLocator(10) cmap = pl.cm.jet lbl = r'$z_{%s}$' % ag_sgn[me] if (me == Nx[3] - 2): lbl = r'$\rm Tissue$' elif (me == Nx[3] - 1): lbl = r'$\rm VOI$' cmap = matplotlib.colors.ListedColormap(random_color) #cmap = pl.cm.get_cmap('Greys', len(voi)) elif (me == Nx[3]): lbl = r'$\rm Atrophy$' ## dim0 bx = lx[0] + xsep by = lx[1] + ysep xls = np.linspace(bx, bx + lx[1], Nx[1]) yls = np.linspace(by, by + lx[2], Nx[2]) x_gr, y_gr = np.meshgrid(xls, yls) # grid the data. z_gr = griddata((x[0] + bx, y[0] + by), Zscore[0], (x_gr, y_gr), method='nearest') print(np.min(z_gr), cut_min) cs = ax.contourf(x_gr, y_gr, z_gr, contour_levels, cmap=cmap, levels=np.linspace(cut_min, cut_max, contour_levels), extend='both') # , norm = LogNorm()) #cs = ax.pcolor(x_gr,y_gr,z_gr, cmap=cmap, vmin=cut_min, vmax=cut_max) cs.cmap.set_under('None') cs.cmap.set_over('k') ## dim1 bx = 0.0 by = lx[1] + ysep xls = np.linspace(bx, bx + lx[0], Nx[0]) yls = np.linspace(by, by + lx[2], Nx[2]) x_gr, y_gr = np.meshgrid(xls, yls) # grid the data. z_gr = griddata((x[1] + bx, y[1] + by), Zscore[1], (x_gr, y_gr), method='nearest') cs = ax.contourf(x_gr, y_gr, z_gr, contour_levels, cmap=cmap, levels=np.linspace(cut_min, cut_max, contour_levels), extend='both') # , norm = LogNorm()) #cs = ax.pcolor(x_gr,y_gr,z_gr, cmap=cmap, vmin=cut_min, vmax=cut_max) cs.cmap.set_under('None') cs.cmap.set_over('k') ## dim2 bx = 0.0 by = 0.0 xls = np.linspace(bx, bx + lx[0], Nx[0]) yls = np.linspace(by, by + lx[1], Nx[1]) x_gr, y_gr = np.meshgrid(xls, yls) # grid the data. z_gr = griddata((x[2] + bx, y[2] + by), Zscore[2], (x_gr, y_gr), method='nearest') cs = ax.contourf(x_gr, y_gr, z_gr, contour_levels, cmap=cmap, levels=np.linspace(cut_min, cut_max, contour_levels), extend='both') # , norm = LogNorm()) #cs = ax.pcolor(x_gr,y_gr,z_gr, cmap=cmap, vmin=cut_min, vmax=cut_max) cs.cmap.set_under('None') cs.cmap.set_over('None') ## section lines ax.plot([xsep, lx[0]], [sec[1] * dx[1], sec[1] * dx[1]], 'k-', lw=0.5) ax.plot([xsep, lx[0] + xsep + lx[1]], [lx[1] + ysep + sec[2] * dx[2], lx[1] + ysep + sec[2] * dx[2]], 'k-', lw=0.5) ax.plot([sec[0] * dx[0], sec[0] * dx[0]], [ysep, lx[1] + lx[2] + ysep], 'k-', lw=0.5) ax.plot([lx[0] + xsep + sec[1] * dx[1], lx[0] + xsep + sec[1] * dx[1]], [lx[1] + ysep, lx[1] + ysep + lx[2]], 'k-', lw=0.5) # scale bar from matplotlib.patches import Rectangle #cax = pl.gca() ax.add_patch(Rectangle((150, 205), 100, 2, alpha=1, color='k')) #ax.plot([12 + 200,108 + 200],[lx[1] - 14.0*ysep,lx[1] - 14.0*ysep],'k-',lw=10.0) fig.text(0.39, 0.47, r'$\rm 100 ~mm$') # ax.plot(x, y, 'ko', markersize=4) ax.set_xlim(0, lx[0] + lx[1] + xsep) ax.set_ylim(0, lx[1] + lx[2] + ysep) #ax.tick_params(axis='x',which='minor',bottom='on') #ax.tick_params(axis='y',which='minor',bottom='on') ax.xaxis.set_minor_locator(mlx) ax.yaxis.set_minor_locator(mly) tit = r'%s , $\rm t = %g ~(day)$' % (ag_tit[me], realtime) ax.set_xticks([]) ax.set_yticks([]) #if (i_y == 1): fig.text(0.50, 0.96, tit, fontsize=30, ha='center', va='center') #ax.set_title(tit) #ax.set_xticklabels([]) #if (i_y == 0): #ax.set_xlabel(r'$\left( \phi - \pi \right) \sin{\theta} + \pi$') #ax.set_xticklabels([r'$\rm \pi/2$',r'$\rm \pi$',r'$\rm 3\pi/2$',r'$\rm 2 \pi$']) #ax.set_xlabel(labels[0]) #ax.set_ylabel(labels[1]) #ax.set_yticklabels([r'$\rm \pi/2$',r'$\rm 
\pi$']) #else: #pl.setp(ax.get_yticklabels(), visible=False) #ax.set_yticklabels([]) #if (i_x == n_X - 1 and i_y == 0) : cbar_ax = fig.add_axes([0.93, 0.15, 0.02, 0.7]) cbar = pl.colorbar(cs, cax=cbar_ax) cbar.set_ticks([cut_min, cut_max]) #mticks = cbar.norm([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,2,3,4,5,6,7,8,9,10,20]) #cbar.ax.yaxis.set_ticks(mticks, minor=True) #cbar.set_ticklabels([0, r'%.2e' % np.max(z)]) #cbar.ax.set_yticklabels([r'$\rm %.1e$' % cut_min, r'$\rm %.1e$' % cut_max],rotation=90,fontsize=30) cbar.ax.set_yticklabels([r'$\rm %.1f$' % cut_min, r'$\rm %.1f$' % cut_max], rotation=90, fontsize=30) fig.text(0.96, 0.5, lbl, rotation='vertical', fontsize=30) ax.set_aspect('equal') # inset ax2 = fig.add_axes([0.48, 0.11, 0.38, 0.28], facecolor=(1., 1., 1.)) if (me == Nx[3] - 2): # for type (tissue) histogram Z = np.ma.masked_equal(z, -1) z = Z.compressed() ax2.hist(z, histtype='stepfilled', bins=50, density=False, fc='grey', alpha=0.6, edgecolor='black', linewidth=1.2) # bins='auto' ax2.set_xticklabels([ r'$\rm -1 (empty)$', r'$\rm 0 (CSF)$', r'$\rm 1 (WM)$', r'$\rm 2 (GM)$' ]) elif (me == Nx[3] - 1): # for group (voi) histogram Z = np.ma.masked_equal(z, 0) z = Z.compressed() ax2.hist(z, histtype='stepfilled', bins=256, density=False, fc='grey', alpha=0.6, edgecolor='black', linewidth=1.2) # bins='auto' else: marray = np.ma.masked_where(tis != 1, tis).mask z1 = np.ma.array(z, mask=marray).compressed() marray = np.ma.masked_where(tis != 2, tis).mask z2 = np.ma.array(z, mask=marray).compressed() SMALL = 1.e-6 z1[z1 <= SMALL] = 0 Z = np.ma.masked_equal(z1, 0) z1 = Z.compressed() z2[z2 <= SMALL] = 0 Z = np.ma.masked_equal(z2, 0) z2 = Z.compressed() ax2.hist(z2, histtype='stepfilled', bins=50, density=False, fc='grey', alpha=0.6, edgecolor='black', linewidth=1.2, label=r'$\rm GM$') # bins='auto' ax2.hist(z1, histtype='stepfilled', bins=50, density=False, fc='yellow', alpha=0.8, edgecolor='black', linewidth=1.2, label=r'$\rm WM$') # bins='auto' ax2.legend(loc='upper right', fontsize=18, ncol=2) ax2.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) #ax2.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) a = np.histogram(z1, bins=50, density=False) b = np.histogram(z2, bins=50, density=False) ax2.set_ylim(0, 1.30 * max(np.max(a[0]), np.max(b[0]))) #ax2.set_xlim(cut_min,cut_max) ax2.set_yticklabels([]) ax2.set_title(ag_names[me], fontsize=26) #ax2.set_xlabel(ag_names[me], fontsize = 26) #ax2.set_ylabel(r'$\rm probability$') pl.savefig(fw + '.png', format='png', dpi=100, orientation='landscape') pl.close()
particle_status = pt_results['particle_status'] file.close() fig = plt.figure(figsize=(20, 8)) ax = fig.add_subplot(111, projection='3d') ####plt.contour(river_surf_x,river_surf_y,river_surf_z,30,linewidths=1,color="k") if plot_surface: ### create data for plotting river bed river_face_x = x[unique_face_index[0, :]] river_face_y = y[unique_face_index[1, :]] river_face_z = z[unique_face_index[2, :]] river_surf_x = np.linspace(river_face_x.min(), river_face_x.max(), 100) river_surf_y = np.linspace(river_face_y.min(), river_face_y.max(), 100) river_surf_z = griddata((river_face_x, river_face_y), river_face_z, (river_surf_x[None, :], river_surf_y[:, None]), method='cubic') river_surf_x_grid, river_surf_y_grid = np.meshgrid( river_surf_x, river_surf_y) ax.plot_surface(river_surf_x_grid, river_surf_y_grid, river_surf_z, linewidths=0, antialiased=True, alpha=0.3, rstride=1, cstride=1) ###cmap=cm.coolwarm,) ## for imaterial in range(9,10): ##material_type["Ringold_Fine"]:##,material_type["Ringold_Fine"]: imaterial = material_type["Hanford"]
def plot_map(ax, x_grid, y_grid, z_values, colormap="plasma", resolution=30j, savefig=0, figure_name=None): """ This is a function to plot 2D functions Args: ax ( pyplot instance ): the handler of the plot which we create x_grid ( list ): the x grid points, dimension (Nx) y_grid ( list ): the y grid points, dimension (Ny) z_values ( list of lists ): the values of the function at the grid points, dimension (Nx, Ny) colormap ( string ): the type of coloring scheme, Options include: "plasma" (default), "Blues", "viridis", "binary", "hot", etc. resolution ( complex, imaginary ): the degree of extra-granulation in the plotting interpolation savefig ( int ): 0 - don't save the figure as a file, 1 - do save it figure_name ( string ): the name of the file to where the figure is to be saved (only is used if savefig == 1) Returns: None : just plots the 2D image """ npts_x = len(x_grid) npts_y = len(y_grid) xmin = x_grid[0] xmax = x_grid[npts_x - 1] ymin = y_grid[0] ymax = y_grid[npts_y - 1] extent = (xmin, xmax, ymin, ymax) xs0, ys0, zs0 = [], [], [] for i in range(npts_x): for j in range(npts_y): xs0.append(x_grid[i]) ys0.append(y_grid[j]) zs0.append(z_values[i][j]) #N = 30j xs, ys = np.mgrid[extent[0]:extent[1]:resolution, extent[2]:extent[3]:resolution] zs = griddata((xs0, ys0), zs0, (xs, ys), method="linear") #ax.xticks(energy, rotation=30) #ax.yticks(energy, rotation=30) ax.xticks(rotation=30) ax.yticks(rotation=30) ax.imshow(zs.T, cmap=colormap, extent=extent, interpolation='Lanczos', origin='lower') #ax.plot(xs0, ys0, "bo") ax.colorbar() if savefig == 1: ax.savefig(figure_name)
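# Hypothetical usage note for plot_map above: despite the parameter name, `ax`
# is expected to be the pyplot module itself, as the docstring hints (the body
# calls ax.xticks, ax.colorbar and ax.savefig, which exist on plt but not on an
# Axes object). A minimal sketch on a small analytic surface:
import numpy as np
import matplotlib.pyplot as plt

xg = np.linspace(-2.0, 2.0, 25)
yg = np.linspace(-2.0, 2.0, 25)
zv = [[np.exp(-(xi ** 2 + yi ** 2)) for yi in yg] for xi in xg]

plot_map(plt, xg, yg, zv, colormap="viridis", resolution=50j)
plt.show()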
mode_t[:, s, 1], mode_t[:, s, 2], length=0.1, ) from ss_util import axisEqual3D axisEqual3D(ax) elif args.plot_l_z: from scipy.interpolate import griddata intvl = (np.max(q_cart_2d[:, 0]) - np.min(q_cart_2d[:, 0])) / 200. gridx, gridy = np.mgrid[ np.min(q_cart_2d[:, 0]):np.max(q_cart_2d[:, 0]):intvl, np.min(q_cart_2d[:, 1]):np.max(q_cart_2d[:, 1]):intvl, ] gdat = griddata( np.transpose([q_cart_2d[:, 0], q_cart_2d[:, 1]]), np.real(mode_l_2d[:, s, 2]) / hbar, (gridx, gridy), method='cubic', ).T fig, ax = plt.subplots() if args.relative: ish = ax.imshow(gdat, cmap='seismic', origin='lower') else: ish = ax.imshow(gdat, cmap='seismic', origin='lower', vmin=-1., vmax=1.) # ax.contourf( # gridx, gridy, # gdat, # 100,
def show_confidences(state_action_partition, classifier, sample_actions, show=True): """ Show confidences together with real data points as an linearly interpolated image. :param state_action_partition: State-action partition. :param classifier: State-action classifier. :param sample_actions: Function that samples actions given a state. :param show: Show plot. :return: None. """ xs = [] ys = [] colors = [] real_points = [] real_colors = [] for idx, block in enumerate(state_action_partition): for transition in block: real_points.append([transition[0], transition[1]]) real_colors.append(idx) state = transition[3] actions = sample_actions(state) probs = classifier.batch_predict_prob([state] * len(actions), actions) for i in range(len(probs)): xs.append(state) ys.append(actions[i]) colors.append(probs[i][np.argmax(probs[i])]) real_points = np.array(real_points, dtype=np.float32) real_colors = np.array(real_colors, dtype=np.float32) xs = np.array(xs, dtype=np.float32) ys = np.array(ys, dtype=np.float32) colors = np.array(colors, dtype=np.float32) x_min = int(np.floor(np.min(xs))) x_max = int(np.ceil(np.max(xs))) y_min = int(np.floor(np.min(ys))) y_max = int(np.ceil(np.max(ys))) grid_x, grid_y = np.mgrid[x_min:x_max:1000j, y_min:y_max:1000j] grid = griddata(np.stack([xs, ys], axis=-1), colors, (grid_x, grid_y), method="linear") #plt.rcParams['axes.facecolor'] = "white" plt.figure(figsize=(14, 8)) plt.imshow(grid.T, extent=(x_min, x_max, y_min, y_max), origin="lower", cmap="gray", vmin=0, vmax=1) cbar = plt.colorbar() cbar.set_label("confidence") plt.scatter(real_points[:, 0], real_points[:, 1], c=real_colors) plt.xlabel("states") plt.ylabel("actions") if show: plt.show()
# save inferred system response for plotting manuscript figures in MATLAB.
scipy.io.savemat('Pred.mat', {'u_FullField_Pred': u_FullField_Pred})
scipy.io.savemat('Histories.mat', {
    'lambda_history_Pretrain': lambda_history_Pretrain,
    'lambda_history_Adam': lambda_history_Adam,
    'lambda_history_STRidge': lambda_history_STRidge,
    'ridge_append_counter_STRidge': ridge_append_counter_STRidge,
    'loss_f_history_STRidge': loss_f_history_STRidge
})

# plot the whole domain
U_pred = griddata(X_star, u_FullField_Pred.flatten(), (X, T), method='cubic')
fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
surf = ax.plot_surface(X, T, U_pred, cmap=cm.coolwarm, linewidth=0, antialiased=False)
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('u')
plt.title('Model Result')
plt.savefig('28.png')

# plot the whole domain truth
x, y = pos[:, 0], pos[:, 1]
vx, vy = vel[:, 0], vel[:, 1]

# define regular grid spatially covering input data
n = 1024
xg = np.linspace(rangeX[0], rangeX[1], n)
yg = np.linspace(rangeY[0], rangeY[1], n)
delta_x = np.diff(xg).mean()
delta_y = np.diff(yg).mean()
X, Y = np.meshgrid(xg, yg)

# data to interpolate
z = vx
# interpolate Z values on defined grid
Z = griddata(np.vstack((x.flatten(), y.flatten())).T,
             np.vstack(z.flatten()), (X, Y), method='linear').reshape(X.shape)
# mask nan values, so they will not appear on plot
Zm = np.ma.masked_where(np.isnan(Z), Z)
# dZdY = np.gradient(Zm.flatten('F'), 2*delta_y).reshape(Y.shape).T
print(np.gradient(Zm, delta_y)[0].shape)
dZdY = np.gradient(Zm, delta_y)[0].reshape(Zm.shape)

# data to interpolate
z = vy
# interpolate Z values on defined grid
Z = griddata(np.vstack((x.flatten(), y.flatten())).T,
             np.vstack(z.flatten()), (X, Y), method='linear').reshape(X.shape)
# mask nan values, so they will not appear on plot
Zm = np.ma.masked_where(np.isnan(Z), Z)
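# Sketch of the gradient step above: for a (ny, nx) array, np.gradient with a
# scalar spacing returns derivatives per axis, so [0] is the y-derivative.
# Verified on f(y) = y^2, whose derivative is 2y.
import numpy as np

yg = np.linspace(0.0, 1.0, 11)
delta_y = np.diff(yg).mean()
Z = np.tile(yg[:, None] ** 2, (1, 5))    # f(y) = y^2, constant along x
dZdY = np.gradient(Z, delta_y)[0]        # approximately 2*y (one-sided at edges)
print(dZdY[:, 0])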
print('Now transforming coordinate system of SMB')
wgs84 = pyproj.Proj("+init=EPSG:4326")   # LatLon with WGS84 datum used by GPS units and Google Earth
psn_gl = pyproj.Proj("+init=epsg:3413")  # Polar Stereographic North used by BedMachine (as stated in NetCDF header)
xs, ys = pyproj.transform(wgs84, psn_gl, x_lon, y_lat)
# xs_81, ys_81 = pyproj.transform(wgs84, psn_gl, x_lon_81, y_lat_81)

smb_1980 = smb_raw[0][0]
smb_2014 = smb_raw[-1][0]
Xmat, Ymat = np.meshgrid(X, Y)
regridded_smb_1980 = interpolate.griddata((xs.ravel(), ys.ravel()), smb_1980.ravel(), (Xmat, Ymat), method='nearest')
regridded_smb_2014 = interpolate.griddata((xs.ravel(), ys.ravel()), smb_2014.ravel(), (Xmat, Ymat), method='nearest')
SMB_1980 = interpolate.interp2d(X, Y, regridded_smb_1980, kind='linear')
SMB_2014 = interpolate.interp2d(X, Y, regridded_smb_2014, kind='linear')

### Hindcasting SMB: year-specific 2006-2014
# SMB_dict = {}  # set up a dictionary of surface mass balance fields indexed by year
# for year in range(2006, 2015):
#     index = year - 2015  # so that 2014 will be smb_raw[-1], etc.
#     smb_year = smb_raw[index][0]
#     regridded_smb_year = interpolate.griddata((xs.ravel(), ys.ravel()), smb_year.ravel(), (Xmat, Ymat), method='nearest')
#     SMB_dict[year] = interpolate.interp2d(X, Y, regridded_smb_year, kind='linear')
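# The "+init=EPSG:..." syntax and pyproj.transform used above are deprecated in
# pyproj 2+; a hedged sketch of the same reprojection with the modern
# Transformer API (x_lon, y_lat as in the block above):
from pyproj import Transformer

transformer = Transformer.from_crs("EPSG:4326", "EPSG:3413", always_xy=True)
xs, ys = transformer.transform(x_lon, y_lat)  # always_xy=True: lon/lat input order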