def resample2d(i_data, i_s, i_e, i_i, o_s, o_e, o_i, kx=3, ky=3, s=0,
               gauss_sig=0, median_boxcar_size=0, clip=True):
    '''
    Resample a square 2D input grid with extents defined by [i_s] and [i_e]
    with increment [i_i] to a new 2D grid with extents defined by [o_s] and
    [o_e] with increment [o_i].

    Returns a 2D resampled array, with options for smoothing (gaussian and
    median) and clipping.
    '''
    # calculate bivariate spline, G, using input grid and data
    grid_pre_rebin = np.arange(i_s, i_e, i_i)
    G = RectBivariateSpline(grid_pre_rebin, grid_pre_rebin, i_data, kx=kx, ky=ky)

    # evaluate this spline at new points on output grid
    grid_x, grid_y = np.mgrid[o_s:o_e:o_i, o_s:o_e:o_i]
    data = G.ev(grid_x, grid_y)

    if gauss_sig != 0:
        data = gaussian_filter(data, gauss_sig)
    if median_boxcar_size != 0:
        data = median_filter(data, median_boxcar_size)

    if clip:
        input_max = np.max(i_data)
        input_min = np.min(i_data)
        data[data > input_max] = input_max
        data[data < input_min] = input_min

    return data
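# A minimal usage sketch for resample2d as defined above (not from the original
# source): the grid extents and the synthetic surface are made up, and the
# imports mirror what the function body expects at module level.
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage import gaussian_filter, median_filter

x = np.arange(0, 10, 1.0)
coarse = np.sin(x)[:, None] * np.cos(x)[None, :]       # 10x10 synthetic surface on a 1-unit grid

# resample onto a 0.25-unit grid with light Gaussian smoothing
fine = resample2d(coarse, 0, 10, 1.0, 0, 10, 0.25, gauss_sig=1.0)
print(coarse.shape, '->', fine.shape)                   # (10, 10) -> (40, 40)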
def __init__(self, alpha, Re, cl, cd):
    """Setup CCAirfoil from raw airfoil data on a grid.

    Parameters
    ----------
    alpha : array_like (deg)
        angles of attack where airfoil data are defined
        (should be defined from -180 to +180 degrees)
    Re : array_like
        Reynolds numbers where airfoil data are defined
        (can be empty or of length one if not Reynolds number dependent)
    cl : array_like
        lift coefficient 2-D array with shape (alpha.size, Re.size);
        cl[i, j] is the lift coefficient at alpha[i] and Re[j]
    cd : array_like
        drag coefficient 2-D array with shape (alpha.size, Re.size);
        cd[i, j] is the drag coefficient at alpha[i] and Re[j]
    """
    alpha = np.radians(alpha)
    self.one_Re = False

    # special case if zero or one Reynolds number (need at least two for bivariate spline)
    if len(Re) < 2:
        Re = [1e1, 1e15]
        cl = np.c_[cl, cl]
        cd = np.c_[cd, cd]
        self.one_Re = True

    kx = min(len(alpha) - 1, 3)
    ky = min(len(Re) - 1, 3)

    # a small amount of smoothing is used to prevent spurious multiple solutions
    self.cl_spline = RectBivariateSpline(alpha, Re, cl, kx=kx, ky=ky, s=0.1)
    self.cd_spline = RectBivariateSpline(alpha, Re, cd, kx=kx, ky=ky, s=0.001)
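# A standalone sketch (not from the original source) of the spline the
# CCAirfoil constructor above builds: a smoothed RectBivariateSpline over
# (alpha, Re), evaluated at a single operating point. The airfoil table here
# is synthetic.
import numpy as np
from scipy.interpolate import RectBivariateSpline

alpha = np.radians(np.linspace(-180.0, 180.0, 73))         # angles of attack [rad]
Re = np.array([1e5, 1e6, 1e7])                              # Reynolds numbers
cl = np.sin(2 * alpha)[:, None] * np.ones((1, Re.size))     # toy lift-coefficient table

kx = min(len(alpha) - 1, 3)
ky = min(len(Re) - 1, 3)
cl_spline = RectBivariateSpline(alpha, Re, cl, kx=kx, ky=ky, s=0.1)

print(cl_spline.ev(np.radians(8.0), 5e5))                   # cl at alpha = 8 deg, Re = 5e5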
def setModel(self, model, dx, think_positive=False):
    '''
    Set new model for the source.

    :param model: ``(n, n)`` Numpy image array.
    :param dx: scalar
        Pixel size in microarcseconds.
    :param think_positive: (optional) bool
        Should we enforce that the source image has no negative pixel values?
    '''
    self.nx = int(ceil(model.shape[-1] * dx / self.dx))   # number of image pixels
    self.model = model                                    # source model
    self.model_dx = dx                                    # source model resolution

    # load source image that has size and resolution compatible with the screen.
    self.isrc = np.empty(2 * (self.nx,))
    self.think_positive = think_positive

    M = self.model.shape[1]   # size of original image array
    f_img = RectBivariateSpline(self.model_dx / self.dx * (np.arange(M) - 0.5 * (M - 1)),
                                self.model_dx / self.dx * (np.arange(M) - 0.5 * (M - 1)),
                                self.model)
    xx_, yy_ = np.meshgrid((np.arange(self.nx) - 0.5 * (self.nx - 1)),
                           (np.arange(self.nx) - 0.5 * (self.nx - 1)), indexing='xy')
    m = f_img.ev(yy_.flatten(), xx_.flatten()).reshape(2 * (self.nx,))

    self.isrc = m * (self.dx / self.model_dx)**2   # rescale for change in pixel size
    if self.think_positive:
        self.isrc[self.isrc < 0] = 0

    if not self.live_dangerously:
        self._checkSanity()
def interpolate_individual(self, image):
    # unpacking
    ogridx, ogridy = self.ogrid
    ngridx, ngridy = self.ngrid

    f = RectBivariateSpline(ogridy, ogridx, image, kx=1, ky=1)
    return f.ev(ngridy.flatten(), ngridx.flatten()).reshape(ngridx.shape)
def plot(self, ax, V=None, **kwargs):
    '''Plot the contours into matplotlib axis.

    Parameters
    ----------
    ax : matplotlib.Axes
        Axes to plot into
    V : array-like
        A list of contour values to plot. If not None, the internal contour
        values will be overridden during plotting, but not inside the object.
    kwargs : dict
        Keyword arguments to pass on to the ax.contour() method.
    '''
    if V is None:
        V = self.V
    d, X, Y = self.data.getData()

    # hack - add zero value to close contours
    d = np.hstack((d, np.zeros((d.shape[0], 1))))
    d = np.vstack((d, np.zeros((1, d.shape[1]))))
    dx = X[0, 1] - X[0, 0]
    dy = Y[1, 0] - Y[0, 0]
    x_longer = X[0, :].tolist()
    x_longer.append(X[0, -1] + dx)
    y_longer = Y[:, 0].tolist()
    y_longer.append(Y[-1, 0] + dy)

    x_interp, y_interp = np.meshgrid(
        np.linspace(x_longer[0], x_longer[-1],
                    len(x_longer) * self.upsample_factor),
        np.linspace(y_longer[0], y_longer[-1],
                    len(y_longer) * self.upsample_factor))
    spl = RectBivariateSpline(x_longer, y_longer, d.T)
    d_interp = spl.ev(x_interp, y_interp)

    ax.contour(x_interp, y_interp, d_interp, V, **kwargs)
def calcAnker(IS, inputPoints, rasterdata, gp):
    """Compute the ground anchor points and the anchor cable length from the DEM."""
    dhm = rasterdata['subraster']
    [Xa, Ya, Xe, Ye] = inputPoints
    # The last coordinate in xi/yi does not exactly match the end coordinates
    Xe_ = gp['xi'][-1]
    Ye_ = gp['yi'][-1]

    AnkA_dist = IS['d_Anker_A'][0]
    AnkE_dist = IS['d_Anker_E'][0]
    stueA_H = IS['HM_Anfang'][0]
    stueE_H = IS['HM_Ende_max'][0]

    # Compute the X and Y coordinates of the geodata in the projection system
    dx = float(Xe - Xa)
    dy = float(Ye - Ya)
    if dx == 0:
        dx = 0.0001

    azimut = math.atan(dy / dx)
    if dx > 0:
        azimut += 2 * math.pi
    else:
        azimut += math.pi

    # X and Y coordinates of the two anchor points on the ground
    AnkXa = Xa - AnkA_dist * math.cos(azimut)
    AnkYa = Ya - AnkA_dist * math.sin(azimut)
    AnkXe = Xe_ + AnkE_dist * math.cos(azimut)
    AnkYe = Ye_ + AnkE_dist * math.sin(azimut)

    # Linear interpolation
    # Coordinate arrays of the DEM
    coordX = gp['linspaces'][0]
    coordY = gp['linspaces'][1]
    # kx, ky give the degree of the interpolation, 1 = linear
    spline = RectBivariateSpline(-coordY, coordX, dhm, kx=1, ky=1)

    xi = np.array([AnkXa, Xa, Xe_, AnkXe])
    yi = np.array([AnkYa, Ya, Ye_, AnkYe])
    # Z coordinate of the anchors for the start and end points
    zAnker = spline.ev(-yi, xi)

    # Height values at the ground
    AnkA_z = stueA_H + 0.1 * (zAnker[1] - zAnker[0])
    AnkE_z = stueE_H + 0.1 * (zAnker[2] - zAnker[3])

    if AnkA_dist == 0:
        AnkA_z = 0.0
    if AnkE_dist == 0:
        AnkE_z = 0.0

    Ank = [AnkA_dist, AnkA_z, AnkE_dist, AnkE_z]
    # Extents of the anchor fields, everything in [m]
    #Ank = [d_Anker_A, z_Anker_A * 0.1, d_Anker_E, z_Anker_E * 0.1]

    Laenge_Ankerseil = (AnkA_dist**2 + AnkA_z**2)**0.5 + \
                       (AnkE_dist**2 + AnkE_z**2)**0.5

    # Possibly not needed
    #IS['z_Anker_A'][0] = z_Anker_A
    #IS['z_Anker_E'][0] = z_Anker_E
    return [Ank, Laenge_Ankerseil, zAnker]
def getStraightenWormInt(worm_img, skeleton, half_width=-1, cnt_widths=np.zeros(0),
                         width_resampling=7, ang_smooth_win=12, length_resampling=49):
    '''
    Straighten the worm image.

    worm_img - image containing the worm
    skeleton - worm skeleton
    half_width - half width of the worm; if it is -1 it is estimated from cnt_widths
    cnt_widths - contour widths, used in case the half width is not given
    width_resampling - number of data points used in the intensity map along the worm width
    length_resampling - number of data points used in the intensity map along the worm length
    ang_smooth_win - window used to calculate the skeleton angles.
        A small value will introduce noise, therefore obtaining bad perpendicular segments.
        A large value will over-smooth the skeleton, therefore not capturing the correct shape.
    '''
    #if np.all(np.isnan(skeleton)):
    #    buff = np.empty((skeleton.shape[0], width_resampling))
    #    buff.fill(np.nan)
    #    return buff

    assert half_width > 0 or cnt_widths.size > 0
    assert not np.any(np.isnan(skeleton))

    if ang_smooth_win % 2 == 1:
        ang_smooth_win += 1

    if skeleton.shape[0] != length_resampling:
        skeleton, _ = curvspace(np.ascontiguousarray(skeleton), length_resampling)

    skelX = skeleton[:, 0]
    skelY = skeleton[:, 1]

    assert np.max(skelX) < worm_img.shape[0]
    assert np.max(skelY) < worm_img.shape[1]
    assert np.min(skelX) >= 0
    assert np.min(skelY) >= 0

    #calculate smoothed angles
    skel_angles = angleSmoothed(skelX, skelY, ang_smooth_win)

    #get the perpendicular angles to define line scans (orientation doesn't
    #matter here so subtracting pi/2 should always work)
    perp_angles = skel_angles - np.pi / 2

    #for each skeleton point get the coordinates for two line scans: one in the
    #positive direction along perpAngles and one in the negative direction (use
    #two that both start on skeleton so that the intensities are the same in
    #the line scan)

    #resample the points along the worm width
    if half_width <= 0:
        half_width = np.median(cnt_widths[10:-10]) / 2.
    #add half a pixel to get part of the contour
    r_ind = np.linspace(-half_width, half_width, width_resampling)

    #create the grid of points to be interpolated (make use of numpy implicit broadcasting Nx1 + 1xM = NxM)
    grid_x = skelX + r_ind[:, np.newaxis] * np.cos(perp_angles)
    grid_y = skelY + r_ind[:, np.newaxis] * np.sin(perp_angles)

    f = RectBivariateSpline(np.arange(worm_img.shape[0]),
                            np.arange(worm_img.shape[1]),
                            worm_img)
    return f.ev(grid_y, grid_x)   #return interpolated intensity map
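# A standalone sketch of the perpendicular line-scan idea used above. The
# helpers curvspace and angleSmoothed are not shown in this collection, so the
# tangent angles here come from a plain finite difference, and the image and
# skeleton are synthetic.
import numpy as np
from scipy.interpolate import RectBivariateSpline

img = np.random.rand(100, 100)
skel = np.stack([np.linspace(10, 80, 49), np.linspace(20, 60, 49)], axis=1)

d = np.gradient(skel, axis=0)                   # crude stand-in for angleSmoothed
angles = np.arctan2(d[:, 1], d[:, 0])
perp = angles - np.pi / 2

r = np.linspace(-5, 5, 7)                       # half_width = 5, width_resampling = 7
gx = skel[:, 0] + r[:, None] * np.cos(perp)
gy = skel[:, 1] + r[:, None] * np.sin(perp)

f = RectBivariateSpline(np.arange(img.shape[0]), np.arange(img.shape[1]), img)
print(f.ev(gy, gx).shape)                       # (7, 49) straightened intensity map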
def main():
    filenameEffArea = 'aeff_P7REP_ULTRACLEAN_V15_back.fits'
    directoryEffectiveArea = '/Users/dspolyar/Documents/IRF/EffectiveArea/'
    print(pyfits.info(directoryEffectiveArea + filenameEffArea))
    CTHETA_LO, CTHETA_HI, energyLow, energyHigh, EFFAREA = importEffectiveArea(directoryEffectiveArea + filenameEffArea)
    energylog, Ctheta = centeringDataAndConvertingToLog(energyHigh, energyLow, CTHETA_HI, CTHETA_LO)
    SplineEffectiveArea = RectBivariateSpline(Ctheta, energylog, EFFAREA)
    plotofEffectiveArea(SplineEffectiveArea, EFFAREA, energylog, Ctheta)
    print(SplineEffectiveArea.ev(1., 5.))
def main():
    if len(sys.argv) != 2:
        print(""" Usage: python img2spline.py [path_to_img] \n""")
        sys.exit(-1)
    else:
        path_to_img = sys.argv[1]

    img = Image.open(path_to_img).convert('L')   # RGB -> [0..255]
    print("Image was opened and converted to grayscale.")
    img.show()

    width, height = img.size
    data = np.array(list(img.getdata()), dtype=int)
    data = data.reshape((height, width))
    print("Data was extracted.")

    # Start plotting original surface of image
    fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    ax = fig.add_subplot(111)
    ax.invert_yaxis()

    x = range(0, width)
    y = range(0, height)
    # rev_y = range(height-1, -1, -1)   # reverse y
    X, Y = np.meshgrid(x, y)
    print(Y)

    r_stride = 1 + width // 20
    c_stride = 1 + height // 20
    # ax.plot_surface(X, Y, data, rstride=r_stride, cstride=c_stride)
    mappable = ax.pcolor(X, Y, data)
    plt.colorbar(mappable)

    ax.set_title("Original grayscale image")
    ax.set_xlabel('Width (px)')
    ax.set_ylabel('Height (px)')
    plt.draw()
    # Finish plotting original surface of image

    # 2D Interpolation here: first axis of `data` is y (rows), second is x (columns)
    spline = RectBivariateSpline(y, x, data)
    print(spline.get_coeffs())

    plt.show()
def scatter(self,move_pix=0,scale=1): ''' Generate the scattered image which is stored in the ``iss`` member. :param move_pix: (optional) int Number of pixels to roll the screen (for time evolution). :param scale: (optional) scalar Scale factor for gradient. To simulate the scattering effect at another wavelength this is (lambda_new/lambda_old)**2 ''' M = self.model.shape[-1] # size of original image array N = self.nx # size of output image array #if not self.live_dangerously: self._checkSanity() # calculate phase gradient dphi_x,dphi_y = self._calculate_dphi(move_pix=move_pix) if scale != 1: dphi_x *= scale/sqrt(2.) dphi_y *= scale/sqrt(2.) xx_,yy = np.meshgrid((np.arange(N) - 0.5*(N-1)),\ (np.arange(N) - 0.5*(N-1)),indexing='xy') # check whether we care about PA of scattering kernel if self.pa != None: f_model = RectBivariateSpline(self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\ self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\ self.model) # apply rotation theta = -(90 * pi / 180) + np.radians(self.pa) # rotate CW 90 deg, then CCW by PA xx_ += dphi_x yy += dphi_y xx = cos(theta)*xx_ - sin(theta)*yy yy = sin(theta)*xx_ + cos(theta)*yy self.iss = f_model.ev(yy.flatten(),xx.flatten()).reshape((self.nx,self.nx)) # rotate back and clip for positive values for I if self.think_positive: self.iss = clip(rotate(self.iss,-1*theta/np.pi*180,reshape=False),a_min=0,a_max=1e30) * (self.dx/self.model_dx)**2 else: self.iss = rotate(self.iss,-1*theta/np.pi*180,reshape=False) * (self.dx/self.model_dx)**2 # otherwise do a faster lookup rather than the expensive interpolation. else: yyi = np.rint((yy+dphi_y+self.nx/2)).astype(np.int) % self.nx xxi = np.rint((xx_+dphi_x+self.nx/2)).astype(np.int) % self.nx if self.think_positive: self.iss = clip(self.isrc[yyi,xxi],a_min=0,a_max=1e30) else: self.iss = self.isrc[yyi,xxi]
def x_sig(self, x, sigma):
    eps_list, mu_q = self.spirrid_response
    eps_sig = InterpolatedUnivariateSpline(mu_q[0, :], eps_list[1])
    if not (max(mu_q) > sigma):
        raise ValueError('applied stress higher than the maximum in micromechanical evaluation of a CB')
    eps = eps_sig(sigma)
    spline = RectBivariateSpline(eps_list[0], eps_list[1], mu_q)
    sigma_f = spline.ev(x, ones(len(x)) * eps) / self.V_f
    sigma_m = (sigma - sigma_f * self.V_f) / self.V_m
    return sigma_m
def __init__(self, x, y, z, kx=1, ky=1, xname=None, xunits=None, yname=None,
             yunits=None, zname=None, zunits=None):
    """Constructor.
    """
    if hasattr(z, '__call__'):
        _x, _y = numpy.meshgrid(y, x)
        z = z(_x, _y)
    xBivariateSplineBase.__init__(self, x, y, z, xname, xunits, yname, yunits,
                                  zname, zunits)
    RectBivariateSpline.__init__(self, x, y, z, bbox=[None, None, None, None],
                                 kx=kx, ky=ky, s=0)
def _approx(fmapnii, s=14.): """ Slice-wise approximation of a smooth 2D bspline credits: http://scipython.com/book/chapter-8-scipy/examples/two-dimensional-interpolation-\ with-scipyinterpolaterectbivariatespline/ """ from scipy.interpolate import RectBivariateSpline from builtins import str, bytes if isinstance(fmapnii, (str, bytes)): fmapnii = nb.load(fmapnii) if not isinstance(s, (tuple, list)): s = np.array([s] * 2) data = fmapnii.get_data() zooms = fmapnii.header.get_zooms() knot_decimate = np.floor(s / np.array(zooms)[:2]).astype(np.uint8) knot_space = np.array(zooms)[:2] * knot_decimate xmax = 0.5 * data.shape[0] * zooms[0] ymax = 0.5 * data.shape[1] * zooms[1] x = np.arange(-xmax, xmax, knot_space[0]) y = np.arange(-ymax, ymax, knot_space[1]) x2 = np.arange(-xmax, xmax, zooms[0]) y2 = np.arange(-ymax, ymax, zooms[1]) coeffs = [] nslices = data.shape[-1] for k in range(nslices): data2d = data[..., k] data2dsubs = data2d[::knot_decimate[0], ::knot_decimate[1]] interp_spline = RectBivariateSpline(x, y, data2dsubs) data[..., k] = interp_spline(x2, y2) coeffs.append(interp_spline.get_coeffs().reshape(data2dsubs.shape)) # Save smoothed data hdr = fmapnii.header.copy() caff = fmapnii.affine datanii = nb.Nifti1Image(data.astype(np.float32), caff, hdr) # Save bspline coeffs caff[0, 0] = knot_space[0] caff[1, 1] = knot_space[1] coeffnii = nb.Nifti1Image(np.stack(coeffs, axis=2), caff, hdr) return datanii, coeffnii
def kde_histogram(events_x, events_y, xout=None, yout=None, bins=None):
    """ Histogram-based Kernel Density Estimation

    Parameters
    ----------
    events_x, events_y: 1D ndarray
        The input points for kernel density estimation. Input
        is flattened automatically.
    xout, yout: ndarray
        The coordinates at which the KDE should be computed. If set to
        None, input coordinates are used.
    bins: tuple (binsx, binsy)
        The number of bins to use for the histogram.

    Returns
    -------
    density: ndarray, same shape as `xout`
        The KDE for the points in (xout, yout)

    See Also
    --------
    `numpy.histogram2d`
    `scipy.interpolate.RectBivariateSpline`
    """
    valid_combi = ((xout is None and yout is None) or
                   (xout is not None and yout is not None))
    if not valid_combi:
        raise ValueError("Both `xout` and `yout` must be (un)set.")

    if xout is None and yout is None:
        xout = events_x
        yout = events_y

    if bins is None:
        bins = (max(5, bin_num_doane(events_x)),
                max(5, bin_num_doane(events_y)))

    # Compute the histogram
    hist2d, xedges, yedges = np.histogram2d(x=events_x, y=events_y,
                                            bins=bins, normed=True)
    xip = xedges[1:] - (xedges[1] - xedges[0]) / 2
    yip = yedges[1:] - (yedges[1] - yedges[0]) / 2

    estimator = RectBivariateSpline(x=xip, y=yip, z=hist2d)
    density = estimator.ev(xout, yout)
    density[density < 0] = 0

    return density.reshape(xout.shape)
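# A hypothetical usage sketch for kde_histogram above with synthetic events.
# Bins are passed explicitly so the bin_num_doane helper (not shown here) is
# not needed; note that the normed=True argument used above requires an older
# NumPy (newer releases replaced it with density=True).
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=2000)
y = 0.5 * x + rng.normal(scale=0.5, size=2000)

density = kde_histogram(x, y, bins=(30, 30))    # KDE evaluated at the events themselves
print(density.shape, density.min(), density.max())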
def add_dem_2D(self, x, dem, y0=0., y1=np.infty, yref=None, kx=3, ky=1, s=None): ''' Add topography by vertically stretching the domain in the region [y0, y1] - points below y0 are kept fixed, points above y1 are moved as the DEM, points in between are interpolated. Usage: first call add_dem_2D for each boundary that is to be perturbed and finally call apply_dem to add the perturbation to the mesh coordinates. :param x: x coordinates of the DEM :type x: numpy array :param dem: the DEM :type dem: numpy array :param y0: vertical coordinate, at which the stretching begins :type y0: float :param y1: vertical coordinate, at which the stretching ends, can be infinity :type y1: float :param yref: vertical coordinate, at which the stretching ends :type yref: float :param kx: horizontal degree of the spline interpolation :type kx: integer :param ky: vertical degree of the spline interpolation :type ky: integer :param s: smoothing factor :type s: float ''' if not self.ndim == 2: # pragma: no cover raise ValueError('apply_dem_2D works on 2D meshes only') if yref is None: yref = self.points[:, 1].max() if y1 < np.infty: y = np.array([y0, yref, y1]) d = np.c_[np.zeros(len(dem)), dem, np.zeros(len(dem))] else: y = np.array([y0, yref]) d = np.c_[np.zeros(len(dem)), dem] xx, yy = np.meshgrid(x, y, indexing='ij') rbs = RectBivariateSpline(x, y, d, kx=kx, ky=ky, s=s) # add to topography if self.topography is None: self.topography = np.zeros_like(self.points[:, -1]) self.points[:, 1] += rbs.ev(self.points[:, 0], self.points[:, 1])
def interp_cross(x_in, y_in, data, x0, x1, y0, y1, rint=None, mode='linear'):
    """Interpolate onto points along the straight line from (x0, y0) to (x1, y1)."""
    if np.ndim(x_in) == 2:
        x_in = x_in[0, :]
    if np.ndim(y_in) == 2:
        y_in = y_in[:, 0]
    ndim = np.ndim(data)
    if ndim > 2:
        oldshape = data.shape
        data = data.reshape(-1, oldshape[-2], oldshape[-1])
    elif ndim < 2:
        raise ValueError("input data must have at least 2 dimensions; input ndim is {}".format(ndim))

    # make output grid
    if rint is None:
        rint = max(np.diff(x_in).max(), np.diff(y_in).max())
    rmax = np.hypot(x1 - x0, y1 - y0)
    nr = int(rmax // rint) + 1
    theta = np.arctan2(y1 - y0, x1 - x0)
    r_out = np.linspace(0, rmax, nr)
    x_out = r_out * np.cos(theta) + x0
    y_out = r_out * np.sin(theta) + y0
    xrev = yrev = 1
    if x0 > x1:
        x_out = x_out[::-1]
        xrev = -1
    if y0 > y1:
        y_out = y_out[::-1]
        yrev = -1

    # interpolate
    if ndim == 2:
        if mode == 'linear':
            out = RectBivariateSpline(y_in, x_in, data, kx=1, ky=1)(y_out, x_out)[::xrev, ::yrev].diagonal()
        elif mode == 'cubic':
            out = RectBivariateSpline(y_in, x_in, data, kx=3, ky=3)(y_out, x_out)[::xrev, ::yrev].diagonal()
    else:
        zn, yn, xn = data.shape
        out = np.ma.empty((zn, nr), dtype=np.float32)
        for z in range(zn):
            if mode == 'linear':
                out[z, :] = RectBivariateSpline(y_in, x_in, data[z, :, :], kx=1, ky=1)(y_out, x_out)[::xrev, ::yrev].diagonal()
            elif mode == 'cubic':
                out[z, :] = RectBivariateSpline(y_in, x_in, data[z, :, :], kx=3, ky=3)(y_out, x_out)[::xrev, ::yrev].diagonal()
        out = out.reshape(oldshape[:-2] + (-1,))

    return x_out, y_out, r_out, out
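# A minimal usage sketch for interp_cross above on a synthetic 2D field
# (the field values and line endpoints are made up).
import numpy as np
from scipy.interpolate import RectBivariateSpline

x = np.linspace(0., 10., 51)
y = np.linspace(0., 5., 26)
data = np.sin(x)[None, :] * np.cos(y)[:, None]          # shape (ny, nx)

# sample along the straight line from (1, 1) to (9, 4)
x_out, y_out, r_out, section = interp_cross(x, y, data, 1., 9., 1., 4., mode='linear')
print(r_out.shape, section.shape)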
def run_slimscat(isrc,idx,screenfile='screen.bin'): ''' Scatter source image. :param isrc: source image :param idx: source pixel scale :param screenfile: screen file ''' # read in screen parameters f = open(screenfile,'rb') hdrsize = struct.unpack('i',f.read(4))[0] nphi = int(np.sqrt(struct.unpack('i',f.read(4))[0])) dx = struct.unpack('d',f.read(8))[0] f.seek(hdrsize) # filter image # check fov iny,inx = isrc.shape ny = int(np.floor(iny*idx/dx)) nx = int(np.floor(inx*idx/dx)) assert idx*max([iny,inx]) < nphi*dx # read in screen dphi_x = np.empty((ny,nx),dtype=np.float64) dphi_y = np.empty((ny,nx),dtype=np.float64) for i in range(ny): f.seek(hdrsize + i*8*1*nphi,0) dphi_x[i,:] = struct.unpack('{0:d}d'.format(nx),f.read(nx*8)) for i in range(ny): f.seek(hdrsize + 8*nphi*nphi + i*8*1*nphi,0) dphi_y[i,:] = struct.unpack('{0:d}d'.format(nx),f.read(nx*8)) f.close() # construct spline fit to source image f_isrc = RectBivariateSpline(idx/dx*(np.arange(iny) - 0.5*(iny-1)),\ idx/dx*(np.arange(inx) - 0.5*(inx-1)),\ isrc) # scatter pixel coordinates yy,_xx = np.meshgrid((np.arange(ny) - 0.5*(ny-1)),\ (np.arange(nx) - 0.5*(nx-1)),indexing='ij') _xx += dphi_x yy += dphi_y return f_isrc.ev(yy.flatten(),_xx.flatten()).reshape((ny,nx))
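# A standalone sketch of the core warping step in run_slimscat above: fit a
# spline to the source image and evaluate it at pixel coordinates shifted by
# the screen's phase gradients. The offsets here are synthetic smooth fields
# standing in for the values read from screen.bin.
import numpy as np
from scipy.interpolate import RectBivariateSpline

ny, nx = 64, 64
yy, xx = np.meshgrid(np.arange(ny) - 0.5 * (ny - 1),
                     np.arange(nx) - 0.5 * (nx - 1), indexing='ij')
isrc = np.exp(-(xx**2 + yy**2) / (2 * 8.0**2))           # Gaussian toy source
dphi_x = 0.5 * np.sin(2 * np.pi * yy / ny)               # made-up pixel offsets
dphi_y = 0.5 * np.cos(2 * np.pi * xx / nx)

f_isrc = RectBivariateSpline(np.arange(ny) - 0.5 * (ny - 1),
                             np.arange(nx) - 0.5 * (nx - 1), isrc)
iss = f_isrc.ev((yy + dphi_y).ravel(), (xx + dphi_x).ravel()).reshape(ny, nx)
print(iss.shape)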
def get_interpolated_pixel_color_rbspline(pts, s_im, size):
    """given pts in floats, linear interpolate pixel values nearby to get a good colour"""
    pts = clamp(pts, size)
    s_im = np.atleast_3d(s_im)
    ys, xs = size
    ycoords, xcoords = np.arange(ys), np.arange(xs)
    out = np.empty(pts.shape[1:] + (s_im.shape[-1],), dtype=s_im.dtype)

    pts_vec = pts.reshape((2, -1))
    out_vec = out.reshape((-1, s_im.shape[-1]))   #flatten for easier vectorization
    for i in range(s_im.shape[-1]):               #loop over color channels
        rbspline = RectBivariateSpline(ycoords, xcoords, s_im[..., i])
        out_vec[:, i] = rbspline.ev(pts_vec[0], pts_vec[1])
    return out
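# A hypothetical usage sketch for get_interpolated_pixel_color_rbspline above.
# The clamp helper is not shown in this collection, so a simple stand-in that
# clips (y, x) sample points to the image bounds is defined here; the image
# and sample points are synthetic.
import numpy as np
from scipy.interpolate import RectBivariateSpline

def clamp(pts, size):
    ys, xs = size
    out = pts.copy()
    out[0] = np.clip(out[0], 0, ys - 1)
    out[1] = np.clip(out[1], 0, xs - 1)
    return out

rng = np.random.default_rng(1)
image = rng.random((32, 48, 3))                    # small RGB image
pts = np.stack([rng.uniform(0, 31, (10, 10)),      # fractional (y, x) sample points
                rng.uniform(0, 47, (10, 10))])

colours = get_interpolated_pixel_color_rbspline(pts, image, image.shape[:2])
print(colours.shape)                               # (10, 10, 3)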
def interp3d_emergence(uplift, data, out_times, verbose=False):
    """Interpolate uplift surfaces (xyz data at a specific t) to data
    locations (non-grid) and data times (between times calculated).

    Uses progressive linear interpolations: first the uplift at each outputted
    time is interpolated to the data locations in data.locs, then they are
    interpolated to the data times in each location.

    Parameters
    ----------
    uplift (array-like) - size (times, lon, lat) array of uplift surfaces
    data - data whose data.locs are the locations to interpolate to.
    out_times - the times for the first index of the uplift array (should be
        of uplift eventually, yes?)
    """
    time_start = time.clock()

    ##########################################
    # STUFF TO FIX HEEEEEEERE!!!!!!!!!!!!!
    N = uplift[0].shape

    # TODO These should be gotten from somewhere, right? uplift.grid??
    X = np.linspace(0, 4900000, num=N[1], endpoint=True)
    Y = np.linspace(0, 4700000, num=N[0], endpoint=True)
    ##########################################

    # interp_data will be an array of size (N_output_times, N_locations)
    # for use in interpolating the calculated emergence to the locations and
    # times at which there are data in data
    interp_data = []
    # Interpolate the calculated uplift at each time on the Lat-Lon grid
    # to the data locations.
    for uplift_at_a_time in uplift:
        interp_func = RectBivariateSpline(X, Y, uplift_at_a_time.T)
        interp_data.append(interp_func.ev(data.locs[:, 0], data.locs[:, 1]))
    interp_data = np.array(interp_data).T

    calc_vector = []
    # Interpolate the calculated uplift at each time and data location
    # to the times of the data location.
    for interp, loc in zip(interp_data, data):
        calc_vector.append(np.interp(loc['data_dict']['times'],
                                     out_times[::-1], interp[::-1]))

    # flatten the array
    calc_vector = np.array([item for l in calc_vector for item in l])

    if verbose:
        print('Interpolation time: {0}s'.format(time.clock() - time_start))

    return calc_vector
def __init__(self, origin=None, globalOrigin=None,areaPoly=None, gridtype=None): if not areaPoly: raise Exception('areaPoly must be given.') if not origin: origin=(0,0) if not globalOrigin: globalOrigin=(596120, 6727530) #sweref99..located on map.. nice position.. use as default. self.areaPoly=areaPoly xmin,xmax,ymin,ymax=fun.polygonLim(areaPoly) side=10 self.lim=np.array([xmin-0.5*side,xmax+0.5*side, ymin-0.5*side, ymax+0.5*side]) self.A=fun.polygon_area(areaPoly) self.Ainv=1./self.A #used a lot.. faster to just compute this once. self.origin=origin self.globalOrigin=globalOrigin self.type=gridtype x,y,z=GIS.readTerrain(globalOrigin=globalOrigin , areaPoly=areaPoly) #get a list of how x and y varies.. strictly ascending xlist=x[:,0] #just the variations, not the 2D-matrix ylist=y[0,:] self.interpol=RectBivariateSpline(xlist, ylist, z) #used pretty much everytime we need the height of a specific point. Implemented in fortran and very fast nx.Graph.__init__(self) self.t_x=x self.t_y=y self.t_z=z self.roadWidth=4 #width of road self.overlap={} #will later be filled. A speedup thing self.weightFunction=normalizedPitchDist #reference to exter self.areaCover=go.roadAreaCoverage(self) self.moviefig=None #may be used for movies later self.cmdfolder=os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
def __init__(self, terrainAP=None, rasterDist=1.0, humusType=None): """ The humus layer has a triangular thickness distribution with parameters in dPar. RasterDist is the distance between each node on our raster with thicknesses given by the distribution. A large distance mimics a situation where the humuslayer is more or less spatially invariant, whilst a small distance mimics a situation where the thickness varies on a small length scale. """ if humusType=='2': dPar=[0.00,0.05,0.01]#triangular distribution [min,max,mode] meters elif humusType=='3': dPar=[0.05,0.15,0.10] elif humusType=='4': dPar=[0.15,0.30,0.22] else: raise Exception('HumusType is not correct.') xLim = terrainAP[1][0] yLim = terrainAP[3][1] self.xNumInv = int(xLim/rasterDist) self.yNumInv = int(yLim/rasterDist) z=[] self.pointDepth = 1 x=np.linspace(0,xLim,self.xNumInv) y=np.linspace(0,yLim,self.yNumInv) #These forloops are in order to achieve a two dimensional array with thicknesses for i in range(self.xNumInv): ztemp=[] for j in range(self.yNumInv): ztemp.append(random.triangular(dPar[0],dPar[1],dPar[2])) z.append(ztemp) self.z=np.array(z)#Must be a numpy-array for the interpolation to work. self.interpolDepth=RectBivariateSpline(x,y,self.z)
def __call__(self, x, y, dx=0, dy=0, grid=False):
    """Overloaded __call__ method.

    Here we basically override the default value of the `grid` parameter
    from `True` to `False`, since we're typically interested in evaluating
    the spline at given physical coordinates, rather than grid points.
    """
    return RectBivariateSpline.__call__(self, x, y, None, dx, dy, grid)
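# A minimal standalone sketch of the subclassing pattern used above: a
# RectBivariateSpline whose __call__ defaults to pointwise evaluation
# (grid=False). The original class also mixes in axis metadata via
# xBivariateSplineBase, which is omitted here; arguments are forwarded by
# keyword to stay robust across SciPy versions.
import numpy as np
from scipy.interpolate import RectBivariateSpline

class PointwiseSpline(RectBivariateSpline):
    def __call__(self, x, y, dx=0, dy=0, grid=False):
        return RectBivariateSpline.__call__(self, x, y, dx=dx, dy=dy, grid=grid)

x = np.linspace(0, 1, 11)
y = np.linspace(0, 2, 21)
z = np.outer(np.sin(x), np.cos(y))                 # shape (x.size, y.size)

spl = PointwiseSpline(x, y, z, kx=1, ky=1, s=0)
print(spl([0.25, 0.75], [0.5, 1.5]))               # two point values, not a 2x2 grid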
def getStraightenWormIntT(worm_img, skeleton, half_width = -1, cnt_widths = np.zeros(0), width_resampling = 7, ang_smooth_win = 12, length_resampling = 49): #if np.all(np.isnan(skeleton)): # buff = np.empty((skeleton.shape[0], width_resampling)) # buff.fill(np.nan) # return buff assert half_width>0 or cnt_widths.size>0 assert not np.any(np.isnan(skeleton)) if ang_smooth_win%2 == 1: ang_smooth_win += 1; if skeleton.shape[0] != length_resampling: skeleton, _ = curvspace(np.ascontiguousarray(skeleton), length_resampling) skelX = skeleton[:,0]; skelY = skeleton[:,1]; assert np.max(skelX) < worm_img.shape[0] assert np.max(skelY) < worm_img.shape[1] assert np.min(skelY) >= 0 assert np.min(skelY) >= 0 #calculate smoothed angles skel_angles = angleSmoothed(skelX, skelY, ang_smooth_win) #%get the perpendicular angles to define line scans (orientation doesn't #%matter here so subtracting pi/2 should always work) perp_angles = skel_angles - np.pi/2; #%for each skeleton point get the coordinates for two line scans: one in the #%positive direction along perpAngles and one in the negative direction (use #%two that both start on skeleton so that the intensities are the same in #%the line scan) #resample the points along the worm width if half_width <= 0: half_width = (np.median(cnt_widths[10:-10])/2.) #add half a pixel to get part of the contour r_ind = np.linspace(-half_width, half_width, width_resampling) #create the grid of points to be interpolated (make use of numpy implicit broadcasting Nx1 + 1xM = NxM) grid_x = skelX + r_ind[:, np.newaxis]*np.cos(perp_angles); grid_y = skelY + r_ind[:, np.newaxis]*np.sin(perp_angles); f = RectBivariateSpline(np.arange(worm_img.shape[0]), np.arange(worm_img.shape[1]), worm_img) return f.ev(grid_y, grid_x), grid_x, grid_y #return interpolated intensity map
def project_interp(self,to_file=None,sys_a8=False,bok=True,clobber=False,**kwd): # projected image x,y to_nx,to_ny=self.to_size print('get XY in image for pixels in new WCS ...') xx,yy=np.meshgrid(np.arange(to_nx),np.arange(to_ny)) # projected image ra dec ra,dec=self.to_wcs.all_pix2world(xx,yy,0) # image XY if sys_a8: imxx,imyy=a8.a8_ad2xy(self.head,ra,dec,bok=bok) imxx-=1.0 imyy-=1.0 else: imxx,imyy=self.wcs.all_world2pix(ra,dec,0) data=self.data.copy() ny,nx=self.data.shape inmask=(imyy > -0.5) & (imyy < ny-0.5) & (imxx > -0.5) & (imxx < nx-0.5) if not inmask.any(): return None,None int_data=np.zeros((to_ny,to_nx)) if self.mask is not None: int_mask=np.zeros((to_ny,to_nx),dtype='bool') indr,indc=np.where(inmask) tmpyy=np.round(imyy[inmask]).astype('int') tmpxx=np.round(imxx[inmask]).astype('int') tmpmask=self.mask[tmpyy,tmpxx] int_mask[indr,indc]=tmpmask int_mask[np.logical_not(inmask)]=True print('project image to new WCS by interpolating ...') ff=RectBivariateSpline(np.arange(ny),np.arange(nx),data,**kwd) inmaskval=ff.ev(imyy[inmask],imxx[inmask]) #int_data[np.logical_not(inmask)]=np.nan int_data[inmask]=inmaskval if to_file is not None: hdu=self.to_wcs.to_fits()[0] hdu.data=int_data hdu.writeto(to_file,clobber=clobber) if self.mask is not None: f,ext=os.path.splitext(to_file) hdu.data=int_mask.astype('uint8') hdu.writeto(f+'-mask.fits',clobber=clobber) if self.mask is not None: return int_data,int_mask else: return int_data,None
def generate_score_map(structure): if structure == 'BackG': return None score_matrix = np.zeros((n_sample_x, n_sample_y)) score_matrix[sample_location_indices[:,0], sample_location_indices[:,1]] = probs_allClasses[structure] spline = RectBivariateSpline(sample_locations_unique_xs/shrink_factor, sample_locations_unique_ys/shrink_factor, score_matrix, bbox=[interpolation_xmin/shrink_factor, interpolation_xmax/shrink_factor, interpolation_ymin/shrink_factor, interpolation_ymax/shrink_factor]) t1 = time.time() dense_score_map = spline.ev(sample_locations_interpolatedArea_xs_matrix, sample_locations_interpolatedArea_ys_matrix) sys.stderr.write('evaluate spline: %.2f seconds\n' % (time.time() - t1)) # 5s for shrink_factor=4; doubling results in quadratic time reduction t1 = time.time() dense_score_map = resize(dense_score_map, (interpolation_h, interpolation_w)) # similar speed as rescale # dense_score_map = rescale(dense_score_map, shrink_factor) sys.stderr.write('scale up: %.2f seconds\n' % (time.time() - t1)) # 10s, very high penalty when multiprocessing # t = time.time() dense_score_map[dense_score_map < 1e-1] = 0 dense_score_map[dense_score_map > 1.] = 1. # sys.stderr.write('threshold: %.2f seconds\n' % (time.time() - t)) if np.count_nonzero(dense_score_map) < 1e5: sys.stderr.write('No %s is detected on section %d\n' % (structure, sec)) return None t1 = time.time() scoremap_bp_filepath, scoremap_interpBox_filepath = \ DataManager.get_scoremap_filepath(stack=stack, fn=fn, anchor_fn=anchor_fn, structure=structure, return_bbox_fp=True, setting=actual_setting) save_hdf(dense_score_map.astype(np.float16), scoremap_bp_filepath, complevel=5) np.savetxt(scoremap_interpBox_filepath, np.array((interpolation_xmin, interpolation_xmax, interpolation_ymin, interpolation_ymax))[None], fmt='%d') sys.stderr.write('save: %.2f seconds\n' % (time.time() - t1)) # 4s, very high penalty when multiprocessing
class SplineEstimator(object):
    def fit(self, x, y):
        self.lut = RectBivariateSpline(x[1], x[0], y)
        return self

    def predict(self, X):
        return self.lut.ev(X[:, 1], X[:, 0])
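# A hypothetical usage sketch for SplineEstimator above: fit takes the two
# grid axes as a tuple and the grid values with shape (len(x[1]), len(x[0]));
# predict takes query points as rows of (x, y) pairs, scikit-learn style.
# The data here are synthetic.
import numpy as np
from scipy.interpolate import RectBivariateSpline

xs = np.linspace(0, 1, 20)
ys = np.linspace(0, 2, 30)
grid = np.sin(ys)[:, None] * np.cos(xs)[None, :]    # shape (len(ys), len(xs))

est = SplineEstimator().fit((xs, ys), grid)
X_query = np.array([[0.1, 0.2], [0.5, 1.0], [0.9, 1.8]])
print(est.predict(X_query))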
def find_stop_power(**kwargs):
    data = TRS398_table7()

    stop_power_interp = RectBivariateSpline(
        np.array(data["depth/R50"]),
        np.array(data["R50"]),
        np.array(data["contents"]))

    energy = kwargs["energy"]
    depth_mm = np.array(kwargs["depth"])
    depth_cm = depth_mm / 10

    R50_mm = energy_to_R50(energy)
    R50_cm = R50_mm / 10

    depth_over_R50 = depth_cm / R50_cm

    stop_power = np.ravel(stop_power_interp.ev(depth_over_R50, R50_cm))

    return stop_power
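# A standalone sketch of the table lookup performed above. TRS398_table7 and
# energy_to_R50 are external helpers not shown in this collection, so the
# stopping-power table here is a synthetic stand-in with the same
# (depth/R50, R50) layout.
import numpy as np
from scipy.interpolate import RectBivariateSpline

depth_over_R50_axis = np.linspace(0.02, 1.2, 60)
R50_axis = np.linspace(2.0, 10.0, 17)                      # R50 in cm
table = 1.0 + 0.1 * depth_over_R50_axis[:, None] - 0.01 * R50_axis[None, :]

interp = RectBivariateSpline(depth_over_R50_axis, R50_axis, table)

depths_cm = np.array([0.5, 1.0, 2.0, 3.0])
print(np.ravel(interp.ev(depths_cm / 4.5, 4.5)))           # values for a beam with R50 = 4.5 cm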
def getStraightenWormInt(worm_img, skeleton, half_width, width_resampling):
    '''
    Straighten the worm image.

    worm_img - image containing the worm
    skeleton - worm skeleton
    half_width - half width of the worm
    width_resampling - number of data points used in the intensity map along the worm width
    '''
    assert half_width > 0
    assert not np.any(np.isnan(skeleton))

    dX = np.diff(skeleton[:, 0])
    dY = np.diff(skeleton[:, 1])

    skel_angles = np.arctan2(dY, dX)
    skel_angles = np.hstack((skel_angles[0], skel_angles))

    #get the perpendicular angles to define line scans (orientation doesn't
    #matter here so subtracting pi/2 should always work)
    perp_angles = skel_angles - np.pi / 2

    #for each skeleton point get the coordinates for two line scans: one in the
    #positive direction along perpAngles and one in the negative direction (use
    #two that both start on skeleton so that the intensities are the same in
    #the line scan)
    r_ind = np.linspace(-half_width, half_width, width_resampling)

    #create the grid of points to be interpolated (make use of numpy implicit broadcasting Nx1 + 1xM = NxM)
    grid_x = skeleton[:, 0] + r_ind[:, np.newaxis] * np.cos(perp_angles)
    grid_y = skeleton[:, 1] + r_ind[:, np.newaxis] * np.sin(perp_angles)

    #interpolate the intensity map
    f = RectBivariateSpline(np.arange(worm_img.shape[0]),
                            np.arange(worm_img.shape[1]),
                            worm_img)
    straighten_worm = f.ev(grid_y, grid_x)

    return straighten_worm, grid_x, grid_y
def __init__(self, R_in, z_in, Raxis, zaxis, psi_in, R_out, z_out, psi_sep=0):
    print('2d interp')
    self.error = 0

    # Check input dimensions; R_out and z_out must be flat
    if len(R_out) != len(z_out):
        print('R and z must have the same dimensions')
        self.error = 1
        return
    if np.array(R_out).ndim > 1:
        print('R_out must be flat')
        self.error = 2
        return
    if np.array(z_out).ndim > 1:
        print('z_out must be flat')
        self.error = 3
        return
    nz_psi, nR_psi = psi_in.shape
    if len(R_in) != nR_psi:
        print('Inconsistent R axis for psi_in')
        self.error = 5
        return
    if len(z_in) != nz_psi:
        print('Inconsistent z axis for psi_in')
        self.error = 6
        return

    nRz = len(R_out)
    self.psi_red = np.zeros(nRz)

    # Interpolate psi on the (z, R) grid (cubic spline by default)
    bisp = RectBivariateSpline(z_in, R_in, psi_in)
    self.psi_axis = bisp.ev(zaxis, Raxis)
    for jRz, R in enumerate(R_out):
        z = z_out[jRz]
        self.psi_red[jRz] = bisp.ev(z, R)

    self.psi_norm = (self.psi_red - self.psi_axis) / (psi_sep - self.psi_axis)
    self.rho_pol = np.sqrt(self.psi_norm)
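# A standalone sketch of the same psi normalisation on a synthetic flux map:
# rho_pol = sqrt((psi - psi_axis) / (psi_sep - psi_axis)). All values here are
# made up for illustration.
import numpy as np
from scipy.interpolate import RectBivariateSpline

R = np.linspace(1.0, 2.5, 60)
z = np.linspace(-1.2, 1.2, 80)
Raxis, zaxis, psi_sep = 1.75, 0.0, 1.0
psi = (R[None, :] - Raxis)**2 + 0.5 * (z[:, None] - zaxis)**2   # psi(z, R)

bisp = RectBivariateSpline(z, R, psi)
psi_axis = bisp.ev(zaxis, Raxis)

R_out = np.array([1.8, 2.0, 2.3])
z_out = np.array([0.1, -0.3, 0.5])
psi_red = bisp.ev(z_out, R_out)
rho_pol = np.sqrt((psi_red - psi_axis) / (psi_sep - psi_axis))
print(rho_pol)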
def getEroDep(self, xo = None, yo = None, xm = None, ym = None, pts = 100, gfilter = 5): """ Extract a slice from the 3D data set and compute its deposition thicknesses. Parameters ---------- variable: xo, yo Lower X,Y coordinates of the cross-section variable: xm, ym Upper X,Y coordinates of the cross-section variable: pts Number of points to discretise the cross-section variable: gfilter Gaussian smoothing filter """ if xm > self.x.max(): xm = self.x.max() if ym > self.y.max(): ym = self.y.max() if xo < self.x.min(): xo = self.x.min() if yo < self.y.min(): yo = self.y.min() xsec, ysec = self._cross_section(xo, yo, xm, ym, pts) self.dist = np.sqrt(( xsec - xo )**2 + ( ysec - yo )**2) # Surface rect_B_spline = RectBivariateSpline(self.y[:,0], self.x[0,:], self.z) datatop = rect_B_spline.ev(ysec, xsec) self.top = filters.gaussian_filter1d(datatop,sigma=gfilter) # Cumchange rect_B_spline = RectBivariateSpline(self.y[:,0], self.x[0,:], self.cumchange) cumdat = rect_B_spline.ev(ysec, xsec) gcum = filters.gaussian_filter1d(cumdat,sigma=gfilter) self.depo = gcum.clip(min=0) return
def report_renorm_bin(rhov,x0v,fm,nx,nd,MR,IDs,EoS): rep = [] x = x0v x1 = np.array(menus.flatten(x)) x2 = 1.0-x1 rhob = rhov[0] xb = np.empty((2)) f2d = np.empty((nx,nd)) rho = np.empty((nx,nd)) dfdrhob = np.empty((nx,nd)) dfdx1 = np.empty((nx,nd)) dfdx2 = np.empty((nx,nd)) rho1 = np.empty((nx,nd)) rho2 = np.empty((nx,nd)) u1mat = np.empty((nx,nd)) u2mat = np.empty((nx,nd)) bmix = np.empty((nx)) b = eos.b_calc(IDs,EoS) b1 = b[0] b2 = b[1] for i in range(0,nx): xb[0] = x1[i] xb[1] = x2[i] bmix[i] = eos.bmix_calc(MR,b,xb) for i in range(0,nx): f = np.array(fm[i]) for j in range(0,nd): f2d[i][j] = f[j] rho[i][j] = rhob[j]/bmix[i] for i in range(0,nx): for j in range(0,nd): rho1[i][j] = x1[i]*rho[i][j] rho2[i][j] = x2[i]*rho[i][j] for i in range(0,nx): dfdrhob[i][0] = (f2d[i][1]-f2d[i][0])/(rhob[1]-rhob[0]) for j in range(1,nd-1): dfdrhob[i][j] = (f2d[i][j+1]-f2d[i][j-1])/(rhob[j+1]-rhob[j-1]) dfdrhob[i][nd-1] = (f2d[i][nd-1]-f2d[i][nd-2])/(rhob[nd-1]-rhob[nd-2]) for i in range(0,nx-1): for j in range(0,nd): if i!=0: dfdx1[i][j] = (f2d[i+1][j]-f2d[i-1][j])/(x1[i+1]-x1[i-1]) else: dfdx1[0][j] = (f2d[1][j]-f2d[0][j])/(x1[1]-x1[0]) dfdx1[nx-1][j] = (f2d[nx-1][j]-f2d[nx-2][j])/(x1[nx-1]-x1[nx-2]) for i in range(0,nx-1): for j in range(0,nd): if i!=0: dfdx2[i][j] = (f2d[i+1][j]-f2d[i-1][j])/(x2[i+1]-x2[i-1]) else: dfdx2[0][j] = (f2d[1][j]-f2d[0][j])/(x2[1]-x2[0]) dfdx2[nx-1][j] = (f2d[nx-1][j]-f2d[nx-2][j])/(x2[nx-1]-x2[nx-2]) for i in range(0,nx): for j in range(0,nd): u1mat[i][j] = dfdrhob[i][j]*b1+dfdx2[i][j]*(0-rho2[i][j])/(rho[i][j]*rho[i][j]) u2mat[i][j] = dfdrhob[i][j]*b2+dfdx1[i][j]*(0-rho1[i][j])/(rho[i][j]*rho[i][j]) u1res_mat = RectBivariateSpline(x,rhob,u1mat) u2res_mat = RectBivariateSpline(x,rhob,u2mat) u1mat_r = [] u2mat_r = [] for i in range(0,nx): u1mat_r.append(u1mat[i]) u2mat_r.append(u2mat[i]) title = 'ren_f.tmp' savedir = str('%s' %title) with open(savedir,'w') as file: dv = str("") for j in range(0,nd): d1 = str(round(rhob[j],9)) dv = dv+';'+d1 file.write(dv) file.write('\n') for i in range(0,nx): x = str(round(x0v[i],9)) f = fm[i] lin1 = x for j in range(0,nd): f1 = str(round(f[j],9)) lin1 = lin1+';'+f1 file.write(lin1) file.write('\n') title = 'u1.tmp' savedir = str('%s' %title) with open(savedir,'w') as file: dv = str("") for j in range(0,nd): d1 = str(round(rhob[j],9)) dv = dv+';'+d1 file.write(dv) file.write('\n') for i in range(0,nx): x = str(round(x0v[i],9)) u = u1mat_r[i] lin1 = x for j in range(0,nd): u1 = str(round(u[j],9)) lin1 = lin1+';'+u1 file.write(lin1) file.write('\n') title = 'u2.tmp' savedir = str('%s' %title) with open(savedir,'w') as file: dv = str("") for j in range(0,nd): d1 = str(round(rhob[j],9)) dv = dv+';'+d1 file.write(dv) file.write('\n') for i in range(0,nx): x = str(round(x0v[i],9)) u = u2mat_r[i] lin1 = x for j in range(0,nd): u2 = str(round(u[j],9)) lin1 = lin1+';'+u2 file.write(lin1) file.write('\n') rep.append(u1res_mat) rep.append(u2res_mat) return rep
def sparsery(mov: np.ndarray, high_pass: int, neuropil_high_pass: int, batch_size: int, spatial_scale: int, threshold_scaling, max_iterations: int, yrange, xrange, percentile=0) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]: """Returns stats and ops from 'mov' using correlations in time.""" mean_img = mov.mean(axis=0) mov = utils.temporal_high_pass_filter(mov=mov, width=int(high_pass)) max_proj = mov.max(axis=0) sdmov = utils.standard_deviation_over_time(mov, batch_size=batch_size) mov = neuropil_subtraction( mov=mov / sdmov, filter_size=neuropil_high_pass) # subtract low-pass filtered movie _, Lyc, Lxc = mov.shape LL = np.meshgrid(np.arange(Lxc), np.arange(Lyc)) gxy = [np.array(LL).astype('float32')] dmov = mov movu = [] # downsample movie at various spatial scales Lyp, Lxp = np.zeros(5, 'int32'), np.zeros(5, 'int32') # downsampled sizes for j in range(5): movu0 = square_convolution_2d(dmov, 3) dmov = 2 * utils.downsample(dmov) gxy0 = utils.downsample(gxy[j], False) gxy.append(gxy0) _, Lyp[j], Lxp[j] = movu0.shape movu.append(movu0) # spline over scales I = np.zeros((len(gxy), gxy[0].shape[1], gxy[0].shape[2])) for movu0, gxy0, I0 in zip(movu, gxy, I): gmodel = RectBivariateSpline(gxy0[1, :, 0], gxy0[0, 0, :], movu0.max(axis=0), kx=min(3, gxy0.shape[1] - 1), ky=min(3, gxy0.shape[2] - 1)) I0[:] = gmodel(gxy[0][1, :, 0], gxy[0][0, 0, :]) v_corr = I.max(axis=0) scale, estimate_mode = find_best_scale(I=I, spatial_scale=spatial_scale) # TODO: scales from cellpose (?) # scales = 3 * 2 ** np.arange(5.0) # scale = np.argmin(np.abs(scales - diam)) # estimate_mode = EstimateMode.Estimated spatscale_pix = 3 * 2**scale mask_window = int(((spatscale_pix * 1.5) // 2) * 2) Th2 = threshold_scaling * 5 * max( 1, scale) # threshold for accepted peaks (scale it by spatial scale) vmultiplier = max(1, mov.shape[0] / 1200) print( 'NOTE: %s spatial scale ~%d pixels, time epochs %2.2f, threshold %2.2f ' % (estimate_mode.value, spatscale_pix, vmultiplier, vmultiplier * Th2)) # get standard deviation for pixels for all values > Th2 v_map = [utils.threshold_reduce(movu0, Th2) for movu0 in movu] movu = [movu0.reshape(movu0.shape[0], -1) for movu0 in movu] mov = np.reshape(mov, (-1, Lyc * Lxc)) lxs = 3 * 2**np.arange(5) nscales = len(lxs) v_max = np.zeros(max_iterations) ihop = np.zeros(max_iterations) v_split = np.zeros(max_iterations) V1 = deepcopy(v_map) stats = [] patches = [] seeds = [] extract_patches = False for tj in range(max_iterations): # find peaks in stddev's v0max = np.array([V1[j].max() for j in range(5)]) imap = np.argmax(v0max) imax = np.argmax(V1[imap]) yi, xi = np.unravel_index(imax, (Lyp[imap], Lxp[imap])) # position of peak yi, xi = gxy[imap][1, yi, xi], gxy[imap][0, yi, xi] med = [int(yi), int(xi)] # check if peak is larger than threshold * max(1,nbinned/1200) v_max[tj] = v0max.max() if v_max[tj] < vmultiplier * Th2: break ls = lxs[imap] ihop[tj] = imap # make square of initial pixels based on spatial scale of peak yi, xi = int(yi), int(xi) ypix0, xpix0, lam0 = add_square(yi, xi, ls, Lyc, Lxc) # project movie into square to get time series tproj = (mov[:, ypix0 * Lxc + xpix0] * lam0[0]).sum(axis=-1) if percentile > 0: threshold = min(Th2, np.percentile(tproj, percentile)) else: threshold = Th2 active_frames = np.nonzero( tproj > threshold)[0] # frames with activity > Th2 # get square around seed if extract_patches: mask = mov[active_frames].mean(axis=0).reshape(Lyc, Lxc) patches.append(utils.square_mask(mask, mask_window, yi, xi)) seeds.append([yi, xi]) # extend mask based on activity 
similarity for j in range(3): ypix0, xpix0, lam0 = iter_extend(ypix0, xpix0, mov, Lyc, Lxc, active_frames) tproj = mov[:, ypix0 * Lxc + xpix0] @ lam0 active_frames = np.nonzero(tproj > threshold)[0] if len(active_frames) < 1: if tj < nmasks: continue else: break if len(active_frames) < 1: if tj < nmasks: continue else: break # check if ROI should be split v_split[tj], ipack = two_comps(mov[:, ypix0 * Lxc + xpix0], lam0, threshold) if v_split[tj] > 1.25: lam0, xp, active_frames = ipack tproj[active_frames] = xp ix = lam0 > lam0.max() / 5 xpix0 = xpix0[ix] ypix0 = ypix0[ix] lam0 = lam0[ix] ymed = np.median(ypix0) xmed = np.median(xpix0) imin = np.argmin((xpix0 - xmed)**2 + (ypix0 - ymed)**2) med = [ypix0[imin], xpix0[imin]] # update residual on raw movie mov[np.ix_(active_frames, ypix0 * Lxc + xpix0)] -= tproj[active_frames][:, np.newaxis] * lam0 # update filtered movie ys, xs, lms = multiscale_mask(ypix0, xpix0, lam0, Lyp, Lxp) for j in range(nscales): movu[j][np.ix_(active_frames, xs[j] + Lxp[j] * ys[j])] -= np.outer( tproj[active_frames], lms[j]) Mx = movu[j][:, xs[j] + Lxp[j] * ys[j]] V1[j][ys[j], xs[j]] = (Mx**2 * np.float32(Mx > threshold)).sum(axis=0)**.5 stats.append({ 'ypix': ypix0.astype(int), 'xpix': xpix0.astype(int), 'lam': lam0 * sdmov[ypix0, xpix0], 'med': med, 'footprint': ihop[tj] }) if tj % 1000 == 0: print('%d ROIs, score=%2.2f' % (tj, v_max[tj])) for stat in stats: stat['ypix'] += int(yrange[0]) stat['xpix'] += int(xrange[0]) stat['med'][0] += int(yrange[0]) stat['med'][1] += int(xrange[0]) new_ops = { 'max_proj': max_proj, 'Vmax': v_max, 'ihop': ihop, 'Vsplit': v_split, 'Vcorr': v_corr, 'Vmap': v_map, 'spatscale_pix': spatscale_pix, } return new_ops, stats
def kepprf(infile, plotfile, prfdir, frameno, columns, rows, fluxes, background=False, border=1, focus=False, xtol=1e-4, ftol=1., plot=False, imscale='linear', cmap='YlOrBr', apercol='#ffffff', verbose=False, logfile='kepprf.log'): """ kepprf -- Fit a PSF model to a specific image within a Target Pixel File Fit a PSF model, combined with spacecraft jitter and pixel scale drift (the Pixel Response Function; PRF) to a single observation of Kepler target pixels. Parameters ---------- infile : str The name of a MAST standard format FITS file containing Kepler Target Pixel data within the first data extension. plotfile : str Name of an optional output plot file containing the results of kepprf. An example is provided in Figure 1. Typically this is a PNG format file. If no file is required, plotfile can be 'None' or blank, in which case the plot will be generated but the plot will not be saved to a file. Any existing file with this name will be automatically overwritten. prfdir : str The full or relative directory path to a folder containing the Kepler PSF calibration. Calibration files can be downloaded from the Kepler focal plane characteristics page at the MAST. frameno : int The cadence number in the input file data containing the pixels to plot. If the chosen observation has a non-zero quality flag set or the pixel set contains only NULLs then the task will halt with an error message. columns : list A starting guess for the CCD column position(s) of the source(s) that are to be fit. The model is unlikely to converge if the guess is too far away from the correct location. A rule of thumb is to provide a guess within 1 CCD pixel of the true position. If more than one source is being modeled then the column positions of each are separated by a comma. The same number of sources in the columns, rows and fluxes field is a requirement of this task. rows : list A starting guess for the CCD row position(s) of the source(s) that are to be fit. The model is unlikely to converge if the guess is too far away from the correct location. A rule of thumb is to provide a guess within 1 CCD pixel of the true position. If more than one source is being modeled then the row positions of each are separated by a comma. The same number of sources in the columns, rows and fluxes field is a requirement of this task. fluxes : list A starting guess for the flux(es) of the source(s) that are to be fit. Fit convergence is not particularly reliant on the accuracy of these guesses, but the fit will converge faster the more accurate the guess. If more than one source is being modeled then the row positions of each are separated by a comma. The same number of sources in the columns, rows and fluxes field is a requirement of this task. border : int If a background is included in the fit then it is modeled as a two-dimensional polynomial. This parameter is the polynomial order. A zero-order polynomial is generally recommended. background : boolean Whether to include a background component in the model. If `True` then the background will be represented by a two-dimensional polynomial of order `border`. This functionality is somewhat experimental, with one eye upon potential background gradients across large masks or on those detectors more prone to pattern noise. Generally it is recommended to set background as `False`. focus : boolean Whether to incude pixel scale and focus rotation with the fit parameters of the model. This is also an experimental function. 
This approach does not attempt to deal with inter- or intra-pixel variations. The recommended use is currently to set focus as `False`. xtol : float The dimensionless, relative model parameter convergence criterion for the fit algorithm. ftol : float The dimensionless, relative model residual convergence criterion for the fit algorithm. imscale : str kepprf can plot images with three choices of image scales. The choice is made using this argument. The options are: * linear * logarithmic * squareroot cmap : str matplotlib's color map plot : boolean Plot fit results to the screen? verbose : boolean Print informative messages and warnings to the shell and logfile? logfile : string Name of the logfile containing error and warning messages. Examples -------- Using the command line tool ``kepprf``, one can fit multiple PRFs to a given frame in a target pixel file as follows .. code-block:: bash $ kepprf kplr008256049-2010174085026_lpd-targ.fits prf.png --prfdir ~/kplr2011265_prf/ --frameno 1000 --columns 830 831 --rows 242 241 --fluxes 1.0 0.1 --plot --verbose KepID: 8256049 BJD: 2455296.903574196 RA (J2000): 298.67861 Dec (J2000): 44.1755 KepMag: 15.654 SkyGroup: 53 Season: 3 Channel: 81 Module: 24 Output: 1 Convergence time = 0.15390515327453613s Flux = 3978.040625752744 e-/s X = 829.8259431097927 pix Y = 242.3810334478628 pix Flux = 4734.069273790539 e-/s X = 830.990805551025 pix Y = 240.97340366638306 pix Total flux in mask = 10747.293440638587 e-/s Target flux in mask = 3793.041929468528 e-/s Total flux in aperture = 6365.551487630484 e-/s Target flux in aperture = 3110.924803570053 e-/s Target flux fraction in aperture = 78.2024392468689 % Contamination fraction in aperture = 51.12874650978488 % Residual flux = -0.5748827605745994 e-/s Pearsons chi^2 test = 296.12077907844986 for 13 dof Chi^2 test = 19803.55879917441 for 13 dof .. image:: ../_static/images/api/kepprf.png :align: center """ # log the call hashline = '--------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = ('KEPPRF -- ' + ' infile={}'.format(infile) + ' plotfile={}'.format(plotfile) + ' frameno={}'.format(frameno) + ' columns={}'.format(columns) + ' rows={}'.format(rows) + ' fluxes={}'.format(fluxes) + ' prfdir={}'.format(prfdir) + ' background={}'.format(background) + ' border={}'.format(border) + ' focus={}'.format(focus) + ' xtol={}'.format(xtol) + ' ftol={}'.format(xtol) + ' plot={}'.format(plot) + ' imscale={}'.format(imscale) + ' cmap={}'.format(cmap) + ' apercol={}'.format(apercol) + ' verbose={}'.format(verbose) + ' logfile={}'.format(logfile)) kepmsg.log(logfile, call + '\n', verbose) # start time kepmsg.clock('KEPPRF started at', logfile, verbose) # construct inital guess vector for fit f = fluxes x = columns y = rows nsrc = len(f) if len(x) != nsrc or len(y) != nsrc: errmsg = ("ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and " "fluxes must have the same number of sources") kepmsg.err(logfile, errmsg, verbose) guess = list(f) + list(x) + list(y) if background: if border == 0: guess.append(0.0) else: for i in range((border + 1) * 2): guess.append(0.0) if focus: guess = guess + [1.0, 1.0, 0.0] try: kepid, channel, skygroup, module, output, quarter, season, ra, \ dec, column, row, kepmag, xdim, ydim, barytime = \ kepio.readTPF(infile, 'TIME', logfile, verbose) except: errmsg = "ERROR -- KEPPRF: is {} a Target Pixel File? 
".format(infile) kepmsg.err(logfile, errmsg, verbose) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr = \ kepio.readTPF(infile, 'TIMECORR', logfile, verbose) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno = \ kepio.readTPF(infile, 'CADENCENO', logfile, verbose) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \ kepio.readTPF(infile,'FLUX',logfile,verbose) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels = \ kepio.readTPF(infile, 'FLUX_ERR', logfile, verbose) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual = \ kepio.readTPF(infile, 'QUALITY', logfile, verbose) # read mask defintion data from TPF file maskimg, pixcoord1, pixcoord2 = kepio.readMaskDefinition( infile, logfile, verbose) npix = np.size(np.nonzero(maskimg)[0]) # print target data if verbose: print('') print(' KepID: {}'.format(kepid)) print(' BJD: {}'.format(barytime[frameno - 1] + 2454833.0)) print(' RA (J2000): {}'.format(ra)) print('Dec (J2000): {}'.format(dec)) print(' KepMag: {}'.format(kepmag)) print(' SkyGroup: {}'.format(skygroup)) print(' Season: {}'.format(str(season))) print(' Channel: {}'.format(channel)) print(' Module: {}'.format(module)) print(' Output: {}'.format(output)) print('') # is this a good row with finite timestamp and pixels? if (not np.isfinite(barytime[frameno - 1]) or np.nansum(fluxpixels[frameno - 1, :]) == np.nan): errmsg = ("ERROR -- KEPFIELD: Row {0} is a bad quality timestamp". format(frameno)) kepmsg.err(logfile, errmsg, verbose) # construct input pixel image flux = fluxpixels[frameno - 1, :] ferr = errpixels[frameno - 1, :] DATx = np.arange(column, column + xdim) DATy = np.arange(row, row + ydim) # image scale and intensity limits of pixel data n = 0 DATimg = np.empty((ydim, xdim)) ERRimg = np.empty((ydim, xdim)) for i in range(ydim): for j in range(xdim): DATimg[i, j] = flux[n] ERRimg[i, j] = ferr[n] n += 1 # determine suitable PRF calibration file if int(module) < 10: prefix = 'kplr0' else: prefix = 'kplr' prfglob = prfdir + '/' + prefix + str(module) + '.' 
+ str( output) + '*' + '_prf.fits' try: prffile = glob.glob(prfglob)[0] except: errmsg = "ERROR -- KEPPRF: No PRF file found in {0}".format(prfdir) kepmsg.err(logfile, errmsg, verbose) # read PRF images prfn = [0, 0, 0, 0, 0] crpix1p = np.zeros(5, dtype='float32') crpix2p = np.zeros(5, dtype='float32') crval1p = np.zeros(5, dtype='float32') crval2p = np.zeros(5, dtype='float32') cdelt1p = np.zeros(5, dtype='float32') cdelt2p = np.zeros(5, dtype='float32') for i in range(5): prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i] = \ kepio.readPRFimage(prffile, i+1, logfile, verbose) prfn = np.array(prfn) PRFx = np.arange(0.5, np.shape(prfn[0])[1] + 0.5) PRFy = np.arange(0.5, np.shape(prfn[0])[0] + 0.5) PRFx = (PRFx - np.size(PRFx) / 2) * cdelt1p[0] PRFy = (PRFy - np.size(PRFy) / 2) * cdelt2p[0] # interpolate the calibrated PRF shape to the target position prf = np.zeros(np.shape(prfn[0]), dtype='float32') prfWeight = np.zeros(5, dtype='float32') for i in range(5): prfWeight[i] = math.sqrt((column - crval1p[i])**2 + (row - crval2p[i])**2) if prfWeight[i] == 0.0: prfWeight[i] = 1.0e-6 prf = prf + prfn[i] / prfWeight[i] prf = prf / np.nansum(prf) / cdelt1p[0] / cdelt2p[0] # location of the data image centered on the PRF image (in PRF pixel units) prfDimY = int(ydim / cdelt1p[0]) prfDimX = int(xdim / cdelt2p[0]) PRFy0 = int(np.round((np.shape(prf)[0] - prfDimY) / 2)) PRFx0 = int(np.round((np.shape(prf)[1] - prfDimX) / 2)) # interpolation function over the PRF splineInterpolation = RectBivariateSpline(PRFx, PRFy, prf) # construct mesh for background model if background: bx = np.arange(1., float(xdim + 1)) by = np.arange(1., float(ydim + 1)) xx, yy = np.meshgrid(np.linspace(bx.min(), bx.max(), xdim), np.linspace(by.min(), by.max(), ydim)) # fit PRF model to pixel data start = time.time() if focus and background: args = (DATx, DATy, DATimg, ERRimg, nsrc, border, xx, yy, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithFocusAndBackground, guess, args=args, xtol=xtol, ftol=ftol, disp=False) elif focus and not background: args = (DATx, DATy, DATimg, ERRimg, nsrc, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithFocus, guess, args=args, xtol=xtol, ftol=ftol, disp=False) elif background and not focus: args = (DATx, DATy, DATimg, ERRimg, nsrc, border, xx, yy, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRFwithBackground, guess, args=args, xtol=xtol, ftol=ftol, disp=False) else: args = (DATx, DATy, DATimg, ERRimg, nsrc, splineInterpolation, float(x[0]), float(y[0])) ans = fmin_powell(kepfunc.PRF, guess, args=args, xtol=xtol, ftol=ftol, disp=False) print("Convergence time = {}s\n".format(time.time() - start)) # pad the PRF data if the PRF array is smaller than the data array flux = [] OBJx = [] OBJy = [] PRFmod = np.zeros((prfDimY, prfDimX)) if PRFy0 < 0 or PRFx0 < 0.0: PRFmod = np.zeros((prfDimY, prfDimX)) superPRF = np.zeros((prfDimY + 1, prfDimX + 1)) superPRF[abs(PRFy0):abs(PRFy0) + np.shape(prf)[0], abs(PRFx0):abs(PRFx0) + np.shape(prf)[1]] = prf prf = superPRF * 1.0 PRFy0 = 0 PRFx0 = 0 # rotate the PRF model around its center if focus: angle = ans[-1] prf = interpolation.rotate(prf, -angle, reshape=False, mode='nearest') # iterate through the sources in the best fit PSF model for i in range(nsrc): flux.append(ans[i]) OBJx.append(ans[nsrc + i]) OBJy.append(ans[nsrc * 2 + i]) # calculate best-fit model y = (OBJy[i] - np.mean(DATy)) / cdelt1p[0] x = (OBJx[i] - np.mean(DATx)) / cdelt2p[0] 
prfTmp = interpolation.shift(prf, [y, x], order=3, mode='constant') prfTmp = prfTmp[PRFy0:PRFy0 + prfDimY, PRFx0:PRFx0 + prfDimX] PRFmod = PRFmod + prfTmp * flux[i] wx = 1.0 wy = 1.0 angle = 0 b = 0.0 # write out best fit parameters if verbose: txt = ("Flux = {0} e-/s X = {1} pix Y = {2} pix".format( flux[i], OBJx[i], OBJy[i])) kepmsg.log(logfile, txt, True) if background: bterms = border + 1 if bterms == 1: b = ans[nsrc * 3] else: bcoeff = np.array([ ans[nsrc * 3:nsrc * 3 + bterms], ans[nsrc * 3 + bterms:nsrc * 3 + bterms * 2] ]) bkg = kepfunc.polyval2d(xx, yy, bcoeff) b = np.nanmean(bkg.reshape(bkg.size)) txt = "\n Mean background = {0} e-/s".format(b) kepmsg.log(logfile, txt, True) if focus: wx = ans[-3] wy = ans[-2] angle = ans[-1] if focus: if not background: kepmsg.log(logfile, '', True) kepmsg.log(logfile, " X/Y focus factors = {0}/{1}".format(wx, wy), True) kepmsg.log(logfile, "PRF rotation angle = {0} deg".format(angle), True) # measure flux fraction and contamination PRFall = kepfunc.PRF2DET(flux, OBJx, OBJy, DATx, DATy, wx, wy, angle, splineInterpolation) PRFone = kepfunc.PRF2DET([flux[0]], [OBJx[0]], [OBJy[0]], DATx, DATy, wx, wy, angle, splineInterpolation) FluxInMaskAll = np.nansum(PRFall) FluxInMaskOne = np.nansum(PRFone) FluxInAperAll = 0.0 FluxInAperOne = 0.0 for i in range(1, ydim): for j in range(1, xdim): if kepstat.bitInBitmap(maskimg[i, j], 2): FluxInAperAll += PRFall[i, j] FluxInAperOne += PRFone[i, j] FluxFraction = FluxInAperOne / flux[0] try: Contamination = (FluxInAperAll - FluxInAperOne) / FluxInAperAll except: Contamination = 0.0 kepmsg.log( logfile, "\n Total flux in mask = {0} e-/s".format( FluxInMaskAll), True) kepmsg.log( logfile, " Target flux in mask = {0} e-/s".format(FluxInMaskOne), True) kepmsg.log( logfile, " Total flux in aperture = {0} e-/s".format(FluxInAperAll), True) kepmsg.log( logfile, " Target flux in aperture = {0} e-/s".format(FluxInAperOne), True) kepmsg.log( logfile, " Target flux fraction in aperture = {0} %".format( FluxFraction * 100.0), True) kepmsg.log( logfile, "Contamination fraction in aperture = {0} %".format( Contamination * 100.0), True) # construct model PRF in detector coordinates PRFfit = PRFall + 0.0 if background and bterms == 1: PRFfit = PRFall + b if background and bterms > 1: PRFfit = PRFall + bkg # calculate residual of DATA - FIT PRFres = DATimg - PRFfit FLUXres = np.nansum(PRFres) / npix # calculate the sum squared difference between data and model Pearson = abs(np.nansum(np.square(DATimg - PRFfit) / PRFfit)) Chi2 = np.nansum(np.square(DATimg - PRFfit) / np.square(ERRimg)) DegOfFreedom = npix - len(guess) - 1 try: kepmsg.log(logfile, "\n Residual flux = {0} e-/s".format(FLUXres), True) kepmsg.log( logfile, "Pearson\'s chi^2 test = {0} for {1} dof".format( Pearson, DegOfFreedom), True) except: pass kepmsg.log( logfile, " Chi^2 test = {0} for {1} dof".format(Chi2, DegOfFreedom), True) # image scale and intensity limits for plotting images imgdat_pl, zminfl, zmaxfl = kepplot.intScale2D(DATimg, imscale) imgprf_pl, zminpr, zmaxpr = kepplot.intScale2D(PRFmod, imscale) imgfit_pl, zminfi, zmaxfi = kepplot.intScale2D(PRFfit, imscale) imgres_pl, zminre, zmaxre = kepplot.intScale2D(PRFres, 'linear') if imscale == 'linear': zmaxpr *= 0.9 elif imscale == 'logarithmic': zmaxpr = np.max(zmaxpr) zminpr = zmaxpr / 2 plt.figure(figsize=[12, 10]) plt.clf() plotimage(imgdat_pl, zminfl, zmaxfl, 1, row, column, xdim, ydim, 0.07, 0.53, 'observation', cmap) plotimage(imgprf_pl, zminpr, zmaxpr, 2, row, column, xdim, ydim, 0.44, 0.53, 'model', 
cmap) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, apercol, '--', 0.5) kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, apercol, '-', 3.0) plotimage(imgfit_pl, zminfl, zmaxfl, 3, row, column, xdim, ydim, 0.07, 0.08, 'fit', cmap) plotimage(imgres_pl, zminfl, zmaxfl, 4, row, column, xdim, ydim, 0.44, 0.08, 'residual', cmap) # plot data color bar barwin = plt.axes([0.84, 0.08, 0.06, 0.9]) if imscale == 'linear': brange = np.arange(zminfl, zmaxfl, (zmaxfl - zminfl) / 1000) elif imscale == 'logarithmic': brange = np.arange(10.0**zminfl, 10.0**zmaxfl, (10.0**zmaxfl - 10.0**zminfl) / 1000) elif imscale == 'squareroot': brange = np.arange(zminfl**2, zmaxfl**2, (zmaxfl**2 - zminfl**2) / 1000) if imscale == 'linear': barimg = np.resize(brange, (1000, 1)) elif imscale == 'logarithmic': barimg = np.log10(np.resize(brange, (1000, 1))) elif imscale == 'squareroot': barimg = np.sqrt(np.resize(brange, (1000, 1))) try: nrm = len(str(int(np.nanmax(brange)))) - 1 except: nrm = 0 brange = brange / 10**nrm plt.imshow(barimg, aspect='auto', interpolation='nearest', origin='lower', vmin=np.nanmin(barimg), vmax=np.nanmax(barimg), extent=(0.0, 1.0, brange[0], brange[-1]), cmap=cmap) barwin.yaxis.tick_right() barwin.yaxis.set_label_position('right') barwin.yaxis.set_major_locator(plt.MaxNLocator(7)) plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.gca().set_autoscale_on(False) plt.setp(plt.gca(), xticklabels=[], xticks=[]) plt.ylabel('Flux (10$^{%d}$ e$^-$ s$^{-1}$)' % nrm) barwin.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f')) # render plot if len(plotfile) > 0 and plotfile.lower() != 'none': plt.savefig(plotfile) if plot: plt.draw() plt.show() # stop time kepmsg.clock('\nKEPPRF ended at', logfile, verbose)
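# --- Illustration (not from the original code): the flux-fraction and
# contamination bookkeeping above reduces to a few nansum/mask operations.
# A minimal, self-contained sketch on toy arrays with hypothetical names
# (aperture_stats, target, neighbour); the real code gets PRFall and PRFone
# from kepfunc.PRF2DET and the aperture from the bitmap mask.
import numpy as np

def aperture_stats(PRFall, PRFone, aperture, target_flux):
    """Return (flux_fraction, contamination) for a toy aperture mask."""
    FluxInAperAll = np.nansum(PRFall[aperture])
    FluxInAperOne = np.nansum(PRFone[aperture])
    flux_fraction = FluxInAperOne / target_flux
    # guard against an empty aperture, mirroring the try/except above
    contamination = ((FluxInAperAll - FluxInAperOne) / FluxInAperAll
                     if FluxInAperAll != 0 else 0.0)
    return flux_fraction, contamination

yy, xx = np.mgrid[0:11, 0:11]
target = np.exp(-((xx - 5)**2 + (yy - 5)**2) / 4.0)           # target PRF model
neighbour = 0.3 * np.exp(-((xx - 8)**2 + (yy - 5)**2) / 4.0)  # contaminant
aperture = (np.abs(xx - 5) <= 2) & (np.abs(yy - 5) <= 2)
print(aperture_stats(target + neighbour, target, aperture, target.sum()))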
def coreg_with_master_dem(args): """ Coregistration with the use of a master DEM """ ## Read DEMs metadata ## # master master_dem = raster.SingleBandRaster(args.master_dem, load_data=False) if args.nodata1 != 'none': nodata1 = float(args.nodata1) else: band = master_dem.ds.GetRasterBand(1) nodata1 = band.GetNoDataValue() # slave slave_dem = raster.SingleBandRaster(args.slave_dem, load_data=False) if args.nodata2 != 'none': nodata2 = float(args.nodata2) else: band = slave_dem.ds.GetRasterBand(1) nodata2 = band.GetNoDataValue() ## reproject DEMs into the same spatial grid, master or slave ## if master_dem.extent != slave_dem.extent: if args.grid == 'master': print("Reproject slave DEM") # Read extent of DEMs intersection in master projection extent = master_dem.intersection(args.slave_dem) # Read master DEM in area of overlap master_dem = raster.SingleBandRaster(args.master_dem, load_data=extent, latlon=False) master_dem.r = np.float32(master_dem.r) # Reproject slave DEM in same grid dem2coreg = slave_dem.reproject(master_dem.srs, master_dem.nx, master_dem.ny, master_dem.extent[0], master_dem.extent[3], master_dem.xres, master_dem.yres, dtype=6, nodata=nodata2, interp_type=gdal.GRA_Bilinear, progress=True).r # Store GeoTransform for later saving gt = (master_dem.extent[0], master_dem.xres, 0.0, master_dem.extent[3], 0.0, master_dem.yres) elif args.grid == 'slave': print("Reproject master DEM") # Read extent of DEMs intersection in slave projection extent = slave_dem.intersection(args.master_dem) # Read slave DEM in area of overlap slave_dem = raster.SingleBandRaster(args.slave_dem, load_data=extent, latlon=False) dem2coreg = np.float32(slave_dem.r) del slave_dem.r # Reproject master DEM in same grid master_dem = master_dem.reproject(slave_dem.srs, slave_dem.nx, slave_dem.ny, slave_dem.extent[0], slave_dem.extent[3], slave_dem.xres, slave_dem.yres, dtype=6, nodata=nodata1, interp_type=gdal.GRA_Bilinear, progress=True) # Store GeoTransform for later saving gt = (slave_dem.extent[0], slave_dem.xres, 0.0, slave_dem.extent[3], 0.0, slave_dem.yres) else: sys.exit("Error : grid must be 'master' or 'slave'") else: master_dem = raster.SingleBandRaster(args.master_dem) master_dem.r = np.float32(master_dem.r) slave_dem = raster.SingleBandRaster(args.slave_dem) dem2coreg = np.float32(slave_dem.r) gt = master_dem.ds.GetGeoTransform() # Save GeoTransform for saving # Mask no data values if nodata1 is not None: master_dem.r[master_dem.r == nodata1] = np.nan if nodata2 is not None: dem2coreg[dem2coreg == nodata2] = np.nan ## mask points ## if args.maskfile != 'none': mask = raster.SingleBandRaster(args.maskfile) if master_dem.r.shape != mask.r.shape: print("Reproject mask") mask = mask.reproject( master_dem.srs, master_dem.nx, master_dem.ny, master_dem.extent[0], master_dem.extent[3], master_dem.xres, master_dem.yres, dtype=6, interp_type=gdal.GRA_NearestNeighbour, progress=True) # nearest neighbor interpolation master_dem.r[mask.r > 0] = np.nan if args.shp != 'none': outlines = vect.SingleLayerVector(args.shp) mask = outlines.create_mask(master_dem) # Dilate mask if buffer size set to positive value if args.buffer > 0: xres = float(master_dem.xres) buf_size_pix = int(args.buffer / xres) mask = binary_dilation(mask, np.ones((3, 3)), iterations=buf_size_pix / 2) master_dem.r[mask > 0] = np.nan ## filter outliers ## if args.resmax != 'none': master_dem.r[np.abs(master_dem.r - dem2coreg) > float(args.resmax)] = np.nan ## Set master DEM grid for later resampling ## xgrid = np.arange(master_dem.nx) ygrid = 
np.arange(master_dem.ny) X, Y = master_dem.coordinates() diff_before = dem2coreg - master_dem.r ## Print out some statistics median = np.median(diff_before[np.isfinite(diff_before)]) NMAD_old = 1.4826 * np.median( np.abs(diff_before[np.isfinite(diff_before)] - median)) print("Statistics on initial dh") print("Median : %f, NMAD : %f" % (median, NMAD_old)) ## Display if args.plot == True: maxval = 3 * NMAD_old #np.percentile(np.abs(diff_before[np.isfinite(diff_before)]),90) pl.imshow(diff_before, vmin=-maxval, vmax=maxval, cmap='RdYlBu') cb = pl.colorbar() cb.set_label('Elevation difference (m)') pl.show() ## fill NaN values for interpolation ## nanval = np.isnan(dem2coreg) slave_filled = np.where(np.isnan(dem2coreg), -9999, dem2coreg) ## Create spline function ## f = RectBivariateSpline(ygrid, xgrid, slave_filled, kx=1, ky=1) f2 = RectBivariateSpline(ygrid, xgrid, nanval, kx=1, ky=1) xoff, yoff = 0, 0 ## compute terrain aspect/slope ## slope, aspect = grad2d(master_dem.r) ## Iterations to estimate DEMs shift print("Iteratively estimate DEMs shift") for i in range(args.niter): # remove bias dem2coreg -= median #Elevation difference dh = master_dem.r - dem2coreg #compute offset east, north, c = horizontal_shift(dh, slope, aspect, args.plot, args.min_count) print("#%i - Offset in pixels : (%f,%f)" % (i + 1, east, north)) xoff += east yoff += north #resample slave DEM in the new grid znew = f(ygrid - yoff, xgrid + xoff) #postive y shift moves south nanval_new = f2(ygrid - yoff, xgrid + xoff) #remove filled values that have been interpolated znew[nanval_new != 0] = np.nan # update DEM dem2coreg = znew # print some statistics diff = dem2coreg - master_dem.r diff = diff[np.isfinite(diff)] NMAD_new = 1.4826 * np.median(np.abs(diff - np.median(diff))) median = np.median(diff) print("Median : %.2f, NMAD = %.2f, Gain : %.2f%%" % (median, NMAD_new, (NMAD_new - NMAD_old) / NMAD_old * 100)) NMAD_old = NMAD_new print("Final Offset in pixels (east, north) : (%f,%f)" % (xoff, yoff)) ### Deramping ### if args.degree >= 0: print("Deramping") diff = dem2coreg - master_dem.r # remove points above altitude threshold (snow covered areas) if args.zmax != 'none': diff[master_dem.r > int(args.zmax)] = np.nan # remove points below altitude threshold (e.g sea ice) if args.zmin != 'none': diff[master_dem.r < int(args.zmin)] = np.nan # remove points with slope higher than 40° that are more error-prone # removed until issue with Nan is fixed, see previous commit #slope, aspect = master_dem.compute_slope() #diff[slope>=40*np.pi/180] = np.nan #diff[np.isnan(slope)] = np.nan # remove outliers med = np.median(diff[np.isfinite(diff)]) mad = 1.4826 * np.median(np.abs(diff[np.isfinite(diff)] - med)) diff[np.abs(diff - med) > 3 * mad] = np.nan # estimate a ramp and remove it ramp = deramping(diff, X, Y, d=args.degree, plot=args.plot) vshift = ramp(X, Y) dem2coreg -= vshift #ramp(X,Y) # save to output file if args.save == True: fname, ext = os.path.splitext(args.outfile) fname += '_shift.txt' f = open(fname, 'w') f.write("Final offset: east (pixels), north (pixels), median up (m)\n") if args.degree >= 0: vshift_med = np.median(vshift[nanval_new == 0]) else: vshift_med = np.nan f.write("%g, %g, %g\n" % (xoff, yoff, vshift_med)) f.write("Final NMAD (m) :\n%f" % NMAD_new) f.close() print("Offset saved in %s" % fname) # Save the ramp as a GeoTiff # if args.save==True: # fname, ext = os.path.splitext(args.outfile) # fname+='_ramp.TIF' # raster.simple_write_geotiff(fname, ramp(X,Y), gt, 
wkt=master_dem.srs.ExportToWkt(),dtype=gdal.GDT_Float32) # #ramp(X,Y).tofile(fname) # print("Ramp saved in %s" %fname) # print some statistics diff = dem2coreg - master_dem.r diff = diff[np.isfinite(diff)] median = np.median(diff) NMAD = 1.4826 * np.median(np.abs(diff - median)) print("Final DEM") print("Median : %.2f, NMAD = %.2f" % (median, NMAD)) #Display results if args.plot == True: diff_after = dem2coreg - master_dem.r pl.figure('before') pl.imshow(diff_before, vmin=-maxval, vmax=maxval, cmap='RdYlBu') cb = pl.colorbar() cb.set_label('DEM difference (m)') pl.figure('after') pl.imshow(diff_after, vmin=-maxval, vmax=maxval, cmap='RdYlBu') cb = pl.colorbar() cb.set_label('DEM difference (m)') #pl.show() pl.figure() pl.hist(diff_after[np.isfinite(diff_after)], bins=np.linspace(-maxval, maxval, 50)) pl.xlabel('DEM difference (m)') pl.show() # Replace all NaNs with nodata value dem2coreg[np.isnan(dem2coreg)] = nodata2 #Save to output file #dtype = master_dem.ds.GetRasterBand(1).DataType raster.simple_write_geotiff(args.outfile, dem2coreg, gt, wkt=master_dem.srs.ExportToWkt(), dtype=gdal.GDT_Float32, nodata_value=nodata2)
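# --- Illustration (synthetic DEM, hypothetical offsets): the core of the
# iteration above is a shift-and-resample of the slave DEM with a bilinear
# RectBivariateSpline, plus a second spline over the NaN mask so that cells
# interpolated from fill values can be masked again. A minimal sketch:
import numpy as np
from scipy.interpolate import RectBivariateSpline

ny, nx = 100, 120
ygrid, xgrid = np.arange(ny), np.arange(nx)
dem = np.random.default_rng(0).normal(1000, 50, (ny, nx))
dem[10:20, 10:20] = np.nan                          # a void in the slave DEM

nanval = np.isnan(dem)
dem_filled = np.where(nanval, -9999, dem)

f = RectBivariateSpline(ygrid, xgrid, dem_filled, kx=1, ky=1)
f2 = RectBivariateSpline(ygrid, xgrid, nanval.astype(float), kx=1, ky=1)

xoff, yoff = 2.3, -1.7                              # shift estimate in pixels
shifted = f(ygrid - yoff, xgrid + xoff)             # resampled slave DEM
shifted[f2(ygrid - yoff, xgrid + xoff) != 0] = np.nan   # re-mask filled cells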
def active_contour(image, snake, alpha=0.01, beta=0.1, w_line=0, w_edge=1, gamma=0.01, bc='periodic', max_px_move=1.0, max_iterations=2500, convergence=0.1): """Active contour model. Active contours by fitting snakes to features of images. Supports single and multichannel 2D images. Snakes can be periodic (for segmentation) or have fixed and/or free ends. The output snake has the same length as the input boundary. As the number of points is constant, make sure that the initial snake has enough points to capture the details of the final contour. Parameters ---------- image : (N, M) or (N, M, 3) ndarray Input image. snake : (N, 2) ndarray Initialisation coordinates of snake. For periodic snakes, it should not include duplicate endpoints. alpha : float, optional Snake length shape parameter. Higher values make the snake contract faster. beta : float, optional Snake smoothness shape parameter. Higher values make the snake smoother. w_line : float, optional Controls attraction to brightness. Use negative values to attract to dark regions. w_edge : float, optional Controls attraction to edges. Use negative values to repel snake from edges. gamma : float, optional Explicit time stepping parameter. bc : {'periodic', 'free', 'fixed'}, optional Boundary conditions for the contour. 'periodic' attaches the two ends of the snake, 'fixed' holds the end-points in place, and 'free' allows free movement of the ends. 'fixed' and 'free' can be combined by passing 'fixed-free', 'free-fixed'. Passing 'fixed-fixed' or 'free-free' yields the same behaviour as 'fixed' and 'free', respectively. max_px_move : float, optional Maximum pixel distance to move per iteration. max_iterations : int, optional Maximum iterations to optimize snake shape. convergence : float, optional Convergence criteria. Returns ------- snake : (N, 2) ndarray Optimised snake, same shape as input parameter. References ---------- .. [1] Kass, M.; Witkin, A.; Terzopoulos, D. "Snakes: Active contour models". International Journal of Computer Vision 1 (4): 321 (1988).
Examples -------- >>> from skimage.draw import circle_perimeter >>> from skimage.filters import gaussian Create and smooth image: >>> img = np.zeros((100, 100)) >>> rr, cc = circle_perimeter(35, 45, 25) >>> img[rr, cc] = 1 >>> img = gaussian(img, 2) Initiliaze spline: >>> s = np.linspace(0, 2*np.pi,100) >>> init = 50*np.array([np.cos(s), np.sin(s)]).T+50 Fit spline to image: >>> snake = active_contour(img, init, w_edge=0, w_line=1) #doctest: +SKIP >>> dist = np.sqrt((45-snake[:, 0])**2 +(35-snake[:, 1])**2) #doctest: +SKIP >>> int(np.mean(dist)) #doctest: +SKIP 25 """ max_iterations = int(max_iterations) if max_iterations <= 0: raise ValueError("max_iterations should be >0.") convergence_order = 10 valid_bcs = [ 'periodic', 'free', 'fixed', 'free-fixed', 'fixed-free', 'fixed-fixed', 'free-free' ] if bc not in valid_bcs: raise ValueError("Invalid boundary condition.\n" + "Should be one of: " + ", ".join(valid_bcs) + '.') img = img_as_float(image) RGB = img.ndim == 3 # Find edges using sobel: if w_edge != 0: if RGB: edge = [ sobel(img[:, :, 0]), sobel(img[:, :, 1]), sobel(img[:, :, 2]) ] else: edge = [sobel(img)] for i in range(3 if RGB else 1): edge[i][0, :] = edge[i][1, :] edge[i][-1, :] = edge[i][-2, :] edge[i][:, 0] = edge[i][:, 1] edge[i][:, -1] = edge[i][:, -2] else: edge = [0] # Superimpose intensity and edge images: if RGB: img = w_line*np.sum(img, axis=2) \ + w_edge*sum(edge) else: img = w_line * img + w_edge * edge[0] # Interpolate for smoothness: intp = RectBivariateSpline(np.arange(img.shape[1]), np.arange(img.shape[0]), img.T, kx=2, ky=2, s=0) x, y = snake[:, 0].astype(np.float), snake[:, 1].astype(np.float) xsave = np.empty((convergence_order, len(x))) ysave = np.empty((convergence_order, len(x))) # Build snake shape matrix for Euler equation n = len(x) a = np.roll(np.eye(n), -1, axis=0) + \ np.roll(np.eye(n), -1, axis=1) - \ 2*np.eye(n) # second order derivative, central difference b = np.roll(np.eye(n), -2, axis=0) + \ np.roll(np.eye(n), -2, axis=1) - \ 4*np.roll(np.eye(n), -1, axis=0) - \ 4*np.roll(np.eye(n), -1, axis=1) + \ 6*np.eye(n) # fourth order derivative, central difference A = -alpha * a + beta * b # Impose boundary conditions different from periodic: sfixed = False if bc.startswith('fixed'): A[0, :] = 0 A[1, :] = 0 A[1, :3] = [1, -2, 1] sfixed = True efixed = False if bc.endswith('fixed'): A[-1, :] = 0 A[-2, :] = 0 A[-2, -3:] = [1, -2, 1] efixed = True sfree = False if bc.startswith('free'): A[0, :] = 0 A[0, :3] = [1, -2, 1] A[1, :] = 0 A[1, :4] = [-1, 3, -3, 1] sfree = True efree = False if bc.endswith('free'): A[-1, :] = 0 A[-1, -3:] = [1, -2, 1] A[-2, :] = 0 A[-2, -4:] = [-1, 3, -3, 1] efree = True # Only one inversion is needed for implicit spline energy minimization: inv = np.linalg.inv(A + gamma * np.eye(n)) # Explicit time stepping for image energy minimization: for i in range(max_iterations): fx = intp(x, y, dx=1, grid=False) fy = intp(x, y, dy=1, grid=False) if sfixed: fx[0] = 0 fy[0] = 0 if efixed: fx[-1] = 0 fy[-1] = 0 if sfree: fx[0] *= 2 fy[0] *= 2 if efree: fx[-1] *= 2 fy[-1] *= 2 xn = inv @ (gamma * x + fx) yn = inv @ (gamma * y + fy) # Movements are capped to max_px_move per iteration: dx = max_px_move * np.tanh(xn - x) dy = max_px_move * np.tanh(yn - y) if sfixed: dx[0] = 0 dy[0] = 0 if efixed: dx[-1] = 0 dy[-1] = 0 x += dx y += dy # Convergence criteria needs to compare to a number of previous # configurations since oscillations can occur. 
j = i % (convergence_order + 1) if j < convergence_order: xsave[j, :] = x ysave[j, :] = y else: dist = np.min( np.max( np.abs(xsave - x[None, :]) + np.abs(ysave - y[None, :]), 1)) if dist < convergence: break return np.array([x, y]).T
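# --- Illustration (toy numbers): the loop above relies on a semi-implicit
# scheme, so the only matrix inverse is inv(A + gamma*I), computed once and
# reused every iteration. A tiny sketch with zero image forces, where a single
# update step just relaxes the contour under its internal energy (it smooths
# and slowly contracts):
import numpy as np

n, alpha, beta, gamma = 8, 0.01, 0.1, 0.01
a = np.roll(np.eye(n), -1, axis=0) + np.roll(np.eye(n), -1, axis=1) - 2 * np.eye(n)
b = (np.roll(np.eye(n), -2, axis=0) + np.roll(np.eye(n), -2, axis=1)
     - 4 * np.roll(np.eye(n), -1, axis=0) - 4 * np.roll(np.eye(n), -1, axis=1)
     + 6 * np.eye(n))
A = -alpha * a + beta * b
inv = np.linalg.inv(A + gamma * np.eye(n))

s = np.linspace(0, 2 * np.pi, n, endpoint=False)
rng = np.random.default_rng(1)
x = np.cos(s) + 0.1 * rng.normal(size=n)            # noisy closed contour
y = np.sin(s) + 0.1 * rng.normal(size=n)
fx = fy = np.zeros(n)                               # no image forces here
x, y = inv @ (gamma * x + fx), inv @ (gamma * y + fy)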
def periodradius(epos, Init=False, fpara=None, fdet=None, xbin=None, ybin=None, xgrid=None, ygrid=None, Convert=False): ''' Return the period-radius distribution in referse to the model parameter (Mass, Msini or radius) out reverse to the observed parameter (Msini or Radius) default is the input grid. For MC=False, the distribution can also be Converted onto the ouput grid (M->Msini or M->R) ''' if fpara is None: pps = epos.pdfpars.getpps(Init=Init) fpar2d = epos.pdfpars.get2d(Init=Init) else: pps = epos.pdfpars.getpps_fromlist(fpara) fpar2d = epos.pdfpars.get2d_fromlist(fpara) #print fpara pdf = epos.func(epos.X_in, epos.Y_in, *fpar2d) pdf_X, pdf_Y = np.sum(pdf, axis=1), np.sum(pdf, axis=0) sum_pdf = np.sum(pdf) sum_pdf_X = np.sum(pdf_X) sum_pdf_Y = np.sum(pdf_Y) ''' Convert M->Msini or M->R''' if Convert: if epos.RV: pdf = _convert_pdf_M_to_Msini(epos.MC_xvar, epos.in_yvar, epos.MC_yvar, pdf) pdf_X, pdf_Y = np.sum(pdf, axis=1), np.sum(pdf, axis=0) elif epos.MR: pass # convolve with detection efficiency? if fdet is not None: det_pdf = pdf * fdet det_pdf_X, det_pdf_Y = np.sum(det_pdf, axis=1), np.sum(det_pdf, axis=0) # calculate pdf on different grid? if xbin is not None: xgrid = np.logspace(np.log10(xbin[0]), np.log10(xbin[-1]), 5) if ybin is not None: ygrid = np.logspace(np.log10(ybin[0]), np.log10(ybin[-1]), 5) # calculate pdf on provided grid if (xgrid is not None) or (ygrid is not None): #assert Convert==False, 'Not implemented' if xgrid is None: xgrid = epos.MC_xvar if ygrid is None: #ygrid= epos.MC_yvar ygrid = epos.in_yvar X, Y = np.meshgrid(xgrid, ygrid, indexing='ij') pdf = epos.func(X, Y, *fpar2d) # normalized per unit dlnxdlny pdf = pps * pdf / sum_pdf * epos.scale if Convert: assert np.all(ygrid == epos.MC_yvar) or np.all( ygrid == epos.in_yvar), 'cannot do custom y grid' pdf = _convert_pdf_M_to_Msini(xgrid, ygrid, epos.MC_yvar, pdf) #pdf_X, pdf_Y= np.sum(pdf, axis=1), np.sum(pdf, axis=0) if fdet is not None: #assert False, 'Where is this used?' #func_fdet= interp2d(xgrid, ygrid, fdet.T, kind='cubic') func_fdet = RectBivariateSpline(epos.MC_xvar, epos.MC_yvar, fdet) # was in_yvar _fdet = func_fdet(xgrid, ygrid) #print fdet.shape, _fdet.shape #print xgrid #print ygrid #print _fdet pdf *= _fdet dlnx = np.log(xgrid[-1] / xgrid[0]) dlny = np.log(ygrid[-1] / ygrid[0]) pdf_X = np.average(pdf, axis=1) * dlny pdf_Y = np.average(pdf, axis=0) * dlnx else: # calculate pdf on default grid # normalized in units dlnx, dlny, and dlnxdlny if fdet is None: pdf_X = pps * pdf_X / sum_pdf_X * epos.scale_x pdf_Y = pps * pdf_Y / sum_pdf_Y * epos.scale_in_y pdf = pps * pdf / sum_pdf * epos.scale else: pdf_X = pps * det_pdf_X / sum_pdf_X * epos.scale_x pdf_Y = pps * det_pdf_Y / sum_pdf_Y * epos.scale_in_y pdf = pps * det_pdf / sum_pdf * epos.scale return pps, pdf, pdf_X, pdf_Y
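# --- Illustration (synthetic grids, hypothetical efficiency map): the
# detection-efficiency re-gridding above fits a RectBivariateSpline to fdet on
# the MC grid and evaluates it on the custom (xgrid, ygrid) before it
# multiplies the pdf. A minimal sketch of just that step:
import numpy as np
from scipy.interpolate import RectBivariateSpline

MC_xvar = np.logspace(0, 3, 60)        # e.g. orbital period grid
MC_yvar = np.logspace(-0.5, 1, 40)     # e.g. planet radius grid
fdet = np.clip(0.2 * np.log10(MC_xvar)[:, None]
               + 0.5 * np.log10(MC_yvar)[None, :], 0.0, 1.0)

func_fdet = RectBivariateSpline(MC_xvar, MC_yvar, fdet)
xgrid = np.logspace(0, 3, 5)
ygrid = np.logspace(-0.5, 1, 5)
_fdet = func_fdet(xgrid, ygrid)        # (5, 5) array, ready to multiply the pdf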
def image_reconstruction_prepare(image_path, LUT_path): image_file = os.listdir(image_path) LUT_files = os.listdir(LUT_path) argmin_look_up_table_r = np.loadtxt(LUT_path + LUT_files[0], dtype=np.float32).T #argmin_look_up_table_r = np.flip(argmin_look_up_table_r,axis=1) argmin_look_up_table_g = np.loadtxt(LUT_path + LUT_files[1], dtype=np.float32).T #argmin_look_up_table_g = np.flip(argmin_look_up_table_g,axis=1) argmin_look_up_table_b = np.loadtxt(LUT_path + LUT_files[2], dtype=np.float32).T #argmin_look_up_table_b = np.flip(argmin_look_up_table_b,axis=1) CCRF = np.concatenate((np.expand_dims(argmin_look_up_table_r, axis=0), np.expand_dims(argmin_look_up_table_g, axis=0), np.expand_dims(argmin_look_up_table_b, axis=0)), axis=0) plt.figure() plt.title('argmin lookup table') plt.axis([0, 1024, 0, 1024]) imshow(argmin_look_up_table_r, cmap="gray") plt.show() plt.figure() plt.title('argmin lookup table') plt.axis([0, 1024, 0, 1024]) imshow(argmin_look_up_table_g, cmap="gray") plt.show() plt.figure() plt.title('argmin lookup table') plt.axis([0, 1024, 0, 1024]) imshow(argmin_look_up_table_b, cmap="gray") plt.show() # interpolation xxx = np.arange(0, 1, 1 / 1024) yyy = np.arange(0, 1, 1 / 1024) func_r = RectBivariateSpline(xxx, yyy, argmin_look_up_table_r, bbox=[0, 1, 0, 1]) func_g = RectBivariateSpline(xxx, yyy, argmin_look_up_table_g, bbox=[0, 1, 0, 1]) func_b = RectBivariateSpline(xxx, yyy, argmin_look_up_table_b, bbox=[0, 1, 0, 1]) func = [func_r.ev, func_g.ev, func_b.ev] # prepare the input images example_image = imread(image_path + image_file[0]) image_high, image_wide = example_image.shape[0], example_image.shape[1] tmp_image_input = np.zeros((0, image_high, image_wide, 3), dtype=np.float32) for i in image_file: image_tmp = ((np.expand_dims(imread(image_path + i), axis=0) + 0.5) / 256.0).astype(np.float32) tmp_image_input = np.concatenate((tmp_image_input, image_tmp)) return tmp_image_input, func, CCRF
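# --- Illustration (synthetic lookup table): the three LUT splines above are
# defined on a [0, 1) x [0, 1) grid and queried point-wise with .ev for pairs
# of pixel values. A minimal sketch with a made-up table:
import numpy as np
from scipy.interpolate import RectBivariateSpline

lut = np.fromfunction(lambda i, j: 0.5 * (i + j) / 1024.0, (1024, 1024))
xxx = np.arange(0, 1, 1 / 1024)
yyy = np.arange(0, 1, 1 / 1024)
func_r = RectBivariateSpline(xxx, yyy, lut, bbox=[0, 1, 0, 1])

px = np.array([0.10, 0.55, 0.90])      # pixel values from one exposure
py = np.array([0.12, 0.60, 0.88])      # pixel values from another exposure
print(func_r.ev(px, py))               # interpolated lookup values per pixel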
def LucasKanadeAffine(It, It1, threshold, num_iters): """ :param It: template image :param It1: Current image :param threshold: if the length of dp is smaller than the threshold, terminate the optimization :param num_iters: number of iterations of the optimization :return: M: the Affine warp matrix [3x3 numpy array] put your implementation here """ # put your implementation here - reference: http://www.cse.psu.edu/~rtc12/CSE486/lecture30.pdf M = np.eye(3).astype(np.float32) dp = [[1], [0], [0], [0], [1], [0]] p = np.zeros(6) x1, y1, x2, y2 = 0, 0, It.shape[1], It.shape[0] rows, cols = It.shape # compute gradient and RBspline Iy, Ix = np.gradient(It1) y = np.arange(0, rows, 1) x = np.arange(0, cols, 1) c = np.linspace(x1, x2, cols) r = np.linspace(y1, y2, rows) cc, rr = np.meshgrid(c, r) RBspline_It = RectBivariateSpline(y, x, It) T = RBspline_It.ev(rr, cc) RBspline_gx = RectBivariateSpline(y, x, Ix) RBspline_gy = RectBivariateSpline(y, x, Iy) RBspline_It1 = RectBivariateSpline(y, x, It1) while np.square(dp).sum() > threshold: W = np.array([[1.0 + p[0], p[1], p[2]], [p[3], 1.0 + p[4], p[5]]]) x1_warp = W[0, 0] * x1 + W[0, 1] * y1 + W[0, 2] y1_warp = W[1, 0] * x1 + W[1, 1] * y1 + W[1, 2] x2_warp = W[0, 0] * x2 + W[0, 1] * y2 + W[0, 2] y2_warp = W[1, 0] * x2 + W[1, 1] * y2 + W[1, 2] cols_warp = np.linspace(x1_warp, x2_warp, It.shape[1]) rows_warp = np.linspace(y1_warp, y2_warp, It.shape[0]) cols_mesh, rows_mesh = np.meshgrid(cols_warp, rows_warp) warpImg = RBspline_It1.ev(rows_mesh, cols_mesh) #compute error image #errImg is (n,1) error = T - warpImg errorImg = error.reshape(-1, 1) #compute gradient Ix_warp = RBspline_gx.ev(rows_mesh, cols_mesh) Iy_warp = RBspline_gy.ev(rows_mesh, cols_mesh) #I is (n,2) I = np.vstack((Ix_warp.ravel(), Iy_warp.ravel())).T #evaluate delta = I @ jac is (nx6) delta = np.zeros((It.shape[0] * It.shape[1], 6)) for i in range(It.shape[0]): for j in range(It.shape[1]): #I is (1x2) for each pixel #Jacobian is (2x6)for each pixel I_indiv = np.array([I[i * It.shape[1] + j]]).reshape(1, 2) jac_indiv = np.array([[j, 0, i, 0, 1, 0], [0, j, 0, i, 0, 1]]) delta[i * It.shape[1] + j] = np.matmul(I_indiv, jac_indiv) #compute Hessian Matrix #H is (6x6) H = np.matmul(delta.T, delta) #compute dp #dp is (6x6)@(6xn)@(nx1) = (6x1) dp = np.linalg.inv(H) @ (delta.T) @ errorImg #update parameters p[0] += dp[0, 0] p[1] += dp[1, 0] p[2] += dp[2, 0] p[3] += dp[3, 0] p[4] += dp[4, 0] p[5] += dp[5, 0] M = np.array([[1.0 + p[0], p[1], p[2]], [p[3], 1.0 + p[4], p[5]], [0, 0, 1]]) return M
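# --- Illustration (synthetic image, hypothetical warp): the step that makes
# the solver above work is sampling an image with RectBivariateSpline.ev at an
# affinely transformed mesh, which is how warpImg, Ix_warp and Iy_warp are
# produced each iteration. A minimal sketch of that warping alone:
import numpy as np
from scipy.interpolate import RectBivariateSpline

img = np.random.default_rng(0).random((60, 80))
spl = RectBivariateSpline(np.arange(60), np.arange(80), img)

cc, rr = np.meshgrid(np.arange(80), np.arange(60))
W = np.array([[1.0, 0.0, 2.0],
              [0.0, 1.0, 1.0]])         # translate by 2 px in x, 1 px in y
cc_w = W[0, 0] * cc + W[0, 1] * rr + W[0, 2]
rr_w = W[1, 0] * cc + W[1, 1] * rr + W[1, 2]
warped = spl.ev(rr_w, cc_w)             # points outside the image extrapolate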
def texture_noise(model, support=None, L=None, noise_sd=SHAPE_MODEL_NOISE_LV['lo'], len_sc=SHAPE_MODEL_NOISE_LEN_SC, max_rng=None, max_n=1e4, hf_noise=True): tex = model.load_texture() if tex is None: print('tools.texture_noise: no texture loaded') return [None] * 3 r = np.sqrt(max_n / np.prod(tex.shape[:2])) ny, nx = (np.array(tex.shape[:2]) * r).astype(np.int) n = nx * ny tx_grid_xx, tx_grid_yy = np.meshgrid(np.linspace(0, 1, nx), np.linspace(0, 1, ny)) tx_grid = np.hstack((tx_grid_xx.reshape((-1, 1)), tx_grid_yy.reshape((-1, 1)))) support = support if support else model points = np.array(support.vertices) max_rng = np.max(np.ptp(points, axis=0)) if max_rng is None else max_rng # use vertices for distances, find corresponding vertex for each pixel y_cov = None if L is None: try: from sklearn.gaussian_process.kernels import Matern, WhiteKernel except: print('Requires scikit-learn, install using "conda install scikit-learn"') sys.exit() kernel = 1.0 * noise_sd * Matern(length_scale=len_sc * max_rng, nu=1.5) \ + 0.5 * noise_sd * Matern(length_scale=0.1 * len_sc * max_rng, nu=1.5) \ + WhiteKernel( noise_level=1e-5 * noise_sd * max_rng) # white noise for positive definite covariance matrix only # texture coordinates given so that x points left and *Y POINTS UP* tex_img_coords = np.array(support.texcoords) tex_img_coords[:, 1] = 1 - tex_img_coords[:, 1] _, idxs = find_nearest_each(haystack=tex_img_coords, needles=tx_grid) tx2vx = support.texture_to_vertex_map() y_cov = kernel(points[tx2vx[idxs], :] - np.mean(points, axis=0)) if 0: # for debugging distances import matplotlib.pyplot as plt import cv2 from visnav.algo.image import ImageProc orig_tx = cv2.imread(os.path.join(DATA_DIR, '67p+tex.png'), cv2.IMREAD_GRAYSCALE) gx, gy = np.gradient(points[tx2vx[idxs], :].reshape((ny, nx, 3)), axis=(1, 0)) gxy = np.linalg.norm(gx, axis=2) + np.linalg.norm(gy, axis=2) gxy = (gxy - np.min(gxy)) / (np.max(gxy) - np.min(gxy)) grad_img = cv2.resize((gxy * 255).astype('uint8'), orig_tx.shape) overlaid = ImageProc.merge((orig_tx, grad_img)) plt.figure(1) plt.imshow(overlaid) plt.show() # sample gp e0, L = mv_normal(np.zeros(n), cov=y_cov, L=L) e0 = e0.reshape((ny, nx)) # interpolate for final texture x = np.linspace(np.min(tx_grid_xx), np.max(tx_grid_xx), tex.shape[1]) y = np.linspace(np.min(tx_grid_yy), np.max(tx_grid_yy), tex.shape[0]) interp0 = RectBivariateSpline(tx_grid_xx[0, :], tx_grid_yy[:, 0], e0, kx=1, ky=1) err0 = interp0(x, y) if 0: import matplotlib.pyplot as plt import cv2 from visnav.algo.image import ImageProc orig_tx = cv2.imread(os.path.join(DATA_DIR, '67p+tex.png'), cv2.IMREAD_GRAYSCALE) err_ = err0 if 1 else e0 eimg = (err_ - np.min(err_)) / (np.max(err_) - np.min(err_)) eimg = cv2.resize((eimg * 255).astype('uint8'), orig_tx.shape) overlaid = ImageProc.merge((orig_tx, eimg)) plt.figure(1) plt.imshow(overlaid) plt.show() err1 = 0 if hf_noise: e1, L = mv_normal(np.zeros(n), L=L) e1 = e1.reshape((ny, nx)) interp1 = RectBivariateSpline(tx_grid_xx[0, :], tx_grid_yy[:, 0], e1, kx=1, ky=1) err_coef = interp1(x, y) lo, hi = np.min(err_coef), np.max(err_coef) err_coef = (err_coef - lo) / (hi - lo) len_sc = 10 err1 = generate_field_fft(tex.shape, (6 * noise_sd, 4 * noise_sd), (len_sc / 1000, len_sc / 4500)) if hf_noise else 0 err1 *= err_coef noisy_tex = tex + err0 + err1 min_v, max_v = np.quantile(noisy_tex, (0.0001, 0.9999)) min_v = min(0, min_v) noisy_tex = (np.clip(noisy_tex, min_v, max_v) - min_v) / (max_v - min_v) if 0: import matplotlib.pyplot as plt plt.figure(1) plt.imshow(noisy_tex) 
plt.figure(2) plt.imshow(err0) plt.figure(3) plt.imshow(err1) plt.show() return noisy_tex, np.std(err0 + err1), L
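# --- Illustration (random field instead of the GP sample): the coarse-to-fine
# step above takes noise defined on an (ny, nx) grid over [0, 1]^2 and
# upsamples it to texture resolution with a bilinear RectBivariateSpline.
# A minimal sketch, written in (row, col) axis order for clarity:
import numpy as np
from scipy.interpolate import RectBivariateSpline

ny, nx = 32, 48                          # coarse noise grid
tex_shape = (512, 768)                   # full texture resolution
gy, gx = np.linspace(0, 1, ny), np.linspace(0, 1, nx)
e0 = np.random.default_rng(0).normal(0, 0.05, (ny, nx))

interp0 = RectBivariateSpline(gy, gx, e0, kx=1, ky=1)
y = np.linspace(0, 1, tex_shape[0])
x = np.linspace(0, 1, tex_shape[1])
err0 = interp0(y, x)                     # (512, 768), added to tex as tex + err0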
class SF(object): def __init__(self, filename, upsample=1, **kwargs): self.shape = {} self.filename = filename self.set_kwargs(kwargs) self.eqdsk = nova.geqdsk.read(self.filename) self.normalise() # unit normalisation self.set_plasma(self.eqdsk) self.set_boundary(self.eqdsk['rbdry'], self.eqdsk['zbdry']) self.set_flux(self.eqdsk) # calculate flux profiles self.set_TF(self.eqdsk) self.set_current(self.eqdsk) xo_arg = np.argmin(self.zbdry) self.xo = [self.rbdry[xo_arg], self.zbdry[xo_arg]] self.mo = [self.eqdsk['rmagx'], self.eqdsk['zmagx']] self.upsample(upsample) self.get_Xpsi() self.get_Mpsi() self.set_contour() # set cfeild self.get_LFP() #self.get_sol_psi(dSOL=3e-3,Nsol=15,verbose=False) self.rcirc = 0.3 * abs( self.Mpoint[1] - self.Xpoint[1]) # leg search radius self.drcirc = 0.1 * self.rcirc # leg search width self.xlim = self.eqdsk['xlim'] self.ylim = self.eqdsk['ylim'] self.nlim = self.eqdsk['nlim'] def set_kwargs(self, kwargs): for key in kwargs: setattr(self, key, kwargs[key]) def normalise(self): if ('Fiesta' in self.eqdsk['name'] or 'Nova' in self.eqdsk['name']\ or 'disr' in self.eqdsk['name']) and 'CREATE' not in self.eqdsk['name']: self.norm = 1 else: # CREATE self.eqdsk['cpasma'] *= -1 self.norm = 2 * np.pi for key in ['psi', 'simagx', 'sibdry']: self.eqdsk[key] /= self.norm # Webber/loop to Webber/radian for key in ['ffprim', 'pprime']: self.eqdsk[ key] *= self.norm # []/(Webber/loop) to []/(Webber/radian) self.b_scale = 1 # flux function scaling def trim_r(self, rmin=1.5): if self.r[0] == 0: # trim zero radius entries i = np.argmin(abs(self.r - rmin)) self.r = self.r[i:] self.psi = self.psi[i:, :] def eqwrite(self, pf, CREATE=False, prefix='Nova', config=''): if len(config) > 0: name = prefix + '_' + config else: name = prefix if CREATE: # save with create units (Webber/loop, negated Iplasma) name = 'CREATE_format_' + name norm = 2 * np.pi # reformat: webber/loop Ip_dir = -1 # reformat: reverse plasma current psi_offset = self.get_Xpsi()[0] # reformat: boundary psi=0 else: norm, Ip_dir, psi_offset = 1, 1, 0 # no change nc, rc, zc, drc, dzc, Ic = pf.unpack_coils()[:-1] psi_ff = np.linspace(0, 1, self.nr) pad = np.zeros(self.nr) eq = { 'name': name, 'nx': self.nr, 'ny': self.nz, # Number of horizontal and vertical points 'r': self.r, 'z': self.z, # Location of the grid-points 'rdim': self.r[-1] - self.r[0], # Size of the domain in meters 'zdim': self.z[-1] - self.z[0], # Size of the domain in meters 'rcentr': self.eqdsk['rcentr'], # Reference vacuum toroidal field (m, T) 'bcentr': self.eqdsk['bcentr'], # Reference vacuum toroidal field (m, T) 'rgrid1': self.r[0], # R of left side of domain 'zmid': self.z[0] + (self.z[-1] - self.z[0]) / 2, # Z at the middle of the domain 'rmagx': self.Mpoint[0], # Location of magnetic axis 'zmagx': self.Mpoint[1], # Location of magnetic axis 'simagx': float(self.Mpsi) * norm, # Poloidal flux at the axis (Weber / rad) 'sibdry': self.Xpsi * norm, # Poloidal flux at plasma boundary (Weber / rad) 'cpasma': self.eqdsk['cpasma'] * Ip_dir, 'psi': (np.transpose(self.psi).reshape((-1, )) - psi_offset) * norm, # Poloidal flux in Weber/rad on grid points 'fpol': self.Fpsi( psi_ff), # Poloidal current function on uniform flux grid 'ffprim': self.b_scale * self.FFprime(psi_ff) / norm, # "FF'(psi) in (mT)^2/(Weber/rad) on uniform flux grid" 'pprime': self.b_scale * self.Pprime(psi_ff) / norm, # "P'(psi) in (N/m2)/(Weber/rad) on uniform flux grid" 'pressure': pad, # Plasma pressure in N/m^2 on uniform flux grid 'qpsi': pad, # q values on uniform flux grid 
'nbdry': self.nbdry, 'rbdry': self.rbdry, 'zbdry': self.zbdry, # Plasma boundary 'nlim': self.nlim, 'xlim': self.xlim, 'ylim': self.ylim, # first wall 'ncoil': nc, 'rc': rc, 'zc': zc, 'drc': drc, 'dzc': dzc, 'Ic': Ic } # coils eqdir = trim_dir('../../eqdsk') filename = eqdir + '/' + config + '.eqdsk' print('writing eqdsk', filename) nova.geqdsk.write(filename, eq) def write_flux(self): psi_norm = np.linspace(0, 1, self.nr) pprime = self.b_scale * self.Pprime(psi_norm) FFprime = self.b_scale * self.FFprime(psi_norm) with open('../Data/' + self.dataname + '_flux.txt', 'w') as f: f.write( 'psi_norm\tp\' [Pa/(Weber/rad)]\tFF\' [(mT)^2/(Weber/rad)]\n') for psi, p_, FF_ in zip(psi_norm, pprime, FFprime): f.write('{:1.4f}\t\t{:1.4f}\t\t{:1.4f}\n'.format(psi, p_, FF_)) def set_flux(self, eqdsk): F_ff = eqdsk['fpol'] P_ff = eqdsk['pressure'] n = len(F_ff) psi_ff = np.linspace(0, 1, n) F_ff = interp1d(psi_ff, F_ff)(psi_ff) P_ff = interp1d(psi_ff, P_ff)(psi_ff) dF_ff = np.gradient(F_ff, 1 / (n - 1)) dP_ff = np.gradient(P_ff, 1 / (n - 1)) self.Fpsi = interp1d(psi_ff, F_ff) self.dFpsi = interp1d(psi_ff, dF_ff) self.dPpsi = interp1d(psi_ff, dP_ff) FFp = spline(psi_ff, eqdsk['ffprim'], s=1e-5)(psi_ff) Pp = spline(psi_ff, eqdsk['pprime'], s=1e2)(psi_ff) # s=1e5 self.FFprime = interp1d(psi_ff, FFp, fill_value=0, bounds_error=False) self.Pprime = interp1d(psi_ff, Pp, fill_value=0, bounds_error=False) def set_TF(self, eqdsk): for key in ['rcentr', 'bcentr']: setattr(self, key, eqdsk[key]) def set_boundary(self, r, z, n=5e2): self.nbdry = int(n) self.rbdry, self.zbdry = geom.rzSLine(r, z, npoints=n) def set_current(self, eqdsk): for key in ['cpasma']: setattr(self, key, eqdsk[key]) def update_plasma(self, eq): # update requres full separatrix for attr in ['Bspline', 'Pspline', 'Xpsi', 'Mpsi', 'Br', 'Bz', 'LFPr']: if hasattr(self, attr): delattr(self, attr) self.set_plasma(eq) self.get_Xpsi() self.get_Mpsi() self.set_contour() # calculate cfeild self.get_LFP() r, z = self.get_boundary() self.set_boundary(r, z) #self.get_Plimit() # limit plasma extent #self.get_sol_psi() # re-calculate sol_psi def get_Plimit(self): psi = np.zeros(self.nlim) for i, (r, z) in enumerate(zip(self.xlim, self.ylim)): psi[i] = self.Ppoint((r, z)) self.Xpsi = np.max(psi) #i = np.argmax(psi) #self.Xpoint = np.array([self.xlim[i],self.ylim[i]]) def set_plasma(self, eq): for key in ['r', 'z', 'psi']: if key in eq.keys(): setattr(self, key, eq[key]) self.trim_r() self.space() self.Bfeild() def upsample(self, sample): if sample > 1: ''' EQ(self,n=sample*self.n) self.space() ''' from scipy.interpolate import RectBivariateSpline as rbs sample = np.int(np.float(sample)) interp_psi = rbs(self.r, self.z, self.psi) self.nr, self.nz = sample * self.nr, sample * self.nz self.r = np.linspace(self.r[0], self.r[-1], self.nr) self.z = np.linspace(self.z[0], self.z[-1], self.nz) self.psi = interp_psi(self.r, self.z, dx=0, dy=0) self.space() def space(self): self.nr = len(self.r) self.nz = len(self.z) self.n = self.nr * self.nz self.dr = (self.r[-1] - self.r[0]) / (self.nr - 1) self.dz = (self.z[-1] - self.z[0]) / (self.nz - 1) self.r2d, self.z2d = np.meshgrid(self.r, self.z, indexing='ij') def Bfeild(self): psi_r, psi_z = np.gradient(self.psi, self.dr, self.dz) rm = np.array(np.matrix(self.r).T * np.ones([1, self.nz])) rm[rm == 0] = 1e-34 self.Br = -psi_z / rm self.Bz = psi_r / rm def Bpoint(self, point, check_bounds=False): # magnetic feild at point feild = np.zeros(2) # function re-name (was Bcoil) if not hasattr(self, 'Bspline'): self.Bspline = [[], 
[]] self.Bspline[0] = RectBivariateSpline(self.r, self.z, self.Br) self.Bspline[1] = RectBivariateSpline(self.r, self.z, self.Bz) if check_bounds: inbound = point[0]>=np.min(self.r) and point[0]<=np.max(self.r) \ and point[1]>=np.min(self.z) and point[1]<=np.max(self.z) return inbound else: for i in range(2): feild[i] = self.Bspline[i].ev(point[0], point[1]) return feild def minimum_feild(self, radius, theta): R = radius * np.sin(theta) + self.Xpoint[0] Z = radius * np.cos(theta) + self.Xpoint[1] B = np.zeros(len(R)) for i, (r, z) in enumerate(zip(R, Z)): feild = self.Bpoint((r, z)) B[i] = np.sqrt(feild[0]**2 + feild[1]**2) return np.argmin(B) def Ppoint(self, point): # was Pcoil if not hasattr(self, 'Pspline'): self.Pspline = RectBivariateSpline(self.r, self.z, self.psi) psi = self.Pspline.ev(point[0], point[1]) return psi def contour(self, Nstd=1.5, Nlevel=31, Xnorm=True, lw=1, plot_vac=True, boundary=True, **kwargs): alpha, lw = np.array([1, 0.5]), lw * np.array([2.25, 1.75]) if boundary: r, z = self.get_boundary(1 - 1e-3) pl.plot(r, z, linewidth=lw[0], color=0.75 * np.ones(3)) self.set_boundary(r, z) if not hasattr(self, 'Xpsi'): self.get_Xpsi() if not hasattr(self, 'Mpsi'): self.get_Mpsi() if 'levels' not in kwargs.keys(): dpsi = 0.01 * (self.Xpsi - self.Mpsi) level, n = [self.Mpsi + dpsi, self.Xpsi - dpsi], 17 level, n = [ np.mean(self.psi) - Nstd * np.std(self.psi), np.mean(self.psi) + Nstd * np.std(self.psi) ], 15 level, n = [-Nstd * np.std(self.psi), Nstd * np.std(self.psi)], Nlevel if Nstd*np.std(self.psi) < self.Mpsi-self.Xpsi and \ self.z.max() > self.Mpoint[1]: Nstd = (self.Mpsi - self.Xpsi) / np.std(self.psi) level, n = [-Nstd * np.std(self.psi), Nstd * np.std(self.psi)], Nlevel levels = np.linspace(level[0], level[1], n) linetype = '-' else: levels = kwargs['levels'] linetype = '-' if 'color' in kwargs.keys(): color = kwargs['color'] else: color = 'k' if 'linetype' in kwargs.keys(): linetype = kwargs['linetype'] if color == 'k': alpha *= 0.25 if Xnorm: levels = levels + self.Xpsi contours = self.get_contour(levels) for psi_line, level in zip(contours, levels): if Xnorm: level = level - self.Xpsi for line in psi_line: r, z = line[:, 0], line[:, 1] if self.inPlasma(r, z) and boundary: pindex = 0 else: pindex = 1 if (not plot_vac and pindex == 0) or plot_vac: pl.plot(r, z, linetype, linewidth=lw[pindex], color=color, alpha=alpha[pindex]) #if boundary: # pl.plot(self.rbdry,self.zbdry,linetype,linewidth=lw[pindex], # color=color,alpha=alpha[pindex]) pl.axis('equal') pl.axis('off') return levels def inPlasma(self, R, Z, delta=0): return R.min()>=self.rbdry.min()-delta and \ R.max()<=self.rbdry.max()+delta and \ Z.min()>=self.zbdry.min()-delta and \ Z.max()<=self.zbdry.max()+delta def plot_cs(self, cs, norm, Plasma=False, color='k', pcolor='w', linetype='-'): alpha = np.array([1, 0.5]) lw = 0.75 if not Plasma: norm = 0 if color == 'k': alpha *= 0.25 for p in cs.get_paths(): v = p.vertices R, Z, delta = v[:, 0][:], v[:, 1][:], 0.5 inPlasma = R.min()>=self.rbdry.min()-delta and \ R.max()<=self.rbdry.max()+delta and \ Z.min()>=self.zbdry.min()-delta and \ Z.max()<=self.zbdry.max()+delta if inPlasma: pl.plot(R, Z, linetype, linewidth=1.25 * lw, color=norm * np.array([1, 1, 1]), alpha=alpha[0]) else: pl.plot(R, Z, linetype, linewidth=lw, color=color, alpha=alpha[1]) def Bcontour(self, axis, Nstd=1.5, color='r'): var = 'B' + axis if not hasattr(self, var): self.Bfeild() B = getattr(self, var) level = [np.mean(B) - Nstd * np.std(B), np.mean(B) + Nstd * np.std(B)] CS = pl.contour(self.r, 
self.z, B, levels=np.linspace(level[0], level[1], 30), colors=color) for cs in CS.collections: cs.set_linestyle('solid') def Bquiver(self): if not hasattr(self, 'Br'): self.Bfeild() pl.quiver(self.r, self.z, self.Br.T, self.Bz.T) def Bsf(self): if not hasattr(self, 'Br'): self.Bfeild() pl.streamplot(self.r, self.z, self.Br.T, self.Bz.T, color=self.Br.T, cmap=pl.cm.RdBu) pl.clim([-1.5, 1.5]) #pl.colorbar(strm.lines) def getX(self, xo=None): def feild(x): B = self.Bpoint(x) return sum(B * B)**0.5 res = minimize(feild, np.array(xo), method='nelder-mead', options={ 'xtol': 1e-7, 'disp': False }) return res.x def get_Xpsi(self, xo=None, select='primary'): if xo is None: if hasattr(self, 'xo'): xo = self.xo else: xo_arg = np.argmin(self.eqdsk['zbdry']) xo = [self.eqdsk['rbdry'][xo_arg], self.eqdsk['zbdry'][xo_arg]] Xpoint = np.zeros((2, 2)) Xpsi = np.zeros(2) for i, flip in enumerate([1, -1]): xo[1] *= flip Xpoint[:, i] = self.getX(xo=xo) Xpsi[i] = self.Ppoint(Xpoint[:, i]) index = np.argsort(Xpoint[1, :]) Xpoint = Xpoint[:, index] Xpsi = Xpsi[index] if select == 'lower': i = 0 # lower Xpoint elif select == 'upper': i = 1 # upper Xpoint elif select == 'primary': i = np.argmax(Xpsi) # primary Xpoint self.Xerr = Xpsi[1] - Xpsi[0] self.Xpsi = Xpsi[i] self.Xpoint = Xpoint[:, i] self.Xpoint_array = Xpoint if i == 0: xo[1] *= -1 # re-flip if self.Xpoint[1] < self.mo[1]: self.Xloc = 'lower' else: self.Xloc = 'upper' return (self.Xpsi, self.Xpoint) def getM(self, mo=None): if mo is None: mo = self.mo def psi(m): return -self.Ppoint(m) res = minimize(psi, np.array(mo), method='nelder-mead', options={ 'xtol': 1e-7, 'disp': False }) return res.x def get_Mpsi(self, mo=None): self.Mpoint = self.getM(mo=mo) self.Mpsi = self.Ppoint(self.Mpoint) return (self.Mpsi, self.Mpoint) def remove_contour(self): for key in ['cfeild', 'cfeild_bndry']: if hasattr(self, key): delattr(self, key) def set_contour(self): psi_boundary = 1.1 * (self.Xpsi - self.Mpsi) + self.Mpsi psi_bndry = np.pad(self.psi[1:-1, 1:-1], (1, ), mode='constant', constant_values=psi_boundary) self.cfeild = cntr(self.r2d, self.z2d, self.psi) self.cfeild_bndry = cntr(self.r2d, self.z2d, psi_bndry) def get_contour(self, levels, boundary=False): if boundary: cfeild = lambda level: self.cfeild_bndry.trace(level, level, 0) else: cfeild = lambda level: self.cfeild.trace(level, level, 0) lines = [] for level in levels: psi_line = cfeild(level) psi_line = psi_line[:len(psi_line) // 2] lines.append(psi_line) return lines def get_boundary(self, alpha=1 - 1e-3, delta_loop=0.1, plot=False): self.Spsi = alpha * (self.Xpsi - self.Mpsi) + self.Mpsi psi_line = self.get_contour([self.Spsi], boundary=True)[0] R, Z = np.array([]), np.array([]) for line in psi_line: r, z = line[:, 0], line[:, 1] if self.Xloc == 'lower': # lower Xpoint index = z >= self.Xpoint[1] elif self.Xloc == 'upper': # upper Xpoint index = z <= self.Xpoint[1] if sum(index) > 0: r, z = r[index], z[index] loop = np.sqrt((r[0] - r[-1])**2 + (z[0] - z[-1])**2) < delta_loop if (z > self.Mpoint[1]).any() and ( z < self.Mpoint[1]).any() and loop: R, Z = np.append(R, r), np.append(Z, z) R, Z = geom.clock(R, Z) if plot: pl.plot(R, Z) return R, Z def get_sep(self, expand=0): # generate boundary dict for elliptic R, Z = self.get_boundary() boundary = {'R': R, 'Z': Z, 'expand': expand} return boundary def get_midplane(self, r, z): def psi_err(r, *args): z = args[0] psi = self.Ppoint((r, z)) return abs(psi - self.Xpsi) res = minimize(psi_err, np.array(r), method='nelder-mead', args=(z), options={ 'xtol': 1e-7, 
'disp': False }) return res.x[0] def get_LFP(self, xo=None, alpha=1 - 1e-3): r, z = self.get_boundary(alpha=alpha) if self.Xpoint[1] < self.Mpoint[1]: index = z > self.Xpoint[1] else: # alowance for upper Xpoint index = z < self.Xpoint[1] r_loop, z_loop = r[index], z[index] rc, zc = self.Mpoint radius = ((r_loop - rc)**2 + (z_loop - zc)**2)**0.5 theta = np.arctan2(z_loop - zc, r_loop - rc) index = theta.argsort() radius, theta = radius[index], theta[index] theta = np.append(theta[-1] - 2 * np.pi, theta) radius = np.append(radius[-1], radius) r = rc + radius * np.cos(theta) z = zc + radius * np.sin(theta) fLFSr = interp1d(theta, r) fLFSz = interp1d(theta, z) self.LFPr, self.LFPz = fLFSr(0), fLFSz(0) self.LFPr = self.get_midplane(self.LFPr, self.LFPz) self.HFPr, self.HFPz = fLFSr(-np.pi), fLFSz(-np.pi) self.HFPr = self.get_midplane(self.HFPr, self.HFPz) self.shape['R'] = np.mean([self.HFPr, self.LFPr]) self.shape['a'] = (self.LFPr - self.HFPr) / 2 self.shape['AR'] = self.shape['R'] / self.shape['a'] return (self.LFPr, self.LFPz, self.HFPr, self.HFPz) def first_wall_psi(self, trim=True, single_contour=False, **kwargs): if 'point' in kwargs: req, zeq = kwargs.get('point') psi = self.Ppoint([req, zeq]) else: req, zeq = self.LFPr, self.LFPz if 'psi_n' in kwargs: # normalized psi psi_n = kwargs.get('psi_n') psi = psi_n * (self.Xpsi - self.Mpsi) + self.Mpsi elif 'psi' in kwargs: psi = kwargs.get('psi') else: raise ValueError('set point=(r,z) or psi in kwargs') contours = self.get_contour([psi]) R, Z = self.pick_contour(contours, Xpoint=False) if single_contour: min_contour = np.empty(len(R)) for i in range(len(R)): min_contour[i] = np.min((R[i] - req)**2 + (Z[i] - zeq)**2) imin = np.argmin(min_contour) r, z = R[imin], Z[imin] else: r, z = np.array([]), np.array([]) for i in range(len(R)): r = np.append(r, R[i]) z = np.append(z, Z[i]) if trim: if self.Xloc == 'lower': r, z = r[z <= zeq], z[z <= zeq] elif self.Xloc == 'upper': r, z = r[z >= zeq], z[z >= zeq] else: raise ValueError('Xloc not set (get_Xpsi)') if req > self.Xpoint[0]: r, z = r[r > self.Xpoint[0]], z[r > self.Xpoint[0]] else: r, z = r[r < self.Xpoint[0]], z[r < self.Xpoint[0]] istart = np.argmin((r - req)**2 + (z - zeq)**2) r = np.append(r[istart + 1:], r[:istart]) z = np.append(z[istart + 1:], z[:istart]) istart = np.argmin((r - req)**2 + (z - zeq)**2) if istart > 0: r, z = r[::-1], z[::-1] return r, z, psi def firstwall_loop(self, plot=False, **kwargs): if not hasattr(self, 'LFPr'): self.get_LFP() if 'psi_n' in kwargs: r, z, psi = self.first_wall_psi(psi_n=kwargs['psi_n'], trim=False) psi_lfs = psi_hfs = psi elif 'dr' in kwargs: # geometric offset dr = kwargs.get('dr') LFfwr, LFfwz = self.LFPr + dr, self.LFPz HFfwr, HFfwz = self.HFPr - dr, self.HFPz r_lfs, z_lfs, psi_lfs = self.first_wall_psi(point=(LFfwr, LFfwz)) r_hfs, z_hfs, psi_hfs = self.first_wall_psi(point=(HFfwr, HFfwz)) r_top, z_top = self.get_offset(dr) if self.Xloc == 'lower': r_top, z_top = geom.theta_sort(r_top, z_top, xo=self.xo, origin='top') index = z_top >= self.LFPz else: r_top, z_top = geom.theta_sort(r_top, z_top, xo=self.xo, origin='bottom') index = z_top <= self.LFPz r_top, z_top = r_top[index], z_top[index] istart = np.argmin((r_top - HFfwr)**2 + (z_top - HFfwz)**2) if istart > 0: r_top, z_top = r_top[::-1], z_top[::-1] r = np.append(r_hfs[::-1], r_top) r = np.append(r, r_lfs) z = np.append(z_hfs[::-1], z_top) z = np.append(z, z_lfs) else: errtxt = 'requre \'psi_n\' or \'dr\' in kwargs' raise ValueError(errtxt) if plot: pl.plot(r, z) return r[::-1], z[::-1], 
(psi_lfs, psi_hfs) def get_offset(self, dr, Nsub=0): rpl, zpl = self.get_boundary() # boundary points rpl, zpl = geom.offset(rpl, zpl, dr) # offset from sep if Nsub > 0: # sub-sample rpl, zpl = geom.rzSLine(rpl, zpl, Nsub) return rpl, zpl def midplane_loop(self, r, z): index = np.argmin((r - self.LFPr)**2 + (z - self.LFPz)**2) if z[index] <= self.LFPz: index -= 1 r = np.append(r[:index + 1][::-1], r[index:][::-1]) z = np.append(z[:index + 1][::-1], z[index:][::-1]) L = geom.length(r, z) index = np.append(np.diff(L) != 0, True) r, z = r[index], z[index] # remove duplicates return r, z def get_sol_psi(self, verbose=False, **kwargs): for var in ['dSOL', 'Nsol']: if var in kwargs: setattr(self, var, kwargs[var]) if verbose: print('calculating sol psi', self.Nsol, self.dSOL) self.get_LFP() self.Dsol = np.linspace(0, self.dSOL, self.Nsol) r = self.LFPr + self.Dsol z = self.LFPz * np.ones(len(r)) self.sol_psi = np.zeros(len(r)) for i, (rp, zp) in enumerate(zip(r, z)): self.sol_psi[i] = self.Ppoint([rp, zp]) def upsample_sol(self, nmult=10): k = 1 # smoothing factor for i, (r, z) in enumerate(zip(self.Rsol, self.Zsol)): l = geom.length(r, z) L = np.linspace(0, 1, nmult * len(l)) self.Rsol[i] = sinterp(l, r, k=k)(L) self.Zsol[i] = sinterp(l, z, k=k)(L) def sol(self, dr=3e-3, Nsol=5, plot=False, update=False, debug=False): # dr [m] if update or not hasattr(self,'sol_psi') or dr > self.dSOL\ or Nsol > self.Nsol: self.get_sol_psi(dSOL=dr, Nsol=Nsol) # re-calculcate LFP elif (Nsol>0 and Nsol != self.Nsol) or \ (dr>0 and dr != self.dSOL): # update if dr > 0: self.dSOL = dr if Nsol > 0: self.Nsol = Nsol Dsol = np.linspace(0, self.dSOL, self.Nsol) self.sol_psi = interp1d(self.Dsol, self.sol_psi)(Dsol) self.Dsol = Dsol contours = self.get_contour(self.sol_psi) self.Rsol, self.Zsol = self.pick_contour(contours, Xpoint=True, Midplane=False, Plasma=False) self.upsample_sol(nmult=4) # upsamle self.get_legs(debug=debug) if plot: color = sns.color_palette('Set2', 6) for c, leg in enumerate(['inner', 'outer']): #enumerate(self.legs): for i in np.arange(self.legs[leg]['i'])[::-1]: r, z = self.snip(leg, i) r, z = self.legs[leg]['R'][i], self.legs[leg]['Z'][i] pl.plot(r, z, color=color[c], linewidth=0.5) def add_core(self): # refarance from low-feild midplane for i in range(self.Nsol): for leg in [ 'inner', 'inner1', 'inner2', 'outer', 'outer1', 'outer2' ]: if leg in self.legs: if 'inner' in leg: core = 'core1' else: core = 'core2' Rc = self.legs[core]['R'][i][:-1] Zc = self.legs[core]['Z'][i][:-1] self.legs[leg]['R'][i] = np.append(Rc, self.legs[leg]['R'][i]) self.legs[leg]['Z'][i] = np.append(Zc, self.legs[leg]['Z'][i]) def orientate(self, R, Z): if R[-1] > R[0]: # counter clockwise R = R[::-1] Z = Z[::-1] return R, Z def pick_contour(self, contours, Xpoint=False, Midplane=True, Plasma=False): Rs = [] Zs = [] Xp, Mid, Pl = True, True, True for psi_line in contours: for line in psi_line: R, Z = line[:, 0], line[:, 1] if Xpoint: # check Xpoint proximity rX = np.sqrt((R - self.Xpoint[0])**2 + (Z - self.Xpoint[1])**2) if (min(rX) < self.rcirc): Xp = True else: Xp = False if Midplane: # check lf midplane crossing if (np.max(Z) > self.LFPz) and (np.min(Z) < self.LFPz): Mid = True else: Mid = False if Plasma: if (np.max(R) < np.max(self.rbdry)) and\ (np.min(R) > np.min(self.rbdry)) and\ (np.max(Z) < np.max(self.zbdry)) and\ (np.min(Z) > np.min(self.zbdry)): Pl = True else: Pl = False if Xp and Mid and Pl: R, Z = self.orientate(R, Z) Rs.append(R) Zs.append(Z) return Rs, Zs def topolar(self, R, Z): x, y = R, Z r = 
np.sqrt((x - self.Xpoint[0])**2 + (y - self.Xpoint[1])**2) if self.Xloc == 'lower': t = np.arctan2(x - self.Xpoint[0], y - self.Xpoint[1]) elif self.Xloc == 'upper': t = np.arctan2(x - self.Xpoint[0], self.Xpoint[1] - y) else: raise ValueError('Xloc not set (get_Xpsi)') return r, t def store_leg(self, rloop, tloop): if np.argmin(rloop) > len(rloop) / 2: # point legs out rloop, tloop = rloop[::-1], tloop[::-1] ncirc = np.argmin(abs(rloop - self.rcirc)) tID = np.argmin(abs(tloop[ncirc] - self.tleg)) legID = self.tID[tID] if self.nleg == 6: if legID <= 1: label = 'inner' + str(legID + 1) elif legID >= 4: label = 'outer' + str(legID - 3) elif legID == 2: label = 'core1' elif legID == 3: label = 'core2' else: label = '' else: if legID == 0: label = 'inner' elif legID == 3: label = 'outer' elif legID == 1: label = 'core1' elif legID == 2: label = 'core2' else: label = '' if label: i = self.legs[label]['i'] R = rloop * np.sin(tloop) + self.Xpoint[0] if self.Xloc == 'lower': Z = rloop * np.cos(tloop) + self.Xpoint[1] elif self.Xloc == 'upper': Z = -rloop * np.cos(tloop) + self.Xpoint[1] else: raise ValueError('Xloc not set (get_Xpsi)') if i > 0: if R[0]**2 + Z[0]**2 == (self.legs[label]['R'][i - 1][0]**2 + self.legs[label]['Z'][i - 1][0]**2): i -= 1 if 'core' in label: R, Z = R[::-1], Z[::-1] self.legs[label]['R'][i] = R self.legs[label]['Z'][i] = Z self.legs[label]['i'] = i + 1 def min_L2D(self, targets): L2D = np.zeros(len(targets.keys())) for i, target in enumerate(targets.keys()): L2D[i] = targets[target]['L2D'][0] return L2D.min() def check_legs(self): if self.sf.z.min() > self.sf.Xpoint[1] - self.sf.rcirc: print('grid out of bounds') def get_legs(self, debug=False): if debug: theta = np.linspace(-np.pi, np.pi, 100) r = (self.rcirc - self.drcirc / 2) * np.cos(theta) z = (self.rcirc - self.drcirc / 2) * np.sin(theta) pl.plot(r + self.Xpoint[0], z + self.Xpoint[1], 'k--', alpha=0.5) r = (self.rcirc + self.drcirc / 2) * np.cos(theta) z = (self.rcirc + self.drcirc / 2) * np.sin(theta) pl.plot(r + self.Xpoint[0], z + self.Xpoint[1], 'k--', alpha=0.5) self.tleg = np.array([]) for N in range(len(self.Rsol)): r, t = self.topolar(self.Rsol[N], self.Zsol[N]) index = (r > self.rcirc - self.drcirc / 2) & ( r < self.rcirc + self.drcirc / 2) self.tleg = np.append(self.tleg, t[index]) nbin = 50 nhist, bins = np.histogram(self.tleg, bins=nbin) flag, self.nleg, self.tleg = 0, 0, np.array([]) for i in range(len(nhist)): if nhist[i] > 0: if flag == 0: tstart = bins[i] tend = bins[i] flag = 1 if flag == 1: tend = bins[i] elif flag == 1: self.tleg = np.append(self.tleg, (tstart + tend) / 2) self.nleg += 1 flag = 0 else: flag = 0 if nhist[-1] > 0: tend = bins[-1] self.tleg = np.append(self.tleg, (tstart + tend) / 2) self.nleg += 1 if self.nleg == 6: # snow flake self.legs = {\ 'inner1':{'R':[[] for i in range(self.Nsol)], 'Z':[[] for i in range(self.Nsol)],'i':0}, 'inner2':{'R':[[] for i in range(self.Nsol)], 'Z':[[] for i in range(self.Nsol)],'i':0}, 'outer1':{'R':[[] for i in range(self.Nsol)], 'Z':[[] for i in range(self.Nsol)],'i':0}, 'outer2':{'R':[[] for i in range(self.Nsol)], 'Z':[[] for i in range(self.Nsol)],'i':0}, 'core1':{'R':[[] for i in range(self.Nsol)], 'Z':[[] for i in range(self.Nsol)],'i':0}, 'core2':{'R':[[] for i in range(self.Nsol)], 'Z':[[] for i in range(self.Nsol)],'i':0}} else: self.legs = {\ 'inner':{'R':[[] for i in range(self.Nsol)], 'Z':[[] for i in range(self.Nsol)],'i':0}, 'outer':{'R':[[] for i in range(self.Nsol)], 'Z':[[] for i in range(self.Nsol)],'i':0}, 'core1':{'R':[[] for i 
in range(self.Nsol)], 'Z':[[] for i in range(self.Nsol)],'i':0}, 'core2':{'R':[[] for i in range(self.Nsol)], 'Z':[[] for i in range(self.Nsol)],'i':0}} self.legs = OrderedDict(sorted(self.legs.items(), key=lambda t: t[0])) if self.nleg == 0: err_txt = 'legs not found\n' raise ValueError(err_txt) self.tID = np.arange(self.nleg) self.tID = np.append(self.nleg - 1, self.tID) self.tID = np.append(self.tID, 0) self.tleg = np.append(-np.pi - (np.pi - self.tleg[-1]), self.tleg) self.tleg = np.append(self.tleg, np.pi + (np.pi + self.tleg[1])) for N in range(len(self.Rsol)): ends, ro = [0, -1], np.zeros(2) for i in ends: ro[i] = np.sqrt(self.Rsol[N][i]**2 + self.Zsol[N][i]**2) r, t = self.topolar(self.Rsol[N], self.Zsol[N]) post = False rpost, tpost = 0, 0 if ro[0] == ro[-1]: # cut loops if np.min(r * np.cos(t)) > self.drcirc - self.rcirc: nmax = np.argmax(r * np.sin(t)) # LF else: nmax = np.argmin(r * np.cos(t)) # minimum z r = np.append(r[nmax:], r[:nmax]) t = np.append(t[nmax:], t[:nmax]) while len(r) > 0: if r[0] > self.rcirc: if np.min(r) < self.rcirc: ncut = np.arange(len(r))[r < self.rcirc][0] rloop, tloop = r[:ncut], t[:ncut] loop = False else: ncut = -1 rloop, tloop = r, t loop = True if post: rloop, tloop = np.append(rpost, rloop), np.append( tpost, tloop) else: ncut = np.arange(len(r))[r > self.rcirc][0] rin, tin = r[:ncut], t[:ncut] nx = self.minimum_feild(rin, tin) # minimum feild rpre, tpre = rin[:nx + 1], tin[:nx + 1] rpost, tpost = rin[nx:], tin[nx:] loop = True post = True rloop, tloop = np.append(rloop, rpre), np.append(tloop, tpre) if loop: if rloop[0] < self.rcirc and rloop[-1] < self.rcirc: if np.min(rloop * np.cos(tloop)) > self.drcirc - self.rcirc: nmax = np.argmax(rloop * np.sin(tloop)) # LF else: nmax = np.argmax(rloop) self.store_leg(rloop[:nmax], tloop[:nmax]) self.store_leg(rloop[nmax:], tloop[nmax:]) else: self.store_leg(rloop, tloop) if ncut == -1: r, t = [], [] else: r, t = r[ncut:], t[ncut:] def strike_point(self, Xi, graze): ratio = np.sin(graze) * np.sqrt(Xi[-1]**2 + 1) if np.abs(ratio) > 1: theta = np.sign(ratio) * np.pi else: theta = np.arcsin(ratio) return theta def snip(self, leg, layer_index=0, L2D=0): if not hasattr(self, 'Rsol'): self.sol() Rsol = self.legs[leg]['R'][layer_index] Zsol = self.legs[leg]['Z'][layer_index] Lsol = geom.length(Rsol, Zsol, norm=False) if L2D == 0: L2D = Lsol[-1] if layer_index != 0: Rsolo = self.legs[leg]['R'][0] Zsolo = self.legs[leg]['Z'][0] Lsolo = geom.length(Rsolo, Zsolo, norm=False) indexo = np.argmin(np.abs(Lsolo - L2D)) index = np.argmin((Rsol - Rsolo[indexo])**2 + (Zsol - Zsolo[indexo])**2) L2D = Lsol[index] else: index = np.argmin(np.abs(Lsol - L2D)) if Lsol[index] > L2D: index -= 1 if L2D > Lsol[-1]: L2D = Lsol[-1] print('warning: targent requested outside grid') Rend, Zend = interp1d(Lsol, Rsol)(L2D), interp1d(Lsol, Zsol)(L2D) Rsol, Zsol = Rsol[:index], Zsol[:index] # trim to strike point Rsol, Zsol = np.append(Rsol, Rend), np.append(Zsol, Zend) return (Rsol, Zsol) def pick_leg(self, leg, layer_index): R = self.legs[leg]['R'][layer_index] Z = self.legs[leg]['Z'][layer_index] return R, Z def Xtrim(self, Rsol, Zsol): Xindex = np.argmin((self.Xpoint[0] - Rsol)**2 + (self.Xpoint[1] - Zsol)**2) if (Rsol[-1]-Rsol[Xindex])**2+(Zsol[-1]-Zsol[Xindex])**2 <\ (Rsol[0]-Rsol[Xindex])**2+(Zsol[0]-Zsol[Xindex])**2: Rsol = Rsol[:Xindex] # trim to Xpoints Zsol = Zsol[:Xindex] Rsol = Rsol[::-1] Zsol = Zsol[::-1] else: Rsol = Rsol[Xindex:] # trim to Xpoints Zsol = Zsol[Xindex:] return (Rsol, Zsol) def get_graze(self, point, target): T = 
target / np.sqrt(target[0]**2 + target[1]**2) # target vector B = self.Bpoint([point[0], point[1]]) B /= np.sqrt(B[0]**2 + B[1]**2) # poloidal feild line vector theta = np.arccos(np.dot(B, T)) if theta > np.pi / 2: theta = np.pi - theta Xi = self.expansion([point[0]], [point[1]]) graze = np.arcsin(np.sin(theta) * (Xi[-1]**2 + 1)**-0.5) return graze def get_max_graze(self, r, z): theta = np.pi / 2 # normal target, maximum grazing angle Xi = self.expansion([r], [z]) graze = np.arcsin(np.sin(theta) * (Xi[-1]**2 + 1)**-0.5) return graze def expansion(self, Rsol, Zsol): Xi = np.array([]) Bm = np.abs(self.bcentr * self.rcentr) for r, z in zip(Rsol, Zsol): B = self.Bpoint([r, z]) Bp = np.sqrt(B[0]**2 + B[1]**2) # polodial feild Bphi = Bm / r # torodal field Xi = np.append(Xi, Bphi / Bp) # feild expansion return Xi def connection(self, leg, layer_index, L2D=0): if L2D > 0: # trim targets to L2D Rsol, Zsol = self.snip(leg, layer_index, L2D) else: # rb.trim_sol to trim to targets Rsol = self.legs[leg]['R'][layer_index] Zsol = self.legs[leg]['Z'][layer_index] Lsol = geom.length(Rsol, Zsol) index = np.append(np.diff(Lsol) != 0, True) Rsol, Zsol = Rsol[index], Zsol[index] # remove duplicates if len(Rsol) < 2: L2D, L3D = [0], [0] else: dRsol = np.diff(Rsol) dZsol = np.diff(Zsol) L2D = np.append(0, np.cumsum(np.sqrt(dRsol**2 + dZsol**2))) dTsol = np.array([]) Xi = self.expansion(Rsol, Zsol) for r, dr, dz, xi in zip(Rsol[1:], dRsol, dZsol, Xi): dLp = np.sqrt(dr**2 + dz**2) dLphi = xi * dLp dTsol = np.append(dTsol, dLphi / (r + dr / 2)) L3D = np.append( 0, np.cumsum(dTsol * np.sqrt((dRsol / dTsol)**2 + (dZsol / dTsol)**2 + (Rsol[:-1])**2))) return L2D, L3D, Rsol, Zsol def shape_parameters(self, plot=False): self.get_LFP() r95, z95 = self.get_boundary(alpha=0.95) ru = r95[np.argmax(z95)] # triangularity rl = r95[np.argmin(z95)] self.shape['del_u'] = (self.shape['R'] - ru) / self.shape['a'] self.shape['del_l'] = (self.shape['R'] - rl) / self.shape['a'] self.shape['kappa'] = (np.max(z95) - np.min(z95)) / (2 * self.shape['a']) r, z = self.get_boundary(alpha=1 - 1e-4) r, z = geom.clock(r, z, reverse=True) self.shape['V'] = loop_vol(r, z, plot=plot) return self.shape
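# --- Illustration (synthetic flux map, no geqdsk file required): Ppoint and
# Bpoint above boil down to wrapping psi(r, z) in a RectBivariateSpline and
# evaluating it (or splines of the derived Br, Bz) point-wise with .ev.
# A minimal sketch of that pattern:
import numpy as np
from scipy.interpolate import RectBivariateSpline

r = np.linspace(1.0, 3.0, 65)
z = np.linspace(-2.0, 2.0, 129)
r2d, z2d = np.meshgrid(r, z, indexing='ij')
psi = (r2d - 2.0)**2 + 0.5 * z2d**2          # toy nested flux surfaces

Pspline = RectBivariateSpline(r, z, psi)
print(Pspline.ev(2.1, 0.3))                  # psi at a single (r, z) point

dr, dz = r[1] - r[0], z[1] - z[0]
psi_r, psi_z = np.gradient(psi, dr, dz)
Br, Bz = -psi_z / r2d, psi_r / r2d           # same convention as Bfeild()
Bspline = [RectBivariateSpline(r, z, Br), RectBivariateSpline(r, z, Bz)]
print([s.ev(2.1, 0.3) for s in Bspline])     # poloidal field at the point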
def Ppoint(self, point): # was Pcoil if not hasattr(self, 'Pspline'): self.Pspline = RectBivariateSpline(self.r, self.z, self.psi) psi = self.Pspline.ev(point[0], point[1]) return psi
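# Minimal sketch (assumed, not from the original source) of the lazy psi-spline
# pattern used in Ppoint(): build RectBivariateSpline(r, z, psi) once, then
# evaluate pointwise with .ev(). r, z and psi here are synthetic placeholders.
import numpy as np
from scipy.interpolate import RectBivariateSpline

r = np.linspace(1.0, 9.0, 65)
z = np.linspace(-5.0, 5.0, 129)
R, Z = np.meshgrid(r, z, indexing='ij')
psi = np.exp(-((R - 5.0)**2 + Z**2) / 4.0)   # toy flux map, shape (r.size, z.size)

Pspline = RectBivariateSpline(r, z, psi)
print(Pspline.ev(5.0, 0.0))                  # psi at a single (r, z) point
print(Pspline.ev([4.0, 6.0], [0.5, -0.5]))   # vectorised pointwise evaluation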
R0 = np.sqrt(X0**2 + Y0**2) Window = 0 * R0 Window[R0 <= dx1 / 2.] = 1 Window = Window / np.sum(Window) # figure(10); imagesc(Window); axis xy equal tight; colormap jet; colorbar; #--To get good grayscale edges, convolve with the correct window before downsampling. FPM0 = np.fft.ifftshift( np.fft.ifft2( np.fft.fft2(np.fft.fftshift(Window)) * np.fft.fft2(np.fft.fftshift(FPM0)))) FPM0 = np.roll(FPM0, (1, 1), axis=(0, 1)) #--Undo a centering shift x1 = np.arange(-(N1 - 1) / 2., (N1 - 1) / 2. + 1) * dx1 # (-(N1-1)/2:(N1-1)/2)*dx1; # [X1, Y1] = np.meshgrid(x1, x1) FPM0 = np.real(FPM0) interp_spline = RectBivariateSpline( x0, x0, FPM0) # RectBivariateSpline is faster in 2-D than interp2d FPM1 = interp_spline(x1, x1) # FPM1 = interp2(X0, Y0, FPM0, X1, Y1, 'cubic', 0); #--Downsample by interpolation if mp.centering == 'pixel': mp.F3.compact.ampMask = np.zeros((N1 + 1, N1 + 1)) mp.F3.compact.ampMask[1::, 1::] = FPM1 elif mp.centering == 'interpixel': mp.F3.compact.ampMask = FPM1 # figure(2); imagesc(FPM0); axis xy equal tight; colormap jet; colorbar; # figure(3); imagesc(FPM1); axis xy equal tight; colormap jet; colorbar; # figure(12); imagesc(FPM0-rot90(FPM0,2)); axis xy equal tight; colormap jet; colorbar; # figure(13); imagesc(FPM1-rot90(FPM1,2)); axis xy equal tight; colormap jet; colorbar; # %# Optical Layout: Full Model
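# Hedged sketch (not the routine above) of the same downsampling idea: low-pass
# the fine-grid mask with a pixel-sized window, then evaluate a
# RectBivariateSpline on the coarser output grid. Grid sizes here are made up.
import numpy as np
from scipy.ndimage import uniform_filter
from scipy.interpolate import RectBivariateSpline

N0, dx0 = 256, 1.0                     # fine grid
N1, dx1 = 64, 4.0                      # coarse grid (4x downsampling)
x0 = (np.arange(N0) - (N0 - 1) / 2.0) * dx0
x1 = (np.arange(N1) - (N1 - 1) / 2.0) * dx1

X0d, Y0d = np.meshgrid(x0, x0, indexing='ij')
mask0 = (np.sqrt(X0d**2 + Y0d**2) < 80.0).astype(float)    # toy binary mask

mask0_smooth = uniform_filter(mask0, size=int(dx1 / dx0))  # anti-alias window
spline = RectBivariateSpline(x0, x0, mask0_smooth)
mask1 = spline(x1, x1)                 # grayscale-edged, downsampled mask
print(mask0.shape, '->', mask1.shape)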
def analyse_equil ( F, R, Z): s = numpy.shape(F) nx = s[0] ny = s[1] #;;;;;;;;;;;;;;; Find critical points ;;;;;;;;;;;;; # # Need to find starting locations for O-points (minima/maxima) # and X-points (saddle points) # Rr=numpy.tile(R,nx).reshape(nx,ny).T Zz=numpy.tile(Z,ny).reshape(nx,ny) contour1=contour(Rr,Zz,gradient(F)[0], levels=[0.0], colors='r') contour2=contour(Rr,Zz,gradient(F)[1], levels=[0.0], colors='r') draw() ### 1st method - line crossings --------------------------- res=find_inter( contour1, contour2) #rex1=numpy.interp(res[0], R, numpy.arange(R.size)).astype(int) #zex1=numpy.interp(res[1], Z, numpy.arange(Z.size)).astype(int) rex1=res[0] zex1=res[1] w=numpy.where((rex1 > R[2]) & (rex1 < R[nx-3]) & (zex1 > Z[2]) & (zex1 < Z[nx-3])) nextrema = numpy.size(w) rex1=rex1[w].flatten() zex1=zex1[w].flatten() ### 2nd method - local maxima_minima ----------------------- res1=local_min_max.detect_local_minima(F) res2=local_min_max.detect_local_maxima(F) res=numpy.append(res1,res2,1) rex2=res[0,:].flatten() zex2=res[1,:].flatten() w=numpy.where((rex2 > 2) & (rex2 < nx-3) & (zex2 >2) & (zex2 < nx-3)) nextrema = numpy.size(w) rex2=rex2[w].flatten() zex2=zex2[w].flatten() n_opoint=nextrema n_xpoint=numpy.size(rex1)-n_opoint # Needed for interp below Rx=numpy.arange(numpy.size(R)) Zx=numpy.arange(numpy.size(Z)) print("Number of O-points: "+numpy.str(n_opoint)) print("Number of X-points: "+numpy.str(n_xpoint)) # Deduce the O & X points x=R[rex2] y=Z[zex2] dr=old_div((R[numpy.size(R)-1]-R[0]),numpy.size(R)) dz=old_div((Z[numpy.size(Z)-1]-Z[0]),numpy.size(Z)) repeated=set() for i in range(numpy.size(rex1)): for j in range(numpy.size(x)): if numpy.abs(rex1[i]-x[j]) < 2*dr and numpy.abs(zex1[i]-y[j]) < 2*dz : repeated.add(i) # o-points o_ri=numpy.take(rex1,numpy.array(list(repeated))) opt_ri=numpy.interp(o_ri,R,Rx) o_zi=numpy.take(zex1,numpy.array(list(repeated))) opt_zi=numpy.interp(o_zi,Z,Zx) opt_f=numpy.zeros(numpy.size(opt_ri)) func = RectBivariateSpline(Rx, Zx, F) for i in range(numpy.size(opt_ri)): opt_f[i]=func(opt_ri[i], opt_zi[i]) n_opoint=numpy.size(opt_ri) # x-points x_ri=numpy.delete(rex1, numpy.array(list(repeated))) xpt_ri=numpy.interp(x_ri,R,Rx) x_zi=numpy.delete(zex1, numpy.array(list(repeated))) xpt_zi=numpy.interp(x_zi,Z,Zx) xpt_f=numpy.zeros(numpy.size(xpt_ri)) func = RectBivariateSpline(Rx, Zx, F) for i in range(numpy.size(xpt_ri)): xpt_f[i]=func(xpt_ri[i], xpt_zi[i]) n_xpoint=numpy.size(xpt_ri) # plot o-points plot(o_ri,o_zi,'o', markersize=10) labels = ['{0}'.format(i) for i in range(o_ri.size)] for label, xp, yp in zip(labels, o_ri, o_zi): annotate(label, xy = (xp, yp), xytext = (10, 10), textcoords = 'offset points',size='large', color='b') draw() # plot x-points plot(x_ri,x_zi,'x', markersize=10) labels = ['{0}'.format(i) for i in range(x_ri.size)] for label, xp, yp in zip(labels, x_ri, x_zi): annotate(label, xy = (xp, yp), xytext = (10, 10), textcoords = 'offset points',size='large', color='r') draw() print("Number of O-points: "+str(n_opoint)) if n_opoint == 0 : print("No O-points! 
Giving up on this equilibrium") return Bunch(n_opoint=0, n_xpoint=0, primary_opt=-1) #;;;;;;;;;;;;;; Find plasma centre ;;;;;;;;;;;;;;;;;;; # Find the O-point closest to the middle of the grid mind = (opt_ri[0] - (old_div(numpy.float(nx),2.)))**2 + (opt_zi[0] - (old_div(numpy.float(ny),2.)))**2 ind = 0 for i in range (1, n_opoint) : d = (opt_ri[i] - (old_div(numpy.float(nx),2.)))**2 + (opt_zi[i] - (old_div(numpy.float(ny),2.)))**2 if d < mind : ind = i mind = d primary_opt = ind print("Primary O-point is at "+ numpy.str(numpy.interp(opt_ri[ind],numpy.arange(numpy.size(R)),R)) + ", " + numpy.str(numpy.interp(opt_zi[ind],numpy.arange(numpy.size(Z)),Z))) print("") if n_xpoint > 0 : # Find the primary separatrix # First remove non-monotonic separatrices nkeep = 0 for i in range (n_xpoint) : # Draw a line between the O-point and X-point n = 100 # Number of points farr = numpy.zeros(n) dr = old_div((xpt_ri[i] - opt_ri[ind]), numpy.float(n)) dz = old_div((xpt_zi[i] - opt_zi[ind]), numpy.float(n)) for j in range (n) : # interpolate f at this location func = RectBivariateSpline(Rx, Zx, F) farr[j] = func(opt_ri[ind] + dr*numpy.float(j), opt_zi[ind] + dz*numpy.float(j)) # farr should be monotonic, and shouldn't cross any other separatrices maxind = numpy.argmax(farr) minind = numpy.argmin(farr) if (maxind < minind) : maxind, minind = minind, maxind # Allow a little leeway to account for errors # NOTE: This needs a bit of refining if (maxind > (n-3)) and (minind < 3) : # Monotonic, so add this to a list of x-points to keep if nkeep == 0 : keep = [i] else: keep = numpy.append(keep, i) nkeep = nkeep + 1 if nkeep > 0 : print("Keeping x-points ", keep) xpt_ri = xpt_ri[keep] xpt_zi = xpt_zi[keep] xpt_f = xpt_f[keep] else: "No x-points kept" n_xpoint = nkeep # Now find x-point closest to primary O-point s = numpy.argsort(numpy.abs(opt_f[ind] - xpt_f)) xpt_ri = xpt_ri[s] xpt_zi = xpt_zi[s] xpt_f = xpt_f[s] inner_sep = 0 else: # No x-points. Pick mid-point in f xpt_f = 0.5*(numpy.max(F) + numpy.min(F)) print("WARNING: No X-points. Setting separatrix to F = "+str(xpt_f)) xpt_ri = 0 xpt_zi = 0 inner_sep = 0 #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; # Put results into a structure result = Bunch(n_opoint=n_opoint, n_xpoint=n_xpoint, # Number of O- and X-points primary_opt=primary_opt, # Which O-point is the plasma centre inner_sep=inner_sep, #Innermost X-point separatrix opt_ri=opt_ri, opt_zi=opt_zi, opt_f=opt_f, # O-point location (indices) and psi values xpt_ri=xpt_ri, xpt_zi=xpt_zi, xpt_f=xpt_f) # X-point locations and psi values return result
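# Illustrative sketch (an assumption, not part of analyse_equil) of how a
# critical point of the flux function can be classified with
# RectBivariateSpline derivatives: the Hessian determinant is positive at an
# O-point (extremum) and negative at an X-point (saddle). The map is synthetic.
import numpy as np
from scipy.interpolate import RectBivariateSpline

r = np.linspace(0.0, 2.0, 101)
z = np.linspace(-1.0, 1.0, 101)
R, Z = np.meshgrid(r, z, indexing='ij')
F = np.exp(-((R - 1.0)**2 + Z**2))          # single O-point at (1, 0)

spl = RectBivariateSpline(r, z, F)

def classify(r0, z0):
    f_rr = spl.ev(r0, z0, dx=2, dy=0)
    f_zz = spl.ev(r0, z0, dx=0, dy=2)
    f_rz = spl.ev(r0, z0, dx=1, dy=1)
    det = f_rr * f_zz - f_rz**2
    return 'O-point' if det > 0 else 'X-point'

print(classify(1.0, 0.0))                   # -> O-point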
def __init__(self, MCrelation, cosmology=None): self.MCrelation = MCrelation # if isinstance(MCrelation, str): if MCrelation == 'Duffy08': pass elif MCrelation == 'DK15': # We compute the c-M relation for an array of z_arr and # M_arr, and store the interpolation function in self z_arr = np.linspace(0, 2, 21) M_arr = np.logspace(13, 16, 301) rho_m = cosmology['Omega_m'] * cosmo.RHOCRIT Omh2 = cosmology['Omega_m'] * cosmology['h']**2 Obh2 = cosmology['Omega_b'] * cosmology['h']**2 fb = cosmology['Omega_b'] / cosmology['Omega_m'] k_arr = np.logspace(-4, 2, 400) # Eisenstein&Hu'99 transfer function (no wiggles) # EQ 6 sound_horizon = 44.5 * np.log( 9.83 / Omh2) / (1 + 10 * Obh2**.75)**.5 # EQ 31 alphaGamma = 1 - .328 * np.log(431 * Omh2) * fb + .38 * np.log( 22.3 * Omh2) * fb**2 # EQ 30 Gamma = cosmology['Omega_m'] * cosmology['h'] * ( alphaGamma + (1 - alphaGamma) / (1 + (.43 * k_arr * cosmology['h'] * sound_horizon)**4)) # EQ 28 q = k_arr * (2.7255 / 2.7)**2 / Gamma # EQ 29 C0 = 14.2 + 731 / (1 + 62.5 * q) L0 = np.log(2 * np.exp(1) + 1.8 * q) TF = L0 / (L0 + C0 * q**2) # We only care about the derivative, not the normalization PK_EHsmooth = k_arr**cosmology['ns'] * TF**2 # Interpolation function for EQ 8, DK15 n_of_k = InterpolatedUnivariateSpline(np.log(k_arr), np.log(PK_EHsmooth)) # Normalized growth function integrand = lambda z_int: (1 + z_int) / cosmo.Ez(z_int, cosmology )**3 D_arr = np.array([ cosmo.Ez(z, cosmology) * integrate.quad(integrand, z, 1e3)[0] for z in z_arr ]) D_arr /= D_arr[0] ##### Compute sigma(M, z=0) # Radius [M_arr] R = (3 * M_arr / (4 * np.pi * rho_m))**(1 / 3) R = np.append(R, 8) # [M_arr, k_arr] kR = k_arr[None, :] * R[:, None] # Window functions [M_arr, k_arr] window = 3 * (np.sin(kR) / kR**3 - np.cos(kR) / kR**2) # Integrand [M_arr, k_arr] integrand_sigma2 = PK_EHsmooth[None, :] * window[:, :]**2 * k_arr[ None, :]**2 # sigma^2 [z_arr, M_arr] sigma2 = .5 / np.pi**2 * np.trapz(integrand_sigma2, k_arr, axis=-1) sigma = sigma2[:-1]**.5 * cosmology['sigma8'] / sigma2[-1]**.5 # EQ 12, DK15 k_R = .69 * 2 * np.pi / R[:-1] n = n_of_k(np.log(k_R), nu=1) # EQ 4, DK15 [z_arr, M_arr] nu = 1.686 / sigma[None, :] / D_arr[:, None] # EQ 10, DK15 [M_arr] c_min = 6.58 + n * 1.37 nu_min = 6.82 + n * 1.42 # EQ 9, DK15 [z_arr, M_arr] c = .5 * c_min * ((nu_min / nu)**1.12 + (nu / nu_min)**1.69) c[c > 30.] = 30. # Set up spline interpolation in z_arr and M_arr self.concentration = RectBivariateSpline(z_arr, M_arr, c) else: raise ValueError('Unknown mass-concentration relation:', MCrelation)
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Mon Jul 20 10:56:15 2020 @author: anirbanroy """ import numpy as np from scipy.interpolate import RectBivariateSpline #from plotsettings import * sfr_filepath='../data/' f=np.loadtxt(sfr_filepath+'sfr_beherozzi.dat') z=f[:,0] m=f[:,1] sfr=f[:,2] zlen=137 #manually checked mlen=int(len(z)/zlen) zn=z[0:zlen] mhn=m.reshape(mlen,zlen)[:,0] sfrn=sfr.reshape(mlen,zlen) sfr_interpolation=RectBivariateSpline(mhn, zn, sfrn) def sfr_int(m,z): res=sfr_interpolation(m,z) res=np.where(res<1e-4, 0.0, res) return res.flatten()
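# Self-contained sketch (synthetic table, not the Behroozi data loaded above) of
# the same pattern: tabulated SFR(Mh, z) -> RectBivariateSpline -> flattened,
# floor-thresholded evaluation.
import numpy as np
from scipy.interpolate import RectBivariateSpline

mh_grid = np.logspace(10, 14, 50)
z_grid = np.linspace(0.0, 10.0, 40)
sfr_tab = np.outer(mh_grid / 1e12, np.exp(-z_grid / 3.0))   # toy SFR table

sfr_spl = RectBivariateSpline(mh_grid, z_grid, sfr_tab)

def sfr_int_demo(m, z):
    res = sfr_spl(m, z)
    return np.where(res < 1e-4, 0.0, res).flatten()

print(sfr_int_demo(np.logspace(10, 13, 4), 2.0))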
def csr2d_kick_calc( z_b, x_b, weight, *, gamma=None, rho=None, nz=100, nx=100, xlim=None, zlim=None, reuse_psi_grids=False, psi_s_grid_old=None, psi_x_grid_old=None, map_f=map, species="electron", debug=False, ): """ Calculates the 2D CSR kick on a set of particles with positions `z_b`, `x_b` and charges `charges`. Parameters ---------- z_b : np.array Bunch z coordinates in [m] x_b : np.array Bunch x coordinates in [m] weight : np.array weight array (positive only) in [C] This should sum to the total charge in the bunch gamma : float Relativistic gamma rho : float bending radius in [m] if neagtive, particles with a positive x coordinate is on the inner side of the magnet nz : int number of z grid points nx : int number of x grid points zlim : floats (min, max) or None z grid limits in [m] xlim : floats (min, max) or None x grid limits in [m] map_f : map function for creating potential grids. Examples: map (default) executor.map species : str Particle species. Currently required to be 'electron' debug: bool If True, returns the computational grids. Default: False Returns ------- dict with: ddelta_ds : np.array relative z momentum kick [1/m] dxp_ds : np.array relative x momentum kick [1/m] """ assert species == "electron", "TODO: support species {species}" # assert np.sign(rho) == 1, 'TODO: negative rho' rho_sign = np.sign(rho) if rho_sign == -1: rho = -rho x_b = -x_b # flip the beam x coordinate # Grid setup if zlim: zmin = zlim[0] zmax = zlim[1] else: zmin = z_b.min() zmax = z_b.max() if xlim: xmin = xlim[0] xmax = xlim[1] else: xmin = x_b.min() xmax = x_b.max() dz = (zmax - zmin) / (nz - 1) dx = (xmax - xmin) / (nx - 1) # Charge deposition # Old method # zx_positions = np.stack((z_b, x_b)).T # indexes, contrib = split_particles(zx_positions, charges, mins, maxs, sizes) # t1 = time.time(); # charge_grid = deposit_particles(Np, sizes, indexes, contrib) # t2 = time.time(); # Remi's fast code t1 = time.time() charge_grid = histogram_cic_2d(z_b, x_b, weight, nz, zmin, zmax, nx, xmin, xmax) if debug: t2 = time.time() print("Depositing particles takes:", t2 - t1, "s") # Normalize the grid so its integral is unity norm = np.sum(charge_grid) * dz * dx lambda_grid = charge_grid / norm # Apply savgol filter lambda_grid_filtered = np.array( [savgol_filter(lambda_grid[:, i], 13, 2) for i in np.arange(nx)]).T # Differentiation in z lambda_grid_filtered_prime = central_difference_z(lambda_grid_filtered, nz, nx, dz, order=1) # Grid axis vectors zvec = np.linspace(zmin, zmax, nz) xvec = np.linspace(xmin, xmax, nx) beta = np.sqrt(1 - 1 / gamma**2) t3 = time.time() if reuse_psi_grids == True: psi_s_grid = psi_s_grid_old psi_x_grid = psi_x_grid_old else: # Creating the potential grids #zvec2 = np.linspace(2 * zmin, 2 * zmax, 2 * nz) #xvec2 = np.linspace(2 * xmin, 2 * xmax, 2 * nx) zvec2 = np.arange(-nz, nz, 1) * dz # center = 0 is at [nz] xvec2 = np.arange(-nx, nx, 1) * dx # center = 0 is at [nx] zm2, xm2 = np.meshgrid(zvec2, xvec2, indexing="ij") beta_grid = beta * np.ones(zm2.shape) # Map (possibly parallel) temp = map_f(psi_s, zm2 / 2 / rho, xm2 / rho, beta_grid) psi_s_grid = np.array(list(temp)) temp2 = map_f(psi_x, zm2 / 2 / rho, xm2 / rho, beta_grid) psi_x_grid = np.array(list(temp2)) # Replacing the fake zeros along the x_axis ( due to singularity) with averaged value from the nearby grid psi_x_grid[:, nx] = psi_x_where_x_equals_zero(zvec2, dx / rho, beta) if debug: t4 = time.time() print("Computing potential grids take:", t4 - t3, "s") # Compute the wake via 2d convolution conv_s = 
oaconvolve(lambda_grid_filtered_prime, psi_s_grid, mode="same") conv_x = oaconvolve(lambda_grid_filtered_prime, psi_x_grid, mode="same") if debug: t5 = time.time() print("Convolution takes:", t5 - t4, "s") Ws_grid = (beta**2 / rho) * (conv_s) * (dz * dx) Wx_grid = (beta**2 / rho) * (conv_x) * (dz * dx) # Interpolate Ws and Wx everywhere within the grid Ws_interp = RectBivariateSpline(zvec, xvec, Ws_grid) Wx_interp = RectBivariateSpline(zvec, xvec, Wx_grid) # Overall factor Nb = np.sum(weight) / e_charge kick_factor = r_e * Nb / gamma # m # Calculate the kicks at the particle locations delta_kick = kick_factor * Ws_interp.ev(z_b, x_b) xp_kick = kick_factor * Wx_interp.ev(z_b, x_b) if debug: t6 = time.time() print("Interpolation takes:", t6 - t5, "s") if rho_sign == -1: xp_kick = -xp_kick result = {"ddelta_ds": delta_kick, "dxp_ds": xp_kick} if debug: result.update({ "zvec": zvec, "xvec": xvec, "zvec2": zvec2, "xvec2": xvec2, "Ws_grid": Ws_grid, "Wx_grid": Wx_grid, "psi_s_grid": psi_s_grid, "psi_x_grid": psi_x_grid, "charge_grid": charge_grid, "lambda_grid_filtered_prime": lambda_grid_filtered_prime, }) return result
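# Minimal sketch (not the full CSR calculation) of the final interpolation step
# above: wake values defined on a regular (z, x) grid are evaluated at scattered
# particle coordinates with RectBivariateSpline.ev. The wake grid is synthetic.
import numpy as np
from scipy.interpolate import RectBivariateSpline

zvec = np.linspace(-1e-3, 1e-3, 100)
xvec = np.linspace(-5e-4, 5e-4, 80)
Zg, Xg = np.meshgrid(zvec, xvec, indexing='ij')
Ws_grid = np.sin(2e3 * np.pi * Zg) * np.exp(-(Xg / 2e-4)**2)   # toy wake

Ws_interp = RectBivariateSpline(zvec, xvec, Ws_grid)

rng = np.random.default_rng(0)
z_b = rng.normal(0, 2e-4, 1000)        # particle z positions [m]
x_b = rng.normal(0, 1e-4, 1000)        # particle x positions [m]
delta_kick = Ws_interp.ev(z_b, x_b)    # per-particle kick (overall factor omitted)
print(delta_kick.shape)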
def image_reconstruction3(image_path, LUT_path): # use 1 CCRF with RectBivariateSpline image_file = os.listdir(image_path) LUT_files = os.listdir(LUT_path) argmin_look_up_table_r = np.loadtxt(LUT_path + LUT_files[0]) #argmin_look_up_table_r = np.flip(argmin_look_up_table_r,axis=1) argmin_look_up_table_g = np.loadtxt(LUT_path + LUT_files[1]) #argmin_look_up_table_g = np.flip(argmin_look_up_table_g,axis=1) argmin_look_up_table_b = np.loadtxt(LUT_path + LUT_files[2]) #argmin_look_up_table_b = np.flip(argmin_look_up_table_b,axis=1) plt.figure() plt.title('argmin lookup table') plt.axis([0, 1024, 0, 1024]) imshow(argmin_look_up_table_r, cmap="gray") plt.show() plt.figure() plt.title('argmin lookup table') plt.axis([0, 1024, 0, 1024]) imshow(argmin_look_up_table_g, cmap="gray") plt.show() plt.figure() plt.title('argmin lookup table') plt.axis([0, 1024, 0, 1024]) imshow(argmin_look_up_table_b, cmap="gray") plt.show() example_image = imread(image_path + image_file[0]) image_high, image_wide = example_image.shape[0], example_image.shape[1] number_of_image = len(image_file) tmp_image_input = np.zeros((0, image_high, image_wide, 3), dtype=np.float32) # interpolation xxx = np.arange(0, 1, 1 / 1024) yyy = np.arange(0, 1, 1 / 1024) func_r = RectBivariateSpline(xxx, yyy, argmin_look_up_table_r, bbox=[0, 1, 0, 1]) #func_g = RectBivariateSpline(xxx,yyy,argmin_look_up_table_g, bbox = [0,1,0,1]) #func_b = RectBivariateSpline(xxx,yyy,argmin_look_up_table_b, bbox = [0,1,0,1]) #func = [func_r.ev,func_g.ev,func_b.ev] # prepare the input images for i in image_file: image_tmp = ((np.expand_dims(imread(image_path + i), axis=0) + 0.5) / 256.0).astype(np.float32) tmp_image_input = np.concatenate((tmp_image_input, image_tmp)) init_time = time.time() for z in range(number_of_image - 1): tmp_image_output = np.zeros( (number_of_image - 1 - z, image_high, image_wide, 3), dtype=np.float32) for x in range(number_of_image - 1 - z): tmp_image_1 = tmp_image_input[x].flatten() tmp_image_2 = tmp_image_input[x + 1].flatten() tmp_image_output[x] = func_r.ev(tmp_image_1, tmp_image_2).reshape( image_high, image_wide, 3) tmp_image_input = tmp_image_output.copy() print("Layer completed!") print("final passed time = {}".format(time.time() - init_time)) image_output = tmp_image_output[0] #misc.imsave("G:\\ECE516\\HDR team\\before_tone.jpg",image_output) return image_output
def InverseCompositionAffine(It, It1, threshold, num_iters): """ :param It: template image :param It1: Current image :param threshold: if the length of dp is smaller than the threshold, terminate the optimization :param num_iters: number of iterations of the optimization :return: M: the Affine warp matrix [3x3 numpy array] """ #We have to find M #We precompute the Jacobian #We have to get the initializations p = np.zeros(6) dp = np.ones(6) #Since we have six parameters to be determined #We hvae to initialize M M = np.eye(3) row_1, col_1 = It.shape row_2, col_2 = It.shape imH0, imW0 = np.shape(It) imH1, imW1 = np.shape(It1) splinet = RectBivariateSpline(np.linspace(0, row_1, row_1), np.linspace(0, col_1, col_1), It) #For image 1 splinet1 = RectBivariateSpline(np.linspace(0, row_2, row_2), np.linspace(0, col_2, col_2), It1) #For image 2 Iy, Ix = np.gradient(It) # This we find out the Affine subtraction spline_x = RectBivariateSpline(np.linspace(0, row_1, row_1), np.linspace(0, col_1, col_1), Ix) #For image 2 spline_y = RectBivariateSpline(np.linspace(0, row_1, row_1), np.linspace(0, col_1, col_1), Iy) #For image 2 #we have to get all the coordinates for the template image x, y = np.mgrid[0:col_1, 0:row_1] #print("Shape od x and y:",x.shape,y.shape) x_coor = np.reshape(x, (1, -1)) y_coor = np.reshape(y, (1, -1)) #We make [x,y,1] coor = np.vstack((x_coor, y_coor, np.ones((1, row_1 * col_1)))) # grad_x = spline_x.ev(y, x).flatten() grad_y = spline_y.ev(y, x).flatten() T = splinet.ev(y, x).flatten() A1 = np.multiply(grad_x, x_coor) A2 = np.multiply(grad_x, y_coor) A3 = np.reshape(grad_x, (1, -1)) A4 = np.multiply(grad_y, x_coor) A5 = np.multiply(grad_y, y_coor) A6 = np.reshape(grad_y, (1, -1)) A = np.vstack( (A1, A2, A3, A4, A5, A6)) #this is the Jaconian and the gradient of I A = A.T #print("Shape of A in my program:",A.shape) #print("Shape of b:",error.shape) H = A.T @ A #We calculate the Hessian n = 1 while (np.square(dp).sum() > threshold and n < num_iters): M = np.array([[1 + p[0], p[1], p[2]], [p[3], 1 + p[4], p[5]], [0, 0, 1]]) #Dimension i #print("Shape of M:",M.shape) #print("Shape of coor :",coor.shape) #We have to find the waroed image and hence we have to multiply m with x warp = M @ coor #This is 3*N #now we have the xp and yp coordinates warp_x = warp[0] warp_y = warp[1] #Now we have to find the gradient splines warp_final = splinet1.ev(warp_y, warp_x).flatten() #We now find the error image error = np.reshape(T - warp_final, (len(warp_x), 1)) # #b = np.reshape(Itp - It1p, (len(xp), 1)) dp = np.linalg.inv( H) @ A.T @ error # 2x2 @ 2xn @ nx1 --> Results in 2x1 #print("Shape of dp:",dp.shape) p = (p + dp.T).ravel() n += 1 #print("Shape of dp:",dp.shape) dM = np.vstack((dp.reshape(2, 3), [0, 0, 1])) M = M @ np.linalg.inv(dM) return M
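# Illustrative sketch (an assumption, not the tracker above) of the warping step
# it relies on: sample a RectBivariateSpline of the image at the coordinates
# produced by a 3x3 affine matrix. Image and matrix below are made up.
import numpy as np
from scipy.interpolate import RectBivariateSpline

img = np.random.default_rng(1).random((60, 80))            # toy image (rows, cols)
rows, cols = img.shape
spl = RectBivariateSpline(np.arange(rows), np.arange(cols), img)

M = np.array([[1.0, 0.02, 1.5],                            # small shear + shift
              [-0.02, 1.0, -0.5],
              [0.0, 0.0, 1.0]])

x, y = np.meshgrid(np.arange(cols), np.arange(rows))        # x = col, y = row
coords = np.vstack((x.ravel(), y.ravel(), np.ones(x.size)))
wx, wy = (M @ coords)[:2]
wx = np.clip(wx, 0, cols - 1)                               # stay inside the image
wy = np.clip(wy, 0, rows - 1)
warped = spl.ev(wy, wx).reshape(rows, cols)                  # note (row, col) order
print(warped.shape)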
def buildSection(self, sec=None, pts=None, gfilter=0.001): """ Extract a slice from the 3D data set and compute the stratigraphic layers. Parameters ---------- variable: sec Section first and last point coordinates (X,Y). variable: pts Number of points to discretise the cross-section. variable: gfilter Gaussian smoothing filter. """ if pts is None: pts = self.nx * 10 xo = sec[0, 0] xm = sec[1, 0] yo = sec[0, 1] ym = sec[1, 1] if xm > self.x.max(): xm = self.x.max() if ym > self.y.max(): ym = self.y.max() if xo < self.x.min(): xo = self.x.min() if yo < self.y.min(): yo = self.y.min() xsec, ysec = self._cross_section(xo, yo, xm, ym, pts) self.dist = np.sqrt((xsec - xo)**2 + (ysec - yo)**2) self.xsec = xsec self.ysec = ysec for k in range(self.layNb): # Thick rect_B_spline = RectBivariateSpline(self.yi, self.xi, self.rth[:, :, k]) data = rect_B_spline.ev(ysec, xsec) secTh = filters.gaussian_filter1d(data, sigma=gfilter) secTh[secTh < 0] = 0 self.secTh.append(secTh) # Elev rect_B_spline1 = RectBivariateSpline(self.yi, self.xi, self.relev[:, :, k]) data1 = rect_B_spline1.ev(ysec, xsec) secElev = filters.gaussian_filter1d(data1, sigma=gfilter) self.secElev.append(secElev) # Depth rect_B_spline2 = RectBivariateSpline(self.yi, self.xi, self.rdep[:, :, k]) data2 = rect_B_spline2.ev(ysec, xsec) secDep = filters.gaussian_filter1d(data2, sigma=gfilter) self.secDep.append(secDep) # Prop idprop = np.zeros(secDep.shape[0], dtype=int) rect_B_spline3 = RectBivariateSpline(self.yi, self.xi, self.rprop[:, :, k, 0]) data3 = rect_B_spline3.ev(ysec, xsec) secProp1 = filters.gaussian_filter1d(data3, sigma=gfilter) rect_B_spline4 = RectBivariateSpline(self.yi, self.xi, self.rprop[:, :, k, 1]) data4 = rect_B_spline4.ev(ysec, xsec) secProp2 = filters.gaussian_filter1d(data4, sigma=gfilter) if self.rockNb > 2: rect_B_spline5 = RectBivariateSpline(self.yi, self.xi, self.rprop[:, :, k, 2]) data5 = rect_B_spline5.ev(ysec, xsec) secProp3 = filters.gaussian_filter1d(data5, sigma=gfilter) r1 = np.where( np.logical_and(secProp2 > secProp1, secProp2 > secProp3))[0] idprop[r1] = 1 r2 = np.where( np.logical_and(secProp3 > secProp2, secProp3 > secProp1))[0] idprop[r2] = 2 else: r3 = np.where(secProp2 > secProp1)[0] idprop[r3] = 1 self.secPropID.append(idprop) # Ensure the spline interpolation does not create underlying layers above upper ones topsec = self.secDep[self.layNb - 1] for k in range(self.layNb - 2, -1, -1): secDep = self.secDep[k] self.secDep[k] = np.minimum(secDep, topsec) topsec = self.secDep[k] return
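# Small sketch (synthetic surface, not the stratigraphic data above) of the core
# operation in buildSection: evaluate a gridded field along an arbitrary (x, y)
# transect with RectBivariateSpline.ev, then smooth the 1-D profile.
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage import gaussian_filter1d

xi = np.linspace(0.0, 100.0, 201)
yi = np.linspace(0.0, 50.0, 101)
XX, YY = np.meshgrid(xi, yi)                      # shape (len(yi), len(xi))
field = np.sin(XX / 10.0) * np.cos(YY / 5.0)      # toy elevation layer

spl = RectBivariateSpline(yi, xi, field)          # note (y, x) ordering as above

pts = 300
xsec = np.linspace(5.0, 95.0, pts)
ysec = np.linspace(2.0, 48.0, pts)
profile = gaussian_filter1d(spl.ev(ysec, xsec), sigma=1.0)
print(profile.shape)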
row['r_mid'] * np.sin(ang), row['r_mid'] * np.cos(ang)], euler2mat(-ang, 0., 0.), # In detail this should be (primary_length + gap + secondary_length) / 2 # but the gap is somewhat complicated and this is only used # for display, we'll ignore that for now. [row['primary_length'], row['azwidth'] / 2., (row['outer_radius'] - row['inner_radius']) / 2.])) spo_pos4d = [np.dot(xyz2zxy, s) for s in spo_pos4d] reflectivity = load_table2d(os.path.join(config['data']['caldb_inputdata'], 'spos', 'coated_reflectivity.csv')) reflectivity_interpolator = RectBivariateSpline(reflectivity[1].to(u.keV), reflectivity[2].to(u.rad), reflectivity[3][0]) class PerfectLensSegment(PerfectLens): def __init__(self, **kwargs): self.d_center_optax = kwargs.pop('d_center_optical_axis') super(PerfectLensSegment, self).__init__(**kwargs) def specific_process_photons(self, photons, intersect, interpos, intercoos): # A ray through the center is not broken. # So, find out where a central ray would go. p_opt_axis = self.geometry['center'] - self.d_center_optax * self.geometry['e_z'] focuspoints = h2e(p_opt_axis) + self.focallength * norm_vector(h2e(photons['dir'][intersect])) dir = norm_vector(e2h(focuspoints - h2e(interpos[intersect]), 0)) pol = parallel_transport(photons['dir'].data[intersect, :], dir,
def coreg_with_IceSAT(args): """ Coregistration with the use of IceSAT data """ is_files = args.master_dem ## Read slave DEM ## slave_dem = DEMRaster(args.slave_dem) slave_dem.r = np.float32(slave_dem.r) if args.nodata2 != 'none': nodata = float(args.nodata2) else: band = slave_dem.ds.GetRasterBand(1) nodata = band.GetNoDataValue() # save original data for later resampling dem2coreg = slave_dem dem2coreg_save = np.copy(dem2coreg.r) # compute DEM extent lonmin, lonmax, latmin, latmax = dem2coreg.get_extent_latlon() lonmin += 360 lonmax += 360 RoI = ((lonmin, latmin), (lonmin, latmax), (lonmax, latmax), (lonmax, latmin), (lonmin, latmin)) ## mask points ## mask = raster.SingleBandRaster(args.maskfile) if dem2coreg.r.shape != mask.r.shape: print("Reproject mask") mask = mask.reproject(dem2coreg.srs, dem2coreg.nx, dem2coreg.ny, dem2coreg.extent[0], dem2coreg.extent[3], dem2coreg.xres, dem2coreg.yres, dtype=6, nodata=nodata, interp_type=1, progress=True) dem2coreg.r[mask.r > 0] = np.nan ## read Icesat data with DEM extent ## all_lons, all_lats, all_elev = read_icesat_elev(is_files, RoI) ## compare slave DEM and Icesat ## slave_elev = dem2coreg.interp(all_lons, all_lats, latlon=True) dh = all_elev - slave_elev dh[slave_elev == 0] = np.nan xx, yy = dem2coreg.proj(all_lons, all_lats) if args.plot == True: pl.title('Icesat - slave DEM elev') rgb = dem2coreg.shaded_relief(downsampl=5) pl.scatter(xx, yy, c=dh, edgecolor='none', vmin=-20, vmax=20) cb = pl.colorbar() cb.set_label('Elevation difference (m)') pl.show() ## compute slave DEM slope at Icesat points ## print("Compute slope and aspect") g2, g1 = np.gradient(dem2coreg.r) distx = np.abs(dem2coreg.xres) disty = np.abs(dem2coreg.yres) slope_pix = np.sqrt((g1 / distx)**2 + (g2 / disty)**2) aspect = np.arctan2(-g1, g2) aspect = aspect + np.pi slope_ds = raster.simple_write_geotiff('none', slope_pix, dem2coreg.ds.GetGeoTransform(), wkt=dem2coreg.srs.ExportToWkt(), dtype=6) slope_raster = raster.SingleBandRaster(slope_ds) slope_at_IS = slope_raster.interp(all_lons, all_lats, latlon=True) aspect_ds = raster.simple_write_geotiff('none', aspect, dem2coreg.ds.GetGeoTransform(), wkt=dem2coreg.srs.ExportToWkt(), dtype=6) aspect_raster = raster.SingleBandRaster(aspect_ds) aspect_at_IS = aspect_raster.interp(all_lons, all_lats, latlon=True) # slave DEM grid xgrid = np.arange(dem2coreg.nx) ygrid = np.arange(dem2coreg.ny) X, Y = dem2coreg.coordinates() ## Print out some statistics median = np.median(dh[np.isfinite(dh)]) NMAD_old = 1.4826 * np.median(np.abs(dh[np.isfinite(dh)] - median)) print("Statistics on initial dh") print("Median : %f, NMAD : %f" % (median, NMAD_old)) ## Iterations to estimate DEMs shift print("Iteratively estimate DEMs shift") slave_elev = dem2coreg.interp(all_lons, all_lats, latlon=True) dh = all_elev - slave_elev dh[slave_elev == 0] = np.nan xoff, yoff = 0, 0 for i in range(args.niter): # compute aspect/dh relationship east, north, c = horizontal_shift(dh, slope_at_IS, aspect_at_IS, plot=args.plot, min_count=args.min_count) print("#%i - Offset in pixels : (%f,%f)" % (i + 1, east, north)) xoff += east yoff += north #Update elevation difference slave_elev = dem2coreg.interp(xx + xoff, yy + yoff) dh = all_elev - slave_elev dh[slave_elev == 0] = np.nan # print some statistics median = np.median(dh[np.isfinite(dh)]) NMAD_new = 1.4826 * np.median(np.abs(dh[np.isfinite(dh)] - median)) print("Median : %.2f, NMAD = %.2f, Gain : %.2f%%" % (median, NMAD_new, (NMAD_new - NMAD_old) / NMAD_old * 100)) NMAD_old = NMAD_new print("Final Offset in pixels 
(east, north) : (%f,%f)" % (xoff, yoff)) if args.save == True: fname, ext = os.path.splitext(args.outfile) fname += '_shift.txt' f = open(fname, 'w') f.write("Final Offset in pixels (east, north) : (%f,%f)" % (xoff, yoff)) f.write("Final NMAD : %f" % NMAD_new) f.close() print("Offset saved in %s" % fname) ### Deramping ### print("Deramping") # remove points above altitude threshold (snow covered areas) #if args.zmax!='none': # dh[master_dem.r>int(args.zmax)] = np.nan # remove points below altitude threshold (e.g sea ice) zmin = 40 if zmin != 'none': dh[slave_elev < int(zmin)] = np.nan # remove points with slope higher than 20° that are more error-prone # slope, aspect = dem2coreg.compute_slope() # dh[slope>=20*np.pi/180] = np.nan # dh[np.isnan(slope)] = np.nan # remove outliers med = np.median(dh[np.isfinite(dh)]) mad = 1.4826 * np.median(np.abs(dh[np.isfinite(dh)] - med)) dh[np.abs(dh - med) > 3 * mad] = np.nan # estimate a ramp and remove it ramp = deramping(dh, xx, yy, d=args.degree, plot=False) # compute stats of deramped dh tmp = dh - ramp(X, Y) median = np.median(tmp) NMAD_new = 1.4826 * np.median(np.abs(tmp[np.isfinite(tmp)] - median)) if args.save == True: fname, ext = os.path.splitext(args.outfile) fname += '_shift.txt' f = open(fname, 'a') f.write("Median after deramping : (%f)" % (median)) f.write("NMAD after deramping : %f" % NMAD_new) f.close() print("Post-deramping stats saved in %s" % fname) # save to output file if args.save == True: fname, ext = os.path.splitext(args.outfile) fname += '_ramp.TIF' #fname = WD+'/ramp.out' raster.simple_write_geotiff(fname, ramp(X, Y), dem2coreg.ds.GetGeoTransform(), wkt=dem2coreg.srs.ExportToWkt(), dtype=gdal.GDT_Float32) #ramp(X,Y).tofile(fname) print("Ramp saved in %s" % fname) if args.plot == True: pl.figure('ramp') pl.scatter(xx, yy, c=ramp(xx, yy), edgecolor='none') pl.colorbar() pl.figure('before') pl.imshow(rgb, extent=dem2coreg.extent, interpolation='bilinear') pl.scatter(xx, yy, c=dh, edgecolor='none', vmin=-10, vmax=10) pl.colorbar() pl.figure('after') pl.imshow(rgb, extent=dem2coreg.extent, interpolation='bilinear') pl.scatter(xx, yy, c=dh - ramp(xx, yy), edgecolor='none', vmin=-10, vmax=10) pl.colorbar() pl.show() ### Interpolate the slave DEM to the new grid ### print("Interpolate DEM to new grid") # fill NaN values for interpolation nanval = np.isnan(dem2coreg_save) slave_filled = np.where(np.isnan(dem2coreg_save), -9999, dem2coreg_save) # Create spline function f = RectBivariateSpline(ygrid, xgrid, slave_filled, kx=1, ky=1) f2 = RectBivariateSpline(ygrid, xgrid, nanval, kx=1, ky=1) # resample slave DEM in the new grid znew = f(ygrid - yoff, xgrid + xoff) #postive y shift moves south nanval_new = f2(ygrid - yoff, xgrid + xoff) #remove filled values that have been interpolated znew[nanval_new != 0] = np.nan # update DEM dem2coreg_save = znew ### Remove ramp ### dem2coreg_save -= ramp(X, Y) ### Save to output file ### raster.simple_write_geotiff(args.outfile, dem2coreg_save, dem2coreg.ds.GetGeoTransform(), wkt=dem2coreg.srs.ExportToWkt(), dtype=gdal.GDT_Float32)
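# Hedged sketch (toy DEM, made-up offsets) of the final resampling step above:
# a bilinear RectBivariateSpline is evaluated on the shifted pixel grid to apply
# a sub-pixel (xoff, yoff) coregistration offset.
import numpy as np
from scipy.interpolate import RectBivariateSpline

ny, nx = 120, 150
xgrid = np.arange(nx)
ygrid = np.arange(ny)
dem = np.add.outer(np.linspace(0, 50, ny), np.linspace(0, 80, nx))   # toy DEM

xoff, yoff = 0.4, -0.7                      # hypothetical sub-pixel shift
f = RectBivariateSpline(ygrid, xgrid, dem, kx=1, ky=1)
dem_shifted = f(ygrid - yoff, xgrid + xoff)  # same sign convention as above
print(dem_shifted.shape)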
def interpolateSpline(self,x,y): F = RectBivariateSpline(self.xBinCentres,self.yBinCentres,self.z.T,s=0) # pts = np.stack((y, x), axis=1) Z = F(x,y) return Z
def interpolated_model(self, plot=False): """ Generate an interpolated model from the completeness curves. Waves have to be uniformly spaced """ if plot: import matplotlib.pyplot as plt import matplotlib as mpl mpl.use("tkagg") # bins to interpolate all completeness curves to, # in these coordinates 50% completeness always # at 1.0, also should in combination will fill_value # ensure 0 returns zero completeness in the # RectBivariateSpline fluxes_f50_units = linspace(0, 50, 5000) c_all = [] fgrid_for_mask = [] wgrid_for_mask = [] # Offset by half a bin brighter, so the mask kicks in # at the center location, not 1/2 a bin away. The # 0.999 factor is to ensure the actual value itself # is not in the mask fbsizediv2 = 0.999 * (self.fluxes[1] - self.fluxes[0]) / 2.0 for twave, tf50, c in zip(self.waves, self.f50, self.compl_curves): if plot: plt.plot(self.fluxes / tf50, c, linestyle="--") # Shift grid to center of bin, so don't # interpolate past value fgrid_for_mask.append((self.fluxes + fbsizediv2) / tf50) wgrid_for_mask.append(ones(len(self.fluxes)) * twave) # divide so 50% completeness to # convert to flux units of per f50 interpolator = interp1d(self.fluxes / tf50, c, bounds_error=False, fill_value=(0.0, c[-1])) # interpolate to the coordinates where 50% is at 1.0 c_all.append(interpolator(fluxes_f50_units)) c_all = array(c_all) # Make a combined model if self._wl_collapse: cmean = mean(c_all, axis=0) completeness_model = interp1d(fluxes_f50_units, cmean, fill_value=(0.0, cmean[-1]), bounds_error=False) if plot: vals_to_plot = completeness_model(fluxes_f50_units) else: # waves have to be uniformly spaced for this to work (? don't think so?) interp = RectBivariateSpline(self.waves, fluxes_f50_units, c_all, kx=3, ky=3) if self.dont_interp_to_zero: # Use this as a mask to not extrapolate toward 0.0 # if nearest point is zero compl_mask = zeros(self.compl_curves.shape) compl_mask[self.compl_curves > 0.0] = 1.0 interp_mask = NearestNDInterpolator( list( zip( array(wgrid_for_mask).ravel(), array(fgrid_for_mask).ravel())), compl_mask.ravel()) completeness_model = lambda x, y: interp_mask(x, y) * interp( x, y, grid=False) else: completeness_model = lambda x, y: interp(x, y, grid=False) if plot: vals_to_plot = completeness_model( self.waves[2] * ones(len(fluxes_f50_units)), fluxes_f50_units) if plot: plt.plot(fluxes_f50_units, vals_to_plot, "k.", lw=2.0) plt.xlim(0, 12.0) plt.xlabel("Flux/(50% flux) [erg/s/cm^2]") plt.ylabel("Normalized Completeness") plt.show() return completeness_model
def __init__(self, bmeshx, bmeshz, b): self.f = RectBivariateSpline(bmeshx,bmeshz, \ b.reshape(len(bmeshx), len(bmeshz)))
def get_contour(x, y, z_in, val): # Only works if z has exactly a single, convex, nested and closed contour # Easiest if we look for the zero contour z = np.copy(z_in) - val # Now find the largest and smallest y with a sign change in y-direction iy_min = len(y) iy_max = 0 for ix in range(len(x)): for isc in np.where(z[ix, 1:] - z[ix, 0:len(y) - 1])[0]: if(isc < iy_min): iy_min = isc if(isc > iy_max): iy_max = isc if(iy_min - 1 > 0): iy_min -= 1 if(iy_max + 1 < len(y)): iy_max += 1 spl = RectBivariateSpline(x, y, z) dxmin = np.min(np.abs(x[1:] - x[0:len(x) - 1])) # smallest spacing in x dymin = np.min(np.abs(y[1:] - y[0:len(y) - 1])) # smallest spacing in y while(y[iy_max] - y[iy_min] < 50 * dymin): dymin *= 2 x_grid = np.arange(np.min(x), np.max(x), dxmin) y_grid = np.arange(y[iy_min], y[iy_max], dymin) # Assures that contours are less than one grid cell apart at worst y_inter = np.zeros(len(x_grid)) cont = [] for i in range(len(y_grid)): y_inter[:] = y_grid[i] z_inter = spl(x_grid, y_inter, grid=False) roots = InterpolatedUnivariateSpline(x_grid, z_inter).roots() for root in roots: cont.append([root, y_grid[i]]) cont = np.array(cont) x_geo = np.mean(cont.T[0]) y_geo = np.mean(cont.T[1]) thetas = np.arctan2(cont.T[1] - y_geo, cont.T[0] - x_geo) isort = np.argsort(thetas) i_last = isort[0] i_start = i_last # Starting point of current contour cur_cont = [[cont.T[0][i_last], cont.T[1][i_last]]] sorted_conts = [] insort = 1 finished = np.zeros(len(isort), dtype=np.bool) finished[0] = True move_direction = 1 # Reversed if looking for further points of open contour while False in finished: if(not finished[insort]): i = isort[insort] x1_cp = cont.T[0][i_last] x2_cp = cont.T[0][i] y1_cp = cont.T[1][i_last] y2_cp = cont.T[1][i] if(check_neighbor(x1_cp, y1_cp, x2_cp, y2_cp, x, y, z)): # plt.plot(cont.T[0], cont.T[1], "+") # plt.plot([x1_cp, x2_cp], [y1_cp, y2_cp], "-") # plt.show() cur_cont.append([x2_cp, y2_cp]) finished[insort] = True i_last = i # else: # plt.plot([x[ix1_border - 1], x[ix1_border], x[ix1_border + 1], \ # x[ix1_border - 1], x[ix1_border], x[ix1_border + 1], \ # x[ix1_border - 1], x[ix1_border], x[ix1_border + 1]], \ # [y[iy1_border - 1], y[iy1_border - 1], y[iy1_border - 1], \ # y[iy1_border], y[iy1_border], y[iy1_border], \ # y[iy1_border + 1], y[iy1_border + 1], y[iy1_border + 1]], "+") # # plt.plot(cont.T[0], cont.T[1], "+") # plt.plot([x1_cp, x2_cp], [y1_cp, y2_cp], "-") # plt.plot([x1_cp, x2_cp], [y1_cp, y2_cp], "*") # plt.show() insort += move_direction if(insort == len(isort) and not np.all(finished)): # End of current contour reached # First check if start point of last contour can be continued i_last = i_start move_direction = -1 insort += move_direction if(insort < 0 and not np.all(finished)): # Current contour finished sorted_conts.append(cur_cont) # -> start a new one cur_cont = [] insort = np.where(np.logical_not(finished))[0][0] i_last = isort[insort] finished[insort] = True cur_cont.append([cont.T[0][i_last], cont.T[1][i_last]]) if(len(isort[np.logical_not(finished)]) > 1): insort = np.where(np.logical_not(finished))[0][0] move_direction = +1 # Finally go through all contours and check for closed ones closed_info = np.zeros(len(sorted_conts), dtype=np.bool) for i_cont in range(len(sorted_conts)): cont = sorted_conts[i_cont] i = isort[insort] x1_cp = cont[0][0] x2_cp = cont[0][1] y1_cp = cont[-1][0] y2_cp = cont[-1][1] # Build connection between points if(check_neighbor(x1_cp, y1_cp, x2_cp, y2_cp, x, y, z)): # Closed contour append first point at end 
sorted_conts[i_cont].append([x1_cp, y1_cp]) closed_info[i_cont] = True # Convert all contours to np arrays sorted_conts[i_cont] = np.array(sorted_conts[i_cont]) plt.plot(sorted_conts[i_cont].T[0], sorted_conts[i_cont].T[1], "-") # x_cont = np.concatenate([cont.T[0][isort], [cont.T[0][np.argmin(thetas)]]]) # y_cont = np.concatenate([cont.T[1][isort], [cont.T[1][np.argmin(thetas)]]]) # plt.plot(x_cont, y_cont, "-") plt.show() return closed_info, sorted_conts
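# Simplified sketch (not the full contour tracer above) of the root-finding idea
# it relies on: sample the 2-D spline along one grid direction and use
# InterpolatedUnivariateSpline.roots() to locate the zero-level crossings.
import numpy as np
from scipy.interpolate import RectBivariateSpline, InterpolatedUnivariateSpline

x = np.linspace(-2.0, 2.0, 81)
y = np.linspace(-2.0, 2.0, 81)
X, Y = np.meshgrid(x, y, indexing='ij')
z = X**2 + Y**2 - 1.0                       # zero contour = unit circle

spl = RectBivariateSpline(x, y, z)
points = []
for yj in y:
    z_line = spl(x, np.full_like(x, yj), grid=False)
    for root in InterpolatedUnivariateSpline(x, z_line).roots():
        points.append((root, yj))
points = np.array(points)
print(np.allclose(points[:, 0]**2 + points[:, 1]**2, 1.0, atol=1e-3))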
def read_adf15(path, order=1, plot_lines=[], ax=None, plot_3d=False): """Read photon emissivity coefficients from an ADAS ADF15 file. Returns a dictionary whose keys are the wavelengths of the lines in angstroms. The value is an interpolant that will evaluate the log10 of the PEC at a desired density and temperature. The power-10 exponentiation of this PEC has units of :math:`photons \cdot cm^3/s` Units for interpolation: :math:`cm^{-3}` for density; :math:`eV` for temperature. Parameters ---------- path : str Path to adf15 file to read. order : int, opt Parameter to control the order of interpolation. Default is 1 (linear interpolation). plot_lines : list List of lines whose PEC data should be displayed. Lines should be identified by their wavelengths. The list of available wavelengths in a given file can be retrieved by first running this function ones, checking dictionary keys, and then requesting a plot of one (or more) of them. ax : matplotlib axes instance If not None, plot on this set of axes. plot_3d : bool Display PEC data as 3D plots rather than 2D ones. Returns ------- log10pec_dict : dict Dictionary containing interpolation functions for each of the available lines of the indicated type (ionization or recombination). Each interpolation function takes as arguments the log-10 of ne and Te and returns the log-10 of the chosen PEC. Examples -------- To plot the Lyman-alpha photon emissivity coefficients for H (or its isotopes), you can use: >>> filename = 'pec96#h_pju#h0.dat' # for D Ly-alpha >>> # fetch file automatically, locally, from AURORA_ADAS_DIR, or directly from the web: >>> path = aurora.get_adas_file_loc(filename, filetype='adf15') >>> >>> # plot Lyman-alpha line at 1215.2 A. >>> # see available lines with log10pec_dict.keys() after calling without plot_lines argument >>> log10pec_dict = aurora.read_adf15(path, plot_lines=[1215.2]) Another example, this time also with charge exchange:: >>> filename = 'pec96#c_pju#c2.dat' >>> path = aurora.get_adas_file_loc(filename, filetype='adf15') >>> log10pec_dict = aurora.read_adf15(path, plot_lines=[361.7]) Metastable-resolved files will be automatically identified and parsed accordingly, e.g.:: >>> filename = 'pec96#he_pjr#he0.dat' >>> path = aurora.get_adas_file_loc(filename, filetype='adf15') >>> log10pec_dict = aurora.read_adf15(path, plot_lines=[584.4]) Notes ----- This function expects the format of PEC files produced via the ADAS adas810 or adas218 routines. """ # find out whether file is metastable resolved meta_resolved = path.split('#')[-2][-1] == 'r' if meta_resolved: print('Identified metastable-resolved PEC file') with open(path, 'r') as f: lines = f.readlines() cs = path.split('#')[-1].split('.dat')[0] header = lines.pop(0) # Get the expected number of lines by reading the header: num_lines = int(header.split()[0]) log10pec_dict = {} for i in range(0, num_lines): if '----' in lines[0]: _ = lines.pop(0) # separator may exist before each transition # Get the wavelength, number of densities and number of temperatures # from the first line of the entry: l = lines.pop(0) header = l.split() # sometimes the wavelength and its units are not separated: try: header = [hh.split('A')[0] for hh in header] except: # lam and A are separated. Delete 'A' unit. 
header = np.delete(header, 1) lam = float(header[0]) if header[1] == '': # 2nd element was empty -- annoyingly, this happens sometimes num_den = int(header[2]) num_temp = int(header[3]) else: num_den = int(header[1]) num_temp = int(header[2]) if meta_resolved: # index of metastable state INDM = int(header[-3].split('/')[0].split('=')[-1]) # Get the densities: dens = [] while len(dens) < num_den: dens += [float(v) for v in lines.pop(0).split()] dens = np.asarray(dens) # Get the temperatures: temp = [] while len(temp) < num_temp: temp += [float(v) for v in lines.pop(0).split()] temp = np.asarray(temp) # Get the PEC's: PEC = [] while len(PEC) < num_den: PEC.append([]) while len(PEC[-1]) < num_temp: PEC[-1] += [float(v) for v in lines.pop(0).split()] PEC = np.asarray(PEC) # find what kind of rate we are dealing with if 'recom' in l.lower(): rate_type = 'recom' elif 'excit' in l.lower(): rate_type = 'excit' elif 'chexc' in l.lower(): rate_type = 'chexc' elif 'drsat' in l.lower(): rate_type = 'drsat' elif 'ion' in l.lower(): rate_type = 'ioniz' else: # attempt to report unknown rate type -- this should be fairly robust rate_type = l.replace(' ', '').lower().split('type=')[1].split('/')[0] # create dictionary with keys for each wavelength: if lam not in log10pec_dict: log10pec_dict[lam] = {} # add a key to the log10pec_dict[lam] dictionary for each type of rate: recom, excit or chexc # interpolate PEC on log dens,temp scales pec_fun = RectBivariateSpline( np.log10(dens), np.log10(temp), np.log10( PEC ), # NB: interpolation of log10 of PEC to avoid issues at low ne or Te kx=order, ky=order) if meta_resolved: if rate_type not in log10pec_dict[lam]: log10pec_dict[lam][rate_type] = {} log10pec_dict[lam][rate_type][f'meta{INDM}'] = pec_fun else: log10pec_dict[lam][rate_type] = pec_fun if lam in plot_lines: # only plot 3 densities at chosen indices dens_idx = np.array([13, 15, 16, 18, 19]) # plot PEC values over ne,Te grid given by ADAS, showing interpolation quality NE, TE = np.meshgrid(dens[dens_idx], temp) PEC_eval = 10**pec_fun.ev(np.log10(NE), np.log10(TE)).T # plot PEC rates _ax = _plot_pec(dens[dens_idx], temp, PEC[dens_idx, :], PEC_eval, lam, cs, rate_type, ax, plot_3d) meta_str = '' if meta_resolved: meta_str = f' , meta = {INDM}' #_ax.set_title(cs + r' , $\lambda$ = '+str(lam) +' $\AA$, '+rate_type+meta_str) plt.tight_layout() return log10pec_dict
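# Hypothetical usage sketch for the log10 PEC interpolators returned above; the
# file name and wavelength key are taken from the docstring examples and are not
# verified here.
# path = aurora.get_adas_file_loc('pec96#h_pju#h0.dat', filetype='adf15')
# log10pec_dict = aurora.read_adf15(path)
# pec_fun = log10pec_dict[1215.2]['excit']
# ne, Te = 1e14, 50.0                                  # cm^-3, eV
# pec = 10 ** pec_fun.ev(np.log10(ne), np.log10(Te))   # photons cm^3 / s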
def diffICP(depthMap, pts3d, intrinsic, extrinsic, svInd=0): # This is a function doing differentiable ICP loss svAddPre = acGInfo()['internalRe_add'] svAdd = os.path.join(svAddPre, 'icpLoss') pts3d_org = np.copy(pts3d) iterationTime = 5 affM = np.eye(4) distTh = 0.6 icpLoss = np.zeros(iterationTime) matchedPtsRec = np.zeros(iterationTime) successTime = 0 for k in range(iterationTime): pts3d = (affM @ pts3d.T).T pts2d, depth = project3dPts(pts3d, intrinsic, extrinsic, isDepth=True) imgShape = depthMap.shape interX = np.arange(0, imgShape[1], 1) interY = np.arange(0, imgShape[0], 1) interpF = RectBivariateSpline(interY, interX, depthMap) depthR = interpF.ev(pts2d[:, 1], pts2d[:, 0]) pts3dR = reconstruct3dPts(depthR, pts2d[:, 0], pts2d[:, 1], intrinsic, extrinsic) validMask = np.sqrt(np.sum(np.square(pts3d - pts3dR), axis=1)) < distTh if np.sum(validMask) <= 30: break valpts3dR = pts3dR[validMask, :] valpts3d = pts3d[validMask, :] icpLoss[k] = np.mean( np.sum(np.square(pts3d[validMask, :] - pts3dR[validMask, :]), axis=1)) matchedPtsRec[k] = np.sum(validMask) affM = valpts3dR.T @ valpts3d @ np.linalg.inv(valpts3d.T @ valpts3d) successTime = successTime + 1 if successTime == 0: fig = plt.figure() ax = fig.add_subplot((111), projection='3d') ax.scatter(pts3d[:, 0], pts3d[:, 1], pts3d[:, 2], s=0.1, c='r') ax.scatter(pts3dR[:, 0], pts3dR[:, 1], pts3dR[:, 2], s=0.1, c='b') set_axes_equal(ax) plt.legend(['org Pts', 'target pts']) plt.title("Failure case") plt.savefig(os.path.join(svAdd, str(svInd) + '_3d'), dpi=300) plt.close(fig) fig = plt.figure() plt.stem(icpLoss) plt.savefig(os.path.join(svAdd, str(svInd) + '_curve')) plt.close(fig) return None, None fig = plt.figure() ax = fig.add_subplot((111), projection='3d') ax.scatter(pts3d[validMask, 0], pts3d[validMask, 1], pts3d[validMask, 2], s=0.1, c='r') ax.scatter(pts3dR[validMask, 0], pts3dR[validMask, 1], pts3dR[validMask, 2], s=0.1, c='b') ax.scatter(pts3d_org[validMask, 0], pts3d_org[validMask, 1], pts3d_org[validMask, 2], s=0.1, c='g') set_axes_equal(ax) plt.legend(['src Pts', 'target pts', 'org pts']) plt.savefig(os.path.join(svAdd, str(svInd) + '_3d'), dpi=300) plt.close(fig) # fig.show() occptRat = matchedPtsRec[successTime - 1] / matchedPtsRec[0] fig = plt.figure() plt.stem(icpLoss) plt.title("ICP loss curve, occupancy Ratio is %f" % occptRat) plt.xlabel("Iteration times") plt.ylabel("Square loss") plt.savefig(os.path.join(svAdd, str(svInd) + '_curve')) plt.close(fig) return validMask, affM
def find_contours(self, h): # Contour for single level # Sign changes of Z-h are recoreded in pen_points # The recorded points are always above or left the penetration point # Negative indices indicate a horizontal sign change self.pen_points = [] #horizontal # The zeroth x entry can never be a horizontal penetration point for i in range(1, self.nx-1): for j in range(0, self.ny): if((self.Z[i,j] -h) * (self.Z[i + 1,j] - h) < 0.0): self.pen_points.append(-np.array([i,j])) # Edge for i in range(self.nx-1, self.nx): for j in range(0, self.ny): if((self.Z[i,j] -h) * (self.Z[i - 1,j] - h) < 0.0): if(np.all(np.sum(np.abs(self.pen_points - (-np.array([i - 1,j]))),axis=1) > 0)): self.pen_points.append(-np.array([i - 1,j])) #Vertical for i in range(0, self.nx): # The zeroth xy entry can never be a vertical penetration point for j in range(1, self.ny-1): if((self.Z[i,j] - h) * (self.Z[i,j + 1] - h) < 0.0): self.pen_points.append(np.array([i,j])) #Edge for i in range(0, self.nx): for j in range(self.ny-1, self.ny): if((self.Z[i,j] - h) * (self.Z[i,j - 1] - h) < 0.0): if(np.all(np.sum(np.abs(self.pen_points - (-np.array([i,j-1]))),axis=1) > 0)): self.pen_points.append(np.array([i,j-1])) if(len(self.pen_points) == 0): self.contours_found = False return # Convert the pen_points to complex values which makes the index search much faster # Also remove any duplicates we might have picked up self.pen_points = list(np.unique(np.array(self.pen_points).T[0] + 1.j * np.array(self.pen_points).T[1])) self.contours_found = True self.finished_points = [] self.contour_lines = [[]] self.contour_indices = [[]] self.contour_closed = [] next_point = self.pen_points.pop(0) Z_spl = RectBivariateSpline(self.x, self.y, self.Z - h) N_int = 10 x_int = np.zeros(N_int) y_int = np.zeros(N_int) # Open contourlines are reversed to avoid bad starting points contour_reversed = False # Assemble contours while True: i_next = int(np.real(next_point)) j_next = int(np.imag(next_point)) if(i_next < 0 or j_next < 0): # Horiontal penetration point x_int[:] = np.linspace(self.x[-i_next], self.x[-i_next + 1], N_int) y_int[:] = self.y[-j_next] Z_int = Z_spl(x_int, y_int, grid=False) root_spl = InterpolatedUnivariateSpline(x_int, Z_int) roots = root_spl.roots() if(len(roots) == 0): print("Found no roots for this penetration point") fig = plt.figure() ax = fig.add_subplot(111) ax.contour(self.x, self.y, self.Z.T - h, levels=[0.0]) ax.vlines(self.x, np.min(self.y), np.max(self.y), linewidths=0.2) ax.hlines(self.y, np.min(self.x), np.max(self.x), linewidths=0.2) ax.plot(x_int, y_int, "r--", linewidth=4) fig2 = plt.figure() ax2 = fig2.add_subplot(111) ax2.plot(x_int, Z_int) plt.show() raise ValueError("Found no roots for this penetration point") for root in roots: self.contour_lines[-1].append([root, y_int[0]]) else: # Horiontal penetration point x_int[:] = self.x[i_next] y_int[:] = np.linspace(self.y[j_next], self.y[j_next + 1], N_int) Z_int = Z_spl(x_int, y_int, grid=False) root_spl = InterpolatedUnivariateSpline(y_int, Z_int) roots = root_spl.roots() if(len(roots) == 0): print("Found no roots for this penetration point") fig = plt.figure() ax = fig.add_subplot(111) ax.contour(self.x, self.y, self.Z.T - h, levels=[0.0]) ax.vlines(self.x, np.min(self.y), np.max(self.y), linewidths=0.2) ax.hlines(self.y, np.min(self.x), np.max(self.x), linewidths=0.2) ax.plot(x_int, y_int, "r--", linewidth=4) fig2 = plt.figure() ax2 = fig2.add_subplot(111) ax2.plot(y_int, Z_int) plt.show() raise ValueError("Found no roots for this penetration point") for root in 
roots: self.contour_lines[-1].append([x_int[0], root]) self.contour_indices[-1].append(next_point) self.finished_points.append(next_point) # Find the next point found_next, isclosed, next_point = self._find_next(next_point) if(not found_next): if(isclosed and len(self.contour_lines[-1]) > 2): # Close the contour self.contour_lines[-1].append(self.contour_lines[-1][0]) self.contour_indices[-1].append(self.contour_indices[-1][0]) elif(not contour_reversed): self.contour_lines[-1] = self.contour_lines[-1][::-1] self.contour_indices[-1] = self.contour_indices[-1][::-1] contour_reversed = True found_next, isclosed, next_point = self._find_next(self.contour_indices[-1][-1]) if(not found_next): contour_reversed = False self.contour_closed.append(isclosed) self.contour_lines[-1] = np.array(self.contour_lines[-1]) self.contour_indices[-1] = np.array(self.contour_indices[-1]) if(len(self.pen_points) == 0): break next_point = self.pen_points.pop(0) self.contour_lines.append([]) self.contour_indices.append([])
plt.close() #Start plotting 1d figure plt.figure(2, figsize=figsize) #Calculate energy per shell r = np.arange(0, m + 1, 1) #radius energy = np.zeros(m + 1) shell_pattern = np.zeros((m + 1, m + 1)) if 0: #Polar coordinates for shell spectrum t = np.arange(0, np.pi / 2, np.pi / 2 / m) #angle #Create cubic interpolation of reference file interp_spline = RectBivariateSpline(x, y, data) i = 0 for ri in r: energy[i] = 0.0 j = 0 for tj in t: x_tmp = ri * np.cos(tj) y_tmp = ri * np.sin(tj) energy[i] += interp_spline(x_tmp, y_tmp) / float(m) #print(j, x_tmp, y_tmp, energy[i], data[0,0], m) j = j + 1 i = i + 1 else: print("Generating energy in shells (Each x is 1/", m, ")") for i in range(0, m):
def renorm(EoS,IDs,MR,T,nd,nx,kij,nc,CR,en_auto,beta_auto,SM,n,estimate,L_est,phi_est): #nd Size of density grid #nx Size of mole fraction grid #n Main loop iteration controller #If only 1 component is present mimic a binary mixture made of the same component if nc==1: IDs[1] = IDs[0] #Recover parameters L_rg = data.L(IDs) #Vector with L parameters (cutoff length) phi_rg = data.phi(IDs) #Vector with phi parameters Tc = data.Tc(IDs) #Components parameters a = eos.a_calc(IDs,EoS,T) b = eos.b_calc(IDs,EoS) Tr = T/np.array(Tc) #Main loop parameters x = np.array([0.0001,0.9999]) stepx = (1/float(nx)) #Step to calculate change k = 0 #Vector fill counter i = 1 #Main loop counter r = 0 #Report counter count = 0 rho = np.empty((nd)) #Density vector rhov = [] #Density vector to export x0v = [] #Mole fraction vector to export bmixv = [] f = np.empty((nd)) #Helmholtz energy density vector fv = [] #Helmholtz energy density vector to export fresv = [] #Residual Helmholtz energy density vector to export Tv = [] #Temperature values to export df = np.empty((nd)) #Changes in helmholtz energy density vector f_orig = np.empty((nd)) #Unmodified Helmholtz energy density vector rhob = [] #Adimensional density vector u = np.empty((nd)) X = np.ones((4*nc)) Pv = [] fmat = [] Pmatv = np.empty((nx,nd)) fmatres = [] umat = [] ures = np.empty((nd)) uv = [] df_vdw = np.empty((nd)) df_vec2 = [] f_vec2 = [] P_vec2 = [] u_vec2 = [] aa2 = [] if nc==1: X = np.ones((8)) #Main loop************************************* while x[0]<1.0: if nc>1: print x[0] if x[0]==0.006: #after first step x[0]=0.005 x[1]=1-x[0] if nc==1: x[0] = 0.999999 x[1] = 0.000001 #Mixture parameters bmix = eos.bmix_calc(MR,b,x) amix = eos.amix_calc(MR,a,x,kij) Nav = 6.023e23 rhomax = 0.999999 #Mixture Renormalization parameters L = np.dot(x,np.power(L_rg,3.0)) L = np.power(L,1.0/3.0) phi = np.dot(x,phi_rg) #print L #print phi pi = math.pi omega = data.omega(IDs)[0] sig = np.power(6/pi*b/Nav*np.exp(omega),1.0/3.0)[0] #sig = np.power(b/Nav,1.0/3.0)[0] #c1 = data.c1(IDs)[0] #en = data.en(IDs)[0] #sig = np.power(1.15798*b/Nav,1.0/3.0)[0] L = sig #L = 1.5*sig #L = 1/c1*sig #print L,phi #L = 0.5/c1*sig #PHI = 4*(pi**2.0) #PHI = 1.0/pi/4.0 #lamda = 1.5 #w_LJ = (9.0*sig/7.0) #lennard-jones #print 'LJ=',w_LJ #w_SW = np.sqrt((1./5.)*(sig**2.0)*(lamda**5.0-1)/(lamda**3.0-1)) #square-well potential #print 'SW=',w_SW #phi = PHI*(w_LJ**2)/2/(L**2) #phi = PHI*(w_SW**2)/2/(L**2) #om = data.omega(IDs) #phi = 2/np.power(np.exp(om),4)[0] #w = 0.575*sig*en/T/kB/b[0]*1e6 #print 'w=',w #phi = 2/np.power(np.exp(c1),4)[0] #w = 100.0*1e-9/100 #van der waals wavelength 100nm #phi = PHI*(w**2)/2/(L**2) #print L #print phi #print '---------' #New parameters #L = 1.5*np.power(b/Nav,1.0/3.0) #h = 6.626e-34 #kkB = 1.38e-23 #MM = 0.034 #deBroglie = h/np.sqrt(3*kkB*T*MM/Nav) #phi = (deBroglie**2.0)/(L**2.0)*150*3.14 #L = L[0] #phi = phi[0] #print 'L=',L #print 'phi=',phi if estimate==True: L = L_est phi = phi_est for k in range(0,nd): rho[k] = np.array(float(k)/nd/bmix) if k==0: rho[0] = 1e-6 if EoS==6: if k==0: X = association.frac_nbs(nc,1/rho[k],CR,en_auto,beta_auto,b,bmix,X,0,x,0,T,SM) else: X = association.frac_nbs(nc,1/rho[k],CR,en_auto,beta_auto,b,bmix,X,1,x,0,T,SM) #print X,k #raw_input('...') f[k] = np.array(helm_rep(EoS,R,T,rho[k],amix,bmix,X,x,nc)) #Helmholtz energy density k = k+1 f_orig = f #Initial helmholtz energy density """ #------------------------------------------- #Fluctuation Analysis----------------------- #------------------------------------------- drho = 
rho[int(nd/2)]-rho[int(nd/2)-1] for i in range(1,nd-2): u[i] = (f[i+1]-f[i-1])/(2*drho) u[nd-1] = (f[nd-1]-f[nd-2])/drho u[0] = (f[1]-f[0])/drho fspl = splrep(rho,f,k=3) #Cubic Spline Representation f3 = splev(rho,fspl,der=0) u = splev(rho,fspl,der=1) #Evaluate Cubic Spline First derivative P = -f3+rho*u P_vec2.append(P) u_vec2.append(u) #=========================================== #=========================================== """ #Subtract attractive forces (due long range correlations) f = f + 0.5*amix*(rho**2) df_vec2.append(rho) f_vec2.append(rho) #Adimensionalization rho = rho*bmix f = f*bmix*bmix/amix T = T*bmix*R/amix f_vec2.append(f) rho1 = rho.flatten() #Main loop**************************************************************** i = 1 while i<=n: #print i #K = kB*T/((2**(3*i))*(L**3)) #K = R*T/((L**3)*(2**(3*i))) K = T/(2**(3*i))/((L**3)/bmix*6.023e23) #Long and Short Range forces fl = helm_long(EoS,rho,f) fs = helm_short(EoS,rho,f,phi,i) #Calculate df width = rhomax/nd w = 0 for w in range(0,nd): df[w] = renorm_df(w,nd,fl,fs,K,rho,width) #Update Helmholtz Energy Density df = np.array(df) #used to evaluate each step f = f + df df_vec2.append(list(df/bmix/bmix*amix*1e6/rho)) f_vec2.append(list(f)) #print 'i=',i,K,f[2],df[2],T,df_vec2[1][2] i = i+1 #print i #Dimensionalization rho = rho/bmix f = f/bmix/bmix*amix T = T/bmix/R*amix #df_total = #df = np.array(df) #df_vec.append(df) #Add original attractive forces f = f - 0.5*amix*(rho**2) #Store residual value of f #fres = f - rho*R*T*(np.log(rho)-1) #WRONG fres = f - rho*R*T*np.log(rho) #f = f + rho*R*T*(np.log(rho)-1) #Already accounting ideal gas energy #strT = str(T) #dfT = ('df_%s.csv' %strT) TT = np.zeros((nd)) df_vdw = 0.5*((rho*bmix)**2) df_vec2.append(list(df_vdw)) f_vec2.append(list(df_vdw)) for i in range(0,nd): TT[i] = T df_vec2.append(TT) f_vec2.append(TT) envelope.report_df(df_vec2,'df.csv') envelope.report_df(f_vec2,'f.csv') #raw_input('----') #if(EoS==6): # f = fres fv.append(f) fresv.append(fres) x0v.append(x[0]) bmixv.append(bmix) if r==0: rhob.append(rho*bmix) #rhob vector is always the same rhov.append(rho) #in case the calculation is done for one-component r=1 drho = rho[int(nd/2)]-rho[int(nd/2)-1] for i in range(1,nd-2): u[i] = (f[i+1]-f[i-1])/(2*drho) u[nd-1] = (f[nd-1]-f[nd-2])/drho u[0] = (f[1]-f[0])/drho fspl = splrep(rho,f,k=3) #Cubic Spline Representation f = splev(rho,fspl,der=0) u = splev(rho,fspl,der=1) #Evaluate Cubic Spline First derivative P = -f+rho*u Pv.append(P) for j in range(0,nd): Pmatv[count][j] = P[j] #print Pmatv[count][j],count,j,x[0] count = count+1 """ #Fluctuation Analysis----------------------- P_vec2.append(P) u_vec2.append(u) P_vec2.append(TT) u_vec2.append(TT) envelope.report_df(P_vec2,'P.csv') envelope.report_df(u_vec2,'u.csv') #=========================================== """ fmat.append(f) fmatres.append(fres) x[0] = x[0]+stepx #if nc>1: # if abs(x[0]-1.0)<1e-5: # x[0] = 0.9999 if nc>1: Pmat = RectBivariateSpline(x0v,rhob,Pmatv) else: Pmat = 'NULL' #differente imposed by renormalization dfv = [] dff = f-f_orig dfv.append(dff) avdw = [] aa2 = 0.5*amix*(rho**2) avdw.append(aa2) renorm_out = [] renorm_out.append(fv) renorm_out.append(x0v) renorm_out.append(rhov) renorm_out.append(rhob) renorm_out.append(fmat) renorm_out.append(Pmat) if nc>1: #If binary mixture, report calculated values print 'before report' ren_u = report_renorm_bin(rhob,x0v,fmatres,nx,nd,MR,IDs,EoS) renorm_out.append(ren_u) else: renorm_out.append(0) renorm_out.append(fresv) renorm_out.append(Pv) renorm_out.append(bmixv) 
renorm_out.append(dfv) renorm_out.append(avdw) return renorm_out
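# Small sketch (synthetic data, not the renormalization output) of what the
# Pmat object returned above provides: a RectBivariateSpline over (composition
# x0, reduced density rho*b) that returns pressure on demand.
import numpy as np
from scipy.interpolate import RectBivariateSpline

x0v = np.linspace(0.001, 0.999, 30)              # mole fraction grid
rhob = np.linspace(1e-6, 0.999, 200)             # adimensional density grid
Pmatv = np.outer(1.0 + x0v, rhob / (1.0 - rhob)) # toy pressure surface

Pmat = RectBivariateSpline(x0v, rhob, Pmatv)
print(Pmat.ev(0.25, 0.3))                        # P at x0 = 0.25, rho*b = 0.3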