import copy

import numpy as np
import numpy.linalg as la
import scipy.ndimage
from scipy.interpolate import RectBivariateSpline


def LucasKanade(It, It1, rect, p0):
    # Input:
    #   It: template image
    #   It1: Current image
    #   rect: Current position of the car (top left, bot right coordinates)
    #   p0: Initial movement vector [dp_x0, dp_y0]
    # Output:
    #   p: movement vector [dp_x, dp_y]
    p = copy.deepcopy(p0)
    threshold = 0.1
    x1 = rect[0, 0]
    y1 = rect[0, 1]
    x2 = rect[0, 2]
    y2 = rect[0, 3]

    # Interpolate the entire images.
    nx, ny = It.shape[1], It.shape[0]
    X = np.arange(0, nx, 1)
    Y = np.arange(0, ny, 1)
    It_interpolation = RectBivariateSpline(Y, X, It)
    It1_interpolation = RectBivariateSpline(Y, X, It1)

    rows = np.arange(y1, y2 + 1)
    cols = np.arange(x1, x2 + 1)
    It_rect = It_interpolation(rows, cols)

    while True:
        H, W = It_rect.shape

        # Warp It1 first, then cut out the rectangle.
        shift = [-p[1], -p[0]]
        It1_warped = scipy.ndimage.shift(It1, shift, output=None, order=3,
                                         mode='constant', cval=0.0,
                                         prefilter=True)
        gradient = np.gradient(It1_warped)
        gradient_x = gradient[1]
        gradient_y = gradient[0]

        # Interpolate on the gradient and on the warped image.
        gradient_x_interp = RectBivariateSpline(Y, X, gradient_x)
        gradient_y_interp = RectBivariateSpline(Y, X, gradient_y)
        It1_warped_interpolation = RectBivariateSpline(Y, X, It1_warped)
        I_w = It1_warped_interpolation(rows, cols)

        # Compute the error image and reshape it to (n, 1).
        b = It_rect - I_w
        b = np.reshape(b, (H * W, 1))

        # Evaluate the gradients on the rectangle.
        gradient_x = gradient_x_interp(rows, cols)
        gradient_y = gradient_y_interp(rows, cols)

        # Reshape the gradient matrices to (n, 1) and stack them to (n, 2).
        gradient_x = np.reshape(gradient_x, (H * W, 1))
        gradient_y = np.reshape(gradient_y, (H * W, 1))
        gradient = np.concatenate((gradient_x, gradient_y), axis=1)

        # For a pure-translation warp the Jacobian is the identity.
        jacobian = np.array([[1, 0], [0, 1]])
        A = np.matmul(gradient, jacobian)
        AT = np.transpose(A)

        # Compute the Hessian (named `hessian` to avoid shadowing the height H).
        hessian = np.matmul(AT, A)
        delta_p = np.matmul(np.matmul(la.pinv(hessian), AT), b)

        # Update p.
        p[0] = p[0] + delta_p[0, 0]
        p[1] = p[1] + delta_p[1, 0]

        if la.norm(delta_p) < threshold:
            break

    return p
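# A minimal usage sketch (not from the original source): estimate the
# translation between two smooth synthetic frames. The 1x4 rect layout
# [[x1, y1, x2, y2]] is an assumption based on the indexing above.
if __name__ == "__main__":
    yy, xx = np.mgrid[0:240, 0:320].astype(float)
    frame0 = np.exp(-((xx - 160) ** 2 + (yy - 120) ** 2) / (2 * 30.0 ** 2))
    frame1 = np.exp(-((xx - 163) ** 2 + (yy - 122) ** 2) / (2 * 30.0 ** 2))
    rect = np.array([[100, 80, 220, 160]])
    p = LucasKanade(frame0, frame1, rect, np.zeros(2))
    print("estimated motion [dp_x, dp_y]:", p)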
import time

import numpy as np
from numba import vectorize, float64
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage import map_coordinates
from scipy.signal import savgol_filter

# histogram_cic_2d, green_mesh, fftconvolve2, boundary_convolve, e_charge and
# r_e are assumed to come from the surrounding CSR module.


def csr2d_kick_calc_transient(
    z_b,
    x_b,
    weight,
    *,
    gamma=None,
    rho=None,
    phi=None,
    steady_state=False,
    nz=100,
    nx=100,
    xlim=None,
    zlim=None,
    species="electron",
    imethod='map_coordinates',
    debug=False,
):
    """
    Calculates the 2D CSR kick on a set of particles with positions `z_b`,
    `x_b` and charges `weight`.

    Parameters
    ----------
    z_b : np.array
        Bunch z coordinates in [m]

    x_b : np.array
        Bunch x coordinates in [m]

    weight : np.array
        weight array (positive only) in [C]
        This should sum to the total charge in the bunch

    gamma : float
        Relativistic gamma

    rho : float
        bending radius in [m]

    phi : float
        entrance angle in radian

    nz : int
        number of z grid points

    nx : int
        number of x grid points

    steady_state : boolean
        If True, the transient terms in case A and B are ignored

    zlim : floats (min, max) or None
        z grid limits in [m]

    xlim : floats (min, max) or None
        x grid limits in [m]

    species : str
        Particle species. Currently required to be 'electron'

    imethod : str
        Interpolation method for kicks. Must be one of:
            'map_coordinates' (default): uses scipy.ndimage.map_coordinates
            'spline': uses scipy.interpolate.RectBivariateSpline

    debug : bool
        If True, returns the computational grids.
        Default: False

    Returns
    -------
    dict with:
        ddelta_ds : np.array
            relative z momentum kick [1/m]
        dxp_ds : np.array
            relative x momentum kick [1/m]
        plus the grids zvec, xvec, Ws_grid, Wx_grid
        (and extra grids when debug=True)
    """
    assert species == "electron", f"TODO: support species {species}"
    # assert np.sign(rho) == 1, 'TODO: negative rho'

    # Grid setup
    if zlim:
        zmin = zlim[0]
        zmax = zlim[1]
    else:
        zmin = z_b.min()
        zmax = z_b.max()

    if xlim:
        xmin = xlim[0]
        xmax = xlim[1]
    else:
        xmin = x_b.min()
        xmax = x_b.max()

    dz = (zmax - zmin) / (nz - 1)
    dx = (xmax - xmin) / (nx - 1)

    # Charge deposition
    t1 = time.time()
    charge_grid = histogram_cic_2d(z_b, x_b, weight, nz, zmin, zmax,
                                   nx, xmin, xmax)

    if debug:
        t2 = time.time()
        print("Depositing particles takes:", t2 - t1, "s")

    # Normalize the grid so its integral is unity
    norm = np.sum(charge_grid) * dz * dx
    lambda_grid = charge_grid / norm

    # Apply savgol filter to the distribution grid
    lambda_grid_filtered = np.array(
        [savgol_filter(lambda_grid[:, i], 13, 2) for i in np.arange(nx)]).T

    # Differentiation in z
    # lambda_grid_filtered_prime = central_difference_z(lambda_grid_filtered, nz, nx, dz, order=1)

    # Distribution grid axis vectors
    zvec = np.linspace(zmin, zmax, nz)
    xvec = np.linspace(xmin, xmax, nx)
    Z, X = np.meshgrid(zvec, xvec, indexing='ij')

    beta = np.sqrt(1 - 1 / gamma**2)

    t3 = time.time()
    Es_case_B_grid_IGF = green_mesh((nz, nx), (dz, dx), rho=rho, gamma=gamma,
                                    component='Es_case_B_IGF')
    Fx_case_B_grid_IGF = green_mesh((nz, nx), (dz, dx), rho=rho, gamma=gamma,
                                    component='Fx_case_B_IGF')
    if debug:
        t4 = time.time()
        print("Computing case B field grids takes:", t4 - t3, "s")

    if steady_state:
        # Compute the wake via 2d convolution (no boundary condition)
        # conv_s, conv_x = fftconvolve2(lambda_grid_filtered_prime, psi_s_grid, psi_x_grid)
        conv_s, conv_x = fftconvolve2(lambda_grid_filtered,
                                      Es_case_B_grid_IGF, Fx_case_B_grid_IGF)
        Ws_grid = (beta**2 / abs(rho)) * (conv_s) * (dz * dx)
        Wx_grid = (beta**2 / abs(rho)) * (conv_x) * (dz * dx)
    else:
        Es_case_A_grid = green_mesh((nz, nx), (dz, dx), rho=rho, gamma=gamma,
                                    component='Es_case_A', phi=phi, debug=False)
        Fx_case_A_grid = green_mesh((nz, nx), (dz, dx), rho=rho, gamma=gamma,
                                    component='Fx_case_A', phi=phi, debug=False)
        if debug:
            print("Case A field grids computed!")

        @vectorize([float64(float64, float64)], target='parallel')
        def boundary_convolve_Ws_A_super(z_observe, x_observe):
            return boundary_convolve(1, z_observe, x_observe, zvec, xvec,
                                     dz, dx, lambda_grid_filtered,
                                     Es_case_A_grid,
                                     gamma=gamma, rho=rho, phi=phi)

        @vectorize([float64(float64, float64)], target='parallel')
        def boundary_convolve_Ws_B_super(z_observe, x_observe):
            return boundary_convolve(2, z_observe, x_observe, zvec, xvec,
                                     dz, dx, lambda_grid_filtered,
                                     Es_case_B_grid_IGF,
                                     gamma=gamma, rho=rho, phi=phi)

        @vectorize([float64(float64, float64)], target='parallel')
        def boundary_convolve_Wx_A_super(z_observe, x_observe):
            return boundary_convolve(1, z_observe, x_observe, zvec, xvec,
                                     dz, dx, lambda_grid_filtered,
                                     Fx_case_A_grid,
                                     gamma=gamma, rho=rho, phi=phi)

        @vectorize([float64(float64, float64)], target='parallel')
        def boundary_convolve_Wx_B_super(z_observe, x_observe):
            return boundary_convolve(2, z_observe, x_observe, zvec, xvec,
                                     dz, dx, lambda_grid_filtered,
                                     Fx_case_B_grid_IGF,
                                     gamma=gamma, rho=rho, phi=phi)

        if debug:
            print("mappable functions for field grids defined")

        # use Numba vectorization
        factor_case_A = (1 / gamma**2 / rho**2) * (dz * dx)
        Ws_grid_case_A = boundary_convolve_Ws_A_super(Z, X) * factor_case_A
        Wx_grid_case_A = boundary_convolve_Wx_A_super(Z, X) * factor_case_A

        factor_case_B = (beta**2 / rho**2) * (dz * dx)
        Ws_grid_case_B = boundary_convolve_Ws_B_super(Z, X) * factor_case_B
        Wx_grid_case_B = boundary_convolve_Wx_B_super(Z, X) * factor_case_B

        Ws_grid = Ws_grid_case_B + Ws_grid_case_A
        Wx_grid = Wx_grid_case_B + Wx_grid_case_A

    if debug:
        # t5 is set here for both branches so the timing below is defined
        # even in steady-state mode.
        t5 = time.time()
        print("Convolution takes:", t5 - t4, "s")

    # Calculate the kicks at the particle locations

    # Overall factor
    Nb = np.sum(weight) / e_charge
    kick_factor = r_e * Nb / gamma  # in m

    # Interpolate Ws and Wx everywhere within the grid
    if imethod == 'spline':
        # RectBivariateSpline method
        Ws_interp = RectBivariateSpline(zvec, xvec, Ws_grid)
        Wx_interp = RectBivariateSpline(zvec, xvec, Wx_grid)
        delta_kick = kick_factor * Ws_interp.ev(z_b, x_b)
        xp_kick = kick_factor * Wx_interp.ev(z_b, x_b)
    elif imethod == 'map_coordinates':
        # map_coordinates method. Should match above fairly well.
        # order=1 is even faster.
        zcoord = (z_b - zmin) / dz
        xcoord = (x_b - xmin) / dx
        delta_kick = kick_factor * map_coordinates(
            Ws_grid, np.array([zcoord, xcoord]), order=2)
        xp_kick = kick_factor * map_coordinates(
            Wx_grid, np.array([zcoord, xcoord]), order=2)
    else:
        raise ValueError(f'Unknown interpolation method: {imethod}')

    if debug:
        t6 = time.time()
        print(f'Interpolation with {imethod} takes:', t6 - t5, "s")

    # result = {"ddelta_ds": delta_kick}
    result = {"ddelta_ds": delta_kick, "dxp_ds": xp_kick}
    result.update({
        "zvec": zvec,
        "xvec": xvec,
        "Ws_grid": Ws_grid,
        "Wx_grid": Wx_grid
    })

    if debug:
        timing = np.array([t2 - t1, t4 - t3, t5 - t4, t6 - t5])
        result.update({
            "charge_grid": charge_grid,
            "lambda_grid_filtered": lambda_grid_filtered,
            "timing": timing
        })
        if not steady_state:
            result.update({
                "Ws_grid_case_A": Ws_grid_case_A,
                "Ws_grid_case_B": Ws_grid_case_B,
                "Wx_grid_case_A": Wx_grid_case_A,
                "Wx_grid_case_B": Wx_grid_case_B,
                "Es_case_A_grid": Es_case_A_grid,
                "Es_case_B_grid_IGF": Es_case_B_grid_IGF,
                "charge_grid": charge_grid,
            })

    return result
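# A minimal call sketch (not from the original source): kick a Gaussian bunch
# in steady-state mode. It assumes this module's helpers (histogram_cic_2d,
# green_mesh, fftconvolve2, ...) are importable alongside the function.
if __name__ == "__main__":
    rng = np.random.default_rng(1)
    n_particle = 10_000
    z_b = rng.normal(0.0, 10e-6, n_particle)          # 10 um bunch length
    x_b = rng.normal(0.0, 30e-6, n_particle)          # 30 um horizontal size
    weight = np.full(n_particle, 1e-9 / n_particle)   # 1 nC total charge
    result = csr2d_kick_calc_transient(z_b, x_b, weight, gamma=1000.0,
                                       rho=1.0, phi=0.1, steady_state=True)
    print(result["ddelta_ds"].shape, result["dxp_ds"].shape)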
# Assumes module-level imports, e.g.:
#   import numpy as np
#   from scipy.ndimage import filters
#   from scipy.interpolate import RectBivariateSpline


def buildSection(self, xo=None, yo=None, xm=None, ym=None, pts=100, gfilter=5):
    """
    Extract a slice from the 3D data set and compute the stratigraphic layers.

    Parameters
    ----------
    variable: xo, yo
        Lower X,Y coordinates of the cross-section.
    variable: xm, ym
        Upper X,Y coordinates of the cross-section.
    variable: pts
        Number of points to discretise the cross-section.
    variable: gfilter
        Gaussian smoothing filter.
    """
    if pts is None:
        pts = self.nx * 10

    # Clamp the section end points to the data extent.
    if xm > self.x.max():
        xm = self.x.max()
    if ym > self.y.max():
        ym = self.y.max()
    if xo < self.x.min():
        xo = self.x.min()
    if yo < self.y.min():
        yo = self.y.min()

    xsec, ysec = self._cross_section(xo, yo, xm, ym, pts)
    self.dist = np.sqrt((xsec - xo)**2 + (ysec - yo)**2)
    self.xsec = xsec
    self.ysec = ysec

    for k in range(self.nz):
        # Thickness
        rect_B_spline = RectBivariateSpline(self.yi, self.xi, self.th[:, :, k])
        data = rect_B_spline.ev(ysec, xsec)
        secTh = filters.gaussian_filter1d(data, sigma=gfilter)
        secTh[secTh < 0] = 0
        self.secTh.append(secTh)

        # Elevation
        rect_B_spline1 = RectBivariateSpline(self.yi, self.xi, self.elev[:, :, k])
        data1 = rect_B_spline1.ev(ysec, xsec)
        secElev = filters.gaussian_filter1d(data1, sigma=gfilter)
        self.secElev.append(secElev)

        # Depth
        rect_B_spline2 = RectBivariateSpline(self.yi, self.xi, self.dep[:, :, k])
        data2 = rect_B_spline2.ev(ysec, xsec)
        secDep = filters.gaussian_filter1d(data2, sigma=gfilter)
        self.secDep.append(secDep)

    # Ensure the spline interpolation does not create underlying layers
    # above upper ones
    topsec = self.secDep[self.nz - 1]
    for k in range(self.nz - 2, -1, -1):
        secDep = self.secDep[k]
        self.secDep[k] = np.minimum(secDep, topsec)
        topsec = self.secDep[k]

    return
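# A standalone sketch (hypothetical data) of the layer-capping step above:
# walking from the top layer down, each deeper layer is clipped with
# np.minimum so it never rises above the layer sitting on top of it.
import numpy as np

layers = [np.array([0.0, 1.0, 2.0]),   # deepest
          np.array([1.0, 0.5, 3.0]),
          np.array([2.0, 2.0, 2.0])]   # top
top = layers[-1]
for k in range(len(layers) - 2, -1, -1):
    layers[k] = np.minimum(layers[k], top)
    top = layers[k]
print(layers)  # the middle layer's 3.0 is capped to 2.0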
import numpy as np
from astropy.cosmology import Planck15
from scipy.constants import k as k_B  # Boltzmann constant [J/K]
from scipy.interpolate import RectBivariateSpline

# CircularGaussian, get_cut_box, pos_to_baselines, baselines_to_u0, dft and
# interpolate_visibility_onto_baselines are assumed to come from the
# surrounding spore package.


def sim_to_vis(box, antenna_pos, numin=150., numax=180., cosmo=Planck15,
               beam=CircularGaussian):
    """
    Parameters
    ----------
    box : :class:`tocm_tools.Box` instance or str
        Either a 21cmFAST simulation box, or a string specifying a filename
        to one.

    antenna_pos : array
        2D array of (x,y) positions of antennae in meters
        (shape (Nantenna, 2)).

    numin : float
        Minimum frequency (in MHz) to include in the "observation"

    numax : float
        Maximum frequency (in MHz) to include in the "observation"

    cosmo : :class:`astropy.cosmology.FLRW` instance
        The cosmology to use for all calculations

    beam : :class:`spore.model.beam.Beam` instance
        The telescope beam model to use.

    Returns
    -------
    uvsample : array
        2D complex array in which the first dimension has length of
        Nbaselines, and the second has Nnu. This is the Fourier Transform of
        the sky at the baselines, has units of Jy.

    u0 : array
        The baseline vectors of the observation in units of wavelengths at
        the lowest frequency (shape (Nbaseline, 2)).

    nu : array
        1D array of frequencies of observation. Units MHz.

    extras : dict
        Intermediate products ('slice', 'box', 'FT', 'freq', 'l'), useful for
        debugging.
    """
    # READ THE BOX
    box, Nbox, L, d, nu, z = get_cut_box(box, numin, numax)

    lam = 3e8 / (nu * 1e6)  # in m

    # Convert to specific intensity (Jy/sr): mK -> K, then K to Jy via
    # 2 k_B / lam^2 (1 Jy = 1e-26 J/m^2)
    box *= 1e-3 * 1e26 * 2 * k_B / lam**2

    # INITIALISE A BEAM MODEL
    beam = beam(nu.min(), np.linspace(1, nu.max() / nu.min(), len(nu)))

    # Box width at different redshifts
    width = L / cosmo.angular_diameter_distance(z).value

    # Minimum umax available in simulation
    maxl = 2 * np.sin(width.max() / 2 - width.max() / Nbox / 2)
    maxu = Nbox / 2 / maxl

    # GENERATE BASELINE VECTORS
    baselines = pos_to_baselines(antenna_pos)
    u0, baselines = baselines_to_u0(baselines, nu[0], maxu, ret_baselines=True)

    uvsample = np.zeros((len(baselines), len(nu)), dtype="complex128")

    # ATTENUATE BOX BY THE BEAM
    for i in range(len(nu)):
        dl = width[i] / Nbox
        l = np.sin(np.linspace(-width[i] / 2 + dl / 2,
                               width[i] / 2 - dl / 2, Nbox))
        Lgrid, Mgrid = np.meshgrid(l, l)
        sky_slice = box[:, :, i] * np.exp(-(Lgrid**2 + Mgrid**2) /
                                          (2 * beam.sigma[i]**2))

        # Interpolate onto regular l,m grid
        spl_lm = RectBivariateSpline(l, l, sky_slice)
        l = np.linspace(l.min(), l.max(), len(l))
        sky_slice = spl_lm(l, l, grid=True)

        FT, freq = dft.fft(sky_slice, L=l.max() - l.min(), a=0, b=2 * np.pi)

        uvsample[:, i] = interpolate_visibility_onto_baselines(
            FT, freq[0], nu[i] / nu[0], u0)

    return uvsample, u0, nu, {
        'slice': sky_slice,
        'box': box,
        "FT": FT,
        'freq': freq,
        "l": l
    }
# Assumes module-level imports, e.g.:
#   import numpy as np
#   from scipy.interpolate import RectBivariateSpline


def predict(self, requested_cosmology, z, m, get_errors=True, N_draw=1000):
    """Emulate the halo mass function dn/dlnM for the desired set of
    cosmology parameters, redshifts, and masses.

    :param requested_cosmology: The set of cosmology parameters for which the
        mass function is requested. The parameters are `Ommh2`, `Ombh2`,
        `Omnuh2`, `n_s`, `h`, `sigma_8`, `w_0`, `w_a`.
    :type requested_cosmology: dict

    :param z: The redshift(s) for which the mass function is requested.
    :type z: float or array

    :param m: The mass(es) for which the mass function is requested, in units
        [Msun/h].
    :type m: float or array

    :param get_errors: Whether or not to compute error estimates (faster in
        the latter case). Default is `True`.
    :type get_errors: bool, optional

    :param N_draw: How many sample mass functions to draw when computing the
        error estimate. Applies only if `get_errors` is `True`.
    :type N_draw: int, optional

    Returns
    -------
    HMF: array_like
        The mass function dn/dlnM in units [(h/Mpc)^3] and with shape
        [len(z), len(m)].
    HMF_rel_err: array_like
        The relative error on dn/dlnM, with shape [len(z), len(m)]. Returns 0
        if `get_errors` is `False`. For requested redshifts that are between
        the redshifts for which the underlying emulator is defined, the
        weighted errors from the neighboring redshifts are added in
        quadrature.
    """
    # Validate requested z and m
    if np.any(z < 0):
        raise ValueError("z must be >= 0")
    if np.any(z > self.z_arr_asc[-1]):
        raise ValueError("z must be <= 2.02")
    if np.any(m < 1e13):
        raise ValueError("m must be >= 1e13")
    if np.any(m > 1e16):
        raise ValueError("m must be <= 1e16")

    z = np.atleast_1d(z)
    m = np.atleast_1d(m)

    # Do we want error estimates?
    if not get_errors:
        N_draw = 0

    # Call the actual emulator
    emu_dict = self.predict_raw_emu(requested_cosmology, N_draw=N_draw)

    # Set up interpolation grids: log(HMF) on a grid in z and log10(m)
    HMF_interp_input = np.log(np.nextafter(0, 1)) * np.ones(
        (len(self.z_arr_asc), 3001))
    for i, emu_z in enumerate(self.z_arr_asc):
        HMF_interp_input[i, :len(emu_dict[emu_z]['HMF'])] = np.log(
            emu_dict[emu_z]['HMF'])
    HMF_interp = RectBivariateSpline(self.z_arr_asc,
                                     np.linspace(13, 16, 3001),
                                     HMF_interp_input,
                                     kx=1, ky=1)
    if get_errors:
        HMFerr_interp_input = np.zeros((len(self.z_arr_asc), 3001))
        for i, emu_z in enumerate(self.z_arr_asc):
            HMFerr_interp_input[i, :len(emu_dict[emu_z]['HMF_std'])] = \
                emu_dict[emu_z]['HMF_std']
        HMFerr_interp = RectBivariateSpline(self.z_arr_asc,
                                            np.linspace(13, 16, 3001),
                                            HMFerr_interp_input,
                                            kx=1, ky=1)

    # Call interpolation at input z and m
    HMF_out = np.exp(HMF_interp(z, np.log10(m)))
    HMFerr_out = np.zeros((len(z), len(m)))
    if get_errors:
        # First interpolate to requested m
        HMFerr_at_m = HMFerr_interp(self.z_arr_asc, np.log10(m))
        # Now add weighted errors in quadrature
        for z_id, this_z in enumerate(z):
            z_id_nearest = np.argsort(np.abs(self.z_arr_asc - this_z))[:2]
            Delta_z = (this_z - self.z_arr_asc[z_id_nearest]) / np.diff(
                self.z_arr_asc[z_id_nearest])
            HMFerr_out[z_id] = np.sqrt(
                (HMFerr_at_m[z_id_nearest[0], :] * Delta_z[0])**2 +
                (HMFerr_at_m[z_id_nearest[1], :] * Delta_z[1])**2)

    return HMF_out, HMFerr_out
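# A standalone sketch (synthetic numbers) mirroring the error-weighting step
# above: for a redshift between two emulator nodes, the per-node errors are
# combined in quadrature with linear-interpolation weights.
import numpy as np

z_nodes = np.array([0.0, 0.5])       # two neighbouring emulator redshifts
err_nodes = np.array([0.02, 0.04])   # relative errors at those nodes
this_z = 0.2
Delta_z = (this_z - z_nodes) / np.diff(z_nodes)
err = np.sqrt((err_nodes[0] * Delta_z[0])**2 + (err_nodes[1] * Delta_z[1])**2)
print(err)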
import numpy as np
from scipy.interpolate import RectBivariateSpline


def py_sampleImage(reference_image, dxy, udat, vdat, dRA=0., dDec=0., PA=0.,
                   origin='upper'):
    """
    Python implementation of sampleImage.
    """
    if origin == 'upper':
        v_origin = 1.
    elif origin == 'lower':
        v_origin = -1.

    nxy = reference_image.shape[0]

    dRA *= 2. * np.pi
    dDec *= 2. * np.pi

    du = 1. / (nxy * dxy)

    # Real to Complex transform
    fft_r2c_shifted = np.fft.fftshift(
        np.fft.rfft2(np.fft.fftshift(reference_image)), axes=0)

    # apply rotation
    cos_PA = np.cos(PA)
    sin_PA = np.sin(PA)

    urot = udat * cos_PA - vdat * sin_PA
    vrot = udat * sin_PA + vdat * cos_PA

    dRArot = dRA * cos_PA - dDec * sin_PA
    dDecrot = dRA * sin_PA + dDec * cos_PA

    # interpolation indices
    uroti = np.abs(urot) / du
    vroti = nxy / 2. + v_origin * vrot / du
    uneg = urot < 0.
    vroti[uneg] = nxy / 2 - v_origin * vrot[uneg] / du

    # coordinates of FT
    u_axis = np.linspace(0., nxy // 2, nxy // 2 + 1)
    v_axis = np.linspace(0., nxy - 1, nxy)

    # We use RectBivariateSpline to do only linear interpolation, which is
    # faster than interp2d for our case of a regular grid.
    # RectBivariateSpline does not work for complex input, so we need to run
    # it twice.
    f_re = RectBivariateSpline(v_axis, u_axis, fft_r2c_shifted.real,
                               kx=1, ky=1, s=0)
    ReInt = f_re.ev(vroti, uroti)
    f_im = RectBivariateSpline(v_axis, u_axis, fft_r2c_shifted.imag,
                               kx=1, ky=1, s=0)
    ImInt = f_im.ev(vroti, uroti)
    f_amp = RectBivariateSpline(v_axis, u_axis, np.abs(fft_r2c_shifted),
                                kx=1, ky=1, s=0)
    AmpInt = f_amp.ev(vroti, uroti)

    # correct for Real to Complex frequency mapping
    uneg = urot < 0.
    ImInt[uneg] *= -1.
    PhaseInt = np.angle(ReInt + 1j * ImInt)

    # apply the phase change
    theta = urot * dRArot + vrot * dDecrot
    vis = AmpInt * (np.cos(theta + PhaseInt) + 1j * np.sin(theta + PhaseInt))

    return vis
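# A minimal usage sketch (synthetic image and uv points, not from the original
# source): sample the FT of a centred Gaussian brightness distribution at a
# few (u, v) locations. The pixel size is arbitrary.
if __name__ == "__main__":
    nxy = 256
    dxy = 1e-7  # pixel size [rad]
    axis = (np.arange(nxy) - nxy / 2) * dxy
    X, Y = np.meshgrid(axis, axis)
    image = np.exp(-(X**2 + Y**2) / (2 * (20 * dxy)**2))
    udat = np.array([0.0, 1e4, 5e4])
    vdat = np.array([0.0, 2e4, 1e4])
    vis = py_sampleImage(image, dxy, udat, vdat)
    print(vis)  # vis[0] ~ total flux of the image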
import numpy as np
from scipy.interpolate import RectBivariateSpline

# `barr`, `barr_unc`, `delta`, `etr`, `tr` and `cos_theta` are assumed to be
# module-level settings for the Barr-parameter scan.


def compute_abs_derivatives(mceq_run, pid, barr_param, zenith_list):
    mceq_run.unset_mod_pprod(dont_fill=False)
    barr_pars = [p for p in barr if p.startswith(barr_param) and 'ch' not in p]
    print('Parameters corresponding to selection', barr_pars)

    dim_res = len(zenith_list), etr.shape[0]
    gs = mceq_run.get_solution

    # Central (unmodified) fluxes
    numu, anumu, nue, anue = (np.zeros(dim_res), np.zeros(dim_res),
                              np.zeros(dim_res), np.zeros(dim_res))
    for iz, zen_deg in enumerate(zenith_list):
        mceq_run.set_theta_deg(zen_deg)
        mceq_run.solve()
        numu[iz] = gs('total_numu', 0)[tr]
        anumu[iz] = gs('total_antinumu', 0)[tr]
        nue[iz] = gs('total_nue', 0)[tr]
        anue[iz] = gs('total_antinue', 0)[tr]

    # Fluxes with the selected Barr parameters shifted up by +delta
    mceq_run.unset_mod_pprod(dont_fill=True)
    for p in barr_pars:
        mceq_run.set_mod_pprod(2212, pid, barr_unc, (p, delta))
    # mceq_run.y.print_mod_pprod()
    mceq_run._init_default_matrices(skip_D_matrix=True)

    numu_up, anumu_up, nue_up, anue_up = (np.zeros(dim_res),
                                          np.zeros(dim_res),
                                          np.zeros(dim_res),
                                          np.zeros(dim_res))
    for iz, zen_deg in enumerate(zenith_list):
        mceq_run.set_theta_deg(zen_deg)
        mceq_run.solve()
        numu_up[iz] = gs('total_numu', 0)[tr]
        anumu_up[iz] = gs('total_antinumu', 0)[tr]
        nue_up[iz] = gs('total_nue', 0)[tr]
        anue_up[iz] = gs('total_antinue', 0)[tr]

    # Fluxes with the selected Barr parameters shifted down by -delta
    mceq_run.unset_mod_pprod(dont_fill=True)
    for p in barr_pars:
        mceq_run.set_mod_pprod(2212, pid, barr_unc, (p, -delta))
    # mceq_run.y.print_mod_pprod()
    mceq_run._init_default_matrices(skip_D_matrix=True)

    numu_down, anumu_down, nue_down, anue_down = (np.zeros(dim_res),
                                                  np.zeros(dim_res),
                                                  np.zeros(dim_res),
                                                  np.zeros(dim_res))
    for iz, zen_deg in enumerate(zenith_list):
        mceq_run.set_theta_deg(zen_deg)
        mceq_run.solve()
        numu_down[iz] = gs('total_numu', 0)[tr]
        anumu_down[iz] = gs('total_antinumu', 0)[tr]
        nue_down[iz] = gs('total_nue', 0)[tr]
        anue_down[iz] = gs('total_antinue', 0)[tr]

    # Central finite difference for the derivative w.r.t. the Barr parameter
    fd_derivative = lambda up, down: (up - down) / (2. * delta)

    dnumu = fd_derivative(numu_up, numu_down)
    danumu = fd_derivative(anumu_up, anumu_down)
    dnue = fd_derivative(nue_up, nue_down)
    danue = fd_derivative(anue_up, anue_down)

    return [
        RectBivariateSpline(cos_theta, np.log(etr), dist)
        for dist in [numu, dnumu, anumu, danumu, nue, dnue, anue, danue]
    ]
import numpy as np
import scipy
import scipy.linalg
from scipy.interpolate import RectBivariateSpline, interp2d
from skimage.filters import sobel
from skimage.util import img_as_float


def active_contour(image, snake, alpha=0.01, beta=0.1,
                   w_line=0, w_edge=1, gamma=0.01,
                   bc='periodic', max_px_move=1.0,
                   max_iterations=2500, convergence=0.1):
    """Active contour model.

    Active contours by fitting snakes to features of images. Supports single
    and multichannel 2D images. Snakes can be periodic (for segmentation) or
    have fixed and/or free ends. The output snake has the same length as the
    input boundary. As the number of points is constant, make sure that the
    initial snake has enough points to capture the details of the final
    contour.

    Parameters
    ----------
    image : (N, M) or (N, M, 3) ndarray
        Input image.
    snake : (N, 2) ndarray
        Initialisation coordinates of snake. For periodic snakes, it should
        not include duplicate endpoints.
    alpha : float, optional
        Snake length shape parameter. Higher values make the snake contract
        faster.
    beta : float, optional
        Snake smoothness shape parameter. Higher values make the snake
        smoother.
    w_line : float, optional
        Controls attraction to brightness. Use negative values to attract to
        dark regions.
    w_edge : float, optional
        Controls attraction to edges. Use negative values to repel snake from
        edges.
    gamma : float, optional
        Explicit time stepping parameter.
    bc : {'periodic', 'free', 'fixed'}, optional
        Boundary conditions for worm. 'periodic' attaches the two ends of the
        snake, 'fixed' holds the end-points in place, and 'free' allows free
        movement of the ends. 'fixed' and 'free' can be combined by parsing
        'fixed-free', 'free-fixed'. Parsing 'fixed-fixed' or 'free-free'
        yields same behaviour as 'fixed' and 'free', respectively.
    max_px_move : float, optional
        Maximum pixel distance to move per iteration.
    max_iterations : int, optional
        Maximum iterations to optimize snake shape.
    convergence : float, optional
        Convergence criteria.

    Returns
    -------
    snake : (N, 2) ndarray
        Optimised snake, same shape as input parameter.

    References
    ----------
    .. [1] Kass, M.; Witkin, A.; Terzopoulos, D. "Snakes: Active contour
           models". International Journal of Computer Vision 1 (4): 321
           (1988).

    Examples
    --------
    >>> from skimage.draw import circle_perimeter
    >>> from skimage.filters import gaussian

    Create and smooth image:

    >>> img = np.zeros((100, 100))
    >>> rr, cc = circle_perimeter(35, 45, 25)
    >>> img[rr, cc] = 1
    >>> img = gaussian(img, 2)

    Initialize spline:

    >>> s = np.linspace(0, 2*np.pi, 100)
    >>> init = 50*np.array([np.cos(s), np.sin(s)]).T+50

    Fit spline to image:

    >>> snake = active_contour(img, init, w_edge=0, w_line=1) #doctest: +SKIP
    >>> dist = np.sqrt((45-snake[:, 0])**2 +(35-snake[:, 1])**2) #doctest: +SKIP
    >>> int(np.mean(dist)) #doctest: +SKIP
    25

    """
    split_version = scipy.__version__.split('.')
    if not (split_version[-1].isdigit()):
        split_version.pop()
    scipy_version = list(map(int, split_version))
    new_scipy = scipy_version[0] > 0 or \
        (scipy_version[0] == 0 and scipy_version[1] >= 14)
    if not new_scipy:
        raise NotImplementedError('You are using an old version of scipy. '
                                  'Active contours is implemented for scipy '
                                  'versions 0.14.0 and above.')

    max_iterations = int(max_iterations)
    if max_iterations <= 0:
        raise ValueError("max_iterations should be >0.")
    convergence_order = 10
    valid_bcs = ['periodic', 'free', 'fixed', 'free-fixed',
                 'fixed-free', 'fixed-fixed', 'free-free']
    if bc not in valid_bcs:
        raise ValueError("Invalid boundary condition.\n" +
                         "Should be one of: " + ", ".join(valid_bcs) + '.')
    img = img_as_float(image)
    RGB = img.ndim == 3

    # Find edges using sobel:
    if w_edge != 0:
        if RGB:
            edge = [sobel(img[:, :, 0]), sobel(img[:, :, 1]),
                    sobel(img[:, :, 2])]
        else:
            edge = [sobel(img)]
        for i in range(3 if RGB else 1):
            edge[i][0, :] = edge[i][1, :]
            edge[i][-1, :] = edge[i][-2, :]
            edge[i][:, 0] = edge[i][:, 1]
            edge[i][:, -1] = edge[i][:, -2]
    else:
        edge = [0]

    # Superimpose intensity and edge images:
    if RGB:
        img = w_line*np.sum(img, axis=2) \
            + w_edge*sum(edge)
    else:
        img = w_line*img + w_edge*edge[0]

    # Interpolate for smoothness:
    if new_scipy:
        intp = RectBivariateSpline(np.arange(img.shape[1]),
                                   np.arange(img.shape[0]),
                                   img.T, kx=2, ky=2, s=0)
    else:
        intp = np.vectorize(interp2d(np.arange(img.shape[1]),
                                     np.arange(img.shape[0]), img,
                                     kind='cubic', copy=False,
                                     bounds_error=False, fill_value=0))

    x, y = snake[:, 0].astype(float), snake[:, 1].astype(float)
    xsave = np.empty((convergence_order, len(x)))
    ysave = np.empty((convergence_order, len(x)))

    # Build snake shape matrix for Euler equation
    n = len(x)
    a = np.roll(np.eye(n), -1, axis=0) + \
        np.roll(np.eye(n), -1, axis=1) - \
        2*np.eye(n)  # second order derivative, central difference
    b = np.roll(np.eye(n), -2, axis=0) + \
        np.roll(np.eye(n), -2, axis=1) - \
        4*np.roll(np.eye(n), -1, axis=0) - \
        4*np.roll(np.eye(n), -1, axis=1) + \
        6*np.eye(n)  # fourth order derivative, central difference
    A = -alpha*a + beta*b

    # Impose boundary conditions different from periodic:
    sfixed = False
    if bc.startswith('fixed'):
        A[0, :] = 0
        A[1, :] = 0
        A[1, :3] = [1, -2, 1]
        sfixed = True
    efixed = False
    if bc.endswith('fixed'):
        A[-1, :] = 0
        A[-2, :] = 0
        A[-2, -3:] = [1, -2, 1]
        efixed = True
    sfree = False
    if bc.startswith('free'):
        A[0, :] = 0
        A[0, :3] = [1, -2, 1]
        A[1, :] = 0
        A[1, :4] = [-1, 3, -3, 1]
        sfree = True
    efree = False
    if bc.endswith('free'):
        A[-1, :] = 0
        A[-1, -3:] = [1, -2, 1]
        A[-2, :] = 0
        A[-2, -4:] = [-1, 3, -3, 1]
        efree = True

    # Only one inversion is needed for implicit spline energy minimization:
    inv = scipy.linalg.inv(A + gamma*np.eye(n))

    # Explicit time stepping for image energy minimization:
    for i in range(max_iterations):
        if new_scipy:
            fx = intp(x, y, dx=1, grid=False)
            fy = intp(x, y, dy=1, grid=False)
        else:
            fx = intp(x, y, dx=1)
            fy = intp(x, y, dy=1)
        if sfixed:
            fx[0] = 0
            fy[0] = 0
        if efixed:
            fx[-1] = 0
            fy[-1] = 0
        if sfree:
            fx[0] *= 2
            fy[0] *= 2
        if efree:
            fx[-1] *= 2
            fy[-1] *= 2
        xn = np.dot(inv, gamma*x + fx)
        yn = np.dot(inv, gamma*y + fy)

        # Movements are capped to max_px_move per iteration:
        dx = max_px_move*np.tanh(xn-x)
        dy = max_px_move*np.tanh(yn-y)
        if sfixed:
            dx[0] = 0
            dy[0] = 0
        if efixed:
            dx[-1] = 0
            dy[-1] = 0
        x += dx
        y += dy

        # Convergence criteria needs to compare to a number of previous
        # configurations since oscillations can occur.
        j = i % (convergence_order+1)
        if j < convergence_order:
            xsave[j, :] = x
            ysave[j, :] = y
        else:
            dist = np.min(np.max(np.abs(xsave-x[None, :]) +
                                 np.abs(ysave-y[None, :]), 1))
            if dist < convergence:
                break

    return np.array([x, y]).T
def __init__(self, matrix, resolution, extent=Extent()):
    self._extends = extent
    x = np.arange(self._extends.x_min, self._extends.x_max, resolution)
    y = np.arange(self._extends.y_min, self._extends.y_max, resolution)
    # RectBivariateSpline expects matrix.shape == (len(x), len(y)).
    self._interp_spline = RectBivariateSpline(x, y, matrix)
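# A self-contained sketch (hypothetical Extent and class names) of how a
# wrapper like the __init__ above can be used to query a gridded field at
# arbitrary points.
import numpy as np
from dataclasses import dataclass
from scipy.interpolate import RectBivariateSpline


@dataclass
class Extent:
    x_min: float = 0.0
    x_max: float = 10.0
    y_min: float = 0.0
    y_max: float = 10.0


class GridField:
    def __init__(self, matrix, resolution, extent=Extent()):
        self._extends = extent
        x = np.arange(extent.x_min, extent.x_max, resolution)
        y = np.arange(extent.y_min, extent.y_max, resolution)
        self._interp_spline = RectBivariateSpline(x, y, matrix)

    def value_at(self, x, y):
        return self._interp_spline(x, y, grid=False)


field = GridField(np.random.default_rng(0).random((10, 10)), resolution=1.0)
print(field.value_at(3.5, 4.2))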
import camb
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline, RectBivariateSpline

# cl_kappa_limber is assumed to be defined alongside this function.


def get_field_rotation_power_from_PK(params, PK, chi_source, lmax=20000,
                                     acc=1, lsamp=None):
    results = camb.get_background(params)
    nz = int(100 * acc)
    if lmax < 3000:
        raise ValueError('field rotation assumed lmax > 3000')
    ls = np.hstack((np.arange(2, 400, 1),
                    np.arange(401, 2600, int(10. / acc)),
                    np.arange(2650, lmax, int(50. / acc)),
                    np.arange(lmax, lmax + 1))).astype(np.float64)

    # get grid of C_L(chi_s, k) for different redshifts
    chimaxs = np.linspace(0, chi_source, nz)
    cls = np.zeros((nz, ls.size))
    for i, chimax in enumerate(chimaxs[1:]):
        cl = cl_kappa_limber(results, PK, ls, nz, chimax)
        cls[i + 1, :] = cl
    cls[0, :] = 0
    cl_chi = RectBivariateSpline(chimaxs, ls, cls)

    # Get M(L, L') matrix
    chis = np.linspace(0, chi_source, nz, dtype=np.float64)
    zs = results.redshift_at_comoving_radial_distance(chis)
    dchis = (chis[2:] - chis[:-2]) / 2
    chis = chis[1:-1]
    zs = zs[1:-1]
    win = (1 / chis - 1 / chi_source)**2 / chis**2
    w = np.ones(chis.shape)
    cchi = cl_chi(chis, ls, grid=True)
    M = np.zeros((ls.size, ls.size))
    for i, ell in enumerate(ls):
        k = (ell + 0.5) / chis
        w[:] = 1
        w[k < 1e-4] = 0
        w[k >= PK.kmax] = 0
        cl = np.dot(dchis * w * PK.P(zs, k, grid=False) * win / k**4, cchi)
        # note we don't attempt to be accurate beyond lowest Limber
        M[i, :] = cl * ell**4

    Mf = RectBivariateSpline(ls, ls, np.log(M))

    # L sampling for output
    if lsamp is None:
        lsamp = np.hstack((np.arange(2, 20, 2),
                           np.arange(25, 200, 10 // acc),
                           np.arange(220, 1200, 30 // acc),
                           np.arange(1300, min(lmax // 2, 2600), 150 // acc),
                           np.arange(3000, lmax // 2 + 1, 1000 // acc)))

    # Get field rotation (curl) spectrum.
    diagm = np.diag(M)
    diagmsp = InterpolatedUnivariateSpline(ls, diagm)

    def high_curl_integrand(_ll, _lp):
        _lp = _lp.astype(int)
        r2 = (np.float64(_ll) / _lp)**2
        return _lp * r2 * diagmsp(_lp) / np.pi

    clcurl = np.zeros(lsamp.shape)
    lsall = np.arange(2, lmax + 1, dtype=np.float64)

    for i, ll in enumerate(lsamp):
        ell = np.float64(ll)
        lmin = lsall[0]
        lpmax = min(lmax, int(max(1000, ell * 2)))
        if ll < 500:
            lcalc = lsall[0:lpmax - 2]
        else:
            # sampling in L', with denser sampling around L ~ L'
            lcalc = np.hstack((lsall[0:20:4],
                               lsall[29:ll - 200:35],
                               lsall[ll - 190:ll + 210:6],
                               lsall[ll + 220:lpmax + 60:60]))

        tmps = np.zeros(lcalc.shape)
        for ix, lp in enumerate(lcalc):
            llp = int(lp)
            lp = np.float64(lp)
            if abs(ll - llp) > 200 and lp > 200:
                nphi = 2 * int(min(lp / 10 * acc, 200)) + 1
            elif ll > 2000:
                nphi = 2 * int(lp / 10 * acc) + 1
            else:
                nphi = 2 * int(lp) + 1
            dphi = 2 * np.pi / nphi
            # integrand is even in phi and we don't need the zero point
            phi = np.linspace(dphi, (nphi - 1) / 2 * dphi, (nphi - 1) // 2)
            w = 2 * np.ones(phi.size)
            cosphi = np.cos(phi)
            lrat = lp / ell
            lfact = np.sqrt(1 + lrat**2 - 2 * cosphi * lrat)
            lnorm = ell * lfact
            lfact[lfact <= 0] = 1
            w[lnorm < lmin] = 0
            w[lnorm > lmax] = 0
            lnorm = np.maximum(lmin, np.minimum(lmax, lnorm))
            tmps[ix] += lp * np.dot(
                w, (np.sin(phi) / lfact**2 * (cosphi - lrat))**2 *
                np.exp(Mf(lnorm, lp, grid=False))) * dphi

        sp = InterpolatedUnivariateSpline(lcalc, tmps)
        clcurl[i] = sp.integral(2, lpmax - 1) * 4 / (2 * np.pi)**2

        if lpmax < lmax:
            tail = np.sum(high_curl_integrand(ll, lsall[lpmax - 2:]))
            clcurl[i] += tail

    return lsamp, clcurl
import numpy as np
from scipy.interpolate import RectBivariateSpline


def LucasKanade(It, It1, rect, p0=np.zeros(2)):
    # Input:
    #   It: template image
    #   It1: Current image
    #   rect: Current position of the car (top left, bot right coordinates)
    #   p0: Initial movement vector [dp_x0, dp_y0]
    # Output:
    #   p: movement vector [dp_x, dp_y]
    delta = 100
    threshold = 0.05
    # Copy p0: `p += delta` below mutates in place, which would otherwise
    # accumulate state in the mutable default argument across calls.
    p = np.array(p0, dtype=float)

    x1, y1, x2, y2 = rect
    width = x2 - x1
    height = y2 - y1

    rows_it = range(0, It.shape[0])
    cols_it = range(0, It.shape[1])
    rows_it1 = range(0, It1.shape[0])
    cols_it1 = range(0, It1.shape[1])

    spline_it = RectBivariateSpline(rows_it, cols_it, It)
    spline_it1 = RectBivariateSpline(rows_it1, cols_it1, It1)

    x, y = np.mgrid[x1:x2 + 1:width * 1j, y1:y2 + 1:height * 1j]
    reference = spline_it.ev(y, x).flatten()

    while np.linalg.norm(delta) > threshold:
        img = spline_it1.ev(y + p[0], x + p[1]).flatten()
        error = reference - img

        # Evaluate the warped image gradients.
        grad_x = spline_it1.ev(y + p[0], x + p[1], dy=1).flatten()
        grad_y = spline_it1.ev(y + p[0], x + p[1], dx=1).flatten()
        deriv = np.vstack((grad_x, grad_y))

        # Least-squares update via the pseudo-inverse.
        invs = np.linalg.pinv(deriv)
        delta = np.matmul(invs.T, error)
        p += delta

    return p
import numpy as np
from scipy.interpolate import RectBivariateSpline

# `mesh` and `model` (a discretize TensorMesh and its resistivity model) and
# the `mkvc` helper are assumed to be defined earlier in the script.

domain = ((9000, 19000), (2500, 12500), (-5500, -500))
cell_width = 500

# Acquisition source frequencies (Hz)
frequencies = [1.0]  # only use one frequency
# frequencies = [0.5, 1.0]
omegas = [2.0 * np.pi]  # Angular frequency (rad/s)

# Define seafloor as every first cell with resistivity lower than 0.33
seafloor = np.ones((mesh.shape_cells[0], mesh.shape_cells[1]))
for i in range(mesh.shape_cells[0]):
    for ii in range(mesh.shape_cells[1]):
        seafloor[i, ii] = mesh.nodes_z[:-1][
            model.property_x[i, ii, :] < 0.33][0]

# Create a 2D interpolation function from the seafloor
bathymetry = RectBivariateSpline(
    mesh.cell_centers_x, mesh.cell_centers_y, seafloor)

# Seafloor x-, y- and z-coordinates, required for making the octree mesh
seafloor = np.reshape(
    seafloor, (len(mesh.cell_centers_x) * len(mesh.cell_centers_y), 1))
xseafloor, yseafloor = np.meshgrid(mesh.cell_centers_x, mesh.cell_centers_y)
seafloorxyz = np.c_[mkvc(xseafloor), mkvc(yseafloor), mkvc(seafloor)]
seafloorxyz = seafloorxyz[(seafloorxyz[:, 0] > 9000) &
                          (seafloorxyz[:, 0] < 19000) &
                          (seafloorxyz[:, 1] > 2500) &
                          (seafloorxyz[:, 1] < 12500)]

# Defining transmitter location
src_x = 2 * 5000
src_y = 7500

# Source depths: 50 m above seafloor
src_z = bathymetry(src_x, src_y).ravel() + 50
xtx, ytx, ztx = np.meshgrid([src_x], [src_y], [src_z[0]])
# Assumes module-level imports, e.g.:
#   import numpy as np
#   from scipy.interpolate import RectBivariateSpline


def load_table(self, fname):
    # read file
    F = open(fname, 'r')
    L = F.readlines()
    F.close()

    # convert loaded file to floats
    L = [l.strip() for l in L]
    L = [[float(x) for x in l.split()] for l in L]

    # extract parts
    Q2 = np.copy(L[0])
    X = np.copy(L[1])
    L = np.array(L[2:])

    # get number of partons in table
    npartons = len(L[0])

    # empty array for FF values
    FF = np.zeros((npartons, Q2.size, X.size))

    # fill array
    cnt = 0
    for iX in range(X.size - 1):
        for iQ2 in range(Q2.size):
            for iparton in range(npartons):
                if any([iparton == k for k in [0, 1, 2, 6, 7, 8]]):
                    factor = (1 - X[iX])**4 * X[iX]**0.5
                elif any([iparton == k for k in [3, 4]]):
                    factor = (1 - X[iX])**7 * X[iX]**0.3
                elif iparton == 5:
                    factor = (1 - X[iX])**4 * X[iX]**0.3
                FF[iparton, iQ2, iX] = L[cnt, iparton]  # /factor
            cnt += 1

    LX = np.log(X)
    LQ2 = np.log(Q2)

    D = self.D
    D['UTOT'] = RectBivariateSpline(LQ2, LX, FF[0], kx=3, ky=3)
    D['DTOT'] = RectBivariateSpline(LQ2, LX, FF[1], kx=3, ky=3)
    D['STOT'] = RectBivariateSpline(LQ2, LX, FF[2], kx=3, ky=3)
    D['CTOT'] = RectBivariateSpline(LQ2, LX, FF[3], kx=3, ky=3)
    D['BTOT'] = RectBivariateSpline(LQ2, LX, FF[4], kx=3, ky=3)
    D['G'] = RectBivariateSpline(LQ2, LX, FF[5], kx=3, ky=3)
    D['UVAL'] = RectBivariateSpline(LQ2, LX, FF[6], kx=3, ky=3)
    D['DVAL'] = RectBivariateSpline(LQ2, LX, FF[7], kx=3, ky=3)
    D['SVAL'] = RectBivariateSpline(LQ2, LX, FF[8], kx=3, ky=3)

    D['Up'] = lambda lQ2, lx: 0.5 * (D['UTOT'](lQ2, lx) + D['UVAL'](lQ2, lx))
    D['UBp'] = lambda lQ2, lx: 0.5 * (D['UTOT'](lQ2, lx) - D['UVAL'](lQ2, lx))
    D['Dp'] = lambda lQ2, lx: 0.5 * (D['DTOT'](lQ2, lx) + D['DVAL'](lQ2, lx))
    D['DBp'] = lambda lQ2, lx: 0.5 * (D['DTOT'](lQ2, lx) - D['DVAL'](lQ2, lx))
    D['Sp'] = lambda lQ2, lx: 0.5 * (D['STOT'](lQ2, lx) + D['SVAL'](lQ2, lx))
    D['SBp'] = lambda lQ2, lx: 0.5 * (D['STOT'](lQ2, lx) - D['SVAL'](lQ2, lx))
    D['Cp'] = lambda lQ2, lx: 0.5 * D['CTOT'](lQ2, lx)
    D['Bp'] = lambda lQ2, lx: 0.5 * D['BTOT'](lQ2, lx)
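# A usage sketch (hypothetical table path and holder class, not from the
# original source): after load_table fills self.D with splines over
# (log Q2, log x), the derived combinations can be evaluated directly.
import numpy as np


class FFTable:
    def __init__(self, fname):
        self.D = {}
        self.load_table(fname)

    load_table = load_table  # reuse the module-level function above as a method


# tab = FFTable("tables/ff_example.dat")               # hypothetical file
# print(tab.D['Up'](np.log(10.0), np.log(0.1)))        # u-quark at Q2=10, x=0.1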
def get_W_on_grid(self, dW_qw, include_q0=True, metal=False):
    """This function transforms the screened potential W(q, w) to the
    (q, w)-grid of the GW calculation. W is also integrated over a region
    around q=0 if include_q0 is set to True."""
    q_cs = self.qd.ibzk_kc
    rcell_cv = 2 * pi * np.linalg.inv(self.calc.wfs.gd.cell_cv).T
    q_vs = np.dot(q_cs, rcell_cv)
    q_grid = (q_vs**2).sum(axis=1)**0.5
    self.q_grid = q_grid
    w_grid = self.omega_w

    wqeh = self.wqeh  # w_grid.copy() # self.qeh
    qqeh = self.qqeh
    sortqeh = np.argsort(qqeh)
    qqeh = qqeh[sortqeh]
    dW_qw = dW_qw[sortqeh]

    sort = np.argsort(q_grid)
    isort = np.argsort(sort)
    if metal and np.isclose(qqeh[0], 0):
        # We don't have the right q=0 limit for metals and semi-metals,
        # so the point should be omitted from the interpolation.
        qqeh = qqeh[1:]
        dW_qw = dW_qw[1:]
        sort = sort[1:]

    from scipy.interpolate import RectBivariateSpline
    yr = RectBivariateSpline(qqeh, wqeh, dW_qw.real, s=0)
    yi = RectBivariateSpline(qqeh, wqeh, dW_qw.imag, s=0)

    dWgw_qw = yr(q_grid[sort], w_grid) + 1j * yi(q_grid[sort], w_grid)
    dW_qw = yr(qqeh, w_grid) + 1j * yi(qqeh, w_grid)

    if metal:
        # Interpolation is done -> put back zeros at q=0
        dWgw_qw = np.insert(dWgw_qw, 0, 0, axis=0)
        qqeh = np.insert(qqeh, 0, 0)
        dW_qw = np.insert(dW_qw, 0, 0, axis=0)
        q_cut = q_grid[sort][0] / 2.
    else:
        q_cut = q_grid[sort][1] / 2.

    q0 = np.array([q for q in qqeh if q <= q_cut])

    if len(q0) > 1:
        # Integrate around q=0
        vol = np.pi * (q0[-1] + q0[1] / 2.)**2
        if np.isclose(q0[0], 0):
            weight0 = np.pi * (q0[1] / 2.)**2 / vol
            c = (1 - weight0) / np.sum(q0)
            weights = c * q0
            weights[0] = weight0
        else:
            c = 1 / np.sum(q0)
            weights = c * q0

        dWgw_qw[0] = (np.repeat(weights[:, np.newaxis], len(w_grid), axis=1) *
                      dW_qw[:len(q0)]).sum(axis=0)

    if not include_q0:
        # Omit the q=0 contribution completely.
        dWgw_qw[0] = 0.0

    dWgw_qw = dWgw_qw[isort]  # Put dW back on native grid.

    return dWgw_qw
def simulate_with_interpolated_single_diode_approximation(
        self, module="WINAICO WSx-240P6"):
    """
    simulate_with_interpolated_single_diode_approximation(self, module="WINAICO WSx-240P6")

    Does the simulation with an interpolated single diode approximation using
    the pvlib.pvsystem.calcparams_desoto() [1] function and the
    pvlib.pvsystem.singlediode() [2] function.

    Parameters
    ----------
    module : str
        Must be one of:
          * A module found in the pvlib.pvsystem.retrieve_sam("CECMod") database
          * "WINAICO WSx-240P6" -> Good for open-field applications
          * "LG Electronics LG370Q1C-A5" -> Good for rooftop applications

    Returns
    -------
    Returns a reference to the invoking SolarWorkflowManager object.

    Notes
    -----
    Required columns in the placements dataframe to use this function are
    'lon', 'lat', 'elev', 'tilt' and 'azimuth'.
    Required data in the sim_data dictionary are 'poa_global' and
    'cell_temperature'.

    References
    ----------
    [1] https://pvlib-python.readthedocs.io/en/stable/generated/pvlib.pvsystem.calcparams_desoto.html
    [2] https://pvlib-python.readthedocs.io/en/stable/generated/pvlib.pvsystem.singlediode.html
    [3] (1, 2) W. De Soto et al., "Improvement and validation of a model for
        photovoltaic array performance", Solar Energy, vol 80, pp. 78-88, 2006.
    [4] System Advisor Model web page. https://sam.nrel.gov.
    [5] A. Dobos, "An Improved Coefficient Calculator for the California
        Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
        Solar Energy Engineering, vol 134, 2012.
    [6] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN 3-540-40488-0
    [7] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics"
        ISBN 0 86758 909 4
    [8] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of
        real solar cells using Lambert W-function", Solar Energy Materials
        and Solar Cells, 81 (2004) 269-277.
    [9] D. King et al, "Sandia Photovoltaic Array Performance Model",
        SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
    [10] "Computer simulation of the effects of electrical mismatches in
        photovoltaic cell interconnection circuits", JW Bishop, Solar Cell
        (1988) https://doi.org/10.1016/0379-6787(88)90059-2
    """
    # TODO: Make it work with multiple module definitions
    assert 'poa_global' in self.sim_data
    assert 'cell_temperature' in self.sim_data

    self.configure_cec_module(module)

    sel = self.sim_data['poa_global'] > 0
    poa = self.sim_data['poa_global'][sel]
    cell_temp = self.sim_data['cell_temperature'][sel]

    # Use RectBivariateSpline to speed up simulation, but at the cost of
    # accuracy (should still be >99.996%)
    maxpoa = np.nanmax(poa)
    _poa = np.concatenate([
        np.logspace(-1, np.log10(maxpoa / 10), 20, endpoint=False),
        np.linspace(maxpoa / 10, maxpoa, 80)
    ])
    _temp = np.linspace(cell_temp.min(), cell_temp.max(), 100)
    poaM, tempM = np.meshgrid(_poa, _temp)

    sotoParams = pvlib.pvsystem.calcparams_desoto(
        effective_irradiance=poaM.flatten(),
        temp_cell=tempM.flatten(),
        alpha_sc=self.module.alpha_sc,
        a_ref=self.module.a_ref,
        I_L_ref=self.module.I_L_ref,
        I_o_ref=self.module.I_o_ref,
        R_sh_ref=self.module.R_sh_ref,
        R_s=self.module.R_s,
        EgRef=1.121,        # PVLIB v0.7.2 Default
        dEgdT=-0.0002677,   # PVLIB v0.7.2 Default
        irrad_ref=1000,     # PVLIB v0.7.2 Default
        temp_ref=25,        # PVLIB v0.7.2 Default
    )

    photoCur, satCur, resSeries, resShunt, nNsVth = sotoParams

    gen = pvlib.pvsystem.singlediode(
        photocurrent=photoCur,
        saturation_current=satCur,
        resistance_series=resSeries,
        resistance_shunt=resShunt,
        nNsVth=nNsVth,
        ivcurve_pnts=None,   # PVLIB v0.7.2 Default
        method='lambertw',   # PVLIB v0.7.2 Default
    )

    interpolator = RectBivariateSpline(_temp, _poa,
                                       gen['p_mp'].reshape(poaM.shape),
                                       kx=3, ky=3)
    self.sim_data['module_dc_power_at_mpp'] = np.zeros_like(
        self.sim_data['poa_global'])
    self.sim_data['module_dc_power_at_mpp'][sel] = interpolator(
        cell_temp, poa, grid=False)

    interpolator = RectBivariateSpline(_temp, _poa,
                                       gen['v_mp'].reshape(poaM.shape),
                                       kx=3, ky=3)
    self.sim_data['module_dc_voltage_at_mpp'] = np.zeros_like(
        self.sim_data['poa_global'])
    self.sim_data['module_dc_voltage_at_mpp'][sel] = interpolator(
        cell_temp, poa, grid=False)

    self.sim_data['capacity_factor'] = \
        self.sim_data['module_dc_power_at_mpp'] / (
            self.module.I_mp_ref * self.module.V_mp_ref)

    # Estimate total system generation
    if "capacity" in self.placements.columns:
        self.sim_data['total_system_generation'] = \
            self.sim_data['capacity_factor'] * np.broadcast_to(
                self.placements.capacity, self._sim_shape_)

    if "modules_per_string" in self.placements.columns and \
            "strings_per_inverter" in self.placements.columns:
        total_modules = self.placements.modules_per_string * \
            self.placements.strings_per_inverter * \
            getattr(self.placements, "number_of_inverters", 1)
        self.sim_data['total_system_generation'] = \
            self.sim_data['module_dc_power_at_mpp'] * np.broadcast_to(
                total_modules, self._sim_shape_)

    return self
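# A standalone sketch of the speed-up trick used above (synthetic power
# surface, not pvlib output): evaluate an expensive model only on a small
# (temperature, irradiance) grid, then query the fitted RectBivariateSpline
# at scattered points with grid=False.
import numpy as np
from scipy.interpolate import RectBivariateSpline

_temp = np.linspace(0, 60, 50)    # cell temperature grid [C]
_poa = np.linspace(1, 1000, 80)   # irradiance grid [W/m^2]
poaM, tempM = np.meshgrid(_poa, _temp)
power_grid = 0.2 * poaM * (1 - 0.004 * (tempM - 25))  # stand-in for singlediode()

interp = RectBivariateSpline(_temp, _poa, power_grid, kx=3, ky=3)
rng = np.random.default_rng(0)
temps = rng.uniform(5, 55, 100_000)
poas = rng.uniform(10, 990, 100_000)
power = interp(temps, poas, grid=False)  # per-point evaluation, very fast
print(power[:3])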
import numpy
from scipy.interpolate import (RectBivariateSpline, RegularGridInterpolator,
                               UnivariateSpline)


def _upsample_cam(class_activation_matrix, new_dimensions):
    """Upsamples class-activation matrix (CAM).

    CAM may be 1-D, 2-D, or 3-D.

    :param class_activation_matrix: numpy array containing 1-D, 2-D, or 3-D
        class-activation matrix.
    :param new_dimensions: numpy array of new dimensions. If matrix is
        {1D, 2D, 3D}, this must be a length-{1, 2, 3} array, respectively.
    :return: class_activation_matrix: Upsampled version of input.
    """
    num_rows_new = new_dimensions[0]
    row_indices_new = numpy.linspace(1, num_rows_new, num=num_rows_new,
                                     dtype=float)
    row_indices_orig = numpy.linspace(1, num_rows_new,
                                      num=class_activation_matrix.shape[0],
                                      dtype=float)

    if len(new_dimensions) == 1:
        # interp_object = UnivariateSpline(
        #     x=row_indices_orig, y=numpy.ravel(class_activation_matrix),
        #     k=1, s=0
        # )
        interp_object = UnivariateSpline(
            x=row_indices_orig, y=numpy.ravel(class_activation_matrix),
            k=3, s=0)

        return interp_object(row_indices_new)

    num_columns_new = new_dimensions[1]
    column_indices_new = numpy.linspace(1, num_columns_new,
                                        num=num_columns_new, dtype=float)
    column_indices_orig = numpy.linspace(
        1, num_columns_new, num=class_activation_matrix.shape[1], dtype=float)

    if len(new_dimensions) == 2:
        interp_object = RectBivariateSpline(x=row_indices_orig,
                                            y=column_indices_orig,
                                            z=class_activation_matrix,
                                            kx=3, ky=3, s=0)

        return interp_object(x=row_indices_new, y=column_indices_new,
                             grid=True)

    num_heights_new = new_dimensions[2]
    height_indices_new = numpy.linspace(1, num_heights_new,
                                        num=num_heights_new, dtype=float)
    height_indices_orig = numpy.linspace(
        1, num_heights_new, num=class_activation_matrix.shape[2], dtype=float)

    interp_object = RegularGridInterpolator(
        points=(row_indices_orig, column_indices_orig, height_indices_orig),
        values=class_activation_matrix, method='linear')

    column_index_matrix, row_index_matrix, height_index_matrix = (
        numpy.meshgrid(column_indices_new, row_indices_new,
                       height_indices_new))
    query_point_matrix = numpy.stack(
        (row_index_matrix, column_index_matrix, height_index_matrix), axis=-1)

    return interp_object(query_point_matrix)
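# A minimal usage sketch (synthetic CAM, not from the original source):
# upsample a 7x7 activation map to 28x28 via the 2-D spline branch above.
cam = numpy.random.RandomState(0).rand(7, 7)
cam_up = _upsample_cam(cam, numpy.array([28, 28]))
print(cam_up.shape)  # (28, 28)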
import numpy as np
from scipy.interpolate import RectBivariateSpline


def createInterp(data, xData, yData):
    # yData arrives in descending order: reverse it and flip the data rows so
    # that both axes passed to RectBivariateSpline are ascending.
    yData = yData[::-1]
    return RectBivariateSpline(xData, yData, np.flipud(data).transpose())
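# A quick sketch (synthetic data) checking the orientation convention of
# createInterp: a grid stored with descending y still evaluates correctly.
x = np.array([0., 1., 2., 3., 4.])
y_desc = np.array([4., 3., 2., 1., 0.])       # descending y
data = y_desc[:, None] + 10 * x[None, :]      # rows follow y_desc
interp = createInterp(data, x, y_desc)
print(interp(2.0, 3.0))  # expect ~ 3 + 10*2 = 23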
# fit_psf and get_psf below follow the original module's `from numpy import *`
# style; they additionally assume, e.g.:
#   import os, sys
#   import astropy.io.fits as pyfits
#   from numpy import *
#   from numpy.linalg import eigh
#   from scipy.interpolate import RectBivariateSpline


def fit_psf(dat, vdat, mag, dmag, x, y, psffile, fit_rad=1., ap_rad=1.,
            max_flux=5.e4, nsigma=3., add_sys=True, aperture=False,
            remove_contam=True, spatial_vary=True, nsig_clip=3., resid=False):
    """
    Fits the mean psf for an image to each source to determine flux.
      dat: input fits image
      vdat: input fits variance image
      aperture: True for aperture photometry with radius fit_rad
      remove_contam: True to remove psf contamination during aperture photometry
      fit_rad: factor times fwhm in psffile for fitting
    Note: returns the nsigma limit if flux is <= nsigma * dflux
    """
    if (os.path.exists(psffile)):
        psf = pyfits.getdata(psffile)
        hdra = pyfits.getheader(psffile)
        fwhm = hdra['FWHM']
        fit_rad *= fwhm
        ap_rad *= fwhm
        dpsf = hdra['DPSF']
        oversamp = hdra['OVERSAM']
    else:
        sys.stderr.write("""Warning: psf file %s not found!\n""" % psffile)

    sx, sy = dat.shape
    sz = len(psf)
    sz0 = (sz - 1) // (2 * oversamp)
    if (fit_rad > sz0): fit_rad = 1. * sz0
    area = (2 * sz0 + 1)**2
    score, chi2 = 0. * mag, 0. * mag
    vdat[dat > max_flux] = 0.

    # create functions to shift psf's for use below
    xx0 = linspace(-sz0, sz0, sz)
    xx = linspace(-sz0, sz0, 2 * sz0 + 1)
    psf_shift = RectBivariateSpline(xx0, xx0, psf)

    ap_corr0 = 1.
    if (aperture):
        sys.stderr.write("""Performing aperture photometry with radius %.2f\n""" % ap_rad)
        rad0 = sqrt((xx0**2)[:, newaxis] + (xx0**2)[newaxis, :])
        psfa = 0. * rad0
        psfa[rad0 <= ap_rad - 0.5] = 0.5
        psfa[rad0 <= ap_rad + 0.5] += 0.5
        psfa_shift = RectBivariateSpline(xx0, xx0, psfa, kx=1, ky=1)
        # estimate aperture correction
        p = psf_shift(xx, xx)
        pa = psfa_shift(xx, xx)
        ap_corr0 = (p * pa).sum()

    # holders for final flux and error
    n = len(x)
    flx = 10**(-0.4 * (mag - 25.))
    dflx = zeros(n, dtype='float64')

    #
    # now compute flx and dflx for each source
    #
    i, j = y.round().astype('int16') - 1, x.round().astype('int16') - 1
    dx, dy = y - i - 1, x - j - 1

    # watch out for image boundaries
    i1, i2 = (i - sz0).clip(0), (i + sz0 + 1).clip(0, sx)
    j1, j2 = (j - sz0).clip(0), (j + sz0 + 1).clip(0, sy)
    i10, i20 = i1 - i + sz0, i2 - i + sz0
    j10, j20 = j1 - j + sz0, j2 - j + sz0

    # assign parents to children (overlapping psf's)
    parent = zeros(n, dtype='int16') - 1
    ii = mag.argsort()
    sz02 = sz0**2
    for i in ii:
        dis2 = (x - x[i])**2 + (y - y[i])**2
        j = (dis2 < sz02) * (parent == -1)
        p0 = parent[i]
        if (p0 == -1): parent[j] = i
        else:
            parent[j] = p0
            parent[i] = p0

    # figure out which ones we just want to punt on
    ii = parent.argsort()
    h = (i2[ii] > i1[ii]) * (j2[ii] > j1[ii])
    mag[ii[~h]], dmag[ii[~h]] = 999., 999.
    ii = ii[h]; parent = parent[ii]
    n1 = len(ii)

    # mapping of that input psf array to the data:
    hr = ((xx**2)[:, newaxis] + (xx**2)[newaxis, :]) <= fit_rad**2

    #
    # fit the psf, fitting multiple psf's to grouped sources
    #
    nparent = []
    ib = 0
    while (ib < n1):
        ie = ib
        while (parent[ie] == parent[ib]):
            ie += 1
            if (ie == n1 or parent[ib] == -1): break

        neb = ie - ib
        ii1 = ii[ib:ie]
        nparent.append(neb)

        #
        # do work with neb overlapping sources numbered ii[ib:ie]
        #
        matr = zeros((neb, neb), dtype='float64')
        vmatr = zeros((neb, neb), dtype='float64')
        vec = zeros(neb, dtype='float64')
        vec0 = zeros(neb, dtype='float64')
        if (aperture):
            if (remove_contam):
                matra = zeros((neb, neb), dtype='float64')
                vmatra = zeros((neb, neb), dtype='float64')
            ap_corr = zeros(neb, dtype='float64')
            veca = zeros(neb, dtype='float64')
            dveca = zeros(neb, dtype='float64')

        i1a, i2a = i1[ii1].min(), i2[ii1].max()
        j1a, j2a = j1[ii1].min(), j2[ii1].max()

        # define the extraction regions and mask, wt
        p = []
        wt = zeros((i2a - i1a, j2a - j1a), dtype='bool')
        for k in range(neb):
            i = ii[ib + k]
            pstamp1 = s_[i10[i]:i20[i], j10[i]:j20[i]]
            dstamp1 = s_[i1[i] - i1a:i2[i] - i1a, j1[i] - j1a:j2[i] - j1a]
            wt[dstamp1] += hr[pstamp1]
            # p: psf to be extracted on selection region
            p.append(psf_shift(xx - dx[i], xx - dy[i])[pstamp1])

        wtb = ~wt
        wt0 = wt.copy()
        h = vdat[i1a:i2a, j1a:j2a] <= 0
        wt[h] = False; wtb[h] = False

        # now calculate the psf or aperture photometry background
        if (wtb.sum() > 0):
            d = dat[i1a:i2a, j1a:j2a][wtb]
            bg = median(d)
            dbg = 1.48 * median(abs(d - bg))
            h = abs(d - bg) < 2 * dbg
            if (h.sum() > 0): bg = 2.5 * bg - 1.5 * d[h].mean()
        else: bg = 0.

        # now calculate the psf or aperture photometry
        for k in range(neb):
            i = ii[ib + k]
            pstamp = s_[i10[i]:i20[i], j10[i]:j20[i]]
            dstamp = s_[i1[i]:i2[i], j1[i]:j2[i]]
            dstamp1 = s_[i1[i] - i1a:i2[i] - i1a, j1[i] - j1a:j2[i] - j1a]
            d, vd = dat[dstamp] - bg, vdat[dstamp]
            hr0 = wt[dstamp1]
            phr, phr1 = p[k][hr0], p[k][wt0[dstamp1]]

            # we will solve flux = matr^(-1) vec
            vec[k] = dot(d[hr0], phr)
            matr[k, k], vmatr[k, k] = dot(phr, phr), dot(phr, phr * vd[hr0])

            # systematic error estimate for saturated sources
            if (matr[k, k] > 0): vec0[k] = 0.01 * sqrt(abs(dot(phr1, phr1) / matr[k, k] - 1.))

            if (aperture):
                # pa, aperture photometry window function
                pa = psfa_shift(xx - dx[i], xx - dy[i])[pstamp] * (vd > 0)
                phra = pa.ravel()
                veca[k] = dot(d.ravel(), phra)
                dveca[k] = sqrt(dot(vd.ravel(), phra**2))
                ap_corr[k] = dot(p[k].ravel(), phra) / ap_corr0

            # calculate the psf overlap between sources (covariance matrix)
            for l in range(k):
                j = ii[ib + l]
                ist, isp = max(i1[i], i1[j]), min(i2[i], i2[j])
                jst, jsp = max(j1[i], j1[j]), min(j2[i], j2[j])
                if (isp > ist and jsp > jst):
                    slca = s_[ist - i1[i]:ist - i1[i] + isp - ist, jst - j1[i]:jst - j1[i] + jsp - jst]
                    slcb = s_[ist - i1[j]:ist - i1[j] + isp - ist, jst - j1[j]:jst - j1[j] + jsp - jst]
                    hr1 = hr0[slca]; pk, pl = p[k][slca][hr1], p[l][slcb][hr1]
                    matr[k, l] = matr[l, k] = dot(pk, pl)
                    vmatr[k, l] = vmatr[l, k] = dot(vd[slca][hr1] * pk, pl)

            if (aperture and remove_contam and neb > 1):
                # determine fraction of psf l present in aperture k
                for l in range(neb):
                    if (l == k): continue
                    j = ii[ib + l]
                    ist, isp = max(i1[i], i1[j]), min(i2[i], i2[j])
                    jst, jsp = max(j1[i], j1[j]), min(j2[i], j2[j])
                    if (isp > ist and jsp > jst):
                        slca = s_[ist - i1[i]:ist - i1[i] + isp - ist, jst - j1[i]:jst - j1[i] + jsp - jst]
                        slcb = s_[ist - i1[j]:ist - i1[j] + isp - ist, jst - j1[j]:jst - j1[j] + jsp - jst]
                        matra[k, l] = dot(pa[slca].ravel(), p[l][slcb].ravel())
                        vmatra[k, l] = dot((pa[slca] * vd[slca]).ravel(), p[l][slcb].ravel())

        if (neb > 1):
            # replace matr with its inverse
            eig, mm = eigh(matr); eig = abs(eig)
            h = eig == 0
            if ((~h).sum() > 0):
                eig[h] += eig[~h].min() * 1.e-6
                matr = dot(mm / eig, mm.T)
            else: matr = zeros((neb, neb), dtype='float64')
        elif (matr[0, 0] != 0): matr[0, 0] = 1. / matr[0, 0]

        # the psf flux
        flx0 = dot(matr, vec)

        # determine the chi^2 for each source and the score
        for k in range(neb):
            i = ii[ib + k]
            pstamp = s_[i10[i]:i20[i], j10[i]:j20[i]]
            dstamp = s_[i1[i]:i2[i], j1[i]:j2[i]]
            d, vd = dat[dstamp], vdat[dstamp]
            mdl = flx0[k] * p[k]
            for l in range(neb):
                if (l == k): continue
                j = ii[ib + l]
                ist, isp = max(i1[i], i1[j]), min(i2[i], i2[j])
                jst, jsp = max(j1[i], j1[j]), min(j2[i], j2[j])
                if (isp > ist and jsp > jst):
                    slca = s_[ist - i1[i]:ist - i1[i] + isp - ist, jst - j1[i]:jst - j1[i] + jsp - jst]
                    slcb = s_[ist - i1[j]:ist - i1[j] + isp - ist, jst - j1[j]:jst - j1[j] + jsp - jst]
                    mdl[slca] += flx0[l] * p[l][slcb]

            hr0 = hr[pstamp] * (vd > 0)
            hs = hr0.sum()
            if (hs > 0):
                chi2[i] = dot((d[hr0] - mdl[hr0])**2, 1. / (vd[hr0] + (0.1 * mdl[hr0])**2)) / hs
                pkhr0 = p[k][hr0]
                src = d[hr0] - mdl[hr0] + flx0[k] * pkhr0
                dd = dot(src, src) - vd[hr0].sum()
                dd1 = dot(pkhr0, pkhr0)
                if (dd > 0 and dd1 > 0): score[i] = dot(src, pkhr0) / sqrt(dd * dd1)

        vmatr1 = dot(matr, dot(vmatr, matr))
        if (aperture):
            # do not include psf estimates of fluxes from other sources in the aperture
            flx[ii1], dflx[ii1] = veca, dveca
            if (remove_contam and neb > 1):
                flx[ii1] -= dot(matra, flx0)
                dflx[ii1] = sqrt((dflx[ii1]**2 + ((dot(matra, vmatr1) - 2 * dot(vmatra, matr)) * matra).sum(axis=1)).clip(0))
            h = ap_corr > 0
            flx[ii1[h]] /= ap_corr[h]; dflx[ii1[h]] /= ap_corr[h]
        else:
            flx[ii1], dflx[ii1] = flx0, sqrt((diag(vmatr1)).clip(0))
            if (add_sys): dflx[ii1] = sqrt(dflx[ii1]**2 + (vec0 * flx[ii1])**2)

        if (resid):
            for k in range(neb):
                i = ii[ib + k]
                dstamp = s_[i1[i]:i2[i], j1[i]:j2[i]]
                dat[dstamp] -= flx0[k] * p[k]

        ib = ie

    # summarize the PSF grouping
    nparent = array(nparent)
    np = unique(nparent)
    sys.stderr.write("""PSF Group(Frequency): """)
    for p in np:
        j = nparent == p
        sys.stderr.write("""%d(%d) """ % (p, j.sum()))
    sys.stderr.write("""\n""")

    # translate fluxes to mags
    for i0 in range(n1):
        i = ii[i0]
        if (dflx[i] <= 0): mag[i], dmag[i] = 999, 999
        elif (flx[i] < nsigma * dflx[i]): mag[i], dmag[i] = 25 - 2.5 * log10(flx[i].clip(0) + nsigma * dflx[i]), 999
        else: mag[i], dmag[i] = 25 - 2.5 * log10(flx[i]), 2.5 / log(10.) * dflx[i] / flx[i]

    return flx, dflx, score, chi2, 2.5 * log10(ap_corr0)
def cgstki(vel, z, tt, dt, nx, ny, dim, domain, simtstep): x = np.arange(tt) ttt = int(tt / dt) ttt = np.arange(ttt) n = len(ttt) velp = np.empty((nx, ny, dim, n)) # velp[x,y,(i,j,k), t] # velp = vel # on recup une tranche du domaine et les 2 composantes de vitesse associees integ = 'rk45' # if doublegyre velp[:, :, :, :] = vel[:, :, z, 0:2, :] print velp[25, 25, :, :] ptlist = np.indices((nx, ny)) ptlist = ptlist.astype(float) domain = domain.astype(float) dx = abs(domain[1] - domain[0]) / nx dy = abs(domain[3] - domain[2]) / ny print 'dx', dx print 'dy', dy rr = 1 nnx = nx * rr nny = ny * rr ptlist[0] = ptlist[0] * dx + domain[0] ptlist[1] = ptlist[1] * dy + domain[2] ptlistini = ptlist stamp = time.time() # interpolator h = dt # lenght t step # n = int((tt) / h) # nb t step # n=tt # n += 1 # print tt, n # print '%i interpolation onver time intervals' % n # noinspection PyPep8Naming print 'dt', dt, 't physique', tt, '# time steps', ttt interpU = np.empty((nx, ny, dim, n)) interTime = False if interTime: for i in xrange(nx): for j in xrange(ny): # print 'i', i, 'j', j y = velp[i, j, :, :] # print y.shape f = interp1d(x, y, kind='quadratic') for ti in ttt: interpU[i, j, :, ti] = f(ti) else: interpU = velp # interp spatiale sur une grille r fois plus fine grid = np.indices((nnx, nny)) grid = grid.astype(float) # grid=np.swapaxes(grid,1,2) grid[0] = grid[0] * abs(domain[1] - domain[0]) / nnx + domain[0] grid[1] = grid[1] * abs(domain[3] - domain[2]) / nny + domain[2] gridini = grid points = np.empty((nx * ny, dim)) val = np.empty((nx * ny, dim, n)) interpU_i = np.empty((nnx, nny, dim, n)) newgridu = np.empty((nnx, nny, n)) newgridv = np.empty((nnx, nny, n)) grid_i = np.empty((dim, nnx, nny)) grid_i[0, :, :] = grid[0] grid_i[1, :, :] = grid[1] grid_iini = np.zeros((dim, nnx, nny)) grid_iini = np.empty((dim, nnx, nny)) grid_iini[0, :, :] = grid[0] grid_iini[1, :, :] = grid[1] # print interpU[25, 25, 0, 0] # print grid_i # timeinter = False # if timeinter: # a putain de refaire avec inperpolate.interpnd # k = 0 print 'interpolation over space, dx/ %i' % rr print domain print nx, ny, nnx, nny for ti in ttt: print 'ti', ti k = 0 for i in xrange(nx): for j in xrange(ny): points[k] = ptlistini[:, i, j] val[k, 0, ti] = interpU[i, j, 0, ti] val[k, 1, ti] = interpU[i, j, 1, ti] k += 1 newgridu[:, :, ti] = griddata(points, val[:, 0, ti], (grid[0], grid[1]), method='cubic', fill_value=0.) newgridv[:, :, ti] = griddata(points, val[:, 1, ti], (grid[0], grid[1]), method='cubic', fill_value=0.) print np.max(newgridv[:, :, ti]) print '*********************' interpU_i[:, :, 0, :] = newgridu[:, :, :] # interpU_i[:, :, 0, :] = interpU[:, :, 0,:] interpU_i[:, :, 1, :] = newgridv[:, :, :] # interpU_i[:, :, 1, :] = interpU[:, :, 1,:] print 'toto', points.shape, val.shape, grid.shape print np.max(newgridu) # print grid[0] # else: # interpU_i = velp print 'avection time !' 
for ti in ttt: print 'advection from time ', ti * dt, 'to ', dt * (ti + 1) print 'ti', ti # totou = interp2d(grid[0, :, :], grid[1, :, :], interpU_i[:, :, 0, ti], kind='linear') # print 'x', grid_iini[0, :, 0] # print 'y', grid_iini[1, 0, :] totou = RectBivariateSpline(grid_iini[0, :, 0], grid_iini[1, 0, :], interpU_i[:, :, 0, ti]) # totov = interp2d(grid[0, :, :], grid[1, :, :], interpU_i[:, :, 1, ti], kind='linear') totov = RectBivariateSpline(grid_iini[0, :, 0], grid_iini[1, 0, :], interpU_i[:, :, 1, ti]) print 'interpolator interpolated' for i in xrange(nnx): for j in xrange(nny): grid_i[0, i, j] += totou(grid_i[0, i, j], grid_i[1, i, j]) * dt grid_i[1, i, j] += totov(grid_i[0, i, j], grid_i[1, i, j]) * dt # gradient of the flow map # shadden method dphi = np.empty((nnx, nny, 2, 2)) # 0,0 0,1 # 1,0 1,1 ftle = True if ftle: for i in range(1, nnx - 1): for j in range(1, nny - 1): dphi[i, j, 0, 0] = (grid_i[0, i + 1, j] - grid_i[0, i - 1, j]) / ( grid_iini[0, i + 1, j] - grid_iini[0, i - 1, j]) dphi[i, j, 0, 1] = (grid_i[0, i, j + 1] - grid_i[0, i, j - 1]) / ( grid_iini[1, i, j + 1] - grid_iini[1, i, j - 1]) dphi[i, j, 1, 0] = (grid_i[1, i + 1, j] - grid_i[1, i - 1, j]) / ( grid_iini[0, i + 1, j] - grid_iini[0, i - 1, j]) dphi[i, j, 1, 1] = (grid_i[1, i, j + 1] - grid_i[1, i, j - 1]) / ( grid_iini[1, i, j + 1] - grid_iini[1, i, j - 1]) # bords a l arrache; dphi[0, :, 0, 0] = dphi[1, :, 0, 0] dphi[nnx - 1, :, 0, 0] = dphi[nnx - 2, :, 0, 0] dphi[:, 0, 0, 0] = dphi[:, 1, 0, 0] dphi[:, nny - 1, 0, 0] = dphi[:, nny - 2, 0, 0] dphi[0, :, 0, 1] = dphi[1, :, 0, 1] dphi[nnx - 1, :, 0, 1] = dphi[nnx - 2, :, 0, 1] dphi[:, 0, 0, 1] = dphi[:, 1, 0, 0] dphi[:, nny - 1, 0, 1] = dphi[:, nny - 2, 0, 1] dphi[0, :, 1, 0] = dphi[1, :, 1, 0] dphi[nnx - 1, :, 1, 0] = dphi[nnx - 2, :, 1, 0] dphi[:, 0, 1, 0] = dphi[:, 1, 1, 0] dphi[:, nny - 1, 1, 0] = dphi[:, nny - 2, 1, 0] dphi[0, :, 1, 1] = dphi[1, :, 1, 1] dphi[nnx - 1, :, 1, 1] = dphi[nnx - 2, :, 1, 1] dphi[:, 0, 1, 1] = dphi[:, 1, 1, 1] dphi[:, nny - 1, 1, 1] = dphi[:, nny - 2, 1, 1] gdphi = np.empty((nnx, nny, 2, 2)) for i in xrange(nnx): for j in xrange(nny): gdphi[i, j, :, :] = np.dot(dphi[i, j, :, :].T, dphi[i, j, :, :]) # gdphi[i, j, :, :] = dphi[i, j, :, :]* dphi[i, j, :, :] feuteuleu = np.empty((nnx, nny)) for i in xrange(nnx): for j in xrange(nny): feuteuleu[i, j] = np.log( np.sqrt(np.max(LA.eigvals(gdphi[i, j, :, :])[1]))) pass # feuteuleu[i, j] = np.sqrt(LA.eigvals(gdphi[i, j, :, :])[1]) # print len(LA.eigvals(gdphi[i, j, :, :])) # print LA.eigvals(gdphi[20, 20, :,:]) eigenValues = np.empty( (nnx, nny, 2)) # en chaque point, 2 eigval + 2 eigvec eigenVectors = np.empty( (nnx, nny, 2, 2)) # en chaque point, 2 eigval + 2 eigvec print '111' eigenValues, eigenVectors = LA.eig(gdphi) stamp = time.time() for i in xrange(nnx): for j in xrange(nny): if eigenValues[i, j, 0] < eigenValues[i, j, 0]: print i, j, 'EIG NOT ORDERED DAMMIT' print 'time= %f' % (time.time() - stamp) f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) # print didx.shape Y, X = np.mgrid[0:nx * dx:rr * nx * 1j, 0:ny * dy:rr * ny * 1j] uu = grid_i[0, :, :] - grid_iini[0, :, :] # -grid_iini[0,:,:] vv = grid_i[1, :, :] - grid_iini[1, :, :] # -grid_iini[1,:,:] magx = np.sqrt(uu * uu + vv * vv) U = interpU_i[:, :, 0, 0] V = interpU_i[:, :, 1, 0] magu = np.sqrt(U * U + V * V) # print grid_i[0, 5, :]- grid_iini[0, 5, :] ax1.imshow(uu) ax2.imshow(vv) # ax2.imshow(magx) # ax2.imshow(didy) # ax2.imshow(didy) ax3.imshow(feuteuleu) ax4.imshow(magx) # ax3.quiver(X, Y, U, V, color=magu) # 
    ax4.streamplot(X, Y, uu, vv, density=0.6, color='k', linewidth=magx)
    # plt.show()
    print('-------------------------')
    print('error', np.random.randint(0, 101))
    print('-------------------------')
    return eigenValues, eigenVectors, interpU_i
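# Editor's sketch: the FTLE core above, vectorized with numpy instead of the
# per-pixel eigenvalue loop. `phi` / `phi0` mirror the (2, nnx, nny) layout of
# grid_i / grid_iini; the helper name and the uniform-grid assumption are mine.
import numpy as np

def ftle_from_flow_map(phi, phi0, T=1.0):
    # finite-difference gradient of the flow map, component a along direction b
    dphi = np.empty(phi.shape[1:] + (2, 2))
    for a in range(2):
        for b in range(2):
            dphi[..., a, b] = np.gradient(phi[a], axis=b) / np.gradient(phi0[b], axis=b)
    # Cauchy-Green tensor C = dphi^T dphi, then its largest eigenvalue
    C = np.einsum('...ki,...kj->...ij', dphi, dphi)
    lam_max = np.linalg.eigvalsh(C)[..., -1]
    # FTLE = (1/|T|) * log(sqrt(lambda_max))
    return np.log(np.sqrt(np.clip(lam_max, 1e-30, None))) / abs(T)

# quick check on a simple shear x' = x + y: the largest Cauchy-Green
# eigenvalue is (3 + sqrt(5))/2, so the FTLE is log((1 + sqrt(5))/2) ~ 0.4812
phi0 = np.indices((32, 32)).astype(float)
phi = np.array([phi0[0] + phi0[1], phi0[1]])
print(ftle_from_flow_map(phi, phi0).mean())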
def get_psf(dat,rdat,dmag,x,y,idx,mask=[],fwhm=10.,outfile='psf.fits',max_flux=5.e4,oversamp=1,nsig_clip=5.,dmag_max=0.01,nfwhm=3,nmax=100,spatial_vary=True):
    """
    Determines the mean psf for an image.

      dat: input image data array
      rdat: input rms image data array
      dmag, x, y, idx: sextractor-style magnitude errors, star positions,
          and ids (run_sex1.sh format)
      mask: optional mask image array (0 for star, 1 otherwise)
      fwhm: stellar fwhm
      nfwhm: determine psf out to a radius of nfwhm*fwhm
    """
    psf=[]; resw=[]
    sz0 = nfwhm*int(round(fwhm))
    sz = oversamp*sz0
    sz00 = 8*int(round(fwhm))
    sx,sy = dat.shape
    xx = linspace(-sz0,sz0,2*sz+1)
    rad = sqrt( (xx**2)[:,newaxis] + (xx**2)[newaxis,:] )
    hr = rad<=fwhm

    # try to use only high s/n sources to estimate the psf
    ii = dmag<=dmag_max
    for i in range(3):
        if (ii.sum()<10): ii = dmag<=dmag_max*2
    if (ii.sum()>10): x,y,dmag,idx = x[ii],y[ii],dmag[ii],idx[ii]

    # select only unsaturated sources away from edges and other sources
    n=len(x)
    jj = zeros(n,dtype='bool')
    bg = zeros(n,dtype='float64')
    for i in range(n):
        j,k = int(round(y[i]))-1,int(round(x[i]))-1
        r = rdat[j-sz00:j+sz00+1,k-sz00:k+sz00+1]
        d = dat[j-sz0:j+sz0+1,k-sz0:k+sz0+1]
        dist = sqrt( (x[i]-x)**2+(y[i]-y)**2 )
        dist[i] = 999.
        if ( (r==0).sum() == 0 and r.shape == (1+2*sz00,1+2*sz00) and d.max()<=max_flux and dist.min()>fwhm*2 ):
            jj[i] = True
            if (len(mask)>0):
                d0 = dat[j-sz00:j+sz00+1,k-sz00:k+sz00+1]
                m = mask[j-sz00:j+sz00+1,k-sz00:k+sz00+1]
                if (m.sum()>0):
                    dd0 = d0[m]
                    bg[i] = median(dd0)
                    dbg = 1.48*median(abs(dd0-bg[i]))
                    h = abs(dd0-bg[i])<2*dbg
                    if (h.sum()>0): bg[i] = 2.5*bg[i] - 1.5*dd0[h].mean()

    if (jj.sum()>0):
        x = x[jj]; y = y[jj]; dmag = dmag[jj]
        bg = bg[jj]; idx = idx[jj]; n = jj.sum()

    # only need nmax sources at most
    if (n>nmax):
        x = x[:nmax]; y = y[:nmax]; dmag = dmag[:nmax]
        idx = idx[:nmax]; bg = bg[:nmax]; n = nmax

    # now, create and fill postage stamps around each source
    dat1 = zeros((n,1+2*sz0,1+2*sz0),dtype='float64')
    vdat1 = zeros((n,1+2*sz0,1+2*sz0),dtype='float64')
    xx0 = linspace(-sz0,sz0,2*sz0+1)
    rad0 = sqrt( (xx0**2)[:,newaxis] + (xx0**2)[newaxis,:] )

    # be sure to unmask a suitable region around each star to allow for fitting
    # (the maskfile blocks all stars by default)
    if (len(mask)>0):
        nf = int(floor((rad0.max()-fwhm/4)*4/fwhm))-1
        for i in range(n):
            j,k = int(round(y[i]))-1,int(round(x[i]))-1
            dat1[i] = dat[j-sz0:j+sz0+1,k-sz0:k+sz0+1]
            msk = mask[j-sz0:j+sz0+1,k-sz0:k+sz0+1].copy()
            r1 = fwhm*nfwhm
            for l in range(nf):
                h = (rad0>fwhm+l*fwhm/4.)*(rad0<=fwhm+(l+1)*fwhm/4.)
                if (msk[h].sum()>0.5*h.sum()):
                    r1 = fwhm+(l+1)*fwhm/4.
                    break
            msk[rad0<=r1] = True
            r = rdat[j-sz0:j+sz0+1,k-sz0:k+sz0+1]
            msk[r<=0] = False
            vdat1[i,msk] = r[msk]**2
    else:
        for i in range(n):
            j,k = int(round(y[i]))-1,int(round(x[i]))-1
            dat1[i] = dat[j-sz0:j+sz0+1,k-sz0:k+sz0+1]
            r = rdat[j-sz0:j+sz0+1,k-sz0:k+sz0+1]
            h=r>0
            vdat1[i,h] = r[h]**2

    h = dat1>=max_flux
    dat1[h] = 0.
    vdat1[h] = 0.
    #pyfits.writeto('stars.fits',dat1)

    # correctly (sub-pixel) center the stamps and weights at the sextractor positions
    # we will oversample
    dat = zeros((n,2*sz+1,2*sz+1),dtype='float64')
    vdat = zeros((n,2*sz+1,2*sz+1),dtype='float64')
    dx,dy = y-y.round(),x-x.round()
    for i in range(n):
        dat[i] = RectBivariateSpline(xx0,xx0,dat1[i])(xx+dx[i],xx+dy[i]) - bg[i]
        vdat[i] = RectBivariateSpline(xx0,xx0,vdat1[i])(xx+dx[i],xx+dy[i]).clip(0)
    vdat1=0.; dat1=0.
    # make sure each source is shaped like a majority of the others;
    # sources i and j with matr[i,j] > 0.9 or so have similar shapes (see "score" below)
    dhr = dat[:,hr]; shr = sqrt(vdat[:,hr])
    matr = dot(dhr,dhr.T) - dot(shr,shr.T); matrt = matr.T
    dhr,shr=0.,0.
    dd = diag(matr)
    h = dd>0; norm = sqrt(dd[h])
    matr[:,h] /= norm; matrt[:,h] /= norm
    matr[~h,:] = 0; matr[:,~h] = 0
    f = median(matr,axis=0); matr=0.
    g = f>0.9; ng=g.sum()
    n0 = n
    if (ng>0.5*n) and (g.sum()>5):
        dat = dat[g]; vdat = vdat[g]; n = ng; x = x[g]; y = y[g]; dmag = dmag[g]
        idx = idx[g]

    # iteratively determine the psf:
    # guess psf, determine flux f, refine psf, determine flux,...
    psf = exp(-0.5*(rad*2.35/fwhm)**2)
    f = zeros(n,dtype='float64')
    for i in range(3):
        norm = dot(vdat[:,hr],psf[hr]**2)
        g = norm>0
        f[g] = dot(dat[g][:,hr]*vdat[g][:,hr],psf[hr]) / norm[g]
        g *= f>0
        datf = dat[g]/f[g,newaxis,newaxis]; datf1 = datf.copy()
        h = vdat[g]==0
        datf[h] = inf; datf1[h] = -inf
        psf = median(vstack((datf,datf1)),axis=0)
        psf[isinf(psf)] = 0
        psf[isnan(psf)] = 0

    # now with decent psf and flux estimates, sigma-clip bad pixels;
    # don't use deviant or low snr points to estimate the psf
    h = (dat - f[:,newaxis,newaxis]*psf[newaxis,:,:])**2 > nsig_clip**2*( vdat + (0.1*dat.clip(0))**2 )
    vdat[h] = 0.
    h = vdat > ((f/3.)**2)[:,newaxis,newaxis]*(psf**2)[newaxis,:,:]
    vdat[h] = 0.

    vnorm = dot(vdat[g].T,f[g]**2).T
    h = vnorm>0
    if (h.sum()>0): psf[h] = dot((dat[g]*vdat[g]).T,f[g]).T[h] / vnorm[h]

    # almost done
    if (len(psf)==0):
        sys.stderr.write("PSF determination failed, using Gaussian\n")
        psf = exp(-0.5*(rad*2.35/fwhm)**2)

    norm = psf.sum()/oversamp**2
    psf /= norm
    if (len(resw)>0): resw[0]/=norm
    mdpsf = 1./sqrt( (1./dmag**2).sum() )

    # finally save out some potentially useful information
    sys.stderr.write("PSF Precision: %.2e\n" % mdpsf)
    hdu = pyfits.PrimaryHDU(psf)
    hdu.header['DPSF'] = mdpsf
    hdu.header['FWHM'] = fwhm
    hdu.header['OVERSAM'] = oversamp
    if (len(resw)>0):
        hdu.header['WINGN'] = resw[0]
        hdu.header['WINGI'] = resw[1]
    hdu.writeto(outfile,clobber=True)
    return 1
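# Editor's usage sketch for get_psf (hedged): a tiny synthetic frame with
# three Gaussian stars and a flat rms map; all values are illustrative, and
# the call assumes this module's own imports (numpy namespace, pyfits).
import numpy as np

yy_, xx_ = np.mgrid[:256, :256].astype(float)
img = np.zeros((256, 256))
xs = np.array([60., 120., 200.]); ys = np.array([70., 180., 100.])
for x0, y0 in zip(xs, ys):
    img += 1e3 * np.exp(-0.5 * ((xx_ - x0)**2 + (yy_ - y0)**2) / (4.5 / 2.35)**2)
rms = np.ones_like(img)
dmag = np.full(xs.size, 0.005)   # pretend high-s/n stars
ids = np.arange(xs.size)
get_psf(img, rms, dmag, xs, ys, ids, fwhm=4.5, outfile='psf_test.fits')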
T0_A = thick(0, 0, lim, step, R_A, a_A)  #fm^-2
T0_B = thick(0, 0, lim, step, R_B, a_B)  #fm^-2

#Evaluate density of sources and QS^2.
#First compute TA and TB.
T_A = np.zeros((ll, ll))
T_B = np.zeros((ll, ll))
for j in range(ll):
    for k in range(ll):
        T_A[j, k] = thick(xx[j], yy[k], lim, step, R_A, a_A)
        T_B[j, k] = thick(xx[j], yy[k], lim, step, R_B, a_B)

#To avoid re-computing thickness functions, we interpolate.
#Probability density for nuclei A and B.
n_A = RectBivariateSpline(xx, yy, (Nc**2 - 1) / (32 * np.pi) * Q0_A**2 * T_A / T0_A
                          * 1 / np.log(1 + Q0_A**2 / m**2 * T_A / T0_A))
n_B = RectBivariateSpline(xx, yy, (Nc**2 - 1) / (32 * np.pi) * Q0_B**2 * T_B / T0_B
                          * 1 / np.log(1 + Q0_B**2 / m**2 * T_B / T0_B))

#Saturation scales ^2.
Q2_A = RectBivariateSpline(xx, yy, Q0_A**2 * T_A / T0_A)
Q2_B = RectBivariateSpline(xx, yy, Q0_B**2 * T_B / T0_B)

#Define grid where event-by-event profile will be evaluated.
#Should be able to resolve structures of size 1/Qs ~ 0.2 fm.
#A 1000*1000 grid spanning [-14, 14] fm is good enough for all systems.
size = 1000     #number of cells along one side
dim = 14        #fm, the grid stretches from [-dim, +dim]
grid_step = 2 * dim / size  #fm
area = grid_step**2         #fm^2
#grid coordinates
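# Editor's sketch: the grid coordinates and interpolator evaluation, assuming
# the event grid lies inside the xx/yy domain used above (xg/yg are new,
# illustrative axis names).
xg = np.linspace(-dim, dim, size)
yg = np.linspace(-dim, dim, size)
nA_grid = n_A(xg, yg)    # source density of nucleus A, shape (size, size)
Q2A_grid = Q2_A(xg, yg)  # saturation scale squared on the same grid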
def LucasKanadefunction(initialtemp, initialtemp1, rectpoints, pos0=None):
    # fixed: a mutable default argument (np.zeros(2)) would be shared and
    # mutated across calls
    if pos0 is None:
        pos0 = np.zeros(2)
    threshold = 0.0001  # Threshold for convergence of the parameter update

    # Corners of the template, in the order they are used below:
    # (x1, y1) top-left, (x2, y2) top-right, (x3, y3) bottom-right,
    # (x4, y4) bottom-left
    x1, y1, x2, y2, x3, y3, x4, y4 = (rectpoints[0], rectpoints[1],
                                      rectpoints[2], rectpoints[3],
                                      rectpoints[4], rectpoints[5],
                                      rectpoints[6], rectpoints[7])

    # Intensity gradient of the next frame
    initial_y, initial_x = np.gradient(initialtemp1)

    dp = 1  # Error of the parameter update
    while np.square(dp).sum() > threshold:  # Loop until the update converges
        posx, posy = pos0[0], pos0[1]  # Current parameters

        # Warped corners
        x1_warp, y1_warp = x1 + posx, y1 + posy
        x2_warp, y2_warp = x2 + posx, y2 + posy
        x3_warp, y3_warp = x3 + posx, y3 + posy
        x4_warp, y4_warp = x4 + posx, y4 + posy

        x = np.arange(0, initialtemp.shape[0], 1)
        y = np.arange(0, initialtemp.shape[1], 1)

        # Sample an 87 x 36 grid of points spanning the rectangle
        # (the sampling counts are hard-coded for this template size)
        a1 = np.linspace(x1, x3, 87)  # top-left x to bottom-right x
        b1 = np.linspace(y1, y3, 36)  # top-left y to bottom-right y
        a2 = np.linspace(x4, x2, 87)  # bottom-left x to top-right x
        b2 = np.linspace(y2, y4, 36)  # top-right y to bottom-left y
        a = np.union1d(a1, a2)  # unique union of all the x-coordinates
        b = np.union1d(b1, b2)  # unique union of all the y-coordinates
        aa, bb = np.meshgrid(a, b)

        # The same grid, shifted by the current motion estimate
        a1_warp = np.linspace(x1_warp, x3_warp, 87)
        b1_warp = np.linspace(y1_warp, y3_warp, 36)
        a2_warp = np.linspace(x4_warp, x2_warp, 87)
        b2_warp = np.linspace(y2_warp, y4_warp, 36)
        a_warp = np.union1d(a1_warp, a2_warp)
        b_warp = np.union1d(b1_warp, b2_warp)
        aaw, bbw = np.meshgrid(a_warp, b_warp)

        # Interpolate the template frame and evaluate it on the grid
        spline = RectBivariateSpline(x, y, initialtemp)
        T = spline.ev(bb, aa)

        # Interpolate the next frame and evaluate it on the warped grid
        spline1 = RectBivariateSpline(x, y, initialtemp1)
        warpImg = spline1.ev(bbw, aaw)

        # Error image: template minus warped next frame
        error = T - warpImg
        errorImg = error.reshape(-1, 1)

        # Interpolate the intensity gradients and evaluate on the warped grid
        spline_gx = RectBivariateSpline(x, y, initial_x)
        initial_x_w = spline_gx.ev(bbw, aaw)
        spline_gy = RectBivariateSpline(x, y, initial_y)
        initial_y_w = spline_gy.ev(bbw, aaw)
        # Stack both intensity gradients into an (n, 2) matrix
        I = np.vstack((initial_x_w.ravel(), initial_y_w.ravel())).T
        jacobian = np.array([[1, 0], [0, 1]])  # Jacobian of a pure-translation warp
        hessian = I @ jacobian  # Steepest-descent images (gradient times Jacobian)
        H = hessian.T @ hessian  # Gauss-Newton approximation of the Hessian
        dp = np.linalg.inv(H) @ (hessian.T) @ errorImg  # Parameter update
        pos0[0] += dp[0, 0]
        pos0[1] += dp[1, 0]
    p = pos0
    return p  # Returning the updated parameters
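# Editor's usage sketch: recovering a pure translation between two smooth
# synthetic frames. The corner ordering follows the function (TL, TR, BR, BL);
# all values are illustrative.
import numpy as np

yy, xx = np.mgrid[0:240, 0:320].astype(float)
frame0 = np.sin(xx / 12.0) + np.cos(yy / 9.0)
frame1 = np.sin((xx - 3.0) / 12.0) + np.cos((yy - 2.0) / 9.0)  # shifted by (dx, dy) = (3, 2)
rect = np.array([100., 60., 186., 60., 186., 95., 100., 95.])  # TL, TR, BR, BL
p = LucasKanadefunction(frame0, frame1, rect)
print('recovered motion (dx, dy):', p)  # expect roughly (3, 2)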
    def load_templates(self, template_fname):
        '''
        Load in stellar template colors from an ASCII file. The colors
        should be stored in the following format:

        #
        # Arbitrary comments
        #
        # Mr    FeH    gr      ri      iz      zy
        -1.00  -2.50  0.5132  0.2444  0.1875  0.0298
        -0.99  -2.50  0.5128  0.2442  0.1873  0.0297
        ...

        or something similar. A key point is that there be a row in the
        comments that lists the names of the colors. The code identifies
        this row by the presence of both 'Mr' and 'FeH' in the row, as
        above. The file must be whitespace-delimited, and any whitespace
        will do (note that the whitespace is not required to be regular).
        '''
        f = open(abspath(template_fname), 'r')
        row = []
        self.color_name = ['gr', 'ri', 'iz', 'zy']
        for l in f:
            line = l.rstrip().lstrip()
            if len(line) == 0:    # Empty line
                continue
            if line[0] == '#':    # Comment
                if ('Mr' in line) and ('FeH' in line):
                    try:
                        self.color_name = line.split()[3:]
                    except Exception:
                        pass
                continue
            data = line.split()
            if len(data) < 6:
                print('Error reading in stellar templates.')
                print('The following line does not have the correct number of entries (6 expected):')
                print(line)
                return 0
            row.append([float(s) for s in data])
        f.close()
        template = np.array(row, dtype=np.float64)

        # Organize data into record array
        dtype = [('Mr','f4'), ('FeH','f4')]
        for c in self.color_name:
            dtype.append((c, 'f4'))
        self.data = np.empty(len(template), dtype=dtype)
        self.data['Mr'] = template[:,0]
        self.data['FeH'] = template[:,1]
        for i,c in enumerate(self.color_name):
            self.data[c] = template[:,i+2]

        self.MrFeH_bounds = [[np.min(self.data['Mr']), np.max(self.data['Mr'])],
                             [np.min(self.data['FeH']), np.max(self.data['FeH'])]]

        # Produce interpolating class with data
        self.Mr_coords = np.unique(self.data['Mr'])
        self.FeH_coords = np.unique(self.data['FeH'])
        self.interp = {}
        for c in self.color_name:
            tmp = self.data[c][:]
            tmp.shape = (len(self.FeH_coords), len(self.Mr_coords))
            self.interp[c] = RectBivariateSpline(self.Mr_coords,
                                                 self.FeH_coords,
                                                 tmp.T, kx=3, ky=3, s=0)
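# Editor's sketch: the reshape/transpose convention used above, demonstrated
# on synthetic data (a regular Mr x FeH grid stored with Mr varying fastest,
# as in the template files; values are illustrative).
import numpy as np
from scipy.interpolate import RectBivariateSpline

Mr = np.linspace(-1.0, 16.0, 35)
FeH = np.linspace(-2.5, 0.0, 11)
gr = 0.5 + 0.05 * Mr[None, :] + 0.1 * FeH[:, None]   # shape (len(FeH), len(Mr))
interp_gr = RectBivariateSpline(Mr, FeH, gr.T, kx=3, ky=3, s=0)
print(interp_gr(5.0, -0.5)[0, 0])   # ~0.70 for this linear toy color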
def create_deformation_field(frame, x, y, u, v, interpolation_order=3):
    """
    Deform an image by window deformation, where a new grid is defined based
    on the grid and displacements of the previous pass, and pixel values are
    interpolated onto the new grid.

    Parameters
    ----------
    frame : 2d np.ndarray, dtype=np.int32
        a two-dimensional array of integers containing the grey levels of
        the first frame.

    x : 2d np.ndarray
        a two dimensional array containing the x coordinates of the
        interrogation window centers, in pixels.

    y : 2d np.ndarray
        a two dimensional array containing the y coordinates of the
        interrogation window centers, in pixels.

    u : 2d np.ndarray
        a two dimensional array containing the u velocity component,
        in pixels/seconds.

    v : 2d np.ndarray
        a two dimensional array containing the v velocity component,
        in pixels/seconds.

    interpolation_order: scalar
        the degree of the interpolation of the B-splines over the
        rectangular mesh

    Returns
    -------
    x, y : new grid (after meshgrid)
    u, v : deformation field
    """
    y1 = y[:, 0]  # extract first column from meshgrid
    x1 = x[0, :]  # extract first row from meshgrid
    side_x = np.arange(frame.shape[1])  # the full image grid
    side_y = np.arange(frame.shape[0])

    # interpolating displacements onto the new meshgrid;
    # note: the call convention differs from MATLAB's interp2
    ip = RectBivariateSpline(y1, x1, u, kx=interpolation_order,
                             ky=interpolation_order)
    ut = ip(side_y, side_x)

    ip2 = RectBivariateSpline(y1, x1, v, kx=interpolation_order,
                              ky=interpolation_order)
    vt = ip2(side_y, side_x)

    x, y = np.meshgrid(side_x, side_y)

    # plt.figure()
    # plt.quiver(x1, y1, u, -v, color='r')
    # plt.quiver(x, y, ut, -vt)
    # plt.gca().invert_yaxis()
    # plt.show()

    return x, y, ut, vt
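# Editor's usage sketch for create_deformation_field: upsampling a coarse
# 4 x 4 PIV displacement field to per-pixel resolution (synthetic values).
import numpy as np

frame = np.zeros((64, 64), dtype=np.int32)
xg, yg = np.meshgrid(np.arange(8, 64, 16), np.arange(8, 64, 16))
u = 2.0 * np.ones_like(xg, dtype=float)   # uniform 2 px displacement in x
v = np.zeros_like(u)                      # no displacement in y
X, Y, ut, vt = create_deformation_field(frame, xg, yg, u, v)
print(ut.shape, float(ut.mean()))         # (64, 64) ~2.0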
print('diagnostic: (python) Σi zi ρi =', sumrhoz) # resize the density data nx2, ny2 = args.nside + 1, args.nside + 1 Lx, Ly = nx2 - 1.0, ny2 - 1.0 x = np.linspace(0.0, Lx, nx) y = np.linspace(0.0, Ly, ny) x2 = np.linspace(0.0, Lx, nx2) y2 = np.linspace(0.0, Ly, ny2) rho2 = np.zeros((ncmp, nx2, ny2)) for k in range(ncmp): spline = RectBivariateSpline(y, x, rho[k, :, :]) rho2[k, :, :] = spline(y2, x2) # create and save the revised data and run controls rc_file = args.name + '.rc' with open(rc_file, 'w') as fp: fp.write(args.name + '\n') fp.write(' '.join(ions) + '\n') fp.write('%i %i %i' % (nx2, ny2, ncmp) + ' :: nx ny ncmp\n') fp.write(' '.join(['%i' % val for val in bc]) + ' :: boundary_conditions\n') fp.write('\t'.join(['%i' % val for val in z]) + ' :: z\n') fp.write('\t'.join(['%g' % val for val in d]) + ' :: d\n') fp.write('%g :: dt\n' % args.dt)
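# Editor's note (sketch): RectBivariateSpline takes the first axis of the data
# array as its first coordinate, so for row-major (y, x) arrays the y axis
# comes first, exactly as in the resize loop above. A quick self-check:
import numpy as np
from scipy.interpolate import RectBivariateSpline

yv = np.arange(5.0)
xv = np.arange(7.0)
data = yv[:, None] * 10 + xv[None, :]   # data[j, i] = 10*y + x
spl = RectBivariateSpline(yv, xv, data)
print(float(spl(3.0, 2.0)[0, 0]))       # 32.0: y first, then x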
def multipass_img_deform(
    frame_a,
    frame_b,
    current_iteration,
    x_old,
    y_old,
    u_old,
    v_old,
    settings,
    mask_coords=[],
):
    """
    Multi pass of the PIV evaluation.

    This function does the PIV evaluation of the second and other passes.
    It returns the coordinates of the interrogation window centres, the
    displacements u, v for each interrogation window, as well as the signal
    to noise ratio array (which is full of NaNs if opted out).

    Parameters
    ----------
    frame_a : 2d np.ndarray
        the first image

    frame_b : 2d np.ndarray
        the second image

    current_iteration : int
        index of the current pass; used to look up the window size and
        overlap in `settings`

    x_old : 2d np.ndarray
        the x coordinates of the vector field of the previous pass

    y_old : 2d np.ndarray
        the y coordinates of the vector field of the previous pass

    u_old : 2d np.ndarray
        the u displacement of the vector field of the previous pass;
        in case of an image mask, u_old and v_old are MaskedArrays

    v_old : 2d np.ndarray
        the v displacement of the vector field of the previous pass

    settings : object
        carries the per-pass parameters: window sizes and overlaps, the
        subpixel method ('centroid', 'gaussian' or 'parabolic'), the
        deformation method, the interpolation order used for the image
        deformation, and the validation options

    mask_coords : list of x,y coordinates (pixels) of the image mask,
        default is an empty list

    Returns
    -------
    x : 2d np.array
        array containing the x coordinates of the interrogation window centres

    y : 2d np.array
        array containing the y coordinates of the interrogation window centres

    u : 2d np.array
        array containing the horizontal displacement for every interrogation
        window [pixels]

    v : 2d np.array
        array containing the vertical displacement for every interrogation
        window [pixels]

    s2n : 2D np.array of signal to noise ratio values

    """
    if not isinstance(u_old, np.ma.MaskedArray):
        raise ValueError('Expected masked array')

    # calculate the x and y coordinates of the interrogation window centres.
    # The edges must be extracted to provide the sufficient input: x_old and
    # y_old are the coordinates of the old grid, x_int and y_int are the
    # coordinates of the new grid
    window_size = settings.windowsizes[current_iteration]
    overlap = settings.overlap[current_iteration]

    x, y = get_rect_coordinates(frame_a.shape, window_size, overlap)

    # The interpolation functions don't like meshgrids as input.
    # Also, the coordinate system for y runs from top to bottom while
    # RectBivariateSpline wants an increasing set.
    y_old = y_old[:, 0]
    x_old = x_old[0, :]
    y_int = y[:, 0]
    x_int = x[0, :]

    # interpolating the displacements from the old grid onto the new grid;
    # y before x because numpy is row-major
    ip = RectBivariateSpline(y_old, x_old, u_old.filled(0.))
    u_pre = ip(y_int, x_int)
    ip2 = RectBivariateSpline(y_old, x_old, v_old.filled(0.))
    v_pre = ip2(y_int, x_int)

    if settings.show_all_plots:
        plt.figure()
        plt.quiver(x_old, y_old, u_old, -1 * v_old, color='b')
        plt.quiver(x_int, y_int, u_pre, -1 * v_pre, color='r', lw=2)
        plt.gca().set_aspect(1.)
        plt.gca().invert_yaxis()
        plt.title('inside deform, invert')
        plt.show()

    # @TKauefer added another method to the window deformation: 'symmetric'
    # splits the deformation onto both frames; it takes more effort due to
    # the additional interpolation, but should deliver better results
    old_frame_a = frame_a.copy()
    old_frame_b = frame_b.copy()

    # Image deformation has to occur in image coordinates, therefore we need
    # to convert the results of the previous pass, which are stored in
    # physical units (and so is y from get_coordinates)
    if settings.deformation_method == "symmetric":
        # this one is doing the image deformation (see above)
        x_new, y_new, ut, vt = create_deformation_field(
            frame_a, x, y, u_pre, v_pre)
        frame_a = scn.map_coordinates(frame_a,
                                      ((y_new - vt / 2, x_new - ut / 2)),
                                      order=settings.interpolation_order,
                                      mode='nearest')
        frame_b = scn.map_coordinates(frame_b,
                                      ((y_new + vt / 2, x_new + ut / 2)),
                                      order=settings.interpolation_order,
                                      mode='nearest')
    elif settings.deformation_method == "second image":
        frame_b = deform_windows(
            frame_b, x, y, u_pre, -v_pre,
            interpolation_order=settings.interpolation_order)
    else:
        raise Exception("Deformation method is not valid.")

    if settings.show_all_plots:
        if settings.deformation_method == 'symmetric':
            plt.figure()
            plt.imshow(frame_a - old_frame_a)
            plt.show()
        plt.figure()
        plt.imshow(frame_b - old_frame_b)
        plt.show()

    # we use the default circular, not normalized, correlation here;
    # if we did not want to validate every step, remove the method
    if settings.sig2noise_validate is False:
        settings.sig2noise_method = None

    u, v, s2n = extended_search_area_piv(
        frame_a,
        frame_b,
        window_size=window_size,
        overlap=overlap,
        width=settings.sig2noise_mask,
        subpixel_method=settings.subpixel_method,
        sig2noise_method=settings.sig2noise_method,
        correlation_method=settings.correlation_method,
        normalized_correlation=settings.normalized_correlation,
        use_vectorized=settings.use_vectorized,
    )

    shapes = np.array(get_field_shape(frame_a.shape, window_size, overlap))
    u = u.reshape(shapes)
    v = v.reshape(shapes)
    s2n = s2n.reshape(shapes)

    u += u_pre
    v += v_pre

    # reapply the image mask to the new grid
    if settings.image_mask:
        grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
        u = np.ma.masked_array(u, mask=grid_mask)
        v = np.ma.masked_array(v, mask=grid_mask)
    else:
        u = np.ma.masked_array(u, np.ma.nomask)
        v = np.ma.masked_array(v, np.ma.nomask)

    # validate in the multi-pass by default
    u, v, mask = validation.typical_validation(u, v, s2n, settings)

    if np.all(mask):
        raise ValueError("Something happened in the validation")

    if not isinstance(u, np.ma.MaskedArray):
        raise ValueError('not a masked array anymore')

    if settings.show_all_plots:
        plt.figure()
        nans = mask.astype(bool)  # fixed: np.nonzero returns an index tuple, which cannot be negated with ~
plt.quiver(x[~nans], y[~nans], u[~nans], -v[~nans], color='b') plt.quiver(x[nans], y[nans], u[nans], -v[nans], color='r') plt.gca().invert_yaxis() plt.gca().set_aspect(1.) plt.title('After sig2noise, inverted') plt.show() # we have to replace outliers u, v = filters.replace_outliers( u, v, method=settings.filter_method, max_iter=settings.max_filter_iteration, kernel_size=settings.filter_kernel_size, ) # reapply the image mask to the new grid if settings.image_mask: grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords) u = np.ma.masked_array(u, mask=grid_mask) v = np.ma.masked_array(v, mask=grid_mask) else: u = np.ma.masked_array(u, np.ma.nomask) v = np.ma.masked_array(v, np.ma.nomask) if settings.show_all_plots: plt.figure() plt.quiver(x, y, u, -v, color='r') plt.quiver(x, y, u_pre, -1 * v_pre, color='b') plt.gca().invert_yaxis() plt.gca().set_aspect(1.) plt.title(' after replaced outliers, red, invert') plt.show() return x, y, u, v, s2n, mask
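# Editor's sketch: the pass-to-pass displacement interpolation used above, in
# isolation (a coarse 32 px grid splined onto a finer 16 px grid; synthetic).
import numpy as np
from scipy.interpolate import RectBivariateSpline

y_old = np.arange(16.0, 128.0, 32.0)
x_old = np.arange(16.0, 128.0, 32.0)
u_old = np.add.outer(0.1 * y_old, 0.05 * x_old)   # smooth synthetic field
y_int = np.arange(8.0, 128.0, 16.0)
x_int = np.arange(8.0, 128.0, 16.0)
u_pre = RectBivariateSpline(y_old, x_old, u_old)(y_int, x_int)
print(u_old.shape, '->', u_pre.shape)             # (4, 4) -> (8, 8)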
def deft_2d(xis_raw, yis_raw, bbox, G=20, alpha=2, num_samples=0, tol=1E-3,
            ti_shift=-1, tf_shift=0, verbose=False, details=False):
    '''
    Performs DEFT density estimation in 2D

    Args:
        xis_raw: The x-data, comprising a list (or scipy array) of real
            numbers. Data falling outside bbox is discarded.

        yis_raw: The y-data, comprising a list (or scipy array) of real
            numbers. Data falling outside bbox is discarded.

        bbox: The domain (bounding box) on which density estimation is
            performed. This should be of the form [xmin, xmax, ymin, ymax].
            Density estimation will be performed on G grid points evenly
            spaced within this interval. Max and min grid points are placed
            half a grid spacing in from the boundaries.

        G: Number of grid points to use in each dimension. Total number of
            gridpoints used in the calculation is G**2.

        alpha: The smoothness parameter, specifying which derivative of the
            field is constrained by the prior. May be any integer >= 2.

        num_samples: The number of density estimates to sample from the
            Bayesian posterior. If zero is passed, no samples are drawn.

        ti_shift: Initial integration t is set to
            t_i = log[ (2*pi)**(2*alpha) * L**(2-2*alpha) ] + ti_shift
            (see Eq. 10)

        tf_shift: Final integration t is set to
            t_f = log[ (2*pi*G)**(2*alpha) * L**(2-2*alpha) ] + tf_shift
            (see Eq. 10)

        verbose: If True, user feedback is provided

        details: If True, calculation details are returned along with the
            density estimate Q_star_func

    Returns:
        Q_star_func: A function, defined within bbox, providing a cubic
            spline interpolation of the maximum a posteriori density estimate.

        results: Returned only if `details' is set to be True. Contains
            detailed information about the density estimation calculation.
    '''

    # Time execution
    start_time = time.time()

    # Check data types
    L = G
    V = G**2
    assert G == sp.floor(G)
    assert len(xis_raw) > 1
    assert num_samples >= 0 and num_samples == sp.floor(num_samples)
    assert 2 * alpha - 2 >= 1
    assert len(bbox) == 4

    ###
    ### Draw grid and histogram data
    ###

    # Make sure xis and yis are numpy arrays
    xis_raw = sp.array(xis_raw)
    yis_raw = sp.array(yis_raw)  # fixed: yis_raw was never converted

    # Check the bbox, and keep only data that falls within it
    xlb = bbox[0]
    xub = bbox[1]
    assert (xub - xlb > 0)
    ylb = bbox[2]
    yub = bbox[3]
    assert (yub - ylb > 0)

    # Throw away data not within bbox
    indices = (xis_raw >= xlb) & (xis_raw <= xub) & \
              (yis_raw >= ylb) & (yis_raw <= yub)
    xis = xis_raw[indices]
    yis = yis_raw[indices]

    # Determine number of data points within bbox
    N = len(xis)

    # Compute edges for histogramming
    xedges = sp.linspace(xlb, xub, L + 1)
    dx = xedges[1] - xedges[0]
    yedges = sp.linspace(ylb, yub, L + 1)
    dy = yedges[1] - yedges[0]

    # Compute grid for binning
    xgrid = xedges[:-1] + dx / 2
    ygrid = yedges[:-1] + dy / 2

    # Convert to z = normalized x
    xzis = (xis - xlb) / dx
    yzis = (yis - ylb) / dy  # fixed: was dividing by dx
    xzedges = (xedges - xlb) / dx
    yzedges = (yedges - ylb) / dy

    # Histogram data
    [H, xxx, yyy] = sp.histogram2d(xzis, yzis, [xzedges, yzedges])
    R = H / N
    R_flat = R.flatten()
    R_col = sp.mat(R_flat).T

    ###
    ### Use weak-field approximation to get phi_0
    ###

    # Compute fourier transform of data
    R_ks = fft2(R)

    # Set constant component of data FT to 0
    R_ks[0, 0] = 0  # fixed: R_ks[0] = 0 zeroed the whole first row

    # Set mode numbers separately for even G and for odd G
    if G % 2 == 0:
        # If G is even: ks = [0, 1, ..., G/2, -G/2+1, ..., -1]
        ks = sp.concatenate((sp.arange(0, G / 2 + 1), sp.arange(1 - G / 2, 0)))
    else:
        # If G is odd: ks = [0, 1, ..., (G-1)/2, -(G-1)/2, ..., -1]
        ks = sp.concatenate(
            (sp.arange(0, (G - 1) / 2 + 1), sp.arange(-(G - 1) / 2, 0)))

    # Mode numbers corresponding to fourier transform
    A = sp.mat(sp.tile((2.0 * sp.pi * ks / G)**2.0, [G, 1]))
    B = A.T
    tau_ks = sp.log(V * sp.array(A + B)**alpha)
    tau_ks[0, 0] = 1  # This number is actually irrelevant since R_ks[0,0] = 0

    # Set t_i to the \ell_i value in Eq. 10 plus shift
    t_i = sp.log(((2 * sp.pi)**(2 * alpha)) * G**(2.0 - 2.0 * alpha)) + ti_shift

    # Set t_f to the \ell_f value in Eq. 10 plus shift
    t_f = sp.log(((2 * sp.pi * G)**(2 * alpha)) * G**(2.0 - 2.0 * alpha)) + tf_shift

    # Compute Fourier components of phi
    phi_ks = -(G**2) * sp.array(R_ks) / sp.array((1.0 + sp.exp(tau_ks - t_i)))

    # Invert the Fourier transform to get the weak-field approximation of phi
    phi0 = sp.ravel(sp.real(ifft2(phi_ks)))

    ###
    ### Integrate ODE
    ###

    # Build 2D Laplacian matrix
    delsq_2d = (-4.0 * sp.eye(V) + sp.eye(V, V, -1) + sp.eye(V, V, +1) +
                sp.eye(V, V, -G) + sp.eye(V, V, +G) + sp.eye(V, V, -V + 1) +
                sp.eye(V, V, V - 1) + sp.eye(V, V, -V + G) + sp.eye(V, V, V - G))

    # Make Delta, and make it sparse
    Delta = csr_matrix(-delsq_2d)**alpha

    # This is the key function: computes the derivative of phi for the ODE integration
    def this_flow(t, phi):
        # Compute distribution Q corresponding to phi
        Q = sp.exp(-phi) / V
        A = Delta + sp.exp(t) * diags(Q, 0)
        dphidt = sp.real(spsolve(A, sp.exp(t) * (Q - R_flat)))
        # Return the time derivative of phi
        return dphidt

    backend = 'vode'
    solver = ode(this_flow).set_integrator(backend, nsteps=1, atol=tol, rtol=tol)
    solver.set_initial_value(phi0, t_i)
    # suppress Fortran-printed warning
    solver._integrator.iwork[2] = -1
    warnings.filterwarnings("ignore", category=UserWarning)

    # Make containers
    phis = []
    ts = []
    log_evidence = []
    Qs = []
    ells = []
    log_dets = []

    # Keep the initial phi to check that the integration is working well
    phi0_col = sp.mat(phi0).T
    kinetic0 = (phi0_col.T * Delta * phi0_col)[0, 0]

    # Integrate phi over the specified t range.
    integration_start_time = time.time()
    keep_going = True
    max_log_evidence = -sp.Inf
    coeff = 1.0
    while solver.t < t_f and keep_going:

        # Step integrator
        solver.integrate(t_f, step=True)

        # Compute the lengthscale corresponding to this t
        phi = solver.y
        t = solver.t
        beta = N * sp.exp(-t)
        ell = (N / sp.exp(t))**(1.0 / (2.0 * alpha - 2.0))

        # Compute new distribution
        Q = sp.exp(-phi) / sum(sp.exp(-phi))
        phi_col = sp.mat(phi).T

        # Check that S[phi] < S[phi0]. If not, phi may be corrupted because
        # the integration had to solve a degenerate system of equations.
        # In this case, set phi = phi0 and restart the integration from there.
        S = 0.5 * (phi_col.T * Delta * phi_col)[0, 0] + sp.exp(t) * (
            R_col.T * phi_col)[0, 0] + sp.exp(t) * sum(sp.exp(-phi) / G)
        S0 = 0.5 * (phi0_col.T * Delta * phi0_col)[0, 0] + sp.exp(t) * (
            R_col.T * phi0_col)[0, 0] + sp.exp(t) * sum(sp.exp(-phi0) / G)
        if S0 < S:
            t_i = t_i + 0.5
            solver = ode(this_flow).set_integrator(backend, nsteps=1,
                                                   atol=tol, rtol=tol)
            solver.set_initial_value(phi0, t_i)
            # Reset containers
            phis = []
            ts = []
            log_evidence = []
            Qs = []
            ells = []
            log_dets = []
            keep_going = True
            max_log_evidence = -sp.Inf
            # print('Restarting integration at t_i = %f' % t_i)
        else:
            # Compute betaS directly again to minimize multiplying very large
            # numbers by very small numbers.
            # Also, subtract the initial kinetic term
            betaS = beta * 0.5 * (phi_col.T * Delta * phi_col - kinetic0)[0, 0] \
                + N * (R_col.T * phi_col)[0, 0] + N

            # Compute the log determinant of Lambda
            Lambda = Delta + sp.exp(t) * sp.diag(Q)
            log_det_Lambda, coeff = get_log_det_Lambda(Lambda, coeff)
            log_evidence_value = -betaS - 0.5 * log_det_Lambda + 0.5 * alpha * t

            if log_evidence_value > max_log_evidence:
                max_log_evidence = log_evidence_value
            if (log_evidence_value < max_log_evidence - 300) and (ell < G):
                keep_going = False

            # Record results
            phis.append(phi)
            ts.append(t)
            ells.append(ell)
            Qs.append(Q)
            log_evidence.append(log_evidence_value)
            log_dets.append(log_det_Lambda)

    warnings.resetwarnings()
    if verbose:
        print('Integration took %0.2f seconds.' %
              (time.time() - integration_start_time))

    # Set ts and ells
    ts = sp.array(ts)
    phis = sp.array(phis)
    Qs = sp.array(Qs)
    ells = sp.array(ells)
    log_evidence = sp.array(log_evidence)

    # Normalize weights for the different ells and save
    # (fixed: floor the weights in log space before exponentiating)
    log_ell_weights = log_evidence - max(log_evidence)
    log_ell_weights[log_ell_weights < -100] = -100.0
    ell_weights = sp.exp(log_ell_weights)
    assert (not sum(ell_weights) == 0)
    assert (all(sp.isfinite(ell_weights)))
    ell_weights /= sum(ell_weights)

    # Find the best lengthscale
    i_star = sp.argmax(log_evidence)
    phi_star = phis[i_star, :]
    Q_star = sp.reshape(sp.exp(-phi_star) / sum(sp.exp(-phi_star)), [G, G])
    ell_star = ells[i_star]
    t_star = ts[i_star]
    M_star = sp.exp(t_star)

    ###
    ### Sample from posterior (only if requested)
    ###

    # Get ell range
    log_ells_raw = sp.log(ells)[::-1]
    log_ell_i = max(log_ells_raw)
    log_ell_f = min(log_ells_raw)

    # Interpolate phis at K lengthscales
    K = 1000
    phis_raw = phis[::-1, :]
    log_ells_grid = sp.linspace(log_ell_f, log_ell_i, K)

    # Create function to get interpolated phis
    phis_interp_func = interp1d(log_ells_raw, phis_raw, axis=0, kind='cubic')

    # Compute weights for each ell on the fine grid
    log_weights_func = interp1d(log_ells_raw, sp.log(ell_weights[::-1]),
                                kind='cubic')
    log_weights_grid = log_weights_func(log_ells_grid)
    weights_grid = sp.exp(log_weights_grid)
    weights_grid /= sum(weights_grid)

    # If user requests samples
    if num_samples > 0:
        Lambda_star = (ell_star**(2.0 * alpha - 1.0)) * (
            Delta + M_star * sp.diag(sp.ravel(Q_star)))
        # fixed: sp.diag on the 2D Q_star extracted its diagonal instead of
        # building the V x V diagonal matrix
        eigvals, eigvecs = eig(Lambda_star)

        # Lambda_star is Hermitian; shouldn't need to do this
        eigvals = sp.real(eigvals)
        eigvecs = sp.real(eigvecs)

        # Initialize container variables
        Qs_sampled = sp.zeros([num_samples, G, G])
        phis_sampled = sp.zeros([num_samples, V])
        log_ells_sampled = sp.zeros([num_samples])

        for j in range(num_samples):

            # First choose a classical path based on the weights
            i = choice(K, p=weights_grid)
            phi_cl = phis_interp_func(log_ells_grid[i])
            log_ells_sampled[j] = log_ells_grid[i]

            # Draw random amplitudes for all modes and compute dphi
            etas = randn(V)  # fixed: need one amplitude per mode (V = G**2), not L
            dphi = sp.ravel(
                sp.real(sp.mat(eigvecs) * sp.mat(etas / sp.sqrt(eigvals)).T))

            # Record final sampled phi
            phi = phi_cl + dphi
            Qs_sampled[j, :, :] = sp.reshape(
                sp.exp(-phi) / sum(sp.exp(-phi)), [G, G])

    ###
    ### Return results (with everything in correct length units!)
    ###
    results = Results()
    results.G = G
    results.ts = ts
    results.N = N
    results.alpha = alpha
    results.num_samples = num_samples
    results.xgrid = xgrid
    results.bbox = bbox
    results.dx = dx
    results.dy = dy

    # Everything with units of length gets multiplied by dx!!!
    results.ells = [ells * dx, ells * dy]
    results.L = [G * dx, G * dy]
    results.V = V * dx * dy
    results.h = [dx, dy]
    results.Qs = Qs / (dx * dy)
    results.phis = phis
    results.log_evidence = log_evidence

    # Compute star results
    phi_star = sp.reshape(phi_star, [G, G])
    results.phi_star = deepcopy(phi_star)
    results.Q_star = deepcopy(Q_star) / (dx * dy)
    results.i_star = i_star
    results.ell_star = [ells[i_star] * dx, ells[i_star] * dy]
    bbox = [xlb, xub, ylb, yub]

    # Create the interpolated Q_star on a grid extended to the bbox edges
    extended_xgrid = sp.zeros(G + 2)
    extended_xgrid[1:-1] = xgrid
    extended_xgrid[0] = xlb
    extended_xgrid[-1] = xub

    extended_ygrid = sp.zeros(L + 2)
    extended_ygrid[1:-1] = ygrid
    extended_ygrid[0] = ylb
    extended_ygrid[-1] = yub

    # Extend grid for phi_star for interpolating function
    extended_phi_star = sp.zeros([G + 2, G + 2])
    extended_phi_star[1:-1, 1:-1] = phi_star

    # Get rows
    row = 0.5 * (phi_star[0, :] + phi_star[-1, :])
    extended_phi_star[0, 1:-1] = row
    extended_phi_star[-1, 1:-1] = row

    # Get cols
    col = 0.5 * (phi_star[:, 0] + phi_star[:, -1])
    extended_phi_star[1:-1, 0] = col
    extended_phi_star[1:-1, -1] = col

    # Get remaining corners, which share the same value
    corner = 0.25 * (row[0] + row[-1] + col[0] + col[-1])
    extended_phi_star[0, 0] = corner
    extended_phi_star[0, -1] = corner
    extended_phi_star[-1, 0] = corner
    extended_phi_star[-1, -1] = corner

    # Finally, compute the interpolated function
    phi_star_func = RectBivariateSpline(extended_xgrid, extended_ygrid,
                                        extended_phi_star, bbox=bbox)
    Z = sp.sum((dx * dy) * sp.exp(-phi_star))

    def Q_star_func(x, y):
        return sp.exp(-phi_star_func(x, y)) / Z

    # If samples are requested, return those too
    if num_samples > 0:
        results.phis_sampled = phis_sampled
        results.Qs_sampled = Qs_sampled / (dx * dy)
        results.ells_sampled = sp.exp(log_ells_sampled) * dx * dy

    # Stop time
    time_elapsed = time.time() - start_time
    if verbose:
        print('deft_2d: %1.2f sec for alpha = %d, G = %d, N = %d' %
              (time_elapsed, alpha, G, N))

    if details:
        return Q_star_func, results
    else:
        return Q_star_func
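# Editor's usage sketch for deft_2d: estimating a 2D Gaussian density from
# samples (num_samples=0, so no posterior draws; values illustrative and the
# call assumes this module's imports).
import numpy as np

np.random.seed(0)
xs = np.random.randn(1000)
ys = 0.5 * np.random.randn(1000)
Q = deft_2d(xs, ys, bbox=[-4, 4, -4, 4], G=20, alpha=2)
print(Q(0.0, 0.0))   # density near the mode, ~1/(2*pi*0.5) for this toy case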
def sparsery(ops):
    """ bin ops['reg_file'] then detect ROIs using correlations in time

    Parameters
    ----------------

    ops : dictionary
        'reg_file', 'Ly', 'Lx', 'yrange', 'xrange', 'tau', 'fs', 'nframes',
        'high_pass', 'batch_size'

    Returns
    ----------------

    ops : dictionary
        adds 'max_proj', 'Vcorr', 'Vmap', 'Vsplit'

    stat : array of dicts
        list of ROIs

    """
    rez, max_proj = utils.bin_movie(ops)
    ops['max_proj'] = max_proj
    nbinned, Lyc, Lxc = rez.shape  # cropped size
    ops['Lyc'] = Lyc
    ops['Lxc'] = Lxc
    sdmov = utils.get_sdmov(rez, ops)
    rez /= sdmov

    # subtract low-pass filtered version of binned movie
    rez = neuropil_subtraction(rez, ops['spatial_hp'])

    LL = np.meshgrid(np.arange(Lxc), np.arange(Lyc))
    gxy = [np.array(LL).astype('float32')]
    dmov = rez
    movu = []

    # downsample movie at various spatial scales
    Lyp = np.zeros(5, 'int32')  # downsampled sizes
    Lxp = np.zeros(5, 'int32')
    for j in range(5):
        # convolve
        movu.append(square_conv2(dmov, 3))
        # downsample
        dmov = 2 * downsample(dmov)
        gxy0 = downsample(gxy[j], False)
        gxy.append(gxy0)
        nbinned, Lyp[j], Lxp[j] = movu[j].shape

    # find maximum spatial scale for each pixel
    V0 = []
    ops['Vmap'] = []
    for j in range(len(movu)):
        V0.append(movu[j].max(axis=0))
        ops['Vmap'].append(V0[j].copy())

    # spline over scales
    I = np.zeros((len(gxy), gxy[0].shape[1], gxy[0].shape[2]))
    for t in range(1, len(gxy) - 1):
        gmodel = RectBivariateSpline(gxy[t][1, :, 0], gxy[t][0, 0, :],
                                     ops['Vmap'][t],
                                     kx=min(3, gxy[t][1, :, 0].size - 1),
                                     ky=min(3, gxy[t][0, 0, :].size - 1))
        I[t] = gmodel(gxy[0][1, :, 0], gxy[0][0, 0, :])
    I0 = I.max(axis=0)
    ops['Vcorr'] = I0

    # find best scale based on scale of top peaks (used to set threshold)
    imap = np.argmax(I, axis=0).flatten()
    ipk = np.abs(I0 - maximum_filter(I0, size=(11, 11))).flatten() < 1e-4
    isort = np.argsort(I0.flatten()[ipk])[::-1]
    im, nm = mode(imap[ipk][isort[:50]])
    if ops['spatial_scale'] > 0:
        im = max(1, min(4, ops['spatial_scale']))
        fstr = 'FORCED'
    else:
        fstr = 'estimated'

    if im == 0:
        print('ERROR: best scale was 0, everything should break now!')

    # threshold for accepted peaks (scale it by spatial scale)
    Th2 = ops['threshold_scaling'] * 5 * max(1, im)
    vmultiplier = max(1, np.float32(rez.shape[0]) / 1200)
    print('NOTE: %s spatial scale ~%d pixels, time epochs %2.2f, threshold %2.2f ' %
          (fstr, 3 * 2**im, vmultiplier, vmultiplier * Th2))
    ops['spatscale_pix'] = 3 * 2**im

    V0 = []
    ops['Vmap'] = []
    # get standard deviation for pixels for all values > Th2
    for j in range(len(movu)):
        V0.append(threshold_reduce(movu[j], Th2))
        ops['Vmap'].append(V0[j].copy())
        movu[j] = np.reshape(movu[j], (movu[j].shape[0], -1))

    xpix, ypix, lam = [], [], []
    rez = np.reshape(rez, (-1, Lyc * Lxc))
    lxs = 3 * 2**np.arange(5)
    nscales = len(lxs)

    niter = 250 * ops['max_iterations']
    Vmax = np.zeros((niter))
    ihop = np.zeros((niter))
    vrat = np.zeros((niter))
    Npix = np.zeros((niter))

    t0 = time.time()

    for tj in range(niter):
        # find peaks in stddev's
        v0max = np.array([V0[j].max() for j in range(5)])
        imap = np.argmax(v0max)
        imax = np.argmax(V0[imap])
        yi, xi = np.unravel_index(imax, (Lyp[imap], Lxp[imap]))
        # position of peak
        yi, xi = gxy[imap][1, yi, xi], gxy[imap][0, yi, xi]

        # check if peak is larger than threshold * max(1,nbinned/1200)
        Vmax[tj] = v0max.max()
        if Vmax[tj] < vmultiplier * Th2:
            break
        ls = lxs[imap]
        ihop[tj] = imap

        # make square of initial pixels based on spatial scale of peak
        ypix0, xpix0, lam0 = add_square(int(yi), int(xi), ls, Lyc, Lxc)

        # project movie into square to get time series
        tproj = rez[:, ypix0 * Lxc + xpix0] @ lam0
        goodframe = np.nonzero(tproj >
Th2)[0] # frames with activity > Th2 # extend mask based on activity similarity for j in range(3): ypix0, xpix0, lam0 = iter_extend(ypix0, xpix0, rez[goodframe], Lyc, Lxc) tproj = rez[:, ypix0 * Lxc + xpix0] @ lam0 goodframe = np.nonzero(tproj > Th2)[0] if len(goodframe) < 1: break if len(goodframe) < 1: break # check if ROI should be split vrat[tj], ipack = two_comps(rez[:, ypix0 * Lxc + xpix0], lam0, Th2) if vrat[tj] > 1.25: lam0, xp, goodframe = ipack tproj[goodframe] = xp ix = lam0 > lam0.max() / 5 xpix0 = xpix0[ix] ypix0 = ypix0[ix] lam0 = lam0[ix] # update residual on raw movie rez[np.ix_(goodframe, ypix0 * Lxc + xpix0)] -= tproj[goodframe][:, np.newaxis] * lam0 # update filtered movie ys, xs, lms = multiscale_mask(ypix0, xpix0, lam0, Lyp, Lxp) for j in range(nscales): movu[j][np.ix_(goodframe, xs[j] + Lxp[j] * ys[j])] -= np.outer( tproj[goodframe], lms[j]) Mx = movu[j][:, xs[j] + Lxp[j] * ys[j]] V0[j][ys[j], xs[j]] = (Mx**2 * np.float32(Mx > Th2)).sum(axis=0)**.5 xpix.append(xpix0) ypix.append(ypix0) lam.append(lam0) if tj % 1000 == 0: print('%d ROIs, score=%2.2f' % (tj, Vmax[tj])) ops['Vmax'] = Vmax ops['ihop'] = ihop ops['Vsplit'] = vrat stat = [{ 'ypix': ypix[n] + ops['yrange'][0], 'lam': lam[n] * sdmov[ypix[n], xpix[n]], 'xpix': xpix[n] + ops['xrange'][0], 'footprint': ops['ihop'][n] } for n in range(len(xpix))] return ops, stat
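# Editor's sketch: the scale-space upsampling used above, in isolation. A
# coarse correlation map on a downsampled grid is splined back onto the
# full-resolution pixel grid (all values synthetic).
import numpy as np
from scipy.interpolate import RectBivariateSpline

Lyc, Lxc = 64, 96
y_coarse = np.arange(0, Lyc, 4, dtype=float)   # downsampled pixel centers
x_coarse = np.arange(0, Lxc, 4, dtype=float)
vmap = np.random.rand(y_coarse.size, x_coarse.size).astype(np.float32)
model = RectBivariateSpline(y_coarse, x_coarse, vmap,
                            kx=min(3, y_coarse.size - 1),
                            ky=min(3, x_coarse.size - 1))
I_full = model(np.arange(Lyc, dtype=float), np.arange(Lxc, dtype=float))
print(I_full.shape)   # (64, 96)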
def get_hmi_map(model_pars, option_pars, indx,
                dataset='hmi_m_45s_2014_07_06_00_00_45_tai_magnetogram_fits',
                sunpydir=os.path.expanduser('~/sunpy/data/'),
                figsdir=os.path.expanduser('~/figs/hmi/'),
                l_newdata=False):
    """
    indx is 4 integers: lower and upper indices each of x,y coordinates
    dataset is of the form 'hmi_m_45s_2014_07_06_00_00_45_tai_magnetogram_fits'
    """
    from scipy.interpolate import RectBivariateSpline
    from sunpy.net import vso
    import sunpy.map
    client = vso.VSOClient()
    results = client.query(
        vso.attrs.Time("2014/07/05 23:59:50", "2014/07/05 23:59:55"),
        vso.attrs.Instrument('HMI'),
        vso.attrs.Physobs('LOS_magnetic_field'))
    # print(results.show())
    if l_newdata:
        if not os.path.exists(sunpydir):
            raise ValueError(
                "in get_hmi_map set 'sunpy' dir for vso data\n" +
                "for large files you may want to link to a local drive rather than the network")
        client.get(results).wait(progress=True)
    if not os.path.exists(figsdir):
        os.makedirs(figsdir)
    hmi_map = sunpy.map.Map(sunpydir + dataset)
    #hmi_map = hmi_map.rotate()
    #hmi_map.peek()
    s = hmi_map.data[indx[0]:indx[1], indx[2]:indx[3]]  # LOS field Bz
    s = s * u.G  # units of Gauss (fixed: in-place *= fails on a bare ndarray)
    nx = s.shape[0]
    ny = s.shape[1]
    nx2, ny2 = 2 * nx, 2 * ny  # size of interpolant

    # pixel size in arc seconds
    dx, dy = hmi_map.scale.items()[0][1], hmi_map.scale.items()[1][1]
    x, y = np.mgrid[
        hmi_map.xrange[0] + indx[0] * dx:hmi_map.xrange[0] + indx[1] * dx:1j * nx2,
        hmi_map.yrange[0] + indx[2] * dy:hmi_map.yrange[0] + indx[3] * dy:1j * ny2]
    # fixed: the y bounds were built from hmi_map.xrange

    # arrays to interpolate s from/to
    fx = u.Quantity(np.linspace(x.min().value, x.max().value, nx), unit=x.unit)
    fy = u.Quantity(np.linspace(y.min().value, y.max().value, ny), unit=y.unit)
    xnew = u.Quantity(np.linspace(x.min().value, x.max().value, nx2), unit=x.unit)
    ynew = u.Quantity(np.linspace(y.min().value, y.max().value, ny2), unit=y.unit)
    f = RectBivariateSpline(fx, fy, s.to(u.T))
    # The initial model assumes a relatively small region, so a linear
    # Cartesian map is applied here. Consideration may be required if larger
    # regions are of interest, where curvature or orientation near the limb
    # of the surface is significant.
    s_int = f(xnew, ynew)  # interpolate s (converted to Tesla above)
    s_int /= 4.  # rescale s, as the extra pixels will sum over the FWHM
    x_int = x * 7.25e5 * u.m  # convert units to metres
    y_int = y * 7.25e5 * u.m
    dx_int = dx * 7.25e5 * u.m
    dy_int = dy * 7.25e5 * u.m
    FWHM = 0.5 * (dx_int + dy_int)  # fixed: was dx_SI + dy_SI, which are undefined here
    smax = max(abs(s.min()), abs(s.max()))  # set symmetric plot scale
    cmin = -smax * 1e-4
    cmax = smax * 1e-4

    # xyz and Nxyz are assumed to be provided upstream (model_pars/option_pars)
    dz = (xyz[5] - xyz[4]) / (Nxyz[2] - 1)
    Z = u.Quantity(np.linspace(xyz[4].value, xyz[5].value, Nxyz[2]),
                   unit=xyz.unit)
    Zext = u.Quantity(np.linspace(Z.min().value - 4. * dz.value,
                                  Z.max().value + 4. * dz.value, Nxyz[2] + 8),
                      unit=Z.unit)
    coords = {
        'dx': (xyz[1] - xyz[0]) / (Nxyz[0] - 1),
        'dy': (xyz[3] - xyz[2]) / (Nxyz[1] - 1),
        'dz': (xyz[5] - xyz[4]) / (Nxyz[2] - 1),
        'xmin': xyz[0], 'xmax': xyz[1],
        'ymin': xyz[2], 'ymax': xyz[3],
        'zmin': xyz[4], 'zmax': xyz[5],
        'Z': Z, 'Zext': Zext
    }
    return coords
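# Editor's note (sketch): a numerical check of the divide-by-four convention
# above. Doubling the sampling in each direction quadruples the pixel count,
# so dividing the interpolated field by 4 approximately preserves the summed,
# flux-like quantity (values synthetic).
import numpy as np
from scipy.interpolate import RectBivariateSpline

a = np.random.rand(16, 16)
xv = np.arange(16.0)
x2 = np.linspace(0.0, 15.0, 32)
a2 = RectBivariateSpline(xv, xv, a)(x2, x2) / 4.0
print(a.sum(), '~', a2.sum())   # agree to within interpolation error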
def compute_abs_derivatives(mceq_run, pid, barr_param, zenith_list):
    # Note: relies on module-level definitions of barr, etr, tr, delta,
    # primary_particle, barr_unc and cos_theta.
    mceq_run.unset_mod_pprod(dont_fill=False)
    barr_pars = [p for p in barr if p.startswith(barr_param) and "ch" not in p]
    print("Parameters corresponding to selection", barr_pars)

    dim_res = len(zenith_list), etr.shape[0]
    gs = mceq_run.get_solution
    unit = 1e4

    # Solving nominal MCEq flux
    numu, anumu, nue, anue, nutau, anutau = (
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
    )
    for iz, zen_deg in enumerate(zenith_list):
        mceq_run.set_theta_deg(zen_deg)
        mceq_run.solve()
        numu[iz] = gs("total_numu", 0)[tr] * unit
        anumu[iz] = gs("total_antinumu", 0)[tr] * unit
        nue[iz] = gs("total_nue", 0)[tr] * unit
        anue[iz] = gs("total_antinue", 0)[tr] * unit
        nutau[iz] = gs("total_nutau", 0)[tr] * unit
        anutau[iz] = gs("total_antinutau", 0)[tr] * unit

    # Solving for plus one sigma
    mceq_run.unset_mod_pprod(dont_fill=True)
    for p in barr_pars:
        mceq_run.set_mod_pprod(primary_particle, pid, barr_unc, (p, delta))
    mceq_run.regenerate_matrices(skip_decay_matrix=True)

    numu_up, anumu_up, nue_up, anue_up, nutau_up, anutau_up = (
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
    )
    for iz, zen_deg in enumerate(zenith_list):
        mceq_run.set_theta_deg(zen_deg)
        mceq_run.solve()
        numu_up[iz] = gs("total_numu", 0)[tr] * unit
        anumu_up[iz] = gs("total_antinumu", 0)[tr] * unit
        nue_up[iz] = gs("total_nue", 0)[tr] * unit
        anue_up[iz] = gs("total_antinue", 0)[tr] * unit
        nutau_up[iz] = gs("total_nutau", 0)[tr] * unit
        anutau_up[iz] = gs("total_antinutau", 0)[tr] * unit

    # Solving for minus one sigma
    mceq_run.unset_mod_pprod(dont_fill=True)
    for p in barr_pars:
        mceq_run.set_mod_pprod(primary_particle, pid, barr_unc, (p, -delta))
    mceq_run.regenerate_matrices(skip_decay_matrix=True)

    numu_down, anumu_down, nue_down, anue_down, nutau_down, anutau_down = (
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
        np.zeros(dim_res),
    )
    for iz, zen_deg in enumerate(zenith_list):
        mceq_run.set_theta_deg(zen_deg)
        mceq_run.solve()
        numu_down[iz] = gs("total_numu", 0)[tr] * unit
        anumu_down[iz] = gs("total_antinumu", 0)[tr] * unit
        nue_down[iz] = gs("total_nue", 0)[tr] * unit
        anue_down[iz] = gs("total_antinue", 0)[tr] * unit
        nutau_down[iz] = gs("total_nutau", 0)[tr] * unit
        anutau_down[iz] = gs("total_antinutau", 0)[tr] * unit

    # calculating derivatives by central finite differences
    fd_derivative = lambda up, down: (up - down) / (2.0 * delta)

    dnumu = fd_derivative(numu_up, numu_down)
    danumu = fd_derivative(anumu_up, anumu_down)
    dnue = fd_derivative(nue_up, nue_down)
    danue = fd_derivative(anue_up, anue_down)
    dnutau = fd_derivative(nutau_up, nutau_down)
    danutau = fd_derivative(anutau_up, anutau_down)

    result = collections.OrderedDict()
    # fixed: the ordering below now matches the zip of distributions, i.e.
    # (flux, derivative) pairs for each species
    result_type = [
        "numu", "dnumu",
        "numubar", "dnumubar",
        "nue", "dnue",
        "nuebar", "dnuebar",
        "nutau", "dnutau",
        "nutaubar", "dnutaubar",
    ]
    for dist, sp in zip(
        [numu, dnumu, anumu, danumu, nue, dnue, anue, danue,
         nutau, dnutau, anutau, danutau],
        result_type,
    ):
        result[sp] = RectBivariateSpline(cos_theta, np.log(etr), dist)
    return result
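# Editor's usage sketch (commented out, since it needs a configured MCEq run):
# the returned splines give flux and Barr-parameter derivatives at arbitrary
# (cos(zenith), log(E)); the argument values below are illustrative only.
# res = compute_abs_derivatives(mceq_run, 211, 'g', [0., 30., 60.])
# flux_numu = res['numu'](0.5, np.log(1e3))[0, 0]     # numu flux at cos(theta)=0.5, E=1 TeV
# dflux_numu = res['dnumu'](0.5, np.log(1e3))[0, 0]   # derivative w.r.t. the Barr parameter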