def test_calculate_p():
    obj = test()
    p1, p2, p3 = calculate_p(obj)

    p1_expected = obj.p1_start + (0.5 + np.arange(obj.N_p1)) * obj.dp1
    p2_expected = obj.p2_start + (0.5 + np.arange(obj.N_p2)) * obj.dp2
    p3_expected = obj.p3_start + (0.5 + np.arange(obj.N_p3)) * obj.dp3

    p2_expected, p1_expected, p3_expected = np.meshgrid(p2_expected,
                                                        p1_expected,
                                                        p3_expected
                                                       )

    p1_expected = af.reorder(af.flat(af.to_array(p1_expected)), 2, 3, 0, 1)
    p2_expected = af.reorder(af.flat(af.to_array(p2_expected)), 2, 3, 0, 1)
    p3_expected = af.reorder(af.flat(af.to_array(p3_expected)), 2, 3, 0, 1)

    assert (af.sum(af.abs(p1_expected - p1)) == 0)
    assert (af.sum(af.abs(p2_expected - p2)) == 0)
    assert (af.sum(af.abs(p3_expected - p3)) == 0)
def test_dump_moments():
    test_obj = test()
    N_g      = test_obj.N_ghost

    dump_moments(test_obj, 'test_file')

    h5f          = h5py.File('test_file.h5', 'r')
    moments_read = h5f['moments'][:]
    h5f.close()

    moments_read = np.swapaxes(moments_read, 0, 1)

    assert (af.sum(  af.to_array(moments_read[:, :, 0])
                   - af.reorder(compute_moments_imported(test_obj, 'density'),
                                1, 2, 0
                               )[N_g:-N_g, N_g:-N_g]
                  ) == 0
           )

    assert (af.sum(  af.to_array(moments_read[:, :, 1])
                   - af.reorder(compute_moments_imported(test_obj, 'energy'),
                                1, 2, 0
                               )[N_g:-N_g, N_g:-N_g]
                  ) == 0
           )
def calculate_density(f, vel_x):
    deltav           = af.sum(vel_x[0, 1] - vel_x[0, 0])
    value_of_density = af.sum(f, 1) * deltav
    af.eval(value_of_density)
    return value_of_density
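# Hedged usage sketch (not from the original source, assuming numpy is
# available as np): a unit-density Maxwellian on a uniform velocity grid
# should integrate back to a density of ~1 at every spatial point.
N_x, N_v = 4, 256
v        = np.linspace(-10.0, 10.0, N_v)
vel_x    = af.to_array(np.tile(v, (N_x, 1)))
f        = af.exp(-vel_x**2 / 2) / np.sqrt(2 * np.pi)
print(calculate_density(f, vel_x))  # each of the N_x entries is ~1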
def center_of_mass(inarr):
    arr        = af.abs(inarr)
    normalizer = af.sum(arr)

    t_dims   = list(arr.dims())
    mod_dims = [None, None, None, None]
    for i in range(len(t_dims)):
        mod_dims[i] = 1

    com = []
    for dim in range(len(t_dims)):
        # Swap: place the current dimension's extent into mod_dims
        mod_dims[dim] = t_dims[dim]
        t_dims[dim]   = 1

        grid = af.iota(mod_dims[0], mod_dims[1], mod_dims[2], mod_dims[3],
                       tile_dims=t_dims)
        com.append(af.sum(grid * arr) / normalizer)

        # Swap back
        t_dims[dim]   = mod_dims[dim]
        mod_dims[dim] = 1

    return com
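# Hedged usage sketch (not from the original source): with a single nonzero
# entry, the centre of mass should land on that entry's indices.
arr = af.constant(0, 6, 8)
arr[2, 5] = 1.0
print(center_of_mass(arr))  # expected: [2.0, 5.0]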
def fraction_finder(positions_x, positions_y, x_grid, y_grid, dx, dy):
    '''
    function fraction_finder(positions_x, positions_y, x_grid, y_grid, dx, dy)
    -----------------------------------------------------------------------
    Input variables: positions_x, positions_y, x_grid, y_grid, dx, dy

        positions_x: A one-dimensional array of size equal to the number of
                     particles taken in the PIC code. It contains the
                     positions of the particles in the x direction.

        positions_y: A one-dimensional array of size equal to the number of
                     particles taken in the PIC code. It contains the
                     positions of the particles in the y direction.

        x_grid, y_grid: Arrays denoting the position grid chosen in the PIC
                        simulation in the x and y directions respectively.

        dx, dy: The distance between any two consecutive grid nodes of the
                position grid in the x and y directions respectively.
    -----------------------------------------------------------------------
    returns: x_frac, y_frac

        The fractions of grid cells needed to perform the 2D charge
        deposition.
    '''
    x_frac = (positions_x - af.sum(x_grid[0])) / dx
    y_frac = (positions_y - af.sum(y_grid[0])) / dy

    af.eval(x_frac, y_frac)
    return x_frac, y_frac
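# Hedged usage sketch (not from the original source, assuming numpy is
# available as np): a uniform grid starting at 0 with spacing 0.1, so the
# returned fractions are simply position / 0.1.
x_grid = af.to_array(np.arange(0.0, 1.0, 0.1))
y_grid = af.to_array(np.arange(0.0, 1.0, 0.1))
positions_x = af.Array([0.25, 0.61])
positions_y = af.Array([0.05, 0.99])
x_frac, y_frac = fraction_finder(positions_x, positions_y,
                                 x_grid, y_grid, dx=0.1, dy=0.1)
print(x_frac)  # ~[2.5, 6.1]
print(y_frac)  # ~[0.5, 9.9]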
def polyval_2d(poly_2d, xi, eta):
    '''
    Evaluates the 2D polynomials ``poly_2d`` at the points (``xi``, ``eta``).
    The rows of ``poly_2d`` hold the coefficients of descending powers of
    eta and the columns hold the coefficients of descending powers of xi.
    '''
    poly_2d_shape = poly_2d.shape
    poly_xy       = af.tile(poly_2d, d0 = 1, d1 = 1, d2 = 1, d3 = xi.shape[0])
    poly_xy_shape = poly_xy.shape

    xi_power = af.flip(af.range(poly_xy_shape[1], dtype = af.Dtype.u32))
    xi_power = af.tile(af.transpose(xi_power), d0 = poly_xy_shape[0])
    xi_power = af.tile(xi_power, d0 = 1, d1 = 1, d2 = xi.shape[0])

    eta_power = af.flip(af.range(poly_xy_shape[0], dtype = af.Dtype.u32))
    eta_power = af.tile(eta_power, d0 = 1, d1 = poly_xy_shape[1])
    eta_power = af.tile(eta_power, d0 = 1, d1 = 1, d2 = eta.shape[0])

    Xi = af.reorder(xi, d0 = 2, d1 = 1, d2 = 0)
    Xi = af.tile(Xi, d0 = poly_xy_shape[0], d1 = poly_xy_shape[1])
    Xi = af.pow(Xi, xi_power)
    Xi = af.reorder(Xi, d0 = 0, d1 = 1, d2 = 3, d3 = 2)

    Eta = af.reorder(eta, d0 = 2, d1 = 1, d2 = 0)
    Eta = af.tile(Eta, d0 = poly_xy_shape[0], d1 = poly_xy_shape[1])
    Eta = af.pow(Eta, eta_power)
    Eta = af.reorder(Eta, d0 = 0, d1 = 1, d2 = 3, d3 = 2)

    Xi_Eta = Xi * Eta

    poly_val = af.broadcast(multiply, poly_xy, Xi_Eta)
    poly_val = af.sum(af.sum(poly_val, dim = 1), dim = 0)
    poly_val = af.reorder(poly_val, d0 = 2, d1 = 3, d2 = 0, d3 = 1)

    return poly_val
def integrate(integrand_coeffs):
    '''
    Performs integration according to the given quadrature method by taking
    in the coefficients of the polynomial and the number of quadrature points.
    The number of quadrature points and the quadrature scheme are set in the
    params.py module.

    Parameters
    ----------
    integrand_coeffs : arrayfire.Array [M N 1 1]
                       The coefficients of M polynomials of order N arranged
                       in a 2D array.

    Returns
    -------
    integral : arrayfire.Array [M 1 1 1]
               The value of the definite integral evaluated using the
               specified quadrature method for the M polynomials.
    '''
    integrand = integrand_coeffs

    if (params.scheme == 'gauss_quadrature'):
        gaussian_nodes = params.gauss_points
        Gauss_weights  = params.gauss_weights

        nodes_tile   = af.transpose(af.tile(gaussian_nodes, 1, integrand.shape[1]))
        power        = af.flip(af.range(integrand.shape[1]))
        nodes_power  = af.broadcast(utils.power, nodes_tile, power)
        weights_tile = af.transpose(af.tile(Gauss_weights, 1, integrand.shape[1]))
        nodes_weight = nodes_power * weights_tile

        value_at_gauss_nodes = af.matmul(integrand, nodes_weight)
        integral             = af.sum(value_at_gauss_nodes, 1)

    if (params.scheme == 'lobatto_quadrature'):
        lobatto_nodes   = params.lobatto_quadrature_nodes
        Lobatto_weights = params.lobatto_weights_quadrature

        nodes_tile   = af.transpose(af.tile(lobatto_nodes, 1, integrand.shape[1]))
        power        = af.flip(af.range(integrand.shape[1]))
        nodes_power  = af.broadcast(utils.power, nodes_tile, power)
        weights_tile = af.transpose(af.tile(Lobatto_weights, 1, integrand.shape[1]))
        nodes_weight = nodes_power * weights_tile

        value_at_lobatto_nodes = af.matmul(integrand, nodes_weight)
        integral               = af.sum(value_at_lobatto_nodes, 1)

    return integral
def cost(Weights, X, Y, lambda_param=1.0):
    # Number of samples
    m = Y.dims()[0]

    dim0 = Weights.dims()[0]
    dim1 = Weights.dims()[1] if len(Weights.dims()) > 1 else None
    dim2 = Weights.dims()[2] if len(Weights.dims()) > 2 else None
    dim3 = Weights.dims()[3] if len(Weights.dims()) > 3 else None

    # Make the lambda corresponding to Weights(0) == 0
    lambdat = af.constant(lambda_param, dim0, dim1, dim2, dim3)

    # No regularization for bias weights
    lambdat[0, :] = 0

    # Get the prediction
    H = predict_prob(X, Weights)

    # Cost of misprediction
    Jerr = -1 * af.sum(Y * af.log(H) + (1 - Y) * af.log(1 - H), dim=0)

    # Regularization cost
    Jreg = 0.5 * af.sum(lambdat * Weights * Weights, dim=0)

    # Total cost
    J = (Jerr + Jreg) / m

    # Find the gradient of cost
    D = (H - Y)
    dJ = (af.matmulTN(X, D) + lambdat * Weights) / m

    return J, dJ
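# Hedged usage sketch (not part of the original source): predict_prob() is not
# defined in this snippet, so a hypothetical logistic model is assumed here.
def predict_prob(X, Weights):
    # Hypothetical stand-in: sigmoid of the linear model X * Weights.
    return 1 / (1 + af.exp(-af.matmul(X, Weights)))

X = af.join(1, af.constant(1, 100, 1), af.randu(100, 3))  # bias column + 3 features
Y = (af.randu(100, 1) > 0.5).as_type(af.Dtype.f32)        # random 0/1 labels
Weights = af.constant(0, 4, 1)

J, dJ = cost(Weights, X, Y, lambda_param=1.0)
print(J)   # mean loss, ~log(2) for an all-zero weight vector
print(dJ)  # gradient of shape [4 1 1 1]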
def test_fft_poisson():
    """
    This function tests that the FFT solver works as intended. We take an
    expression for density for which the fields can be calculated
    analytically, and check that the numerical solution as given by the FFT
    solver and the analytical solution correspond well with each other.
    """
    x_start = 0
    y_start = 0
    z_start = 0

    x_end = 1
    y_end = 2
    z_end = 3

    N_x = np.random.randint(16, 32)
    N_y = np.random.randint(16, 32)
    N_z = np.random.randint(16, 32)

    dx = (x_end - x_start) / N_x
    dy = (y_end - y_start) / N_y
    dz = (z_end - z_start) / N_z

    # Using a centered formulation for the grid points of x, y, z:
    x = x_start + (np.arange(N_x) + 0.5) * dx
    y = y_start + (np.arange(N_y) + 0.5) * dy
    z = z_start + (np.arange(N_z) + 0.5) * dz

    y, x, z = np.meshgrid(y, x, z)

    x = af.to_array(x)
    y = af.to_array(y)
    z = af.to_array(z)

    rho = af.sin(2 * np.pi * x + 4 * np.pi * y + 6 * np.pi * z)

    Ex_analytic = -(2 * np.pi) / (56 * np.pi**2) * \
                   af.cos(2 * np.pi * x + 4 * np.pi * y + 6 * np.pi * z)
    Ey_analytic = -(4 * np.pi) / (56 * np.pi**2) * \
                   af.cos(2 * np.pi * x + 4 * np.pi * y + 6 * np.pi * z)
    Ez_analytic = -(6 * np.pi) / (56 * np.pi**2) * \
                   af.cos(2 * np.pi * x + 4 * np.pi * y + 6 * np.pi * z)

    Ex_numerical, Ey_numerical, Ez_numerical = fft_poisson(rho, dx, dy, dz)

    # Checking that the L1 norm of error is at machine precision:
    Ex_err = af.sum(af.abs(Ex_numerical - Ex_analytic)) / Ex_analytic.elements()
    Ey_err = af.sum(af.abs(Ey_numerical - Ey_analytic)) / Ey_analytic.elements()
    Ez_err = af.sum(af.abs(Ez_numerical - Ez_analytic)) / Ez_analytic.elements()

    assert (Ex_err < 1e-14)
    assert (Ey_err < 1e-14)
    assert (Ez_err < 1e-14)
def calculate_vbulk(f, vel_x):
    deltav            = af.sum(vel_x[0, 1] - vel_x[0, 0])
    value_of_momentum = af.sum(f * vel_x, 1) * deltav
    value_of_vbulk    = value_of_momentum / calculate_density(f, vel_x)
    af.eval(value_of_vbulk)
    return value_of_vbulk
def calculate_temperature(f, vel_x):
    deltav = af.sum(vel_x[0, 1] - vel_x[0, 0])
    v_bulk = af.tile(calculate_vbulk(f, vel_x), 1, N_velocity)

    value_of_temperature = af.sum(f * (vel_x - v_bulk)**2, 1) * deltav
    value_of_temperature = value_of_temperature / calculate_density(f, vel_x)
    af.eval(value_of_temperature)
    return value_of_temperature
def integrate_1d(polynomials, order, scheme = 'gauss'):
    '''
    Integrates single-variable polynomials using Gauss-Legendre or
    Gauss-Lobatto quadrature.

    Parameters
    ----------
    polynomials : af.Array [number_of_polynomials degree 1 1]
                  The polynomials to be integrated.

    order       : int
                  Order of the quadrature.

    scheme      : str
                  Possible options are
                  - ``gauss``   for using Gauss-Legendre quadrature
                  - ``lobatto`` for using Gauss-Lobatto quadrature

    Returns
    -------
    integral : af.Array [number_of_polynomials 1 1 1]
               The integral for the respective polynomials using the given
               quadrature scheme.
    '''
    integral = 0.0

    if scheme == 'gauss':
        N_g             = order
        xi_gauss        = af.np_to_af_array(lagrange.gauss_nodes(N_g))
        gauss_weights   = lagrange.gaussian_weights(N_g)
        polyval_gauss   = polyval_1d(polynomials, xi_gauss)

        integral = af.sum(af.transpose(af.broadcast(multiply,
                                                    af.transpose(polyval_gauss),
                                                    gauss_weights)),
                          dim = 1)
        return integral

    elif scheme == 'lobatto':
        N_l             = order
        xi_lobatto      = lagrange.LGL_points(N_l)
        lobatto_weights = lagrange.lobatto_weights(N_l)
        polyval_lobatto = polyval_1d(polynomials, xi_lobatto)

        integral = af.sum(af.transpose(af.broadcast(multiply,
                                                    af.transpose(polyval_lobatto),
                                                    lobatto_weights)),
                          dim = 1)
        return integral

    else:
        return -1.
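# Hedged usage sketch (not part of the original source); assumes the module's
# lagrange and polyval_1d helpers used by integrate_1d are importable, that
# coefficients are ordered from highest power down, and that numpy is
# available as np.
poly = af.np_to_af_array(np.array([[1.0, 0.0, 0.0]]))   # coefficients of x^2
print(integrate_1d(poly, order=3, scheme='gauss'))      # ~2/3 on [-1, 1]
print(integrate_1d(poly, order=4, scheme='lobatto'))    # ~2/3 on [-1, 1]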
def test_fdtd_mode1():
    error_B1 = np.zeros(3)
    error_B2 = np.zeros(3)
    error_E3 = np.zeros(3)

    N = 2**np.arange(5, 8)

    for i in range(N.size):
        obj = test(N[i])
        N_g = obj.N_ghost

        B1_fdtd = gauss1D(obj.q2[:, N_g:-N_g, N_g:-N_g], 0.1)
        B2_fdtd = gauss1D(obj.q1[:, N_g:-N_g, N_g:-N_g], 0.1)

        obj.yee_grid_EM_fields[3, N_g:-N_g, N_g:-N_g] = B1_fdtd
        obj.yee_grid_EM_fields[4, N_g:-N_g, N_g:-N_g] = B2_fdtd

        dt   = obj.dq1 / 2
        time = np.arange(dt, 1 + dt, dt)

        E3_initial = obj.yee_grid_EM_fields[2].copy()
        B1_initial = obj.yee_grid_EM_fields[3].copy()
        B2_initial = obj.yee_grid_EM_fields[4].copy()

        obj.J1, obj.J2, obj.J3 = 0, 0, 0

        for time_index, t0 in enumerate(time):
            fdtd(obj, dt)

        error_B1[i] = af.sum(af.abs(  obj.yee_grid_EM_fields[3, N_g:-N_g, N_g:-N_g]
                                    - B1_initial[0, N_g:-N_g, N_g:-N_g]
                                   )
                            ) / B1_initial.elements()

        error_B2[i] = af.sum(af.abs(  obj.yee_grid_EM_fields[4, N_g:-N_g, N_g:-N_g]
                                    - B2_initial[0, N_g:-N_g, N_g:-N_g]
                                   )
                            ) / B2_initial.elements()

        error_E3[i] = af.sum(af.abs(  obj.yee_grid_EM_fields[2, N_g:-N_g, N_g:-N_g]
                                    - E3_initial[0, N_g:-N_g, N_g:-N_g]
                                   )
                            ) / E3_initial.elements()

    poly_B1 = np.polyfit(np.log10(N), np.log10(error_B1), 1)
    poly_B2 = np.polyfit(np.log10(N), np.log10(error_B2), 1)
    poly_E3 = np.polyfit(np.log10(N), np.log10(error_E3), 1)

    assert (abs(poly_B1[0] + 3) < 0.6)
    assert (abs(poly_B2[0] + 3) < 0.6)
    assert (abs(poly_E3[0] + 2) < 0.6)
def evolve_electrodynamic_fields(self, J1, J2, J3, dt):
    """
    Evolve the fields using FDTD.

    Parameters
    ----------
    J1 : af.Array
         Array which contains the J1 current for each species.

    J2 : af.Array
         Array which contains the J2 current for each species.

    J3 : af.Array
         Array which contains the J3 current for each species.

    dt : double
         Timestep size
    """
    self.J1 = af.sum(J1, 1)
    self.J2 = af.sum(J2, 1)
    self.J3 = af.sum(J3, 1)

    self.current_values_to_yee_grid()

    # Here:
    # cell_centered_EM_fields[:3] is at n
    # cell_centered_EM_fields[3:] is at n+1/2
    # cell_centered_EM_fields_at_n_plus_half[3:] is at n-1/2

    self.cell_centered_EM_fields_at_n[:3] = self.cell_centered_EM_fields[:3]
    self.cell_centered_EM_fields_at_n[3:] = \
        0.5 * (  self.cell_centered_EM_fields_at_n_plus_half[3:]
               + self.cell_centered_EM_fields[3:]
              )

    self.cell_centered_EM_fields_at_n_plus_half[3:] = \
        self.cell_centered_EM_fields[3:]

    fdtd(self, dt)
    self.yee_grid_to_cell_centered_grid()

    # Here:
    # cell_centered_EM_fields[:3] is at n+1
    # cell_centered_EM_fields[3:] is at n+3/2

    self.cell_centered_EM_fields_at_n_plus_half[:3] = \
        0.5 * (  self.cell_centered_EM_fields_at_n[:3]
               + self.cell_centered_EM_fields[:3]
              )

    return
def check_error(params):
    error = np.zeros(N.size)

    for i in range(N.size):
        domain.N_p1 = int(N[i])
        domain.N_p2 = int(N[i])
        domain.N_p3 = int(N[i])

        # Defining the physical system to be solved:
        system = physical_system(domain,
                                 boundary_conditions,
                                 params,
                                 initialize,
                                 advection_terms,
                                 collision_operator.BGK,
                                 moments
                                )

        # Declaring a nonlinear solver object which will evolve the defined physical system:
        nls = nonlinear_solver(system)

        # Time parameters:
        dt      = 0.0001 * 32 / nls.N_p1
        t_final = 0.2

        time_array = np.arange(dt, t_final + dt, dt)
        if (time_array[-1] > t_final):
            time_array = np.delete(time_array, -1)

        # Finding final resting point of the blob:
        E1 = nls.fields_solver.cell_centered_EM_fields[0]
        E2 = nls.fields_solver.cell_centered_EM_fields[1]
        E3 = nls.fields_solver.cell_centered_EM_fields[2]

        B1 = nls.fields_solver.cell_centered_EM_fields[3]
        B2 = nls.fields_solver.cell_centered_EM_fields[4]
        B3 = nls.fields_solver.cell_centered_EM_fields[5]

        sol = odeint(dp_dt, np.array([0, 0, 0]), time_array,
                     args = (af.mean(E1), af.mean(E2), af.mean(E3),
                             af.mean(B1), af.mean(B2), af.mean(B3),
                             af.sum(params.charge[0]), af.sum(params.mass[0])
                            ),
                     atol = 1e-12, rtol = 1e-12
                    )

        f_reference = af.broadcast(initialize.initialize_f,
                                   nls.q1_center, nls.q2_center,
                                   nls.p1_center - sol[-1, 0],
                                   nls.p2_center - sol[-1, 1],
                                   nls.p3_center - sol[-1, 2],
                                   params
                                  )

        for time_index, t0 in enumerate(time_array):
            nls.strang_timestep(dt)

        error[i] = af.mean(af.abs(nls.f - f_reference))

    return error
def fraction_finder(x, y, x_grid, y_grid, dx_frac_finder, dy_frac_finder):
    x_frac = (x - af.sum(x_grid[0])) / dx_frac_finder
    y_frac = (y - af.sum(y_grid[0])) / dy_frac_finder

    af.eval(x_frac, y_frac)
    return x_frac, y_frac
def test_fdtd_mode2():
    error_E1 = np.zeros(3)
    error_E2 = np.zeros(3)
    error_B3 = np.zeros(3)

    N = 2**np.arange(5, 8)

    for i in range(N.size):
        obj = test(N[i])
        N_g = obj.N_ghost

        obj.yee_grid_EM_fields[0, N_g:-N_g, N_g:-N_g] = \
            gauss1D(obj.q2[:, N_g:-N_g, N_g:-N_g], 0.1)
        obj.yee_grid_EM_fields[1, N_g:-N_g, N_g:-N_g] = \
            gauss1D(obj.q1[:, N_g:-N_g, N_g:-N_g], 0.1)

        dt   = obj.dq1 / 2
        time = np.arange(dt, 1 + dt, dt)

        B3_initial = obj.yee_grid_EM_fields[5].copy()
        E1_initial = obj.yee_grid_EM_fields[0].copy()
        E2_initial = obj.yee_grid_EM_fields[1].copy()

        obj.J1, obj.J2, obj.J3 = 0, 0, 0

        for time_index, t0 in enumerate(time):
            fdtd(obj, dt)

        error_E1[i] = af.sum(af.abs(  obj.yee_grid_EM_fields[0, N_g:-N_g, N_g:-N_g]
                                    - E1_initial[:, N_g:-N_g, N_g:-N_g]
                                   )
                            ) / E1_initial.elements()

        error_E2[i] = af.sum(af.abs(  obj.yee_grid_EM_fields[1, N_g:-N_g, N_g:-N_g]
                                    - E2_initial[:, N_g:-N_g, N_g:-N_g]
                                   )
                            ) / E2_initial.elements()

        error_B3[i] = af.sum(af.abs(  obj.yee_grid_EM_fields[5, N_g:-N_g, N_g:-N_g]
                                    - B3_initial[:, N_g:-N_g, N_g:-N_g]
                                   )
                            ) / B3_initial.elements()

    poly_E1 = np.polyfit(np.log10(N), np.log10(error_E1), 1)
    poly_E2 = np.polyfit(np.log10(N), np.log10(error_E2), 1)
    poly_B3 = np.polyfit(np.log10(N), np.log10(error_B3), 1)

    assert (abs(poly_E1[0] + 3) < 0.4)
    assert (abs(poly_E2[0] + 3) < 0.4)
    assert (abs(poly_B3[0] + 2) < 0.4)
def gaussian(dims, sigmas, **kwargs):
    alpha = 1.0
    grid  = af.constant(1.0, dims[0], dims[1], dims[2])

    for i in range(len(sigmas)):
        multiplier = -0.5 * alpha / pow(sigmas[i], 2)
        exponent   = af.pow((af.range(dims[0], dims[1], dims[2], dim=i)
                             - (dims[i] - 1) / 2.0), 2) * multiplier
        grid       = grid * af.arith.exp(exponent)

    # Normalize so that the grid sums to one
    grid_tot   = af.sum(af.sum(af.sum(grid, dim=0), dim=1), dim=2)
    grid_total = af.tile(grid_tot, dims[0], dims[1], dims[2])
    grid       = grid / grid_total

    return grid
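# Hedged usage sketch (not from the original source): the grid is divided by
# its own total, so the returned values should sum to ~1.
grid = gaussian([16, 16, 16], [2.0, 2.0, 2.0])
print(af.sum(grid))  # ~1.0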
def func(x0):
    field_scattered = self._scattering_obj.forward(x0, fx_illu, fy_illu)
    field_scattered = self._defocus_obj.forward(field_scattered,
                                                self.prop_distances)
    field_measure   = self._crop_obj.forward(field_scattered)
    residual        = af.abs(field_measure) - amplitude
    function_value  = af.sum(residual * af.conjg(residual)).real
    return function_value
def lagrange_interpolation(fn_i):
    '''
    Finds the general interpolation of a function.

    Parameters
    ----------
    fn_i : af.Array [N N_LGL 1 1]
           Value of :math:`N` functions at the LGL points.

    Returns
    -------
    lagrange_interpolation : af.Array [N N_LGL 1 1]
                             :math:`N` interpolated polynomials for
                             :math:`N` functions.
    '''
    fn_i = af.transpose(af.reorder(fn_i, d0=2, d1=1, d2=0))
    lagrange_interpolation = af.broadcast(utils.multiply,
                                          params.lagrange_coeffs, fn_i)
    lagrange_interpolation = af.reorder(af.sum(lagrange_interpolation, dim=0),
                                        d0=2, d1=1, d2=0)

    return lagrange_interpolation
def input_info(A, Asp):
    m, n = A.dims()
    nnz  = af.sum((A != 0))
    print("  matrix size:                %i x %i" % (m, n))
    print("  matrix sparsity:            %2.2f %%" % (100 * nnz / n**2,))
    print("  dense matrix memory usage:  ")
    print("  sparse matrix memory usage: ")
def _binObject(self, obj, adjoint=False):
    """
    function to bin the object by factor of slice_binning_factor
    """
    if self.slice_binning_factor == 1:
        return obj
    assert self.shape[2] >= 1

    if adjoint:
        obj_out = af.constant(0.0, self._shape_full[0], self._shape_full[1],
                              self._shape_full[2], dtype=af_complex_datatype)
        for idx in range((self.shape[2] - 1) * self.slice_binning_factor, -1,
                         -self.slice_binning_factor):
            idx_slice = slice(idx, np.min([obj_out.shape[2],
                                           idx + self.slice_binning_factor]))
            obj_out[:, :, idx_slice] = af.broadcast(
                self.assign_broadcast,
                obj_out[:, :, idx_slice],
                obj[:, :, idx // self.slice_binning_factor])
    else:
        obj_out = af.constant(0.0, self.shape[0], self.shape[1], self.shape[2],
                              dtype=af_complex_datatype)
        for idx in range(0, obj.shape[2], self.slice_binning_factor):
            idx_slice = slice(idx, np.min([obj.shape[2],
                                           idx + self.slice_binning_factor]))
            obj_out[:, :, idx // self.slice_binning_factor] = \
                af.sum(obj[:, :, idx_slice], 2)

    return obj_out
def lobatto_quad_multivar_poly(poly_xi_eta, N_quad, advec_var):
    '''
    Integrates 2D polynomials in (xi, eta) over the reference element
    [-1, 1] x [-1, 1] using Gauss-Lobatto quadrature with N_quad points
    along each direction.
    '''
    shape_poly_2d = shape(poly_xi_eta)

    xi_LGL  = lagrange.LGL_points(N_quad)
    eta_LGL = lagrange.LGL_points(N_quad)

    Xi, Eta = af_meshgrid(xi_LGL, eta_LGL)

    Xi  = af.flat(Xi)
    Eta = af.flat(Eta)

    w_i = lagrange.lobatto_weights(N_quad)
    w_j = lagrange.lobatto_weights(N_quad)

    W_i, W_j = af_meshgrid(w_i, w_j)

    W_i = af.tile(af.flat(W_i), d0 = 1, d1 = shape_poly_2d[2])
    W_j = af.tile(af.flat(W_j), d0 = 1, d1 = shape_poly_2d[2])

    P_xi_eta_quad_val = af.transpose(polyval_2d(poly_xi_eta, Xi, Eta))

    integral = af.sum(W_i * W_j * P_xi_eta_quad_val, dim = 0)

    return af.transpose(integral)
def log_loss(y_true, y_prob):
    """Compute logistic loss for classification.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels.

    y_prob : array-like of float, shape = (n_samples, n_classes)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.

    Returns
    -------
    loss : float
        The degree to which the samples are correctly predicted.
    """
    # Reference numpy implementation:
    # eps = np.finfo(y_prob.dtype).eps
    # y_prob = np.clip(y_prob, eps, 1 - eps)
    # if y_prob.shape[1] == 1:
    #     y_prob = np.append(1 - y_prob, y_prob, axis=1)
    #
    # if y_true.shape[1] == 1:
    #     y_true = np.append(1 - y_true, y_true, axis=1)
    #
    # return - xlogy(y_true, y_prob).sum() / y_prob.shape[0]

    eps = numpy.finfo(typemap(y_prob.dtype())).eps
    y_prob[y_prob < eps] = eps
    y_prob[y_prob > (1.0 - eps)] = 1.0 - eps

    if y_prob.numdims() == 1:
        y_prob = af.join(1, (1.0 - y_prob).as_type(y_prob.dtype()), y_prob)

    if y_true.numdims() == 1:
        y_true = af.join(1, (1.0 - y_true).as_type(y_true.dtype()), y_true)

    return -af.sum(af.flat(xlogy(y_true, y_prob))) / y_prob.shape[0]
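# Hedged usage sketch (not part of the original source); assumes the module's
# xlogy() and typemap() helpers used inside log_loss() are available.
y_true = af.Array([1.0, 0.0, 1.0, 1.0])
y_prob = af.Array([0.9, 0.2, 0.8, 0.6])
print(log_loss(y_true, y_prob))  # small positive loss for mostly-correct predictions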
def test_calculate_q():
    obj = test()
    q1, q2 = calculate_q_center(obj)

    q1_expected = obj.q1_start + \
        (0.5 + np.arange(-obj.N_ghost, obj.N_q1 + obj.N_ghost)) * obj.dq1
    q2_expected = obj.q2_start + \
        (0.5 + np.arange(-obj.N_ghost, obj.N_q2 + obj.N_ghost)) * obj.dq2

    q2_expected, q1_expected = np.meshgrid(q2_expected, q1_expected)

    q1_expected = af.reorder(af.to_array(q1_expected), 2, 0, 1)
    q2_expected = af.reorder(af.to_array(q2_expected), 2, 0, 1)

    assert (af.sum(af.abs(q1_expected - q1)) == 0)
    assert (af.sum(af.abs(q2_expected - q2)) == 0)
def sum(a: ndarray, axis: tp.Optional[int] = None) \
        -> tp.Union[numbers.Number, ndarray]:
    """
    Sum of array elements over a given axis.
    """
    return _wrap_af_array(af.sum(a._af_array, dim=axis))
def compute_electrostatic_fields(self, rho_hat):
    """
    Computes the electrostatic fields by making use of FFTs by solving
    the Poisson equation: div^2 phi = rho to return the FT of the fields.

    Parameters
    ----------
    rho_hat : af.Array
              FT for the charge density for each of the species.
              shape: (1, N_s, N_q1, N_q2)
    """
    # Summing over all the species:
    phi_hat = multiply(af.sum(rho_hat, 1),
                       1 / (self.k_q1**2 + self.k_q2**2))  # (1, 1, N_q1, N_q2)

    # Setting the background electric potential to zero:
    phi_hat[:, :, 0, 0] = 0

    self.E1_hat = -phi_hat * 1j * self.k_q1 / self.params.eps
    self.E2_hat = -phi_hat * 1j * self.k_q2 / self.params.eps
    self.E3_hat = 0 * self.E1_hat

    self.B1_hat = 0 * self.E1_hat
    self.B2_hat = 0 * self.E1_hat
    self.B3_hat = 0 * self.E1_hat

    af.eval(self.E1_hat, self.E2_hat, self.E3_hat,
            self.B1_hat, self.B2_hat, self.B3_hat)
    return
def lagrange_interpolation_u(u, gv):
    '''
    Calculates the coefficients of the Lagrange interpolation using the
    value of u at the mapped LGL points in the domain.

    The interpolation using the Lagrange basis polynomials is given by
    :math:`u(\\xi) = \\sum_i L_i(\\xi) u_i`

    where :math:`L_i` are the Lagrange basis polynomials and :math:`u_i` is
    the value of u at the LGL points.

    Parameters
    ----------
    u : arrayfire.Array [N_LGL N_Elements 1 1]
        The value of u at the mapped LGL points.

    Returns
    -------
    lagrange_interpolated_coeffs : arrayfire.Array [1 N_LGL N_Elements 1]
                                   The coefficients of the polynomials
                                   obtained by Lagrange interpolation.
                                   Each polynomial is of order N_LGL - 1.
    '''
    lagrange_coeffs_tile = af.tile(gv.lagrange_coeffs, 1, 1, params.N_Elements)
    reordered_u          = af.reorder(u, 0, 2, 1)

    lagrange_interpolated_coeffs = af.sum(af.broadcast(utils.multiply,
                                                       reordered_u,
                                                       lagrange_coeffs_tile), 0)

    return lagrange_interpolated_coeffs
def input_info(A, Asp):
    m, n = A.dims()
    nnz  = af.sum((A != 0))
    print("  matrix size:                %i x %i" % (m, n))
    print("  matrix sparsity:            %2.2f %%" % (100 * nnz / n**2,))
    print("  dense matrix memory usage:  ")
    print("  sparse matrix memory usage: ")
def matmul_3D(a, b):
    '''
    Finds the matrix multiplication of :math:`Q` pairs of matrices ``a`` and ``b``.

    Parameters
    ----------
    a : af.Array [M N Q 1]
        First set of :math:`Q` 2D arrays :math:`N \\neq 1` and :math:`M \\neq 1`.

    b : af.Array [N P Q 1]
        Second set of :math:`Q` 2D arrays :math:`P \\neq 1`.

    Returns
    -------
    matmul : af.Array [M P Q 1]
             Matrix multiplication of :math:`Q` sets of 2D arrays.
    '''
    shape_a = shape(a)
    shape_b = shape(b)
    P = shape_b[1]

    a = af.transpose(a)
    a = af.reorder(a, d0=0, d1=3, d2=2, d3=1)
    a = af.tile(a, d0=1, d1=P)
    b = af.tile(b, d0=1, d1=1, d2=1, d3=a.shape[3])

    matmul = af.sum(a * b, dim=0)
    matmul = af.reorder(matmul, d0=3, d1=1, d2=2, d3=0)

    return matmul
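# Hedged sanity check (not from the original source): compares matmul_3D
# against a plain loop of 2D af.matmul calls over the Q slices.
M, N, P, Q = 3, 4, 5, 6
a = af.randu(M, N, Q)
b = af.randu(N, P, Q)
c = matmul_3D(a, b)

for q in range(Q):
    diff = af.max(af.abs(c[:, :, q] - af.matmul(a[:, :, q], b[:, :, q])))
    assert diff < 1e-4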
def compute_electrostatic_fields(self, rho):
    # Summing for all species:
    rho = af.sum(rho, 1)

    if (self.params.fields_solver == 'fft'):
        fft_poisson(self, rho)
        communicate.communicate_fields(self)
        apply_bcs_fields(self)
def __index_shape__(A_shape, idx, del_singleton=True):
    shape = []
    for i in range(0, len(idx)):
        if idx[i] is None:
            shape.append(0)
        elif isinstance(idx[i], numbers.Number):
            if del_singleton:
                # Remove dimensions indexed with a scalar
                continue
            else:
                shape.append(1)
        elif isinstance(idx[i], arrayfire.index.Seq):
            if idx[i].s == arrayfire.af_span:
                shape.append(A_shape[i])
            else:
                shape.append(idx[i].size)
        elif isinstance(idx[i], slice):
            shape.append(__slice_len__(idx[i], pu.c2f(A_shape), i))
        elif isinstance(idx[i], arrayfire.Array):
            if idx[i].dtype() is arrayfire.Dtype.b8:
                shape.append(int(arrayfire.sum(idx[i])))
            else:
                shape.append(idx[i].elements())
        elif isinstance(idx[i], arrayfire.index):
            if idx[i].isspan():
                shape.append(A_shape[i])
            else:
                af_idx = idx[i].get()
                if af_idx.isBatch:
                    raise ValueError
                if af_idx.isSeq:
                    shape.append(arrayfire.seq(af_idx.seq()).size)
                else:
                    shape.append(af_idx.arr_elements())
        else:
            raise ValueError
    return pu.c2f(shape)
def test():
    print("\nTesting benchmark functions...")
    A, b, x0 = setup_input(n=50, sparsity=7)  # dense A
    Asp = to_sparse(A)

    x1, _ = calc_arrayfire(A, b, x0)
    x2, _ = calc_arrayfire(Asp, b, x0)
    if af.sum(af.abs(x1 - x2) / x2 > 1e-5):
        raise ValueError("arrayfire test failed")

    if np:
        An  = to_numpy(A)
        bn  = to_numpy(b)
        x0n = to_numpy(x0)
        x3, _ = calc_numpy(An, bn, x0n)
        if not np.allclose(x3, x1.to_list()):
            raise ValueError("numpy test failed")

    if sp:
        Asc = to_scipy_sparse(Asp)
        x4, _ = calc_scipy_sparse(Asc, bn, x0n)
        if not np.allclose(x4, x1.to_list()):
            raise ValueError("scipy.sparse test failed")

        x5, _ = calc_scipy_sparse_linalg_cg(Asc, bn, x0n)
        if not np.allclose(x5, x1.to_list()):
            raise ValueError("scipy.sparse.linalg.cg test failed")

    print("  all tests passed...")
def calc_pi_device(samples):
    x = randu(samples)
    y = randu(samples)
    return 4 * af.sum((x * x + y * y) < 1) / samples
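# Hedged usage sketch (not from the original source): the Monte-Carlo estimate
# should approach math.pi as the sample count grows.
import math
pi_est = calc_pi_device(10**6)
print(pi_est, "error:", abs(pi_est - math.pi))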
def sum(self, s, axis):
    if self.dtype == numpy.bool:
        s = arrayfire.cast(s, pu.typemap(numpy.int64))
        # s = s.astype(pu.typemap(numpy.int64))
    return arrayfire.sum(s, dim=axis)
def simple_algorithm(verbose=False):
    display_func = _util.display_func(verbose)
    print_func   = _util.print_func(verbose)

    a = af.randu(3, 3)

    print_func(af.sum(a), af.product(a), af.min(a), af.max(a),
               af.count(a), af.any_true(a), af.all_true(a))

    display_func(af.sum(a, 0))
    display_func(af.sum(a, 1))
    display_func(af.product(a, 0))
    display_func(af.product(a, 1))
    display_func(af.min(a, 0))
    display_func(af.min(a, 1))
    display_func(af.max(a, 0))
    display_func(af.max(a, 1))
    display_func(af.count(a, 0))
    display_func(af.count(a, 1))
    display_func(af.any_true(a, 0))
    display_func(af.any_true(a, 1))
    display_func(af.all_true(a, 0))
    display_func(af.all_true(a, 1))
    display_func(af.accum(a, 0))
    display_func(af.accum(a, 1))

    display_func(af.sort(a, is_ascending=True))
    display_func(af.sort(a, is_ascending=False))

    b = (a > 0.1) * a
    c = (a > 0.4) * a
    d = b / c

    print_func(af.sum(d))
    print_func(af.sum(d, nan_val=0.0))
    display_func(af.sum(d, dim=0, nan_val=0.0))

    val, idx = af.sort_index(a, is_ascending=True)
    display_func(val)
    display_func(idx)
    val, idx = af.sort_index(a, is_ascending=False)
    display_func(val)
    display_func(idx)

    b = af.randu(3, 3)
    keys, vals = af.sort_by_key(a, b, is_ascending=True)
    display_func(keys)
    display_func(vals)
    keys, vals = af.sort_by_key(a, b, is_ascending=False)
    display_func(keys)
    display_func(vals)

    c = af.randu(5, 1)
    d = af.randu(5, 1)
    cc = af.set_unique(c, is_sorted=False)
    dd = af.set_unique(af.sort(d), is_sorted=True)
    display_func(cc)
    display_func(dd)

    display_func(af.set_union(cc, dd, is_unique=True))
    display_func(af.set_union(cc, dd, is_unique=False))
    display_func(af.set_intersect(cc, cc, is_unique=True))
    display_func(af.set_intersect(cc, cc, is_unique=False))
#!/usr/bin/python
import arrayfire as arr

a = arr.random.rand(4, 3)
b = arr.random.randn(3, 5)
c = arr.dot(a, b)
d = arr.sum(c)
d0 = arr.sum(c, 0)
d1 = arr.sum(c, 1)

print(a)
print(b)
print(c)
print(d)
print(d0)
print(d1)
A[1, :] = B[2, :]
af.display(A)

print("\n---- Bitwise operations\n")
af.display(A & B)
af.display(A | B)
af.display(A ^ B)

print("\n---- Transpose\n")
af.display(A)
af.display(af.transpose(A))

print("\n---- Flip Vertically / Horizontally\n")
af.display(A)
af.display(af.flip(A, 0))
af.display(af.flip(A, 1))

print("\n---- Sum, Min, Max along row / columns\n")
af.display(A)
af.display(af.sum(A, 0))
af.display(af.min(A, 0))
af.display(af.max(A, 0))
af.display(af.sum(A, 1))
af.display(af.min(A, 1))
af.display(af.max(A, 1))

print("\n---- Get minimum with index\n")
(min_val, min_idx) = af.imin(A, 0)
af.display(min_val)
af.display(min_idx)
#!/usr/bin/python
import arrayfire as af

a = af.randu(3, 3)

print(af.sum(a), af.product(a), af.min(a), af.max(a),
      af.count(a), af.any_true(a), af.all_true(a))

af.print_array(af.sum(a, 0))
af.print_array(af.sum(a, 1))
af.print_array(af.product(a, 0))
af.print_array(af.product(a, 1))
af.print_array(af.min(a, 0))
af.print_array(af.min(a, 1))
af.print_array(af.max(a, 0))
af.print_array(af.max(a, 1))
af.print_array(af.count(a, 0))
af.print_array(af.count(a, 1))
af.print_array(af.any_true(a, 0))
af.print_array(af.any_true(a, 1))
af.print_array(af.all_true(a, 0))
af.print_array(af.all_true(a, 1))
af.print_array(af.accum(a, 0))
af.print_array(af.accum(a, 1))