def check_divergence(self):
    """
    Terminates the program if a blowup occurs in any segment of the
    solver, causing values of the distribution function to become
    infinite or undefined.
    """
    if (af.any_true(af.isinf(self.f)) or af.any_true(af.isnan(self.f))):
        raise SystemExit('Solver Diverging!')
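# A minimal standalone sketch (not solver code) of the guard above: create a
# NaN via 0/0 and confirm that af.any_true combined with af.isnan detects it.
import arrayfire as af

bad = af.constant(0, 3, 3) / af.constant(0, 3, 3)   # 0/0 -> NaN everywhere
assert af.any_true(af.isnan(bad))                   # the guard would trigger
assert not af.any_true(af.isinf(af.randu(3, 3)))    # finite data passes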
def op_fvm_q(self, dt):

    self._communicate_f()
    self._apply_bcs_f()

    if self.performance_test_flag:
        tic = af.time()

    fvm_timestep_RK2(self, dt)

    if self.performance_test_flag:
        af.sync()
        toc = af.time()
        self.time_fvm_solver += toc - tic

    # Solving for tau = 0 systems:
    if(af.any_true(self.physical_system.params.tau(self.q1_center,
                                                   self.q2_center,
                                                   self.p1, self.p2, self.p3
                                                  ) == 0
                  )
      ):
        if self.performance_test_flag:
            tic = af.time()

        self.f = self._source(self.f, self.q1_center, self.q2_center,
                              self.p1, self.p2, self.p3,
                              self.compute_moments,
                              self.physical_system.params,
                              True
                             )

        if self.performance_test_flag:
            af.sync()
            toc = af.time()
            self.time_sourcets += toc - tic

    af.eval(self.f)
    return
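# A self-contained sketch (illustrative workload) of the timing pattern used
# above: ArrayFire queues kernels asynchronously, so af.sync() must run before
# reading the clock, or the measurement only times the kernel launch.
import arrayfire as af

tic = af.time()
b = af.fft2(af.randu(512, 512))
af.sync()
toc = af.time()
print('fft2 took {:.6f} s'.format(toc - tic))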
def RK5_step(self, dt):
    self.Y = integrators.RK5(dY_dt, self.Y, dt, self)

    # Solving for tau = 0 systems:
    if(    not self.single_mode_evolution
       and af.any_true(self.physical_system.params.tau(self.q1_center,
                                                       self.q2_center,
                                                       self.p1, self.p2,
                                                       self.p3
                                                      ) == 0
                      )
      ):
        f_hat = self.Y[:, :, :, 0]
        f     = af.real(af.ifft2(0.5 * self.N_q2 * self.N_q1 * f_hat))

        self.Y[:, :, :, 0] = \
            2 * af.fft2(self._source(f, self.q1_center, self.q2_center,
                                     self.p1, self.p2, self.p3,
                                     self.compute_moments,
                                     self.physical_system.params,
                                     True
                                    )
                       ) / (self.N_q2 * self.N_q1)

    return
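# Sketch checking the normalization convention used above: the 2/N factor on
# the forward transform and the N/2 factor on the inverse cancel, so encoding
# f as 2*fft2(f)/N and decoding as real(ifft2(0.5*N*f_hat)) round-trips
# (assuming ArrayFire's ifft2 applies its default 1/N scaling).
import arrayfire as af

N_q1 = N_q2 = 8
f      = af.randu(N_q1, N_q2)
f_hat  = 2 * af.fft2(f) / (N_q1 * N_q2)
f_back = af.real(af.ifft2(0.5 * N_q1 * N_q2 * f_hat))
print(af.max(af.abs(f - f_back)))   # ~0 up to f32 round-off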
def BGK(f, q1, q2, p1, p2, p3, moments, params, flag = False):
    """Return the BGK collision operator -(f - f0)/tau."""
    n = moments('density', f)

    # Floor used to avoid the 0/0 limit:
    eps = 1e-30

    p1_bulk = moments('mom_p1_bulk', f) / (n + eps)
    p2_bulk = moments('mom_p2_bulk', f) / (n + eps)
    p3_bulk = moments('mom_p3_bulk', f) / (n + eps)

    T = (1 / params.p_dim) * (  2 * moments('energy', f)
                              - n * p1_bulk**2
                              - n * p2_bulk**2
                              - n * p3_bulk**2
                             ) / (n + eps) + eps

    # When tau is zero, we assign f = f0 manually:
    if(af.any_true(params.tau(q1, q2, p1, p2, p3) == 0)):
        f_MB = f0(p1, p2, p3, n, T, p1_bulk, p2_bulk, p3_bulk, params)

        if not flag:
            f_MB[:] = 0

        return f_MB

    else:
        C_f = -(  f
                - f0(p1, p2, p3, n, T, p1_bulk, p2_bulk, p3_bulk, params)
               ) / params.tau(q1, q2, p1, p2, p3)

        # WORKAROUND: when (f - f0) is NaN, dividing by np.inf doesn't give 0,
        # so for tau = inf (collisionless) we set C_f = 0 explicitly:
        if isinstance(params.tau(q1, q2, p1, p2, p3), af.Array):
            C_f = af.select(params.tau(q1, q2, p1, p2, p3) == np.inf,
                            0, C_f
                           )
            af.eval(C_f)
        else:
            if(params.tau(q1, q2, p1, p2, p3) == np.inf):
                C_f = 0

        return C_f
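# Standalone sketch (hypothetical values) of the workaround above: a NaN in
# (f - f0) divided by tau = inf stays NaN rather than becoming 0, and
# af.select forces those collisionless entries back to zero.
import numpy as np
import arrayfire as af

tau = af.Array([1.0, np.inf, np.inf])
df  = af.Array([0.5, 0.5, float('nan')])    # stand-in for (f - f0)
C_f = -df / tau                             # last entry: -nan/inf -> NaN
C_f = af.select(tau == np.inf, 0, C_f)      # -> [-0.5, 0, 0]
af.display(C_f)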
def RK5_step(self, dt):
    """
    Evolves the physical system defined using an RK5 integrator.
    This method is 5th order accurate.

    Parameters
    ----------

    dt: double
        The timestep size.
    """
    # For purely collisional cases:
    tau = self.physical_system.params.tau(self.q1_center, self.q2_center,
                                          self.p1, self.p2, self.p3
                                         )
    if(af.any_true(tau == 0)):
        f0 = self._source(0.5 * self.N_q1 * self.N_q2
                          * af.real(ifft2(self.f_hat)),
                          self.time_elapsed,
                          self.q1_center, self.q2_center,
                          self.p1, self.p2, self.p3,
                          self.compute_moments,
                          self.physical_system.params,
                          True
                         )

        self.f_hat = af.select(tau == 0,
                               2 * fft2(f0) / (self.N_q1 * self.N_q2),
                               self.f_hat
                              )

    if(    self.physical_system.params.EM_fields_enabled
       and self.physical_system.params.fields_type == 'electrodynamic'
      ):
        # Since the fields and the distribution function are coupled, we
        # evolve the system by making use of a coupled integrator, which
        # ensures that throughout the timestepping they are evaluated at
        # the same temporal locations.
        self.f_hat, self.fields_solver.fields_hat = \
            integrators.RK5_coupled(df_hat_dt, self.f_hat,
                                    dfields_hat_dt,
                                    self.fields_solver.fields_hat,
                                    dt, self
                                   )
    else:
        self.f_hat = integrators.RK5(df_hat_dt, self.f_hat, dt,
                                     self.fields_solver.fields_hat, self
                                    )

    return
def op_solve_src(self, dt):
    """
    Evolves the source term of the equations specified:
    df/dt = source

    Parameters
    ----------

    dt : double
        Time-step size to evolve the system
    """
    if self.performance_test_flag:
        tic = af.time()

    # Solving for tau = 0 systems:
    tau = self.physical_system.params.tau(self.q1_center, self.q2_center,
                                          self.p1_center, self.p2_center,
                                          self.p3_center
                                         )
    if(af.any_true(tau == 0)):
        self.f = af.select(tau == 0,
                           self._source(self.f, self.time_elapsed,
                                        self.q1_center, self.q2_center,
                                        self.p1_center, self.p2_center,
                                        self.p3_center,
                                        self.compute_moments,
                                        self.physical_system.params,
                                        True
                                       ),
                           self.f
                          )

    self.f = integrators.RK2(self._source, self.f, dt, self.time_elapsed,
                             self.q1_center, self.q2_center,
                             self.p1_center, self.p2_center, self.p3_center,
                             self.compute_moments,
                             self.physical_system.params
                            )

    if self.performance_test_flag:
        af.sync()
        toc = af.time()
        self.time_sourcets += toc - tic

    return
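# A minimal sketch of an RK2 (midpoint) step with the calling convention
# assumed above, where dY_dt(Y, *args) returns dY/dt. This is illustrative
# only, not the integrators.RK2 shipped with the solver.
def rk2_step(dY_dt, Y, dt, *args):
    k1 = dY_dt(Y, *args)                  # slope at the start of the step
    k2 = dY_dt(Y + 0.5 * dt * k1, *args)  # slope at the midpoint
    return Y + dt * k2                    # 2nd-order accurate update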
def op_solve_src(self, dt):
    if self.performance_test_flag:
        tic = af.time()

    # Solving for tau = 0 systems:
    if(af.any_true(self.physical_system.params.tau(self.q1_center,
                                                   self.q2_center,
                                                   self.p1, self.p2, self.p3
                                                  ) == 0
                  )
      ):
        self.f = self._source(self.f, self.q1_center, self.q2_center,
                              self.p1, self.p2, self.p3,
                              self.compute_moments,
                              self.physical_system.params,
                              True
                             )
    else:
        self.f = integrators.RK2(self._source, self.f, dt,
                                 self.q1_center, self.q2_center,
                                 self.p1, self.p2, self.p3,
                                 self.compute_moments,
                                 self.physical_system.params
                                )

    if self.performance_test_flag:
        af.sync()
        toc = af.time()
        self.time_sourcets += toc - tic

    return
def simple_algorithm(verbose=False):
    display_func = _util.display_func(verbose)
    print_func = _util.print_func(verbose)

    a = af.randu(3, 3)

    print_func(af.sum(a), af.product(a), af.min(a), af.max(a),
               af.count(a), af.any_true(a), af.all_true(a))

    display_func(af.sum(a, 0))
    display_func(af.sum(a, 1))

    display_func(af.product(a, 0))
    display_func(af.product(a, 1))

    display_func(af.min(a, 0))
    display_func(af.min(a, 1))
    display_func(af.max(a, 0))
    display_func(af.max(a, 1))

    display_func(af.count(a, 0))
    display_func(af.count(a, 1))

    display_func(af.any_true(a, 0))
    display_func(af.any_true(a, 1))
    display_func(af.all_true(a, 0))
    display_func(af.all_true(a, 1))

    display_func(af.accum(a, 0))
    display_func(af.accum(a, 1))

    display_func(af.sort(a, is_ascending=True))
    display_func(af.sort(a, is_ascending=False))

    b = (a > 0.1) * a
    c = (a > 0.4) * a
    d = b / c

    print_func(af.sum(d))
    print_func(af.sum(d, nan_val=0.0))
    display_func(af.sum(d, dim=0, nan_val=0.0))

    val, idx = af.sort_index(a, is_ascending=True)
    display_func(val)
    display_func(idx)
    val, idx = af.sort_index(a, is_ascending=False)
    display_func(val)
    display_func(idx)

    b = af.randu(3, 3)
    keys, vals = af.sort_by_key(a, b, is_ascending=True)
    display_func(keys)
    display_func(vals)
    keys, vals = af.sort_by_key(a, b, is_ascending=False)
    display_func(keys)
    display_func(vals)

    c = af.randu(5, 1)
    d = af.randu(5, 1)
    cc = af.set_unique(c, is_sorted=False)
    dd = af.set_unique(af.sort(d), is_sorted=True)
    display_func(cc)
    display_func(dd)

    display_func(af.set_union(cc, dd, is_unique=True))
    display_func(af.set_union(cc, dd, is_unique=False))

    display_func(af.set_intersect(cc, cc, is_unique=True))
    display_func(af.set_intersect(cc, cc, is_unique=False))
def transform(self, X):
    """Impute all missing values in X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data to complete.
    """
    check_is_fitted(self)

    X = self._validate_input(X, in_fit=False)
    # X = af.Array.to_ndarray(X)
    X_indicator = super()._transform_indicator(X)

    statistics = self.statistics_
    if X.shape[1] != statistics.shape[0]:
        raise ValueError(
            f"X has {X.shape[1]} features per sample, "
            f"expected {self.statistics_.shape[0]}")

    # Delete the invalid columns if strategy is not constant
    if self.strategy == "constant":
        valid_statistics = statistics
    else:
        # same as af.isnan but also works for object dtypes
        # invalid_mask = _get_mask(statistics, np.nan)  # BUG: af runtime error
        invalid_mask = af.isnan(statistics)  # FIXME
        valid_mask = invalid_mask.logical_not()
        valid_statistics = statistics[valid_mask]
        valid_statistics_indexes = np.flatnonzero(valid_mask)

        if af.any_true(invalid_mask):
            missing = af.arange(X.shape[1])[invalid_mask]
            if self.verbose:
                warnings.warn(
                    f"Deleting features without observed values: {missing}")
            X = X[:, valid_statistics_indexes]

    # Do actual imputation
    if sp.issparse(X):
        if self.missing_values == 0:
            raise ValueError(
                "Imputation not possible when missing_values == 0 and "
                "input is sparse. Provide a dense array instead.")
        else:
            mask = _get_mask(X.data, self.missing_values)
            indexes = af.repeat(af.arange(len(X.indptr) - 1, dtype=af.int),
                                af.diff(X.indptr))[mask]
            X.data[mask] = valid_statistics[indexes].astype(X.dtype,
                                                            copy=False)
    else:
        # mask = _get_mask(X, self.missing_values)  # BUG
        mask = af.isnan(X)  # FIXME
        # n_missing = af.sum(mask, axis=0)  # BUG: af uses dim, not axis
        n_missing = af.sum(mask, dim=0)
        coordinates = af.where(mask.T)[::-1]  # BUG
        valid_statistics = valid_statistics.to_ndarray().ravel()
        n_missing = n_missing.to_ndarray().ravel()
        values = np.repeat(valid_statistics, n_missing)  # BUG
        values = af.interop.from_ndarray(values)

        odims = X.dims()
        X = af.flat(X)
        X[coordinates] = values
        X = af.moddims(X, *odims)

    return super()._concatenate_indicator(X, X_indicator)
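# A minimal sketch of the dense-branch idea above in its simplest form:
# replace missing (NaN) entries with a fill value via af.select. The real
# method fills per column through flat indexing; this is illustrative only.
import arrayfire as af

X = af.Array([1.0, float('nan'), 3.0])
X = af.select(af.isnan(X), 0.0, X)   # NaNs become 0.0, other entries kept
af.display(X)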
def check_divergence(self):
    if (af.any_true(af.isinf(self.f)) or af.any_true(af.isnan(self.f))):
        raise SystemExit('Solver Diverging!')
#!/usr/bin/python
import arrayfire as af

a = af.randu(3, 3)

print(af.sum(a), af.product(a), af.min(a), af.max(a),
      af.count(a), af.any_true(a), af.all_true(a))

af.print_array(af.sum(a, 0))
af.print_array(af.sum(a, 1))

af.print_array(af.product(a, 0))
af.print_array(af.product(a, 1))

af.print_array(af.min(a, 0))
af.print_array(af.min(a, 1))
af.print_array(af.max(a, 0))
af.print_array(af.max(a, 1))

af.print_array(af.count(a, 0))
af.print_array(af.count(a, 1))

af.print_array(af.any_true(a, 0))
af.print_array(af.any_true(a, 1))
af.print_array(af.all_true(a, 0))
af.print_array(af.all_true(a, 1))

af.print_array(af.accum(a, 0))
af.print_array(af.accum(a, 1))
def any(self, s, axis):
    return arrayfire.any_true(s, dim=axis)
def any(a: ndarray,
        axis: tp.Optional[int] = None,
        out: tp.Optional[ndarray] = None,
        keepdims: bool = False) \
        -> tp.Union[bool, ndarray]:
    # NumPy-style wrapper over af.any_true; out and keepdims are accepted
    # for signature compatibility but are not used.
    return _wrap_af_array(af.any_true(a._af_array, dim=axis))
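# Quick demonstration of the dim semantics the wrapper above forwards to:
# dim=None reduces over all elements, dim=0 down each column, dim=1 across
# each row (output values depend on the random input).
import arrayfire as af

a = af.randu(3, 4) > 0.5
print(af.any_true(a))               # scalar: any True anywhere
af.display(af.any_true(a, dim=0))   # 1 x 4: per-column reduction
af.display(af.any_true(a, dim=1))   # 3 x 1: per-row reduction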
def r2_score(y_true, y_pred, *, sample_weight=None,
             multioutput="uniform_average"):
    """R^2 (coefficient of determination) regression score function.

    Best possible score is 1.0 and it can be negative (because the model
    can be arbitrarily worse). A constant model that always predicts the
    expected value of y, disregarding the input features, would get a R^2
    score of 0.0.

    Read more in the :ref:`User Guide <r2_score>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), optional
        Sample weights.

    multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or None or array-like of shape (n_outputs)
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        Default is "uniform_average".

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

        .. versionchanged:: 0.19
            Default value of multioutput is 'uniform_average'.

    Returns
    -------
    z : float or ndarray of floats
        The R^2 score or ndarray of scores if 'multioutput' is
        'raw_values'.

    Notes
    -----
    This is not a symmetric function.

    Unlike most other scores, R^2 score may be negative (it need not
    actually be the square of a quantity R).

    This metric is not well-defined for single samples and will return a
    NaN value if n_samples is less than two.

    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_

    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred)
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred,
    ...          multioutput='variance_weighted')
    0.938...
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 2, 3]
    >>> r2_score(y_true, y_pred)
    1.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [2, 2, 2]
    >>> r2_score(y_true, y_pred)
    0.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [3, 2, 1]
    >>> r2_score(y_true, y_pred)
    -3.0
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)

    if _num_samples(y_pred) < 2:
        msg = "R^2 score is not well-defined with less than two samples."
        warnings.warn(msg, UndefinedMetricWarning)
        return float('nan')

    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        weight = sample_weight[:, np.newaxis]
    else:
        weight = 1.

    numerator = af.sum((weight * (y_true - y_pred) ** 2), dim=0)
    # denominator = (weight * (y_true - np.average(
    #     y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
    #                                                       dtype=np.float64)
    denominator = af.sum((weight * (y_true
                                    - af.tile(af.mean(y_true,
                                                      weights=sample_weight,
                                                      dim=0),
                                              y_true.shape[0])) ** 2),
                         dim=0)
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    y_sz_1 = 1 if y_true.numdims() == 1 else y_true.shape[1]
    output_scores = af.constant(0, y_sz_1)
    if af.any_true(valid_score):
        output_scores[valid_score] = (1.0 - (numerator[valid_score] /
                                             denominator[valid_score])
                                     ).as_type(output_scores.dtype())
    # Arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway.
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.

    if isinstance(multioutput, str):
        if multioutput == 'raw_values':
            # return scores individually
            return output_scores
        elif multioutput == 'uniform_average':
            # Passing None as weights results in uniform mean
            avg_weights = None
        elif multioutput == 'variance_weighted':
            avg_weights = denominator
            # Avoid failing on constant y or one-element arrays
            if not af.any_true(nonzero_denominator):
                if not af.any_true(nonzero_numerator):
                    return 1.0
                else:
                    return 0.0
    else:
        avg_weights = multioutput

    # return np.average(output_scores, weights=avg_weights)
    return af.mean(output_scores, weights=avg_weights)
#!/usr/bin/python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################

import arrayfire as af

a = af.randu(3, 3)

print(af.sum(a), af.product(a), af.min(a), af.max(a),
      af.count(a), af.any_true(a), af.all_true(a))

af.display(af.sum(a, 0))
af.display(af.sum(a, 1))

af.display(af.product(a, 0))
af.display(af.product(a, 1))

af.display(af.min(a, 0))
af.display(af.min(a, 1))
af.display(af.max(a, 0))
af.display(af.max(a, 1))

af.display(af.count(a, 0))
af.display(af.count(a, 1))
def hasnan(arr):
    return af.any_true(af.isnan(arr))
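# Usage sketch for the helper above: 0/0 produces NaNs, which hasnan flags;
# a plain random array does not.
import arrayfire as af

clean = af.randu(4, 4)
dirty = af.constant(0, 4, 4) / af.constant(0, 4, 4)   # 0/0 -> NaN
print(hasnan(clean), hasnan(dirty))                   # False True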
def simple_algorithm(verbose=False):
    display_func = _util.display_func(verbose)
    print_func = _util.print_func(verbose)

    a = af.randu(3, 3)
    k = af.constant(1, 3, 3, dtype=af.Dtype.u32)
    af.eval(k)

    print_func(af.sum(a), af.product(a), af.min(a), af.max(a),
               af.count(a), af.any_true(a), af.all_true(a))

    display_func(af.sum(a, 0))
    display_func(af.sum(a, 1))

    rk = af.constant(1, 3, dtype=af.Dtype.u32)
    rk[2] = 0
    af.eval(rk)
    display_func(af.sumByKey(rk, a, dim=0))
    display_func(af.sumByKey(rk, a, dim=1))

    display_func(af.productByKey(rk, a, dim=0))
    display_func(af.productByKey(rk, a, dim=1))

    display_func(af.minByKey(rk, a, dim=0))
    display_func(af.minByKey(rk, a, dim=1))

    display_func(af.maxByKey(rk, a, dim=0))
    display_func(af.maxByKey(rk, a, dim=1))

    display_func(af.anyTrueByKey(rk, a, dim=0))
    display_func(af.anyTrueByKey(rk, a, dim=1))

    display_func(af.allTrueByKey(rk, a, dim=0))
    display_func(af.allTrueByKey(rk, a, dim=1))

    display_func(af.countByKey(rk, a, dim=0))
    display_func(af.countByKey(rk, a, dim=1))

    display_func(af.product(a, 0))
    display_func(af.product(a, 1))

    display_func(af.min(a, 0))
    display_func(af.min(a, 1))
    display_func(af.max(a, 0))
    display_func(af.max(a, 1))

    display_func(af.count(a, 0))
    display_func(af.count(a, 1))

    display_func(af.any_true(a, 0))
    display_func(af.any_true(a, 1))
    display_func(af.all_true(a, 0))
    display_func(af.all_true(a, 1))

    display_func(af.accum(a, 0))
    display_func(af.accum(a, 1))

    display_func(af.scan(a, 0, af.BINARYOP.ADD))
    display_func(af.scan(a, 1, af.BINARYOP.MAX))
    display_func(af.scan_by_key(k, a, 0, af.BINARYOP.ADD))
    display_func(af.scan_by_key(k, a, 1, af.BINARYOP.MAX))

    display_func(af.sort(a, is_ascending=True))
    display_func(af.sort(a, is_ascending=False))

    b = (a > 0.1) * a
    c = (a > 0.4) * a
    d = b / c

    print_func(af.sum(d))
    print_func(af.sum(d, nan_val=0.0))
    display_func(af.sum(d, dim=0, nan_val=0.0))

    val, idx = af.sort_index(a, is_ascending=True)
    display_func(val)
    display_func(idx)
    val, idx = af.sort_index(a, is_ascending=False)
    display_func(val)
    display_func(idx)

    b = af.randu(3, 3)
    keys, vals = af.sort_by_key(a, b, is_ascending=True)
    display_func(keys)
    display_func(vals)
    keys, vals = af.sort_by_key(a, b, is_ascending=False)
    display_func(keys)
    display_func(vals)

    c = af.randu(5, 1)
    d = af.randu(5, 1)
    cc = af.set_unique(c, is_sorted=False)
    dd = af.set_unique(af.sort(d), is_sorted=True)
    display_func(cc)
    display_func(dd)

    display_func(af.set_union(cc, dd, is_unique=True))
    display_func(af.set_union(cc, dd, is_unique=False))

    display_func(af.set_intersect(cc, cc, is_unique=True))
    display_func(af.set_intersect(cc, cc, is_unique=False))