def reduce(self, dst):
    """Check convergence of the pressure iteration on *dst*.

    tmp_comp[0] accumulates the particle count (psi >= 0.0) and
    tmp_comp[1] the sum of psi**2; both are combined across all
    processors.  Convergence is flagged when the RMS of psi drops
    to 1e-3 or below.
    """
    # Per-processor partial sums.
    dst.tmp_comp[0] = serial_reduce_array(dst.psi >= 0.0, 'sum')
    dst.tmp_comp[1] = serial_reduce_array(dst.psi ** 2, 'sum')
    # Combine the partial sums from every rank into the carray.
    combined = parallel_reduce_array(dst.tmp_comp, 'sum')
    dst.get_carray('tmp_comp').set_data(combined)
    rms = sqrt(dst.tmp_comp[1] / dst.tmp_comp[0])
    if rms <= 1e-3:
        self.eqn_has_converged = 1
def reduce(self, dst, t, dt):
    """Track the relative density compression of *dst*.

    tmp_comp[0] counts the compressed particles (compression > 0)
    and tmp_comp[1] sums their compression; both are reduced across
    all processors before forming the average.
    """
    dst.tmp_comp[0] = serial_reduce_array(dst.compression > 0.0, 'sum')
    dst.tmp_comp[1] = serial_reduce_array(dst.compression, 'sum')
    dst.tmp_comp[:] = parallel_reduce_array(dst.tmp_comp, 'sum')
    n_compressed = dst.tmp_comp[0]
    # With no compressed particles, fall back to the reference density.
    if n_compressed > 0:
        avg_rho = dst.tmp_comp[1] / n_compressed
    else:
        avg_rho = self.rho0
    self.compression = fabs(avg_rho - self.rho0) / self.rho0
def reduce(self, dst):
    """Compute the mean relative compression of *dst* across ranks.

    tmp_comp[0] counts the compressed particles and tmp_comp[1] sums
    their compression; the result stored in ``self.compression`` is
    the average compression normalized by rho0 (0.0 when no particle
    is compressed).
    """
    dst.tmp_comp[0] = serial_reduce_array(dst.array.compression > 0.0, 'sum')
    dst.tmp_comp[1] = serial_reduce_array(dst.array.compression, 'sum')
    combined = parallel_reduce_array(dst.tmp_comp, 'sum')
    dst.tmp_comp.set_data(combined)
    if dst.tmp_comp[0] > 0:
        self.compression = dst.tmp_comp[1] / dst.tmp_comp[0] / self.rho0
    else:
        self.compression = 0.0
def reduce(self, dst):
    """Adapt smoothing lengths from the geometric mean of the density.

    The geometric mean ``g = exp(mean(log(rho)))`` is computed over
    all processors; each particle's smoothing length is then scaled
    as ``h = k * (g / rho)**eps * h0`` (vectorized over the array).
    """
    num = len(dst.x)
    # Per-processor sum of log(rho), then combined across all ranks.
    local_sum = serial_reduce_array(dst.logrho, 'sum')
    total = parallel_reduce_array(local_sum, 'sum')
    gmean = exp(total / num)
    scale = self.k * numpy.power(gmean / dst.rho, self.eps)
    dst.h[:] = scale * dst.h0
def reduce(self, d_rho, d_h, d_h0, dst):
    # Adapt each particle's smoothing length from the global geometric
    # mean of the density: h = k * (g / rho)**eps * h0.
    # NOTE(review): this is written in the PySPH transpilable subset --
    # `declare` marks typed locals and the explicit loop form is
    # presumably required by the transpiler, so the statements are kept
    # exactly as written.
    n = declare('int')
    k = declare('int')
    n = len(dst.x)
    # Per-processor sum of log(rho), then combined across all ranks.
    tmp_sum_logrho = serial_reduce_array(dst.array.logrho, 'sum')
    sum_logrho = parallel_reduce_array(tmp_sum_logrho, 'sum')
    # Geometric mean of the density: exp(mean(log(rho))).
    g = exp(sum_logrho / n)
    for k in range(n):
        lamda = self.k * pow(g / d_rho[k], self.eps)
        d_h[k] = lamda * d_h0[k]
def main():
    """Check serial + MPI array reductions against plain numpy.

    Each rank holds ``n`` copies of ``rank + 1``; the expected value
    of every reduction is computed with numpy on the concatenation of
    all ranks' data and compared with the distributed result.
    """
    comm = mpi.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    n = 5
    data = np.ones(n) * (rank + 1)
    # What the concatenation of every rank's data looks like.
    full_data = np.concatenate(
        [np.ones(n) * (i + 1) for i in range(size)]
    )
    for op in ('sum', 'prod', 'min', 'max'):
        partial = serial_reduce_array(data, op)
        result = mpi_reduce_array(partial, op)
        expect = getattr(np, op)(full_data)
        msg = "For op %s: Expected %s, got %s" % (op, expect, result)
        assert expect == result, msg
def reduce(self, dst):
    """Store the total particle mass, summed over all processors,
    in ``dst.total_mass[0]``.
    """
    local_mass = serial_reduce_array(dst.m, op='sum')
    dst.total_mass[0] = parallel_reduce_array(local_mass, op='sum')
def reduce(self, dst, t, dt):
    """Sum the particle masses into ``dst.total_mass[0]``, pushing the
    result to the GPU copy of the array when one exists.
    """
    dst.total_mass[0] = serial_reduce_array(dst.m, op='sum')
    gpu = dst.gpu
    if gpu is not None:
        gpu.push('total_mass')
def reduce(self, dst):
    """Record the (serial, single-processor) total particle mass in
    ``dst.total_mass[0]``.
    """
    total = serial_reduce_array(dst.array.m, op='sum')
    dst.total_mass[0] = total
def py_initialize(self, dst, t, dt):
    """Record the maximum particle speed, over all ranks, in
    ``dst.vmax[0]``.
    """
    from numpy import sqrt
    # Velocity magnitude per particle (vectorized).
    speed = sqrt(dst.u**2 + dst.v**2 + dst.w**2)
    dst.vmax[0] = serial_reduce_array(speed, 'max')
    # Combine the per-rank maxima.
    dst.vmax[:] = parallel_reduce_array(dst.vmax, 'max')
"""Test if the mpi_reduce_array function works correctly. """
import mpi4py.MPI as mpi
import numpy as np

from pysph.base.reduce_array import serial_reduce_array, mpi_reduce_array

comm = mpi.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# Each rank holds n copies of (rank + 1).
n = 5
data = np.ones(n) * (rank + 1)

# The array every rank would see if all per-rank data were joined.
full_data = np.concatenate([np.ones(n) * (i + 1) for i in range(size)])

# Compare the distributed reduction with numpy on the full data.
for op in ('sum', 'prod', 'min', 'max'):
    reduced = serial_reduce_array(data, op)
    result = mpi_reduce_array(reduced, op)
    expect = getattr(np, op)(full_data)
    msg = "For op %s: Expected %s, got %s" % (op, expect, result)
    assert expect == result, msg
def test_reduce_sum_works(self):
    """serial_reduce_array with 'sum' must agree with numpy.sum."""
    data = np.linspace(0, 10, 100)
    self.assertAlmostEqual(serial_reduce_array(data, 'sum'), np.sum(data))
def test_reduce_prod_works(self):
    """serial_reduce_array with 'prod' must agree with numpy.prod."""
    data = np.linspace(0, 10, 100)
    self.assertAlmostEqual(serial_reduce_array(data, 'prod'), np.prod(data))