Example #1
def pvbBound(n):
    a = multiply(2, n)
    b = power(a, 50)
    c = multiply(6, b)
    d = divide(c, 0.05)
    e = log(d)
    f = divide(1.0, n)
    return divide(1.0, n) + sqrt(divide(1.0, power(n, 2)) + multiply(f, e))
Example #2
 def ddx_cgrid_centered(self, q):
     dxc = np.tile(self.dxc, (Nz, Ny, 1))
     out = np.zeros(q.shape)
     out[:, :, 1:Nx - 1] = ma.divide((q[:, :, 2::] - q[:, :, :Nx - 2]),
                                     (dxc[:, :, 1:Nx - 1] + dxc[:, :, 3::]))
     out[:, :, 0] = ma.divide((q[:, :, 1] - q[:, :, 0]), dxc[:, :, 0])
     out[:, :, -1] = ma.divide((q[:, :, -1] - q[:, :, -2]), dxc[:, :, -1])
     return out
Example #3
def ddT_Lgrid_centered(q, th):
    """Vertical second-order centered difference on the layers grid"""

    out = np.zeros(q.shape)
    # second order for interior
    out[1:-1, :] = ma.divide((q[1:-1] - q[2::]), (th[1:-1] + th[2::]))
    # first order for the top and bottom
    out[0, :] = ma.divide((q[0, :] - q[1, :]), th[0, :])
    out[-1, :] = ma.divide((q[-2, :] - q[-1, :]), th[-1, :])

    return out
Example #4
    def ddz_cgrid_centered(self, q):
        """Vertical second-order centered difference on the c grid"""
        dzf = np.tile(self.dzf, (Ny, 1)).T
        if len(q.shape) == 3:
            dzf = np.tile(dzf.T, (Nx, 1, 1)).T
        out = np.zeros(q.shape)
        # second order for interior
        out[1:Nz - 1, :] = ma.divide((q[1:-1] - q[2::]),
                                     (dzf[1:-1] + dzf[2::]))
        # first order for the top and bottom
        out[0, :] = ma.divide((q[0, :] - q[1, :]), dzf[0, :])
        out[-1, :] = ma.divide((q[-2, :] - q[-1, :]), dzf[-1, :])

        return out
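The ddx/ddy/ddz helpers above all follow the same masked-array stencil: a centered difference over two grid spacings in the interior and a one-sided difference at the ends, with ma.divide so masked cells propagate instead of raising divide-by-zero warnings. Below is a minimal self-contained 1-D sketch of that idea (hypothetical helper name, using the conventional q[i+1] - q[i-1] interior stencil rather than the exact slices used above):

import numpy as np
import numpy.ma as ma

def centered_diff_1d(q, dz):
    """Centered difference on a non-uniform grid: second order in the interior,
    first-order one-sided at both ends. dz has the same length as q and holds
    the local grid spacing."""
    out = ma.zeros(q.shape)
    # interior: difference across two cells divided by the two spacings
    out[1:-1] = ma.divide(q[2:] - q[:-2], dz[1:-1] + dz[2:])
    # boundaries: simple one-sided differences
    out[0] = ma.divide(q[1] - q[0], dz[0])
    out[-1] = ma.divide(q[-1] - q[-2], dz[-1])
    return out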
Example #5
    def ddy_cgrid_centered_1D(self, q):
        """Merdional second-order centered difference on the c grid"""

        dyg = self.dyg
        out = np.zeros(q.shape)
        out[1:-1] = ma.divide((q[2::] - q[1:-1]), (dyg[1:-1] + dyg[2::]))
        out[0] = ma.divide((q[1] - q[0]), dyg[0])
        out[-1] = ma.divide((q[-1] - q[-2]), dyg[-1])

        #if isinstance(q, ma.masked_array):
        #    mask = q.mask
        #    mask[:,1:Ny-1] = q.mask[:,2:] | q.mask[:,:Ny-2]
        #   out = ma.masked_array(out, mask)

        return out
Example #6
 def depth_average(self, Var):
     """Depth Average a variable in C-grid with varying depth"""
     Depth_av = ma.mean(
         ma.divide(Var * np.tile(self.dzf, (self.Nx, self.Ny, 1)).T,
                   self.Depth),
         axis=0)
     return Depth_av
Example #7
def ddy_Lgrid_centered(q, dyg):
    """Merdional second-order centered difference on the layers grid"""

    dyg = np.tile(dyg, (len(q[:, 1]), 1))
    out = np.zeros(q.shape)
    out[:, 1:-1] = ma.divide((q[:, 2::] - q[:, 1:-1]),
                             (dyg[:, 1:-1] + dyg[:, 2::]))
    out[:, 0] = ma.divide((q[:, 1] - q[:, 0]), dyg[:, 0])
    out[:, -1] = ma.divide((q[:, -1] - q[:, -2]), dyg[:, -1])

    #if isinstance(q, ma.masked_array):
    #    mask = q.mask
    #    mask[:,1:Ny-1] = q.mask[:,2:] | q.mask[:,:Ny-2]
    #   out = ma.masked_array(out, mask)

    return out
Example #8
def average_in_flux(mag, dmag, axis=None):
    flux = 10**(mag / -2.5)
    dflux = np.log(10) / 2.5 * flux * dmag
    avg_dflux = np.power(np.sum(np.power(dflux, -2), axis), -0.5)
    avg_flux = np.sum(flux * np.power(dflux, -2), axis) * avg_dflux**2
    avg_mag = -2.5 * np.log10(avg_flux)
    avg_dmag = 2.5 / np.log(10) * np.divide(avg_dflux, avg_flux)
    return avg_mag, avg_dmag
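average_in_flux converts magnitudes to fluxes, forms the inverse-variance weighted mean of the fluxes, and converts the result and its propagated uncertainty back to a magnitude. A toy call, purely illustrative and not taken from the source project:

import numpy as np

# three hypothetical measurements of the same source
mag = np.array([18.20, 18.31, 18.25])
dmag = np.array([0.05, 0.08, 0.06])

avg_mag, avg_dmag = average_in_flux(mag, dmag)
# avg_dmag is the uncertainty of the weighted flux mean propagated back to magnitudes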
Example #10
 def get_qgpv_grad(self, mask=None):
     """Calculate QGPV gradient from standard output fields"""
     if mask is not None:
         T = self.mnc('Tav.nc', 'THETA', mask)
     else:
         T = self.mnc('Tav.nc', 'THETA')
     # isopycnal slope
     s = ma.divide(-self.ddy_cgrid_centered(T), self.ddz_cgrid_centered(T))
     return self.beta - self.f0 * self.ddz_cgrid_centered(s)
Example #11
    def ddy_cgrid_centered(self, q):
        """Merdional second-order centered difference on the c grid"""

        dyg = np.tile(self.dyg, (Nz, 1))
        if len(q.shape) == 3:
            dyg = np.tile(dyg.T, (Nx, 1, 1)).T
        out = np.zeros(q.shape)
        out[:, 1:-1] = ma.divide((q[:, 2::] - q[:, 1:-1]),
                                 (dyg[:, 1:-1] + dyg[:, 2::]))
        out[:, 0] = ma.divide((q[:, 1] - q[:, 0]), dyg[:, 0])
        out[:, -1] = ma.divide((q[:, -1] - q[:, -2]), dyg[:, -1])

        #if isinstance(q, ma.masked_array):
        #    mask = q.mask
        #    mask[:,1:Ny-1] = q.mask[:,2:] | q.mask[:,:Ny-2]
        #   out = ma.masked_array(out, mask)

        return out
Example #12
def smap_p_e_exact_downscale(doy_start, doy_end):
    """
    smap_p_e usa 9 km index range: lat [171:507], lon [568:1241], 0-based and inclusive
    corresponding 3 km index range: lat [513:1523], lon [1704:3725], 0-based and inclusive
    """
    in_path = os.path.join("Data", "SMAP_P_E", "usa")
    out_path = get_out_path(os.path.join("Data", "SMAP_P_E", "usa_3km_exact"))
    cont_var_dic = [
        "soil_moisture", "tb_v_corrected", "freeze_thaw_fraction",
        "roughness_coefficient", "surface_temperature", "vegetation_opacity",
        "vegetation_water_content", "albedo"
    ]

    for doy in generate_doy(doy_start, doy_end, ""):
        print(doy)
        fh_in = Dataset(os.path.join(in_path, doy + ".nc"), "r")
        fh_out = Dataset(os.path.join(out_path, doy + ".nc"), "w")

        lats, lons = get_lat_lon("M03")
        lats = lats[513:1524]
        lons = lons[1704:3726]

        fh_out.createDimension('lat', len(lats))
        fh_out.createDimension('lon', len(lons))
        outVar = fh_out.createVariable('lat', 'f4', ('lat', ))
        outVar.setncatts({"units": "degree_north"})
        outVar[:] = lats[:]
        outVar = fh_out.createVariable('lon', 'f4', ('lon', ))
        outVar.setncatts({"units": "degree_east"})
        outVar[:] = lons[:]

        datatype = None
        tb_value, ts_value = None, None
        for v_name, varin in fh_in.variables.items():
            if v_name in cont_var_dic:
                outVar = fh_out.createVariable(v_name, varin.datatype,
                                               varin.dimensions)
                outVar.setncatts(
                    {k: varin.getncattr(k)
                     for k in varin.ncattrs()})
                varin_value = varin[:]
                varin_value = np.repeat(varin_value, 3, axis=0)
                varin_value = np.repeat(varin_value, 3, axis=1)
                outVar[:] = varin_value[:]
                if v_name == "tb_v_corrected":
                    datatype = varin.datatype
                    tb_value = varin_value[:]
                if v_name == "surface_temperature":
                    ts_value = varin_value[:]

        outVar = fh_out.createVariable("tb_divide_ts", datatype,
                                       ("lat", "lon"))
        outVar[:] = ma.divide(tb_value, ts_value)

        fh_in.close()
        fh_out.close()
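The "exact downscaling" above simply replicates each 9 km cell onto the matching 3x3 block of 3 km cells with two np.repeat calls. A tiny illustration of that step (not from the original script):

import numpy as np

coarse = np.array([[1, 2],
                   [3, 4]])
# replicate rows and columns by a factor of 3: every coarse cell becomes a 3x3 block
fine = np.repeat(np.repeat(coarse, 3, axis=0), 3, axis=1)
print(fine.shape)  # (6, 6)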
Example #13
def plotHistogramContour(X, Y, Z, xlabel="", ylabel=""):
    # Number of bins in each axis in the histogram
    bins = 40

    # Range of x and y-axis, respectively
    r = [[np.min(X), np.max(X)], [np.min(Y), np.max(Y)]]
    print('Plotting contour... valid data-points: ', len(Z))
    print('[X-range, Y-range] : ', r)

    # Weights are temperatures. H is the sum of all temperature at point (X, Y)
    H, xedges, yedges = np.histogram2d(X, Y, bins=(bins, bins*5), range=r, weights=Z)
    # N is the number of data-points in the bins at position (X, Y)
    N, xedges, yedges = np.histogram2d(X, Y, bins=(bins, bins*5), range=r, weights=None)
    # Some sanity check and finally taking the average of temperatures
    H = ma.masked_less(H, 100)
    S = ma.divide(H, N, where=N>0)      # S is average temperature at each (X,Y)
    S = ma.masked_greater(S, 300)
    #print('H: ', H)
    #print('N: ', N)
    #print('S: ', S)
    #print("Shape of S: ", np.shape(S), " max: ", np.max(S), " min: ", np.min(S))

    # We had 'buckets' in the histogram for x and y-axis,
    # but we need a 'single value' for each bucket. We take their midpoint.
    ## Using list comprehension :)
    xpoints = [(xedges[i]+xedges[i+1])/2.0 for i in range(len(xedges)-1)]
    ypoints = [(yedges[i]+yedges[i+1])/2.0 for i in range(len(yedges)-1)]

    # Standard for plotting contour, create a meshgrid
    (P,Q) = np.meshgrid(xpoints, ypoints)
    plt.figure(figsize=(9,9))
    # The contour lines at the following temperature will be labeled
    V = (140, 160, 180, 200, 220, 240, 260, 280)
    # Plot a line contour
    contours = plt.contour(P, Q, S.T, V, colors='0.20', corner_mask=True)
    # We use transpose of S as S.T because that's what histogram2d() returns
    # Plot a filled contour
    plt.contourf(P, Q, S.T, 128, cmap=plt.cm.jet)
    plt.clabel(contours, inline=True, fontsize=8)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title("Temperature Contour")
    plt.autoscale()
    plt.colorbar()
    print('Done plotting.')

    # Save the contour plot as a high resolution EPS file
    global figcount
    if(figcount != -1):
        savefile = 'contour-%d.eps' %(figcount)
        plt.savefig(savefile, format='eps')
        figcount = figcount + 1
        print("File '%s' saved in the current working directory." %(savefile))
Example #14
 def __call__(self, container):
     self.c = container
     distCalc = Neighbors()
     distx, disty, distz = distCalc.CalcDist(self.c.xpos, self.c.ypos,
                                             self.c.zpos)
     distx = distCalc.CheckDist(distx, self.c.Lx)
     disty = distCalc.CheckDist(disty, self.c.Ly)
     distz = distCalc.CheckDist(distz, self.c.Lz)
     #debug_here()
     distCalc.UpdateNeighbors(self.c, calcz=False)
     masses = self.c.massVector
     distr = ma.masked_array(sqrt(distx**2 + disty**2 + distz**2),
                             [distx**2 + disty**2 + distz**2 == 0])
     K1 = (ma.divide(self.sigma, distr).filled(0.))**12
     K2 = (ma.divide(self.sigma, distr).filled(0.))**6
     #debug_here()
     KE = sum(sum(triu(array(4. * self.epsilon * (K1 - K2)))))
     print(KE)
     K3 = 2 * K1 - K2
     magnitude = 24. * ma.divide(self.epsilon, distr) * K3
     xacl = ny.sum(array((magnitude * ma.divide(distx, distr)).filled(0.)),
                   axis=1) / masses
     yacl = ny.sum(array((magnitude * ma.divide(disty, distr)).filled(0.)),
                   axis=1) / masses
     zacl = ny.sum(array((magnitude * ma.divide(distz, distr)).filled(0.)),
                   axis=1) / masses
     return xacl, yacl, zacl
Example #15
 def __call__(self, container):
     self.c = container
     distCalc = Neighbors()
     xacl = zeros(self.c.numParticles)
     yacl = zeros(self.c.numParticles)
     zacl = zeros(self.c.numParticles)
     masses = self.c.massVector
     print "tick"
     for particle in range(self.c.numParticles):
         neighbors = self.c.neighborList[particle]
         distx, disty, distz, distr = distCalc.NeighborDist(
             particle, neighbors, self.c)
         K1 = (ma.divide(self.sigma, distr).filled(0.))**12
         K2 = (ma.divide(self.sigma, distr).filled(0.))**6
         K3 = 2 * K1 - K2
         magnitude = 24. * ma.divide(self.epsilon, distr) * K3
         #debug_here()
         xacl[particle] = sum(
             array((magnitude *
                    ma.divide(distx, distr)).filled(0.))) / masses[particle]
         yacl[particle] = sum(
             array((magnitude *
                    ma.divide(disty, distr)).filled(0.))) / masses[particle]
         zacl[particle] = sum(
             array((magnitude *
                    ma.divide(distz, distr)).filled(0.))) / masses[particle]
         #debug_here()
     return xacl, yacl, zacl
Example #16
 def __call__(self, container):
     self.c = container
     distCalc = Neighbors()
     distx, disty, distz = distCalc.CalcDist(self.c.xpos,
                                              self.c.ypos,
                                              self.c.zpos)
     distx = distCalc.CheckDist(distx, self.c.Lx)
     disty = distCalc.CheckDist(disty, self.c.Ly)
     distz = distCalc.CheckDist(distz, self.c.Lz)
     #debug_here()
     distCalc.UpdateNeighbors(self.c,calcz = False)
     masses = self.c.massVector
     distr = ma.masked_array(sqrt(distx**2 + disty**2 + distz**2),
                             [distx**2 + disty**2 + distz**2 == 0])
     K1 = (ma.divide(self.sigma,distr).filled(0.))**12
     K2 = (ma.divide(self.sigma,distr).filled(0.))**6
     #debug_here()
     KE = sum(sum(triu(array(4.*self.epsilon*(K1-K2)))))
     print(KE)
     K3 = 2*K1 - K2
     magnitude = 24.*ma.divide(self.epsilon,distr)*K3
     xacl = ny.sum(array((magnitude * ma.divide(distx,distr)).filled(0.)), axis = 1)/masses
     yacl = ny.sum(array((magnitude * ma.divide(disty,distr)).filled(0.)), axis = 1)/masses
     zacl = ny.sum(array((magnitude * ma.divide(distz,distr)).filled(0.)), axis = 1)/masses
     return xacl, yacl, zacl
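The __call__ methods above compute Lennard-Jones pair accelerations; zero (self) distances are masked, and ma.divide(...).filled(0.) turns those pairs into zero contributions. A standalone sketch of the pair-force magnitude they assemble (hypothetical helper, assumed default sigma and epsilon):

import numpy as np
import numpy.ma as ma

def lj_force_magnitude(distr, sigma=1.0, epsilon=1.0):
    """Lennard-Jones pair force magnitude 24*eps/r * (2*(sigma/r)**12 - (sigma/r)**6).
    distr is a masked array of pair distances with zero (self) distances masked;
    masked pairs stay masked and are typically filled with 0. downstream."""
    k1 = (ma.divide(sigma, distr).filled(0.))**12
    k2 = (ma.divide(sigma, distr).filled(0.))**6
    return 24. * ma.divide(epsilon, distr) * (2 * k1 - k2)

# example: distances to three neighbours, with the self-distance masked
r = ma.masked_equal(np.array([0.0, 1.0, 1.5, 2.0]), 0.0)
force = lj_force_magnitude(r).filled(0.)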
Example #17
def divide(a, b):
    """Divides one map by another.

    It also calculates the variance of the ratio
    """
    
    new = Map.empty()

    new.data[0] = ma.divide(a.data[0], b.data[0]) 
    new.data[1] = get_variance_ratio(new.data[0], b.data[0], a.data[0],
                                     b.data[1], a.data[1])

    return new
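get_variance_ratio is not shown in this snippet. Assuming data[0] holds the map values and data[1] their variances, a plausible stand-in under standard first-order error propagation for a ratio r = a/b would look like the sketch below; the real helper in the source project may differ.

import numpy.ma as ma

def get_variance_ratio(ratio, b_data, a_data, b_var, a_var):
    # var(a/b) ~= (a/b)**2 * (var(a)/a**2 + var(b)/b**2)   (first-order propagation)
    return ratio**2 * (ma.divide(a_var, a_data**2) + ma.divide(b_var, b_data**2))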
Example #18
 def runLogisticRegression(self):
     diff = 1
     while diff > 0.01:
         permutation = np.random.permutation(self.N)
         newWeights = self.w.copy()
         for i in permutation:
             x, y = self.trainingData[i]
             gradient = divide(
                 multiply(-1.0, multiply(x, y)),
                 (1.0 + exp(multiply(y, np.dot(transpose(self.w), x)))))
             newWeights = subtract(newWeights,
                                   multiply(self.learningRate, gradient))
         self.epoch += 1
         diff = norm(self.w - newWeights)
         self.w = newWeights
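The inner update is stochastic gradient descent on the logistic loss ln(1 + exp(-y * w.x)) with labels y in {-1, +1}; for a single example the gradient with respect to w is -y * x / (1 + exp(y * w.x)). A plain-NumPy sketch of that per-example gradient (assumed 1-D shapes for w and x, not part of the original class):

import numpy as np

def logistic_gradient(w, x, y):
    """Gradient of ln(1 + exp(-y * w.x)) with respect to w, for y in {-1, +1}."""
    return np.divide(-y * x, 1.0 + np.exp(y * np.dot(w, x)))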
Example #19
    def applyDownscaling(self, emissivity_image_100m, mean_emissivity_100m):

        #emissivity_image = self.calcEmissivitySobrino()

        # ******** MODIS LST (1km) ***********

        lst_image = Image(
            self.modis_image.lst.split(".")[0] + '_subdivided_100m.tif')

        modis_array = lst_image.getArray(masked=True,
                                         lower_valid_range=7500,
                                         upper_valid_range=65535)

        # convert to surface temperatures in Celsius
        lst_metadata = lst_image.getMetadata()

        # check whether scale_factor is present in the metadata (it is for AppEEARS, but not EarthData)
        if 'scale_factor' in lst_metadata:
            scale_factor = float(
                lst_metadata['scale_factor'])  # multiply by 0.02
            add_offset = float(lst_metadata['add_offset'])
        else:
            scale_factor = float(0.02)
            add_offset = float(0)

        # convert to Kelvin, then to Celsius
        kelvin_array = np.add(np.multiply(modis_array, scale_factor),
                              add_offset)
        lst_celsius_array = np.subtract(kelvin_array, 273.15)

        # apply PBIM formula (T_high = T_low * emissivity_high / emissivity_avg) for each pixel

        # ********** Emissivity (100m) *********
        emissivity = Image(emissivity_image_100m).getArray(masked=False)

        mean_emissivity = Image(mean_emissivity_100m).getArray(masked=False)

        # PBIM formula
        t_high = ma.divide(ma.multiply(lst_celsius_array, emissivity),
                           mean_emissivity)

        Image(emissivity_image_100m).save_band(
            t_high, r'secteur3/PBIM_100m_result.tif')
Example #20
File: test_old_ma.py  Project: numpy/numpy
 def test_testArithmetic(self):
     # Test of basic arithmetic.
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     a2d = array([[1, 2], [0, 4]])
     a2dm = masked_array(a2d, [[0, 0], [1, 0]])
     assert_(eq(a2d * a2d, a2d * a2dm))
     assert_(eq(a2d + a2d, a2d + a2dm))
     assert_(eq(a2d - a2d, a2d - a2dm))
     for s in [(12,), (4, 3), (2, 6)]:
         x = x.reshape(s)
         y = y.reshape(s)
         xm = xm.reshape(s)
         ym = ym.reshape(s)
         xf = xf.reshape(s)
         assert_(eq(-x, -xm))
         assert_(eq(x + y, xm + ym))
         assert_(eq(x - y, xm - ym))
         assert_(eq(x * y, xm * ym))
         with np.errstate(divide='ignore', invalid='ignore'):
             assert_(eq(x / y, xm / ym))
         assert_(eq(a10 + y, a10 + ym))
         assert_(eq(a10 - y, a10 - ym))
         assert_(eq(a10 * y, a10 * ym))
         with np.errstate(divide='ignore', invalid='ignore'):
             assert_(eq(a10 / y, a10 / ym))
         assert_(eq(x + a10, xm + a10))
         assert_(eq(x - a10, xm - a10))
         assert_(eq(x * a10, xm * a10))
         assert_(eq(x / a10, xm / a10))
         assert_(eq(x ** 2, xm ** 2))
         assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
         assert_(eq(x ** y, xm ** ym))
         assert_(eq(np.add(x, y), add(xm, ym)))
         assert_(eq(np.subtract(x, y), subtract(xm, ym)))
         assert_(eq(np.multiply(x, y), multiply(xm, ym)))
         with np.errstate(divide='ignore', invalid='ignore'):
             assert_(eq(np.divide(x, y), divide(xm, ym)))
Example #22
 def __call__(self,container):
     self.c = container
     distCalc = Neighbors()
     xacl = zeros(self.c.numParticles)
     yacl = zeros(self.c.numParticles)
     zacl = zeros(self.c.numParticles)
     masses = self.c.massVector
     print "tick"
     for particle in range(self.c.numParticles):
         neighbors = self.c.neighborList[particle]
         distx, disty, distz, distr = distCalc.NeighborDist(particle, 
                                                            neighbors, 
                                                            self.c)
         K1 = (ma.divide(self.sigma,distr).filled(0.))**12
         K2 = (ma.divide(self.sigma,distr).filled(0.))**6
         K3 = 2*K1 - K2
         magnitude = 24.*ma.divide(self.epsilon,distr)*K3
         #debug_here()
         xacl[particle] = sum(array((magnitude * ma.divide(distx,distr)).filled(0.)))/masses[particle]
         yacl[particle] = sum(array((magnitude * ma.divide(disty,distr)).filled(0.)))/masses[particle]
         zacl[particle] = sum(array((magnitude * ma.divide(distz,distr)).filled(0.)))/masses[particle]
         #debug_here()
     return xacl, yacl, zacl
Example #23
    """

    import numpy.ma as ma
    import os.path

    hits = np.array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 2, 0],
                     [0, 2, 1, 3, 0], [0, 0, 0, 0, 1]])

    visits = np.array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 9, 3, 0],
                       [0, 2, 2, 4, 0], [0, 0, 0, 0, 8]])

    undef_mask = (visits == 0)
    alpha = ma.masked_array(hits, dtype=float)
    alpha[undef_mask] = ma.masked

    means = ma.divide(alpha, visits)

    means_ds = ma.zeros(means.shape)
    means_ds[undef_mask] = DiSt.UNDEFINED.value
    means_ds[~undef_mask] = ma.masked

    worldmap_extent = [150.4, 183.0, 0, 24.5]
    test_ds_list = [DiSt.UNDEFINED, DiSt.UNIFORM, DiSt.BIMODAL]

    test_v_min = 0
    test_v_max = 1

    test_occ = True

    # Create Colorizer Object
    mean_colorizer = MapColorizer()
Example #24
 def _compute(self, blob_generator):
     # Generator pipeline: scale each blob's data by the stored maximum and yield it on.
     for blob in blob_generator:
         blob.data = divide(blob.data, self.max)
         yield blob
Example #25
File: ebc.py  Project: yuhaozhang/ebc
    def initialize_cluster_centers(self, pXY, K):
        """ Initializes the cluster assignments along each axis, by first selecting k centers, 
        and then map each row to its closet center under cosine similarity.

        Args:
            pXY: original data matrix
            K: numbers of clusters desired in each dimension

        Return:
            new_C: one list per axis, giving the cluster id assigned to each index along that axis.
        """
        if not isinstance(pXY, SparseMatrix):
            raise Exception("Matrix argument to initialize_cluster_centers is not an instance of SparseMatrix.")
        new_C = [[-1] * Ni for Ni in pXY.N]

        for axis in xrange(len(K)): # loop over each dimension
            # choose cluster centers
            axis_length = pXY.N[axis]
            center_indices = random.sample(xrange(axis_length), K[axis])
            cluster_ids = {}
            for i in xrange(K[axis]):  # assign identifiers to clusters
                center_index = center_indices[i]
                cluster_ids[center_index] = i
            centers = defaultdict(lambda: defaultdict(float))  # all nonzero indices for each center
            for coords in pXY.nonzero_elements:
                coord_this_axis = coords[axis]
                if coord_this_axis in cluster_ids: # is a center
                    reduced_coords = tuple([coords[i] for i in xrange(len(coords)) if i != axis]) # coords without the current axis
                    centers[cluster_ids[coord_this_axis]][reduced_coords] = pXY.nonzero_elements[coords] # (cluster_id, other coords) -> value

            # assign rows to clusters
            scores = np.zeros(shape=(pXY.N[axis], K[axis])) # scores: axis_size x cluster_number
            denoms_P = np.zeros(shape=(pXY.N[axis]))
            denoms_Q = np.zeros(shape=(K[axis]))
            for coords in pXY.nonzero_elements:
                coord_this_axis = coords[axis]
                if coord_this_axis in center_indices:
                    continue  # don't reassign cluster centers, please
                reduced_coords = tuple([coords[i] for i in xrange(len(coords)) if i != axis])
                for cluster_index in cluster_ids:
                    xhat = cluster_ids[cluster_index]  # need cluster ID, not the axis index
                    if reduced_coords in centers[xhat]:  # overlapping point
                        P_i = pXY.nonzero_elements[coords]
                        Q_i = centers[xhat][reduced_coords]
                        scores[coords[axis]][xhat] += P_i * Q_i  # now doing based on cosine similarity
                        denoms_P[coords[axis]] += P_i * P_i  # magnitude of this slice of original matrix
                        denoms_Q[xhat] += Q_i * Q_i  # magnitude of cluster centers

            # normalize scores
            scores = divide(scores, outer(sqrt(denoms_P), sqrt(denoms_Q)))
            scores[scores == 0] = -1.0

            # add random jitter to scores to handle tie-breaking
            scores += self.jitter_max * random_sample(scores.shape)
            new_cXYi = list(scores.argmax(1))  # this needs to be argmax because cosine similarity

            # make sure to assign the cluster centers to themselves
            for center_index in cluster_ids:
                new_cXYi[center_index] = cluster_ids[center_index]

            # ensure numbers of clusters are correct
            self.ensure_correct_number_clusters(new_cXYi, K[axis])
            new_C[axis] = new_cXYi
        return new_C
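The normalization step above turns the accumulated dot products into cosine similarities by dividing by the outer product of the row and center norms. A small illustration of that step alone (toy numbers, not from the ebc source):

import numpy as np

scores = np.array([[2.0, 0.0],      # dot products: rows x cluster centers
                   [1.0, 3.0]])
denoms_P = np.array([4.0, 10.0])    # squared norms of the rows
denoms_Q = np.array([1.0, 9.0])     # squared norms of the centers

cosine = np.divide(scores, np.outer(np.sqrt(denoms_P), np.sqrt(denoms_Q)))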
Example #26
 def __call__(self, container):
     self.c = container
     distCalc = Neighbors()
     #distCalc.UpdateNeighbors(self.c,2**(1/6))
     xacl = zeros(self.c.numParticles)
     yacl = zeros(self.c.numParticles)
     zacl = zeros(self.c.numParticles)
     masses = self.c.massVector
     #print self.c.neighborList[-1]
     for particle in range(self.c.numParticles):
         if self.c.ypos[particle] <= self.c.openingPosition and self.c.ypos[particle] >= (self.c.openingPosition - 2):
             self.c.particleFlux[self.c.integrationIteration] += 1
         neighbors = self.c.neighborList[particle]
         distx, disty, distz, distr, relVelx, relVely, relVelz = distCalc.NeighborDist(particle, 
                                                            neighbors, 
                                                            self.c)                
         K1 = (ma.divide(self.sigma,distr).filled(0.))**12
         K2 = (ma.divide(self.sigma,distr).filled(0.))**6
         K3 = 2*K1 - K2
         magnitude = 24*ma.divide(self.epsilon,distr)*K3
         #debug_here()
         #rUnitVector = distr/norm()
         dampingForcex = zeros(len(neighbors))
         dampingForcey = zeros(len(neighbors))
         dampingForcez = zeros(len(neighbors))            
         for i in range(len(neighbors)):
             
                 #print relVelx, relVely
                #debug_here()
             displacement = array([distx[i],disty[i],distz[i]])
             unitVector = displacement/norm(displacement)
             nans = isnan(unitVector)
             unitVector[nans] = 0.
             dotProduct = dot(displacement, (relVelx[i],relVely[i],relVelz[i]))
             #debug_here()
             forceVector = -1*self.gamma*dotProduct*unitVector
             dampingForcex[i] = forceVector[0]
             dampingForcey[i] = forceVector[1]
             dampingForcez[i] = forceVector[2]
             #if len(self.c.neighborList[-1]) > 1 and particle == (self.c.numParticles - 1):
                 #print displacement
             """
             dampingForcex[i] = (self.gamma*dotProduct*unitVector * ma.divide(distx[i],distr[i])).filled(0.)
             dampingForcey[i] = (self.gamma*dotProduct*unitVector + ma.divide(disty[i],distr[i])).filled(0.)
             dampingForcez[i] = (self.gamma*dotProduct*unitVector + ma.divide(distz[i],distr[i])).filled(0.)
             """
         #debug_here()
         distFromWall1, xWall1, yWall1 = distCalc.lineDist(-1*sqrt(3),-1,20.,
                                                           self.c.xpos[particle], self.c.ypos[particle])
         distFromWall2, xWall2, yWall2 = distCalc.lineDist(sqrt(3),-1,-7.,
                                                           self.c.xpos[particle], self.c.ypos[particle])
         distFromWall3, xWall3, yWall3 = distCalc.lineDist(0,1,0,
                                                           self.c.xpos[particle], self.c.ypos[particle])
         wall1AclX = 0.
         wall1AclY = 0.
         wall2AclX = 0.
         wall2AclY = 0.
         wall3AclX = 0.
         wall3AclY = 0.
         
         if distFromWall1 <= 2**(1/6) and self.c.ypos[particle] >=10:
             K1 = (self.sigma/distFromWall1)**12
             K2 = (self.sigma/distFromWall1)**6
             K3 = 2*K1 - K2
             wallMag1 = 24*(self.epsilon/distFromWall1)*K3
             displacement1 = array([xWall1,yWall1])
             unitVector = displacement1/(displacement1**2)
             nans = isnan(unitVector)
             unitVector[nans] = 0.
             dotProduct = dot(displacement1, (self.c.xvel[particle],self.c.yvel[particle]))
             #debug_here()
             forceVector1 = -1*self.gamma*dotProduct*unitVector
             wall1AclX = forceVector1[0]
             wall1AclX += wallMag1*(xWall1/distFromWall1)
             wall1AclY = forceVector1[1]
             wall1AclY += wallMag1*(yWall1/distFromWall1)
         if distFromWall2 <= 2**(1/6) and self.c.ypos[particle] >=10:
             K1 = (self.sigma/distFromWall2)**12
             K2 = (self.sigma/distFromWall2)**6
             K3 = 2*K1 - K2
             wallMag2 = 24*(self.epsilon/distFromWall2)*K3
             displacement2 = array([xWall2,yWall2])
             unitVector = displacement2/(displacement2**2)
             nans = isnan(unitVector)
             unitVector[nans] = 0.
             dotProduct = dot(displacement2, (self.c.xvel[particle],self.c.yvel[particle]))
             #debug_here()
             forceVector2 = -1*self.gamma*dotProduct*unitVector
             wall2AclX = forceVector2[0]
             wall2AclX += wallMag2*(xWall2/distFromWall2)
             wall2AclY = forceVector2[1]
             wall2AclY += wallMag2*(yWall2/distFromWall2)
         if distFromWall3 <= 2**(1/6):
             K1 = (self.sigma/distFromWall3)**12
             K2 = (self.sigma/distFromWall3)**6
             K3 = 2*K1 - K2
             wallMag3 = 24*(self.epsilon/distFromWall3)*K3
             displacement3 = array([xWall3,yWall3])
             unitVector = displacement3/(displacement3**2)
             nans = isnan(unitVector)
             unitVector[nans] = 0.
             dotProduct = dot(displacement3, (self.c.xvel[particle],self.c.yvel[particle]))
             #debug_here()
             forceVector3 = -1*self.gamma*dotProduct*unitVector
             wall3AclX = forceVector3[0]
             wall3AclX += wallMag3*(xWall3/distFromWall3)
             wall3AclY = forceVector3[1]
             wall3AclY += wallMag3*(yWall3/distFromWall3)
         
         xacl[particle] = sum(array((magnitude * ma.divide(distx,distr)).filled(0.)))/masses[particle]
         xacl[particle] += sum(dampingForcex)/masses[particle]
         xacl[particle] += wall1AclX
         xacl[particle] += wall2AclX
         xacl[particle] += wall3AclX
         yacl[particle] = sum(array((magnitude * ma.divide(disty,distr)).filled(0.)))/masses[particle]
         yacl[particle] += sum(dampingForcey)/masses[particle]
         yacl[particle] += wall1AclY
         yacl[particle] += wall2AclY
         yacl[particle] += wall3AclY
         zacl[particle] = sum(array((magnitude * ma.divide(distz,distr)).filled(0.)))/masses[particle]
         zacl[particle] += sum(dampingForcex)/masses[particle]
         yacl[particle] += -2.
         #debug_here()
          ###set floor particles to stationary states
     #xacl[0:(self.c.floorSize+2*self.c.wallSize+2*self.c.slantSize)] = 0.
     #yacl[0:(self.c.floorSize+2*self.c.wallSize+2*self.c.slantSize)] = 0.
     
     zacl *= 0.
     #print self.c.particleFlux[self.c.integrationIteration]
     return xacl, yacl, zacl
Example #27
   def __call__(self, container):
       self.c = container # particle container
       distCalc = Neighbors() 
       #distCalc.UpdateNeighbors(self.c,2**(1/6))
       xacl = zeros(self.c.numParticles)
       yacl = zeros(self.c.numParticles)
       zacl = zeros(self.c.numParticles)
       masses = self.c.massVector
       #print self.c.neighborList[-1]
       for particle in range(self.c.numParticles):
       
           ###Used to determine particle flux through funnel opening for each iteration
           if self.c.ypos[particle] <= self.c.openingPosition and self.c.ypos[particle] >= (self.c.openingPosition - 2):
               self.c.particleFlux[self.c.integrationIteration] += 1
           
           
           neighbors = self.c.neighborList[particle]
           
           ### Calculate distances
           distx, disty, distz, distr, relVelx, relVely, relVelz = distCalc.NeighborDist(particle, 
                                                              neighbors, 
                                                              self.c)                
                                                              
           ### These are the force calculations for particle interactions. This gives us the
           ### gradient along the radial direction between particles, hence distr is used.
           magnitude = self.forceCalc(distr)
           #debug_here()
           #rUnitVector = distr/norm()
           dampingForcex = zeros(len(neighbors))
           dampingForcey = zeros(len(neighbors))
           dampingForcez = zeros(len(neighbors))            
           for i in range(len(neighbors)):
               
                   #print relVelx, relVely
                  #debug_here()
               displacement = array([distx[i],disty[i],distz[i]])
               unitVector = displacement/norm(displacement)
               nans = isnan(unitVector)
               unitVector[nans] = 0.
               dotProduct = dot(displacement, (relVelx[i],relVely[i],relVelz[i]))
               #debug_here()
               forceVector = -1*self.gamma*dotProduct*unitVector
               dampingForcex[i] = forceVector[0]
               dampingForcey[i] = forceVector[1]
               dampingForcez[i] = forceVector[2]
 
           wallAclX = 0
           wallAclY = 0
           for wall in self.c.wallList:
               aclX,aclY = self.wallCalc(wall,self.c,particle)
               if abs(aclX) > self.FORCE_BOUND:
                   aclX = aclX/abs(aclX) * self.FORCE_BOUND
               if abs(aclY) > self.FORCE_BOUND:
                   aclY = aclY/abs(aclY) * self.FORCE_BOUND
               wallAclX += aclX
               wallAclY += aclY
               mag = sqrt(aclX**2 + aclY**2)
               if abs(wallAclX) > self.forceTestX or abs(wallAclY) > self.forceTestY:
                   self.forceTestX = abs(wallAclX)
                   self.forceTestY = abs(wallAclY)
                   print("PING: " + str(self.forceTestX) + ", " + str(self.forceTestY))
               
           accelerationX = sum(array((magnitude * ma.divide(distx,distr)).filled(0.)))/masses[particle] + sum(dampingForcex)/masses[particle] + wallAclX
           accelerationY = sum(array((magnitude * ma.divide(disty,distr)).filled(0.)))/masses[particle] + sum(dampingForcey)/masses[particle] + wallAclY
           
           if abs(accelerationX) > self.FORCE_BOUND:
               accelerationX = accelerationX/abs(accelerationX) * self.FORCE_BOUND
           if abs(accelerationY) > self.FORCE_BOUND:
               accelerationY = accelerationY/abs(accelerationY) * self.FORCE_BOUND
               
           xacl[particle] += accelerationX
           yacl[particle] += accelerationY
           
           zacl[particle] = sum(array((magnitude * ma.divide(distz,distr)).filled(0.)))/masses[particle]
           zacl[particle] += sum(dampingForcex)/masses[particle]
           yacl[particle] += -2.
     
       
       zacl *= 0.
       #print self.c.particleFlux[self.c.integrationIteration]
       return xacl, yacl, zacl
Example #28
    def get_Sp(self):
        """ Docstring """
        b = self.get_zonal_avg('Tav.nc', 'THETA', mask=None)

        return ma.divide(-self.ddy_cgrid_centered(b),
                         self.ddz_cgrid_centered(b))
Example #29
            z1, dz1 = average_in_flux(group['z2'][f1], group['dz2'][f1])
            if np.all(group['dc1'][f0]):
                dc0 = np.sum(np.power(group['dc1'][f0], -2))**-0.5
                c0 = np.sum(
                    group['c1'][f0] * np.power(group['dc1'][f0], -2)) * dc0**2
            else:
                dc0 = 0.
                c0 = np.mean(group['c1'][f0])
            if np.all(group['dc2'][f1]):
                dc1 = np.sum(np.power(group['dc2'][f1], -2))**-0.5
                c1 = np.sum(
                    group['c2'][f1] * np.power(group['dc2'][f1], -2)) * dc1**2
            else:
                dc1 = 0.
                c1 = np.mean(group['c2'][f1])
            color = np.divide(m0 - m1 + z0 - z1, 1 - c0 + c1)
            dcolor = np.abs(color) * np.sqrt(
                np.divide(dm0**2 + dm1**2 + dz0**2 + dz1**2,
                          (m0 - m1 + z0 - z1)**2) +
                np.divide(dc0**2 + dc1**2, (1 - c0 + c1)**2))
            for row in group:
                colors.append(color)
                dcolors.append(dcolor)
        targets[filters] = np.array(colors)
        targets['d' + filters] = np.array(dcolors)

    # calibrate all the instrumental magnitudes
    zcol = [
        color_to_use[row['filter']][0]
        if color_to_use[row['filter']] else row['filter'] * 2
        for row in targets
Example #30
def validationExterne():
    """ Permet d'effectuer une validation externe entre l'image résultante de la réduction d'échelle et une image de
        température de surface calculée à partir des bandes 10 et 11 de Landsat 8 (disponibles sur EarthData).

        Les résultats de la validation externe sont des métriques de qualité en comparant les résultats de la réduction
        d'échelle à la température de surface calculée à 100m. Ces résultats sont présentés dans la console par des
        'print' (lignes 129 à 144).
    """

    # Match prediction result extent
    landsat_b10 = Image(
        r'data/LC08_L1TP_014028_20200706_20200721_01_T1_B10.TIF')
    landsat_b10.reprojectMatch(
        r'data/MOD11_L2.clipped_test2.tif'.split(".")[0] +
        '_subdivided_100m.tif', False)
    landsat_b10.setNewFile(
        landsat_b10.filename.replace(".TIF", "_reproject.tif"))

    # Get TOA radiance
    b10_array = landsat_b10.getArray(masked=True,
                                     lower_valid_range=1,
                                     upper_valid_range=65535)
    b10_array_radiance = ma.add(ma.multiply(b10_array, 0.00033420), 0.10000)

    # Get Brightness Temperature
    b10_array_brightness_temp = (1321.0789 / (ma.log(
        (774.8853 / b10_array_radiance) + 1))) - 273.15

    # Get NDVI
    landsat_b4 = Image(
        r'data/LC08_L1TP_014028_20200706_20200721_01_T1_B4_reproject.tif')
    b4_DN = landsat_b4.getArray(masked=True,
                                lower_valid_range=1,
                                upper_valid_range=65535)
    b4 = np.add(np.multiply(b4_DN, float(0.00002)), float(-0.10))

    landsat_b5 = Image(
        r'data/LC08_L1TP_014028_20200706_20200721_01_T1_B5_reproject.tif')
    b5_DN = landsat_b5.getArray(masked=True,
                                lower_valid_range=1,
                                upper_valid_range=65535)
    b5 = np.add(np.multiply(b5_DN, float(0.00002)), float(-0.10))

    ndvi = np.divide(np.subtract(b5, b4),
                     np.add(b5, b4),
                     where=((np.add(b5, b4)) != 0))

    # Get proportion of vegetation
    min_ndvi = ma.amin(ndvi)
    max_ndvi = ma.amax(ndvi)

    pv = ma.power(
        ma.divide(ma.subtract(ndvi, min_ndvi),
                  (ma.subtract(max_ndvi, min_ndvi)),
                  where=(ma.subtract(max_ndvi, min_ndvi)) != 0), 2)

    # Get emissivity
    emissivity = 0.004 * pv + 0.986

    # Get Landsat 8 LST
    landsat_lst = b10_array_brightness_temp / (
        1 +
        (0.00115 * b10_array_brightness_temp / 1.4388) * ma.log(emissivity))

    # Save LST image for visualization
    landsat_b10.save_band(landsat_lst, r'data/landsat_lst.tif')

    # Validation between both arrays
    predicted_lst = ma.masked_invalid(
        Image(r'data/MODIS_predit_100m.tif').getArray())
    predicted_lst_with_residuals = ma.masked_invalid(
        Image(r'data/MODIS_predit_100m_avec_residus.tif').getArray())

    predicted_lst = ma.filled(predicted_lst, 0)
    predicted_lst_with_residuals = ma.filled(predicted_lst_with_residuals, 0)

    # Without residuals
    print('Without residual correction')
    print('Mean Absolute Error (MAE):',
          metrics.mean_absolute_error(predicted_lst, landsat_lst))
    print('Mean Squared Error:',
          metrics.mean_squared_error(predicted_lst, landsat_lst))
    print('Root Mean Squared Error:',
          np.sqrt(metrics.mean_squared_error(predicted_lst, landsat_lst)),
          "°C")
    print(
        'Accuracy:', 100 -
        np.mean(100 * ((abs(predicted_lst - landsat_lst)) / landsat_lst)), "%")
    print('Explained variance score (EVS):',
          metrics.explained_variance_score(predicted_lst, landsat_lst))

    # With residuals
    print("\n")
    print('With residual correction')
    print(
        'Mean Absolute Error (MAE):',
        metrics.mean_absolute_error(predicted_lst_with_residuals, landsat_lst))
    print(
        'Mean Squared Error:',
        metrics.mean_squared_error(predicted_lst_with_residuals, landsat_lst))
    print(
        'Root Mean Squared Error:',
        np.sqrt(
            metrics.mean_squared_error(predicted_lst_with_residuals,
                                       landsat_lst)), "°C")
    print(
        'Accuracy:', 100 - np.mean(100 * (
            (abs(predicted_lst_with_residuals - landsat_lst)) / landsat_lst)),
        "%")
    print(
        'Explained variance score (EVS):',
        metrics.explained_variance_score(predicted_lst_with_residuals,
                                         landsat_lst))
Example #31
            m1, dm1 = average_in_flux(group['instmag_amcorr'][f1], group['dinstmag'][f1], axis=0)
            z0, dz0 = average_in_flux(group['z1'][f0], group['dz1'][f0])
            z1, dz1 = average_in_flux(group['z2'][f1], group['dz2'][f1])
            if np.all(group['dc1'][f0]):
                dc0 = np.sum(np.power(group['dc1'][f0], -2))**-0.5
                c0 = np.sum(group['c1'][f0] * np.power(group['dc1'][f0], -2)) * dc0**2
            else:
                dc0 = 0.
                c0 = np.mean(group['c1'][f0])
            if np.all(group['dc2'][f1]):
                dc1 = np.sum(np.power(group['dc2'][f1], -2))**-0.5
                c1 = np.sum(group['c2'][f1] * np.power(group['dc2'][f1], -2)) * dc1**2
            else:
                dc1 = 0.
                c1 = np.mean(group['c2'][f1])
            color = np.divide(m0 - m1 + z0 - z1, 1 - c0 + c1)
            dcolor = np.abs(color) * np.sqrt(
                        np.divide(dm0**2 + dm1**2 + dz0**2 + dz1**2, (m0 - m1 + z0 - z1)**2)
                        + np.divide(dc0**2 + dc1**2, (1 - c0 + c1)**2)
                                            )
            for row in group:
                colors.append(color)
                dcolors.append(dcolor)
        targets[filters] = np.array(colors)
        targets['d'+filters] = np.array(dcolors)

    # calibrate all the instrumental magnitudes
    zcol = [color_to_use[row['filter']][0] if color_to_use[row['filter']] else row['filter']*2 for row in targets]
    zeropoint = np.choose(zcol == targets['zcol1'], [targets['z2'], targets['z1']])
    dzeropoint = np.choose(zcol == targets['zcol1'], [targets['dz2'], targets['dz1']])
    colorterm = np.choose(zcol == targets['zcol1'], [targets['c2'], targets['c1']])
Example #32
def rpBound(n):
    return sqrt(
        divide(multiply(2, log(multiply(multiply(
            2, n), power(n, 50)))), n)) + sqrt(
                multiply(divide(2, n), log(divide(1, 0.05)))) + divide(1, n)
Example #33
def devroyeBound(n):
    # Ran into overflow error performing naive calculation.  Had to decompose natural log components.
    return divide(1, (n - 2.0)) + sqrt(
        divide(1, power(n - 2.0, 2)) +
        multiply(divide(1, multiply(2, (n - 2.0))),
                 log(4) + multiply(100, log(n)) - log(0.05)))
Example #34
def vcBound(n):
    return sqrt(
        multiply(divide(8.0, n),
                 log(multiply(4, divide(power(multiply(2, n), 50), 0.05)))))
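rpBound, devroyeBound and vcBound above, together with pvbBound from Example #1, appear to be the standard generalization-error bounds (Rademacher penalty, Devroye, VC, and Parrondo/Van den Broek) evaluated for a growth function of order N^50 with delta = 0.05. A hedged comparison sketch, assuming multiply, power, divide, log and sqrt are the NumPy ufuncs imported at module level in the original file:

# evaluate each bound at a few sample sizes to see which is tightest
for n in (1000.0, 10000.0):
    print(n, vcBound(n), rpBound(n), pvbBound(n), devroyeBound(n))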
Example #35
    def __call__(self, container):
        self.c = container  # particle container
        distCalc = Neighbors()
        #distCalc.UpdateNeighbors(self.c,2**(1/6))
        xacl = zeros(self.c.numParticles)
        yacl = zeros(self.c.numParticles)
        zacl = zeros(self.c.numParticles)
        masses = self.c.massVector
        #print self.c.neighborList[-1]
        for particle in range(self.c.numParticles):

            ###Used to determine particle flux through funnel opening for each iteration
            if self.c.ypos[particle] <= self.c.openingPosition and self.c.ypos[
                    particle] >= (self.c.openingPosition - 2):
                self.c.particleFlux[self.c.integrationIteration] += 1

            neighbors = self.c.neighborList[particle]

            ### Calculate distances
            distx, disty, distz, distr, relVelx, relVely, relVelz = distCalc.NeighborDist(
                particle, neighbors, self.c)

            ### These are the force calculations for particle interactions. This gives us the
            ### gradient along the radial direction between particles, hence distr is used.
            magnitude = self.forceCalc(distr)
            #debug_here()
            #rUnitVector = distr/norm()
            dampingForcex = zeros(len(neighbors))
            dampingForcey = zeros(len(neighbors))
            dampingForcez = zeros(len(neighbors))
            for i in range(len(neighbors)):

                #print relVelx, relVely
                #debug_here()
                displacement = array([distx[i], disty[i], distz[i]])
                unitVector = displacement / norm(displacement)
                nans = isnan(unitVector)
                unitVector[nans] = 0.
                dotProduct = dot(displacement,
                                 (relVelx[i], relVely[i], relVelz[i]))
                #debug_here()
                forceVector = -1 * self.gamma * dotProduct * unitVector
                dampingForcex[i] = forceVector[0]
                dampingForcey[i] = forceVector[1]
                dampingForcez[i] = forceVector[2]

            wallAclX = 0
            wallAclY = 0
            for wall in self.c.wallList:
                aclX, aclY = self.wallCalc(wall, self.c, particle)
                if abs(aclX) > self.FORCE_BOUND:
                    aclX = aclX / abs(aclX) * self.FORCE_BOUND
                if abs(aclY) > self.FORCE_BOUND:
                    aclY = aclY / abs(aclY) * self.FORCE_BOUND
                wallAclX += aclX
                wallAclY += aclY
                mag = sqrt(aclX**2 + aclY**2)
                if abs(wallAclX) > self.forceTestX or abs(
                        wallAclY) > self.forceTestY:
                    self.forceTestX = abs(wallAclX)
                    self.forceTestY = abs(wallAclY)
                    print("PING: " + str(self.forceTestX) + ", " +
                          str(self.forceTestY))

            accelerationX = sum(
                array((magnitude * ma.divide(
                    distx, distr)).filled(0.))) / masses[particle] + sum(
                        dampingForcex) / masses[particle] + wallAclX
            accelerationY = sum(
                array((magnitude * ma.divide(
                    disty, distr)).filled(0.))) / masses[particle] + sum(
                        dampingForcey) / masses[particle] + wallAclY

            if abs(accelerationX) > self.FORCE_BOUND:
                accelerationX = accelerationX / abs(
                    accelerationX) * self.FORCE_BOUND
            if abs(accelerationY) > self.FORCE_BOUND:
                accelerationY = accelerationY / abs(
                    accelerationY) * self.FORCE_BOUND

            xacl[particle] += accelerationX
            yacl[particle] += accelerationY

            zacl[particle] = sum(
                array((magnitude *
                       ma.divide(distz, distr)).filled(0.))) / masses[particle]
            zacl[particle] += sum(dampingForcex) / masses[particle]
            yacl[particle] += -2.

        zacl *= 0.
        #print self.c.particleFlux[self.c.integrationIteration]
        return xacl, yacl, zacl