Example #1
def test_fft_poisson():
    
    obj = test()
    fft_poisson(obj, -af.sin(2 * np.pi * obj.q1_center + 4 * np.pi * obj.q2_center))

    E1_expected = (0.1 / np.pi) * af.cos(  2 * np.pi * obj.q1_center
                                         + 4 * np.pi * obj.q2_center
                                        )

    E2_expected = (0.2 / np.pi) * af.cos(  2 * np.pi * obj.q1_center
                                         + 4 * np.pi * obj.q2_center
                                        )

    N_g = obj.N_ghost

    error_E1 = af.mean(af.abs(  obj.cell_centered_EM_fields[0, 0, N_g:-N_g, N_g:-N_g] 
                              - E1_expected[0, 0, N_g:-N_g, N_g:-N_g]
                             )
                      )

    error_E2 = af.mean(af.abs(  obj.cell_centered_EM_fields[1, 0, N_g:-N_g, N_g:-N_g] 
                              - E2_expected[0, 0, N_g:-N_g, N_g:-N_g]
                             )
                      )

    assert (error_E1 < 1e-14 and error_E2 < 1e-14)
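Most of the examples in this collection measure accuracy as the mean of the absolute difference between a computed array and a reference. A minimal, self-contained sketch of that pattern (the grid, the perturbation and the tolerance below are illustrative, not taken from the test above):

import numpy as np
import arrayfire as af

# Reference values on a small grid
q = af.interop.np_to_af_array(np.linspace(0.0, 1.0, 64))
expected = af.sin(2 * np.pi * q)

# "Computed" values with a tiny perturbation standing in for numerical error
computed = expected + 1e-15 * af.randu(64, dtype=af.Dtype.f64)

# L1-style error metric used throughout these examples
error = af.mean(af.abs(computed - expected))
assert error < 1e-14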
Example #2
def gradient(nn, delta):

    nabla_b = []
    nabla_w = []

    # output
    dact = nn['nonlin'][-1][1]
    t = nn['zs'][-1]

    asdf = dact(2)

    #This is d_relu.  It is a binary output
    dW = average_gradient(delta * af.sign(1e-5 - nn['zs'][-1]),
                          nn['activations'][-2])
    nabla_b.append(af.mean(delta))
    nabla_w.append(dW)

    for i in range(len(nn['weights']) - 2, -1, -1):
        dact = delta * af.max(nn['zs'][i + 1])
        trans = nn['weights'][i + 1].T
        delta = af.matmul(trans, dact)

        dW = average_gradient(delta, nn['activations'][i])
        nabla_b.append(af.mean(delta * af.max(nn['zs'][i])))
        nabla_w.append(dW)
    return nabla_w, nabla_b
Example #3
def gradient(nn, delta):
 
    nabla_b = []
    nabla_w = []

    # output
    dact = nn['nonlin'][-1][1]
    t = nn['zs'][-1]
   
    asdf = dact(2)
    
    #This is d_relu.  It is a binary output
    dW = average_gradient(delta*af.sign(1e-5 - nn['zs'][-1]), nn['activations'][-2])
    nabla_b.append(af.mean(delta))
    nabla_w.append(dW)

    for i in range(len(nn['weights']) - 2, -1, -1):
        dact = delta * af.max(nn['zs'][i+1])
        trans = nn['weights'][i+1].T
        delta = af.matmul(trans, dact)


        dW = average_gradient(delta,nn['activations'][i])
        nabla_b.append(af.mean(delta*af.max(nn['zs'][i])))
        nabla_w.append(dW)
    return nabla_w, nabla_b
Example #4
def main():
    T = 1
    nT = 20 * T
    R_first = 1000
    R = 5000000

    x0 = 0 # initial log stock price
    v0 = 0.087**2 # initial variance (0.087 is the initial volatility)
    r = math.log(1.0319) # risk-free rate
    rho = -0.82 # instantaneous correlation between Brownian motions
    sigmaV = 0.14 # volatility of volatility
    kappa = 3.46 # mean reversion speed
    vBar = 0.008 # long-run mean variance
    k = math.log(0.95) # log of the strike price

    # first run
    ( x, v ) = simulateHestonModel( T, nT, R_first, r, kappa, vBar, sigmaV, rho, x0, v0 )

    # Price plain vanilla call option
    tic = time.time()
    ( x, v ) = simulateHestonModel( T, nT, R, r, kappa, vBar, sigmaV, rho, x0, v0 )
    af.sync()
    toc = time.time() - tic
    K = math.exp(k)
    zeroConstant = af.constant(0, R, dtype=af.Dtype.f32)
    C_CPU = math.exp(-r * T) * af.mean(af.maxof(af.exp(x) - K, zeroConstant))
    print("Time elapsed = {} secs".format(toc))
    print("Call price = {}".format(C_CPU))
    print(af.mean(v))
Example #5
def simple_statistics(verbose=False):
    display_func = _util.display_func(verbose)
    print_func = _util.print_func(verbose)

    a = af.randu(5, 5)
    b = af.randu(5, 5)
    w = af.randu(5, 1)

    display_func(af.mean(a, dim=0))
    display_func(af.mean(a, weights=w, dim=0))
    print_func(af.mean(a))
    print_func(af.mean(a, weights=w))

    display_func(af.var(a, dim=0))
    display_func(af.var(a, isbiased=True, dim=0))
    display_func(af.var(a, weights=w, dim=0))
    print_func(af.var(a))
    print_func(af.var(a, isbiased=True))
    print_func(af.var(a, weights=w))

    display_func(af.stdev(a, dim=0))
    print_func(af.stdev(a))

    display_func(af.var(a, dim=0))
    display_func(af.var(a, isbiased=True, dim=0))
    print_func(af.var(a))
    print_func(af.var(a, isbiased=True))

    display_func(af.median(a, dim=0))
    print_func(af.median(w))

    print_func(af.corrcoef(a, b))
Example #6
def simple_statistics(verbose=False):
    display_func = _util.display_func(verbose)
    print_func   = _util.print_func(verbose)

    a = af.randu(5, 5)
    b = af.randu(5, 5)
    w = af.randu(5, 1)

    display_func(af.mean(a, dim=0))
    display_func(af.mean(a, weights=w, dim=0))
    print_func(af.mean(a))
    print_func(af.mean(a, weights=w))

    display_func(af.var(a, dim=0))
    display_func(af.var(a, isbiased=True, dim=0))
    display_func(af.var(a, weights=w, dim=0))
    print_func(af.var(a))
    print_func(af.var(a, isbiased=True))
    print_func(af.var(a, weights=w))

    display_func(af.stdev(a, dim=0))
    print_func(af.stdev(a))

    display_func(af.var(a, dim=0))
    display_func(af.var(a, isbiased=True, dim=0))
    print_func(af.var(a))
    print_func(af.var(a, isbiased=True))

    display_func(af.median(a, dim=0))
    print_func(af.median(w))

    print_func(af.corrcoef(a, b))
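The weighted reductions above can be cross-checked by hand. A small sketch, assuming the same 5x5 array and 5x1 weight vector shapes used in simple_statistics:

import arrayfire as af

a = af.randu(5, 5)
w = af.randu(5, 1)

# Weighted column mean via the built-in
m_builtin = af.mean(a, weights=w, dim=0)

# The same thing spelled out: sum(a * w) / sum(w), column by column
m_manual = af.sum(a * af.tile(w, 1, 5), dim=0) / af.sum(w)

# The two should agree to single precision
assert af.max(af.abs(m_builtin - m_manual)) < 1e-5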
Example #7
def test_compute_electrostatic_fields():

    test_obj = test()
    compute_electrostatic_fields(test_obj)

    E1 = 0.5 * test_obj.N_q1 * test_obj.N_q2 * af.ifft2(test_obj.E1_hat)
    E2 = 0.5 * test_obj.N_q1 * test_obj.N_q2 * af.ifft2(test_obj.E2_hat)

    E1_analytical =   test_obj.physical_system.params.charge_electron \
                    * 2 * np.pi / (20 * np.pi**2) \
                    * (  0.01 * af.sin(2 * np.pi * test_obj.q1 + 4 * np.pi * test_obj.q2)
                       + 0.02 * af.cos(2 * np.pi * test_obj.q1 + 4 * np.pi * test_obj.q2)
                      )

    E2_analytical =   test_obj.physical_system.params.charge_electron \
                    * 4 * np.pi / (20 * np.pi**2) \
                    * (  0.01 * af.sin(2 * np.pi * test_obj.q1 + 4 * np.pi * test_obj.q2)
                       + 0.02 * af.cos(2 * np.pi * test_obj.q1 + 4 * np.pi * test_obj.q2)
                      )

    add = lambda a, b: a + b

    error_E1 = af.mean(af.abs(af.broadcast(add, E1_analytical, - E1)))
    error_E2 = af.mean(af.abs(af.broadcast(add, E2_analytical, - E2)))

    assert(error_E1 < 1e-14)
    assert(error_E2 < 1e-14)
Example #8
def main():
    T = 1
    nT = 20 * T
    R_first = 1000
    R = 5000000

    x0 = 0  # initial log stock price
    v0 = 0.087**2  # initial variance (0.087 is the initial volatility)
    r = math.log(1.0319)  # risk-free rate
    rho = -0.82  # instantaneous correlation between Brownian motions
    sigmaV = 0.14  # volatility of volatility
    kappa = 3.46  # mean reversion speed
    vBar = 0.008  # long-run mean variance
    k = math.log(0.95)  # log of the strike price

    # first run
    (x, v) = simulateHestonModel(T, nT, R_first, r, kappa, vBar, sigmaV, rho,
                                 x0, v0)

    # Price plain vanilla call option
    tic = time.time()
    (x, v) = simulateHestonModel(T, nT, R, r, kappa, vBar, sigmaV, rho, x0, v0)
    af.sync()
    toc = time.time() - tic
    K = math.exp(k)
    zeroConstant = af.constant(0, R, dtype=af.Dtype.f32)
    C_CPU = math.exp(-r * T) * af.mean(af.maxof(af.exp(x) - K, zeroConstant))
    print("Time elapsed = {} secs".format(toc))
    print("Call price = {}".format(C_CPU))
    print(af.mean(v))
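The last few lines above are simply a discounted mean of the terminal payoff. Below is a self-contained sketch of the same estimator with plain lognormal terminal prices standing in for simulateHestonModel; all parameter values are illustrative:

import math
import arrayfire as af

R = 100000      # number of Monte Carlo paths
T = 1.0         # maturity in years
r = 0.03        # risk-free rate
sigma = 0.2     # volatility
S0 = 1.0        # initial price
K = 0.95        # strike

# Terminal log-prices under geometric Brownian motion
z = af.randn(R, dtype=af.Dtype.f32)
x = math.log(S0) + (r - 0.5 * sigma**2) * T + sigma * math.sqrt(T) * z

# Discounted mean payoff, same pattern as the Heston example above
zero = af.constant(0, R, dtype=af.Dtype.f32)
call_price = math.exp(-r * T) * af.mean(af.maxof(af.exp(x) - K, zero))
print("Call price estimate = {}".format(call_price))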
Example #9
def test_compute_moments():

    obj = test()

    rho_num = compute_moments(obj, 'density')
    rho_ana = 1 + 0.01 * af.sin(2 * np.pi * obj.q1_center +
                                4 * np.pi * obj.q2_center)

    error_rho = af.mean(af.abs(rho_num - rho_ana))

    E_num = compute_moments(obj, 'energy')
    E_ana =   3/2 * (1 + 0.01 * af.sin(2 * np.pi * obj.q1_center + 4 * np.pi * obj.q2_center)) \
                  * (1 + 0.01 * af.cos(2 * np.pi * obj.q1_center + 4 * np.pi * obj.q2_center)) \
            + 3/2 * (1 + 0.01 * af.sin(2 * np.pi * obj.q1_center + 4 * np.pi * obj.q2_center)) \
                  * (0.01 * af.exp(-10 * obj.q1_center**2 - 10 * obj.q2_center**2))**2

    error_E = af.mean(af.abs(E_num - E_ana))

    mom_p1b_num = compute_moments(obj, 'mom_v1_bulk')
    mom_p1b_ana =   (1 + 0.01 * af.sin(2 * np.pi * obj.q1_center + 4 * np.pi * obj.q2_center)) \
                  * (0.01 * af.exp(-10 * obj.q1_center**2 - 10 * obj.q2_center**2))

    error_p1b = af.mean(af.abs(mom_p1b_num - mom_p1b_ana))

    mom_p2b_num = compute_moments(obj, 'mom_v2_bulk')
    mom_p2b_ana =   (1 + 0.01 * af.sin(2 * np.pi * obj.q1_center + 4 * np.pi * obj.q2_center)) \
                  * (0.01 * af.exp(-10 * obj.q1_center**2 - 10 * obj.q2_center**2))

    error_p2b = af.mean(af.abs(mom_p2b_num - mom_p2b_ana))

    mom_p3b_num = compute_moments(obj, 'mom_v3_bulk')
    mom_p3b_ana  =   (1 + 0.01 * af.sin(2 * np.pi * obj.q1_center + 4 * np.pi * obj.q2_center)) \
                   * (0.01 * af.exp(-10 * obj.q1_center**2 - 10 * obj.q2_center**2))

    error_p3b = af.mean(af.abs(mom_p3b_num - mom_p3b_ana))

    print(error_rho)
    print(error_E)
    print(error_p1b)
    print(error_p2b)
    print(error_p3b)

    # print((error_rho + error_E + error_p1b + error_p2b + error_p3b) / 5)

    assert (error_rho < 1e-13)
    assert (error_E < 1e-13)
    assert (error_p1b < 1e-13)
    assert (error_p2b < 1e-13)
    assert (error_p3b < 1e-13)
Example #10
def simple_statistics(verbose=False):
    display_func = _util.display_func(verbose)
    print_func = _util.print_func(verbose)

    a = af.randu(5, 5)
    b = af.randu(5, 5)
    w = af.randu(5, 1)

    display_func(af.mean(a, dim=0))
    display_func(af.mean(a, weights=w, dim=0))
    print_func(af.mean(a))
    print_func(af.mean(a, weights=w))

    display_func(af.var(a, dim=0))
    display_func(af.var(a, isbiased=True, dim=0))
    display_func(af.var(a, weights=w, dim=0))
    print_func(af.var(a))
    print_func(af.var(a, isbiased=True))
    print_func(af.var(a, weights=w))

    mean, var = af.meanvar(a, dim=0)
    display_func(mean)
    display_func(var)
    mean, var = af.meanvar(a, weights=w, bias=af.VARIANCE.SAMPLE, dim=0)
    display_func(mean)
    display_func(var)

    display_func(af.stdev(a, dim=0))
    print_func(af.stdev(a))

    display_func(af.var(a, dim=0))
    display_func(af.var(a, isbiased=True, dim=0))
    print_func(af.var(a))
    print_func(af.var(a, isbiased=True))

    display_func(af.median(a, dim=0))
    print_func(af.median(w))

    print_func(af.corrcoef(a, b))

    data = af.iota(5, 3)
    k = 3
    dim = 0
    order = af.TOPK.DEFAULT  # defaults to af.TOPK.MAX
    assert (dim == 0)  # topk currently supports first dim only
    values, indices = af.topk(data, k, dim, order)
    display_func(values)
    display_func(indices)
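af.topk only appears in this example, so a short standalone sketch of what it returns may help. af.iota(5, 3) fills its columns with 0-4, 5-9 and 10-14 in column-major order, so with the default MAX ordering noted in the comment above, the top 3 entries along dim 0 should be the largest values of each column:

import arrayfire as af

data = af.iota(5, 3)    # columns hold 0-4, 5-9 and 10-14
values, indices = af.topk(data, 3, 0, af.TOPK.DEFAULT)

# Expected: values (4, 3, 2), (9, 8, 7) and (14, 13, 12) per column,
# with indices pointing at rows 4, 3 and 2 of each column
af.display(values)
af.display(indices)
print(af.mean(values))  # mean of the nine retained entries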
Example #11
def test_dx_dxi():
    '''
    A test function to check the dx_dxi function in the wave_equation module by
    passing the nodes of an element and using the LGL points. Analytically, the
    differential is a constant. The check has a tolerance of 1e-8.
    '''
    threshold = 1e-8

    params.N_LGL = 8
    params.N_quad = 10
    params.N_Elements = 10
    wave = 'gaussian'

    gv = global_variables.advection_variables(params.N_LGL, params.N_quad,\
                                          params.x_nodes, params.N_Elements,\
                                          params.c, params.total_time, wave,\
                                          params.c_x, params.c_y, params.courant,\
                                          params.mesh_file, params.total_time_2d)

    nodes = np.array([7, 10], dtype=np.float64)
    test_nodes = af.interop.np_to_af_array(nodes)
    analytical_dx_dxi = 1.5
    check_dx_dxi = abs(
        (af.mean(wave_equation.dx_dxi_numerical(test_nodes, gv.xi_LGL)) -
         analytical_dx_dxi)) <= threshold

    assert check_dx_dxi
Example #12
def test_interpolation():
    '''
    Checks lag_interpolation_2d by interpolating e**(xi + eta) onto the LGL
    grid and comparing the interpolated polynomial against the analytical
    function.
    '''
    threshold = 8e-9
    params.N_LGL = 8

    gv = gvar.advection_variables(params.N_LGL, params.N_quad,\
                                          params.x_nodes, params.N_Elements,\
                                          params.c, params.total_time, params.wave,\
                                          params.c_x, params.c_y, params.courant,\
                                          params.mesh_file, params.total_time_2d)

    N_LGL = 8
    xi_LGL = lagrange.LGL_points(N_LGL)
    xi_i = af.flat(af.transpose(af.tile(xi_LGL, 1, N_LGL)))
    eta_j = af.tile(xi_LGL, N_LGL)
    f_ij = np.e**(xi_i + eta_j)
    interpolated_f = wave_equation_2d.lag_interpolation_2d(
        f_ij, gv.Li_Lj_coeffs)
    xi = utils.linspace(-1, 1, 8)
    eta = utils.linspace(-1, 1, 8)

    assert (af.mean(
        af.transpose(utils.polyval_2d(interpolated_f, xi, eta)) -
        np.e**(xi + eta)) < threshold)
Example #13
def check_error(params):
    error = np.zeros(N.size)

    for i in range(N.size):
        domain.N_p1 = int(N[i])
        domain.N_p2 = int(N[i])
        # Defining the physical system to be solved:
        system = physical_system(domain, boundary_conditions, params,
                                 initialize, advection_terms,
                                 collision_operator.BGK, moment_defs)

        # Declaring a nonlinear solver object which will evolve the defined physical system:
        nls = nonlinear_solver(system)
        N_g = nls.N_ghost_q

        # Time parameters:
        dt = 0.01 * 32 / nls.N_p1
        t_final = 0.2

        time_array = np.arange(dt, t_final + dt, dt)

        f_reference = nls.f

        for time_index, t0 in enumerate(time_array):
            nls.strang_timestep(dt)

        error[i] = af.mean(af.abs(nls.f - f_reference))

    return (error)
Example #14
def monte_carlo_options(N,
                        K,
                        t,
                        vol,
                        r,
                        strike,
                        steps,
                        use_barrier=True,
                        B=None,
                        ty=af.Dtype.f32):
    payoff = af.constant(0, N, 1, dtype=ty)

    dt = t / float(steps - 1)
    s = af.constant(strike, N, 1, dtype=ty)

    randmat = af.randn(N, steps - 1, dtype=ty)
    randmat = af.exp((r - (vol * vol * 0.5)) * dt +
                     vol * math.sqrt(dt) * randmat)

    S = af.product(af.join(1, s, randmat), 1)

    if (use_barrier):
        S = S * af.all_true(S < B, 1)

    payoff = af.maxof(0, S - K)
    return af.mean(payoff) * math.exp(-r * t)
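A usage sketch for the function above. Note that in this snippet `strike` seeds the simulated price paths while `K` is the level used in the payoff, so the argument names follow the code rather than the usual convention; the parameter values below are illustrative, and monte_carlo_options, arrayfire (af) and math are assumed to be in scope as above:

# Price a plain European call: 100k paths, 1 year maturity, 20% vol, 5% rate
price = monte_carlo_options(N=100000, K=100.0, t=1.0, vol=0.2, r=0.05,
                            strike=100.0, steps=180, use_barrier=False)
print("Monte Carlo call price = {}".format(price))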
Example #15
    def _meanObject(self, obj, adjoint=False):
        """
        function to bin the object by factor of slice_binning_factor
        """
        if self.slice_binning_factor == 1:
            return obj
        assert self.shape[2] >= 1
        if adjoint:
            obj_out = af.constant(0.0,
                                  self._shape_full[0],
                                  self._shape_full[1],
                                  self._shape_full[2],
                                  dtype=af_complex_datatype)
            for idx in range((self.shape[2] - 1) * self.slice_binning_factor,
                             -1, -self.slice_binning_factor):
                idx_slice = slice(
                    idx,
                    np.min([obj_out.shape[2],
                            idx + self.slice_binning_factor]))
                obj_out[:, :, idx_slice] = af.broadcast(
                    self.assign_broadcast, obj_out[:, :, idx_slice],
                    obj[:, :, idx // self.slice_binning_factor])
        else:
            obj_out = af.constant(0.0,
                                  self.shape[0],
                                  self.shape[1],
                                  self.shape[2],
                                  dtype=af_complex_datatype)
            for idx in range(0, obj.shape[2], self.slice_binning_factor):
                idx_slice = slice(
                    idx,
                    np.min([obj.shape[2], idx + self.slice_binning_factor]))
                obj_out[:, :, idx // self.slice_binning_factor] = af.mean(
                    obj[:, :, idx_slice], dim=2)
        return obj_out
Example #16
def average(a: ndarray,
            axis: tp.Optional[int] = None,
            weights: tp.Optional[ndarray] = None) \
        -> tp.Union[numbers.Number, ndarray]:
    """
    Compute the weighted average along the specified axis.
    """

    if weights is None:
        af_weights = None
    else:
        if axis is not None:
            if axis == 0:
                af_weights = weights.reshape((-1, ))._af_array
            elif axis == 1:
                af_weights = weights.reshape((1, -1))._af_array
            elif axis == 2:
                af_weights = weights.reshape((1, 1, -1))._af_array
            elif axis == 3:
                af_weights = weights.reshape((1, 1, 1, -1))._af_array
            else:
                raise ValueError('axis must be between 0 and 3')
        else:
            af_weights = weights._af_array

    new_af_array = af.mean(a._af_array, weights=af_weights, dim=axis)

    if isinstance(new_af_array, af.Array):
        return ndarray(new_af_array)
    else:
        return new_af_array
Example #17
    def reconstruction_loss_arbitrary_params(self, centres, defocuses):

        stack = self.crop_stack(centres)
        stack_gpu = [self.np_to_af(img, af.Dtype.c32) for img in stack]
        reconstruction = reconstruct(stack_gpu.copy(),
                                     defocuses,
                                     stack_on_gpu=True)

        # Recreate the original images from the reconstructed wavefunction,
        # mirroring reconstruction_loss further below
        deconstruction = [self.propagate_back_to_defocus(reconstruction, defocus, self.wavelength)
                          for defocus in defocuses]

        losses = [0.] * len(stack_gpu)
        for i in range(len(losses)):
            collapse = af.abs(deconstruction[i])**2
            collapse *= af.mean(stack_gpu[i]) / af.mean(collapse)

            losses[i] = af.mean((stack_gpu[i] - collapse)**2)

        return np.max(losses)
Example #18
def test_check_maxwells_constraints():

    params = params_check_maxwells_contraints
    system = physical_system(domain,
                             boundary_conditions,
                             params,
                             initialize_check_maxwells_contraints,
                             advection_terms,
                             collision_operator.BGK,
                             moments
                            )

    dq1 = (domain.q1_end - domain.q1_start) / domain.N_q1
    dq2 = (domain.q2_end - domain.q2_start) / domain.N_q2
    
    q1, q2 = calculate_q_center(domain.q1_start, domain.q2_start,
                                domain.N_q1, domain.N_q2, domain.N_ghost,
                                dq1, dq2
                               )
    
    rho = (  params.pert_real * af.cos(  params.k_q1 * q1 
                                       + params.k_q2 * q2
                                      )
           - params.pert_imag * af.sin(  params.k_q1 * q1 
                                       + params.k_q2 * q2
                                      )
          )

    obj = fields_solver(system, 
                        rho, 
                        False
                       )

    # Checking for ∇.E = rho / epsilon
    rho_left_bot = 0.25 * (  rho 
                           + af.shift(rho, 0, 0, 0, 1)
                           + af.shift(rho, 0, 0, 1, 0)
                           + af.shift(rho, 0, 0, 1, 1)
                          ) 

    N_g = obj.N_g
    assert(af.mean(af.abs(obj.compute_divB()[:, :, N_g:-N_g, N_g:-N_g]))<1e-14)

    divE  = obj.compute_divE()
    rho_b = af.mean(rho_left_bot) # background

    assert(af.mean(af.abs(divE - rho_left_bot + rho_b)[:, :, N_g:-N_g, N_g:-N_g])<1e-6)
Example #19
def test_communicate_fields():
    obj = test_fields()
    communicate_fields(obj)

    Ng = obj.N_ghost

    expected = af.sin(2 * np.pi * obj.q1 + 4 * np.pi * obj.q2)
    assert (af.mean(af.abs(obj.cell_centered_EM_fields - expected)) < 5e-14)
Example #20
def test_dump_distribution_function():
    test_obj = test()

    f_before_load = test_obj.Y.copy()

    dump_distribution_function(test_obj, 'test_file')
    load_distribution_function(test_obj, 'test_file')

    assert (af.mean(af.abs(test_obj.Y - f_before_load)) < 1e-14)
Example #21
def check_error(params):
    error = np.zeros(N.size)

    for i in range(N.size):
        domain.N_p1 = int(N[i])
        domain.N_p2 = int(N[i])
        # Defining the physical system to be solved:
        system = physical_system(domain, boundary_conditions, params,
                                 initialize, advection_terms,
                                 collision_operator.BGK, moment_defs)

        # Declaring a nonlinear solver object which will evolve the defined physical system:
        nls = nonlinear_solver(system)
        N_g = nls.N_ghost_q

        # Time parameters:
        dt = 0.001 * 32 / nls.N_p1
        t_final = 0.1

        time_array = np.arange(dt, t_final + dt, dt)

        # Finding final resting point of the blob:
        E1 = nls.cell_centered_EM_fields[0]
        E2 = nls.cell_centered_EM_fields[1]
        B3 = nls.cell_centered_EM_fields[5]

        sol = odeint(dpdt,
                     np.array([0, 0]),
                     time_array,
                     args=(af.mean(E1), af.mean(E2), af.mean(B3),
                           params.charge_electron, params.mass_particle))

        f_reference = af.broadcast(initialize.initialize_f, nls.q1_center,
                                   nls.q2_center, nls.p1_center - sol[-1, 0],
                                   nls.p2_center - sol[-1, 1], nls.p3_center,
                                   params)

        for time_index, t0 in enumerate(time_array):
            nls.strang_timestep(dt)

        error[i] = af.mean(af.abs(nls.f - f_reference))

    return (error)
Example #22
def volume_int_convergence():
    '''
    convergence test for volume int flux
    '''
    N_LGL = np.arange(15).astype(float) + 3
    L1_norm_option_3 = np.zeros([15])
    L1_norm_option_1 = np.zeros([15])
    for i in range(0, 15):
        test_waveEqn.change_parameters(i + 3, 16, i + 4)
        vol_int_analytical = np.zeros([params.N_Elements, params.N_LGL])
        for j in range (params.N_Elements):
            for k in range (params.N_LGL):
                vol_int_analytical[j][k] = (analytical_volume_integral\
                             (af.transpose(params.element_array[j]), k))
        vol_int_analytical = af.transpose(af.np_to_af_array\
                                                   (vol_int_analytical))
        L1_norm_option_3[i] = af.mean(af.abs(vol_int_analytical\
                                      - wave_equation.volume_integral_flux(params.u_init, 0)))


    for i in range(0, 15):
        test_waveEqn.change_parameters(i + 3, 16, i + 3)
        vol_int_analytical = np.zeros([params.N_Elements, params.N_LGL])
        for j in range (params.N_Elements):
            for k in range (params.N_LGL):
                vol_int_analytical[j][k] = analytical_volume_integral(\
                                           af.transpose(params.element_array[j]), k)
        vol_int_analytical  = af.transpose(af.np_to_af_array(vol_int_analytical))
        L1_norm_option_1[i] = af.mean(af.abs(vol_int_analytical\
                                      - wave_equation.volume_integral_flux(params.u_init, 0)))
    normalization = 0.0023187 / (3 ** (-3))


    print(L1_norm_option_1, L1_norm_option_3)
    plt.loglog(N_LGL, L1_norm_option_1, marker='o', label='L1 norm option 1')
    plt.loglog(N_LGL, L1_norm_option_3, marker='o', label='L1 norm option 3')
    plt.loglog(N_LGL, normalization * N_LGL **(-N_LGL), color='black', linestyle='--', label='$N_{LGL}^{-N_{LGL}}$')
    plt.title('L1 norm of volume integral term')
    plt.xlabel('LGL points')
    plt.ylabel('L1 norm')
    plt.legend(loc='best')
    plt.show()
Example #23
def simple_statistics(verbose=False):
    display_func = _util.display_func(verbose)
    print_func   = _util.print_func(verbose)

    a = af.randu(5, 5)
    b = af.randu(5, 5)
    w = af.randu(5, 1)

    display_func(af.mean(a, dim=0))
    display_func(af.mean(a, weights=w, dim=0))
    print_func(af.mean(a))
    print_func(af.mean(a, weights=w))

    display_func(af.var(a, dim=0))
    display_func(af.var(a, isbiased=True, dim=0))
    display_func(af.var(a, weights=w, dim=0))
    print_func(af.var(a))
    print_func(af.var(a, isbiased=True))
    print_func(af.var(a, weights=w))

    display_func(af.stdev(a, dim=0))
    print_func(af.stdev(a))

    display_func(af.var(a, dim=0))
    display_func(af.var(a, isbiased=True, dim=0))
    print_func(af.var(a))
    print_func(af.var(a, isbiased=True))

    display_func(af.median(a, dim=0))
    print_func(af.median(w))

    print_func(af.corrcoef(a, b))

    data = af.iota(5, 3)
    k = 3
    dim = 0
    order = af.TOPK.DEFAULT # defaults to af.TOPK.MAX
    assert(dim == 0) # topk currently supports first dim only
    values,indices = af.topk(data, k, dim, order)
    display_func(values)
    display_func(indices)
Example #24
    def reconstruction_loss(self, stack_gpu, defocus_incr, defocus_ramp):

        defocuses = [
            incr * ramp for incr, ramp in zip(defocus_incr, defocus_ramp)
        ]
        reconstruction = reconstruct(stack_gpu.copy(),
                                     defocuses,
                                     stack_on_gpu=True)

        #Use the wavefunction to recreate the original images
        deconstruction = [self.propagate_back_to_defocus(reconstruction, defocus, self.wavelength) \
           for defocus in defocuses]

        losses = [0.] * len(stack_gpu)
        for i in range(len(losses)):
            collapse = af.abs(deconstruction[i])**2
            collapse *= af.mean(stack_gpu[i]) / af.mean(collapse)

            losses[i] = af.mean((stack_gpu[i] - collapse)**2)

        return np.max(losses)
Example #25
def test_volume_integral_flux():
    '''
    A test function to check the volume_integral_flux function in the
    wave_equation module against an analytically calculated Gauss-Lobatto
    quadrature.

    Reference
    ---------
    The link to the sage worksheet where the calculations were carried out is
    given below.
    `https://goo.gl/5Mub8M`
    '''
    threshold = 4e-9
    params.c = 1
    change_parameters(8, 10, 11, 'gaussian')

    referenceFluxIntegral = af.transpose(af.interop.np_to_af_array(np.array
        ([
        [-0.002016634876668093, -0.000588597708116113, -0.0013016773719126333,\
        -0.002368387579324652, -0.003620502047659841, -0.004320197094090966,
        -0.003445512010153811, 0.0176615086879261],\

        [-0.018969769374, -0.00431252844519,-0.00882630935977,-0.0144355176966,\
        -0.019612124119, -0.0209837936827, -0.0154359890788, 0.102576031756], \

        [-0.108222418798, -0.0179274222595, -0.0337807018822, -0.0492589052599,\
        -0.0588472807471, -0.0557970236273, -0.0374764132459, 0.361310165819],\

        [-0.374448714304, -0.0399576371245, -0.0683852285846, -0.0869229749357,\
        -0.0884322503841, -0.0714664112839, -0.0422339853622, 0.771847201979], \

        [-0.785754362849, -0.0396035640187, -0.0579313769517, -0.0569022801117,\
        -0.0392041960688, -0.0172295769141, -0.00337464521455, 1.00000000213],\

        [-1.00000000213, 0.00337464521455, 0.0172295769141, 0.0392041960688,\
        0.0569022801117, 0.0579313769517, 0.0396035640187, 0.785754362849],\

        [-0.771847201979, 0.0422339853622, 0.0714664112839, 0.0884322503841, \
        0.0869229749357, 0.0683852285846, 0.0399576371245, 0.374448714304],\

        [-0.361310165819, 0.0374764132459, 0.0557970236273, 0.0588472807471,\
        0.0492589052599, 0.0337807018822, 0.0179274222595, 0.108222418798], \

        [-0.102576031756, 0.0154359890788, 0.0209837936827, 0.019612124119, \
        0.0144355176966, 0.00882630935977, 0.00431252844519, 0.018969769374],\

        [-0.0176615086879, 0.00344551201015 ,0.00432019709409, 0.00362050204766,\
        0.00236838757932, 0.00130167737191, 0.000588597708116, 0.00201663487667]\

         ])))

    numerical_flux = wave_equation.volume_integral_flux(params.u[:, :, 0])
    assert (af.mean(af.abs(numerical_flux - referenceFluxIntegral)) <
            threshold)
Example #26
def test_compute_moments():

    obj = test()

    rho_num = compute_moments(obj, 'density')
    rho_ana = 1 + 0.01 * af.sin(2 * np.pi * obj.q1 + 4 * np.pi * obj.q2)

    error_rho = af.mean(af.abs(rho_num - rho_ana))

    E_num = compute_moments(obj, 'energy')
    E_ana =   3 * (1 + 0.01 * af.sin(2 * np.pi * obj.q1 + 4 * np.pi * obj.q2)) \
                * (1 + 0.01 * af.cos(2 * np.pi * obj.q1 + 4 * np.pi * obj.q2)) \
            + 3 * (1 + 0.01 * af.sin(2 * np.pi * obj.q1 + 4 * np.pi * obj.q2)) \
                *  (0.01 * af.exp(-10 * obj.q1**2 - 10 * obj.q2**2))**2

    error_E = af.mean(af.abs(E_num - E_ana))

    mom_p1b_num = compute_moments(obj, 'mom_p1_bulk')
    mom_p1b_ana =   (1 + 0.01 * af.sin(2 * np.pi * obj.q1 + 4 * np.pi * obj.q2)) \
                  * (0.01 * af.exp(-10 * obj.q1**2 - 10 * obj.q2**2))

    error_p1b = af.mean(af.abs(mom_p1b_num - mom_p1b_ana))

    mom_p2b_num = compute_moments(obj, 'mom_p2_bulk')
    mom_p2b_ana =   (1 + 0.01 * af.sin(2 * np.pi * obj.q1 + 4 * np.pi * obj.q2)) \
                  * (0.01 * af.exp(-10 * obj.q1**2 - 10 * obj.q2**2))

    error_p2b = af.mean(af.abs(mom_p2b_num - mom_p2b_ana))

    mom_p3b_num = compute_moments(obj, 'mom_p3_bulk')
    mom_p3b_ana  =   (1 + 0.01 * af.sin(2 * np.pi * obj.q1 + 4 * np.pi * obj.q2)) \
                   * (0.01 * af.exp(-10 * obj.q1**2 - 10 * obj.q2**2))

    error_p3b = af.mean(af.abs(mom_p3b_num - mom_p3b_ana))

    assert (error_rho < 1e-13)
    assert (error_E < 1e-13)
    assert (error_p1b < 1e-13)
    assert (error_p2b < 1e-13)
    assert (error_p3b < 1e-13)
Example #27
def squared_loss(y_true, y_pred):
    """Compute the squared loss for regression.
    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) values.
    y_pred : array-like or label indicator matrix
        Predicted values, as returned by a regression estimator.
    Returns
    -------
    loss : float
        The degree to which the samples are correctly predicted.
    """
    return af.mean(af.flat((y_true - y_pred) ** 2)) / 2
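A short usage sketch for squared_loss with ArrayFire arrays; the shapes and noise level are illustrative, and af is assumed to be imported as above:

y_true = af.randu(8, 1)
y_pred = y_true + 0.1 * af.randn(8, 1)

# Half the mean squared error, as defined above
print("squared loss = {}".format(squared_loss(y_true, y_pred)))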
Example #28
def test_f_interp_2d():
    N = 2**np.arange(5, 11)
    error = np.zeros(N.size)

    for i in range(N.size):
        test_obj = test(int(N[i]), int(N[i]), 3)
        f_interp_2d(test_obj, 0.00001)
        f_analytic = af.sin(2 * np.pi * (test_obj.q1_center - 0.00001) +
                            4 * np.pi * (test_obj.q2_center - 0.00001))
        error[i] = af.mean(
            af.abs(test_obj.f[:, 3:-3, 3:-3] - f_analytic[:, 3:-3, 3:-3]))

    poly = np.polyfit(np.log10(N), np.log10(error), 1)
    assert (abs(poly[0] + 2) < 0.2)
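The slope of the log-log fit above is the observed order of convergence, so poly[0] close to -2 means the interpolation error decays like N**-2. A tiny synthetic sketch of the same fit, with made-up error values standing in for the solver output:

import numpy as np

N = 2**np.arange(5, 11)
# Hypothetical errors decaying like N**-2, with a little noise
error = 3.0 * N**-2.0 * (1 + 0.01 * np.random.rand(N.size))

poly = np.polyfit(np.log10(N), np.log10(error), 1)
print("observed order of convergence:", -poly[0])
assert abs(poly[0] + 2) < 0.2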
Example #29
    def check_maxwells_contraint_equations(self, rho):

        PETSc.Sys.Print("Initial Maxwell's Constraints:")
        divB_error = af.abs(self.compute_divB())
        af_to_petsc_glob_array(self, divB_error, self._glob_divB_error_array)

        PETSc.Sys.Print('MAX(|divB|) =', self._glob_divB_error.max())

        # TODO: What is going on here with rho_b?
        rho_by_eps     = rho #/ self.params.dielectric_epsilon
        rho_b          = af.mean(rho_by_eps) # background
        divE_error     = self.compute_divE() - rho_by_eps + rho_b

        af_to_petsc_glob_array(self, divE_error, self._glob_divE_error_array)
        PETSc.Sys.Print('MAX(|divE-rho|) =', self._glob_divE_error.max())
Example #30
def check_error(params):
    error = np.zeros(N.size)

    for i in range(N.size):
        domain.N_p1 = int(N[i])
        domain.N_p2 = int(N[i])
        # Defining the physical system to be solved:
        system = physical_system(domain, boundary_conditions, params,
                                 initialize, advection_terms,
                                 collision_operator.BGK, moment_defs)

        # Declaring a nonlinear solver object which will evolve the defined physical system:
        nls = nonlinear_solver(system)
        N_g = nls.N_ghost_q

        # Time parameters:
        dt = 0.01 * 32 / nls.N_p1
        t_final = 0.2

        time_array = np.arange(dt, t_final + dt, dt)

        # Since only the p = 1 mode is excited:

        E1 = nls.cell_centered_EM_fields_at_n[0]
        E2 = nls.cell_centered_EM_fields_at_n[1]
        E3 = nls.cell_centered_EM_fields_at_n[2]

        B1 = nls.cell_centered_EM_fields_at_n[3]
        B2 = nls.cell_centered_EM_fields_at_n[4]
        B3 = nls.cell_centered_EM_fields_at_n[5]

        (A_p1, A_p2,
         A_p3) = af.broadcast(nls._A_p, nls.q1_center, nls.q2_center,
                              nls.p1_center, nls.p2_center, nls.p3_center, E1,
                              E2, E3, B1, B2, B3, nls.physical_system.params)

        f_analytic = af.broadcast(initialize.initialize_f, nls.q1_center,
                                  nls.q2_center,
                                  addition(nls.p1_center, -A_p1 * t_final),
                                  addition(nls.p2_center, -A_p2 * t_final),
                                  nls.p3_center, nls.physical_system.params)

        for time_index, t0 in enumerate(time_array):
            nls.strang_timestep(dt)

        error[i] = af.mean(af.abs(nls.f - f_analytic))

    return (error)
Example #31
def monte_carlo_options(N, K, t, vol, r, strike, steps, use_barrier=True, B=None, ty=af.Dtype.f32):
    payoff = af.constant(0, N, 1, dtype = ty)

    dt = t / float(steps - 1)
    s = af.constant(strike, N, 1, dtype = ty)

    randmat = af.randn(N, steps - 1, dtype = ty)
    randmat = af.exp((r - (vol * vol * 0.5)) * dt + vol * math.sqrt(dt) * randmat)

    S = af.product(af.join(1, s, randmat), 1)

    if (use_barrier):
        S = S * af.all_true(S < B, 1)

    payoff = af.maxof(0, S - K)
    return af.mean(payoff) * math.exp(-r * t)
Example #32
def test_1V():

    obj = test()

    obj.single_mode_evolution = False

    f_generalized = MB_dist(obj.q1_center, obj.q2_center, obj.p1, obj.p2,
                            obj.p3, 1)

    C_f_hat_generalized = 2 * af.fft2(
        collision_operator.BGK(
            f_generalized, obj.q1_center, obj.q2_center, obj.p1, obj.p2,
            obj.p3, obj.compute_moments,
            obj.physical_system.params)) / (obj.N_q2 * obj.N_q1)

    # Background
    C_f_hat_generalized[0, 0, :] = 0

    # Finding the indices of the mode excited:
    i_q1_max = np.unravel_index(
        af.imax(af.abs(C_f_hat_generalized))[1],
        (obj.N_q1, obj.N_q2, obj.N_p1 * obj.N_p2 * obj.N_p3),
        order='F')[0]

    i_q2_max = np.unravel_index(
        af.imax(af.abs(C_f_hat_generalized))[1],
        (obj.N_q1, obj.N_q2, obj.N_p1 * obj.N_p2 * obj.N_p3),
        order='F')[1]

    obj.p1 = np.array(af.reorder(obj.p1, 1, 2, 3, 0))
    obj.p2 = np.array(af.reorder(obj.p2, 1, 2, 3, 0))
    obj.p3 = np.array(af.reorder(obj.p3, 1, 2, 3, 0))

    delta_f_hat = 0.01 * (1 / (2 * np.pi))**(1 / 2) \
                       * np.exp(-0.5 * obj.p1**2)

    obj.single_mode_evolution = True

    C_f_hat_single_mode = collision_operator.linearized_BGK(
        delta_f_hat, obj.p1, obj.p2, obj.p3, obj.compute_moments,
        obj.physical_system.params)

    assert (af.mean(
        af.abs(
            af.flat(C_f_hat_generalized[i_q1_max, i_q2_max]) -
            af.to_array(C_f_hat_single_mode.flatten()))) < 1e-14)
Example #33
    def train(self, X, Y):
        # Initialize parameters to 0
        self.__weights = af.constant(0, X.dims()[1], Y.dims()[1])
        # self.__weights = af.randu(X.dims()[1], Y.dims()[1])

        for i in range(self.__maxiter):
            P = self.predict_proba(X)
            err = Y - P

            mean_abs_err = af.mean(af.abs(err))
            if mean_abs_err < self.__maxerr:
                break

            if self.__verbose and ((i + 1) % 25 == 0):
                print("Iter: {}, Err: {}".format(i+1, mean_abs_err))

            self.__weights = self.__weights + self.__alpha * af.matmulTN(X, err)
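The loop above looks like batch gradient ascent for a logistic-style model, w <- w + alpha * X^T (Y - P), with af.mean(af.abs(err)) as the stopping test. A self-contained sketch of that update pattern on random data; the sigmoid below stands in for the predict_proba method that is not shown, and the shapes, learning rate and tolerance are illustrative:

import arrayfire as af

def sigmoid(z):
    return 1 / (1 + af.exp(-z))

X = af.randu(100, 3)              # 100 samples, 3 features
Y = af.round(af.randu(100, 1))    # random 0/1 labels, purely for illustration
weights = af.constant(0, 3, 1)

alpha, maxiter, maxerr = 0.1, 200, 0.35
for i in range(maxiter):
    P = sigmoid(af.matmul(X, weights))     # predicted probabilities
    err = Y - P
    if af.mean(af.abs(err)) < maxerr:      # same convergence test as above
        break
    weights = weights + alpha * af.matmulTN(X, err)  # w <- w + alpha * X^T (Y - P)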
Example #34
def check_error(params):

    error = np.zeros(N.size)

    for i in range(N.size):
        af.device_gc()

        domain.N_q1 = int(N[i])
        domain.N_p1 = int(N[i])
 
        # Defining the physical system to be solved:
        system = physical_system(domain,
                                 boundary_conditions,
                                 params,
                                 initialize,
                                 advection_terms,
                                 collision_operator.BGK,
                                 moments
                                )

        # Declaring a nonlinear solver object which will evolve the defined physical system:
        nls = nonlinear_solver(system)
        N_g = nls.N_ghost

        # Time parameters:
        dt      = 0.001 * 32/nls.N_q1
        t_final = 0.1

        time_array  = np.arange(dt, t_final + dt, dt)
        f_reference = af.broadcast(initialize.initialize_f,
                                   af.broadcast(lambda a, b:a+b, nls.q1_center, - nls.p1_center * t_final), 
                                   nls.q2_center, nls.p1_center, nls.p2_center, nls.p3_center, params
                                  )

        for time_index, t0 in enumerate(time_array):
            nls.strang_timestep(dt)

        error[i] = af.mean(af.abs(  nls.f[:, :, N_g:-N_g, N_g:-N_g] 
                                  - f_reference[:, :, N_g:-N_g, N_g:-N_g]
                                 )
                          )

    return(error)
Example #35
def time_evolution(gv):
    '''
    Advances u in time using RK4 time-stepping, dumping the solution to HDF5
    every 10 timesteps, and returns the final L1 norm of (u_init - u).
    '''
    # Create a folder to store HDF5 files if it doesn't exist.
    results_directory = 'results/2d_hdf5_%02d' % (int(params.N_LGL))
    if not os.path.exists(results_directory):
        os.makedirs(results_directory)

    u = gv.u_e_ij
    delta_t = gv.delta_t_2d
    time = gv.time_2d
    u_init = gv.u_e_ij

    A_inverse = af.np_to_af_array(np.linalg.inv(np.array(A_matrix(gv))))

    for i in trange(time.shape[0]):
        L1_norm = af.mean(af.abs(u_init - u))

        if (L1_norm >= 100):
            break
        if (i % 10) == 0:
            h5file = h5py.File(
                'results/2d_hdf5_%02d/dump_timestep_%06d' %
                (int(params.N_LGL), int(i)) + '.hdf5', 'w')
            dset = h5file.create_dataset('u_i', data=u, dtype='d')

            dset[:, :] = u[:, :]

        u += RK4_timestepping(A_inverse, u, delta_t, gv)

        #Implementing second order time-stepping.
        #u_n_plus_half =  u + af.matmul(A_inverse, b_vector(u))\
        #                      * delta_t / 2

        #u            +=  af.matmul(A_inverse, b_vector(u_n_plus_half))\
        #                  * delta_t

    return L1_norm
Example #36
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################

import arrayfire as af

a = af.randu(5, 5)
b = af.randu(5, 5)
w = af.randu(5, 1)

af.display(af.mean(a, dim=0))
af.display(af.mean(a, weights=w, dim=0))
print(af.mean(a))
print(af.mean(a, weights=w))

af.display(af.var(a, dim=0))
af.display(af.var(a, isbiased=True, dim=0))
af.display(af.var(a, weights=w, dim=0))
print(af.var(a))
print(af.var(a, isbiased=True))
print(af.var(a, weights=w))

af.display(af.stdev(a, dim=0))
print(af.stdev(a))

af.display(af.var(a, dim=0))
Example #37
#!/usr/bin/python
import arrayfire as af

a = af.randu(5, 5)
b = af.randu(5, 5)
w = af.randu(5, 1)

af.print_array(af.mean(a, dim=0))
af.print_array(af.mean(a, weights=w, dim=0))
print(af.mean(a))
print(af.mean(a, weights=w))

af.print_array(af.var(a, dim=0))
af.print_array(af.var(a, isbiased=True, dim=0))
af.print_array(af.var(a, weights=w, dim=0))
print(af.var(a))
print(af.var(a, isbiased=True))
print(af.var(a, weights=w))

af.print_array(af.stdev(a, dim=0))
print(af.stdev(a))

af.print_array(af.var(a, dim=0))
af.print_array(af.var(a, isbiased=True, dim=0))
print(af.var(a))
print(af.var(a, isbiased=True))

af.print_array(af.median(a, dim=0))
print(af.median(w))

print(af.corrcoef(a, b))
Example #38
    def mean(self, s, axis):
        if self.dtype == numpy.bool_:
            s = s.astype(pu.typemap(numpy.float64))
        return arrayfire.mean(s, dim=axis)
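The wrapper above passes a NumPy-style axis argument straight through as ArrayFire's dim. A small sketch of that correspondence on a plain array (the values are arbitrary, and double-precision support on the device is assumed):

import numpy as np
import arrayfire as af

x = np.random.rand(4, 6)
a = af.interop.np_to_af_array(x)

# The overall mean is layout-independent, so the two should agree closely
assert abs(af.mean(a) - x.mean()) < 1e-10

# Per-dimension reductions: dim=0 averages over the first dimension, dim=1 over the second
af.display(af.mean(a, dim=0))
af.display(af.mean(a, dim=1))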