def test_points_shift():
    """Tests that shifted points are inside the unit square.

    Also tests that all points are shifted the same, and that the shift
    is random.

    """
    J = 100
    N = 1024
    point_generation_method = 'mc'
    seed = 42
    points = point_gen.mc_points(J,N,point_generation_method,[0,1],seed)

    shifted_points = point_gen.shift(points)

    assert (-0.5 <= shifted_points).all() and (shifted_points <= 0.5).all()

    # Checking points have been shifted by the same amount. If either
    # point has, in any coordinate, been 'wrapped round', then the
    # difference in their shifts will be 1 (the size of the hypercube).
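    # For example, a point at 0.4 shifted by 0.3 wraps round to -0.3,
    # so its apparent shift is -0.3 - 0.4 = -0.7 = 0.3 - 1.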
    shift_0 = shifted_points[0,:] - points[0,:]

    shift_1 = shifted_points[1,:] - points[1,:]

    # This is a bit of a hack, because arrays of truth values are
    # complicated. It says that for every element of the arrays, either
    # they are equal, or they differ by 1.
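    # For instance, if a coordinate of shift_0 - shift_1 is 0 then the
    # first factor below vanishes, and if it is +1 or -1 then the
    # second factor vanishes, so the product is zero either way.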
    differences = (shift_0 - shift_1) * (np.abs(shift_0-shift_1) - 1)

    assert np.allclose(differences,0)
    
    # Heuristic check (similar to that in test_mc_points_correct) that
    # the shift is random: the apparent shifts should average out to
    # roughly zero, since each coordinate's shift appears as either s
    # or s - 1.
    assert np.abs(shift_0.mean()) < 0.07


def test_lexicographic_ordering():
    """Tests whether lexicographic ordering works."""

    points = point_gen.mc_points(20,2**5,'qmc',[0,1],seed=None,
                                 order_lexicographically=True)

    # NB np.lexsort treats the *last* row of its input as the primary
    # sort key, so this sorts the points lexicographically with the
    # final coordinate as the most significant.
    points_ordering = np.lexsort(points.transpose())

    # points_ordering is a permutation of 0,...,N-1, so its sorted copy
    # is just arange(N); the assertion below therefore checks that
    # lexsort leaves the points in place, i.e. that they were already
    # ordered.
    points_ordered = deepcopy(points_ordering)

    points_ordered.sort()

    assert (points_ordering == points_ordered).all()


def test_mc_points_correct():
    """Tests that Monte Carlo points are in the (centred) unit cube.

    Also provides a quick 'check' that the points are random.
    """
    
    J = 100
    N = 1024
    point_generation_method = 'mc'
    seed = 42
    section = [0,1]
    points = point_gen.mc_points(J,N,point_generation_method,section,seed)
    assert (-0.5 <= points).all() and (points <= 0.5).all()

    # The following is a 'quick and dirty' check that the points are
    # random - whether their average is near the centre of the cube. The
    # threshold for 'near' is a heuristic that I chose by looking at
    # generated random numbers.
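    # (With J*N = 102400 independent uniforms on [-0.5,0.5], the sample
    # mean has standard deviation 1/sqrt(12*102400) ~= 0.0009, so the
    # tolerance of 0.0025 is roughly three standard deviations.)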
    assert np.isclose(points.mean(),0.0,atol=0.0025)


def test_qmc_points_correct():
    """Tests that quasi-Monte Carlo points are in the (centred) unit cube.

    Also checks that they are the same as those generated by Dirk
    Nuyens' code (although, given that Dirk's code underlies the code
    being tested, this isn't a great test). But the test was written
    without looking at the code being tested, so maybe that makes it
    slightly better.

    """
    J = 100
    N = 1024
    point_generation_method = 'qmc'
    points = point_gen.mc_points(J,N,point_generation_method,[0,1])
    assert (-0.5 <= points).all() and (points <= 0.5).all()

    true_points_gen = latticeseq_b2.latticeseq_b2(s=J)

    for m in range(11):
        true_points = true_points_gen.calc_block(m) - 0.5
        if m == 0:
            # Block 0 contains only the first point, hence the
            # different indexing
            assert (true_points == points[0:1,:]).all()
        else:
            assert (true_points == points[2**(m-1):2**m,:]).all()


def find_nbpc_points(M,nearby_preconditioning_proportion,kl_like,J,
                     point_generation_method,this_ensemble_points,shift_no):
    """Finds the points to use as 'centres' for nearby preconditioning, and
    calculates which 'centre' corresponds to each QMC point.
    """
    # Points are generated here, so this is the natural place to do
    # all the 'figuring out the centres' business. We distribute the
    # points at the centres of the 'preconditioning balls' using a
    # tensor-product grid. Suppose we knew that the radius of these
    # balls (in a weighted L^1-metric) should be r. Then the spacing
    # of the points in dimension j should be
    # \[d_j = r / (J * \sqrt{\lambda_j})\]
    # (where the \sqrt{\lambda_j} are as in
    # helmholtz_firedrake.coefficients.UniformKLLikeCoeff). In order
    # to achieve this spacing, we would need \ceil(1/d_j) points in
    # dimension j. However, we instead know the number of points, and
    # we reverse engineer the above argument to get the radius of the
    # balls, and thence the spacing in each dimension. Suppose for
    # simplicity (and because there will be various other fudges and
    # approximations in what follows) that we have 1/d_j points in
    # each dimension. Then the total number of points is
    # \[r^{-J} \prod_{j=1}^J J \sqrt{\lambda_j}.\]
    # If we specify that the total number of 'centres' is N_C, then
    # solving for r gives
    # \[r = J \Bigl(N_C^{-1} \prod_{j=1}^J \sqrt{\lambda_j}\Bigr)^{1/J},\]
    # and thence we can determine d_j, and lay down equispaced points
    # in dimension j with this spacing. We then assemble the points in
    # all of stochastic space via tensor products. We then find the
    # actual 'centres' by selecting the QMC points that are nearest to
    # these 'ideal' centres. Finally, we associate each QMC point with
    # a 'centre' by selecting the closest 'centre'.
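    # A worked instance of the formula above (illustrative numbers
    # only): with J = 2, sqrt(lambda_j) = (0.4,0.1) and N_C = 8, we get
    # r = 2*(0.4*0.1/8)**0.5 ~= 0.141, so 1/d_j = J*sqrt(lambda_j)/r
    # ~= (5.7,1.4) points per dimension, whose product is (up to
    # rounding) the requested 8 centres.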
    
    N = 2**M
    
    num_centres = round(N*nearby_preconditioning_proportion)

    sqrt_lambda = kl_like._sqrt_lambda
    # (sqrt_lambda is assumed to be a row vector here.)

    # We distribute the number of points at which to construct the
    # preconditioners according to the decay of the sqrt(lambda_j),
    # with at least one point in each dimension.
    
    # To generate the distribution of the centres over the different
    # dimensions we use a bit of a hack. We write a function that,
    # given the radius r of the balls (in the weighted metric), returns
    # the number of points in each dimension (well, not quite: at this
    # stage the 'numbers of points' are not necessarily integers). We
    # then solve for the value of r at which the product of these
    # numbers equals the requested number of centres, and round the
    # resulting decimals to get numbers of points that (we hope) aren't
    # too far off. This is the right function to optimise because the
    # continuous centre numbers below are non-increasing in r, so
    # optim_fn is a non-increasing (nonlinear and nonsmooth) function
    # of r with a sign change that bisection can locate.

    def continuous_centre_nums(r):
        # Continuous (non-integer) number of centres in each dimension,
        # clamped below so that every dimension gets at least one.
        return np.maximum(1.0,(float(J)*sqrt_lambda)/(2.0*r))

    def optim_fn(r):
        # Signed mismatch between the number of centres implied by the
        # radius r and the number requested.
        return continuous_centre_nums(r).prod()-float(num_centres)

    # Find endpoints for the bisection: since optim_fn is
    # non-increasing in r, we need optim_fn(lower_bound) >= 0 and
    # optim_fn(upper_bound) <= 0.
    lower_bound = 0.1
    while optim_fn(lower_bound) < 0:
        lower_bound = 0.5 * lower_bound

    upper_bound = 1.0
    while optim_fn(upper_bound) > 0:
        upper_bound = 2.0 * upper_bound

    radius = optimize.bisect(optim_fn,lower_bound,upper_bound)

    
    centre_nums = np.round(continuous_centre_nums(radius))
    # A better way to do this would be to find the closest point on the
    # integer lattice (and even that isn't quite the right target), but
    # I've no idea how easy/hard that is.
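    # For jj points in dimension j, the line below lays down an
    # equispaced interior grid: e.g. jj = 3 gives
    # linspace(0.25,0.75,3) - 0.5 = [-0.25, 0.0, 0.25].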
    
    one_d_points = [-0.5+np.linspace(1.0/(jj+1.0),jj/(jj+1.0),int(jj)) for jj in centre_nums]

    centres_meshgrid = np.meshgrid(*one_d_points)

    proposed_centres = np.vstack([coord.flatten() for coord in centres_meshgrid]).transpose()
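    # For example, with one_d_points = [[-0.25,0.0,0.25],[0.0]] (a
    # hypothetical two-dimensional case), proposed_centres would have
    # shape (3,2): the full 3 x 1 tensor-product grid.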

    # Now to actually locate the centres at QMC points
    all_qmc_points = point_gen.mc_points(
        J,N,point_generation_method,section=[0,1],seed=1)

    # Reproduce the shift applied by the outer code, which, for shift
    # number shift_no, shifts the unshifted points once with
    # seed=shift_no.
    all_qmc_points = point_gen.shift(all_qmc_points,seed=shift_no)

    centres = []
    
    for proposed in proposed_centres:
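        # weighted_L1_norm (defined elsewhere in this module) is
        # assumed to return the sqrt_lambda-weighted L^1 distances from
        # 'proposed' to each row of all_qmc_points.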
        nearest_point = np.argmin(weighted_L1_norm(proposed,all_qmc_points,sqrt_lambda))

        potential_centre = all_qmc_points[nearest_point,:]

        # We guard against a centre being selected twice
        skip = False
        for other_centre in centres:
            if np.isclose(potential_centre,other_centre).all():
                skip = True
                break
                
        if not skip:
            centres.append(potential_centre)

    centres = np.vstack(centres)
    
    actual_num_centres = centres.shape[0]

    # Now find out, for each QMC point in this ensemble member, which
    # centre is nearest to it

    num_points_this_ensemble = this_ensemble_points.shape[0]

    nearest_centre = -np.ones(num_points_this_ensemble,dtype='int')

    for ii_point in range(num_points_this_ensemble):
        point = this_ensemble_points[ii_point,:]

        nearest_centre[ii_point] = np.argmin(weighted_L1_norm(point,centres,sqrt_lambda))

    # Check we've actually assigned a centre to each point
    assert (nearest_centre >= 0).all()
    
    return [centres,nearest_centre]


def generate_samples(k,h_spec,J,nu,M,
                     point_generation_method,
                     delta,lambda_mult,j_scaling,
                     qois,
                     num_spatial_cores,dim=2,
                     display_progress=False,physically_realistic=False,
                     nearby_preconditioning=False,
                     nearby_preconditioning_proportion=1):
    
    """Generates samples for Monte-Carlo methods for Helmholtz.

    Computes an approximation to the root-mean-squared error in
    Monte-Carlo or Quasi-Monte Carlo approximations of expectations of
    quantities of interest associated with the solution of a stochastic
    Helmholtz problem, where the randomness enters through a random
    field refractive index, given by an artificial-KL expansion.

    Parameters:

    k - positive float - the wavenumber for which to do computations.

    h_spec - 2-tuple - h_spec[0] should be a positive float and
    h_spec[1] should be a float. These specify the values of the mesh
    size h for which we will run experiments.
    h = h_spec[0] * k**h_spec[1].

    J - positive int - the stochastic dimension in the artificial-KL
    expansion for which to do experiments.

    nu - positive int - the number of random shifts to use in
    randomly-shifted QMC methods. Combines with M to give number of
    integration points for Monte Carlo.

    M - positive int - specifies the number of integration points for
    which to do computations - NOTE: for Monte Carlo, the number of
    integration points will be given by nu*(2**M). For Quasi-Monte
    Carlo, we will sample 2**M integration points, and then randomly
    shift these nu times as part of the estimator.

    point_generation_method - string - either 'mc' or 'qmc', specifying
    Monte-Carlo point generation or Quasi-Monte-Carlo (based on an
    off-the-shelf lattice rule). Monte-Carlo generation currently
    doesn't work, and so throws an error.

    delta - parameter controlling the rate of decay of the magnitude of
    the coefficients in the artificial-KL expansion - see
    helmholtz_firedrake.coefficients.UniformKLLikeCoeff for more
    information.

    lambda_mult - parameter controlling the absolute magnitude of the
    coefficients in the artificial-KL expansion - see
    helmholtz_firedrake.coefficients.UniformKLLikeCoeff for more
    information.

    j_scaling - parameter controlling the oscillation in the basis
    functions in the artificial-KL expansion - see
    helmholtz_firedrake.coefficients.UniformKLLikeCoeff for more
    information.

    qois - list of strings - the Quantities of Interest that are
    computed. Currently the only options for the elements of the list
    are:
        'integral' - the integral of the solution over the domain.
        'origin' - the point value at the origin.
        'top_right' - the point value at (1,1).
        'gradient_top_right' - the gradient at (1,1).
    There are also the options 'testing' and 'testing_qmc', but these
    are used solely for testing the functions.

    num_spatial_cores - int - the number of cores we want to use to
    solve our PDE. (You need to specify this as we might use ensemble
    parallelism to speed things up.)

    dim - either 2 or 3 - the spatial dimension of the Helmholtz
    Problem.

    display_progress - boolean - if true, prints the sample number each
    time we sample.

    physically_realistic - boolean - if true, f and g correspond to a
    scattered plane wave, n is cut off away from the truncation
    boundary, and n is >= 0.1. Otherwise, f and g are given by a plane
    wave. The 'false' option is used to verify regression tests.

    nearby_preconditioning - boolean - if true, nearby preconditioning
    is used in the solves. A proportion (given by nearby_preconditioning
    proportion) of the realisations have their exact LU decompositions
    computed, and then these are used as preconditioners for all the
    other problems (where the preconditioner used is determined by the
    nearest problem, in some metric, that has had a preconditioner
    computed). Note that if ensembles are used to speed up the solution
    time, some LU decompositions may be calculated more than once. But
    for the purposes of assessing the effectiveness of the algorithm (in
    terms of total # GMRES iterations), this isn't a problem.

    nearby_preconditioning_proportion - float in [0,1]. See the text for
    nearby_preconditioning above.

    Output:
    If point_generation_method is 'qmc', then the output is a list:
    [k,samples,n_coeffs,GMRES_its]

    k is a float - the wavenumber.

    samples is a list of length nu, where each entry of samples is a
    list of length num_qois, each entry of which is a numpy array of
    length 2**M, each entry of which is either: (i) a (complex-valued)
    float, or (ii) a numpy column vector, corresponding to a sample of
    the QoI.

    n_coeffs is a list of length nu, each entry of which is a 2**M by J
    numpy array, each row of which contains the KL-coefficients needed
    to generate the particular realisation of n.

    GMRES_its is a list of length nu, each entry of which is a list of
    length 2**M, containing ints - these are the number of GMRES
    iterations required for each sample.
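
    Example (hypothetical parameter values, for illustration only):

        out = generate_samples(k=10.0,h_spec=(1.0,-1.5),J=10,nu=8,M=6,
                               point_generation_method='qmc',
                               delta=1.0,lambda_mult=1.0,j_scaling=1.0,
                               qois=['integral','origin'],
                               num_spatial_cores=1)
        [k_out,samples,n_coeffs,GMRES_its] = out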
    """
    
    if point_generation_method == 'mc':
        raise NotImplementedError("Monte Carlo sampling currently doesn't work")
        
    num_qois = len(qois)
    
    mesh_points = hh_utils.h_to_num_cells(h_spec[0]*k**h_spec[1],dim)
    
    ensemble = fd.Ensemble(fd.COMM_WORLD,num_spatial_cores)
    
    mesh = fd.UnitSquareMesh(mesh_points,mesh_points,comm=ensemble.comm)

    comm = ensemble.ensemble_comm

    n_coeffs = []
        
    if point_generation_method == 'mc':
        # This needs updating once I've figured out a way to do seeding
        # in a parallel-appropriate way
        N = nu*(2**M)
        kl_mc_points = point_gen.mc_points(
            J,N,point_generation_method,seed=1)

    elif point_generation_method == 'qmc':
        N = 2**M
        kl_mc_points = point_gen.mc_points(
            J,N,point_generation_method,section=[comm.rank,comm.size],seed=1)

    n_0 = 1.0

    kl_like = coeff.UniformKLLikeCoeff(
        mesh,J,delta,lambda_mult,j_scaling,n_0,kl_mc_points)       
        
    # Create the problem
    V = fd.FunctionSpace(mesh,"CG",1)
    prob = hh.StochasticHelmholtzProblem(
        k,V,A_stoch=None,n_stoch=kl_like,
        **{'A_pre' : fd.as_matrix([[1.0,0.0],[0.0,1.0]])})

    angle = np.pi/4.0
    
    if physically_realistic:
        make_physically_realistic(prob,angle)
    else:
        prob.f_g_plane_wave([np.cos(angle),np.sin(angle)])
                
    if point_generation_method == 'mc':

        samples = all_qoi_samples(prob,qois,ensemble.comm,display_progress)
                        
    elif point_generation_method == 'qmc':

        samples = []

        GMRES_its = []
                   
        for shift_no in range(nu):
            if display_progress:
                print('Shift number:',shift_no+1,flush=True)
            # Randomly shift the points
            prob.n_stoch.change_all_points(
                point_gen.shift(kl_mc_points,seed=shift_no))

            n_coeffs.append(deepcopy(prob.n_stoch.current_and_unsampled_points()))

            if nearby_preconditioning:
                [centres,nearest_centre] = find_nbpc_points(M,nearby_preconditioning_proportion,
                                                            prob.n_stoch,J,point_generation_method,
                                                            prob.n_stoch.current_and_unsampled_points(),
                                                            shift_no)
            else:
                centres = None
                nearest_centre = None

            [this_samples,this_GMRES_its] = all_qoi_samples(prob,qois,ensemble.comm,display_progress,
                                                            centres,nearest_centre,J,delta,lambda_mult,
                                                            j_scaling,n_0,angle,physically_realistic)
            
            # For outputting samples and GMRES iterations
            samples.append(this_samples)
            GMRES_its.append(this_GMRES_its)
            


    samples = fancy_allgather(comm,samples,'samples')

    n_coeffs = fancy_allgather(comm,n_coeffs,'coeffs')

    # Have to hack around GMRES_its because it's not *quite* in the
    # right format: each entry is a list of iteration counts (or, if
    # nearby preconditioning isn't used, of Nones), so we wrap each
    # entry in a numpy array for the gather and unwrap it afterwards.

    GMRES_its = [[np.array(ii)] for ii in GMRES_its]
    
    GMRES_its = fancy_allgather(comm,GMRES_its,'samples')

    GMRES_its = [ii[0].tolist() for ii in GMRES_its]
    
    return [k,samples,n_coeffs,GMRES_its]