Example #1
    def sample(self, mesh):
        pos = np.zeros((self.n_point, 3), dtype=np.float32)
        ori = np.zeros((self.n_point, 3), dtype=np.float32)

        sa = mesh.area_total / float(self.n_point)  # area per generated point

        error = 0.0
        count = 0

        for i in range(mesh.n_face):
            # Expected number of points on face i, given its share of the total area
            tmp = mesh.area_face[i] / sa

            npt = int(tmp)  # integer part: points to scatter on this triangle
            error += tmp - npt
            # Accumulate the fractional part
            if error >= 1:  # once the accumulated fraction reaches 1, emit one extra point
                npt += 1
                error -= 1

            for j in range(npt):  # compute the coordinates of each point
                vec, self.seed = sobol.i4_sobol(2, self.seed)
                r1 = np.sqrt(vec[0])
                r2 = vec[1]
                for k in range(3):  # Osada's method for uniform sampling on a triangle
                    pos[count][k] = \
                        (1.0 - r1) * mesh.vert[mesh.face[i][0]][k] + \
                        r1 * (1.0 - r2) * mesh.vert[mesh.face[i][1]][k] + \
                        r1 * r2 * mesh.vert[mesh.face[i][2]][k]
                ori[count] = mesh.norm_face[i]  # orient the point along the face normal
                count += 1

        if count != self.n_point:  # if rounding left us one point short, add it on the last face
            vec, self.seed = sobol.i4_sobol(2, self.seed)
            r1 = np.sqrt(vec[0])
            r2 = vec[1]
            for k in range(3):  # Osada's method
                pos[self.n_point - 1][k] = \
                    (1.0 - r1) * mesh.vert[mesh.face[mesh.n_face - 1][0]][k] + \
                    r1 * (1.0 - r2) * mesh.vert[mesh.face[mesh.n_face - 1][1]][k] + \
                    r1 * r2 * mesh.vert[mesh.face[mesh.n_face - 1][2]][k]
            ori[self.n_point - 1] = mesh.norm_face[mesh.n_face - 1]  # face-normal orientation
            count += 1

        return np.hstack([pos, ori])
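The weighting in the inner loop above is Osada et al.'s square-root construction for sampling a triangle uniformly. A minimal standalone sketch (numpy only; the function name and vertex arguments are hypothetical):

import numpy as np

def osada_point(v0, v1, v2, u, v):
    # Map (u, v) in [0, 1)^2 to a uniformly distributed point on the
    # triangle (v0, v1, v2); sqrt(u) removes the area bias toward v0.
    r1 = np.sqrt(u)
    return (1.0 - r1) * v0 + r1 * (1.0 - v) * v1 + r1 * v * v2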
Example #2
def get_design_sites(dim, n_sample, x_lb, x_ub, sampling_method="lhs"):

    x_lb = atleast_2d(x_lb)
    x_ub = atleast_2d(x_ub)

    x_lb = x_lb.T if size(x_lb, 0) != 1 else x_lb
    x_ub = x_ub.T if size(x_ub, 0) != 1 else x_ub

    if sampling_method == "lhs":
        # Latin Hyper Cube Sampling: Get evenly distributed sampling in R^dim
        samples = lhs(dim, samples=n_sample) * (x_ub - x_lb) + x_lb

    elif sampling_method == "uniform":
        samples = np.random.rand(n_sample, dim) * (x_ub - x_lb) + x_lb

    elif sampling_method == "sobol":
        seed = mod(int(time.time()) + os.getpid(), int(1e6))
        samples = np.zeros((n_sample, dim))
        for i in range(n_sample):
            samples[i, :], seed = i4_sobol(dim, seed)
        samples = samples * (x_ub - x_lb) + x_lb

    elif sampling_method == "halton":
        sequencer = Halton(dim)
        samples = sequencer.get(n_sample) * (x_ub - x_lb) + x_lb

    return samples
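Every branch rescales unit-cube samples into the box [x_lb, x_ub]. A hypothetical call, assuming the lhs, Halton, and sobol helpers the function references are in scope:

X = get_design_sites(dim=2, n_sample=50, x_lb=[-5.0, -5.0], x_ub=[5.0, 5.0],
                     sampling_method="sobol")
# X has shape (50, 2); each row lies in [-5, 5] x [-5, 5]. Note the "sobol"
# branch seeds from the current time and PID, so runs are not reproducible.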
Example #4
def generate_sobol_seq(dim, n, seed):
    out = []
    for i in range(n):
        cur_out, seed = i4_sobol(dim, seed)
        out.append(cur_out)
    return np.array(out)
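i4_sobol(dim, seed) returns the sequence element at index seed together with seed + 1, which is why threading the returned seed through the loop walks the sequence in order. Spelled out (values match the test tables further below):

element, seed = i4_sobol(3, 0)     # element 0 (all zeros), seed becomes 1
element, seed = i4_sobol(3, seed)  # element 1 ([0.5, 0.5, 0.5]), seed becomes 2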
Example #5
    def __call__(self, number_of_samples,
                 dimension,
                 start=0):

        samples = np.zeros((number_of_samples, dimension))

        for sample in range(number_of_samples):
            samples[sample, :] = sobol.i4_sobol(dimension, start + sample)[0]

        return samples
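This variant indexes elements directly by seed and discards the returned next seed, so disjoint batches can be drawn by offsetting start. A hypothetical usage, assuming an owner class named SobolSampler:

# sampler = SobolSampler()
# first = sampler(64, dimension=5)             # elements 0..63
# second = sampler(64, dimension=5, start=64)  # elements 64..127, no overlap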
Example #6
def choose_sampling_method(N, k, rl):
    """Return an (N, k) sample matrix using the scheme selected by rl."""
    x = np.zeros((N, k))
    if rl == "R":  # pseudo-random uniform
        x = np.random.random((N, k))
    elif rl == "S":  # chaospy's Sobol generator
        x = cp.dist.sobol_lib.sobol(k, N).T
    elif rl == "L":  # chaospy's Latin hypercube
        x = cp.latin_hypercube(k, N).T
    elif rl == "sobol":  # this module's i4_sobol, threading the seed
        seed = 123
        for i in range(N):
            r, seed = i4_sobol(k, seed)
            x[i, :] = r
    return x
Example #7
def get_sobol(N):
    import numpy
    import sobol
    limit_rho_up = 10.
    limit_tau_up = 1.
    limit_gamma_up = 4.
    limit_rho_dn = 2.
    limit_tau_dn = 0.1
    limit_gamma_dn = 1.
    parameterLowerLimits = numpy.array([limit_rho_dn, limit_gamma_dn, limit_tau_dn])
    parameterUpperLimits = numpy.array([limit_rho_up, limit_gamma_up, limit_tau_up])
    Q = numpy.zeros( (N, 3) )
    for i in range(N):
        rho, gamma, tau = sobol.i4_sobol(3, i)[0] * \
            (parameterUpperLimits - parameterLowerLimits) + parameterLowerLimits
        Q[i,0] = rho
        Q[i,1] = gamma
        Q[i,2] = tau
    return Q
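The idiom `unit_sample * (upper - lower) + lower` recurs across these examples; written once as a generic helper (a sketch, not taken from any of the sources):

import numpy as np

def scale_to_box(U, lower, upper):
    # Affinely map rows of U from the unit cube onto the box [lower, upper].
    lower = np.asarray(lower, dtype=float)
    upper = np.asarray(upper, dtype=float)
    return U * (upper - lower) + lower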
Example #8
def generate_particles(domain, particles_per_cell = 30, seed = 112):
    total_particles = domain["nx"] * domain["ny"] * particles_per_cell
    particles = []

    # Particle format is: x, y, visc, xvel, yvel

    for _ in range(total_particles):
        xy, seed = sobol.i4_sobol(2, seed)
        xy = xy * 2  # Placeholder scaling: should use (xmax - xmin) and xmin, since i4_sobol returns values in [0, 1).

        # Custom rheology
        visc = 0.1 if xy[0] > 1. else 0.5

        particles.append([xy[0], xy[1], visc, 0., 0.])

    particles = np.array(particles)

    return particles
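A hypothetical call, assuming only the two keys the function actually reads:

domain = {"nx": 4, "ny": 4}
parts = generate_particles(domain, particles_per_cell=10)
# parts has shape (160, 5); columns are x, y, visc, xvel, yvel.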
Example #11
def generate_sobol_points(M, dim):
    points = []
    for i in range(M):
        points.append(sobol.i4_sobol(dim, i)[0])
    return np.array(points)
Example #12
def sobol_test05():
    """
    sobol_test05 tests i4_sobol.
    """
    print('\nSOBOL_TEST05'
          '\n  I4_SOBOL computes the next element of a Sobol sequence.'
          '\n'
          '\n  In this test, we demonstrate how the SEED can be'
          '\n  manipulated to skip ahead in the sequence, or'
          '\n  to come back to any part of the sequence.')

    target = np.array([
    [  0,   1, 0.000000,  0.000000,  0.000000],
    [  1,   2, 0.500000,  0.500000,  0.500000],
    [  2,   3, 0.750000,  0.250000,  0.750000],
    [  3,   4, 0.250000,  0.750000,  0.250000],
    [  4,   5, 0.375000,  0.375000,  0.625000],
    [100, 101, 0.4140625, 0.2578125, 0.3046875],
    [101, 102, 0.9140625, 0.7578125, 0.8046875],
    [102, 103, 0.6640625, 0.0078125, 0.5546875],
    [103, 104, 0.1640625, 0.5078125, 0.0546875],
    [104, 105, 0.2265625, 0.4453125, 0.7421875],
    [  3,   4, 0.250000,  0.750000,  0.250000],
    [  4,   5, 0.375000,  0.375000,  0.625000],
    [  5,   6, 0.875000,  0.875000,  0.125000],
    [  6,   7, 0.625000,  0.125000,  0.375000],
    [  7,   8, 0.125000,  0.625000,  0.875000],
    [ 98,  99, 0.7890625, 0.3828125, 0.1796875],
    [ 99, 100, 0.2890625, 0.8828125, 0.6796875],
    [100, 101, 0.4140625, 0.2578125, 0.3046875],
    [101, 102, 0.9140625, 0.7578125, 0.8046875],
    [102, 103, 0.6640625, 0.0078125, 0.5546875]])

    results = np.full_like(target, np.nan)

    dim_num = 3

    print('\n  Using dimension DIM_NUM =   %d\n' % dim_num)

    seed = 0

    print('\n  Seed  Seed   I4_SOBOL'
          '\n  In    Out\n')

    for i in range(5):
        [r, seed_out] = i4_sobol(dim_num, seed)
        out = '%6d %6d  ' % (seed, seed_out)
        for j in range(1, dim_num + 1):
            out += '%10f  ' % r[j - 1]
        print(out)
        results[i, :] = [seed, seed_out] + list(r)
        seed = seed_out

    print('\n  Jump ahead by increasing SEED:\n')

    seed = 100

    print('\n  Seed  Seed   I4_SOBOL'
          '\n  In    Out\n')

    for i in range(5):
        [r, seed_out] = i4_sobol(dim_num, seed)
        out = '%6d %6d  ' % (seed, seed_out)
        for j in range(1, dim_num + 1):
            out += '%10f  ' % r[j - 1]
        print(out)
        results[5 + i, :] = [seed, seed_out] + list(r)
        seed = seed_out
    print('\n  Jump back by decreasing SEED:\n')

    seed = 3

    print('\n  Seed  Seed   I4_SOBOL'
          '\n  In    Out\n')

    for i in range(5):
        [r, seed_out] = i4_sobol(dim_num, seed)
        out = '%6d %6d  ' % (seed, seed_out)
        for j in range(1, dim_num + 1):
            out += '%10f  ' % r[j - 1]
        print(out)
        results[10 + i, :] = [seed, seed_out] + list(r)
        seed = seed_out

    print('\n  Jump ahead by increasing SEED:\n')

    seed = 98

    print('\n  Seed  Seed   I4_SOBOL'
          '\n  In    Out\n')

    for i in range(5):
        [r, seed_out] = i4_sobol(dim_num, seed)
        out = '%6d %6d  ' % (seed, seed_out)
        for j in range(1, dim_num + 1):
            out += '%10f  ' % r[j - 1]
        print(out)
        results[15 + i, :] = [seed, seed_out] + list(r)
        seed = seed_out

    assert np.all(target == results)

    return
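The property the test exercises is that i4_sobol is a deterministic function of its seed: the element at a given index is always the same, which is what makes jumping ahead and back exact. In two lines:

r_a, _ = i4_sobol(3, 100)
r_b, _ = i4_sobol(3, 100)
assert np.array_equal(r_a, r_b)  # same seed, same element: jumps are exact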
Example #13
def sobol_test04():
    """
    sobol_test04 tests i4_sobol.
    """
    print('\nSOBOL_TEST04'
          '\n  I4_SOBOL returns the next element'
          ' of a Sobol sequence.'
          '\n  In this test, we call I4_SOBOL repeatedly.')

    dim_max = 4

    target = {
        2: np.array([
            [  0,   1, 0.000000,  0.000000],
            [  1,   2, 0.500000,  0.500000],
            [  2,   3, 0.750000,  0.250000],
            [  3,   4, 0.250000,  0.750000],
            [  4,   5, 0.375000,  0.375000],
            # ......................
            [106, 107, 0.9765625, 0.1953125],
            [107, 108, 0.4765625, 0.6953125],
            [108, 109, 0.3515625, 0.0703125],
            [109, 110, 0.8515625, 0.5703125],
            [110, 111, 0.6015625, 0.3203125]]),
        3: np.array([
            [  0,   1, 0.000000,  0.000000,  0.000000],
            [  1,   2, 0.500000,  0.500000,  0.500000],
            [  2,   3, 0.750000,  0.250000,  0.750000],
            [  3,   4, 0.250000,  0.750000,  0.250000],
            [  4,   5, 0.375000,  0.375000,  0.625000],
            # ......................
            [106, 107, 0.9765625, 0.1953125, 0.4921875],
            [107, 108, 0.4765625, 0.6953125, 0.9921875],
            [108, 109, 0.3515625, 0.0703125, 0.1171875],
            [109, 110, 0.8515625, 0.5703125, 0.6171875],
            [110, 111, 0.6015625, 0.3203125, 0.8671875]]),
        4: np.array([
            [  0,   1, 0.000000,  0.000000,  0.000000,  0.000000],
            [  1,   2, 0.500000,  0.500000,  0.500000,  0.500000],
            [  2,   3, 0.750000,  0.250000,  0.750000,  0.250000],
            [  3,   4, 0.250000,  0.750000,  0.250000,  0.750000],
            [  4,   5, 0.375000,  0.375000,  0.625000,  0.125000],
            # ......................
            [106, 107, 0.9765625, 0.1953125, 0.4921875, 0.6640625],
            [107, 108, 0.4765625, 0.6953125, 0.9921875, 0.1640625],
            [108, 109, 0.3515625, 0.0703125, 0.1171875, 0.7890625],
            [109, 110, 0.8515625, 0.5703125, 0.6171875, 0.2890625],
            [110, 111, 0.6015625, 0.3203125, 0.8671875, 0.5390625]])}


    for dim_num in range(2, dim_max + 1):

        seed = 0

        print('\n  Using dimension DIM_NUM =   %d' % dim_num)
        print('\n  Seed   Seed    I4_SOBOL'
              '\n  In     Out\n')

        results = np.full((111, 2 + dim_num), np.nan)
        for i in range(111):
            [r, seed_out] = i4_sobol(dim_num, seed)
            if (i < 5 or 105 < i):
                out = '%6d %6d  ' % (seed, seed_out)
                for j in range(dim_num):
                    out += '%10f  ' % r[j]
                print(out)
            elif (i == 5):
                print('  ......................')
            results[i, :] = [seed, seed_out] + list(r)
            seed = seed_out

        assert np.all(target[dim_num][0:5, :] == results[0:5, :]), "Start of array doesn't match"
        assert np.all(target[dim_num][5:10, :] == results[106:111, :]), "End of array doesn't match"

    return
Example #14
    def sample(self, N):
        """Return an array of <N> Sobol samples."""
        return np.array([i4_sobol(self.dim, i)[0] for i in range(N)])
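A hypothetical usage, assuming an owner object with self.dim already set:

# sampler.dim = 4
# X = sampler.sample(128)  # shape (128, 4): Sobol elements 0..127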
Example #15
def main():
    """."""

    im_gt = np.squeeze(imread('charlie-chaplin.jpg'))

    rho_limits = [0.01, 10]
    gamma_limits = [0.01, 2]
    tau_limits = [0.01, 10]

    param_lb = np.array([rho_limits[0], gamma_limits[0], tau_limits[0]])
    param_ub = np.array([rho_limits[1], gamma_limits[1], tau_limits[1]])

    sample_size = 2000

    Q = np.zeros((sample_size, 3))

    for i in range(sample_size):
        rho, gamma, tau = sobol.i4_sobol(3, i)[0] * (param_ub - param_lb) + param_lb

        Q[i, 0] = rho
        Q[i, 1] = gamma
        Q[i, 2] = tau

    P = []
    E = []

    # Get initial set of parameters P and corresponding MSEs E
    for i in range(20):
        print(i)
        P.append(np.array([Q[i, 0], Q[i, 1], Q[i, 2]]))
        im_pred = kernel_ridge_regression(
            kernel_fct='Exponential', rho=Q[i, 0], gamma=Q[i, 1], tau=Q[i, 2])
        mse = compute_mse(im_pred, im_gt)
        E.append(mse)

    E_best = np.min(E)

    # Iterate improving hyperparameters
    for j in range(10):
        print('Iteration hyperparameter improvement: {}'.format(j))
        u_best = -np.inf
        theta_best = None
        for i in range(sample_size):

            M = matern_kernel(P)

            theta = np.array([Q[i, 0], Q[i, 1], Q[i, 2]])
            m = get_m(P, theta)

            E_i, var_msea = compute_msea(E, M, m)

            gamma = (E_best - E_i) / np.sqrt(var_msea)

            u = np.sqrt(var_msea)*(gamma*stats.norm.cdf(gamma) +
                                   stats.norm.pdf(gamma))
            if u > u_best:
                u_best = u
                theta_best = theta.copy()

        P.append(theta_best)

        im_pred = kernel_ridge_regression(kernel_fct='Exponential',
            rho=theta_best[0], gamma=theta_best[1], tau=theta_best[2])
        mse = compute_mse(im_pred, im_gt)
        E.append(mse)

        if mse < E_best:
            E_best = mse
            print('E_best improved: {}'.format(E_best))

    return 0
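The acquisition value u computed in the inner loop is the closed-form expected improvement for minimisation. As a standalone helper (a sketch; stats is scipy.stats, as in the code above):

import numpy as np
from scipy import stats

def expected_improvement(e_best, mu, var):
    # E[max(e_best - Y, 0)] for Y ~ N(mu, var), written as
    # sd * (gamma * Phi(gamma) + phi(gamma)) with gamma = (e_best - mu) / sd.
    sd = np.sqrt(var)
    gamma = (e_best - mu) / sd
    return sd * (gamma * stats.norm.cdf(gamma) + stats.norm.pdf(gamma))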
Example #16
    def __init__(self,
                 n,
                 rho,
                 boundary,
                 holes=None,
                 maxIterations=100,
                 fracTol=1.0e-3,
                 tessellationFileName=None,
                 nNodePerh=2.01,
                 offset=(0.0, 0.0),
                 rejecter=None):

        assert n > 0
        if holes is None:  # avoid the mutable-default-argument pitfall
            holes = []

        # Did we get passed a function or a constant for the density?
        if isinstance(rho, float):

            def rhofunc(posi):
                return rho
        else:
            rhofunc = rho
        self.rhofunc = rhofunc

        # Build a polytope PLC version of the boundary.
        plc = poly.polytope.PLC2d()
        plc_coords = poly.vector_of_double()
        edges = boundary.edges
        vertices = boundary.vertices()
        plc.facets.resize(len(edges))
        for i, edge in enumerate(edges):
            plc.facets[i].append(edge.first)
            plc.facets[i].append(edge.second)
            assert len(plc.facets[i]) == 2
        for p in vertices:
            plc_coords.append(p[0])
            plc_coords.append(p[1])
        assert len(plc_coords) == 2 * len(vertices)

        # Add any holes to the boundary PLC.
        plc.holes.resize(len(holes))
        for ihole, hole in enumerate(holes):
            offlast = len(plc_coords) // 2
            edges = hole.edges
            vertices = hole.vertices()
            plc.holes[ihole].resize(len(edges))
            for i, edge in enumerate(edges):
                plc.holes[ihole][i].append(offlast + edge.first)
                plc.holes[ihole][i].append(offlast + edge.second)
                assert len(plc.holes[ihole][i]) == 2
            for p in vertices:
                plc_coords.append(p[0])
                plc_coords.append(p[1])
            assert len(plc_coords) % 2 == 0

        # Initialize the desired number of generators in the boundary using the Sobol sequence.
        generators = poly.vector_of_double()
        seed = 0
        length = max(boundary.xmax.x - boundary.xmin.x,
                     boundary.xmax.y - boundary.xmin.y)
        while len(generators) < 2 * n:
            [coords, seed] = i4_sobol(2, seed)
            p = boundary.xmin + length * Vector2d(coords[0], coords[1])
            ihole = 0
            use = boundary.contains(p, False)
            if use:
                while use and ihole < len(holes):
                    use = not holes[ihole].contains(p, False)
                    ihole += 1
            if use:
                generators.append(p.x)
                generators.append(p.y)
        assert len(generators) == 2 * n

        # Iterate the points toward centroidal relaxation.
        self.tessellation = poly.polytope.Tessellation2d()
        tessellator = poly.polytope.BoostTessellator2d()
        iteration = 0
        maxDelta = 2.0 * fracTol
        while iteration < maxIterations and maxDelta > fracTol:
            tessellator.tessellate(points=generators,
                                   PLCpoints=plc_coords,
                                   geometry=plc,
                                   mesh=self.tessellation)
            new_generators = self.computeWeightedCentroids(self.tessellation)
            assert len(new_generators) == len(generators)
            maxDelta = 0.0
            for i in range(len(generators) // 2):
                deltai = sqrt((generators[2 * i] - new_generators[2 * i])**2 +
                              (generators[2 * i + 1] -
                               new_generators[2 * i + 1])**2)
                maxDelta = max(maxDelta, deltai / length)
                generators[2 * i] = 0.5 * (generators[2 * i] +
                                           new_generators[2 * i])
                generators[2 * i + 1] = 0.5 * (generators[2 * i + 1] +
                                               new_generators[2 * i + 1])
            iteration += 1
            print "CentroidalGenerator2d: Iteration %i, maxDelta=%g" % (
                iteration, maxDelta)

        # If requested, write out the final tessellation to a silo file.
        if tessellationFileName:
            poly.polytope.writeTessellation2d(mesh=self.tessellation,
                                              filePrefix=tessellationFileName,
                                              nodeFields=None,
                                              edgeFields=None,
                                              faceFields=None,
                                              cellFields=None,
                                              cycle=iteration)

        # Now we can fill out the usual Spheral generator info.
        assert len(self.tessellation.cells) == n
        self.x, self.y, self.m, self.H = [], [], [], []
        centroids = self.computeWeightedCentroids(self.tessellation)
        masses = self.computeMasses(self.tessellation)
        areas = self.computeAreas(self.tessellation)
        assert len(centroids) == 2 * n
        assert len(masses) == n
        assert len(areas) == n
        for i in range(n):
            self.x.append(centroids[2 * i] + offset[0])
            self.y.append(centroids[2 * i + 1] + offset[1])
            self.m.append(masses[i])
            hi = nNodePerh * sqrt(areas[i] / pi)
            assert hi > 0.0
            self.H.append(SymTensor2d(1.0 / hi, 0.0, 0.0, 1.0 / hi))
        assert len(self.x) == n
        assert len(self.y) == n
        assert len(self.m) == n
        assert len(self.H) == n

        # If the user provided a "rejecter", give it a pass
        # at the nodes.
        if rejecter:
            self.x, self.y, self.m, self.H = rejecter(self.x, self.y, self.m,
                                                      self.H)

        # Have the base class break up the serial node distribution
        # for parallel cases.
        NodeGeneratorBase.__init__(self, True, self.x, self.y, self.m, self.H)
        return
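The while loop above is a damped Lloyd iteration: each generator is averaged halfway toward its cell's weighted centroid, and the loop stops once the largest relative move falls below fracTol. Schematically (pseudocode, not from the source):

# while iteration < max_iterations and max_delta > frac_tol:
#     centroids  = weighted_centroids(tessellate(generators))
#     generators = 0.5 * (generators + centroids)   # damped half-step
#     max_delta  = largest relative displacement this sweep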
Example #17
    def forward(self, input, h_t, c_t, h_t2, c_t2):
        h_t, c_t = self.lstm1(input, (h_t, c_t))
        h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
        output = self.linear(h_t2)
        return [output, h_t, c_t, h_t2, c_t2]


criterion = nn.L1Loss()

backprop_stepss = []
import sobol
number_of_samples = 30
parameterUpperLimits = np.array([100])  # upper and lower limits were swapped in the original
parameterLowerLimits = np.array([20])
for i in range(number_of_samples):
    x = sobol.i4_sobol(1, i)[0] * (parameterUpperLimits -
                                   parameterLowerLimits) + parameterLowerLimits
    backprop_stepss.append(x)

backprop_stepss = np.concatenate(backprop_stepss, axis=0)


def lstm_network(backprop_steps, train_data_input):
    if use_gpu:
        power_lstm = Sequence()
        power_lstm.double()
        power_lstm.cuda()
        optimizer = optim.Adam(power_lstm.parameters(), lr=0.01)
        for epoch in range(0, 25):
            if use_gpu:
                h_t = Variable(
Example #18
def i4_sobol_generate(dim_num, n, skip):
    r = np.full((n, dim_num), np.nan)
    for j in range(n):
        seed = j + skip  # seed is the element index, so the returned next seed is unused
        r[j, 0:dim_num], _ = i4_sobol(dim_num, seed)
    return r
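Because the loop resets seed = j + skip on every pass, skip simply offsets the starting index. For example:

X = i4_sobol_generate(2, 8, skip=1)
# X[0] is the element at index 1, i.e. [0.5, 0.5] per the test tables above;
# skip=1 avoids the all-zeros element at index 0.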
Example #19
    def gpo(self, sm, sys, thSys):

        #=====================================================================
        # Initialisation
        #=====================================================================

        # Set initial settings
        sm.calcGradientFlag = False
        sm.calcHessianFlag = False
        self.nPars = thSys.nParInference
        self.filePrefix = thSys.filePrefix
        runNextIter = True
        self.iter = 0

        # Check algorithm settings and set to default if needed
        setSettings(self, "gpo")

        # Make a grid to evaluate the EI on
        l = np.array(self.lowerBounds[0:thSys.nParInference], dtype=np.float64)
        u = np.array(self.upperBounds[0:thSys.nParInference], dtype=np.float64)

        # Allocate vectors
        AQ = np.zeros((self.maxIter + 1, 1))
        mumax = np.zeros((self.maxIter + 1))
        thp = np.zeros((self.maxIter + 1, self.nPars))
        obp = np.zeros((self.maxIter + 1, 1))
        thhat = np.zeros((self.maxIter, self.nPars))
        thhatHessian = np.zeros((self.maxIter, self.nPars, self.nPars))
        obmax = np.zeros((self.maxIter + 1, 1))
        hyperParams = np.zeros((self.maxIter, 3 + self.nPars))
        xhatf = np.zeros((self.maxIter + 1, sys.T))

        #=====================================================================
        # Pre-run using random sampling to estimate hyperparameters
        #=====================================================================

        # Pre-allocate vectors
        thPre = np.zeros((self.preIter, self.nPars))
        obPre = np.zeros((self.preIter, 1))

        #=====================================================================
        # Pre-sampling loop
        #=====================================================================

        # Pre-compute hypercube points if required
        if (self.preSamplingMethod == "latinHyperCube"):
            lhd = lhs(self.nPars, samples=self.preIter)

        for kk in range(0, self.preIter):

            # Sampling parameters using uniform sampling or Latin hypercubes
            if (self.preSamplingMethod == "latinHyperCube"):
                # Sample parameters using a Latin hypercube over the parameter
                # bounds
                thPre[kk, :] = l + (u - l) * lhd[kk, :]
            elif (self.preSamplingMethod == "sobol"):
                # Sample parameters using a Sobol sequence over the parameter
                # bounds
                thPre[kk, :] = l + (u - l) * i4_sobol(self.nPars, 100 + kk)[0]
            else:
                # Draw parameters uniformly over the parameter bounds
                thPre[kk, :] = l + (u - l) * np.random.random(self.nPars)

            # Evaluate the objective function in the parameters
            thSys.storeParameters(thPre[kk, :], sys)
            obPre[kk], tmp1 = self.evaluateObjectiveFunction(sm, sys, thSys)

            # Transform and save the parameters
            thSys.transform()
            thPre[kk, :] = thSys.returnParameters()[0:thSys.nParInference]

            # Write out progress if requested
            if (self.verbose):
                print("gpo: Pre-iteration: " + str(kk) + " of " + str(self.preIter) + " completed, sampled " +
                      str(np.round(thPre[kk, :], 3)) + " with " + str(np.round(obPre[kk], 2)) + ".")

        #=====================================================================
        # Fit the GP regression
        #=====================================================================

        # Remove nan values for the objective function
        idxNotNaN = ~np.isnan(obPre)
        thPre = thPre[(idxNotNaN).any(axis=1)]
        obPre = obPre[(idxNotNaN).any(axis=1)]

        # Specify the kernel ( Matern52 with ARD plus bias kernel to compensate
        # for non-zero mean )
        kernel = GPy.kern.Matern52(
            input_dim=self.nPars, ARD=True) + GPy.kern.Bias(input_dim=self.nPars)

        # Normalize the objective function evaluations
        ynorm = (obPre - np.mean(obPre)) / np.sqrt(np.var(obPre))

        # Create the model object
        m = GPy.models.GPRegression(thPre, ynorm, kernel, normalizer=False)

        #=====================================================================
        # Update hyperparameters
        #=====================================================================

        # Set constraints on hyperparameters
        m.Gaussian_noise.variance.constrain_bounded(0.01, 10.0)
        m.kern.Mat52.lengthscale.constrain_bounded(0.01, 10.0)
        m.kern.Mat52.variance.constrain_bounded(0.01, 25.0)

        # Run empirical Bayes to estimate the hyperparameters
        m.optimize('bfgs', max_iters=200)
        m.optimize_restarts(num_restarts=10, robust=True)
        self.GaussianNoiseVariance = np.array(
            m.Gaussian_noise.variance, copy=True)

        #=====================================================================
        # Write to output
        #=====================================================================

        self.thPre = thPre
        self.obPre = obPre
        self.m = m

        #=====================================================================
        # Main loop
        #=====================================================================

        # Save the initial parameters
        thSys.storeParameters(self.initPar, sys)
        thp[self.iter, :] = thSys.returnParameters()
        thSys.transform()

        while (runNextIter):

            # Store the parameter
            thSys.storeParameters(thp[self.iter, :], sys)
            thSys.transform()

            #------------------------------------------------------------------
            # Evaluate the objective function
            #------------------------------------------------------------------
            obp[self.iter], xhatf[self.iter,
                                  :] = self.evaluateObjectiveFunction(sm, sys, thSys)

            # Collect the sampled data (if the objective is finite)
            idxNotNaN = ~np.isnan(obp[range(self.iter), :])
            x = np.vstack((thPre, thp[(idxNotNaN).any(axis=1)]))
            y = np.vstack((obPre, obp[(idxNotNaN).any(axis=1)]))

            #------------------------------------------------------------------
            # Fit the GP to the sampled data
            #------------------------------------------------------------------
            ynorm = (y - np.mean(y)) / np.sqrt(np.var(y))
            self.ynormMean = np.mean(y)
            self.ynormVar = np.var(y)

            m = GPy.models.GPRegression(x, ynorm, kernel, normalizer=False)

            #------------------------------------------------------------------
            # Re-estimate the hyperparameters
            #------------------------------------------------------------------
            if (np.remainder(self.iter + 1, self.EstimateHyperparametersInterval) == 0):

                # Set constraints on hyperparameters
                m.Gaussian_noise.variance.constrain_bounded(0.01, 10.0)
                m.kern.Mat52.lengthscale.constrain_bounded(0.01, 10.0)
                m.kern.Mat52.variance.constrain_bounded(0.01, 25.0)

                # Run empirical Bayes to estimate the hyperparameters
                m.optimize('bfgs', max_iters=200)
                m.optimize_restarts(num_restarts=10, robust=True)

                # Save the current noise variance
                self.GaussianNoiseVariance = np.array(
                    m.Gaussian_noise.variance, copy=True)

            else:

                # Overload current noise estimate (sets to 1.0 every time we
                # add data otherwise)
                m.Gaussian_noise.variance = self.GaussianNoiseVariance

            # Save all the hyperparameters
            hyperParams[self.iter, 0] = np.array(
                m.Gaussian_noise.variance, copy=True)
            hyperParams[self.iter, 1] = np.array(
                m.kern.bias.variance, copy=True)
            hyperParams[self.iter, 2] = np.array(
                m.kern.Mat52.variance, copy=True)
            hyperParams[self.iter, range(
                3, 3 + self.nPars)] = np.array(m.kern.Mat52.lengthscale, copy=True)

            #------------------------------------------------------------------
            # Find the maximum expected value of the GP over the sampled parameters
            #------------------------------------------------------------------
            Mup, ys2 = m.predict(x)
            mumax[self.iter] = np.max(Mup)

            #------------------------------------------------------------------
            # Compute the next point in which to sample the posterior
            #------------------------------------------------------------------

            # Optimize the AQ function
            aqThMax, aqMax, ierror = solve(self.AQfunction, l, u, user_data=(
                m, mumax[self.iter], self.epsilon), maxf=1000, maxT=1000)

            # Jitter the parameter estimates
            if (self.jitterParameters == True):
                flag = 0.0

                while (flag == 0.0):
                    z = np.random.multivariate_normal(np.zeros(self.nPars), self.jitteringCovariance[
                                                      range(self.nPars), :][:, range(self.nPars)])
                    flag = self.checkProposedParameters(aqThMax + z)

                thSys.storeParameters(aqThMax + z, sys)
                aqThMax += z

            # Set the new point and save the estimate of the AQ
            thp[self.iter + 1, :] = aqThMax
            AQ[self.iter + 1] = -aqMax

            # Update counter
            self.iter += 1

            #------------------------------------------------------------------
            # Check exit conditions
            #------------------------------------------------------------------

            # AQ function criteria
            if (AQ[self.iter] < self.tolLevel):
                print("GPO: reaches tolLevel, so exiting...")
                runNextIter = False

            # Max iteration criteria
            if (self.iter == self.maxIter):
                print("GPO: reaches maxIter, so exiting...")
                runNextIter = False

            #------------------------------------------------------------------
            # Estimate the current parameters by maximizing the GP
            #------------------------------------------------------------------
            if ((self.EstimateThHatEveryIteration == True) | (runNextIter == False)):
                thhatCurrent, obmaxCurrent, ierror = solve(
                    self.MUeval, l, u, user_data=m, algmethod=1, maxf=1000, maxT=1000)

                thhat[self.iter - 1, :] = thhatCurrent
                obmax[self.iter - 1, :] = obmaxCurrent

                print((thhatCurrent, obmaxCurrent))

                if (self.EstimateHessianEveryIteration == True):
                    self.estimateHessian(thhatCurrent)
                    thhatHessian[self.iter - 1, :, :] = self.invHessianEstimate

            #------------------------------------------------------------------
            # Print output to console
            #------------------------------------------------------------------
            if (self.verbose):
                if (self.EstimateThHatEveryIteration == True):
                    parm = ["%.4f" % v for v in thhat[self.iter - 1, :]]
                    print(
                        "##############################################################################################")
                    print("Iteration: " + str(self.iter) + " with current parameters: " +
                          str(parm) + " and AQ: " + str(np.round(AQ[self.iter], 2)))
                    print(
                        "##############################################################################################")
                else:
                    parm = ["%.4f" % v for v in thp[self.iter - 1, :]]
                    print(
                        "##############################################################################################")
                    print("Iteration: " + str(self.iter) + " sampled objective function at parameters: " +
                          str(parm) + " with value: " + str(np.round(obp[self.iter - 1], 2)))
                    print(
                        "##############################################################################################")

        #=====================================================================
        # Generate output
        #=====================================================================
        tmp = range(self.iter - 1)
        self.ob = obmax[tmp]
        self.th = thhat[tmp, :]
        self.thhat = thhat[self.iter - 1, :]
        self.thHessian = thhatHessian
        self.thhatHessian = thhatHessian[self.iter - 1, :, :]
        self.aq = AQ[range(self.iter)]
        self.obp = obp[tmp]
        self.thp = thp[range(self.iter), :]
        self.m = m
        self.x = x
        self.y = y
        self.xhatf = xhatf
        self.ynorm = ynorm
        self.hp = hyperParams
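Note the Sobol branch in the pre-sampling loop starts at seed 100 + kk rather than 0, skipping the earliest points (including the all-zeros element). That step in isolation, as a sketch using the same i4_sobol:

import numpy as np

def presample_sobol(l, u, n_iter, offset=100):
    # One row per pre-iteration, each scaled into the bounds [l, u].
    l = np.asarray(l, dtype=float)
    u = np.asarray(u, dtype=float)
    return np.array([l + (u - l) * i4_sobol(len(l), offset + kk)[0]
                     for kk in range(n_iter)])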
Example #20
    def __init__(self,
                 ndim,
                 n,
                 rho,
                 boundary,
                 gradrho,
                 holes,
                 centroidFrac,
                 maxIterations,
                 fracTol,
                 tessellationBaseDir,
                 tessellationFileName,
                 nNodePerh,
                 randomseed,
                 maxNodesPerDomain,
                 seedPositions,
                 enforceConstantMassPoints,
                 cacheFileName):

        assert ndim in (2,3)
        assert n > 0

        # Load our handy aliases.
        if ndim == 2:
            import Spheral2d as sph
        else:
            import Spheral3d as sph

        # Did we get passed a function or a constant for the density?
        if type(rho) in (float, int):
            def rhofunc(posi):
                return rho
            rhomax = rho
        else:
            rhofunc = rho
            rhomax = None
        self.rhofunc = rhofunc

        # Some useful geometry.
        box = boundary.xmax - boundary.xmin
        length = box.maxElement()
        boundvol = boundary.volume
        for hole in holes:
            boundvol -= hole.volume
        if boundvol <= 0.0:
            # The holes were not entirely contained in the bounding volume, so we punt.
            boundvol = 0.5*boundary.volume
        boxvol = 1.0
        for idim in range(ndim):
            boxvol *= box[idim]
        fracOccupied = min(1.0, boxvol/boundvol)
        assert fracOccupied > 0.0 and fracOccupied <= 1.0

        # If there is a pre-existing cache file, load it instead of doing all the work.
        if not self.restoreState(cacheFileName):

            # Create a temporary NodeList we'll use store and update positions.
            eos = sph.GammaLawGasMKS(2.0, 2.0)
            WT = sph.TableKernel(sph.NBSplineKernel(7), 1000)
            hmax = 2.0*(boundvol/pi*n)**(1.0/ndim)
            nodes = sph.makeFluidNodeList("tmp generator nodes", 
                                          eos,
                                          hmin = 1e-10,
                                          hmax = hmax,
                                          kernelExtent = WT.kernelExtent,
                                          hminratio = 1.0,
                                          nPerh = nNodePerh,
                                          topGridCellSize = 2.0*WT.kernelExtent*hmax)
        
            # Make a first pass looking for the maximum density (roughly).
            pos = nodes.positions()
            mass = nodes.mass()
            rhof = nodes.massDensity()
            H = nodes.Hfield()
            imin, imax = self.globalIDRange(n)
            nlocal = imax - imin
            nodes.numInternalNodes = nlocal
        
            # If the user provided the starting or seed positions, use 'em.
            if seedPositions is not None:
                hi = min(hmax, 2.0 * (boundvol/(pi*n))**(1.0/ndim))
                assert hi > 0.0
                nlocal = len(seedPositions)
                assert mpi.allreduce(nlocal, mpi.SUM) == n
                nodes.numInternalNodes = nlocal
                for i in range(nlocal):
                    pos[i] = seedPositions[i]
                    rhoi = rhofunc(pos[i])
                    rhof[i] = rhoi
                    mass[i] = rhoi * boundvol/n  # Not actually correct, but mass will be updated in centroidalRelaxNodes
                    H[i] = sph.SymTensor.one / hi
        
            else:
                # If necessary probe for a maximum density statistically.
                rangen = random.Random(randomseed + mpi.rank)
                if not rhomax:
                    rhomax = 0.0
                    nglobal = 0
                    while nglobal < n:
                        p = boundary.xmin + length*sph.Vector(rangen.random(), rangen.random(), rangen.random())
                        use = boundary.contains(p, False)
                        if use:
                            ihole = 0
                            while use and ihole < len(holes):
                                use = not holes[ihole].contains(p, True)
                                ihole += 1
                        if use:
                            rhomax = max(rhomax, rhofunc(p))
                            i = 1
                        else:
                            i = 0
                        nglobal += mpi.allreduce(i, mpi.SUM)
                    rhomax = mpi.allreduce(rhomax, mpi.MAX)
                print "MedialGenerator: selected a maximum density of ", rhomax
            
                # It's a bit tricky to properly use the Sobol sequence in parallel.  We handle this by searching for the lowest
                # seeds that give us the desired number of points.
                seeds = []
                seed = 0
                while mpi.allreduce(len(seeds), mpi.SUM) < n:
                    localseed = seed + mpi.rank
                    [coords, newseed] = i4_sobol(ndim, localseed)
                    p = boundary.xmin + length*sph.Vector(*tuple(coords))
                    use = boundary.contains(p, False)
                    if use:
                        ihole = 0
                        while use and ihole < len(holes):
                            use = not holes[ihole].contains(p, True)
                            ihole += 1
                    if use:
                        rhoi = rhofunc(p)
                        if rangen.random() < rhoi/rhomax:
                            seeds.append(localseed)
                    seed += mpi.procs
            
                # Drop the highest value seeds to ensure we have the correct number of total points.
                nglobal = mpi.allreduce(len(seeds), mpi.SUM)
                assert n + mpi.procs >= nglobal
                seeds.sort()
                seeds = [-1] + seeds
                while mpi.allreduce(len(seeds), mpi.SUM) > n + mpi.procs:
                    maxseed = mpi.allreduce(seeds[-1], mpi.MAX)
                    assert maxseed > -1
                    if seeds[-1] == maxseed:
                        seeds = seeds[:-1]
                seeds = seeds[1:]
            
                # Load balance the number of seeds per domain.
                if len(seeds) > nlocal:
                    extraseeds = seeds[nlocal:]
                else:
                    extraseeds = []
                extraseeds = mpi.allreduce(extraseeds, mpi.SUM)
                seeds = seeds[:nlocal]
                for iproc in range(mpi.procs):
                    ngrab = max(0, nlocal - len(seeds))
                    ntaken = mpi.bcast(ngrab, root=iproc)
                    if mpi.rank == iproc:
                        seeds += extraseeds[:ngrab]
                    extraseeds = extraseeds[ntaken:]
                assert len(extraseeds) == 0
                assert len(seeds) == nlocal
                assert mpi.allreduce(len(seeds), mpi.SUM) == n
            
                # Initialize the desired number of generators in the boundary using the Sobol sequence.
                hi = min(hmax, 2.0 * (boundvol/(pi*n))**(1.0/ndim))
                assert hi > 0.0
                for i, seed in enumerate(seeds):
                    [coords, newseed] = i4_sobol(ndim, seed)
                    p = boundary.xmin + length*sph.Vector(*tuple(coords))
                    rhoi = rhofunc(p)
                    pos[i] = p
                    rhof[i] = rhoi
                    mass[i] = rhoi * boundvol/n  # Not actually correct, but mass will be updated in centroidalRelaxNodes
                    H[i] = sph.SymTensor.one / hi
        
                # Each domain has independently generated the correct number of points, but they are randomly distributed.
                # Before going further it's useful to try and spatially collect the points by domain.
                # We'll use the Spheral Peano-Hilbert space filling curve implementation to do this.
                if mpi.procs > 1:
                    db = sph.DataBase()
                    db.appendNodeList(nodes)
                    maxNodes = max(maxNodesPerDomain, 2 * n // mpi.procs)
                    redistributor = sph.PeanoHilbertOrderRedistributeNodes(2.0)
                    redistributor.redistributeNodes(db)
        
            # If we're in parallel we need the parallel boundary.
            if mpi.procs > 1:
                boundaries = [sph.TreeDistributedBoundary.instance()]
            else:
                boundaries = []
        
            # Iterate the points toward centroidal relaxation.
            vol, surfacePoint = centroidalRelaxNodes([(nodes, boundary, holes)],
                                                     W = WT,
                                                     rho = rhofunc,
                                                     gradrho = gradrho,
                                                     boundaries = boundaries,
                                                     fracTol = fracTol,
                                                     centroidFrac = centroidFrac,
                                                     maxIterations = maxIterations,
                                                     tessellationBaseDir = tessellationBaseDir,
                                                     tessellationFileName = tessellationFileName)
        
            # Store the values the descendent generators will need.
            self.vol, self.surface, self.pos, self.m, self.H = [], [], [], [], []
            for i in range(nodes.numInternalNodes):
                self.vol.append(vol(0,i))
                self.surface.append(surfacePoint(0,i))
                self.pos.append(sph.Vector(pos[i]))
                self.m.append(vol(0,i) * rhofunc(pos[i]))
                self.H.append(sph.SymTensor(H[i]))
            assert mpi.allreduce(len(self.vol), mpi.SUM) == n
            assert mpi.allreduce(len(self.surface), mpi.SUM) == n
            assert mpi.allreduce(len(self.pos), mpi.SUM) == n
            assert mpi.allreduce(len(self.m), mpi.SUM) == n
            assert mpi.allreduce(len(self.H), mpi.SUM) == n
        
            # If requested, enforce constant mass points.
            if enforceConstantMassPoints:
                msum = mpi.allreduce(sum([0.0] + self.m), mpi.SUM)
                self.m = [msum/n]*len(self.pos)

            # If requested, we can store the state of the generator such that it can be
            # later restored without going through all that work.
            if cacheFileName:
                self.dumpState(cacheFileName)

        return
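The density thinning inside the seed search is rejection sampling: a candidate is kept with probability rho(p) / rhomax, so the accepted points follow the target density. A minimal serial sketch of that step (names hypothetical):

import random

def thin_by_density(points, rhofunc, rhomax, rng=None):
    # Keep each point with probability rho(p) / rhomax (rejection sampling).
    rng = rng or random.Random(0)
    return [p for p in points if rng.random() < rhofunc(p) / rhomax]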