Example #1
def fit_kernel(x=[],
               y=[],
               num_kernels=1,
               kernel=None,
               lamb=None,
               extrapolate="none",
               extrap_range=0.1):
    import numpy as np
    from apal_cxx import PyKernelRegressor

    allowed_extrapolations = ["none", "linear"]
    if extrapolate not in allowed_extrapolations:
        raise ValueError("extrapolate has to be one of {}"
                         "".format(allowed_extrapolations))

    if extrapolate == "linear":
        # Linearly extrapolate the data below the first point
        extrap_range = extrap_range * (x[-1] - x[0])
        dx = x[1] - x[0]
        dy = y[1] - y[0]
        slope = dy / dx
        x_extrap = np.arange(x[0] - extrap_range, x[0], dx)
        y_extrap = y[0] + slope * (x_extrap - x[0])
        x = np.concatenate((x_extrap, x))
        y = np.concatenate((y_extrap, y))

        # Linearly extrapolate the data beyond the last point
        # (start at x[-1] + dx to avoid duplicating the last data point)
        dx = x[-1] - x[-2]
        dy = y[-1] - y[-2]
        slope = dy / dx
        x_extrap = np.arange(x[-1] + dx, x[-1] + extrap_range, dx)
        y_extrap = y[-1] + slope * (x_extrap - x[-1])
        x = np.concatenate((x, x_extrap))
        y = np.concatenate((y, y_extrap))

    regressor = PyKernelRegressor(np.min(x), np.max(x))

    coeff = np.zeros(num_kernels)
    regressor.set_kernel(kernel)
    regressor.set_coeff(coeff)

    if num_kernels >= len(x):
        raise ValueError("The number of kernels has to be lower than "
                         "the number of points!")

    matrix = np.zeros((len(x), num_kernels))

    for i in range(num_kernels):
        matrix[:, i] = regressor.evaluate_kernel(i, x)

    if lamb is None:
        # Ordinary least-squares fit
        coeff = np.linalg.lstsq(matrix, y, rcond=None)[0]
    else:
        # Tikhonov (ridge) regularised fit via the SVD of the kernel matrix:
        # coeff = V diag(d / (lamb + d^2)) U^T y
        u, d, vh = np.linalg.svd(matrix, full_matrices=False)
        D = np.diag(d / (lamb + d**2))
        Uy = u.T.dot(y)
        DUy = D.dot(Uy)
        coeff = vh.T.dot(DUy)
    regressor.set_coeff(coeff)
    return regressor
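
A minimal usage sketch of fit_kernel. The sample data, kernel width, number of kernels, and the assumption that PyGaussianKernel is importable from apal_cxx are illustrative, not taken from the source:

import numpy as np
from apal_cxx import PyGaussianKernel

x = np.linspace(0.0, 1.0, 50)
y = x**2 - 0.5 * x

kernel = PyGaussianKernel(0.1)
regressor = fit_kernel(x=x, y=y, num_kernels=20, kernel=kernel,
                       lamb=1E-8, extrapolate="linear")
y_fit = regressor.evaluate(x)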
Example #2
    def test_chglrealspace3D(self):
        chgl = self.get_chgl3D()
        chgl.build3D()

        # Initialize a regression kernel and a regressor
        kernel = PyGaussianKernel(2.0)
        regressor = PyKernelRegressor(0.0, 1.0)

        # Initialize a two-phase Landau polynomial
        landau = PyTwoPhaseLandau()

        # Transfer the kernel and the regressor
        regressor.set_kernel(kernel)
        landau.set_kernel_regressor(regressor)

        # Initialize 4D polynomial (concentration, shape1, shape2, shape3)
        poly = PyPolynomial(4)
        landau.set_polynomial(poly)
        chgl.set_free_energy(landau)

        chgl.run(5, 1000)
Example #3
def main():
    #prefix = "data/almgsi_chgl_random_seed_strain_noise2/chgl"
    prefix = "data/almgsi_chgl_3D_surface_1nm_64_strain_consistent/chgl"
    dx = 10.0  # Discretisation in angstrom
    dim = 3
    L = 64
    num_gl_fields = 3
    M = 0.1
    alpha = 5.0
    dt = 1.0
    gl_damping = M

    coeff, terms = get_polyterms(FNAME)

    poly = PyPolynomial(4)

    with open(FNAME, 'r') as infile:
        info = json.load(infile)

    kernel = PyGaussianKernel(info["kernel"]["std_dev"])
    regressor = PyKernelRegressor(info["kernel_regressor"]["xmin"],
                                  info["kernel_regressor"]["xmax"])
    regressor.set_kernel(kernel)
    regressor.set_coeff(info["kernel_regressor"]["coeff"])
    grad_coeff = info["gradient_coeff"]

    for item in info["terms"]:
        c = item["coeff"]
        powers = item["powers"]
        poly.add_term(c, PyPolynomialTerm(powers))
        print(c, powers)

    # Scale the gradient coefficients by the grid spacing squared so that
    # they refer to the discretised (lattice-unit) grid
    alpha = grad_coeff[0] / dx**2
    b1 = grad_coeff[1] / dx**2
    b2 = grad_coeff[2] / dx**2
    # Anisotropic gradient coefficients, presumably one row per GL field
    gradient_coeff = [[b2, b1, b2], [b1, b2, b2], [b2, b2, b1]]
    print(gradient_coeff)

    chgl = PyCHGLRealSpace(dim, L, prefix, num_gl_fields, M, alpha, dt,
                           gl_damping, gradient_coeff)

    landau = PyTwoPhaseLandau()
    landau.set_polynomial(poly)
    landau.set_kernel_regressor(regressor)
    landau.set_discontinuity(info["discontinuity_conc"],
                             info["discontinuity_jump"])

    chgl.set_free_energy(landau)
    #chgl.from_npy_array(precipitates_square(L))
    chgl.use_adaptive_stepping(1E-10, 1, 0.05)
    chgl.build3D()
    add_strain(chgl)
    chgl.from_file(prefix + "00000053000.grid")

    chgl.run(500000, 5000, start=53000)
    chgl.save_free_energy_map(prefix + "_free_energy_map.grid")
Example #4
    def test_run(self):
        chgl = self.get_chgl()
        chgl.print_polynomial()
        chgl.random_initialization([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
        with self.assertRaises(RuntimeError):
            chgl.run(5, 1000)

        # We set a free energy form
        landau = PyTwoPhaseLandau()

        # This polynomial has the wrong size; make sure
        # that an exception is raised
        poly = PyPolynomial(2)
        regressor = PyKernelRegressor(0.0, 1.0)
        kernel = PyGaussianKernel(2.0)

        # Case 1: Fail because no kernel is set
        with self.assertRaises(ValueError):
            chgl.set_free_energy(landau)

        # Case 2: Fail because no polynomial is set
        regressor.set_kernel(kernel)
        landau.set_kernel_regressor(regressor)
        with self.assertRaises(ValueError):
            chgl.set_free_energy(landau)

        poly = PyPolynomial(3)
        landau.set_polynomial(poly)
        chgl.set_free_energy(landau)

        try:
            chgl.run(5, 1000)
        except RuntimeError as exc:
            # The only way run should raise a runtime error at this stage is
            # if FFTW is not installed
            self.assertTrue("CHGL requires FFTW!" in str(exc))
Example #5
    def test_to_dict(self):

        width = 2.0
        kernel = PyGaussianKernel(width)
        regressor = PyKernelRegressor(0.0, 1.0)
        coeff = np.linspace(0.0, 10.0, 100)
        regressor.set_coeff(coeff)
        regressor.set_kernel(kernel)

        dict_repr = regressor.to_dict()
        self.assertAlmostEqual(0.0, dict_repr["xmin"])
        self.assertAlmostEqual(1.0, dict_repr["xmax"])
        self.assertEqual("gaussian", dict_repr["kernel_name"])
        self.assertTrue(np.allclose(coeff, dict_repr["coeff"]))
Example #6
    def test_kernel_regressor(self):

        width = 2.0
        kernel = PyQuadraticKernel(width)

        coeff = [0.0, 2.0, -3.0, 0.0]
        regressor = PyKernelRegressor(-12.0, 12.0)
        regressor.set_kernel(kernel)
        regressor.set_coeff(coeff)

        # Centres of kernels 1 and 2 (the kernels are presumably placed on
        # an evenly spaced grid over the domain [-12, 12])
        center1 = -4.0
        center2 = 4.0
        self.assertAlmostEqual(regressor.evaluate(center1),
                               coeff[1] * kernel.evaluate(0.0))
        self.assertAlmostEqual(regressor.evaluate(center2),
                               coeff[2] * kernel.evaluate(0.0))
        self.assertAlmostEqual(regressor.deriv(center1),
                               coeff[1] * kernel.deriv(0.0))
        self.assertAlmostEqual(regressor.deriv(center2),
                               coeff[2] * kernel.deriv(0.0))

        # Try outside domain
        self.assertAlmostEqual(regressor.evaluate(1000.0), 0.0)
        self.assertAlmostEqual(regressor.evaluate(-1000.0), 0.0)
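
The assertions above suggest that the regressor evaluates a superposition of one kernel per coefficient, with the kernel centres spread over the domain given to the constructor. A rough pure-Python sketch of that behaviour follows; the even spacing of the centres and the displacement argument passed to the kernel are assumptions inferred from the test, not taken from the library source:

import numpy as np

def kernel_regression_sketch(x, coeff, kernel, xmin, xmax):
    # One kernel per coefficient, centres assumed evenly spaced on [xmin, xmax]
    centers = np.linspace(xmin, xmax, len(coeff))
    return sum(c * kernel.evaluate(x - x0) for c, x0 in zip(coeff, centers))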
Example #7
    def test_from_dict(self):
        width = 2.0
        kernel = PyGaussianKernel(width)
        regressor = PyKernelRegressor(0.0, 1.0)
        coeff = np.linspace(0.0, 10.0, 100)
        regressor.set_coeff(coeff)
        regressor.set_kernel(kernel)

        # Evaluate at points
        x_values = [0.0, 2.0, -2.0, 5.0]
        y_values_orig = regressor.evaluate(x_values)

        dict_repr = regressor.to_dict()

        regressor.from_dict(dict_repr)
        y_values_new = regressor.evaluate(x_values)
        self.assertTrue(np.allclose(y_values_orig, y_values_new))

        # Verify that an exception is raised if the wrong kernel name is passed
        dict_repr["kernel_name"] = "quadratic"
        with self.assertRaises(ValueError):
            regressor.from_dict(dict_repr)