Example #1
    def micro_step(self):
        """Constrained optimization on a hypersphere."""
        eye = np.eye(self.displacement.size)

        gradient = self.mw_gradient
        gradient_diff = gradient - self.prev_grad
        coords_diff = self.mw_coords - self.prev_coords
        self.prev_grad = gradient
        # Without copy we would only store the reference...
        self.prev_coords = self.mw_coords.copy()

        dH, _ = bfgs_update(self.mw_hessian, coords_diff, gradient_diff)
        self.mw_hessian += dH
        # The mass-weighted Hessian is symmetric, so use eigh instead of eig.
        eigvals, eigvecs = np.linalg.eigh(self.mw_hessian)

        def lambda_func(lambda_):
            # Eq. (11) in [1]
            # (H - λI)^-1
            hmlinv = np.linalg.pinv(self.mw_hessian - eye * lambda_,
                                    rcond=1e-6)
            # (g - λp)
            glp = gradient - self.displacement * lambda_
            tmp = self.displacement - hmlinv.dot(glp)
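            # Root condition: the updated displacement p + dx must lie on
            # the hypersphere of radius step_length/2 around the pivot.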
            return tmp.dot(tmp) - 0.25 * (self.step_length**2)

        # Initial guess for λ.
        # λ must be smaller than the smallest eigenvalue.
        lambda_ = np.sort(eigvals)[0]
        lambda_ *= 1.5 if (lambda_ < 0) else 0.5
        # Find the root with scipy
        lambda_ = newton(lambda_func, lambda_, maxiter=500)

        # Calculate dx from optimized lambda
        dx = -np.linalg.solve(self.mw_hessian - lambda_ * eye,
                              gradient - lambda_ * self.displacement)
        self.displacement += dx
        self.mw_coords += dx

        displ_norm = np.linalg.norm(self.displacement)
        # Component of the gradient perpendicular to the displacement,
        # i.e. tangent to the hypersphere.
        tangent = (gradient
                   - gradient.dot(self.displacement) / displ_norm**2
                   * self.displacement)

        return dx, tangent
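
All five examples feed a step difference dx and a gradient difference dg into the same `bfgs_update` helper, which returns the Hessian increment dH rather than the updated Hessian itself. Its body is not shown on this page; a minimal self-contained sketch consistent with the calls above (the `(dH, info)` return shape is an inference, not the actual pysisyphus code) would be:

import numpy as np

def bfgs_update(H, dx, dg):
    """Standard BFGS update. Returns the increment dH, so the caller
    does ``H += dH``. Sketch only -- the real helper may guard against
    small denominators and report a different second value."""
    first = np.outer(dg, dg) / dg.dot(dx)
    Hdx = H.dot(dx)
    second = np.outer(Hdx, Hdx) / dx.dot(Hdx)
    return first - second, "BFGS"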
Example #2
def run_bare_rfo(xyz_fn, charge, mult, trust=0.3, max_cycles=150):
    geom = geom_from_library(xyz_fn, coord_type="redund")
    # geom = geom_from_library(xyz_fn)
    geom.set_calculator(XTB(pal=4, charge=charge, mult=mult))
    # geom.set_calculator(Gaussian16(pal=3, route="HF 3-21G", charge=charge, mult=mult))
    grads = list()
    steps = list()
    H = geom.get_initial_hessian()
    converged = False
    for i in range(max_cycles):
        grad = -geom.forces
        grads.append(grad)

        if i > 0:
            dx = steps[-1]
            dg = grads[-1] - grads[-2]
            dH, _ = bfgs_update(H, dx, dg)
            H += dH
        H_proj = H.copy()
        if geom.internal:
            H_proj = geom.internal.project_hessian(H_proj)
        step = rfo(grad, H_proj, trust=trust)
        steps.append(step)
        max_g = np.abs(grad).max()
        rms_g = np.sqrt(np.mean(grad**2))
        max_s = np.abs(step).max()
        rms_s = np.sqrt(np.mean(step**2))
        print(f"{i:02d}: max(f)={max_g:.6f}, rms(f)={rms_g:.6f}, "
              f"max(step)={max_s:.6f}, rms(step)={rms_s:.6f}")
        converged = ((max_g < 4.5e-4) and (rms_g < 1.5e-4) and (max_s < 1.8e-3)
                     and (rms_s < 1.2e-3))
        if converged:
            print("Converged")
            break
        new_coords = geom.coords + step
        geom.coords = new_coords
    return converged, i + 1
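
`rfo(grad, H_proj, trust=trust)` is only called here, never defined. A bare-bones rational function optimization step with a crude trust-radius cap, sketched under the assumption of that signature (the actual pysisyphus routine handles step restriction differently), could look like:

import numpy as np

def rfo(gradient, H, trust=0.3):
    """RFO step from the lowest eigenvector of the augmented Hessian."""
    # Augmented Hessian: [[H, g], [g^T, 0]].
    aug = np.block([[H, gradient[:, None]],
                    [gradient[None, :], np.zeros((1, 1))]])
    eigvals, eigvecs = np.linalg.eigh(aug)
    # Lowest eigenvector; renormalize so the last component equals 1.
    lowest = eigvecs[:, 0]
    step = lowest[:-1] / lowest[-1]
    norm = np.linalg.norm(step)
    if norm > trust:  # naive cap at the trust radius
        step *= trust / norm
    return step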
Example #3
    def step(self):
        mw_gradient = self.mw_gradient

        if len(self.irc_mw_gradients) > 1:
            dg = self.irc_mw_gradients[-1] - self.irc_mw_gradients[-2]
            dx = self.irc_mw_coords[-1] - self.irc_mw_coords[-2]
            dH, _ = bfgs_update(self.mw_hessian, dx, dg)
            self.mw_hessian += dH

        eigenvalues, eigenvectors = np.linalg.eigh(self.mw_hessian)
        # Drop small eigenvalues and corresponding eigenvectors
        small_vals = np.abs(eigenvalues) < 1e-8
        eigenvalues = eigenvalues[~small_vals]
        eigenvectors = eigenvectors[:, ~small_vals]

        # t step for numerical integration
        dt = 1 / self.N_euler * self.step_length / np.linalg.norm(mw_gradient)

        # Transform gradient to eigensystem of the hessian
        mw_gradient_trans = eigenvectors.T @ mw_gradient

        t = dt
        cur_length = 0
        for i in range(self.N_euler):
            dsdt = np.sqrt(
                np.sum(mw_gradient_trans**2 * np.exp(-2 * eigenvalues * t)))
            cur_length += dsdt * dt
            if cur_length > self.step_length:
                break
            t += dt
        alphas = (np.exp(-eigenvalues * t) - 1) / eigenvalues
        A = eigenvectors @ np.diag(alphas) @ eigenvectors.T
        step = A @ mw_gradient

        mw_coords = self.mw_coords.copy()
        self.mw_coords = mw_coords + step
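
The `alphas` line is the closed-form solution of the IRC equation dx/dt = -g(x) in the local quadratic model: along Hessian eigenmode i, x_i(t) = g_i * (exp(-lam_i * t) - 1) / lam_i, so the Euler loop above is only needed to pick t. A quick self-contained check on a toy 2D quadratic surface (made-up numbers, not part of the original code) confirms the formula against brute-force explicit Euler:

import numpy as np

H = np.diag([1.5, 0.4])        # toy quadratic PES: V(x) = 0.5 x^T H x + g0^T x
g0 = np.array([0.8, -0.3])
t = 0.2

# Closed form used above: step_i = g_i * (exp(-lam_i * t) - 1) / lam_i
eigvals, eigvecs = np.linalg.eigh(H)
g_trans = eigvecs.T @ g0
analytic = eigvecs @ ((np.exp(-eigvals * t) - 1) / eigvals * g_trans)

# Brute-force explicit Euler on dx/dt = -(g0 + H x), x(0) = 0
x = np.zeros(2)
dt = 1e-5
for _ in range(int(t / dt)):
    x -= (g0 + H @ x) * dt

print(np.allclose(analytic, x, atol=1e-4))  # True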
Example #4
    def micro_step(self, counter):
        """Constrained optimization on a hypersphere."""

        # Calculate gradient at current coordinates
        gradient = self.mw_gradient
        self.log(f"\tnorm(mw_grad)={np.linalg.norm(gradient):.6f}")

        # Interpolation proposed in the paper (Eq. (12) - (15)).
        # Does not seem to help.
        if self.line_search and (counter > 0):
            pivot_coords = self.pivot_coords[-1]
            p_prev = self.prev_coords - pivot_coords  # p"
            p_cur = self.mw_coords - pivot_coords  # p'
            g_prev = self.prev_grad  # g"
            g_cur = gradient  # g' (the mass-weighted gradient from above)
            g_prev_p = self.perp_component(g_prev, p_prev)
            g_cur_p = self.perp_component(g_cur, p_cur)
            g_prev_p_norm = np.linalg.norm(g_prev_p)
            g_cur_p_norm = np.linalg.norm(g_cur_p)
            # Angle between p_prev and p_cur
            theta_prime = np.arccos(
                p_prev.dot(p_cur)
                / (np.linalg.norm(p_prev) * np.linalg.norm(p_cur)))  # θ'
            theta = (g_prev_p_norm * theta_prime
                     / (g_prev_p_norm - g_cur_p_norm))  # θ
            theta_quot = theta / theta_prime
            cos_theta = cos(theta)
            sin_theta = sin(theta)
            cos_theta_prime = cos(theta_prime)
            sin_theta_prime = sin(theta_prime)
            sin_quot = sin_theta / sin_theta_prime
            # Interpolated quantities
            g_interp = g_prev * (1 - theta_quot) + g_cur * theta_quot
            p_interp = (p_prev * (cos_theta - sin_quot * cos_theta_prime) +
                        p_cur * sin_quot)
            x_interp = pivot_coords + p_interp
            gradient = g_interp
            self.mw_coords = x_interp
            self.displacement = p_interp

        gradient_diff = gradient - self.prev_grad
        coords_diff = self.mw_coords - self.prev_coords
        # Update previous quantities.
        self.prev_coords = self.mw_coords.copy()
        self.prev_grad = gradient.copy()

        # Recalculate Hessian
        if (self.hessian_recalc
                # and (self.micro_counter > 0)
                and (self.micro_counter % self.hessian_recalc == 0)):
            self.mw_hessian = self.geometry.mw_hessian
        # Or update Hessian
        else:
            dH, _ = bfgs_update(self.mw_hessian, coords_diff, gradient_diff)
            self.mw_hessian += dH
        eigvals, eigvecs = np.linalg.eigh(self.mw_hessian)

        constraint = (0.5 * self.step_length)**2
        big = np.abs(eigvals) > 1e-8
        big_eigvals = eigvals[big]
        big_eigvecs = eigvecs[:, big]
        grad_star = big_eigvecs.T.dot(gradient)
        displ_star = big_eigvecs.T.dot(self.displacement)

        def get_dx(lambda_):
            """In basis of Hessian eigenvectors."""
            return -(grad_star - lambda_ * displ_star) / (big_eigvals - lambda_)

        def on_sphere(lambda_):
            p = displ_star + get_dx(lambda_)
            return p.dot(p) - constraint

        # Initial guess for λ.
        # λ must be smaller than the smallest eigenvalue.
        lambda_0 = big_eigvals[0]
        lambda_0 *= 1.5 if (lambda_0 < 0) else 0.5
        self.log(
            f"\tSmallest eigenvalue is {big_eigvals[0]:.4f}, λ_0={lambda_0:.4f}."
        )
        # Find the root with scipy
        lambda_ = newton(on_sphere, lambda_0, maxiter=500)
        self.log(f"\tDetermined λ={lambda_0:.4f} from Newtons method.")

        # Calculate dx from optimized lambda in basis of Hessian eigenvectors and
        # transform back to mass-weighted Cartesians.
        dx = big_eigvecs.dot(get_dx(lambda_))
        self.displacement += dx
        self.mw_coords += dx

        grad_tangent_to_sphere = self.perp_component(gradient,
                                                     self.displacement)
        self.micro_counter += 1

        dx_norm = np.linalg.norm(dx)
        grad_norm = np.linalg.norm(grad_tangent_to_sphere)
        self.log(f"\tnorm(dx)={dx_norm:.6f}")
        self.log(f"\tgradient tangent to sphere={grad_norm:.6f}")

        return dx, grad_tangent_to_sphere
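
`self.perp_component` is used twice above but not shown in the excerpt. From context (the component of a vector perpendicular to the current displacement, i.e. tangent to the hypersphere) it is presumably a plain projection; an assumed implementation, not the actual pysisyphus method:

def perp_component(self, vec, along):
    """Component of vec perpendicular to 'along' (assumed sketch)."""
    return vec - vec.dot(along) / along.dot(along) * along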
Example #5
def run():
    # pysis_in = np.loadtxt("ref_rsa/in_hess")
    # this_in = np.loadtxt("test_in_hess")
    # geom = geom_from_library("codein.xyz")
    # geom = geom_from_xyz_file("ref_rsa/codeine.xyz")
    geom = geom_from_library("birkholz/vitamin_c.xyz")
    # geom.coords = shake_coords(geom.coords, seed=25032019)
    calc = XTB(charge=0, mult=1, pal=4)
    geom.set_calculator(calc)
    from pysisyphus.optimizers.RSAlgorithm import RSAlgorithm
    rsa_kwargs = {
        "hessian_recalc": 5,
    }
    opt = RSAlgorithm(geom, **rsa_kwargs)
    opt.run()
    return
    # NOTE: Everything below is an older manual RS optimization loop that
    # the early return above leaves unreached; kept for reference.
    # g = geom.gradient
    # np.savetxt("gradient", g)
    # H = geom.hessian
    H = np.eye(geom.coords.size)
    np.savetxt("test_in_hess", H)
    # H = np.loadtxt("hessian")
    # g = np.loadtxt("gradient")
    # H = geom.hessian
    # from pysisyphus.calculators.AnaPot import AnaPot
    # pot = AnaPot()
    # geom = AnaPot.get_geom((0, 3, 0))
    # H = geom.hessian
    # H = np.eye(geom.coords.shape[0]) / 2
    trust = 0.3
    max_cycles = 50

    coords = list()
    steps = list()
    grads = list()
    for i in range(max_cycles):
        coords.append(geom.coords.copy())
        g = geom.gradient
        grads.append(g)
        grad_norm = np.linalg.norm(g)
        if i > 0:
            dg = grads[-1] - grads[-2]
            dx = steps[-1]
            dH, _ = bfgs_update(H, dx, dg)
            H = H + dH
            # H = geom.hessian
        rms_g = np.sqrt(np.mean(g**2))
        print(f"Cycle {i:02d}: norm(g)={grad_norm:.4e}, rms(g)={rms_g:.6f}")
        if grad_norm < 1e-3:
            print("Converged")
            break
        step = rsa(H, g, trust)
        new_coords = geom.coords + step
        geom.coords = new_coords
        steps.append(step)
    with open("opt.xyz", "w") as handle:
        handle.write(geom.as_xyz())
    coords = np.array(coords)
    # pot.plot()
    # ax = pot.ax
    # ax.plot(*coords.T[:2], "bo-")
    plt.show()
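
`rsa(H, g, trust)` in the unreached loop is again a black box. The restricted-step algorithm takes the plain Newton step when it fits inside the trust radius, and otherwise level-shifts the Hessian, solving (H - lam*I) s = -g for the lam that puts ||s|| exactly on the trust-region boundary. A compact sketch assuming that signature (the hard case of a vanishing gradient component along the lowest mode is ignored here; this is not the pysisyphus implementation):

import numpy as np
from scipy.optimize import bisect

def rsa(H, g, trust):
    """Restricted-step (trust-region) Newton step via eigendecomposition."""
    eigvals, eigvecs = np.linalg.eigh(H)
    g_trans = eigvecs.T @ g

    def step_norm(lambda_):
        return np.linalg.norm(g_trans / (eigvals - lambda_))

    # Pure Newton step if the Hessian is positive definite and the step fits.
    if eigvals[0] > 0 and step_norm(0.0) <= trust:
        lambda_ = 0.0
    else:
        # The shift must lie below the lowest eigenvalue; there ||s(lam)||
        # shrinks monotonically as lam decreases, so bracket and bisect.
        upper = eigvals[0] - 1e-8
        lower = upper - 1.0
        while step_norm(lower) > trust:
            lower -= 2 * (upper - lower)
        lambda_ = bisect(lambda lam: step_norm(lam) - trust, lower, upper)
    return eigvecs @ (-g_trans / (eigvals - lambda_))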