def scan_for_critical_points(
    problem, starting_points, stationarity_threshold=1e-4,
    mdnewton=True, debug=True,
    *problem_extra_args, **problem_extra_kwargs):
  """Scans for critical points of a scalar function.

  For each starting point, first minimizes the stationarity-violation of the
  potential; candidates whose residual stationarity is small enough are then
  optionally refined with multi-dimensional Newton iteration on the gradient
  of the potential itself.

  Args:
    problem: The potential-function specifying the problem.
    starting_points: iterable with starting points to start the search from.
    stationarity_threshold: Upper bound on permissible post-optimization
      stationarity for a solution to be considered good.
    mdnewton: Whether to refine each candidate solution with multi-dimensional
      Newton iteration on the potential's gradient before yielding it.
    debug: Whether to print newly found solutions right when they are
      discovered.
      NOTE(review): currently unused in the body — confirm intent.
    problem_extra_args: Extra positional arguments for the problem-function.
    problem_extra_kwargs: Extra keyword arguments for the problem-function.

  Yields:
    A `Solution` numerical solution.
  """
  def f_problem(pos):
    # Close over the extra args so downstream helpers see a 1-arg function.
    return problem(pos, *problem_extra_args, **problem_extra_kwargs)
  tf_stat_func = m_util.tf_stationarity(f_problem)
  tf_grad_stat_func = m_util.tf_grad(tf_stat_func)
  tf_grad_pot_func = None
  tf_jacobian_pot_func = None
  if mdnewton:
    # Newton refinement needs gradient and Jacobian of the potential.
    tf_grad_pot_func = m_util.tf_grad(f_problem)
    tf_jacobian_pot_func = m_util.tf_jacobian(tf_grad_pot_func)
  for x0 in starting_points:
    val_opt, xs_opt = m_util.tf_minimize(
        tf_stat_func, x0, tf_grad_func=tf_grad_stat_func, precise=False)
    if val_opt > stationarity_threshold:
      continue  # with next starting point.
    # We found a point that apparently is close to a critical point.
    t_xs_opt = tf.constant(xs_opt, dtype=tf.float64)
    if not mdnewton:
      yield Solution(potential=f_problem(t_xs_opt).numpy(),
                     stationarity=tf_stat_func(t_xs_opt).numpy(),
                     pos=xs_opt)
      continue  # with next solution.
    # We could use MDNewton to force each gradient-component
    # of the stationarity condition to zero. It is however
    # more straightforward to instead do this directly
    # for the gradient of the potential.
    *_, xs_opt_mdnewton = m_util.tf_mdnewton(
        f_problem,
        t_xs_opt,
        maxsteps=4,
        debug_func=None,
        tf_grad_func=tf_grad_pot_func,
        tf_jacobian_func=tf_jacobian_pot_func)
    t_xs_opt_mdnewton = tf.constant(xs_opt_mdnewton, dtype=tf.float64)
    yield Solution(potential=f_problem(t_xs_opt_mdnewton).numpy(),
                   stationarity=tf_stat_func(t_xs_opt_mdnewton).numpy(),
                   pos=xs_opt_mdnewton)
def _reduce_second_m35(m35s, m35c, is_diagonal_35s, seed=0):
  """Reduces the 2nd 35-irrep.

  Given that one of the two 35-irrep matrices is already diagonal, rotates the
  other one with the residual symmetry that stabilizes the diagonal member,
  minimizing the magnitude of its off-diagonal entries.

  Args:
    m35s: The 35s-irrep matrix.
    m35c: The 35c-irrep matrix.
    is_diagonal_35s: Whether `m35s` (rather than `m35c`) is the diagonal one.
    seed: Seed for the random symmetry-breaking initialization.

  Returns:
    A pair `(m35s, m35c)` in which the previously non-diagonal member has been
    replaced by its rotated (more diagonal) form.
  """
  diag = numpy.diagonal(m35s if is_diagonal_35s else m35c)
  gens = _get_generators_for_reducing_second_m35(
      diag,
      'gsS,sScC->gcC' if is_diagonal_35s else 'gcC,sScC->gsS',
      algebra.spin8.gamma_sscc)
  num_gens = len(gens)
  if num_gens == 0:
    return m35s, m35c  # No residual symmetry to exploit.
  # This residual symmetry is typically rather small.
  # So, doing a direct minimization is perhaps appropriate.
  rng = numpy.random.RandomState(seed=seed)
  v_coeffs_initial = rng.normal(
      scale=1e-3, size=(num_gens,))  # Break symmetry with noise.
  #
  # @tf.function  # TODO(tfish): Activate once compilation is fast.
  def tf_get_m35_rotated(t_coeffs):
    """Rotates the non-diagonal 35-irrep by exp(sum_i coeffs[i] * gens[i])."""
    tc_gens = tf.constant(gens, dtype=tf.float64)
    tc_m35 = tf.constant(m35c if is_diagonal_35s else m35s, dtype=tf.float64)
    t_rot = tf.linalg.expm(tf.einsum('i,iab->ab', t_coeffs, tc_gens))
    return tf.einsum('Ab,Bb->AB',
                     tf.einsum('ab,Aa->Ab', tc_m35, t_rot),
                     t_rot)
  #
  # @tf.function  # TODO(tfish): Activate once compilation is fast.
  def tf_get_loss(t_coeffs):
    t_m35_rotated = tf_get_m35_rotated(t_coeffs)
    # Our 'loss' is the sum of magnitudes of the off-diagonal parts after
    # rotation.
    return (tf.norm(t_m35_rotated, ord=1) -
            tf.norm(tf.linalg.diag_part(t_m35_rotated), ord=1))
  # Only the minimizing position matters here; the achieved loss value is
  # deliberately discarded.
  _, opt_pos = m_util.tf_minimize(
      tf_get_loss,
      tf.constant(v_coeffs_initial, dtype=tf.float64),
      precise=False)
  m_diag = tf_get_m35_rotated(tf.constant(opt_pos, dtype=tf.float64)).numpy()
  return (m35s, m_diag) if is_diagonal_35s else (m_diag, m35c)