def get_scalar_mass_matrix(v70):
  """Returns the spin-0 mass matrix at position v70."""
  tf_scalar_evaluator = scalar_sector_tensorflow.get_tf_scalar_evaluator()
  t_v70 = tf.constant(v70, dtype=tf.complex128)
  sinfo0 = tf_scalar_evaluator(t_v70)
  tc_v70_from_v70o = tf.constant(algebra.e7.v70_from_v70o)
  def tf_potential_relative(t_left_onb):
    sinfo = tf_scalar_evaluator(
      t_v70,
      t_left=tf.cast(tf.einsum('vV,V->v', tc_v70_from_v70o, t_left_onb),
                     tf.complex128))
    return sinfo.potential
  scalar_mm = m_util.tf_hessian(tf_potential_relative)(
    tf.constant(numpy.zeros(70), dtype=tf.float64)).numpy()
  return (scalar_mm *
          # Factor -3/8 (rather than -3/4) is due to normalization of
          # our orthonormal basis.
          (-3.0 / 8) / sinfo0.potential.numpy())
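# Usage sketch: `v70_solution` is a hypothetical 70-vector describing a
# critical point of the potential; the eigenvalues of the returned
# (symmetric) matrix are the normalized scalar masses-squared at that point.
def _example_scalar_masses(v70_solution):
  mass_matrix = get_scalar_mass_matrix(v70_solution)
  return sorted(numpy.linalg.eigvalsh(mass_matrix))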
def call_with_scalar_mass_matrix_evaluator(f, *args):
  """Returns f(evaluator, *args), with `evaluator` a mass matrix evaluator.

  Here, `evaluator` is a TensorFlow-based evaluator function
  that maps a 70-vector to a scalar mass matrix.

  We then pass this evaluator on to a function that gets run in TensorFlow
  session context. This way, we can bulk-process solutions.
  """
  graph = tf.Graph()
  with graph.as_default():
    tf_scalar_evaluator = scalar_sector_tensorflow.get_tf_scalar_evaluator()
    t_left_onb = tf.Variable(  # Uses orthonormal basis.
        initial_value=numpy.zeros([70]),
        trainable=False,
        dtype=tf.float64)
    t_input = tf.compat.v1.placeholder(tf.float64, shape=[70])
    t_v70 = tf.Variable(initial_value=numpy.zeros([70]),
                        trainable=False,
                        dtype=tf.float64)
    op_assign_input = tf.compat.v1.assign(t_v70, t_input)
    sinfo = tf_scalar_evaluator(
        tf.cast(t_v70, tf.complex128),
        t_left=tf.cast(
            tf.einsum('vV,V->v', tf.constant(algebra.e7.v70_from_v70o),
                      t_left_onb), tf.complex128))
    t_potential = sinfo.potential
    t_scalar_mass_matrix = (
        tf.math.real(
            tf.compat.v1.hessians([t_potential], [t_left_onb])[0]) *
        # Factor -3/8 (rather than -3/4) is due to normalization of
        # our orthonormal basis.
        (-3.0 / 8) / t_potential)
    with tf.compat.v1.Session() as sess:
      sess.run([tf.compat.v1.global_variables_initializer()])

      def evaluator(v70):
        sess.run([op_assign_input], feed_dict={t_input: v70})
        return sess.run([t_scalar_mass_matrix])[0]

      return f(evaluator, *args)
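# Usage sketch: bulk-compute scalar mass matrices for a list of 70-vectors
# while reusing a single TensorFlow graph and session; `v70s` is a
# hypothetical iterable of solution vectors.
def _example_bulk_mass_matrices(v70s):
  return call_with_scalar_mass_matrix_evaluator(
      lambda evaluator, vectors: [evaluator(v) for v in vectors], v70s)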
def refine_model_gradient_descent(low_dimensional_model,
                                  log10_stop_quality=-24,
                                  report_on=lambda n: n % 1000 == 0):
  """Refines a low-dimensional model via basic gradient descent."""
  if len(low_dimensional_model.params) == 0:
    return low_dimensional_model
  target_stationarity = 10.0**log10_stop_quality
  v70 = numpy.dot(low_dimensional_model.v70_from_params,
                  numpy.array(low_dimensional_model.params,
                              dtype=mpmath.mpf)).astype(float)
  tf_scalar_evaluator = scalar_sector_tensorflow.get_tf_scalar_evaluator()
  sinfo0 = tf_scalar_evaluator(tf.constant(v70, tf.float64))
  pot0 = sinfo0.potential.numpy()
  def still_good(potential):
    return abs(pot0 - potential) < 1e-4
  def pot_stat_grad(v70):
    """Returns (potential, stationarity, d_stationarity/d_v70) at v70."""
    t_v70 = tf.constant(v70, dtype=tf.float64)
    with tf.GradientTape() as tape:
      tape.watch(t_v70)
      sinfo = tf_scalar_evaluator(t_v70)
    return (sinfo.potential.numpy(),
            sinfo.stationarity.numpy(),
            tape.gradient(sinfo.stationarity, t_v70).numpy())
  def do_gradient_steps(num_steps, v70_start,
                        learning_rate=1e-5,
                        max_acceptable_stationarity=numpy.inf,
                        report_on=lambda n: False):
    v70 = v70_start
    for n in range(num_steps):
      n_pot, n_stat, n_grad = pot_stat_grad(v70)
      if n_stat > max_acceptable_stationarity:
        raise BadStationarity()
      if report_on(n):
        print('[GradDesc] %3d: p=%.16g s=%.8g' % (n, n_pot, n_stat))
      v70 -= learning_rate * n_grad
    return n_pot, n_stat, v70
  v70_now = v70
  stat_now = sinfo0.stationarity.numpy()
  learning_rate = 1e-5
  can_increase_learning_rate = True
  while True:
    # Fallback entry: if no trial learning rate factor passes the checks
    # below, this reduces the learning rate roughly fifty-fold.
    trial_performances = [(numpy.inf, 0.02)]
    trial_learning_rate_factors = (5.0, 2.0, 1.25, 1.0, 0.8, 0.5, 0.2)
    try:
      for learning_rate_factor in trial_learning_rate_factors:
        if not can_increase_learning_rate and learning_rate_factor > 1:
          continue
        trial_learning_rate = learning_rate * learning_rate_factor
        # Do 10 steps with the trial learning rate.
        pot_stat_pos_log = [
          do_gradient_steps(10, v70_now,
                            learning_rate=trial_learning_rate)]
        # Closely look at what happens over a few more steps.
        for n in range(10):
          pot_stat_pos_log.append(
            do_gradient_steps(1, pot_stat_pos_log[-1][-1],
                              learning_rate=trial_learning_rate))
        if not all(still_good(pot_stat_pos[0])
                   for pot_stat_pos in pot_stat_pos_log):
          continue  # with next learning_rate_factor.
        if not all(psp_prev[1] >= psp[1] for psp_prev, psp in zip(
            pot_stat_pos_log, pot_stat_pos_log[1:])):
          continue  # with next learning_rate_factor.
        trial_performances.append(
          (pot_stat_pos_log[-1][1], learning_rate_factor))
      trial_performances.sort()
      best_factor = trial_performances[0][1]
      learning_rate *= best_factor * 0.9  # Include safety fudge-factor.
      print('[GradDesc] Adjusted learning rate to: %g' % learning_rate)
      pot, stat, v70_next = do_gradient_steps(
        8000, v70_now,
        learning_rate=learning_rate,
        max_acceptable_stationarity=1.1 * stat_now, report_on=report_on)
      if stat <= target_stationarity or learning_rate < 1e-16:
        return pot, stat, v70_next
      if stat < stat_now:
        stat_now = stat
        v70_now = v70_next
        can_increase_learning_rate = True
      else:
        raise BadStationarity()
    except BadStationarity:
      can_increase_learning_rate = False
      "Gradient-descent failed. Reducing learning rate."
      learning_rate *= 0.75
def refine_model_gradient_descent_graph_mode(
    low_dimensional_model,
    log10_stop_quality=-24,
    report_on=lambda n: n % 1000 == 0):
  """Graph-mode (tf.compat.v1) variant of refine_model_gradient_descent."""
  if len(low_dimensional_model.params) == 0:
    return low_dimensional_model
  target_stationarity = 10.0**log10_stop_quality
  v70 = numpy.dot(low_dimensional_model.v70_from_params,
                  numpy.array(low_dimensional_model.params,
                              dtype=mpmath.mpf)).astype(float)
  graph = tf.Graph()
  with graph.as_default():
    tf_scalar_evaluator = scalar_sector_tensorflow.get_tf_scalar_evaluator()
    t_v70 = tf.Variable(
        initial_value=numpy.zeros([70]), trainable=True, dtype=tf.float64)
    sinfo = tf_scalar_evaluator(tf.cast(t_v70, tf.complex128))
    t_potential = sinfo.potential
    #
    t_stationarity = sinfo.stationarity
    t_grad = tf.compat.v1.gradients(t_stationarity, t_v70)[0]
    with tf.compat.v1.Session() as sess:
      sess.run([tf.compat.v1.global_variables_initializer()])
      n_pot0 = sess.run(t_potential, feed_dict={t_v70: v70})
      def still_good(potential):
        return abs(n_pot0 - potential) < 1e-4
      def do_gradient_steps(num_steps, v70_start,
                            learning_rate=1e-5,
                            max_acceptable_stationarity=numpy.inf,
                            report_on=lambda n: False):
        v70 = v70_start.astype(float)
        for n in range(num_steps):
          n_pot, n_stat, n_grad = sess.run(
              [t_potential, t_stationarity, t_grad],
              feed_dict={t_v70: v70})
          if n_stat > max_acceptable_stationarity:
            raise BadStationarity()
          if report_on(n):
            print('[GradDesc] %3d: p=%.16g s=%.8g' % (n, n_pot, n_stat))
          v70 -= learning_rate * n_grad
        return n_pot, n_stat, v70
      def descend_with_adaptive_learning_rate(v70):
        # Use gradient descent with some naive heuristics to adjust the
        # learning rate.
        v70_now = v70
        n_stat_now = sess.run(t_stationarity, feed_dict={t_v70: v70})
        learning_rate = 1e-5
        can_increase_learning_rate = True
        while True:
          # Fallback: If no trial learning rate works, reduce it by a factor 50.
          trial_performances = [(numpy.inf, 0.02)]
          trial_learning_rate_factors = (5.0, 2.0, 1.25, 1.0, 0.8, 0.5, 0.2)
          try:
            for learning_rate_factor in trial_learning_rate_factors:
              if not can_increase_learning_rate and learning_rate_factor > 1:
                continue
              trial_learning_rate = learning_rate * learning_rate_factor
              # Do 10 steps with the trial learning rate.
              pot_stat_pos_log = [
                  do_gradient_steps(10, v70_now,
                                    learning_rate=trial_learning_rate)]
              # Closely look at what happens over a few more steps.
              for n in range(10):
                pot_stat_pos_log.append(
                    do_gradient_steps(1, pot_stat_pos_log[-1][-1],
                                      learning_rate=trial_learning_rate))
              if not all(still_good(pot_stat_pos[0])
                         for pot_stat_pos in pot_stat_pos_log):
                continue  # with next learning_rate_factor.
              if not all(psp_prev[1] >= psp[1] for psp_prev, psp in zip(
                  pot_stat_pos_log, pot_stat_pos_log[1:])):
                continue  # with next learning_rate_factor.
              trial_performances.append(
                (pot_stat_pos_log[-1][1], learning_rate_factor))
            trial_performances.sort()
            best_factor = trial_performances[0][1]
            learning_rate *= best_factor * 0.9  # Include safety fudge-factor.
            print('[GradDesc] Adjusted learning rate to: %g' % learning_rate)
            n_pot, n_stat, n_pos = do_gradient_steps(
                8000, v70_now,
                learning_rate=learning_rate,
                max_acceptable_stationarity=1.1*n_stat_now, report_on=report_on)
            if n_stat <= target_stationarity or learning_rate < 1e-16:
              return n_pot, n_stat, n_pos
            if n_stat < n_stat_now:
              n_stat_now = n_stat
              v70_now = n_pos
              can_increase_learning_rate = True
            else:
              raise BadStationarity()
          except BadStationarity:
            can_increase_learning_rate = False
            "Gradient-descent failed. Reducing learning rate."
            learning_rate *= 0.75
      return descend_with_adaptive_learning_rate(v70)
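# Usage sketch: refine a hypothetical low-dimensional model. The stand-in
# namedtuple below only carries the two attributes these refiners access,
# `v70_from_params` (a [70, k] matrix) and `params` (k mpmath parameters);
# the real model type used elsewhere in the codebase may differ.
def _example_refine(v70_from_params, params):
  import collections
  ExampleModel = collections.namedtuple('ExampleModel',
                                        ['v70_from_params', 'params'])
  model = ExampleModel(v70_from_params=v70_from_params, params=params)
  # Returns (potential, stationarity, refined_v70) once the stationarity
  # drops below 10**log10_stop_quality or the learning rate collapses.
  return refine_model_gradient_descent(model, log10_stop_quality=-18)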