def testSplineCurveInverseIsCorrect(self):
  """Tests that the inverse curve is indeed the inverse of the curve."""
  x_knot = np.arange(0, 16, 0.01, dtype=np.float64)
  with self.session():
    alpha = distribution.inv_partition_spline_curve(x_knot).eval()
    x_recon = distribution.partition_spline_curve(alpha).eval()
  self.assertAllClose(x_recon, x_knot)
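
The same round-trip check can be written against the eager-style API used in the example below (a minimal sketch, not part of the original test; it assumes `distribution` exposes the same two functions as eager-compatible ops):

def check_spline_curve_inverse_eagerly():
  # Round-trip x -> alpha -> x through the inverse curve and the curve.
  x_knot = np.arange(0, 16, 0.01, dtype=np.float64)
  alpha = distribution.inv_partition_spline_curve(x_knot).numpy()
  x_recon = distribution.partition_spline_curve(alpha).numpy()
  np.testing.assert_allclose(x_recon, x_knot)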
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  # Parameters governing how the x coordinate of the spline will be laid out.
  # We will construct a spline with knots at
  #   [0 : 1 / x_scale : x_max],
  # by fitting it to values sampled at
  #   [0 : 1 / (x_scale * redundancy) : x_max]
  x_max = 12
  x_scale = 1024
  redundancy = 4  # Must be >= 2 for the spline to be useful.

  spline_spacing = 1. / (x_scale * redundancy)
  x_knots = np.arange(
      0, x_max + spline_spacing, spline_spacing, dtype=np.float64)
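  # Hypothetical sanity check (not in the original): the sampling grid has
  # x_max * x_scale * redundancy + 1 = 49153 points, with `redundancy`
  # samples per knot interval.
  assert len(x_knots) == x_max * x_scale * redundancy + 1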
  table = []
  # We iterate over knots, and for each knot recover the alpha value
  # corresponding to that knot with inv_partition_spline_curve(), and then
  # with that alpha we accurately approximate its partition function using
  # numerical_base_partition_function().
  for x_knot in x_knots:
    alpha = distribution.inv_partition_spline_curve(x_knot).numpy()
    partition = numerical_base_partition_function(alpha).numpy()
    table.append((x_knot, alpha, partition))
    print(table[-1])

  table = np.array(table)
  x = table[:, 0]
  alpha = table[:, 1]
  y_gt = np.log(table[:, 2])

  # We grab the values from the true log-partition table that correspond to
  # knots, by looking for where x * x_scale is an integer.
  mask = np.abs(np.round(x * x_scale) - (x * x_scale)) <= 1e-8
  values = y_gt[mask]

  # Initialize `tangents` using a central differencing scheme: interior
  # tangents are (values[i+1] - values[i-1]) / 2, with the endpoints padded
  # by linear extrapolation (2 * v[0] - v[1] and 2 * v[-1] - v[-2]).
  values_pad = np.concatenate(
      [[2. * values[0] - values[1]], values, [2. * values[-1] - values[-2]]],
      0)
  tangents = (values_pad[2:] - values_pad[:-2]) / 2.
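  # Hypothetical sanity check (not in the original): with this padding, the
  # endpoint tangents reduce to one-sided differences.
  assert np.allclose(tangents[0], values[1] - values[0])
  assert np.allclose(tangents[-1], values[-1] - values[-2])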

  # Construct the spline's value and tangent TF variables; the last knot will
  # be constrained during fitting to have a fixed value Z(infinity) and a
  # tangent of zero.
  n = len(values)
  tangents = tf.Variable(tangents, dtype=tf.float64)
  values = tf.Variable(values, dtype=tf.float64)

  # Fit the spline.
  num_iters = 10001

  optimizer = tf.keras.optimizers.SGD(learning_rate=1e-9, momentum=0.99)
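  # Added commentary: the gradient of reduce_max() flows only through the few
  # spline knots that support the current worst-case residual, so each step
  # nudges a handful of parameters. The tiny learning rate and heavy momentum
  # average these updates into something resembling a smooth minimax descent.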

  trace = []
  for ii in range(num_iters):
    with tf.GradientTape() as tape:
      # Trainable variables are watched by the tape automatically, so no
      # explicit tape.watch() call is needed.
      # Fix the endpoint to be a known constant with a zero tangent.
      i_values = tf.where(
          np.arange(n) == (n - 1),
          tf.ones_like(values) * 0.70526025442689566, values)
      i_tangents = tf.where(
          np.arange(n) == (n - 1), tf.zeros_like(tangents), tangents)
      i_y = cubic_spline.interpolate1d(x * x_scale, i_values, i_tangents)
      # We minimize the maximum residual, which makes for a very ugly
      # optimization problem but works well in practice.
      i_loss = tf.reduce_max(tf.abs(i_y - y_gt))
    # Compute and apply the gradients outside of the tape's context, so the
    # gradient computation itself is not recorded onto the tape.
    grads = tape.gradient(i_loss, [values, tangents])
    optimizer.apply_gradients(zip(grads, [values, tangents]))
    trace.append(i_loss.numpy())
    if (ii % 200) == 0:
      print('{:5d}: {:e}'.format(ii, trace[-1]))

  # The loop above leaves i_y as the spline evaluated with the most recent
  # parameters; convert it to a numpy array before masking.
  i_y = i_y.numpy()
  mask = alpha <= 4
  max_error_a4 = np.max(np.abs(i_y[mask] - y_gt[mask]))
  max_error = np.max(np.abs(i_y - y_gt))
  print('Max Error (a <= 4): {:e}'.format(max_error_a4))
  print('Max Error: {:e}'.format(max_error))

  # Just a sanity-check on the error.
  assert max_error_a4 <= 5e-7
  assert max_error <= 5e-7

  # Save the spline to disk.
  np.savez(
      './data/partition_spline.npz',
      x_scale=x_scale,
      values=i_values.numpy(),
      tangents=i_tangents.numpy())
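
For reference, a minimal sketch of reading the saved table back (the keys match the np.savez() call above; everything else here is illustrative):

with np.load('./data/partition_spline.npz') as spline:
  x_scale = spline['x_scale']
  values = spline['values']
  tangents = spline['tangents']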
Example 3
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    # Parameters governing how the x coordinate of the spline will be laid out.
    # We will construct a spline with knots at
    #   [0 : 1 / x_scale : x_max],
    # by fitting it to values sampled at
    #   [0 : 1 / (x_scale * redundancy) : x_max]
    x_max = 12
    x_scale = 1024
    redundancy = 4  # Must be >= 2 for the spline to be useful.

    spline_spacing = 1. / (x_scale * redundancy)
    x_knots = np.arange(0,
                        x_max + spline_spacing,
                        spline_spacing,
                        dtype=np.float64)
    table = []
    with tf.Session() as sess:
        x_knot_ph = tf.placeholder(dtype=tf.float64, shape=())
        alpha_ph = distribution.inv_partition_spline_curve(x_knot_ph)
        partition_ph = numerical_base_partition_function(alpha_ph)
        # We iterate over knots, and for each knot recover the alpha value
        # corresponding to that knot with inv_partition_spline_curve(), and then
        # with that alpha we accurately approximate its partition function using
        # numerical_base_partition_function().
        for x_knot in x_knots:
            alpha, partition = sess.run((alpha_ph, partition_ph),
                                        {x_knot_ph: x_knot})
            table.append((x_knot, alpha, partition))
            print(table[-1])

    table = np.array(table)
    x = table[:, 0]
    alpha = table[:, 1]
    y_gt = np.log(table[:, 2])

    # We grab the values from the true log-partition table that correspond to
    # knots, by looking for where x * x_scale is an integer.
    mask = np.abs(np.round(x * x_scale) - (x * x_scale)) <= 1e-8
    values = y_gt[mask]

    # Initialize `tangents` using a central differencing scheme: interior
    # tangents are (values[i+1] - values[i-1]) / 2, with the endpoints padded
    # by linear extrapolation (2 * v[0] - v[1] and 2 * v[-1] - v[-2]).
    values_pad = np.concatenate(
        [[2. * values[0] - values[1]], values,
         [2. * values[-1] - values[-2]]], 0)
    tangents = (values_pad[2:] - values_pad[:-2]) / 2.

    # Construct the spline's value and tangent TF variables, constraining the
    # last knot to have a fixed value Z(infinity) and a tangent of zero.
    n = len(values)
    tangents = tf.Variable(tangents, dtype=tf.float64)
    tangents = tf.where(
        np.arange(n) == (n - 1), tf.zeros_like(tangents), tangents)

    values = tf.Variable(values, dtype=tf.float64)
    values = tf.where(
        np.arange(n) == (n - 1),
        tf.ones_like(values) * 0.70526025442689566, values)

    # Interpolate into the spline.
    y = cubic_spline.interpolate1d(x * x_scale, values, tangents)

    # We minimize the maximum residual, which makes for a very ugly
    # optimization problem but works well in practice, and the maximum error
    # is the quantity we most care about.
    loss = tf.reduce_max(tf.abs(y - y_gt))

    # Fit the spline.
    num_iters = 10001
    with tf.Session() as sess:
        global_step = tf.Variable(0, trainable=False)

        opt = tf.train.MomentumOptimizer(learning_rate=1e-9, momentum=0.99)
        step = opt.minimize(loss, global_step=global_step)
        sess.run(tf.global_variables_initializer())

        trace = []
        for ii in range(num_iters):
            _, i_loss, i_values, i_tangents, i_y = sess.run(
                [step, loss, values, tangents, y])
            trace.append(i_loss)
            if (ii % 200) == 0:
                print('%5d: %e' % (ii, i_loss))

    mask = alpha <= 4
    print('Max Error (a <= 4): %e' % np.max(np.abs(i_y[mask] - y_gt[mask])))
    print('Max Error: %e' % np.max(np.abs(i_y - y_gt)))

    # Save the spline to disk.
    np.savez('./data/partition_spline.npz',
             x_scale=x_scale,
             values=i_values,
             tangents=i_tangents)
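
A hedged sketch of how the fitted spline would then be queried: assuming, consistent with how the table above is constructed, that log(Z(alpha)) is approximated by the spline evaluated at partition_spline_curve(alpha) * x_scale, the lookup is:

def approx_log_partition(alpha, values, tangents, x_scale=1024):
    # Map alpha onto the spline's x coordinate, then interpolate. Row x of
    # the table above is partition_spline_curve(alpha) and the fitted target
    # is log(Z(alpha)), so this inverts the construction.
    x = distribution.partition_spline_curve(alpha)
    return cubic_spline.interpolate1d(x * x_scale, values, tangents)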