Code Example #1
File: invert_source.py  Project: mc4117/adapt_utils
def gradient(m):
    """
    Compute the gradient of the QoI with respect to the input parameters.
    """
    # Reverse (adjoint) sweep over the ADOL-C tape recorded during the
    # forward run, seeded with 1.0, yielding dJ/dm in a single pass.
    # tape_tag, op and vecnorm come from the enclosing scope.
    dJdm = adolc.fos_reverse(tape_tag, 1.0)
    op.dJdm_progress.append(vecnorm(dJdm, order=np.inf))
    return dJdm
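A callback with this shape is typically handed to a gradient-based optimiser. As a hedged, self-contained illustration of the pattern, the toy below wires analogous `functional`/`gradient` callbacks into SciPy's L-BFGS-B optimiser; the quadratic misfit and the choice of optimiser are assumptions for illustration, not taken from adapt_utils.

# Illustrative only: a quadratic misfit stands in for the PDE-based QoI.
import numpy as np
import scipy.optimize as so

target = np.array([1.0, -2.0, 0.5])   # hypothetical "true" controls
dJdm_progress = []                    # mirrors op.dJdm_progress above

def functional(m):
    # J(m) = ||m - target||^2
    return float(np.sum((m - target)**2))

def gradient(m):
    # dJ/dm = 2 (m - target); log its infinity norm, as in the example
    dJdm = 2.0 * (m - target)
    dJdm_progress.append(np.linalg.norm(dJdm, ord=np.inf))
    return dJdm

opt = so.minimize(functional, np.zeros(3), jac=gradient, method='L-BFGS-B')
print(opt.x, dJdm_progress[-1])
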
Code Example #2
def gradient__save(m):
    """
    Apply the chain rule to both tapes, then print and save optimisation progress to file.
    """
    dJdm = gradient(m)
    g = vecnorm(dJdm, order=np.inf)
    msg = "functional {:15.8e} gradient {:15.8e}"
    print(msg.format(op._J, g))
    op.control_trajectory.append(m)
    op.functional_trajectory.append(op._J)
    op.gradient_trajectory.append(dJdm)
    # fname is a '..._{:s}' filename template from the enclosing scope;
    # np.save appends the '.npy' extension
    np.save(fname.format('ctrl'), op.control_trajectory)
    np.save(fname.format('func'), op.functional_trajectory)
    np.save(fname.format('grad'), op.gradient_trajectory)
    return dJdm
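`vecnorm` appears in every example here. A minimal sketch of its likely behaviour, assuming it is a thin wrapper around `numpy.linalg.norm` over the flattened input; the real adapt_utils helper may differ.

import numpy as np

def vecnorm(x, order=2):
    """l-p norm of a vector; order=np.inf gives the max-norm used above."""
    return float(np.linalg.norm(np.asarray(x, dtype=float).ravel(), ord=order))

print(vecnorm([3.0, -4.0]))                # 5.0
print(vecnorm([3.0, -4.0], order=np.inf))  # 4.0
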
Code Example #3
def derivative_cb_post(j, dj, m):
    """
    Callback for saving progress data to file during discrete adjoint inversion.
    """
    control = [mi.dat.data[0] for mi in m]
    djdm = [dji.dat.data[0] for dji in dj]
    msg = "functional {:15.8e}  gradient {:15.8e}"
    print_output(msg.format(j, vecnorm(djdm, order=np.inf)))

    # Save progress to NumPy arrays on-the-fly
    control_values_opt.append(control)
    func_values_opt.append(j)
    gradient_values_opt.append(djdm)
    np.save(fname.format('ctrl'), np.array(control_values_opt))
    np.save(fname.format('func'), np.array(func_values_opt))
    np.save(fname.format('grad'), np.array(gradient_values_opt))
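A callback with this `(j, dj, m)` signature is the shape pyadjoint expects for `derivative_cb_post` on a `ReducedFunctional`. A hedged sketch of how it might be registered; the functional `J` and the list `control_functions` are placeholders for an enclosing firedrake-adjoint setup not shown in the source.

from firedrake_adjoint import Control, ReducedFunctional, minimize

# J and control_functions are hypothetical: a scalar functional and the
# Functions being inverted for, defined by the surrounding solver setup
controls = [Control(c) for c in control_functions]
rf = ReducedFunctional(J, controls, derivative_cb_post=derivative_cb_post)
m_opt = minimize(rf, method='L-BFGS-B')
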
Code Example #4
def gradient(m):
    """
    Gradient of reduced functional for continuous adjoint inversion.
    """
    # Re-run the forward model unless a checkpoint is already available
    if len(swp.checkpoint) == 0:
        J = reduced_functional(m)
    else:
        J = swp.quantity_of_interest()
    swp.solve_adjoint(checkpointing_mode=chk)
    # Each gradient entry is the L2 inner product of the adjoint solution
    # with one of the source basis functions
    g = np.array([
        assemble(inner(bf, swp.adj_solution) * dx)
        for bf in op.basis_functions
    ])  # TODO: No minus sign?
    if use_regularisation:
        g += op.regularisation_term_gradient
    msg = " functional = {:15.8e}  gradient = {:15.8e}"
    print_output(msg.format(J, vecnorm(g, order=np.inf)))
    return g
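The TODO about the sign can be settled empirically by comparing the adjoint-based gradient against central finite differences of the reduced functional. A generic hedged sketch; the callables and `m0` are whatever the surrounding script provides.

import numpy as np

def fd_check(reduced_functional, gradient, m0, eps=1e-4):
    """Compare each adjoint gradient component against central differences."""
    g = gradient(m0)
    for i in range(len(m0)):
        dm = np.zeros_like(m0)
        dm[i] = eps
        fd = (reduced_functional(m0 + dm) - reduced_functional(m0 - dm)) / (2*eps)
        print("component {:d}: adjoint {: .6e}  fd {: .6e}".format(i, g[i], fd))
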
Code Example #5
for axis in (axes.xaxis, axes.yaxis):
    axis.grid(True, which='major', color='lightgrey')
axes.set_xlabel("Mesh element count")
axes.set_ylabel("Squared timeseries error QoI")
savefig('converged_J', plot_dir, extensions=plot.extensions)

# Plot progress of gradient
print_output("Plotting progress of gradient norm...")
fig, axes = plt.subplots(figsize=(8, 6))
for level in range(levels):
    fname = os.path.join(op.di, 'optimisation_progress_{:s}_{:d}.npy')
    gradient_values_opt = np.load(fname.format('grad', level))
    label = '{:d} elements'.format(op.num_cells[level])
    its = range(1, len(gradient_values_opt) + 1)
    axes.loglog(its,
                [vecnorm(djdm, order=np.inf) for djdm in gradient_values_opt],
                label=label)
axes.set_xticks([1, 10, 100])
# axes.set_yticks([2e3, 7e3])
for axis in (axes.xaxis, axes.yaxis):
    axis.grid(True, which='minor', color='lightgrey')
    axis.grid(True, which='major', color='lightgrey')
axes.set_xlabel("Iteration")
axes.set_ylabel(r"$\ell_\infty$-norm of gradient")
axes.legend(loc='best', fontsize=fontsize_legend)
savefig('optimisation_progress_dJdm_linf',
        plot_dir,
        extensions=plot.extensions)
Code Example #6
def dJdm(m):
    # Gradient of the rescaled least-squares interpolation misfit:
    # dJ/dm = 2 * rescaling * sum_i (g_i . m - f_i) g_i
    djdm = 2 * rescaling * sum([(np.dot(g[i, :], m) - f[i]) * g[i, :]
                                for i in range(N)])
    self.print_debug("INTERPOLATION: gradient = {:8.6e}".format(
        vecnorm(djdm, order=np.inf)))
    return djdm
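This is the analytic gradient of a rescaled least-squares misfit between the linear model `g @ m` and data `f` (an assumption consistent with the loop above). A vectorised statement of the matching objective and gradient:

import numpy as np

def J(m, g, f, rescaling=1.0):
    # J(m) = rescaling * sum_i (g_i . m - f_i)^2
    r = g @ m - f
    return rescaling * float(np.dot(r, r))

def dJdm_vec(m, g, f, rescaling=1.0):
    # dJ/dm = 2 * rescaling * g^T (g @ m - f), equal to the loop in dJdm above
    return 2.0 * rescaling * (g.T @ (g @ m - f))
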
Code Example #7
# Load the optimisation trajectories saved during inversion; fname is a
# '..._{:s}'-style filename template from the enclosing scope
control_trajectory = np.load(fname.format('ctrl') + '.npy')
functional_trajectory = np.load(fname.format('func') + '.npy')
gradient_trajectory = np.load(fname.format('grad') + '.npy')
line_search_trajectory = np.load(fname.format('ls') + '.npy')

# The control trajectory records every functional evaluation, including
# rejected line-search trials; keep only the accepted iterates
i = 0
indices = [0]
for j, ctrl in enumerate(control_trajectory):
    if i == len(line_search_trajectory):
        break
    if np.allclose(ctrl, line_search_trajectory[i]):
        indices.append(j)
        i += 1
functional_trajectory = [functional_trajectory[i] for i in indices]
gradient_trajectory = [gradient_trajectory[i] for i in indices]
gradient_norm_trajectory = [
    vecnorm(g, order=np.inf) for g in gradient_trajectory
]
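A tiny worked example of the filtering above, with made-up values: `control_trajectory` holds every functional evaluation, while `line_search_trajectory` holds only the accepted steps, so matching entries recover the per-iteration indices.

import numpy as np
control_trajectory = [0.0, 0.8, 0.4, 0.4, 0.55, 0.55]   # all evaluations
line_search_trajectory = [0.4, 0.55]                    # accepted steps only
i, indices = 0, [0]
for j, ctrl in enumerate(control_trajectory):
    if i == len(line_search_trajectory):
        break
    if np.allclose(ctrl, line_search_trajectory[i]):
        indices.append(j)
        i += 1
print(indices)   # [0, 2, 4]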

fig, axes = plt.subplots(figsize=(8, 8))
# Start the iteration axis at 1 so the first point survives the log scale
its = range(1, len(functional_trajectory) + 1)
axes.loglog(its, functional_trajectory)
axes.set_xlabel("Iteration")
axes.set_ylabel("Quantity of Interest")
axes.grid(True, which='both')
savefig("qoi_progress_{:d}_{:s}".format(level, categories),
        "plots",
        extensions=["pdf"])

fig, axes = plt.subplots(figsize=(8, 8))
axes.loglog(its, gradient_norm_trajectory)
axes.set_xlabel("Iteration")
axes.set_ylabel(r"$\ell_\infty$-norm of gradient")