def derivative(self, m_array=None, taylor_test=False, seed=0.001, forget=True, project=False):
        ''' An implementation of the reduced functional derivative evaluation
            that accepts the controls as an array of scalars. If no control values are given,
            the result is the derivative at the point of the last forward run.
            If taylor_test is True, the derivative is automatically verified
            by the Taylor remainder convergence test. The perturbation direction
            is random and the perturbation size can be controlled with the seed argument.
            '''

        # In the case that the control values have changed since the last forward run,
        # we first need to rerun the forward model with the new controls to have the
        # correct forward solutions
        m = [p.data() for p in self.controls]
        if m_array is not None and (m_array != self.get_global(m)).any():
            info_red("Rerunning forward model before computing derivative")
            self(m_array)

        dJdm = self.__base_derivative__(forget=forget, project=project)

        if project:
            dJdm_global = self.get_global(dJdm)
        else:
            dJdm_global = get_global(dJdm)

        # Perform the gradient test
        if taylor_test:
            minconv = utils.test_gradient_array(self.__call__, self.scale * dJdm_global, m_array,
                                                seed = seed)
            if minconv < 1.9:
                raise RuntimeWarning("A gradient test failed during execution.")
            else:
                info("Gradient test successful.")
            self(m_array)

        return dJdm_global
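
A minimal usage sketch of the method above; rf and m_array are hypothetical names, not part of the example:

# Hypothetical usage sketch: rf is a ReducedFunctional built elsewhere,
# m_array the flattened control values.
#
#   dJdm = rf.derivative(m_array, taylor_test=True, seed=0.001)
#
# If m_array differs from the controls of the last forward run, the model
# is rerun first; with taylor_test=True the returned gradient is checked
# with the Taylor remainder test before it is returned.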
Example 2
def compute_gradient(J, param, forget=True, ignore=[], callback=lambda var, output: None, project=False):
    backend.parameters["adjoint"]["stop_annotating"] = True

    enlisted_controls = enlist(param)
    param = ListControl(enlisted_controls)
    dJdparam = enlisted_controls.__class__([None] * len(enlisted_controls))

    last_timestep = adjglobals.adjointer.timestep_count

    ignorelist = []
    for fn in ignore:
        if isinstance(fn, backend.Function):
            ignorelist.append(adjglobals.adj_variables[fn])
        elif isinstance(fn, str):
            ignorelist.append(libadjoint.Variable(fn, 0, 0))
        else:
            ignorelist.append(fn)

    for i in range(adjglobals.adjointer.timestep_count):
        adjglobals.adjointer.set_functional_dependencies(J, i)

    for i in range(adjglobals.adjointer.equation_count)[::-1]:
        fwd_var = adjglobals.adjointer.get_forward_variable(i)
        if fwd_var in ignorelist:
            info("Ignoring the adjoint equation for %s" % fwd_var)
            continue

        (adj_var, output) = adjglobals.adjointer.get_adjoint_solution(i, J)

        callback(adj_var, output.data)

        storage = libadjoint.MemoryStorage(output)
        storage.set_overwrite(True)
        adjglobals.adjointer.record_variable(adj_var, storage)
        fwd_var = libadjoint.Variable(adj_var.name, adj_var.timestep, adj_var.iteration)

        out = param.equation_partial_derivative(adjglobals.adjointer, output.data, i, fwd_var)
        dJdparam = _add(dJdparam, out)

        if last_timestep > adj_var.timestep:
            # We have hit a new timestep, and need to compute this timestep's \partial J/\partial m contribution
            out = param.functional_partial_derivative(adjglobals.adjointer, J, adj_var.timestep)
            dJdparam = _add(dJdparam, out)

        last_timestep = adj_var.timestep

        if forget is None:
            pass
        elif forget:
            adjglobals.adjointer.forget_adjoint_equation(i)
        else:
            adjglobals.adjointer.forget_adjoint_values(i)

    rename(J, dJdparam, param)

    return postprocess(dJdparam, project, list_type=enlisted_controls)
Example 3
def company():
    if request.method == 'POST':
        company = request.form.get('company')
        cn = company
        clow, chigh = info(company)
        c = "static/{}.png".format(company)
        return render_template('company.html', clow=clow, chigh=chigh, company=c, company_name=cn)
    company = 'ASHOKLEY.NS'
    clow, chigh = info(company)
    c = "static/{}.png".format(company)
    return render_template('company.html', clow=clow, chigh=chigh, company=c, company_name=company)
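
The route decorator was presumably stripped when this snippet was extracted; a hypothetical sketch of how the view would be wired up (the decorator and the info() helper are assumptions):

# @app.route('/company', methods=['GET', 'POST'])
# def company():
#     ...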
Example 4
def test_scalar_parameters_adjoint(J, a, dJda, seed=0.1):
    info_blue("Running Taylor remainder convergence analysis for the adjoint model ... ")

    functional_values = []
    f_direct = J(a)

    a = numpy.array([float(x) for x in a])
    dJda = numpy.array(dJda)

    perturbation_direction = a/5.0  # note: unused below; the perturbations scale a directly
    perturbation_sizes = [seed / (2**i) for i in range(5)]
    perturbations = [a * i for i in perturbation_sizes]
    for x in perturbations:
        da = [backend.Constant(a[i] + x[i]) for i in range(len(a))]
        functional_values.append(J(da))

    # First-order Taylor remainders (not using adjoint)
    no_gradient = [abs(perturbed_f - f_direct) for perturbed_f in functional_values]

    info("Taylor remainder without adjoint information: " + str(no_gradient))
    info("Convergence orders for Taylor remainder without adjoint information (should all be 1): " + str(convergence_order(no_gradient)))

    with_gradient = []
    for i in range(len(perturbations)):
        remainder = abs(functional_values[i] - f_direct - numpy.dot(dJda, perturbations[i]))
        with_gradient.append(remainder)

    info("Taylor remainder with adjoint information: " + str(with_gradient))
    info("Convergence orders for Taylor remainder with adjoint information (should all be 2): " + str(convergence_order(with_gradient)))

    return min(convergence_order(with_gradient))
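
All of these tests rely on a convergence_order helper that is not shown. A minimal sketch of what it presumably computes: with the perturbation size halved at each step, a remainder R(h) = C*h**p gives p = log2(R(h)/R(h/2)). The actual dolfin-adjoint implementation may differ in detail.

import math

def convergence_order(errors, base=2):
    # Estimate the order p from successive remainders at halved step sizes.
    return [math.log(errors[i] / errors[i + 1], base)
            for i in range(len(errors) - 1)]

# Second-order remainders shrink by a factor of 4 per halving:
print(convergence_order([1.0, 0.25, 0.0625]))  # -> [2.0, 2.0]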
Example 6
def compute_adjoint(functional, forget=True, ignore=[]):

    ignorelist = []
    for fn in ignore:
        if isinstance(fn, backend.Function):
            ignorelist.append(adjglobals.adj_variables[fn])
        elif isinstance(fn, str):
            ignorelist.append(libadjoint.Variable(fn, 0, 0))
        else:
            ignorelist.append(fn)

    for i in range(adjglobals.adjointer.timestep_count):
        adjglobals.adjointer.set_functional_dependencies(functional, i)

    for i in range(adjglobals.adjointer.equation_count)[::-1]:
        fwd_var = adjglobals.adjointer.get_forward_variable(i)
        if fwd_var in ignorelist:
            info("Ignoring the adjoint equation for %s" % fwd_var)
            continue

        (adj_var,
         output) = adjglobals.adjointer.get_adjoint_solution(i, functional)
        if output.data:
            if backend.__name__ == "dolfin":
                output.data.rename(str(adj_var),
                                   "a Function from dolfin-adjoint")
            else:
                output.data.name = str(adj_var)

        storage = libadjoint.MemoryStorage(output)
        storage.set_overwrite(True)
        adjglobals.adjointer.record_variable(adj_var, storage)

        # forget is None: forget *nothing*.
        # forget is True: forget everything we can, forward and adjoint
        # forget is False: forget only unnecessary adjoint values
        if forget is None:
            pass
        elif forget:
            adjglobals.adjointer.forget_adjoint_equation(i)
        else:
            adjglobals.adjointer.forget_adjoint_values(i)

        yield (output.data, adj_var)
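
Note that compute_adjoint is a generator: it yields one (solution, variable) pair per annotated equation, in reverse order, so it must be iterated to drive the adjoint solves. A hedged usage sketch:

# Hypothetical usage sketch; J is a dolfin-adjoint Functional defined
# elsewhere after annotating the forward model.
#
#   for adj_solution, adj_variable in compute_adjoint(J, forget=False):
#       print(adj_variable, adj_solution)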
Example 8
        def grad(x, f, g):
            ''' Evaluates the gradient for the control values.
            f is the associated functional value and g are the values
            of the constraints. '''

            fail = False
            if not ignore_model_errors:
                dj = self.derivative(x, forget=False)
            else:
                try:
                    dj = self.derivative(x, forget=False)
                except Exception:
                    # The gradient evaluation failed; report the failure to
                    # the optimiser and return a zero gradient so that the
                    # code below does not crash on an undefined dj.
                    dj = np.zeros(len(x))
                    fail = True

            if constraints is not None:
                gJac = np.concatenate([gather(c.jacobian(x)) for c in constraints])
            else:
                gJac = np.zeros(len(x))  # SNOPT fails if no constraints are given, hence add a dummy constraint

            info("j = %f\t\t|dJ| = %f" % (f[0], np.linalg.norm(dj)))
            return np.array([dj]), gJac, fail
Example 10
def test_initial_condition_tlm(J, dJ, ic, seed=0.01, perturbation_direction=None):
    '''forward must be a function that takes in the initial condition (ic) as a backend.Function
       and returns the functional value by running the forward run:

         func = J(ic)

       final_adjoint is the tangent linear variable for the solution on which the functional depends
       (usually the last TLM equation solved).

       dJ must be the derivative of the functional with respect to its argument, evaluated and assembled at
       the unperturbed solution (a backend Vector).

       This function returns the order of convergence of the Taylor
       series remainder, which should be 2 if the TLM is working
       correctly.'''

    # We will compute the gradient of the functional with respect to the initial condition,
    # and check its correctness with the Taylor remainder convergence test.
    info_blue("Running Taylor remainder convergence analysis for the tangent linear model... ")
    import controls

    adj_var = adjglobals.adj_variables[ic]
    adj_var.timestep = 0
    if not adjglobals.adjointer.variable_known(adj_var):
        info_red(str(adj_var) + " not known.")
        raise libadjoint.exceptions.LibadjointErrorInvalidInputs("Your initial condition must be the /exact same Function/ as the initial condition used in the forward model.")

    # First run the problem unperturbed
    ic_copy = backend.Function(ic)
    f_direct = J(ic_copy)

    # Randomise the perturbation direction:
    if perturbation_direction is None:
        perturbation_direction = backend.Function(ic.function_space())
        compatibility.randomise(perturbation_direction)

    # Run the forward problem for various perturbed initial conditions
    functional_values = []
    perturbations = []
    for perturbation_size in [seed/(2**i) for i in range(5)]:
        perturbation = backend.Function(perturbation_direction)
        vec = perturbation.vector()
        vec *= perturbation_size
        perturbations.append(perturbation)

        perturbed_ic = backend.Function(ic)
        vec = perturbed_ic.vector()
        vec += perturbation.vector()

        functional_values.append(J(perturbed_ic))

    # First-order Taylor remainders (not using adjoint)
    no_gradient = [abs(perturbed_f - f_direct) for perturbed_f in functional_values]

    info("Taylor remainder without tangent linear information: " + str(no_gradient))
    info("Convergence orders for Taylor remainder without tangent linear information (should all be 1): " + str(convergence_order(no_gradient)))

    with_gradient = []
    for i in range(len(perturbations)):
        param = controls.FunctionControl(ic, perturbations[i])
        final_tlm = tlm_dolfin(param, forget=False).data
        remainder = abs(functional_values[i] - f_direct - final_tlm.vector().inner(dJ))
        with_gradient.append(remainder)

    info("Taylor remainder with tangent linear information: " + str(with_gradient))
    info("Convergence orders for Taylor remainder with tangent linear information (should all be 2): " + str(convergence_order(with_gradient)))

    return min(convergence_order(with_gradient))
Example 11
def test_gradient_array(J, dJdx, x, seed = 0.01, perturbation_direction = None):
    '''Checks the correctness of the derivative dJ.
       x must be an array that specifies at which point in the parameter space
       the gradient is to be checked, and dJdx must be an array containing the gradient.
       The function J(x) must return the functional value.

       This function returns the order of convergence of the Taylor
       series remainder, which should be 2 if the gradient is correct.'''

    # We will compute the gradient of the functional with respect to the initial condition,
    # and check its correctness with the Taylor remainder convergence test.
    info("Running Taylor remainder convergence analysis to check the gradient ... ")

    # First run the problem unperturbed
    j_direct = J(x)

    # Randomise the perturbation direction:
    if perturbation_direction is None:
        perturbation_direction = x.copy()
        compatibility.randomise(perturbation_direction)

    # Run the forward problem for various perturbed initial conditions
    functional_values = []
    perturbations = []
    perturbation_sizes = [seed/(2**i) for i in range(5)]
    for perturbation_size in perturbation_sizes:
        perturbation = perturbation_direction.copy() * perturbation_size
        perturbations.append(perturbation)

        perturbed_x = x.copy() + perturbation
        functional_values.append(J(perturbed_x))

    # First-order Taylor remainders (not using adjoint)
    no_gradient = [abs(perturbed_j - j_direct) for perturbed_j in functional_values]

    info("Absolute functional evaluation differences: %s" % str(no_gradient))
    info("Convergence orders for Taylor remainder without adjoint information (should all be 1): %s" % str(convergence_order(no_gradient)))

    with_gradient = []
    for i in range(len(perturbations)):
        remainder = abs(functional_values[i] - j_direct - numpy.dot(perturbations[i], dJdx))
        with_gradient.append(remainder)

    if min(with_gradient + no_gradient) < 1e-16:
        info("Warning: The Taylor remainders are close to machine precision (< %s). Try increasing the seed value in case the Taylor remainder test fails." % min(with_gradient + no_gradient))

    info("Absolute functional evaluation differences with adjoint: %s" % str(with_gradient))
    info("Convergence orders for Taylor remainder with adjoint information (should all be 2): %s" % str(convergence_order(with_gradient)))

    return min(convergence_order(with_gradient))
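
Because test_gradient_array only needs plain arrays and a callable, it can be exercised with an analytic gradient. A hypothetical self-check (passing perturbation_direction explicitly avoids the compatibility.randomise dependency):

# Hypothetical self-check: for J(x) = x.x the exact gradient is 2x, so the
# reported convergence order should be close to 2.
#
#   import numpy
#   x = numpy.array([1.0, 2.0, 3.0])
#   order = test_gradient_array(lambda v: numpy.dot(v, v), 2 * x, x,
#                               seed=0.1,
#                               perturbation_direction=numpy.ones(3))
#   assert order > 1.9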
Example 12
def test_scalar_parameter_adjoint(J, a, dJda, seed=None):
    info_blue("Running Taylor remainder convergence analysis for the adjoint model ... ")

    functional_values = []
    f_direct = J(a)

    if seed is None:
        seed = float(a)/5.0
        if seed == 0.0:
            seed = 0.1

    perturbations = [seed / (2**i) for i in range(5)]

    for da in (backend.Constant(float(a) + x) for x in perturbations):
        functional_values.append(J(da))

    # First-order Taylor remainders (not using adjoint)
    no_gradient = [abs(perturbed_f - f_direct) for perturbed_f in functional_values]

    info("Taylor remainder without adjoint information: " + str(no_gradient))
    info("Convergence orders for Taylor remainder without adjoint information (should all be 1): " + str(convergence_order(no_gradient)))

    with_gradient = []
    gradient_fd   = []
    for i in range(len(perturbations)):
        gradient_fd.append((functional_values[i] - f_direct)/perturbations[i])

        remainder = abs(functional_values[i] - f_direct - float(dJda)*perturbations[i])
        with_gradient.append(remainder)

    info("Taylor remainder with adjoint information: " + str(with_gradient))
    info("Convergence orders for Taylor remainder with adjoint information (should all be 2): " + str(convergence_order(with_gradient)))

    info("Gradients (finite differencing): " + str(gradient_fd))
    info("Gradient (adjoint): " + str(dJda))

    return min(convergence_order(with_gradient))
Example 13
def test_initial_condition_adjoint(J, ic, final_adjoint, seed=0.01, perturbation_direction=None):
    '''forward must be a function that takes in the initial condition (ic) as a backend.Function
       and returns the functional value by running the forward run:

         func = J(ic)

       final_adjoint is the adjoint associated with the initial condition
       (usually the last adjoint equation solved).

       This function returns the order of convergence of the Taylor
       series remainder, which should be 2 if the adjoint is working
       correctly.'''

    # We will compute the gradient of the functional with respect to the initial condition,
    # and check its correctness with the Taylor remainder convergence test.
    info_blue("Running Taylor remainder convergence analysis for the adjoint model ... ")

    # First run the problem unperturbed
    ic_copy = backend.Function(ic)
    f_direct = J(ic_copy)

    # Randomise the perturbation direction:
    if perturbation_direction is None:
        perturbation_direction = backend.Function(ic.function_space())
        compatibility.randomise(perturbation_direction)

    # Run the forward problem for various perturbed initial conditions
    functional_values = []
    perturbations = []
    perturbation_sizes = [seed/(2**i) for i in range(5)]
    for perturbation_size in perturbation_sizes:
        perturbation = backend.Function(perturbation_direction)
        vec = perturbation.vector()
        vec *= perturbation_size
        perturbations.append(perturbation)

        perturbed_ic = backend.Function(ic)
        vec = perturbed_ic.vector()
        vec += perturbation.vector()

        functional_values.append(J(perturbed_ic))

    # First-order Taylor remainders (not using adjoint)
    no_gradient = [abs(perturbed_f - f_direct) for perturbed_f in functional_values]

    info("Taylor remainder without adjoint information: " + str(no_gradient))
    info("Convergence orders for Taylor remainder without adjoint information (should all be 1): " + str(convergence_order(no_gradient)))

    adjoint_vector = final_adjoint.vector()

    with_gradient = []
    gradient_fd   = []
    for i in range(len(perturbations)):
        gradient_fd.append((functional_values[i] - f_direct)/perturbation_sizes[i])

        remainder = abs(functional_values[i] - f_direct - adjoint_vector.inner(perturbations[i].vector()))
        with_gradient.append(remainder)

    info("Taylor remainder with adjoint information: " + str(with_gradient))
    info("Convergence orders for Taylor remainder with adjoint information (should all be 2): " + str(convergence_order(with_gradient)))

    info("Gradients (finite differencing): " + str(gradient_fd))
    info("Gradient (adjoint): " + str(adjoint_vector.inner(perturbation_direction.vector())))

    return min(convergence_order(with_gradient))
Example 14
def compute_gradient(J,
                     param,
                     forget=True,
                     ignore=[],
                     callback=lambda var, output: None,
                     project=False):
    if not isinstance(J, Functional):
        raise ValueError("J must be of type dolfin_adjoint.Functional.")

    flag = misc.pause_annotation()

    enlisted_controls = enlist(param)
    param = ListControl(enlisted_controls)

    if backend.parameters["adjoint"]["allow_zero_derivatives"]:
        dJ_init = []
        for c in enlisted_controls:
            if isinstance(c.data(), backend.Constant):
                dJ_init.append(backend.Constant(0))
            elif isinstance(c.data(), backend.Function):
                space = c.data().function_space()
                dJ_init.append(backend.Function(space))

    else:
        dJ_init = [None] * len(enlisted_controls)

    dJdparam = enlisted_controls.__class__(dJ_init)

    last_timestep = adjglobals.adjointer.timestep_count

    ignorelist = []
    for fn in ignore:
        if isinstance(fn, backend.Function):
            ignorelist.append(adjglobals.adj_variables[fn])
        elif isinstance(fn, str):
            ignorelist.append(libadjoint.Variable(fn, 0, 0))
        else:
            ignorelist.append(fn)

    for i in range(adjglobals.adjointer.timestep_count):
        adjglobals.adjointer.set_functional_dependencies(J, i)

    for i in range(adjglobals.adjointer.equation_count)[::-1]:
        fwd_var = adjglobals.adjointer.get_forward_variable(i)
        if fwd_var in ignorelist:
            info("Ignoring the adjoint equation for %s" % fwd_var)
            continue

        (adj_var, output) = adjglobals.adjointer.get_adjoint_solution(i, J)

        callback(adj_var, output.data)

        storage = libadjoint.MemoryStorage(output)
        storage.set_overwrite(True)
        adjglobals.adjointer.record_variable(adj_var, storage)
        fwd_var = libadjoint.Variable(adj_var.name, adj_var.timestep,
                                      adj_var.iteration)

        out = param.equation_partial_derivative(adjglobals.adjointer,
                                                output.data, i, fwd_var)
        dJdparam = _add(dJdparam, out)

        if last_timestep > adj_var.timestep:
            # We have hit a new timestep, and need to compute this timestep's \partial J/\partial m contribution
            out = param.functional_partial_derivative(adjglobals.adjointer, J,
                                                      adj_var.timestep)
            dJdparam = _add(dJdparam, out)

        last_timestep = adj_var.timestep

        if forget is None:
            pass
        elif forget:
            adjglobals.adjointer.forget_adjoint_equation(i)
        else:
            adjglobals.adjointer.forget_adjoint_values(i)

    rename(J, dJdparam, param)

    misc.continue_annotation(flag)

    return postprocess(dJdparam, project, list_type=enlisted_controls)
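
A hedged usage sketch of compute_gradient, assuming the annotated forward model has already been run (Functional, Control and the names u, u0 follow dolfin-adjoint conventions but are assumptions here):

# Hypothetical usage sketch:
#
#   J = Functional(inner(u, u) * dx * dt[FINISH_TIME])
#   m = Control(u0)                      # u0: initial-condition Function
#   dJdm = compute_gradient(J, m, forget=False)
#
# forget=False keeps the forward values on the tape so it can be reused,
# e.g. for a subsequent taylor_test call.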
Example 15
def taylor_test(J, m, Jm, dJdm, HJm=None, seed=None, perturbation_direction=None, value=None):
    '''J must be a function that takes in a parameter value m and returns the value
       of the functional:

         func = J(m)

       Jm is the value of the function J at the parameter m.
       dJdm is the gradient of J evaluated at m, to be tested for correctness.

       This function returns the order of convergence of the Taylor
       series remainder, which should be 2 if the adjoint is working
       correctly.

       If HJm is not None, the Taylor test will also attempt to verify the
       correctness of the Hessian. HJm should be a callable which takes in a
       direction and returns the Hessian of the functional in that direction
       (i.e., takes in a vector and returns a vector). In that case, an additional
       Taylor remainder is computed, which should converge at order 3 if the Hessian
       is correct.'''

    info_blue("Running Taylor remainder convergence test ... ")
    import controls

    if isinstance(m, list):
        m = ListControl(m)

    if isinstance(m, controls.ListControl):
        if perturbation_direction is None:
            perturbation_direction = [None] * len(m.controls)

        if value is None:
            value = [None] * len(m.controls)

        return min(taylor_test(J, m[i], Jm, dJdm[i], HJm, seed, perturbation_direction[i], value[i]) for i in range(len(m.controls)))

    def get_const(val):
        if isinstance(val, str):
            return float(constant.constant_values[val])
        else:
            return float(val)

    def get_value(param, value):
        if value is not None:
            return value
        else:
            try:
                return param.data()
            except libadjoint.exceptions.LibadjointErrorNeedValue:
                info_red("Do you need to pass forget=False to compute_gradient?")
                raise

    # First, compute perturbation sizes.
    seed_default = 0.01
    if seed is None:
        if isinstance(m, controls.ConstantControl):
            seed = get_const(m.a) / 5.0

            if seed == 0.0: seed = 0.1
        elif isinstance(m, controls.FunctionControl):
            ic = get_value(m, value)
            if len(ic.vector()) == 1: # our control is in R
                seed = float(ic) / 5.0
            else:
                seed = seed_default
        else:
            seed = seed_default

    perturbation_sizes = [seed/(2.0**i) for i in range(5)]

    # Next, compute the perturbation direction.
    if perturbation_direction is None:
        if isinstance(m, controls.ConstantControl):
            perturbation_direction = 1
        elif isinstance(m, controls.ConstantControls):
            perturbation_direction = numpy.array([get_const(x)/5.0 for x in m.v])
        elif isinstance(m, controls.FunctionControl):
            ic = get_value(m, value)
            perturbation_direction = backend.Function(ic.function_space())
            compatibility.randomise(perturbation_direction)
        else:
            raise libadjoint.exceptions.LibadjointErrorNotImplemented("Don't know how to compute a perturbation direction")
    else:
        if isinstance(m, controls.FunctionControl):
            ic = get_value(m, value)

    # So now compute the perturbations:
    if not isinstance(perturbation_direction, backend.Function):
        perturbations = [x*perturbation_direction for x in perturbation_sizes]
    else:
        perturbations = []
        for x in perturbation_sizes:
            perturbation = backend.Function(perturbation_direction)
            vec = perturbation.vector()
            vec *= x
            perturbations.append(perturbation)

    # And now the perturbed inputs:
    if isinstance(m, controls.ConstantControl):
        pinputs = [backend.Constant(get_const(m.a) + x) for x in perturbations]
    elif isinstance(m, controls.ConstantControls):
        a = numpy.array([get_const(x) for x in m.v])

        def make_const(arr):
            return [backend.Constant(x) for x in arr]

        pinputs = [make_const(a + x) for x in perturbations]
    elif isinstance(m, controls.FunctionControl):
        pinputs = []
        for x in perturbations:
            pinput = backend.Function(x)
            vec = pinput.vector()
            vec += ic.vector()
            pinputs.append(pinput)

    # Issue 34: We must evaluate HJm before we evaluate the tape at the
    # perturbed controls below.
    if HJm is not None:
        HJm_values = []
        for perturbation in perturbations:
            HJmp = HJm(perturbation)
            HJm_values.append(HJmp)

    # At last: the common bit!
    functional_values = []
    for pinput in pinputs:
        Jp = J(pinput)
        functional_values.append(Jp)

    # First-order Taylor remainders (not using adjoint)
    no_gradient = [abs(perturbed_J - Jm) for perturbed_J in functional_values]

    info("Taylor remainder without gradient information: " + str(no_gradient))
    info("Convergence orders for Taylor remainder without gradient information (should all be 1): " + str(convergence_order(no_gradient)))

    with_gradient = []
    for i in range(len(perturbations)):
        if isinstance(m, controls.ConstantControl) or isinstance(m, controls.ConstantControls):
            remainder = taylor_remainder_with_gradient(m, Jm, dJdm, functional_values[i], perturbations[i])
        else:
            remainder = taylor_remainder_with_gradient(m, Jm, dJdm, functional_values[i], perturbations[i], ic=ic)
        with_gradient.append(remainder)

    if min(with_gradient + no_gradient) < 1e-16:
        warning("Warning: The Taylor remainders are close to machine precision (< %s). Try increasing the seed value in case the Taylor remainder test fails." % min(with_gradient + no_gradient))

    info("Taylor remainder with gradient information: " + str(with_gradient))
    info("Convergence orders for Taylor remainder with gradient information (should all be 2): " + str(convergence_order(with_gradient)))

    if HJm is not None:
        with_hessian = []
        if isinstance(m, controls.ConstantControl):
            for i in range(len(perturbations)):
                remainder = abs(functional_values[i] - Jm - float(dJdm)*perturbations[i] - 0.5*perturbations[i]*HJm_values[i])
                with_hessian.append(remainder)
        elif isinstance(m, controls.ConstantControls):
            for i in range(len(perturbations)):
                remainder = abs(functional_values[i] - Jm - numpy.dot(dJdm, perturbations[i]) - 0.5*numpy.dot(perturbations[i], HJm_values[i]))
                with_hessian.append(remainder)
        elif isinstance(m, controls.FunctionControl):
            for i in range(len(perturbations)):
                remainder = abs(functional_values[i] - Jm - dJdm.vector().inner(perturbations[i].vector()) - 0.5*perturbations[i].vector().inner(HJm_values[i].vector()))
                with_hessian.append(remainder)

        info("Taylor remainder with Hessian information: " + str(with_hessian))
        info("Convergence orders for Taylor remainder with Hessian information (should all be 3): " + str(convergence_order(with_hessian)))
        return min(convergence_order(with_hessian))
    else:
        return min(convergence_order(with_gradient))
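
A hedged end-to-end sketch combining compute_gradient with taylor_test (run_forward is an assumed helper that re-runs the model for a given control value and returns the functional value):

# Hypothetical usage sketch:
#
#   Jm   = run_forward(m.data())             # functional value at m
#   dJdm = compute_gradient(J, m, forget=False)
#   conv = taylor_test(run_forward, m, Jm, dJdm)
#   assert conv > 1.9                        # gradient is consistent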
Example 17
def _taylor_test_single_control(J, m, Jm, dJdm, HJm, seed, perturbation_direction, value, size=None):
    from . import function, controls

    # Default to five runs/perturbations if none is given
    if size is None:
        size = 5
    
    # Check inputs
    if not isinstance(m, libadjoint.Parameter):
        raise ValueError("m must be a valid control instance.")

    def get_const(val):
        if isinstance(val, str):
            return float(constant.constant_values[val])
        else:
            return float(val)

    def get_value(param, value):
        if value is not None:
            return value
        else:
            try:
                return param.data()
            except libadjoint.exceptions.LibadjointErrorNeedValue:
                info_red("Do you need to pass forget=False to compute_gradient?")
                raise

    # First, compute perturbation sizes.
    seed_default = 0.01
    if seed is None:
        if isinstance(m, controls.ConstantControl):
            seed = get_const(m.a) / 5.0

            if seed == 0.0: seed = 0.1
        elif isinstance(m, controls.FunctionControl):
            ic = get_value(m, value)
            if len(ic.vector()) == 1: # our control is in R
                seed = float(ic) / 5.0
            else:
                seed = seed_default
        else:
            seed = seed_default

    perturbation_sizes = [seed/(2.0**i) for i in range(size)]

    # Next, compute the perturbation direction.
    if perturbation_direction is None:
        if isinstance(m, controls.ConstantControl):
            perturbation_direction = 1
        elif isinstance(m, controls.ConstantControls):
            perturbation_direction = numpy.array([get_const(x)/5.0 for x in m.v])
        elif isinstance(m, controls.FunctionControl):
            ic = get_value(m, value)

            # Check for MultiMeshFunction_space
            if isinstance(ic.function_space(), compatibility.multi_mesh_function_space_type):
                perturbation_direction = backend.MultiMeshFunction(ic.function_space())
            else:
                perturbation_direction = function.Function(ic.function_space())

            compatibility.randomise(perturbation_direction)

        else:
            raise libadjoint.exceptions.LibadjointErrorNotImplemented("Don't know how to compute a perturbation direction")
    else:
        if isinstance(m, controls.FunctionControl):
            ic = get_value(m, value)
        elif isinstance(m, controls.ConstantControl):
            perturbation_direction = float(perturbation_direction)

    # So now compute the perturbations:
    if not isinstance(perturbation_direction, (backend.Function, backend.MultiMeshFunction)):
        perturbations = [x*perturbation_direction for x in perturbation_sizes]
    else:
        perturbations = []
        for x in perturbation_sizes:
            perturbation = perturbation_direction.copy(deepcopy=True)
            vec = perturbation.vector()
            vec *= x
            perturbations.append(perturbation)

    # And now the perturbed inputs:
    if isinstance(m, controls.ConstantControl):
        pinputs = [backend.Constant(get_const(m.a) + x) for x in perturbations]
    elif isinstance(m, controls.ConstantControls):
        a = numpy.array([get_const(x) for x in m.v])

        def make_const(arr):
            return [backend.Constant(x) for x in arr]

        pinputs = [make_const(a + x) for x in perturbations]
    elif isinstance(m, controls.FunctionControl):
        pinputs = []
        for x in perturbations:
            pinput = x.copy(deepcopy=True)
            vec = pinput.vector()
            vec += ic.vector()
            pinputs.append(pinput)

    # Issue 34: We must evaluate HJm before we evaluate the tape at the
    # perturbed controls below.
    if HJm is not None:
        HJm_values = []
        for perturbation in perturbations:
            HJmp = HJm(perturbation)
            HJm_values.append(HJmp)

    # At last: the common bit!
    functional_values = []
    for pinput in pinputs:
        Jp = J(pinput)
        functional_values.append(Jp)

    # First-order Taylor remainders (not using adjoint)
    no_gradient = [abs(perturbed_J - Jm) for perturbed_J in functional_values]

    info("Taylor remainder without gradient information: " + str(no_gradient))
    info("Convergence orders for Taylor remainder without gradient information (should all be 1): " + str(convergence_order(no_gradient)))

    with_gradient = []
    for i in range(len(perturbations)):
        if isinstance(m, controls.ConstantControl) or isinstance(m, controls.ConstantControls):
            remainder = taylor_remainder_with_gradient(m, Jm, dJdm, functional_values[i], perturbations[i])
        else:
            remainder = taylor_remainder_with_gradient(m, Jm, dJdm, functional_values[i], perturbations[i], ic=ic)
        with_gradient.append(remainder)

    if min(with_gradient + no_gradient) < 1e-16:
        warning("Warning: The Taylor remainders are close to machine precision (< %s). Try increasing the seed value in case the Taylor remainder test fails." % min(with_gradient + no_gradient))

    info("Taylor remainder with gradient information: " + str(with_gradient))
    info("Convergence orders for Taylor remainder with gradient information (should all be 2): " + str(convergence_order(with_gradient)))

    if HJm is not None:
        with_hessian = []
        if isinstance(m, controls.ConstantControl):
            for i in range(len(perturbations)):
                remainder = abs(functional_values[i] - Jm - float(dJdm)*perturbations[i] - 0.5*perturbations[i]*HJm_values[i])
                with_hessian.append(remainder)
        elif isinstance(m, controls.ConstantControls):
            for i in range(len(perturbations)):
                remainder = abs(functional_values[i] - Jm - numpy.dot(dJdm, perturbations[i]) - 0.5*numpy.dot(perturbations[i], HJm_values[i]))
                with_hessian.append(remainder)
        elif isinstance(m, controls.FunctionControl):
            for i in range(len(perturbations)):
                remainder = abs(functional_values[i] - Jm - dJdm.vector().inner(perturbations[i].vector()) - 0.5*perturbations[i].vector().inner(HJm_values[i].vector()))
                with_hessian.append(remainder)

        info("Taylor remainder with Hessian information: " + str(with_hessian))
        info("Convergence orders for Taylor remainder with Hessian information (should all be 3): " + str(convergence_order(with_hessian)))
        return min(convergence_order(with_hessian))
    else:
        return min(convergence_order(with_gradient))
Example 19
def test_initial_condition_adjoint(J, ic, final_adjoint, seed=0.01, perturbation_direction=None):
    '''forward must be a function that takes in the initial condition (ic) as a backend.Function
       and returns the functional value by running the forward run:

         func = J(ic)

       final_adjoint is the adjoint associated with the initial condition
       (usually the last adjoint equation solved).

       This function returns the order of convergence of the Taylor
       series remainder, which should be 2 if the adjoint is working
       correctly.'''

    from . import function

    # We will compute the gradient of the functional with respect to the initial condition,
    # and check its correctness with the Taylor remainder convergence test.
    info_blue("Running Taylor remainder convergence analysis for the adjoint model ... ")

    # First run the problem unperturbed
    ic_copy = ic.copy(deepcopy=True)
    f_direct = J(ic_copy)

    # Randomise the perturbation direction:
    if perturbation_direction is None:
        perturbation_direction = function.Function(ic.function_space())
        compatibility.randomise(perturbation_direction)

    # Run the forward problem for various perturbed initial conditions
    functional_values = []
    perturbations = []
    perturbation_sizes = [seed/(2**i) for i in range(5)]
    for perturbation_size in perturbation_sizes:
        perturbation = perturbation_direction.copy(deepcopy=True)
        vec = perturbation.vector()
        vec *= perturbation_size
        perturbations.append(perturbation)

        perturbed_ic = ic.copy(deepcopy=True)
        vec = perturbed_ic.vector()
        vec += perturbation.vector()

        functional_values.append(J(perturbed_ic))

    # First-order Taylor remainders (not using adjoint)
    no_gradient = [abs(perturbed_f - f_direct) for perturbed_f in functional_values]

    info("Taylor remainder without adjoint information: " + str(no_gradient))
    info("Convergence orders for Taylor remainder without adjoint information (should all be 1): " + str(convergence_order(no_gradient)))

    adjoint_vector = final_adjoint.vector()

    with_gradient = []
    gradient_fd   = []
    for i in range(len(perturbations)):
        gradient_fd.append((functional_values[i] - f_direct)/perturbation_sizes[i])

        remainder = abs(functional_values[i] - f_direct - adjoint_vector.inner(perturbations[i].vector()))
        with_gradient.append(remainder)

    info("Taylor remainder with adjoint information: " + str(with_gradient))
    info("Convergence orders for Taylor remainder with adjoint information (should all be 2): " + str(convergence_order(with_gradient)))

    info("Gradients (finite differencing): " + str(gradient_fd))
    info("Gradient (adjoint): " + str(adjoint_vector.inner(perturbation_direction.vector())))

    return min(convergence_order(with_gradient))
Example 21
def _taylor_test_single_control(J, m, Jm, dJdm, HJm, seed, perturbation_direction, value):
    from . import function, controls

    # Check inputs
    if not isinstance(m, libadjoint.Parameter):
        raise ValueError("m must be a valid control instance.")

    def get_const(val):
        if isinstance(val, str):
            return float(constant.constant_values[val])
        else:
            return float(val)

    def get_value(param, value):
        if value is not None:
            return value
        else:
            try:
                return param.data()
            except libadjoint.exceptions.LibadjointErrorNeedValue:
                info_red("Do you need to pass forget=False to compute_gradient?")
                raise

    # First, compute perturbation sizes.
    seed_default = 0.01
    if seed is None:
        if isinstance(m, controls.ConstantControl):
            seed = get_const(m.a) / 5.0

            if seed == 0.0: seed = 0.1
        elif isinstance(m, controls.FunctionControl):
            ic = get_value(m, value)
            if len(ic.vector()) == 1: # our control is in R
                seed = float(ic) / 5.0
            else:
                seed = seed_default
        else:
            seed = seed_default

    perturbation_sizes = [seed/(2.0**i) for i in range(5)]

    # Next, compute the perturbation direction.
    if perturbation_direction is None:
        if isinstance(m, controls.ConstantControl):
            perturbation_direction = 1
        elif isinstance(m, controls.ConstantControls):
            perturbation_direction = numpy.array([get_const(x)/5.0 for x in m.v])
        elif isinstance(m, controls.FunctionControl):
            ic = get_value(m, value)

            # Check for MultiMeshFunction_space
            if isinstance(ic.function_space(), compatibility.multi_mesh_function_space_type):
                perturbation_direction = backend.MultiMeshFunction(ic.function_space())
            else:
                perturbation_direction = function.Function(ic.function_space())

            compatibility.randomise(perturbation_direction)

        else:
            raise libadjoint.exceptions.LibadjointErrorNotImplemented("Don't know how to compute a perturbation direction")
    else:
        if isinstance(m, controls.FunctionControl):
            ic = get_value(m, value)
        elif isinstance(m, controls.ConstantControl):
            perturbation_direction = float(perturbation_direction)

    # So now compute the perturbations:
    if not isinstance(perturbation_direction, (backend.Function, backend.MultiMeshFunction)):
        perturbations = [x*perturbation_direction for x in perturbation_sizes]
    else:
        perturbations = []
        for x in perturbation_sizes:
            perturbation = perturbation_direction.copy(deepcopy=True)
            vec = perturbation.vector()
            vec *= x
            perturbations.append(perturbation)

    # And now the perturbed inputs:
    if isinstance(m, controls.ConstantControl):
        pinputs = [backend.Constant(get_const(m.a) + x) for x in perturbations]
    elif isinstance(m, controls.ConstantControls):
        a = numpy.array([get_const(x) for x in m.v])

        def make_const(arr):
            return [backend.Constant(x) for x in arr]

        pinputs = [make_const(a + x) for x in perturbations]
    elif isinstance(m, controls.FunctionControl):
        pinputs = []
        for x in perturbations:
            pinput = x.copy(deepcopy=True)
            vec = pinput.vector()
            vec += ic.vector()
            pinputs.append(pinput)

    # Issue 34: We must evaluate HJm before we evaluate the tape at the
    # perturbed controls below.
    if HJm is not None:
        HJm_values = []
        for perturbation in perturbations:
            HJmp = HJm(perturbation)
            HJm_values.append(HJmp)

    # At last: the common bit!
    functional_values = []
    for pinput in pinputs:
        Jp = J(pinput)
        functional_values.append(Jp)

    # First-order Taylor remainders (not using adjoint)
    no_gradient = [abs(perturbed_J - Jm) for perturbed_J in functional_values]

    info("Taylor remainder without gradient information: " + str(no_gradient))
    info("Convergence orders for Taylor remainder without gradient information (should all be 1): " + str(convergence_order(no_gradient)))

    with_gradient = []
    for i in range(len(perturbations)):
        if isinstance(m, controls.ConstantControl) or isinstance(m, controls.ConstantControls):
            remainder = taylor_remainder_with_gradient(m, Jm, dJdm, functional_values[i], perturbations[i])
        else:
            remainder = taylor_remainder_with_gradient(m, Jm, dJdm, functional_values[i], perturbations[i], ic=ic)
        with_gradient.append(remainder)

    if min(with_gradient + no_gradient) < 1e-16:
        warning("Warning: The Taylor remainders are close to machine precision (< %s). Try increasing the seed value in case the Taylor remainder test fails." % min(with_gradient + no_gradient))

    info("Taylor remainder with gradient information: " + str(with_gradient))
    info("Convergence orders for Taylor remainder with gradient information (should all be 2): " + str(convergence_order(with_gradient)))

    if HJm is not None:
        with_hessian = []
        if isinstance(m, controls.ConstantControl):
            for i in range(len(perturbations)):
                remainder = abs(functional_values[i] - Jm - float(dJdm)*perturbations[i] - 0.5*perturbations[i]*HJm_values[i])
                with_hessian.append(remainder)
        elif isinstance(m, controls.ConstantControls):
            for i in range(len(perturbations)):
                remainder = abs(functional_values[i] - Jm - numpy.dot(dJdm, perturbations[i]) - 0.5*numpy.dot(perturbations[i], HJm_values[i]))
                with_hessian.append(remainder)
        elif isinstance(m, controls.FunctionControl):
            for i in range(len(perturbations)):
                remainder = abs(functional_values[i] - Jm - dJdm.vector().inner(perturbations[i].vector()) - 0.5*perturbations[i].vector().inner(HJm_values[i].vector()))
                with_hessian.append(remainder)

        info("Taylor remainder with Hessian information: " + str(with_hessian))
        info("Convergence orders for Taylor remainder with Hessian information (should all be 3): " + str(convergence_order(with_hessian)))
        return min(convergence_order(with_hessian))
    else:
        return min(convergence_order(with_gradient))