Example #1
    def __call__(self, m, annotate=False):

        logger.info("Evaluating model")
        # Take small steps with gamma until we have only one point left
        # We do not want to record this as we only want to optimize the final value
        logger.debug(Text.yellow("Stop annotating"))
        dolfin.parameters["adjoint"]["stop_annotating"] = True

        logger.debug("Try to step up gamma")

        w_old = self.cphm.solver.state
        gamma_old = self.gamma_previous.copy(True)
        logger.info("Gamma old = {}".format(
            numpy_mpi.gather_broadcast(gamma_old.vector().get_local())))

        try:
            self.cphm.next_active(m, self.gamma_previous)

        except SolverDidNotConverge as ex:
            logger.debug("Stepping up gamma failed")

            logger.debug("Assign the old state and old gamma")
            # Assign the old state
            self.cphm.solver.reinit(w_old)
            # Assign the old gamma
            logger.info("Gamma old = {}".format(
                numpy_mpi.gather_broadcast(gamma_old.vector().get_local())))
            self.cphm.solver.material.activation.assign(gamma_old)
            self.gamma_previous.assign(gamma_old)

            raise ex

        else:
            # Stepping up gamma succeeded
            logger.debug("Stepping up gamma succeeded")
            # Get the current state
            w = self.cphm.solver.state

            self.gamma_previous.assign(m)
            logger.debug(Text.yellow("Start annotating"))
            dolfin.parameters["adjoint"]["stop_annotating"] = not annotate

            # Assign the state where we have only one step with gamma left,
            # and make sure that dolfin-adjoint records this.
            logger.debug("Assign the new state and gamma")
            self.cphm.solver.state.assign(w, annotate=annotate)

            # Now we make the final solve
            self.cphm.solver.material.activation.assign(m)

            w = self.cphm.solver.state

            logger.debug("Solve the forward problem with the new gamma")

            forward_result = BasicForwardRunner.solve_the_forward_problem(
                self, self.cphm, annotate, "active")
            w = self.cphm.solver.state

            return forward_result, False
    def print_line(self):
        v_sim = numpy_mpi.gather_broadcast(
            self.simulated_fun.vector().get_local())[0]
        v_meas = numpy_mpi.gather_broadcast(
            self.target_fun.vector().get_local())[0]
        I = self.get_value()

        return "\t{:<18.2f}\t{:<20.2f}\t{:<10.2e}".format(v_meas, v_sim, I)
Example #3
    def assign_control(self, value, annotate=True):
        """
        Assign value to control parameter
        """
        control_new = dolfin_adjoint.Function(self.control.function_space(),
                                              name="new control")

        if isinstance(
            value,
            (
                dolfin.Function,
                dolfin_adjoint.Function,
                RegionalParameter,
                MixedParameter,
            ),
        ):
            control_new.assign(value)

        elif isinstance(value, (float, int)):
            numpy_mpi.assign_to_vector(control_new.vector(), np.array([value]))

        elif isinstance(value, pyadjoint.enlisting.Enlist):
            val_delisted = dolfin_adjoint.delist(value, self.controls)
            control_new.assign(val_delisted)

        else:
            numpy_mpi.assign_to_vector(control_new.vector(),
                                       numpy_mpi.gather_broadcast(value))

        self.control.assign(control_new, annotate=annotate)
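The isinstance dispatch above routes several control representations into a single dolfin Function. A minimal standalone sketch of the same pattern using plain dolfin (the real method additionally handles RegionalParameter, MixedParameter and Enlist values):

import dolfin
import numpy as np


def assign_value(target, value):
    # Function-valued controls are assigned directly; scalars and arrays
    # are written into the underlying dof vector.
    if isinstance(value, dolfin.Function):
        target.assign(value)
    elif isinstance(value, (float, int)):
        target.vector()[:] = float(value)
    else:
        target.vector()[:] = np.asarray(value, dtype=float)


mesh = dolfin.UnitIntervalMesh(4)
R = dolfin.FunctionSpace(mesh, "R", 0)  # a single global dof
control = dolfin.Function(R)
assign_value(control, 0.5)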
Example #4
    def derivative(self, *args, **kwargs):

        logger.debug("\nEvaluate gradient...")
        self.collector["nr_derivative_calls"] += 1

        t = dolfin.Timer("Backward run")
        t.start()
        out = super().derivative()
        back_time = t.stop()
        logger.info(("Evaluating gradient done. "
                     "Time to evaluate = {} seconds".format(back_time)))
        self.collector["backward_times"].append(back_time)

        # Multiply by a small scale factor so that we take smaller steps
        gathered_out = numpy_mpi.gather_broadcast(out.vector().get_local())

        self.collector["gradient_norm"].append(np.linalg.norm(gathered_out))
        self.collector["gradient_norm_scaled"].append(
            np.linalg.norm(gathered_out) * self.scale * self.derivative_scale)
        logger.info(("|dJ|(actual) = {}\t"
                     "|dJ|(scaled) = {}").format(
                         self.collector["gradient_norm"][-1],
                         self.collector["gradient_norm_scaled"][-1],
                     ))

        return self.scale * gathered_out * self.derivative_scale
def print_optimization_report(params,
                              opt_controls,
                              init_controls,
                              ini_for_res,
                              opt_for_res,
                              opt_result=None):

    if opt_result:
        logger.info("\nOptimization terminated...")

        logger.info("\tFunction Evaluations: {}".format(opt_result["nfev"]))
        logger.info("\tGradient Evaluations: {}".format(opt_result["njev"]))
        logger.info("\tNumber of iterations: {}".format(opt_result["nit"]))
        logger.info("\tNumber of crashes: {}".format(opt_result["ncrash"]))
        logger.info("\tRun time: {:.2f} seconds".format(
            opt_result["run_time"]))

    logger.info("\nFunctional Values")
    logger.info(" " * 7 + "\t" + print_head(ini_for_res, False))

    if "grad_norm" not in opt_result or len(opt_result["grad_norm"]) == 0:
        grad_norm_ini = 0.0
        grad_norm_opt = 0.0
    else:
        grad_norm_ini = opt_result["grad_norm"][0]
        grad_norm_opt = opt_result["grad_norm"][-1]

    logger.info("{:7}\t{}".format(
        "Initial", print_line(ini_for_res, grad_norm=grad_norm_ini)))
    logger.info("{:7}\t{}".format(
        "Optimal", print_line(opt_for_res, grad_norm=grad_norm_opt)))

    if params["phase"] == PHASES[0]:
        logger.info("\nMaterial Parameters")
        logger.info("Initial {}".format(init_controls))
        logger.info("Optimal {}".format(
            numpy_mpi.gather_broadcast(opt_controls.vector().get_local())))
    else:
        logger.info("\nContraction Parameter")
        logger.info("\tMin\tMean\tMax")
        logger.info("Initial\t{:.5f}\t{:.5f}\t{:.5f}".format(
            init_controls.min(), init_controls.mean(), init_controls.max()))
        opt_controls_arr = numpy_mpi.gather_broadcast(
            opt_controls.vector().get_local())
        logger.info("Optimal\t{:.5f}\t{:.5f}\t{:.5f}".format(
            opt_controls_arr.min(), opt_controls_arr.mean(),
            opt_controls_arr.max()))
Example #6
def get_simulated_strain_traces(phm):
    simulated_strains = {
        strain: np.zeros(17) for strain in list(STRAIN_NUM_TO_KEY.values())
    }
    strains = phm.strains
    for direction in range(3):
        for region in range(17):
            simulated_strains[STRAIN_NUM_TO_KEY[direction]][region] = numpy_mpi.gather_broadcast(
                strains[region].vector().get_local()
            )[direction]
    return simulated_strains
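STRAIN_NUM_TO_KEY is not shown in this snippet; it presumably maps the three direction indices to component names. A hypothetical definition consistent with the loop above (illustrative only):

# Hypothetical mapping, consistent with the range(3) directions above:
STRAIN_NUM_TO_KEY = {0: "circumferential", 1: "radial", 2: "longitudinal"}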
Example #7
    def __init__(self, for_run, paramvec, scale=1.0, relax=1.0, verbose=False):

        self.log_level = logger.level
        self.reset()
        self.for_run = for_run
        self.paramvec = paramvec

        self.initial_paramvec = numpy_mpi.gather_broadcast(paramvec.vector().get_local())
        self.scale = scale
        self.derivative_scale = relax

        self.verbose = verbose
        from .optimal_control import has_scipy016
Example #8
    def assimilate(self, min_value=None, max_value=None, tol=None):
        """
        FIXME
        """

        rd = self.create_reduced_functional()

        # Create optimal control problem
        self.oc_problem = OptimalControl(min_value=min_value,
                                         max_value=max_value,
                                         tol=tol)
        x = numpy_mpi.gather_broadcast(self.control.vector().get_local())

        self.oc_problem.build_problem(rd, x)

        self.result = self.oc_problem.solve()
        return self.result
Example #9
    def assign(self, u=None, annotate=True):
        """
        Assign the model observation and compute the functional

        Arguments
        ---------
        u : :py:class:`dolfin.Function`
            The input to the model observation, e.g. the displacement

        Returns
        -------
        functional : :py:class:`dolfin_adjoint.Function`
            A scalar representing the mismatch between model and data defined
            in the :meth:`OptimizationTarget.form` method.
        """
        # Assign model observation for dolfin-adjoint recording
        model = self.model(u)
        self.model_function.assign(model, annotate=annotate)

        # Assign data observation for dolfin-adjoint recording
        data = self.dolfin_observations[self.count]
        self.data_function.assign(data, annotate=annotate)

        form = self.form()

        dolfin_adjoint.solve(
            self._trial * self._test * dolfin.dx == self._test * form *
            dolfin.dx,
            self._functional,
        )

        if self.collect:
            self.collector["model"].append(
                dolfin.Vector(self.model_function.vector()))
            self.collector["data"].append(
                dolfin.Vector(self.data_function.vector()))
            self.collector["functional"].append(
                numpy_mpi.gather_broadcast(
                    self._functional.vector().get_local())[0])

        return self.weight * self._functional
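The dolfin_adjoint.solve call above is a projection trick: solving u*v*dx == v*form*dx on a real-valued space stores the mesh average of the form in a Function that the adjoint tape can record. A minimal plain-dolfin sketch, with a Constant standing in for the mismatch form:

import dolfin

mesh = dolfin.UnitSquareMesh(4, 4)
R = dolfin.FunctionSpace(mesh, "R", 0)  # one global dof
u, v = dolfin.TrialFunction(R), dolfin.TestFunction(R)
functional = dolfin.Function(R)

form = dolfin.Constant(3.0)  # stand-in for the mismatch form
dolfin.solve(u * v * dolfin.dx == v * form * dolfin.dx, functional)

# On the unit square (volume 1) the single dof now equals 3.0
print(functional.vector().get_local())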
Example #10
    def derivative(self, *args, **kwargs):

        logger.debug("\nEvaluate gradient...")
        self.nr_der_calls += 1
        import math

        t = dolfin.Timer("Backward run")
        t.start()

        out = dolfin_adjoint.ReducedFunctional.derivative(self, forget=False)
        back_time = t.stop()
        logger.debug(
            (
                "Evaluating gradient done. "
                + "Time to evaluate = {} seconds".format(back_time)
            )
        )
        self.backward_times.append(back_time)

        for num in out[0].vector().get_local():
            if math.isnan(num):
                raise Exception("NaN in adjoint gradient calculation.")

        # Multiply by a small scale factor so that we take smaller steps
        gathered_out = numpy_mpi.gather_broadcast(out[0].vector().get_local())

        self.grad_norm.append(np.linalg.norm(gathered_out))
        self.grad_norm_scaled.append(
            np.linalg.norm(gathered_out) * self.scale * self.derivative_scale
        )
        logger.debug(
            "|dJ|(actual) = {}\t|dJ|(scaled) = {}".format(
                self.grad_norm[-1], self.grad_norm_scaled[-1]
            )
        )
        return self.scale * gathered_out * self.derivative_scale
Example #11
    def __call__(self, value):

        logger.debug("\nEvaluate functional...")

        # annotation.annotate = True
        # Start recording

        # dolfin_adjoint.adj_reset()
        tape = dolfin_adjoint.get_working_tape()
        tape.reset_blocks()

        self.collector["count"] += 1
        new_control = dolfin_adjoint.Function(self.control.function_space(),
                                              name="new_control")
        # self.assign_control(value, annotate=annotation.annotate)

        # annotation.annotate = False
        if isinstance(
            value,
            (
                dolfin_adjoint.Function,
                dolfin.Function,
                dolfin_adjoint.Constant,
                dolfin.Constant,
                RegionalParameter,
                MixedParameter,
            ),
        ):
            new_control.assign(value)
        elif isinstance(value, (float, int)):
            numpy_mpi.assign_to_vector(new_control.vector(), np.array([value]))
        # elif isinstance(value, pyadjoint.enlisting.Enlist):

        else:
            numpy_mpi.assign_to_vector(new_control.vector(),
                                       numpy_mpi.gather_broadcast(value))

        if self.verbose:

            arr = numpy_mpi.gather_broadcast(new_control.vector().get_local())
            msg = ("\nCurrent value of control:"
                   "\n\t{:>8}\t{:>8}\t{:>8}\t{:>8}\t{:>8}"
                   "\n\t{:>8.2f}\t{:>8.2f}\t{:>8.2f}\t{:>8d}\t{:>8d}").format(
                       "Min",
                       "Mean",
                       "Max",
                       "argmin",
                       "argmax",
                       np.min(arr),
                       np.mean(arr),
                       np.max(arr),
                       np.argmin(arr),
                       np.argmax(arr),
                   )

            logger.debug(msg)

        # Change loglevel to avoid too much printing
        change_log_level = (self.log_level
                            == logging.INFO) and not self.verbose

        if change_log_level:
            logger.setLevel(logging.WARNING)

        t = dolfin.Timer("Forward run")
        t.start()

        logger.debug("\nEvaluate forward model")

        # annotation.annotate = True
        crash = False
        try:
            self.forward_result = self.forward_model(new_control,
                                                     annotate=True)
        except SolverDidNotConverge:
            crash = True

        forward_time = t.stop()
        self.collector["forward_times"].append(forward_time)
        logger.debug(("Evaluating forward model done. "
                      "Time to evaluate = {} seconds".format(forward_time)))

        if change_log_level:
            logger.setLevel(self.log_level)

        if self.first_call:
            # Store initial results
            self.collector["initial_results"] = self.forward_result
            self.first_call = False

            # Some printing
            # logger.info(utils.print_head(self.for_res))

        control = dolfin_adjoint.Control(self.control)

        # dolfin_adjoint.ReducedFunctional.__init__(
        #     self, dolfin_adjoint.Functional(self.forward_result.functional), control
        # )
        super().__init__(self.forward_result.functional, control)

        if crash:
            # This exception is raised if the solver uses more than x steps.
            # The solver is stuck; return a large value so it does not get
            # stuck again
            logger.warning(
                Text.red(("Iteration limit exceeded. "
                          "Return a large value of the functional")))
            # Return a big value, and make sure to increment the big value
            # so that the next big value is different from the current one.
            func_value = np.inf
            self.collector["nr_crashes"] += 1

        else:
            func_value = self.forward_result.functional

        # grad_norm = None if len(self.grad_norm_scaled) == 0 \
        # else self.grad_norm_scaled[-1]

        self.collector["functional_values"].append(
            float(func_value) * self.scale)
        self.collector["controls"].append(dolfin.Vector(self.control.vector()))

        logger.debug(Text.yellow("Stop annotating"))
        annotation.annotate = False

        self.print_line()
        return self.scale * func_value
            if approx == "interpolate":
                u_int = interpolate(project(ui, V), V0)

            elif approx == "project":
                u_int = project(ui, V0)

            else:
                u_int = ui

            F_ref = grad(u_int) + Identity(3)

            print(("\nApprox = {}:".format(approx)))
            target_vol = VolumeTarget(geo.mesh, dS, "LV", approx)
            target_vol.set_target_functions()
            target_vol.assign_simulated(u)

            vol = numpy_mpi.gather_broadcast(
                target_vol.simulated_fun.vector().get_local())[0]
            print(("Volume = ", vol))

            target_strain = RegionalStrainTarget(
                geo.mesh,
                basis,
                dX,
                nregions=nregions,
                tensor="gradu",
                F_ref=F_ref,
                approx=approx,
            )

            target_strain.set_target_functions()
            target_strain.assign_simulated(u)
    def get_value(self):
        return sum([
            numpy_mpi.gather_broadcast(
                self.functional[i].vector().get_local())[0]
            for i in range(self.nregions)
        ])
    def __init__(
        self,
        mesh,
        crl_basis,
        dmu,
        weights=None,
        nregions=None,
        tensor="gradu",
        F_ref=None,
        approx="original",
        map_strain=False,
    ):
        """
        Initialize regional strain target

        Parameters
        ----------
        mesh: :py:class:`dolfin.Mesh`
            The mesh
        crl_basis: dict
            Basis functions for the circumferential, radial
            and longitudinal components
        dmu: :py:class:`dolfin.Measure`
            Measure with subdomain information
        weights: :py:class:`numpy.ndarray`
            Weights on the different segments
        nregions: int
            Number of strain regions
        tensor: str
            Which strain tensor to use ('gradu' or 'E')
        F_ref: :py:class:`dolfin.Function`
            Tensor used to map strains to the reference configuration
        approx: str
            Approximation method ('original', 'project' or 'interpolate')
        map_strain: bool
            If True, map the local basis vectors to the reference
            configuration

        """
        self._name = "Regional Strain"

        assert tensor in ["gradu", "E"]
        self._tensor = tensor
        self.approx = approx
        self._map_strain = map_strain
        if map_strain:
            from .unloading.utils import normalize_vector_field

        dim = mesh.geometry().dim()
        self.dim = dim
        self._F_ref = F_ref if F_ref is not None else dolfin.Identity(dim)

        logger.debug("Load local basis.")
        logger.debug("Map local basis to new reference: {}".format(map_strain))
        self.crl_basis = []
        for l in ["circumferential", "radial", "longitudinal"]:
            msg = "{} : ".format(l)

            if l in crl_basis:
                msg += "True"
                logger.debug(msg)

                if map_strain:

                    Fe = self._F_ref * crl_basis[l]
                    logger.debug("Project")
                    e_ = dolfin_adjoint.project(Fe)
                    logger.debug("Normalize")
                    e = normalize_vector_field(e_)
                else:
                    e = crl_basis[l]

                self.crl_basis.append(e)

            else:
                msg += "False"
                logger.debug(msg)

        self.nbasis = len(self.crl_basis)

        assert self.nbasis > 0, "Number of basis functions must be greater than zero"
        self.regions = np.array(
            list(set(numpy_mpi.gather_broadcast(
                dmu.subdomain_data().array()))))

        self.nregions = len(self.regions)
        if weights is None:
            self.weights_arr = np.ones((self.nregions, self.nbasis))
        else:
            self.weights_arr = weights

        self.target_space = dolfin.VectorFunctionSpace(mesh,
                                                       "R",
                                                       0,
                                                       dim=self.nbasis)
        self.weight_space = dolfin.TensorFunctionSpace(mesh, "R", 0)
        self.dmu = dmu

        self.meshvols = [
            dolfin.Constant(dolfin.assemble(
                dolfin.Constant(1.0) * dmu(int(i))),
                            name="mesh volume") for i in self.regions
        ]

        OptimizationTarget.__init__(self, mesh)
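A hypothetical construction of the target above, assuming a mesh whose cells are marked with region ids and a dict of basis fields (all names are illustrative):

import dolfin

mesh = dolfin.UnitCubeMesh(2, 2, 2)

# Cell markers defining the strain regions (here a single region, id 1)
sfun = dolfin.MeshFunction("size_t", mesh, mesh.topology().dim(), 1)
dmu = dolfin.Measure("dx", domain=mesh, subdomain_data=sfun)

# Constant stand-ins for the circumferential/radial basis fields
V = dolfin.VectorFunctionSpace(mesh, "CG", 1)
basis = {
    "circumferential": dolfin.interpolate(dolfin.Constant((1, 0, 0)), V),
    "radial": dolfin.interpolate(dolfin.Constant((0, 1, 0)), V),
}

# target = RegionalStrainTarget(mesh, basis, dmu, tensor="gradu")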
Example #15
def get_max(f):
    return numpy_mpi.gather_broadcast(f.vector().get_local()).max()
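For example (assuming the same numpy_mpi helper is importable; on a single rank the gather is a no-op):

import dolfin

mesh = dolfin.UnitSquareMesh(8, 8)
V = dolfin.FunctionSpace(mesh, "CG", 1)
f = dolfin.interpolate(dolfin.Expression("x[0] + x[1]", degree=1), V)
print(get_max(f))  # -> 2.0, the maximum nodal value across all ranks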
    def build_problem(self, params, rd, paramvec):
        """Build optimal control problem

        params : dict
            Application parameter
        rd : :py:class`dolfin_adjoint.ReducedFunctional` 
            The reduced functional
        paramvec : :py:class`dolfin_adjoint.function`
            Control parameter
       
        """

        msg = "No supported optimization module installed"
        assert any(opt_import), msg

        opt_params = params["Optimization_parameters"].to_dict()

        x = gather_broadcast(paramvec.vector().get_local())
        nvar = len(x)
        self.paramvec = paramvec
        self.x = x
        self.rd = rd
        self._initial_guess = np.copy(x)

        if params["phase"] == PHASES[0]:

            lb = np.array([opt_params["matparams_min"]] * nvar)
            ub = np.array([opt_params["matparams_max"]] * nvar)

            tol = opt_params["passive_opt_tol"]
            max_iter = opt_params["passive_maxiter"]

            if opt_params["fixed_matparams"] != "":

                fixed = np.array(opt_params["fixed_matparams"].split(","), dtype=int)

                if opt_params["fixed_matparams_values"] == "":
                    msg = (
                        "Fixed matparams will be fixed to the "
                        + "initial value. Please provide input "
                        + "to 'fixed_matparams_values' for more "
                        + "control."
                    )
                    logger.warning(msg)
                    fixed_values = np.zeros(len(fixed))
                    for i, fi in enumerate(fixed):
                        fixed_values[i] = x[fi]

                else:
                    try:
                        fixed_values = np.array(
                            opt_params["fixed_matparams_values"].split(","),
                            dtype=float,
                        )
                    except ValueError:
                        logger.warning("Wrong format for 'fixed_matparams_values'")
                        fixed_values = np.zeros(len(fixed))
                        for i, fi in enumerate(fixed):
                            fixed_values[i] = x[fi]

                    if not len(fixed_values) == len(fixed):
                        msg = "Number of fixed values and fixed indices does not match"
                        logger.warning(msg)
                        fixed_values = np.zeros(len(fixed))
                        for i, fi in enumerate(fixed):
                            fixed_values[i] = x[fi]

                for i, fi in enumerate(fixed):
                    lb[fi] = ub[fi] = fixed_values[i]
                    self._initial_guess[fi] = fixed_values[i]

                logger.info("Upper bounds : {}".format(ub))
                logger.info("Lower bounds : {}".format(lb))

        else:

            lb = np.array([opt_params["gamma_min"]] * nvar)
            ub = np.array([opt_params["gamma_max"]] * nvar)

            tol = opt_params["active_opt_tol"]
            max_iter = opt_params["active_maxiter"]

        self.tol = tol
        self.max_iter = max_iter

        if nvar == 1:

            self.options = {
                "method": opt_params["method_1d"],
                "bounds": list(zip(lb, ub))[0],
                "tol": tol,
                "options": {"maxiter": max_iter},
            }

            self.oneD = True
            self.opt_type = "scipy_brent"

        else:

            self.oneD = False

            opt_params["nvar"] = nvar
            opt_params["m"] = x

            self.opt_type = opt_params.pop("opt_type", "scipy_slsqp")
            self._get_options(lb, ub, tol, max_iter, **opt_params)

        logger.info("".center(72, "#"))
        logger.info(" Building optimal control problem ".center(72, "#"))
        msg = (
            "\n\tNumber of variables:\t{}".format(nvar)
            + "\n\tLower bound:\t{}".format(np.min(lb))
            + "\n\tUpper bound:\t{}".format(np.max(ub))
            + "\n\tTolerance:\t{}".format(tol)
            + "\n\tMaximum iterations:\t{}".format(max_iter)
            + "\n\tOptimization algoritmh:\t{}\n".format(self.opt_type)
        )
        logger.info(msg)
        logger.info("".center(72, "#"))
Example #17
def make_control(params, patient):

    ##  Contraction parameter
    if params["gamma_space"] == "regional":
        sfun = merge_control(patient, params["merge_active_control"])
        gamma = RegionalParameter(sfun)
    else:
        gamma_family, gamma_degree = params["gamma_space"].split("_")
        gamma_space = dolfin.FunctionSpace(patient.mesh, gamma_family, int(gamma_degree))

        gamma = dolfin_adjoint.Function(gamma_space, name="activation parameter")

    ##  Material parameters

    # Create an object for each single material parameter
    if params["matparams_space"] == "regional":
        sfun = merge_control(patient, params["merge_passive_control"])
        paramvec_ = RegionalParameter(sfun)

    else:

        family, degree = params["matparams_space"].split("_")
        matparams_space = dolfin.FunctionSpace(patient.mesh, family, int(degree))
        paramvec_ = dolfin_adjoint.Function(matparams_space, name="matparam vector")

    # If we want to estimate more than one parameter

    # Number of passive parameters to optimize
    npassive = sum([not v for v in list(params["Fixed_parameters"].values())])

    if npassive <= 1:
        # If there is only one parameter, just pick the same object
        paramvec = paramvec_

        # If there are none, all material parameters are fixed
        if npassive == 0:
            logger.debug("All material parameters are fixed")
            params["optimize_matparams"] = False

    else:

        # Otherwise, we make a mixed parameter
        paramvec = MixedParameter(paramvec_, npassive)
        # Make a counter for the function assignment
        nopts_par = 0

    if params["phase"] in [PHASES[1]]:
        # Load the parameters from the result file

        # Open simulation file
        with dolfin.HDF5File(dolfin.mpi_comm_world(), params["sim_file"], "r") as h5file:

            # Get material parameter from passive phase file
            h5file.read(paramvec, PASSIVE_INFLATION_GROUP + "/optimal_control")

    matparams = params["Material_parameters"].to_dict()

    for par, val in matparams.items():

        # Check if material parameter should be fixed
        if not params["Fixed_parameters"][par]:
            # If not, then we need to put the parameter into some dolfin function

            # Use the material parameters from the parameters as initial guess
            if params["phase"] in [PHASES[0], PHASES[2]]:

                val_const = (
                    dolfin_adjoint.Constant(val)
                    if paramvec_.value_size() == 1
                    else dolfin_adjoint.Constant([val] * paramvec_.value_size())
                )

                if npassive <= 1:
                    paramvec.assign(val_const)

                else:
                    paramvec.assign_sub(val_const, nopts_par)

            if npassive <= 1:
                matparams[par] = paramvec

            else:
                matparams[par] = split(paramvec)[nopts_par]
                nopts_par += 1

    # Print the material parameter to stdout
    logger.info("\nMaterial Parameters")
    nopts_par = 0

    for par, v in matparams.items():
        if isinstance(v, (float, int)):
            logger.info("\t{}\t= {:.3f}".format(par, v))
        else:

            if npassive <= 1:
                v_ = numpy_mpi.gather_broadcast(v.vector().get_local())

            else:
                v_ = numpy_mpi.gather_broadcast(
                    paramvec.split(deepcopy=True)[nopts_par].vector().get_local()
                )
                nopts_par += 1

            sp_str = "(mean), spatially resolved" if len(v_) > 1 else ""
            logger.info("\t{}\t= {:.3f} {}".format(par, v_.mean(), sp_str))

    return paramvec, gamma, matparams
def solve_oc_problem(params,
                     rd,
                     paramvec,
                     return_solution=False,
                     store_solution=True):
    """Solve the optimal control problem

    :param params: Application parameters
    :param rd: The reduced functional
    :param paramvec: The control parameter(s)

    """

    # Create optimal control problem
    oc_problem = OptimalControl()
    oc_problem.build_problem(params, rd, paramvec)

    opt_params = params["Optimization_parameters"]
    x = oc_problem.get_initial_guess()
    nvar = len(x)

    if params["phase"] == PHASES[0] and not params["optimize_matparams"]:

        rd(x)
        rd.for_res["initial_control"] = (rd.initial_paramvec, )
        rd.for_res["optimal_control"] = rd.paramvec

        if store_solution:
            store(params, rd, {})

        if return_solution:
            return params, rd, {}

    else:

        logger.info("\n" + "".center(72, "-"))
        logger.info("Solve optimal contol problem".center(72, "-"))
        logger.info("".center(72, "-"))

        # Some flags
        solved = False
        done = False
        paramvec_start = paramvec.copy(True)
        state_start = rd.for_run.cphm.get_state()
        niter = 0

        par_max = np.max(
            numpy_mpi.gather_broadcast(paramvec_start.vector().get_local()))
        par_min = np.min(
            numpy_mpi.gather_broadcast(paramvec_start.vector().get_local()))
        gamma_max = float(params["Optimization_parameters"]["gamma_max"])
        mat_max = float(params["Optimization_parameters"]["matparams_max"])
        mat_min = float(params["Optimization_parameters"]["matparams_min"])

        while not done and niter < 10:
            # Evaluate the reduced functional in case the solver crashes at
            # the first point. If this is not done and the solver crashes at
            # the first point, dolfin-adjoint has no recording and will raise
            # an exception.

            # If this fails, there is no hope.
            try:

                rd(paramvec)
            except SolverDidNotConverge:
                print("NOOOO!")
                if len(rd.controls_lst) > 0:
                    assign_to_vector(paramvec.vector(),
                                     rd.controls_lst[-1].array())
                else:
                    msg = "Unable to converge. " + "Choose a different initial guess"
                    logger.error(msg)
                try:
                    rd(paramvec)
                except:
                    msg = "Unable to converge. " + "Try changing the scales and restart"
                    logger.error(msg)

            # Create optimal control problem
            oc_problem = OptimalControl()
            oc_problem.build_problem(params, rd, paramvec)

            try:
                # Try to solve the problem
                rd, opt_result = oc_problem.solve()

            except SolverDidNotConverge:

                logger.warning(Text.red("Solver failed - reduce step size"))
                # If the solver did not converge assign the state from
                # previous iteration and reduce the step size and try again
                rd.reset()
                rd.derivative_scale /= 2.0

                # There might be many reasons why the solver is not converging,
                # but most likely it happens because the optimization algorithm
                # tries to evaluate the function at a point in the parameter
                # space that is close to the boundary. One thing we can do is
                # to reduce the magnitude of the gradient (but keep the
                # direction) so that the step size is reduced. Another thing we
                # can do is to change the bounds so that the algorithm does not
                # go into the nasty parts of the parameter space. Usually the
                # main problem is that the optimization tries an activation
                # that is too strong (high gamma_max) in the active phase, or a
                # material parameter set that is too soft (low material
                # parameters) in the passive phase.
                params["Optimization_parameters"]["gamma_max"] = np.max([
                    par_max,
                    0.9 * params["Optimization_parameters"]["gamma_max"]
                ])
                params["Optimization_parameters"]["matparams_min"] = np.min([
                    par_min,
                    2 * params["Optimization_parameters"]["matparams_min"]
                ])

            else:
                params["Optimization_parameters"]["gamma_max"] = gamma_max
                params["Optimization_parameters"]["matparams_min"] = mat_min
                rd.derivative_scale = 1.0
                done = True

            niter += 1

        if not done:
            opt_result = {}
            control_idx = np.argmin(rd.func_values_lst)
            x = numpy_mpi.gather_broadcast(
                rd.controls_lst[control_idx].array())
            msg = "Unable to solve problem. Choose the best value"
            logger.warning(msg)
        else:
            x = (np.array([opt_result.pop("x")]) if nvar == 1 else
                 numpy_mpi.gather_broadcast(opt_result.pop("x")))

        optimum = dolfin_adjoint.Function(paramvec.function_space())
        numpy_mpi.assign_to_vector(optimum.vector(),
                                   numpy_mpi.gather_broadcast(x))

        logger.info(Text.blue("\nForward solution at optimal parameters"))
        val = rd.for_run(optimum, False)

        numpy_mpi.assign_to_vector(paramvec.vector(),
                                   numpy_mpi.gather_broadcast(x))

        rd.for_res["initial_control"] = (rd.initial_paramvec, )
        rd.for_res["optimal_control"] = rd.paramvec

        print_optimization_report(
            params,
            rd.paramvec,
            rd.initial_paramvec,
            rd.ini_for_res,
            rd.for_res,
            opt_result,
        )

        if store_solution:
            store(params, rd, opt_result)

        if return_solution:
            return params, rd, opt_result
def run_active_optimization(params, patient):
    """FIXME! briefly describe function

    :param params: 
    :param patient: 
    :returns: 
    :rtype: 

    """

    from .io import contract_point_exists

    logger.info(Text.blue("\nRun Active Optimization"))

    # Load patient data, and set up the simulation
    measurements, solver_parameters, pressure, gamma = setup_simulation(
        params, patient)

    # Loop over contract points
    i = 0
    logger.info("Number of contract points: {}".format(
        patient.num_contract_points))

    while i < patient.num_contract_points:
        params["active_contraction_iteration_number"] = i

        if not contract_point_exists(params):

            # Number of times we have interpolated in order
            # to be able to change the pressure
            attempts = 0
            pressure_change = False

            while not pressure_change and attempts < 8:

                try:
                    rd, gamma = run_active_optimization_step(
                        params,
                        patient,
                        solver_parameters,
                        measurements,
                        pressure,
                        gamma,
                    )
                except UnableToChangePressureExeption:
                    logger.info("Unable to change pressure. Exception caught")

                    logger.info("Lets interpolate. Add one extra point")
                    patient.interpolate_data(i +
                                             patient.passive_filling_duration -
                                             1)

                    # Update the measurements
                    measurements = get_measurements(params, patient)

                    attempts += 1

                else:
                    pressure_change = True

                    # If you want to apply a different initial guess than
                    # the previous value, assign it now and evaluate.

                    if params["initial_guess"] == "zero":
                        zero = get_constant(gamma.value_size(),
                                            gamma.value_rank(), 0.0)

                        g = Function(gamma.function_space())
                        g.assign(zero)
                        rd(g)
                    elif params["initial_guess"] == "smooth":

                        # We find a constant that represents the previous state

                        if params["gamma_space"] == "regional":

                            # Sum all regional values with weights given by the size of the regions
                            meshvols = [
                                assemble(
                                    (1.0) *
                                    dx(domain=patient.mesh,
                                       subdomain_data=patient.sfun)(int(r)))
                                for r in set(
                                    numpy_mpi.gather_broadcast(
                                        patient.sfun.array()))
                            ]
                            meshvol = sum(meshvols)
                            g_arr = numpy_mpi.gather_broadcast(
                                gamma.vector().get_local())
                            val = sum(np.multiply(g_arr,
                                                  meshvols)) / float(meshvol)
                            c = get_constant(gamma.value_size(),
                                             gamma.value_rank(), val)

                        else:

                            # Project the activation parameter onto the real line
                            g_proj = dolfin_adjoint.project(
                                gamma,
                                dolfin.FunctionSpace(patient.mesh, "R", 0))
                            val = numpy_mpi.gather_broadcast(
                                g_proj.vector().get_local())[0]
                            c = get_constant(gamma.value_size(),
                                             gamma.value_rank(), val)

                        g = Function(gamma.function_space())
                        g.assign(c)
                        rd(g)

                    logger.info("\nSolve optimization problem.......")
                    solve_oc_problem(params, rd, gamma)
                    dolfin_adjoint.adj_reset()

            if not pressure_change:
                raise RuntimeError("Unable to increasure")

        else:

            # Make sure to do interpolation if that was done earlier
            plv = get_simulated_pressure(params)
            if not plv == measurements["pressure"][i + 1]:
                logger.info("Interpolate")
                patient.interpolate_data(i + patient.passive_filling_duration -
                                         1)
                measurements = get_measurements(params, patient)
                i -= 1
        i += 1
Example #20
    def __call__(self, value, return_fail=False):

        logger.debug("\nEvaluate functional...")
        dolfin_adjoint.adj_reset()
        self.iter += 1

        paramvec_new = dolfin_adjoint.Function(self.paramvec.function_space(), name="new control")

        if isinstance(value, (dolfin.Function, RegionalParameter, MixedParameter)):
            paramvec_new.assign(value)
        elif isinstance(value, (float, int)):
            numpy_mpi.assign_to_vector(paramvec_new.vector(), np.array([value]))
        elif isinstance(value, dolfin_adjoint.enlisting.Enlisted):
            val_delisted = delist(value, self.controls)
            paramvec_new.assign(val_delisted)

        else:
            numpy_mpi.assign_to_vector(paramvec_new.vector(), numpy_mpi.gather_broadcast(value))

        logger.debug(Text.yellow("Start annotating"))
        dolfin.parameters["adjoint"]["stop_annotating"] = False

        if self.verbose:
            arr = numpy_mpi.gather_broadcast(paramvec_new.vector().get_local())
            msg = (
                "\nCurrent value of control:"
                + "\n\t{:>8}\t{:>8}\t{:>8}\t{:>8}\t{:>8}".format(
                    "Min", "Mean", "Max", "argmin", "argmax"
                )
                + "\n\t{:>8.2f}\t{:>8.2f}\t{:>8.2f}\t{:>8d}\t{:>8d}".format(
                    np.min(arr),
                    np.mean(arr),
                    np.max(arr),
                    np.argmin(arr),
                    np.argmax(arr),
                )
            )
            logger.info(msg)
        # Change loglevel to avoid too much printing (do not change if in debug mode)
        change_log_level = (self.log_level == logging.INFO) and not self.verbose

        if change_log_level:
            logger.setLevel(logging.WARNING)

        t = dolfin.Timer("Forward run")
        t.start()

        logger.debug("\nEvaluate forward model")

        self.for_res, crash = self.for_run(paramvec_new, True)

        for_time = t.stop()
        logger.debug(
            (
                "Evaluating forward model done. "
                + "Time to evaluate = {} seconds".format(for_time)
            )
        )
        self.forward_times.append(for_time)

        if change_log_level:
            logger.setLevel(self.log_level)

        if self.first_call:
            # Store initial results
            self.ini_for_res = self.for_res
            self.first_call = False

            # Some printing
            logger.info(print_head(self.for_res))

        control = dolfin_adjoint.Control(self.paramvec)

        dolfin_adjoint.ReducedFunctional.__init__(
            self, dolfin_adjoint.Functional(self.for_res["total_functional"]), control
        )

        if crash:
            # This exception is raised if the solver uses more than x steps.
            # The solver is stuck; return a large value so it does not get stuck again
            logger.warning(
                Text.red(
                    "Iteration limit exceeded. Return a large value of the functional"
                )
            )
            # Return a big value, and make sure to increment the big value so
            # that the next big value is different from the current one.
            func_value = np.inf
            self.nr_crashes += 1

        else:
            func_value = self.for_res["func_value"]

        grad_norm = (
            None if len(self.grad_norm_scaled) == 0 else self.grad_norm_scaled[-1]
        )

        self.func_values_lst.append(func_value * self.scale)
        self.controls_lst.append(dolfin.Vector(paramvec_new.vector()))

        logger.debug(Text.yellow("Stop annotating"))
        dolfin.parameters["adjoint"]["stop_annotating"] = True

        self.print_line()

        if return_fail:
            return self.scale * func_value, crash

        return self.scale * func_value
    def get_value(self):
        return numpy_mpi.gather_broadcast(
            self.functional.vector().get_local())[0]