def _get_results(self):
        """Removed results unit from function

        :return: The formatted results
        :rtype: ResultsObject

        """
        results = ResultsObject()
        results.objective = self.objective_value
        results.parameter_covariance = self.cov
        results.load_from_pyomo_model(self.model)
        results.show_parameters(self.confidence)

        if self._spectra_given:
            from kipet.calculation_tools.beer_lambert import D_from_SC
            D_from_SC(self.model, results)

        if hasattr(self.model, self.__var.model_parameter_scaled):
            setattr(results, self.__var.model_parameter,
                    {name: getattr(self.model, self.__var.model_parameter)[name].value
                     * getattr(self.model, self.__var.model_parameter_scaled)[name].value
                     for name in self.model.parameter_names})
        else:
            setattr(results, self.__var.model_parameter,
                    {name: getattr(self.model, self.__var.model_parameter)[name].value
                     for name in self.model.parameter_names})

        if (self.termination_condition is not None
                and self.termination_condition != TerminationCondition.optimal):
            raise Exception("The current iteration was unsuccessful.")

        if self._estimability:
            return self.hessian, results

        return results
Example No. 2
def compute_diff_results(results1, results2):
    """Calculate differences between results

    :param results1: The first results object
    :param results2: The second results object

    :return: The ResultsObject containing the differences
    :rtype: ResultsObject

    """
    diff_results = ResultsObject()
    diff_results.Z = results1.Z - results2.Z
    diff_results.S = results1.S - results2.S
    diff_results.C = results1.C - results2.C
    return diff_results
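
# Usage sketch (assumption, not part of the original source): comparing two
# completed estimations. `results_run1` and `results_run2` stand for
# hypothetical ResultsObject instances returned by separate runs.
def _example_compare_runs(results_run1, results_run2):
    """Hypothetical helper: largest absolute concentration discrepancy."""
    diff = compute_diff_results(results_run1, results_run2)
    return diff.C.abs().max().max()  # assumes C is a pandas DataFrame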
Example No. 3
    def _get_results(self, Se):
        """Arranges the results into a ResultsObject

        :param list Se: The list of estimable parameters

        :return: The results from the parameter estimation process
        :rtype: ResultsObject

        """
        scaled_parameter_var = 'K'
        results = ResultsObject()
        results.estimable_parameters = Se
        results.load_from_pyomo_model(self.model)

        if hasattr(self.model, scaled_parameter_var):
            results.P = {
                name: self.model.P[name].value *
                getattr(self.model, scaled_parameter_var)[name].value
                for name in self.model.parameter_names
            }
        else:
            results.P = {
                name: self.model.P[name].value
                for name in self.model.parameter_names
            }

        return results
Example No. 4
    def run_sim(self, solver, **kwds):
        """ Runs simulation by solving nonlinear system with IPOPT

        :param str solver: The name of the nonlinear solver to be used
        :param dict kwds: A dict of options passed to the solver

        :Keyword Args:

            - solver_opts (dict, optional): Options passed to the nonlinear solver
            - variances (dict, optional): Map of component name to noise variance. The
              map also contains the device noise variance
            - tee (bool, optional): flag to tell the simulator whether to stream output
              to the terminal or not

        :return: The simulation results
        :rtype: ResultsObject

        """
        solver_opts = kwds.pop('solver_opts', dict())
        tee = kwds.pop('tee', False)
        seed = kwds.pop('seed', None)

        if not self.model.alltime.get_discretization_info():
            raise RuntimeError(
                'apply discretization first before running simulation')

        np.random.seed(seed)
        opt = SolverFactory(solver)
        for key, val in solver_opts.items():
            opt.options[key] = val

        solver_results = opt.solve(self.model,
                                   tee=tee,
                                   symbolic_solver_labels=True)
        results = ResultsObject()
        results.load_from_pyomo_model(self.model)

        return results
Example No. 5
def run_sim(model, solver, **kwds):
    """ Runs simulation by solving nonlinear system with IPOPT

    :param str solver: The name of the nonlinear solver to be used
    :param dict kwds: A dict of options passed to the solver

    :Keyword Args:

        - solver_opts (dict, optional): Options passed to the nonlinear solver
        - variances (dict, optional): Map of component name to noise variance. The
          map also contains the device noise variance
        - tee (bool, optional): flag to tell the simulator whether to stream output
          to the terminal or not

    :return: The simulation results
    :rtype: ResultsObject

    """
    solver_opts = kwds.pop('solver_opts', dict())
    sigmas = kwds.pop('variances', dict())
    tee = kwds.pop('tee', False)
    seed = kwds.pop('seed', None)

    if not model.alltime.get_discretization_info():
        raise RuntimeError(
            'apply discretization first before running simulation')

    # Adjusts the seed to reproduce results with noise
    np.random.seed(seed)

    # Variables
    # Z_var = self.model.Z
    # if hasattr(self.model, 'Cm'):
    #     C_var = self.model.Cm  # added for estimation with inputs and conc data CS

    # if self._huplc_given: #added for additional data CS
    #     Dhat_var = self.model.Dhat
    #     Chat_var = self.model.Chat

    # # Deactivates objective functions for simulation
    # if self.model.nobjectives():
    #     objectives_map = self.model.component_map(ctype=Objective, active=True)
    #     active_objectives_names = []
    #     for obj in objectives_map.values():
    #         name = obj.getname()
    #         active_objectives_names.append(name)
    #         str_warning = 'Deactivating objective {} for simulation'.format(name)
    #         warnings.warn(str_warning)
    #         obj.deactivate()

    opt = SolverFactory(solver)

    for key, val in solver_opts.items():
        opt.options[key] = val

    solver_results = opt.solve(model, tee=tee, symbolic_solver_labels=True)
    results = ResultsObject()

    # activates objective functions that were deactivated
    # if self.model.nobjectives():
    #     active_objectives_names = []
    #     objectives_map = self.model.component_map(ctype=Objective)
    #     for name in active_objectives_names:
    #         objectives_map[name].activate()

    # retrieving solutions into the results object
    results.load_from_pyomo_model(model)

    return results
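
# Usage sketch (assumption, not part of the original source): `model` stands
# for a KIPET/Pyomo model that has already been discretized; the solver options
# shown are illustrative only.
def _example_run_sim(model):
    """Hypothetical helper showing a typical call to run_sim."""
    return run_sim(
        model,
        'ipopt',
        tee=False,                              # do not stream solver output
        solver_opts={'linear_solver': 'ma27'},  # example IPOPT option
        seed=12345,                             # reproducible noise, if used
    )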
Example No. 6
def run_alternate_method(var_est_object, solver, run_opt_kwargs):
    """Calls the alternative method - Short et al 2020

    This is an improved method for determining the component variances. This method has been removed from the
    VarianceEstimator class for simplification.

    :param VarianceEstimator var_est_object: The variance estimation object
    :param str solver: The solver being used (currently not used)
    :param dict run_opt_kwargs: The dict of user settings passed on from the ReactionModel

    :return results: The results from the variance estimation
    :rtype: ResultsObject

    """
    # Unpack the keyword arguments
    solver_opts = run_opt_kwargs.pop('solver_opts', dict())
    init_sigmas = run_opt_kwargs.pop('initial_sigmas', float())
    tee = run_opt_kwargs.pop('tee', False)
    tol = run_opt_kwargs.pop('tolerance', 5.0e-5)
    A = run_opt_kwargs.pop('subset_lambdas', None)
    secant_point2 = run_opt_kwargs.pop('secant_point', None)
    individual_species = run_opt_kwargs.pop('individual_species', False)

    # Solver is fixed to ipopt
    solver = 'ipopt'

    nu_squared = var_est_object.solve_max_device_variance(
        solver, tee=tee, subset_lambdas=A, solver_opts=solver_opts)

    second_point = init_sigmas * 10
    if secant_point2 is not None:
        second_point = secant_point2

    itersigma = dict()
    itersigma[0] = second_point
    itersigma[1] = init_sigmas
    iterdelta = dict()
    count = 1
    funcval = 1000
    tee = False

    iterdelta[0] = _solve_delta_given_sigma(var_est_object,
                                            solver,
                                            tee=tee,
                                            subset_lambdas=A,
                                            solver_opts=solver_opts,
                                            init_sigmas=itersigma[0])

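    # Secant iteration on the overall model variance: each pass re-solves
    # delta^2 for the current sigma^2 guess (itersigma[count]) and drives the
    # residual funcval = nu_squared - delta^2 - sigma^2*(sum(S)/n_wavelengths)
    # toward zero, updating itersigma with the secant formula below.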
    while abs(funcval) >= tol:
        print("Overall sigma value at iteration", count, ": ",
              itersigma[count])

        new_delta = _solve_delta_given_sigma(var_est_object,
                                             solver,
                                             tee=tee,
                                             subset_lambdas=A,
                                             solver_opts=solver_opts,
                                             init_sigmas=itersigma[count])

        print("New delta_sq val: ", new_delta)
        iterdelta[count] = new_delta

        def func1(nu_squared, new_delta, init_sigmas):
            sigmult = 0
            nwp = len(var_est_object.model.meas_lambdas)
            for l in var_est_object.model.meas_lambdas:
                for k in var_est_object.comps['unknown_absorbance']:
                    sigmult += value(var_est_object.model.S[l, k])
            funcval = nu_squared - new_delta - init_sigmas * (sigmult / nwp)
            return funcval, sigmult

        funcval, sigmult = func1(nu_squared, new_delta, itersigma[count])

        if abs(funcval) <= tol:
            break
        else:
            denom_secant = func1(
                nu_squared, new_delta, itersigma[count])[0] - func1(
                    nu_squared, iterdelta[count - 1], itersigma[count - 1])[0]
            itersigma[count + 1] = itersigma[count] - func1(
                nu_squared, new_delta, itersigma[count])[0] * (
                    (itersigma[count] - itersigma[count - 1]) / denom_secant)

            if itersigma[count + 1] < 0:
                itersigma[count + 1] = -1 * itersigma[count + 1]
            count += 1

    if individual_species:
        print(
            "Solving for individual species' variance based on the obtained delta"
        )
        max_likelihood_val, sigma_vals, stop_it, results = \
            _solve_sigma_given_delta(var_est_object,
                                          solver,
                                          subset_lambdas= A,
                                          solver_opts=solver_opts,
                                          tee=tee,
                                          delta=new_delta)
    else:
        print("The overall model variance is: ", itersigma[count])

        sigma_vals = {}
        for k in var_est_object.comps['unknown_absorbance']:
            sigma_vals[k] = abs(itersigma[count])

    print(f'sigma_vals: {sigma_vals}')
    results = ResultsObject()
    results.load_from_pyomo_model(var_est_object.model)
    results.sigma_sq = sigma_vals
    results.sigma_sq['device'] = new_delta

    return results
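
# Illustrative settings (assumption, not from the original source): the keys
# mirror the run_opt_kwargs entries popped above; the values are placeholders,
# not recommended defaults.
example_alternate_method_kwargs = {
    'solver_opts': {'linear_solver': 'ma57'},
    'initial_sigmas': 1e-7,        # starting guess for the overall model variance
    'secant_point': 1e-6,          # optional second point for the secant iteration
    'tolerance': 5.0e-5,
    'subset_lambdas': None,        # or an iterable of wavelengths to use
    'individual_species': False,   # True -> per-species variances from the final delta
    'tee': False,
}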
Example No. 7
def run_direct_sigmas_method(var_est_object,
                             solver,
                             run_opt_kwargs,
                             fixed=False):
    """"Calls the direct sigmas method

    :param VarianceEstimator var_est_object: The variance estimation object
    :param str solver: The solver being used
    :param dict run_opt_kwargs: The dict of user settings passed on from the ReactionModel

    :return results: The results from the variance estimation
    :rtype: ResultsObject

    """
    solver_opts = run_opt_kwargs.pop('solver_opts', dict())
    tee = run_opt_kwargs.pop('tee', False)
    A = run_opt_kwargs.pop('freq_subset_lambdas', None)
    fixed_device_var = run_opt_kwargs.pop('fixed_device_variance', None)
    device_range = run_opt_kwargs.pop('device_range', None)
    num_points = run_opt_kwargs.pop('num_points', None)

    print("Solving for sigmas assuming known device variances")

    print('Device range')
    print(device_range)

    if device_range:
        if not isinstance(device_range, tuple):
            print("device_range is of type {}".format(type(device_range)))
            print("It should be a tuple")
            raise Exception
        elif device_range[0] > device_range[1]:
            print(
                "device_range needs to be arranged in order from lowest to highest"
            )
            raise Exception
        else:
            print(
                "Device range means that we will solve iteratively for different delta values in that range"
            )

    else:
        fixed = True
        if fixed_device_var is None:
            raise ValueError(
                "If using fixed variance, this needs to be provided.")

    if device_range and not num_points:
        print(
            "Need to specify the number of points that we wish to evaluate in the device range"
        )
    if not num_points:
        pass
    elif not isinstance(num_points, int):
        print("num_points needs to be an integer!")
        raise Exception

    if not device_range and not num_points:

        print("assessing for the value of delta provided")
        if not fixed_device_var:
            print(
                "If iterative method not selected then need to provide fixed device variance (delta**2)"
            )
            raise Exception
        else:
            if not isinstance(fixed_device_var, float):
                raise Exception(
                    "fixed device variance needs to be of type float")

    if not fixed:

        results_sigmas_dict = {}

        dist = abs((device_range[1] - device_range[0]) / num_points)

        max_likelihood_vals = []
        delta_vals = []
        iteration_counter = []
        delta = device_range[0]

        count = 0

        print('*** Starting Variance Iterations ***')

        while delta < device_range[1]:

            results_sigmas_dict[count] = {}

            print(f"Iteration: {count}\tdelta_sq: {delta}")
            max_likelihood_val, sigma_vals, stop_it, results = \
                _solve_sigma_given_delta(var_est_object,
                                         solver,
                                         subset_lambdas=A,
                                         solver_opts=solver_opts,
                                         tee=tee,
                                         delta=delta
                                    )

            if max_likelihood_val >= 5000:
                max_likelihood_vals.append(5000)
                delta_vals.append(log(delta))
                iteration_counter.append(count)
            else:
                max_likelihood_vals.append(max_likelihood_val)
                delta_vals.append(log(delta))
                iteration_counter.append(count)

            results_sigmas_dict[count]['delta'] = delta
            results_sigmas_dict[count]['sigmas'] = sigma_vals
            #results_sigmas_dict[count]['results'] = results

            delta = delta + dist
            count += 1

        return results_sigmas_dict

    else:
        # The optimization will be conducted at the fixed value for delta

        max_likelihood_val, sigma_vals, stop_it, results = \
            _solve_sigma_given_delta(var_est_object,
                                     solver,
                                     subset_lambdas=A,
                                     solver_opts=solver_opts,
                                     tee=tee,
                                     delta=fixed_device_var
                                )

        delta = fixed_device_var

        results = ResultsObject()
        results.load_from_pyomo_model(var_est_object.model)
        results.sigma_sq = sigma_vals
        results.sigma_sq['device'] = delta

        return results
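
# Illustrative settings (assumption, not from the original source): the two ways
# this routine is driven, based on the keys popped above; values are placeholders.
direct_sigmas_fixed_kwargs = {
    'fixed_device_variance': 1.5e-6,   # known delta**2 (must be a float)
    'freq_subset_lambdas': None,
    'tee': False,
}
direct_sigmas_sweep_kwargs = {
    'device_range': (1e-7, 1e-5),      # (low, high) delta**2 values to scan
    'num_points': 10,                  # number of delta values in that range
    'freq_subset_lambdas': None,
    'tee': False,
}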
Example No. 8
def run_method(var_est_object, solver, run_opt_kwargs):
    """This is the original method for estimating variances from Chen et
    al. 2016

    This is an improved method for determining the component variances. This method has been removed from the
    VarianceEstimator class for simplification.

    :param VarianceEstimator var_est_object: The variance estimation object
    :param str solver: The solver being used (currently not used)
    :param dict run_opt_kwargs: The dict of user settings passed on from the ReactionModel

    :return results: The results from the variance estimation
    :rtype: ResultsObject

    """
    solver_opts = run_opt_kwargs.pop('solver_opts', dict())
    max_iter = run_opt_kwargs.pop('max_iter', 400)
    tol = run_opt_kwargs.pop('tolerance', 5.0e-5)
    tee = run_opt_kwargs.pop('tee', False)
    norm_order = run_opt_kwargs.pop('norm', np.inf)
    A = run_opt_kwargs.pop('subset_lambdas', None)
    init_C = run_opt_kwargs.pop('init_C', None)
    lsq_ipopt = run_opt_kwargs.pop('lsq_ipopt', False)
    species_list = run_opt_kwargs.pop('subset_components', None)
    fixed_device_var = run_opt_kwargs.pop('fixed_device_variance', None)

    if init_C is None:
        solve_initalization(var_est_object,
                            solver,
                            subset_lambdas=A,
                            solver_opts=solver_opts,
                            tee=tee)
    else:
        for t in var_est_object.model.times_spectral:
            for k in var_est_object.comps['unknown_absorbance']:
                var_est_object.model.C[t, k].value = init_C[k][t]
                var_est_object.model.Z[t, k].value = init_C[k][t]

        # This comes from Optimizer
        s_array = S_from_DC(var_est_object.model, init_C)
        S_frame = pd.DataFrame(
            data=s_array,
            columns=var_est_object.comps['unknown_absorbance'],
            index=var_est_object._meas_lambdas)

        if hasattr(var_est_object, '_abs_components'):
            component_set = var_est_object._abs_components
        else:
            component_set = var_est_object._mixture_components

        for l in var_est_object._meas_lambdas:
            for k in component_set:
                var_est_object.model.S[l, k].value = S_frame[k][l]  # 1e-2
                if hasattr(var_est_object.model, 'known_absorbance'):
                    if k in var_est_object.model.known_absorbance:
                        var_est_object.model.S[
                            l,
                            k].value = var_est_object.model.known_absorbance_data[
                                k][l]

    print("{: >11} {: >20}".format('Iter', '|Zi-Zi+1|'))
    logiterfile = "iterations.log"
    if os.path.isfile(logiterfile):
        os.remove(logiterfile)

    if lsq_ipopt:
        build_s_model(var_est_object)
        build_c_model(var_est_object)
    else:
        if species_list is None:
            build_scipy_lsq_arrays(var_est_object)
        else:
            lsq_ipopt = True
            build_s_model(var_est_object)
            build_c_model(var_est_object)

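    # Alternating iteration from Chen et al. (2016): each pass solves the Z
    # (model) subproblem, then the S and C subproblems (via IPOPT or SciPy
    # least squares), and stops once the change in Z between successive
    # iterations drops below the tolerance.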
    for it in range(max_iter):

        rb = ResultsObject()

        # vars_to_load = ['Z', 'C', 'Cs', 'S', 'Y']
        # if not hasattr(var_est_object, '_abs_components'):
        #     vars_to_load.remove('Cs')
        rb.load_from_pyomo_model(
            var_est_object.model)  #, to_load=vars_to_load)

        solve_Z(var_est_object, solver)

        if lsq_ipopt:
            solve_S(var_est_object, solver)
            solve_C(var_est_object, solver)
        else:
            solved_s = solve_s_scipy(var_est_object)
            solved_c = solve_c_scipy(var_est_object)

        ra = ResultsObject()
        ra.load_from_pyomo_model(
            var_est_object.model)  #, to_load=vars_to_load)

        r_diff = compute_diff_results(rb, ra)
        Z_norm = r_diff.compute_var_norm('Z', norm_order)

        if it > 0:
            print("{: >11} {: >20}".format(it, Z_norm))
        _log_iterations(var_est_object, logiterfile, it)
        if Z_norm < tol and it >= 1:
            break

    results = ResultsObject()

    results.load_from_pyomo_model(var_est_object.model)

    print('Iterative optimization converged. Estimating variances now')
    solved_variances = _solve_variances(var_est_object,
                                        results,
                                        fixed_dev_var=fixed_device_var)

    compute_D_given_SC(var_est_object, results)

    results.P = {
        name: var_est_object.model.P[name].value
        for name in var_est_object.model.parameter_names
    }

    # removes temporary files. This needs to be changed to work with pyutilib
    if os.path.exists(var_est_object._tmp2):
        os.remove(var_est_object._tmp2)
    if os.path.exists(var_est_object._tmp3):
        os.remove(var_est_object._tmp3)
    if os.path.exists(var_est_object._tmp4):
        os.remove(var_est_object._tmp4)

    return results
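
# Illustrative settings (assumption, not from the original source): the keys
# mirror the run_opt_kwargs entries popped at the top of run_method; the values
# are placeholders. `np` is the same numpy alias used by the surrounding module.
chen_method_kwargs = {
    'solver_opts': {'linear_solver': 'ma57'},
    'max_iter': 400,
    'tolerance': 5.0e-5,
    'norm': np.inf,
    'subset_lambdas': None,
    'init_C': None,                 # optionally, initial concentration values
    'lsq_ipopt': False,             # True -> solve the S and C subproblems in IPOPT
    'subset_components': None,
    'fixed_device_variance': None,  # optionally fix delta**2
    'tee': False,
}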
Example No. 9
    def solve_consolidated_model(self, global_params=None, **kwargs):
        """This function consolidates the individual models into a single
        optimization problem that links the parameters and spectra (if able)
        from each experiment.

        :param list global_params: This is the list of global parameters to be linked in the MEE
        :param dict kwargs: The dictionary of options passed from ReactionSet
        
        """
        solver_opts = kwargs.get('solver_opts', {'linear_solver': 'ma57'})
        tee = kwargs.get('tee', True)
        scaled_variance = kwargs.get('scaled_variance', False)
        shared_spectra = kwargs.get('shared_spectra', True)
        solver = kwargs.get('solver', 'ipopt')
        parameter_means = kwargs.get('mean_start', True)

        covariance = kwargs.get('covariance', None)

        from kipet import __version__ as version_number

        print('#' * 40)
        print(f'# KIPET version {version_number}')
        print(f'# Date: {list(self.reaction_models.values())[0].timestamp}')
        print(f'# File: {list(self.reaction_models.values())[0].file.stem}')
        print(
            f'# ReactionModel instances: {", ".join(list(self.reaction_models.keys()))}'
        )
        print('#' * 40)

        print("\n# Multiple Experiments: Starting parameter estimation \n")

        combined_model = ConcreteModel()

        self.variance_scale = 1
        if scaled_variance:
            self._scale_variances()

        if global_params is None:
            # This needs to be a global attr
            self.global_params = self.all_params
        else:
            self.global_params = global_params

        # Parameter name list
        self.global_params_full = [f'P[{p}]' for p in self.global_params]

        def build_individual_blocks(m, exp):
            """This function forms the rule for the construction of the individual blocks 
            for multiple experiments, referenced in run_parameter_estimation. This function 
            is not meant to be used by users directly.

            :param ConcreteModel m: The concrete model that we are adding the block to
            :param list exp: A list containing the experiments
                
            :return ConcreteModel m: Pyomo model inside the block (after modification)

            """
            list_components = self.reaction_models[exp].components.names
            with_d_vars = True
            m = copy.copy(self.reaction_models[exp].p_model)

            # Quick fix - I don't know what is causing this
            if hasattr(m, 'alltime_domain'):
                m.del_component('alltime_domain')
            if hasattr(m, 'huplctime_domain'):
                m.del_component('huplctime_domain')

            if with_d_vars and hasattr(m, 'D'):

                m.D_bar = Var(m.times_spectral, m.meas_lambdas)

                def rule_D_bar(m, t, l):
                    return m.D_bar[t, l] == sum(
                        getattr(m, self.__var.concentration_spectra)[t, k] *
                        getattr(m, self.__var.spectra_species)[l, k]
                        for k in self.reaction_models[exp].p_estimator.
                        comps['unknown_absorbance'])

                m.D_bar_constraint = Constraint(m.times_spectral,
                                                m.meas_lambdas,
                                                rule=rule_D_bar)

            m.error = Var(bounds=(0, None))

            def rule_objective(m):

                expr = 0
                spectral_term = 0
                concentration_term = 0
                measured_concentration_term = 0
                complementary_state_term = 0
                weights = [1, 1, 1, 1]
                obj_variances = self.variances

                if hasattr(m, self.__var.spectra_data):
                    spectral_term = absorption_objective(
                        m,
                        device_variance=obj_variances[exp]['device'],
                        g_option=self.reaction_models[exp].
                        _G_data['G_contribution'],
                        with_d_vars=with_d_vars,
                        shared_spectra=shared_spectra,
                        species_list=list_components)

                    concentration_term = conc_objective(
                        m, variance=obj_variances[exp], source='spectra')

                if hasattr(m, self.__var.concentration_measured):
                    measured_concentration_term = conc_objective(
                        m, variance=obj_variances[exp])

                if hasattr(m, self.__var.state):
                    complementary_state_term = comp_objective(
                        m, variance=obj_variances[exp])

                expr = weights[0]*spectral_term + \
                    weights[1]*concentration_term + \
                    weights[2]*measured_concentration_term + \
                    weights[3]*complementary_state_term

                return m.error == expr

            m.obj_const = Constraint(rule=rule_objective)

            return m

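        # Each experiment becomes its own Pyomo Block; global parameters (and,
        # when shared_spectra is set, the S values) are then tied together
        # across blocks by equality constraints so that a single optimization
        # covers all data sets.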
        combined_model.experiment = Block(self.experiments,
                                          rule=build_individual_blocks)
        combined_model.map_exp_to_count = dict(enumerate(self.experiments))

        def param_linking_rule(m, exp, param):
            prev_exp = None
            key = next(
                key for key, value in combined_model.map_exp_to_count.items()
                if value == exp)
            if key == 0:
                return Constraint.Skip
            else:
                for key, val in combined_model.map_exp_to_count.items():
                    if val == exp:
                        prev_exp = combined_model.map_exp_to_count[key - 1]
                if param in self.global_params and prev_exp is not None:
                    return getattr(
                        combined_model.experiment[exp],
                        self.__var.model_parameter)[param] == getattr(
                            combined_model.experiment[prev_exp],
                            self.__var.model_parameter)[param]
                else:
                    return Constraint.Skip

        set_fixed_params = set()

        for exp in self.experiments:
            for param, param_obj in getattr(
                    combined_model.experiment[exp],
                    self.__var.model_parameter).items():
                if param_obj.is_fixed():
                    set_fixed_params.add(param)

        if len(set_fixed_params) > 0:
            print(
                f'# Multiple Experiments: The fixed parameters are:\n{set_fixed_params}'
            )

        set_params_across_blocks = self.all_params.difference(set_fixed_params)
        combined_model.parameter_linking = Constraint(self.experiments,
                                                      set_params_across_blocks,
                                                      rule=param_linking_rule)

        def wavelength_linking_rule(m, exp, wave, comp):
            prev_exp = None
            key = next(
                key for key, value in combined_model.map_exp_to_count.items()
                if value == exp)
            if key == 0:
                return Constraint.Skip
            else:
                for key, val in combined_model.map_exp_to_count.items():
                    if val == exp:
                        prev_exp = combined_model.map_exp_to_count[key - 1]
                if wave in self.all_wavelengths and prev_exp is not None:
                    if comp in combined_model.experiment[
                            prev_exp].mixture_components and comp in combined_model.experiment[
                                exp].mixture_components:
                        return getattr(
                            combined_model.experiment[exp],
                            self.__var.spectra_species)[wave, comp] == getattr(
                                combined_model.experiment[prev_exp],
                                self.__var.spectra_species)[wave, comp]
                    else:
                        return Constraint.Skip
                else:
                    return Constraint.Skip

        if shared_spectra:
            combined_model.spectra_linking = Constraint(
                self.experiments,
                self.all_wavelengths,
                self.all_species,
                rule=wavelength_linking_rule)

        # Add in experimental weights
        combined_model.objective = Objective(
            sense=minimize,
            expr=sum(b.error for b in combined_model.experiment[:]))

        self.model = combined_model

        models = {k: v.p_model for k, v in self.reaction_models.items()}
        self.param_names = define_free_parameters(models,
                                                  self.global_params_full,
                                                  kind='variable')
        self.param_names_full = define_free_parameters(models,
                                                       self.global_params_full,
                                                       kind='full')

        if covariance in ['k_aug', 'ipopt_sens']:

            tee = kwargs.pop('tee', False)
            solver_opts = kwargs.pop('solver_opts', dict())
            optimizer = None

            # At the moment k_aug is not working for this
            if covariance == 'k_aug':
                covariance = 'ipopt_sens'

            if covariance == 'ipopt_sens':
                if 'compute_red_hessian' not in solver_opts:
                    solver_opts['compute_red_hessian'] = 'yes'

                # Create the optimizer
                optimizer = SolverFactory(covariance)
                for key, val in solver_opts.items():
                    optimizer.options[key] = val

            self.covariance(covariance, optimizer)

        else:
            optimizer = SolverFactory('ipopt')
            optimizer.solve(combined_model, options=solver_opts, tee=tee)

        solver_results = {}

        for i in combined_model.experiment:
            solver_results[i] = ResultsObject()
            solver_results[i].load_from_pyomo_model(
                combined_model.experiment[i])
            if self.rm_cov is not None:
                solver_results[i].parameter_covariance = self.rm_cov[i]

            #setattr(solver_results[i], 'variances', self.rm_variances[i])

        self.results = solver_results
        print('\n# Multiple Experiments: Parameter estimation complete\n')

        return solver_results
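
# Illustrative call settings (assumption, not from the original source): the
# keys mirror the kwargs read above via kwargs.get; the values are placeholders.
# `reaction_set` and 'k1' below are hypothetical names.
consolidated_kwargs = {
    'solver': 'ipopt',
    'solver_opts': {'linear_solver': 'ma57'},
    'tee': True,
    'scaled_variance': False,
    'shared_spectra': True,        # link S across experiments
    'covariance': None,            # or 'ipopt_sens' for a reduced-Hessian run
}
# results = reaction_set.solve_consolidated_model(global_params=['k1'],
#                                                 **consolidated_kwargs)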