Code Example #1
File: utilities.py  Project: arun-pn/dynamic-MBDoE
import casadi as cad
import numpy as np


def FIM_t(xpdot, b, criterion):
    '''
    Computes the Fisher information matrix (FIM) at a given time.
    b is the binary variable which selects (or not) each time point.
    '''
    n_x = 2
    n_theta = 4
    FIM_sample = np.zeros([n_theta, n_theta])
    # Prior term from a uniform parameter prior on [0.001, 10.0]:
    # the variance of U(a, b) is (b - a)^2 / 12.
    FIM_0 = cad.inv((((10.0 - 0.001)**2) / 12) * cad.SX.eye(n_theta))
    FIM_sample += FIM_0
    for i in range(np.shape(xpdot)[0] - 1):
        # Stacked sensitivities dx/dtheta at sample i + 1, reshaped to n_x x n_theta.
        xp_r = cad.reshape(xpdot[i + 1], (n_x, n_theta))
        # Each selected sample adds its information, weighted by the inverse
        # measurement covariance.
        FIM_sample += b[i] * xp_r.T @ np.linalg.inv(
            np.array([[0.01, 0], [0, 0.05]])) @ xp_r

    if criterion == 'D_optimal':
        # log det(FIM) via Cholesky factorisation:
        # log det(F) = 2 * sum(log(diag(chol(F))))
        objective = -2 * cad.sum1(cad.log(cad.diag(cad.chol(FIM_sample))))
    elif criterion == 'A_optimal':
        objective = -cad.log(cad.trace(FIM_sample) + 1e-10)
    else:
        raise ValueError('Unknown criterion: {}'.format(criterion))
    return objective
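
A minimal usage sketch with dummy data; n_time and the random xpdot below are illustrative placeholders, not values from the original project:

import casadi as cad
import numpy as np

n_time = 5
xpdot = np.random.rand(n_time + 1, 8)     # dummy sensitivities: 2 states x 4 parameters
b = cad.SX.sym('b', n_time)               # relaxed 0/1 sampling-point selectors
objective = FIM_t(xpdot, b, 'D_optimal')  # symbolic objective for an experiment-design NLP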
Code Example #2
import math

import numpy as np
import sobol_seq
from casadi import DM, chol, mtimes, vertcat
from scipy.stats import norm


def sample_parameter_normal_distribution_with_sobol(mean,
                                                    covariance,
                                                    n_samples=1):
    """Sample parameters using Sobol sampling with a normal distribution.

    :param mean: mean vector of the distribution (CasADi vector or list)
    :param covariance: covariance matrix of the distribution
    :param n_samples: number of samples to draw
    :return: DM matrix with one sample per column
    """
    if isinstance(mean, list):
        mean = vertcat(*mean)

    n_uncertain = mean.size1()

    # Uncertain parameter design: Sobol points on the unit hypercube,
    # mapped to N(0, 1) through the inverse normal CDF.
    sobol_design = sobol_seq.i4_sobol_generate(n_uncertain, n_samples,
                                               math.ceil(np.log2(n_samples)))
    sobol_samples = DM(sobol_design.T)
    for i in range(n_uncertain):
        sobol_samples[i, :] = norm(loc=0., scale=1).ppf(sobol_samples[i, :])

    unscaled_sample = DM.zeros(n_uncertain, n_samples)

    for i in range(n_samples):
        # chol() returns the upper factor R with R'R = covariance,
        # so R.T correlates the standard-normal samples.
        unscaled_sample[:, i] = mean + mtimes(
            chol(covariance).T, sobol_samples[:, i])

    return unscaled_sample
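
A hypothetical call drawing eight correlated samples from a two-dimensional normal distribution (the numbers are illustrative):

from casadi import DM

mean = DM([1.0, 2.0])
covariance = DM([[0.10, 0.02], [0.02, 0.20]])
samples = sample_parameter_normal_distribution_with_sobol(mean, covariance, 8)
print(samples.shape)  # (2, 8)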
Code Example #3
    def _get_sigma_points_and_weights(self, x_mean, x_cov):
        # Initialize variables
        sigma_points = []
        weights_m = []
        weights_c = []

        ell = self._ell

        # Tuning parameters (standard unscented-transform choices)
        alpha = 1e-3
        kappa = 0
        beta = 2
        # Scaling parameter: lambda = alpha^2 * (n + kappa) - n
        lamb = alpha**2 * (ell + kappa) - ell

        # Sigma points: the mean, then mean +/- the columns of the matrix
        # square root of (n + lambda) * covariance.
        sqr_root_matrix = chol((ell + lamb) * x_cov).T
        sigma_points.append(x_mean)
        for i in range(self.n_sigma_points - 1):
            ind = i % ell
            sign = 1 if i < ell else -1
            sigma_points.append(x_mean + sign * sqr_root_matrix[:, ind])

        # Weights for the mean (m) and covariance (c) estimates
        weights_m.append(lamb / (ell + lamb))
        weights_c.append(lamb / (ell + lamb) + (1 - alpha**2 + beta))
        for i in range(self.n_sigma_points - 1):
            weights_m.append(1 / (2 * (ell + lamb)))
            weights_c.append(1 / (2 * (ell + lamb)))

        return sigma_points, weights_m, weights_c
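
For reference, these quantities follow the standard unscented-transform definitions, with n the state dimension and P the covariance (textbook formulas, not anything project-specific):

\lambda = \alpha^2 (n + \kappa) - n, \qquad
\chi_0 = \bar{x}, \qquad
\chi_i = \bar{x} \pm \big(\sqrt{(n + \lambda)\, P}\big)_i,

W_0^{(m)} = \frac{\lambda}{n + \lambda}, \qquad
W_0^{(c)} = \frac{\lambda}{n + \lambda} + (1 - \alpha^2 + \beta), \qquad
W_i^{(m)} = W_i^{(c)} = \frac{1}{2(n + \lambda)}.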
Code Example #4
File: pce.py  Project: marcoaaguiar/yaocptool
    def _sample_parameters(self):
        n_samples = self.n_samples
        n_uncertain = self.n_uncertain

        mean = vertcat(self.socp.p_unc_mean,
                       self.socp.uncertain_initial_conditions_mean)

        if self.socp.n_p_unc > 0 and self.socp.n_uncertain_initial_condition > 0:
            covariance = diagcat(self.socp.p_unc_cov,
                                 self.socp.uncertain_initial_conditions_cov)
        elif self.socp.n_p_unc > 0:
            covariance = self.socp.p_unc_cov
        elif self.socp.n_uncertain_initial_condition > 0:
            covariance = self.socp.uncertain_initial_conditions_cov
        else:
            raise ValueError("No uncertainties found n_p_unc = {}, "
                             "n_uncertain_initial_condition={}".format(
                                 self.socp.n_p_unc,
                                 self.socp.n_uncertain_initial_condition))

        dist = self.socp.p_unc_dist + self.socp.uncertain_initial_conditions_distribution

        for d in dist:
            if d != 'normal':
                raise NotImplementedError(
                    'Distribution "{}" not implemented, only "normal" is available.'
                    .format(d))

        # Draw standard-normal Sobol samples once, then correlate them with
        # the Cholesky factor of the full covariance: x_s = mean + chol(cov).T @ eps_s.
        sampled_epsilon = sample_parameter_normal_distribution_with_sobol(
            DM.zeros(mean.shape), DM.eye(covariance.shape[0]), n_samples)
        sampled_parameters = SX.zeros(n_uncertain, n_samples)
        for s in range(n_samples):
            sampled_parameters[:, s] = mean + mtimes(sampled_epsilon[:, s].T,
                                                     chol(covariance)).T
        return sampled_parameters
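
Both this method and the sampler in Code Example #2 rely on the same identity: CasADi's chol() returns the upper triangular factor R with R^T R = Sigma, so transforming standard-normal draws by R^T reproduces the target covariance:

x_s = \mu + R^{\top} \varepsilon_s, \qquad \varepsilon_s \sim \mathcal{N}(0, I), \qquad
\operatorname{Cov}(x_s) = R^{\top} \mathbb{E}\big[\varepsilon_s \varepsilon_s^{\top}\big] R = R^{\top} R = \Sigma.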
Code Example #5
import casadi as cad
import numpy as np

# xp_opt, w_opt, n_x and n_theta come from the preceding optimisation run
# (not shown in this snippet).
FIM_t = []
FIM1_det = []
FIM1_trace = []
FIM_det = []
FIM_trace = []
FIM1_mineigv = []
FIM1_maxeigv = []
for i in range(np.shape(xp_opt)[1] - 1):
    xp_r = np.reshape(xp_opt[:, i + 1], (2, 4))
    # Per-sample information contribution, weighted by the inverse measurement covariance.
    FIM_t += [xp_r.T @ np.linalg.inv(np.array([[0.01, 0], [0, 0.05]])) @ xp_r]

obsFIM = []
for i in range(np.shape(FIM_t)[0]):
    # Cumulative observed FIM up to sample i, including the uniform-prior term.
    obsFIM += [
        np.linalg.inv((((10.0 - 0.001)**2) / 12) * np.identity(n_theta)) +
        sum(FIM_t[:i + 1])
    ]

for i in range(np.shape(xp_opt)[1] - 1):
    # log det via Cholesky (CasADi and NumPy variants), trace, and extreme eigenvalues.
    FIM1_det += [2 * cad.sum1(cad.log(cad.diag(cad.chol(obsFIM[i]))))]
    FIM1_trace += [cad.trace(obsFIM[i])]
    FIM_det += [2 * np.sum(np.log(np.diag(np.linalg.cholesky(obsFIM[i]))))]
    FIM_trace += [np.trace(obsFIM[i])]
    FIM1_mineigv += [min(np.linalg.eig(obsFIM[i])[0])]
    FIM1_maxeigv += [max(np.linalg.eig(obsFIM[i])[0])]

# Parameter covariance estimate and t-values for parameter significance.
V_theta = np.linalg.inv(sum(FIM_t[i] for i in range(len(FIM_t))))
theta1 = w_opt[n_x + n_x * n_theta:n_x + n_x * n_theta + n_theta]

t_value = np.zeros(n_theta)
for i in range(n_theta):
    t_value[i] = theta1[i] / np.sqrt(V_theta[i, i])

FIM1_trace_array = np.array(FIM1_trace)
FIM1_det_array = np.array(FIM1_det)
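
The t-values computed above follow the usual definition used to judge parameter significance in model-based design of experiments, with the parameter covariance estimated as the inverse of the accumulated FIM:

t_i = \frac{\hat{\theta}_i}{\sqrt{V_{ii}}}, \qquad V = \Big(\sum_k F_k\Big)^{-1}.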
Code Example #6
import casadi as ca


def calc_NLL(hyper, X, Y, squaredist, meanFunc='zero', prior=None):
    """ Objective function

    Calculate the negative log likelihood function using CasADi SX symbols.

    # Arguments:
        hyper: Array with hyperparameters [ell_1 .. ell_Nx sf sn], where Nx is the
            number of inputs to the GP.
        X: Training data matrix with inputs of size (N x Nx).
        Y: Training data matrix with outputs of size (N x Ny),
            with Ny the number of outputs.

    # Returns:
        NLL: The negative log likelihood function (scalar)
    """

    N, Nx = ca.MX.size(X)
    ell = hyper[:Nx]        # length scales
    sf2 = hyper[Nx]**2      # signal variance
    sn2 = hyper[Nx + 1]**2  # noise variance

    m = get_mean_function(hyper, X.T, func=meanFunc)

    # Calculate covariance matrix
    K_s = ca.SX.sym('K_s', N, N)
    sqdist = ca.SX.sym('sqd', N, N)
    elli = ca.SX.sym('elli')
    ki = ca.Function('ki', [sqdist, elli, K_s], [sqdist / elli**2 + K_s])
    K1 = ca.MX(N, N)
    for i in range(Nx):
        K1 = ki(squaredist[:, (i * N):(i + 1) * N], ell[i], K1)

    sf2_s = ca.SX.sym('sf2')
    exponent = ca.SX.sym('exp', N, N)
    K_exp = ca.Function('K', [exponent, sf2_s],
                        [sf2_s * ca.SX.exp(-.5 * exponent)])
    K2 = K_exp(K1, sf2)

    K = K2 + sn2 * ca.MX.eye(N)
    K = (K + K.T) * 0.5  # Make sure the matrix is symmetric

    A = ca.SX.sym('A', ca.MX.size(K))
    cholesky = ca.Function('cholesky', [A], [ca.chol(A).T])
    L = cholesky(K)

    # log det(K) = 2 * sum(log(diag(L))), with L the Cholesky factor of K
    B = 2 * ca.sum1(ca.SX.log(ca.diag(A)))
    log_determinant = ca.Function('log_det', [A], [B])
    log_detK = log_determinant(L)

    Y_s = ca.SX.sym('Y', ca.MX.size(Y))
    L_s = ca.SX.sym('L', ca.Sparsity.lower(N))
    sol = ca.Function('sol', [L_s, Y_s], [ca.solve(L_s, Y_s)])
    invLy = sol(L, Y - m(X.T))

    invLy_s = ca.SX.sym('invLy', ca.MX.size(invLy))
    sol2 = ca.Function('sol2', [L_s, invLy_s], [ca.solve(L_s.T, invLy_s)])
    alpha = sol2(L, invLy)

    alph = ca.SX.sym('alph', ca.MX.size(alpha))
    detK = ca.SX.sym('det')

    # Calculate hyperpriors
    theta = ca.SX.sym('theta')
    mu = ca.SX.sym('mu')
    s2 = ca.SX.sym('s2')
    prior_gauss = ca.Function(
        'hyp_prior', [theta, mu, s2],
        [-(theta - mu)**2 / (2 * s2) - 0.5 * ca.log(2 * ca.pi * s2)])
    log_prior = 0
    if prior is not None:
        for i in range(Nx):
            log_prior += prior_gauss(ell[i], prior['ell_mean'],
                                     prior['ell_std']**2)
        log_prior += prior_gauss(sf2, prior['sf_mean'], prior['sf_std']**2)
        log_prior += prior_gauss(sn2, prior['sn_mean'], prior['sn_std']**2)

    NLL = ca.Function('NLL', [Y_s, alph, detK],
                      [0.5 * ca.mtimes(Y_s.T, alph) + 0.5 * detK])
    # Subtract the log-prior so that minimising the result yields a MAP estimate.
    return NLL(Y - m(X.T), alpha, log_detK) - log_prior
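
For reference, the quantity assembled above is the standard GP marginal negative log likelihood, evaluated through the Cholesky factor L of K (the additive constant is dropped):

\mathrm{NLL} = \tfrac{1}{2}\, \tilde{y}^{\top} K^{-1} \tilde{y} + \tfrac{1}{2} \log\det K, \qquad
\tilde{y} = y - m(X), \qquad
\log\det K = 2 \sum_i \log L_{ii}, \qquad
K^{-1}\tilde{y} = L^{-\top}\big(L^{-1}\tilde{y}\big).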
Code Example #7
File: mpc_class.py  Project: zhangwjjj/GP-MPC
    def __init__(self,
                 horizon,
                 model,
                 gp=None,
                 Q=None,
                 P=None,
                 R=None,
                 S=None,
                 lam=None,
                 lam_state=None,
                 ulb=None,
                 uub=None,
                 xlb=None,
                 xub=None,
                 terminal_constraint=None,
                 feedback=True,
                 percentile=None,
                 gp_method='TA',
                 costFunc='quad',
                 solver_opts=None,
                 discrete_method='gp',
                 inequality_constraints=None,
                 num_con_par=0,
                 hybrid=None,
                 Bd=None,
                 Bf=None):
        """ Initialize and build the MPC solver

        # Arguments:
            horizon: Prediction horizon with control inputs
            model: System model

        # Optional Arguments:
            gp: GP model
            Q: State penalty matrix, default=diag(1,...,1)
            P: Terminal penalty matrix, default=diag(1,...,1);
                if feedback is True, then P is the solution of the DARE
                and this option is discarded.
            R: Input penalty matrix, default=diag(1,...,1)*0.01
            S: Input rate of change penalty matrix, default=diag(1,...,1)*0.1
            lam: Slack variable penalty for constraints, default=1000
            lam_state: Slack variable penalty for violation of upper/lower
                        state bounds, default=None
            ulb: Lower bound on the input
            uub: Upper bound on the input
            xlb: Lower bound on the state
            xub: Upper bound on the state
            terminal_constraint: Terminal condition on the state
                    * if None: No terminal constraint is used
                    * if zero: Terminal state is equal to zero
                    * if nonzero: Terminal state is bounded within +/- the constraint
                    * if not None and feedback is True, then the expected value of
                        the Lyapunov function E{x^TPx} < terminal_constraint
                        is used as a terminal constraint.
            feedback: If true, use an LQR feedback function u = Kx + v
            percentile: Measure of how much constraint violation is allowed,
                        P(X in constrained set) > percentile,
                        percentile = 1 - probability of violation,
                        default=0.95
            gp_method: Method of propagating the uncertainty
                    Possible options:
                        'TA': Second order Taylor approximation
                        'ME': Mean equivalent approximation

            costFunc: Cost function to use in the objective
                    'quad': Expected value of quadratic cost
                    'sat':  Expected value of saturating cost
            solver_opts: Additional options to pass to the NLP solver
                    e.g.: solver_opts['print_time'] = False
                          solver_opts['ipopt.tol'] = 1e-8
            discrete_method: 'gp' -  Gaussian process model
                             'rk4' - Runge-Kutta 4 integrator
                             'exact' - CVODES or IDAS (for ODEs or DAEs)
                             'hybrid' - GP model for dynamic equations, and RK4
                                        for kinematic equations
                             'd_hybrid' - Same as above, without uncertainty
                             'f_hybrid' - GP estimating modelling errors, with
                                          RK4 computing the actual model
            num_con_par: Number of parameters to pass to the inequality function
            inequality_constraints: Additional inequality constraints
                    Use a function with inputs (x, covar, u, eps)
                    that returns a dictionary with inequality constraints and limits,
                        e.g. cons = dict(con_ineq=con_ineq_array,
                                         con_ineq_lb=con_ineq_lb_array,
                                         con_ineq_ub=con_ineq_ub_array
                                    )

        # NOTES:
            * Differentiation of Sundials integrators is not supported with the SX
                graph, meaning that the solver option 'expand' must be set to False
                so that the MX graph is used with the 'exact' discrete method.
            * At the moment the f_hybrid option is not fully implemented...
        """

        build_solver_time = -time.time()
        dt = model.sampling_time()
        Ny, Nu, Np = model.size()
        Nx = Nu + Ny
        Nt = int(horizon / dt)

        self.__dt = dt
        self.__Nt = Nt
        self.__Ny = Ny
        self.__Nx = Nx
        self.__Nu = Nu
        self.__num_con_par = num_con_par
        self.__model = model
        self.__hybrid = hybrid
        self.__gp = gp
        self.__feedback = feedback
        self.__discrete_method = discrete_method
        """ Default penalty values """
        if P is None:
            P = np.eye(Ny)
        if Q is None:
            Q = np.eye(Ny)
        if R is None:
            R = np.eye(Nu) * 0.01
        if S is None:
            S = np.eye(Nu) * 0.1
        if lam is None:
            lam = 1000

        self.__Q = Q
        self.__P = P
        self.__R = R
        self.__S = S
        self.__Bd = Bd
        self.__Bf = Bf

        if xub is None:
            xub = np.full((Ny), np.inf)
        if xlb is None:
            xlb = np.full((Ny), -np.inf)
        if uub is None:
            uub = np.full((Nu), np.inf)
        if ulb is None:
            ulb = np.full((Nu), -np.inf)
        """ Default percentile probability """
        if percentile is None:
            percentile = 0.95
        quantile_x = np.ones(Ny) * norm.ppf(percentile)
        quantile_u = np.ones(Nu) * norm.ppf(percentile)
        Hx = ca.MX.eye(Ny)
        Hu = ca.MX.eye(Nu)
        """ Create parameter symbols """
        mean_0_s = ca.MX.sym('mean_0', Ny)
        mean_ref_s = ca.MX.sym('mean_ref', Ny)
        u_0_s = ca.MX.sym('u_0', Nu)
        covariance_0_s = ca.MX.sym('covariance_0', Ny * Ny)
        K_s = ca.MX.sym('K', Nu * Ny)
        P_s = ca.MX.sym('P', Ny * Ny)
        con_par = ca.MX.sym('con_par', num_con_par)
        param_s = ca.vertcat(mean_0_s, mean_ref_s, covariance_0_s, u_0_s, K_s,
                             P_s, con_par)
        """ Select wich GP function to use """
        if discrete_method is 'gp':
            self.__gp.set_method(gp_method)
#TODO:Fix
        if solver_opts['expand'] is not False and discrete_method is 'exact':
            raise TypeError(
                "Can't use exact discrete system with expanded graph")
        """ Initialize state variance with the GP noise variance """
        if gp is not None:
            #TODO: Cannot use gp variance with hybrid model
            self.__variance_0 = np.full((Ny), 1e-10)  #gp.noise_variance()
        else:
            self.__variance_0 = np.full((Ny), 1e-10)
        """ Define which cost function to use """
        self.__set_cost_function(costFunc, mean_ref_s, P_s.reshape((Ny, Ny)))
        """ Feedback function """
        mean_s = ca.MX.sym('mean', Ny)
        v_s = ca.MX.sym('v', Nu)
        if feedback:
            u_func = ca.Function(
                'u', [mean_s, mean_ref_s, v_s, K_s],
                [v_s + ca.mtimes(K_s.reshape((Nu, Ny)), mean_s - mean_ref_s)])
        else:
            u_func = ca.Function('u', [mean_s, mean_ref_s, v_s, K_s], [v_s])
        self.__u_func = u_func
        """ Create variables struct """
        var = ctools.struct_symMX([(
            ctools.entry('mean', shape=(Ny, ), repeat=Nt + 1),
            ctools.entry('L',
                         shape=(int((Ny**2 - Ny) / 2 + Ny), ),
                         repeat=Nt + 1),
            ctools.entry('v', shape=(Nu, ), repeat=Nt),
            ctools.entry('eps', shape=(3, ), repeat=Nt + 1),
            ctools.entry('eps_state', shape=(Ny, ), repeat=Nt + 1),
        )])
        num_slack = 3  #TODO: Make this a little more dynamic...
        num_state_slack = Ny
        self.__var = var
        self.__num_var = var.size

        # Decision variable boundaries
        self.__varlb = var(-np.inf)
        self.__varub = var(np.inf)
        """ Adjust hard boundries """
        for t in range(Nt + 1):
            j = Ny
            k = 0
            for i in range(Ny):
                # Lower bound on the diagonal of the Cholesky factor
                self.__varlb['L', t, k] = 0
                k += j
                j -= 1
            self.__varlb['eps', t] = 0
            self.__varlb['eps_state', t] = 0
            if xub is not None:
                self.__varub['mean', t] = xub
            if xlb is not None:
                self.__varlb['mean', t] = xlb
            if lam_state is None:
                self.__varub['eps_state'] = 0
        """ Input covariance matrix """
        if discrete_method == 'hybrid':
            N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
            Nz_gp = Ny_gp + Nu_gp
            covar_d_sx = ca.SX.sym('cov_d', Ny_gp, Ny_gp)
            K_sx = ca.SX.sym('K', Nu, Ny)
            covar_u_func = ca.Function(
                'cov_u',
                [covar_d_sx, K_sx],
                #                                       [K_sx @ covar_d_sx @ K_sx.T])
                [ca.SX(Nu, Nu)])
            covar_s = ca.SX(Nz_gp, Nz_gp)
            covar_s[:Ny_gp, :Ny_gp] = covar_d_sx
            #            covar_s = ca.blockcat(covar_x_s, cov_xu, cov_xu.T, cov_u)
            covar_func = ca.Function('covar', [covar_d_sx], [covar_s])
        elif discrete_method == 'f_hybrid':
            #TODO: Fix this...
            N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
            Nz_gp = Ny_gp + Nu_gp
            #            covar_x_s = ca.MX.sym('covar_x', Ny_gp, Ny_gp)
            covar_d_sx = ca.SX.sym('cov_d', Ny_gp, Ny_gp)
            K_sx = ca.SX.sym('K', Nu, Ny)
            #
            covar_u_func = ca.Function(
                'cov_u',
                [covar_d_sx, K_sx],
                #                                       [K_sx @ covar_d_sx @ K_sx.T])
                [ca.SX(Nu, Nu)])
            #            cov_xu_func = ca.Function('cov_xu', [covar_x_sx, K_sx],
            #                                      [covar_x_sx @ K_sx.T])
            #            cov_xu = cov_xu_func(covar_x_s, K_s.reshape((Nu, Ny)))
            #            cov_u = covar_u_func(covar_x_s, K_s.reshape((Nu, Ny)))
            covar_s = ca.SX(Nz_gp, Nz_gp)
            covar_s[:Ny_gp, :Ny_gp] = covar_d_sx
            #            covar_s = ca.blockcat(covar_x_s, cov_xu, cov_xu.T, cov_u)
            covar_func = ca.Function('covar', [covar_d_sx], [covar_s])
        else:
            covar_x_s = ca.MX.sym('covar_x', Ny, Ny)
            covar_x_sx = ca.SX.sym('cov_x', Ny, Ny)
            K_sx = ca.SX.sym('K', Nu, Ny)
            covar_u_func = ca.Function('cov_u', [covar_x_sx, K_sx],
                                       [K_sx @ covar_x_sx @ K_sx.T])
            cov_xu_func = ca.Function('cov_xu', [covar_x_sx, K_sx],
                                      [covar_x_sx @ K_sx.T])
            cov_xu = cov_xu_func(covar_x_s, K_s.reshape((Nu, Ny)))
            cov_u = covar_u_func(covar_x_s, K_s.reshape((Nu, Ny)))
            covar_s = ca.blockcat(covar_x_s, cov_xu, cov_xu.T, cov_u)
            covar_func = ca.Function('covar', [covar_x_s], [covar_s])
        """ Hybrid output covariance matrix """
        if discrete_method == 'hybrid':
            N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
            covar_d_sx = ca.SX.sym('covar_d', Ny_gp, Ny_gp)
            covar_x_sx = ca.SX.sym('covar_x', Ny, Ny)
            u_s = ca.SX.sym('u', Nu)

            cov_x_next_s = ca.SX(Ny, Ny)
            cov_x_next_s[:Ny_gp, :Ny_gp] = covar_d_sx
            #TODO: Missing kinematic states
            covar_x_next_func = ca.Function(
                'cov',
                #[mean_s, u_s, covar_d_sx, covar_x_sx],
                [covar_d_sx],
                [cov_x_next_s])
            """ f_hybrid output covariance matrix """
        elif discrete_method == 'f_hybrid':
            N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
            #            Nz_gp = Ny_gp + Nu_gp
            covar_d_sx = ca.SX.sym('covar_d', Ny_gp, Ny_gp)
            covar_x_sx = ca.SX.sym('covar_x', Ny, Ny)
            #            L_x       = ca.SX.sym('L', ca.Sparsity.lower(Ny))
            #            L_d       = ca.SX.sym('L', ca.Sparsity.lower(3))
            mean_s = ca.SX.sym('mean', Ny)
            u_s = ca.SX.sym('u', Nu)

            #            A_f     = hybrid.rk4_jacobian_x(mean_s[Ny_gp:], mean_s[:Ny_gp])
            #            B_f     = hybrid.rk4_jacobian_u(mean_s[Ny_gp:], mean_s[:Ny_gp])
            #            C       = ca.horzcat(A_f, B_f)
            #            cov = ca.blocksplit(covar_x_s, Ny_gp, Ny_gp)
            #            cov[-1][-1] = covar_d_sx
            #            cov_i = ca.blockcat(cov)
            #            cov_f   =  C @ cov_i @ C.T
            #            cov[0][0] = cov_f

            cov_x_next_s = ca.SX(Ny, Ny)
            cov_x_next_s[:Ny_gp, :Ny_gp] = covar_d_sx
            #            cov_x_next_s[Ny_gp:, Ny_gp:] =
            #TODO: Pre-solve the GP jacobian using the initial condition in the solve iteration
            #            jac_mean  = ca.SX(Ny_gp, Ny)
            #            jac_mean = self.__gp.jacobian(mean_s[:Ny_gp], u_s, 0)
            #            A = ca.horzcat(jac_f, Bd)
            #            jac = Bf @ jac_f @ Bf.T + Bd @ jac_mean @ Bd.T

            #            temp = jac_mean @ covar_x_s
            #            temp = jac_mean @ L_s
            #            cov_i = ca.SX(Ny + 3, Ny + 3)
            #            cov_i[:Ny,:Ny] = covar_x_s
            #            cov_i[Ny:, Ny:] = covar_d_s
            #            cov_i[Ny:, :Ny] = temp
            #            cov_i[:Ny, Ny:] = temp.T
            #TODO: This is just a new TA implementation... CLEAN UP...
            covar_x_next_func = ca.Function(
                'cov',
                [mean_s, u_s, covar_d_sx, covar_x_sx],
                #TODO: Clean up
                #                                            [A @ cov_i @ A.T])
                #                                            [Bd @ covar_d_s @ Bd.T + jac @ covar_x_s @ jac.T])
                #                                             [ca.blockcat(cov)])
                [cov_x_next_s])
            # Cholesky factorization of covariance function

    #            S_x_next_func = ca.Function( 'S_x', [mean_s, u_s, covar_d_s, covar_x_s],
    #                                            [Bd @ covar_d_s + jac @ covar_x_s])

        L_s = ca.SX.sym('L', ca.Sparsity.lower(Ny))
        L_to_cov_func = ca.Function('cov', [L_s], [L_s @ L_s.T])
        covar_x_sx = ca.SX.sym('cov_x', Ny, Ny)
        cholesky = ca.Function('cholesky', [covar_x_sx],
                               [ca.chol(covar_x_sx).T])
        """ Set initial values """
        obj = ca.MX(0)
        con_eq = []
        con_ineq = []
        con_ineq_lb = []
        con_ineq_ub = []
        con_eq.append(var['mean', 0] - mean_0_s)
        L_0_s = ca.MX(ca.Sparsity.lower(Ny), var['L', 0])
        L_init = cholesky(covariance_0_s.reshape((Ny, Ny)))
        con_eq.append(L_0_s.nz[:] - L_init.nz[:])
        u_past = u_0_s
        """ Build constraints """
        for t in range(Nt):
            # Input to GP
            mean_t = var['mean', t]
            u_t = u_func(mean_t, mean_ref_s, var['v', t], K_s)
            L_x = ca.MX(ca.Sparsity.lower(Ny), var['L', t])
            covar_x_t = L_to_cov_func(L_x)

            if discrete_method == 'hybrid':
                N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
                covar_t = covar_func(covar_x_t[:Ny_gp, :Ny_gp])
            elif discrete_method == 'd_hybrid':
                N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
                covar_t = ca.MX(Ny_gp + Nu_gp, Ny_gp + Nu_gp)
            elif discrete_method == 'gp':
                covar_t = covar_func(covar_x_t)
            else:
                covar_t = ca.MX(Nx, Nx)
            """ Select the chosen integrator """
            if discrete_method == 'rk4':
                mean_next_pred = model.rk4(mean_t, u_t, [])
                covar_x_next_pred = ca.MX(Ny, Ny)
            elif discrete_method == 'exact':
                mean_next_pred = model.Integrator(x0=mean_t, p=u_t)['xf']
                covar_x_next_pred = ca.MX(Ny, Ny)
            elif discrete_method == 'd_hybrid':
                # Deterministic hybrid GP model
                N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
                mean_d, covar_d = self.__gp.predict(mean_t[:Ny_gp], u_t,
                                                    covar_t)
                mean_next_pred = ca.vertcat(
                    mean_d, hybrid.rk4(mean_t[Ny_gp:], mean_t[:Ny_gp], []))
                covar_x_next_pred = ca.MX(Ny, Ny)
            elif discrete_method == 'hybrid':
                # Hybrid GP model
                N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
                mean_d, covar_d = self.__gp.predict(mean_t[:Ny_gp], u_t,
                                                    covar_t)
                mean_next_pred = ca.vertcat(
                    mean_d, hybrid.rk4(mean_t[Ny_gp:], mean_t[:Ny_gp], []))
                #covar_x_next_pred = covar_x_next_func(mean_t, u_t, covar_d,
                #                                        covar_x_t)
                covar_x_next_pred = covar_x_next_func(covar_d)
            elif discrete_method == 'f_hybrid':
                #TODO: Hybrid GP model estimating model error
                N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
                mean_d, covar_d = self.__gp.predict(mean_t[:Ny_gp], u_t,
                                                    covar_t)
                mean_next_pred = ca.vertcat(
                    mean_d, hybrid.rk4(mean_t[Ny_gp:], mean_t[:Ny_gp], []))
                covar_x_next_pred = covar_x_next_func(mean_t, u_t, covar_d,
                                                      covar_x_t)
            else:  # Use GP as default
                mean_next_pred, covar_x_next_pred = self.__gp.predict(
                    mean_t, u_t, covar_t)
            """ Continuity constraints """
            mean_next = var['mean', t + 1]
            con_eq.append(mean_next_pred - mean_next)

            L_x_next = ca.MX(ca.Sparsity.lower(Ny), var['L', t + 1])
            covar_x_next = L_to_cov_func(L_x_next).reshape((Ny * Ny, 1))
            L_x_next_pred = cholesky(covar_x_next_pred)
            con_eq.append(L_x_next_pred.nz[:] - L_x_next.nz[:])
            """ Chance state constraints """
            cons = self.__constraint(mean_next, L_x_next, Hx, quantile_x, xub,
                                     xlb, var['eps_state', t])
            con_ineq.extend(cons['con'])
            con_ineq_lb.extend(cons['con_lb'])
            con_ineq_ub.extend(cons['con_ub'])
            """ Input constraints """
            #            cov_u = covar_u_func(covar_x_t, K_s.reshape((Nu, Ny)))
            cov_u = ca.MX(Nu, Nu)
            #            cons = self.__constraint(u_t, cov_u, Hu, quantile_u, uub, ulb)
            #            con_ineq.extend(cons['con'])
            #            con_ineq_lb.extend(cons['con_lb'])
            #            con_ineq_ub.extend(cons['con_ub'])
            if uub is not None:
                con_ineq.append(u_t)
                con_ineq_ub.extend(uub)
                con_ineq_lb.append(np.full((Nu, ), -ca.inf))
            if ulb is not None:
                con_ineq.append(u_t)
                con_ineq_ub.append(np.full((Nu, ), ca.inf))
                con_ineq_lb.append(ulb)
            """ Add extra constraints """
            if inequality_constraints is not None:
                cons = inequality_constraints(var['mean', t + 1], covar_x_next,
                                              u_t, var['eps', t], con_par)
                con_ineq.extend(cons['con_ineq'])
                con_ineq_lb.extend(cons['con_ineq_lb'])
                con_ineq_ub.extend(cons['con_ineq_ub'])
            """ Objective function """
            u_delta = u_t - u_past
            obj += self.__l_func(var['mean', t], covar_x_t, u_t, cov_u, u_delta) \
                    + np.full((1, num_slack),lam) @ var['eps', t]
            if lam_state is not None:
                obj += np.full(
                    (1, num_state_slack), lam_state) @ var['eps_state', t]
            u_past = u_t  # carry the current input to the next rate-of-change term
        L_x = ca.MX(ca.Sparsity.lower(Ny), var['L', Nt])
        covar_x_t = L_to_cov_func(L_x)
        obj += self.__lf_func(var['mean', Nt], covar_x_t, P_s.reshape((Ny, Ny))) \
            + np.full((1, num_slack),lam) @ var['eps', Nt]
        if lam_state is not None:
            obj += np.full(
                (1, num_state_slack), lam_state) @ var['eps_state', Nt]

        num_eq_con = ca.vertcat(*con_eq).size1()
        num_ineq_con = ca.vertcat(*con_ineq).size1()
        con_eq_lb = np.zeros((num_eq_con, ))
        con_eq_ub = np.zeros((num_eq_con, ))
        """ Terminal contraint """
        if terminal_constraint is not None and not feedback:
            con_ineq.append(var['mean', Nt] - mean_ref_s)
            num_ineq_con += Ny
            con_ineq_lb.append(np.full((Ny, ), -terminal_constraint))
            con_ineq_ub.append(np.full((Ny, ), terminal_constraint))
        elif terminal_constraint is not None and feedback:
            con_ineq.append(
                self.__lf_func(var['mean', Nt], covar_x_t, P_s.reshape(
                    (Ny, Ny))))
            num_ineq_con += 1
            con_ineq_lb.append(0)
            con_ineq_ub.append(terminal_constraint)
        con = ca.vertcat(*con_eq, *con_ineq)
        self.__conlb = ca.vertcat(con_eq_lb, *con_ineq_lb)
        self.__conub = ca.vertcat(con_eq_ub, *con_ineq_ub)
        """ Build solver object """
        nlp = dict(x=var, f=obj, g=con, p=param_s)
        options = {
            'ipopt.print_level': 0,
            'ipopt.mu_init': 0.01,
            'ipopt.tol': 1e-8,
            'ipopt.warm_start_init_point': 'yes',
            'ipopt.warm_start_bound_push': 1e-9,
            'ipopt.warm_start_bound_frac': 1e-9,
            'ipopt.warm_start_slack_bound_frac': 1e-9,
            'ipopt.warm_start_slack_bound_push': 1e-9,
            'ipopt.warm_start_mult_bound_push': 1e-9,
            'ipopt.mu_strategy': 'adaptive',
            'print_time': False,
            'verbose': False,
            'expand': True
        }
        if solver_opts is not None:
            options.update(solver_opts)
        self.__solver = ca.nlpsol('mpc_solver', 'ipopt', nlp, options)

        # First prediction used in the NLP, used in plot later
        self.__var_prediction = np.zeros((Nt + 1, Ny))
        self.__mean_prediction = np.zeros((Nt + 1, Ny))
        self.__mean = None

        build_solver_time += time.time()
        print('\n________________________________________')
        print('# Time to build mpc solver: %f sec' % build_solver_time)
        print('# Number of variables: %d' % self.__num_var)
        print('# Number of equality constraints: %d' % num_eq_con)
        print('# Number of inequality constraints: %d' % num_ineq_con)
        print('----------------------------------------')
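
A hypothetical instantiation, assuming the surrounding GP-MPC package supplies a system `model` and a trained `gp` object; the class name MPC and all values here are illustrative, inferred from mpc_class.py rather than confirmed by the source:

mpc = MPC(horizon=10.0, model=model, gp=gp,
          discrete_method='gp', gp_method='TA',
          ulb=np.array([-1.0]), uub=np.array([1.0]),
          solver_opts={'ipopt.max_iter': 100})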
Code Example #8
File: pce.py  Project: marcoaaguiar/yaocptool
from itertools import product
from math import ceil, factorial

import numpy as np
import sobol_seq
from casadi import DM, Function, SX, chol, mtimes, solve
from scipy.stats import norm


def get_ls_factor(n_uncertain, n_samples, pc_order, lamb=0.0):
    # Uncertain parameter design: Sobol points mapped to N(0, 1)
    sobol_design = sobol_seq.i4_sobol_generate(n_uncertain, n_samples,
                                               ceil(np.log2(n_samples)))
    sobol_samples = np.transpose(sobol_design)
    for i in range(n_uncertain):
        sobol_samples[i, :] = norm(loc=0., scale=1).ppf(sobol_samples[i, :])

    # Polynomial function definition: probabilists' Hermite polynomials He_0..He_10
    x = SX.sym('x')
    he0fcn = Function('He0fcn', [x], [1.])
    he1fcn = Function('He1fcn', [x], [x])
    he2fcn = Function('He2fcn', [x], [x**2 - 1])
    he3fcn = Function('He3fcn', [x], [x**3 - 3 * x])
    he4fcn = Function('He4fcn', [x], [x**4 - 6 * x**2 + 3])
    he5fcn = Function('He5fcn', [x], [x**5 - 10 * x**3 + 15 * x])
    he6fcn = Function('He6fcn', [x], [x**6 - 15 * x**4 + 45 * x**2 - 15])
    he7fcn = Function('He7fcn', [x], [x**7 - 21 * x**5 + 105 * x**3 - 105 * x])
    he8fcn = Function('He8fcn', [x],
                      [x**8 - 28 * x**6 + 210 * x**4 - 420 * x**2 + 105])
    he9fcn = Function('He9fcn', [x],
                      [x**9 - 36 * x**7 + 378 * x**5 - 1260 * x**3 + 945 * x])
    he10fcn = Function(
        'He10fcn', [x],
        [x**10 - 45 * x**8 + 630 * x**6 - 3150 * x**4 + 4725 * x**2 - 945])
    helist = [
        he0fcn, he1fcn, he2fcn, he3fcn, he4fcn, he5fcn, he6fcn, he7fcn, he8fcn,
        he9fcn, he10fcn
    ]

    # Calculation of the factor for least squares: enumerate all multi-indices
    # with total degree <= pc_order, dropping the all-zero (constant) index.
    xu = SX.sym("xu", n_uncertain)
    exps = (p for p in product(range(pc_order + 1), repeat=n_uncertain)
            if sum(p) <= pc_order)
    next(exps)
    exps = list(exps)

    # Multivariate basis: psi[0] = 1; each remaining entry is a product of
    # univariate Hermite polynomials selected by its multi-index.
    psi = SX.ones(
        int(
            factorial(n_uncertain + pc_order) /
            (factorial(n_uncertain) * factorial(pc_order))))
    for i in range(len(exps)):
        for j in range(n_uncertain):
            psi[i + 1] *= helist[exps[i][j]](xu[j])
    psi_fcn = Function('PSIfcn', [xu], [psi])

    nparameter = SX.size(psi)[0]
    psi_matrix = SX.zeros(n_samples, nparameter)
    for i in range(n_samples):
        psi_a = psi_fcn(sobol_samples[:, i])
        for j in range(SX.size(psi)[0]):
            psi_matrix[i, j] = psi_a[j]

    # Regularised normal equations solved via Cholesky:
    # (Psi'Psi + lamb*I)^(-1) = R^(-1) R^(-T), with R the upper Cholesky factor.
    psi_t_psi = mtimes(psi_matrix.T, psi_matrix) + lamb * DM.eye(nparameter)
    chol_psi_t_psi = chol(psi_t_psi)
    inv_chol_psi_t_psi = solve(chol_psi_t_psi, SX.eye(nparameter))
    inv_psi_t_psi = mtimes(inv_chol_psi_t_psi, inv_chol_psi_t_psi.T)

    ls_factor = mtimes(inv_psi_t_psi, psi_matrix.T)
    ls_factor = DM(ls_factor)

    # Calculation of expectations for variance function
    n_sample_expectation_vector = 100000
    x_sample = np.random.multivariate_normal(np.zeros(n_uncertain),
                                             np.eye(n_uncertain),
                                             n_sample_expectation_vector)
    psi_squared_sum = DM.zeros(SX.size(psi)[0])
    for i in range(n_sample_expectation_vector):
        psi_squared_sum += psi_fcn(x_sample[i, :])**2
    expectation_vector = psi_squared_sum / n_sample_expectation_vector

    return ls_factor, expectation_vector, psi_fcn
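
The factor returned here implements the regularised least-squares projection onto the polynomial chaos basis: given model evaluations y at the sample points, the chaos coefficients follow as

c = \big(\Psi^{\top}\Psi + \lambda I\big)^{-1} \Psi^{\top} y, \qquad
\Psi_{ij} = \psi_j(\xi_i),

so ls_factor is exactly the matrix multiplying y.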