Example #1
def new_model(
        # Initial conditions
        x_b0=0, y_b0=0, z_b0=0, vx_b0=10, vy_b0=5, vz_b0=15,
        x_c0=20, y_c0=5, vx_c0=0, vy_c0=0,
        # Initial covariance
        S0=ca.diagcat([0.1, 0.1, 0, 0.1, 0.1, 0,
                       1e-2, 1e-2, 1e-2, 1e-2, 1e-2, 1e-2]) * 0.25,
        # Hypercovariance weight
        L0_weight=1e-5,
        # Mass of the ball
        mass=0.15,
        # Discretization time step
        dt=0.1,
        # Number of Runge-Kutta integration intervals per time step
        n_rk=10,
        # Reaction time (in units of dt)
        n_delay=1,
        # System noise weight
        M_weight=1e-3,
        # Observation noise
        N_min=1e-2,  # when looking directly at the ball
        N_max=1e0,   # when the ball is 90 degrees from the gaze direction
        # Final cost: w_cl * distance_between_ball_and_catcher
        w_cl=1e3,
        # Running cost on controls: u.T * R * u
        R=1e0 * ca.diagcat([1e1, 1e0, 1e0, 1e-1]),
        # Final cost of uncertainty: w_Sl * tr(S)
        w_Sl=1e3,
        # Running cost of uncertainty: w_S * tr(S)
        w_S=1e2,
        # Control limits
        F_c1=7.5, F_c2=2.5,
        w_max=2 * ca.pi,
        psi_max=0.8 * ca.pi/2,
):
    phi0 = ca.arctan2(y_b0-y_c0, x_b0-x_c0)  # direction towards the ball
    if phi0 < 0:
        phi0 += 2 * ca.pi
    psi0 = 0

    # Initial mean
    m0 = ca.DMatrix([x_b0, y_b0, z_b0, vx_b0, vy_b0, vz_b0,
                     x_c0, y_c0, vx_c0, vy_c0, phi0, psi0])

    # Hypercovariance
    L0 = ca.DMatrix.eye(m0.size()) * L0_weight

    # System noise matrix
    M = ca.DMatrix.eye(m0.size()) * M_weight

    # Catcher dynamics is less noisy
    M[-6:, -6:] = ca.DMatrix.eye(6) * 1e-5

    return Model((m0, S0, L0), dt, n_rk, n_delay, (M, N_min, N_max),
                 (w_cl, R, w_Sl, w_S), (F_c1, F_c2, w_max, psi_max))
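A minimal call sketch for the factory above (hypothetical values; the Model class is assumed to come from the same project as the snippet):

# Hypothetical usage of new_model(); only a few defaults are overridden,
# the rest keep the values defined in the signature above.
model = new_model(dt=0.05,    # finer discretization step
                  n_delay=2,  # longer reaction time (in units of dt)
                  w_cl=5e2)   # softer terminal ball-catcher distance cost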
Example #2
    def set_initial_condition_as_uncertain(self,
                                           par,
                                           mean,
                                           cov,
                                           distribution='normal'):
        par = vertcat(par)
        mean = vertcat(mean)
        cov = vertcat(cov)

        if not par.numel() == mean.numel():
            raise ValueError('Size of "par" and "mean" differ. par.numel()={} '
                             'and mean.numel()={}'.format(
                                 par.numel(), mean.numel()))
        if not cov.shape == (mean.numel(), mean.numel()):
            raise ValueError(
                'The input "cov" is not a square matrix of same size as "mean". '
                'cov.shape={} and mean.numel()={}'.format(
                    cov.shape, mean.numel()))

        self.uncertain_initial_conditions = vertcat(
            self.uncertain_initial_conditions, par)
        self.uncertain_initial_conditions_mean = vertcat(
            self.uncertain_initial_conditions_mean, mean)
        self.uncertain_initial_conditions_distribution = (
            self.uncertain_initial_conditions_distribution
            + [distribution] * par.numel())

        # Append the new covariance block on the diagonal, handling the case
        # where the accumulated covariance is still empty.
        if cov.numel() > 0 and self.uncertain_initial_conditions_cov.numel() > 0:
            self.uncertain_initial_conditions_cov = diagcat(
                self.uncertain_initial_conditions_cov, cov)
        elif cov.numel() > 0:
            self.uncertain_initial_conditions_cov = cov
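The covariance bookkeeping above (keep the block as-is when the accumulator is empty, otherwise block-diagonally concatenate) can be exercised on its own; a small standalone sketch against CasADi 3.x:

from casadi import DM, diagcat

cov_a = DM([[0.01, 0.0], [0.0, 0.04]])   # 2x2 block for one uncertain group
cov_b = DM([[0.09]])                     # 1x1 block for another

acc = DM(0, 0)                           # accumulated covariance starts empty
for cov in (cov_a, cov_b):
    # same branching as in set_initial_condition_as_uncertain above
    acc = cov if acc.numel() == 0 else diagcat(acc, cov)
print(acc)                               # 3x3 block-diagonal covariance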
Example #3
    def set_parameter_as_uncertain_parameter(self,
                                             par,
                                             mean,
                                             cov,
                                             distribution='normal'):
        par = vertcat(par)
        mean = vertcat(mean)
        cov = vertcat(cov)

        if not par.numel() == mean.numel():
            raise ValueError('Size of "par" and "mean" differ. par.numel()={} '
                             'and mean.numel()={}'.format(
                                 par.numel(), mean.numel()))
        if not cov.shape == (mean.numel(), mean.numel()):
            raise ValueError(
                'The input "cov" is not a square matrix of same size as "mean". '
                'cov.shape={} and mean.numel()={}'.format(
                    cov.shape, mean.numel()))

        self.p_unc = vertcat(self.p_unc, par)
        self.p_unc_mean = vertcat(self.p_unc_mean, mean)
        # TODO: Workaround for a casadi diagcat bug; remove when patched.
        if self.p_unc_cov.numel() == 0:
            self.p_unc_cov = cov
        else:
            self.p_unc_cov = diagcat(self.p_unc_cov, cov)
        self.p_unc_dist = self.p_unc_dist + [distribution] * par.numel()
Example #4
    def set_augmented_discrete_system(self):
        """
        Pendulum dynamics with integral action.

        :param x: state
        :type x: casadi.DM
        :param u: control input
        :type u: casadi.DM
        """

        # Grab equilibrium dynamics
        Ad_eq = self.Ad(self.x_eq, self.u_eq, self.w)
        Bd_eq = self.Bd(self.x_eq, self.u_eq, self.w)
        Bw_eq = self.Bw(self.x_eq, self.u_eq, self.w)

        # Instantiate augmented system
        self.Ad_i = ca.DM.zeros(5,5)
        self.Bd_i = ca.DM.zeros(5,1)
        self.Bw_i = ca.DM.zeros(5,1)
        self.Cd_i = ca.DM.zeros(1,5)
        self.R_i = ca.DM.zeros(5,1)

        # Populate matrices
        self.Ad_i = ca.diagcat(Ad_eq, ca.DM(1))
        self.Ad_i[4,0:4] = -self.dt * self.Cd_eq
        self.Bd_i = ca.vertcat(Bd_eq, ca.DM(0))
        self.Bw_i = ca.vertcat(Bw_eq, ca.DM(0))
        self.Cd_i = ca.horzcat(self.Cd_eq, ca.DM(1))
        self.R_i[4,0] = self.dt
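A standalone numeric sketch of the same augmentation pattern, using a placeholder 2-state system instead of the pendulum matrices assumed above:

import casadi as ca

dt = 0.05
Ad = ca.DM([[1.0, dt], [0.0, 1.0]])   # placeholder discrete-time A
Bd = ca.DM([[0.0], [dt]])             # placeholder discrete-time B
Cd = ca.DM([[1.0, 0.0]])              # placeholder output row

# Integral state: x_i[k+1] = x_i[k] - dt * Cd * x[k] + dt * r[k]
Ad_i = ca.diagcat(Ad, ca.DM(1))
Ad_i[2, 0:2] = -dt * Cd
Bd_i = ca.vertcat(Bd, ca.DM(0))
Cd_i = ca.horzcat(Cd, ca.DM(1))
R_i = ca.DM.zeros(3, 1)
R_i[2, 0] = dt                        # reference enters through the integrator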
Example #5
    def _sample_parameters(self):
        n_samples = self.n_samples
        n_uncertain = self.n_uncertain

        mean = vertcat(self.socp.p_unc_mean,
                       self.socp.uncertain_initial_conditions_mean)

        if self.socp.n_p_unc > 0 and self.socp.n_uncertain_initial_condition > 0:
            covariance = diagcat(self.socp.p_unc_cov,
                                 self.socp.uncertain_initial_conditions_cov)
        elif self.socp.n_p_unc > 0:
            covariance = self.socp.p_unc_cov
        elif self.socp.n_uncertain_initial_condition > 0:
            covariance = self.socp.uncertain_initial_conditions_cov
        else:
            raise ValueError("No uncertainties found n_p_unc = {}, "
                             "n_uncertain_initial_condition={}".format(
                                 self.socp.n_p_unc,
                                 self.socp.n_uncertain_initial_condition))

        dist = self.socp.p_unc_dist + self.socp.uncertain_initial_conditions_distribution

        for d in dist:
            if not d == 'normal':
                raise NotImplementedError(
                    'Distribution "{}" not implemented, only "normal" is available.'
                    .format(d))

        sampled_epsilon = sample_parameter_normal_distribution_with_sobol(
            DM.zeros(mean.shape), DM.eye(covariance.shape[0]), n_samples)
        sampled_parameters = SX.zeros(n_uncertain, n_samples)
        for s in range(n_samples):
            sampled_parameters[:, s] = mean + mtimes(sampled_epsilon[:, s].T,
                                                     chol(covariance)).T
        return sampled_parameters
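The helper sample_parameter_normal_distribution_with_sobol is assumed to be defined elsewhere in that codebase; the underlying transform (standard-normal draws mapped through a Cholesky factor and shifted by the mean) can be sketched with plain NumPy:

import numpy as np

mean = np.array([1.0, 2.0])
cov = np.array([[0.04, 0.0], [0.0, 0.09]])

eps = np.random.standard_normal((2, 5))               # 5 standard-normal draws
samples = mean[:, None] + np.linalg.cholesky(cov) @ eps
print(samples.shape)                                  # (2, 5): one column per sample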
Example #6
def diagcat(inputobj):

    return ca.diagcat(inputobj)
Example #7
vy_b0 = 4
vz_b0 = 15

x_c0 = 22
y_c0 = 7
vx_c0 = vy_c0 = 0
phi0 = ca.arctan2(y_b0-y_c0, x_b0-x_c0)  # direction towards the ball
if phi0 < 0:
    phi0 += 2 * ca.pi
psi0 = 0

# Initial mean
m0 = ca.DMatrix([x_b0, y_b0, z_b0, vx_b0, vy_b0, vz_b0,
                 x_c0, y_c0, vx_c0, vy_c0, phi0, psi0])
# Initial covariance
S0 = ca.diagcat([0.2, 0.2, 0, 0.5, 0.5, 0,
                 1e-2, 1e-2, 1e-2, 1e-2, 1e-2, 1e-2]) * 0.25
# Hypercovariance
L0 = ca.DMatrix.eye(m0.size()) * 1e-5
# Discretization step
dt = 0.1
# Number of Runge-Kutta integration intervals per time step
n_rk = 10
# Reaction time (in units of dt)
n_delay = 2
# System noise matrix
M = ca.DMatrix.eye(m0.size()) * 1e-3
M[-6:, -6:] = ca.DMatrix.eye(6) * 1e-5  # catcher's dynamics is less noisy
# Observation noise
N_min = 1e-2  # when looking directly at the ball
N_max = 1e0   # when the ball is 90 degrees from the gaze direction
# Final cost: w_cl * distance_between_ball_and_catcher
Example #8
def diagcat(inputobj):

    return ca.diagcat(*inputobj)
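The two wrappers in Examples #6 and #8 reflect an API difference between CasADi versions: recent releases expect the blocks as separate arguments, so a list has to be unpacked as in Example #8. A short check against CasADi 3.x:

import casadi as ca

blocks = [ca.DM([[1.0]]), ca.DM([[2.0, 0.0], [0.0, 3.0]])]
D = ca.diagcat(*blocks)   # unpack the list of blocks
print(D)                  # 3x3 block-diagonal matrix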
Example #9
F = ca.SXFunction('Discrete dynamics',
                  [state, control, dt_sym], [state_next], op)
Fj_x = F.jacobian('state')
Fj_u = F.jacobian('control')
F_xx = ca.SXFunction('F_xx', [state, control, dt_sym],
                     [ca.jacobian(F.jac('state')[i, :].T, state) for i in
                      range(nx)])
F_uu = ca.SXFunction('F_uu', [state, control, dt_sym],
                     [ca.jacobian(F.jac('control')[i, :].T, control) for i in
                      range(nx)])
F_ux = ca.SXFunction('F_ux', [state, control, dt_sym],
                     [ca.jacobian(F.jac('control')[i, :].T, state) for i in
                      range(nx)])

# Cost functions
Qf = ca.diagcat([1., 1., 0.])
final_cost = 0.5 * ca.mul([state.cat.T, Qf, state.cat])
op = {'input_scheme': ['state'],
      'output_scheme': ['cost']}
lf = ca.SXFunction('Final cost', [state], [final_cost], op)

R = ca.DMatrix.eye(nu) * dt * 1e-2
cost = 0.5 * ca.mul([control.cat.T, R, control.cat])
op = {'input_scheme': ['state', 'control', 'dt'],
      'output_scheme': ['cost']}
l = ca.SXFunction('Running cost', [state, control, dt_sym], [cost], op)

# Quadratic expansions of costs
lfg = lf.gradient('state')          # lf_x
lfh = lf.hessian('state')           # lf_xx
Example #10
# Extended belief (mu, Sigma, Lambda) for plotting
ext_belief = cat.struct_symSX([
        cat.entry('m',struct=state),
        cat.entry('S',shapestruct=(state,state)),
        cat.entry('L',shapestruct=(state,state))
    ])


# %% =========================================================================
#                            Initial conditions
# ============================================================================

# State
m0 = ca.DMatrix([0., 0., 5., 4.,
                 15., 0., ca.pi/2])
S0 = ca.diagcat([1., 1., 1., 1.,
                 1., 1., 1e-2]) * 0.25
L0 = ca.DMatrix.eye(nx) * 1e-3
L0[-1,-1] = 1e-5

mu0 = np.array(m0).ravel() # to get samples from normal distribution

b0 = belief()
b0['m'] = m0
b0['S'] = ca.densify(S0)

eb0 = ext_belief()
eb0['m'] = m0
eb0['S'] = ca.densify(S0)
eb0['L'] = ca.densify(L0)

# Covariances
Example #11
F = ca.SXFunction('Discrete dynamics', [state, control, dt_sym], [state_next],
                  op)
Fj_x = F.jacobian('state')
Fj_u = F.jacobian('control')
F_xx = ca.SXFunction(
    'F_xx', [state, control, dt_sym],
    [ca.jacobian(F.jac('state')[i, :].T, state) for i in range(nx)])
F_uu = ca.SXFunction(
    'F_uu', [state, control, dt_sym],
    [ca.jacobian(F.jac('control')[i, :].T, control) for i in range(nx)])
F_ux = ca.SXFunction(
    'F_ux', [state, control, dt_sym],
    [ca.jacobian(F.jac('control')[i, :].T, state) for i in range(nx)])

# Cost functions
Qf = ca.diagcat([1., 1., 0.])
final_cost = 0.5 * ca.mul([state.cat.T, Qf, state.cat])
op = {'input_scheme': ['state'], 'output_scheme': ['cost']}
lf = ca.SXFunction('Final cost', [state], [final_cost], op)

R = ca.DMatrix.eye(nu) * dt * 1e-2
cost = 0.5 * ca.mul([control.cat.T, R, control.cat])
op = {'input_scheme': ['state', 'control', 'dt'], 'output_scheme': ['cost']}
l = ca.SXFunction('Running cost', [state, control, dt_sym], [cost], op)

# Quadratic expansions of costs
lfg = lf.gradient('state')  # lf_x
lfh = lf.hessian('state')  # lf_xx

lg_x = l.gradient('state')  # l_x
lh_x = l.hessian('state')  # l_xx
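The SXFunction gradient/hessian helpers used in Examples #9 and #11 belong to the old CasADi 2.x API; a rough sketch of the same quadratic cost expansion with the current free functions (an illustration of a possible port, not the original code):

import casadi as ca

x = ca.SX.sym('x', 3)
Qf = ca.diagcat(1.0, 1.0, 0.0)
final_cost = 0.5 * ca.mtimes([x.T, Qf, x])

lf_x = ca.gradient(final_cost, x)        # gradient of the final cost
lf_xx, _ = ca.hessian(final_cost, x)     # Hessian (and gradient) of the final cost
lf_quad = ca.Function('lf_quad', [x], [final_cost, lf_x, lf_xx])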
Example #12
    def __init__(self, dae, t, order, method='legendre',
                 parallelization='serial', tdp_fun=None, expand=True,
                 repeat_param=False, options={}):

        """Constructor

        @param t time vector of length N+1 defining N collocation intervals
        @param order number of collocation points per interval
        @param method collocation method ('legendre', 'radau')
        @param dae DAE model
        @param parallelization parallelization of the outer map. Possible set of values is the same as for casadi.Function.map().

        @return Returns a dictionary with the following keys:
        'X' -- state at collocation points
        'Z' -- alg. state at collocation points
        'x0' -- initial state
        'eq' -- the expression eq == 0 defines the collocation equation. eq depends on X, Z, x0, p.
        'Q' -- quadrature values at collocation points depending on x0, X, Z, p.
        """

        # Convert whatever DAE to implicit DAE
        dae = dae.makeImplicit()

        M = order
        N = len(t) - 1

        #
        # Define variables and functions corresponding to all control intervals
        #

        K = cs.MX.sym('K', dae.nx, N * M)   # State derivatives at collocation points
        Z = cs.MX.sym('Z', dae.nz, N * M)   # Alg state at collocation points
        x = cs.MX.sym('x', dae.nx, N + 1)   # State at the ends of collocation intervals (t)

        u = cs.MX.sym('u', dae.nu, N)    # Input on collocation intervals
        U = cs.horzcat(*[cs.repmat(u[:, n], 1, M) for n in range(N)]) # Input at collocation points

        # Butcher tableau for the selected method
        butcher = butcherTableuForCollocationMethod(order, method)

        # Interval lengths
        h = np.diff(t)

        # Integrated state at collocation points
        Mx = cs.kron(cs.DM.eye(N), cs.DM.ones(1, M))
        MK = cs.kron(cs.diagcat(*h), butcher.A.T)    # integration matrix
        X = cs.mtimes(x[:, : -1], Mx) + cs.mtimes(K, MK)

        # Integrated state at the ends of collocation intervals
        xf = x[:, : -1] + cs.mtimes(K, cs.kron(cs.diagcat(*h), butcher.b))
        
        # Points in time at which the collocation equations are calculated
        # TODO: this possibly can be sped up a little bit.
        tc = np.hstack([t[n] + h[n] * butcher.c for n in range(N)])
        
        # Values of the time-dependent parameter
        if tdp_fun is not None:
            tdp_val = cs.horzcat(*[tdp_fun(t) for t in tc])
        else:
            assert dae.ntdp == 0
            tdp_val = np.zeros((0, tc.size))

        # DAE function
        dae_fun = dae.createFunction('dae', ['xdot', 'x', 'z', 'u', 'p', 't', 'tdp'], ['dae', 'quad'])
        if expand:
            dae_fun = dae_fun.expand()  # expand() for speed

        if repeat_param:
            reduce_in = []
            p = cs.MX.sym('P', dae.np, N * M)
        else:
            reduce_in = [4]
            p = cs.MX.sym('P', dae.np)

        dae_map = dae_fun.map('dae_map', parallelization, N * M, reduce_in, [], options)
        dae_out = dae_map(xdot=K, x=X, z=Z, u=U, p=p, t=tc, tdp=tdp_val)

        eqc = ce.struct_MX([
            ce.entry('collocation', expr=dae_out['dae']),
            ce.entry('continuity', expr=xf - x[:, 1 :]),
            ce.entry('param', expr=cs.diff(p, 1, 1))
        ])

        # Integrate the quadrature state
        quad = dae_out['quad']

        #t0 = time.time()
        q = [cs.MX.zeros(dae.nq)]  # Integrated quadrature at interval ends
        
        # TODO: speed up the calculation of q.
        for n in range(N):
            q.append(q[-1] + h[n] * cs.mtimes(quad[:, n * M : (n + 1) * M], butcher.b))

        q = cs.horzcat(*q)

        Q = cs.mtimes(q[:, : -1], Mx) + cs.mtimes(quad, MK)  # Integrated quadrature at collocation points
        #print('Creating Q took {0:.3f} s.'.format(time.time() - t0))

        self._N = N
        self._M = M

        self._eq = eqc
        self._x = x
        self._X = X
        self._K = K
        self._Z = Z
        self._U = U
        self._u = u
        self._quad = quad
        self._Q = Q
        self._q = q
        self._p = p
        self._tc = tc
        self._butcher = butcher
        self._tdp = tdp_val
        self._t = t
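The Mx/MK construction in Example #12 can be tried in isolation; a standalone sketch with a hand-written placeholder tableau (illustrative values only, not what butcherTableuForCollocationMethod returns):

import numpy as np
import casadi as cs

t = np.array([0.0, 0.1, 0.25])             # N = 2 collocation intervals
h = np.diff(t)
A = cs.DM([[0.25, 0.0], [0.5, 0.25]])      # placeholder Butcher A matrix (M = 2)

N, M = len(h), A.size1()
Mx = cs.kron(cs.DM.eye(N), cs.DM.ones(1, M))   # repeats interval-start states
MK = cs.kron(cs.diagcat(*h), A.T)              # scales derivatives by h and A^T
print(Mx.shape, MK.shape)                      # (2, 4) and (4, 4)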