Example #1
def _plot_control_solution(model, interval, initial_ys, us):
    """Plots a single shooting solution.
    
    Parameters:
    model      -- the model to simulate.
    interval   -- a tuple: (start_time, end_time)
    initial_ys -- the initial states at the beginning of the simulation.
    us         -- the constant control signal(s) used throughout the
                  simulation.
    """
    model.reset()
    model.x = initial_ys
    model.u = us

    p.figure(1)
    p.subplot(211)
    simulator = SundialsODESimulator(model, start_time=interval[0],
                                     final_time=interval[1])
    simulator.run()
    T, Y = simulator.get_solution()
    p.hold(True)
    for i in range(len(model.x)):
        p.plot(T, Y[:, i], label="State #%s" % (i + 1), linewidth=2)

    p.subplot(212)
    p.hold(True)
    for i in range(len(model.u)):
        p.plot(interval, [us[i], us[i]], label="Input #%s" % (i + 1))
    p.hold(False)
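Below is a minimal, hypothetical call sketch (not part of the original source) showing how _plot_control_solution might be invoked. It assumes a JMI model object named model exposing reset(), .x and .u as used above, that p is bound to pylab as in the surrounding module, and it uses made-up initial states and control values.

import numpy as N

initial_ys = N.array([0.0, 1.0, 0.0])   # assumed initial states for the segment
us = N.array([0.75])                    # assumed constant control over the interval
_plot_control_solution(model, (0.0, 5.0), initial_ys, us)
p.show()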
Example #2
def _eval_initial_ys(model, grid, time_step=0.2):
    """Generate a feasible initial guesstimate of the initial states for each
       segment in a grid.
       
    This is done by doing a simulation from start to end and extracting the
    states at the time points between the segments specified in the grid.
    
    Parameters:
    model -- the model which is to be simulated.
    grid  -- the segment grid list. Each element in grid corresponds to a
             segment and contains a tuple consisting of the start and end
             times of that segment.
    
    Keyword parameters:
    time_step -- the time step size used in the integration.
    
    """
    # TODO: Move this to MultipleShooter
    from scipy import interpolate
    _check_grid_consistency(grid)
    
    simulator = SundialsODESimulator(model, start_time=model.opt_interval_get_start_time(),
                                     final_time=model.opt_interval_get_final_time(),
                                     time_step=time_step)
    simulator.run()
    T, ys = simulator.get_solution()
    T = N.array(T)
    
    interp = interpolate.interp1d(T, ys, axis=0)
    initials = [interp(interval[0]).flatten() for interval in grid]
    initials = N.array(initials).flatten()
    return initials
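The interpolation step above can be isolated on synthetic data. The sketch below (illustrative only, with made-up trajectory and grid values) shows how scipy.interpolate.interp1d reads off the states at each segment's start time and stacks them into one flat vector, exactly as _eval_initial_ys does after its simulation.

import numpy as N
from scipy import interpolate

T = N.linspace(0.0, 1.0, 6)                   # dense simulation time points
ys = N.column_stack([N.sin(T), N.cos(T)])     # two synthetic state trajectories
grid = [(0.0, 0.5), (0.5, 1.0)]               # two shooting segments

interp = interpolate.interp1d(T, ys, axis=0)
initials = [interp(start).flatten() for (start, end) in grid]
initials = N.array(initials).flatten()
print(initials)    # states at t=0.0 and t=0.5 as one flat initial guess vector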
Example #3
    def test_optimization_cost_eval(self):
        """Test evaluation of optimization cost function."""
        simulator = SundialsODESimulator(self.m)
        simulator.run()
        T, ys = simulator.get_solution()

        self.vdp.set_x_p(ys[-1], 0)
        self.vdp.set_dx_p(self.vdp.dx, 0)
        cost = self.vdp.opt_eval_J()
        nose.tools.assert_not_equal(cost, 0)
Example #4
    def test_optimization_cost_jacobian(self):
        """Test evaluation of optimization cost function jacobian.
        
        Note:
        This test is model specific for the VDP oscillator.
        """
        simulator = SundialsODESimulator(self.m)
        simulator.run()
        T, ys = simulator.get_solution()

        self.vdp.set_x_p(ys[-1], 0)
        self.vdp.set_dx_p(self.vdp.dx, 0)
        jac = self.vdp.opt_eval_jac_J(jmi.JMI_DER_X_P)
        N.testing.assert_almost_equal(jac, [[0, 0, 1]])
Example #5
def _shoot(model, start_time, end_time, sensi=True, time_step=0.2):
    """Performs a single 'shot' (simulation) from start_time to end_time.
    
    Model parameters/states etc. must be set BEFORE calling this method.
    
    The function returns a tuple consisting of:
        1. The cost gradient with respect to the initial states and
           the input U.
        2. The final simulation states (the states at end_time).
        3. A dictionary holding the indices into the gradient.
        4. The corresponding sensitivity matrix (if sensi is not False).
    
    If sensi is set to False, no sensitivity analysis is done and the
    gradient, the index dictionary and the sensitivity matrix are all
    returned as None.
    
    Parameters:
    model      -- the model which is to be used in the shot (simulation).
    start_time -- the time when simulation should start.
    end_time   -- the time when the simulation should finish.
    
    Keyword parameters:
    sensi     -- True/False, whether sensitivity analysis should be
                 conducted. (default=True)
    time_step -- the time step to be taken within the integration code.
                 (default=0.2)
        
    Notes:
     * Assumes the cost function depends only on the state X and the control
       signal U.
    
    """
    simulator = SundialsODESimulator(model, start_time=start_time,
        final_time=end_time, sensitivity_analysis=sensi, time_step=time_step,
        return_last=True)
    simulator.run()
    T, last_y = simulator.get_solution()
    sens = simulator.get_sensitivities()
    params = simulator.get_sensitivity_indices()
    
    model.set_x_p(last_y, 0)
    model.set_dx_p(model.dx, 0)
    model.set_u_p(model.u, 0)
    
    if sensi:
        sens_rows = list(range(params.xinit_start, params.xinit_end)) + \
                    list(range(params.u_start, params.u_end))
        sens_mini = sens[sens_rows]
        gradparams = {
            'xinit_start': 0,
            'xinit_end': params.xinit_end - params.xinit_start,
            'u_start': params.xinit_end - params.xinit_start,
            'u_end': params.xinit_end - params.xinit_start + params.u_end - \
                     params.u_start,
        }
    
        cost_jac_x = model.opt_eval_jac_J(pyjmi.JMI_DER_X_P).flatten()
        cost_jac_u = model.opt_eval_jac_J(pyjmi.JMI_DER_U_P).flatten()
        cost_jac = N.concatenate([cost_jac_x, cost_jac_u])
        
        # See my master thesis report for the specifics of these calculations
        # Both lines below have been verified to work correctly
        costgradient_x = N.dot(sens[params.xinit_start:params.xinit_end, :],
                               cost_jac_x).flatten()
        costgradient_u = N.dot(sens[params.u_start:params.u_end, :],
                               cost_jac_x).flatten() + cost_jac_u
        
        # The full cost gradient w.r.t. the states and the input
        costgradient = N.concatenate([costgradient_x, costgradient_u])
    else:
        costgradient = None
        gradparams = None
        sens_mini = None
    
    # TODO: Create a return type instead of returning tuples
    return costgradient, last_y, gradparams, sens_mini
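The gradient assembly in the sensi branch is a chain rule: with S holding the sensitivities d x(t_f)/d p for the parameters p = (initial states, inputs), the cost gradient is S_x0 . dJ/dx(t_f) for the initial states and S_u . dJ/dx(t_f) + dJ/du for the inputs. The sketch below reproduces that assembly on small made-up arrays; it is illustrative only and not tied to any particular model.

import numpy as N

# Synthetic sensitivity matrix: rows are parameters (2 initial states, 1 input),
# columns are final states, i.e. sens[i, j] = d x_j(t_f) / d p_i.
sens = N.array([[1.0, 0.2],
                [0.0, 1.5],
                [0.3, 0.7]])
cost_jac_x = N.array([0.0, 1.0])   # dJ/dx(t_f): cost depends on the last state only
cost_jac_u = N.array([0.1])        # direct dependence of J on the input

costgradient_x = N.dot(sens[0:2, :], cost_jac_x).flatten()
costgradient_u = N.dot(sens[2:3, :], cost_jac_x).flatten() + cost_jac_u
costgradient = N.concatenate([costgradient_x, costgradient_u])
print(costgradient)    # gradient w.r.t. (x0_1, x0_2, u)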